From 59390ef374583211ea4c4b1143f95deb98b0ed95 Mon Sep 17 00:00:00 2001 From: Wilfried Goesgens Date: Mon, 28 May 2018 16:29:30 +0200 Subject: [PATCH] Doc - Improves fetch-script. Fresh Swagger. Sync external repos (#5463) --- .../Books/Drivers/JS/GettingStarted/README.md | 4 +- Documentation/Books/Drivers/JS/README.md | 3 +- .../JS/Reference/Collection/BulkImport.md | 101 + .../Collection/CollectionManipulation.md | 174 + .../Collection/DocumentCollection.md | 110 + .../Collection/DocumentManipulation.md | 313 ++ .../JS/Reference/Collection/EdgeCollection.md | 232 + .../JS/Reference/Collection/Indexes.md | 335 ++ .../Drivers/JS/Reference/Collection/README.md | 115 + .../JS/Reference/Collection/SimpleQueries.md | 167 + .../Books/Drivers/JS/Reference/Cursor.md | 300 ++ .../JS/Reference/Database/AqlUserFunctions.md | 83 + .../JS/Reference/Database/CollectionAccess.md | 103 + .../Database/DatabaseManipulation.md | 204 + .../JS/Reference/Database/FoxxServices.md | 675 +++ .../JS/Reference/Database/GraphAccess.md | 40 + .../JS/Reference/Database/HttpRoutes.md | 38 + .../Drivers/JS/Reference/Database/Queries.md | 126 + .../Drivers/JS/Reference/Database/README.md | 106 + .../JS/Reference/Database/Transactions.md | 100 + .../JS/Reference/Graph/EdgeCollection.md | 252 + .../Books/Drivers/JS/Reference/Graph/Edges.md | 165 + .../Drivers/JS/Reference/Graph/README.md | 71 + .../JS/Reference/Graph/VertexCollection.md | 90 + .../Drivers/JS/Reference/Graph/Vertices.md | 135 + .../Books/Drivers/JS/Reference/README.md | 4464 +---------------- .../Books/Drivers/JS/Reference/Route.md | 368 ++ .../Books/Drivers/Java/Reference/README.md | 10 +- Documentation/Books/Drivers/SUMMARY.md | 25 + .../SpringData/GettingStarted/README.md | 226 +- .../Books/Drivers/SpringData/README.md | 23 +- .../Drivers/SpringData/Reference/README.md | 1429 +++--- .../Kubernetes/DeploymentResource.md | 49 + .../put_api_simple_replace_by_example.md | 1 + .../put_api_simple_update_by_example.md | 1 + Documentation/Scripts/fetchRefs.sh | 4 +- .../system/_admin/aardvark/APP/api-docs.json | 622 +-- 37 files changed, 5678 insertions(+), 5586 deletions(-) create mode 100644 Documentation/Books/Drivers/JS/Reference/Collection/BulkImport.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Collection/CollectionManipulation.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Collection/DocumentCollection.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Collection/DocumentManipulation.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Collection/EdgeCollection.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Collection/Indexes.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Collection/README.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Collection/SimpleQueries.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Cursor.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Database/AqlUserFunctions.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Database/CollectionAccess.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Database/DatabaseManipulation.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Database/FoxxServices.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Database/GraphAccess.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Database/HttpRoutes.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Database/Queries.md create mode 100644 
Documentation/Books/Drivers/JS/Reference/Database/README.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Database/Transactions.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Graph/EdgeCollection.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Graph/Edges.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Graph/README.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Graph/VertexCollection.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Graph/Vertices.md create mode 100644 Documentation/Books/Drivers/JS/Reference/Route.md mode change 100644 => 100755 Documentation/Scripts/fetchRefs.sh diff --git a/Documentation/Books/Drivers/JS/GettingStarted/README.md b/Documentation/Books/Drivers/JS/GettingStarted/README.md index 17a2fe6001..9d9da93604 100644 --- a/Documentation/Books/Drivers/JS/GettingStarted/README.md +++ b/Documentation/Books/Drivers/JS/GettingStarted/README.md @@ -206,13 +206,13 @@ const db = new Database({ }); ``` -For AQL please check out the [`aql` template tag](../Reference/README.md#aql) for writing parametrized +For AQL please check out the [aql template tag](../Reference/Database/Queries.md#aql) for writing parametrized AQL queries without making your code vulnerable to injection attacks. ## Error responses If arangojs encounters an API error, it will throw an _ArangoError_ with an -[_errorNum_ as defined in the ArangoDB documentation](https://docs.arangodb.com/devel/Manual/Appendix/ErrorCodes.html) as well as a _code_ and _statusCode_ property indicating the intended and actual HTTP status code of the response. +[_errorNum_ as defined in the ArangoDB documentation](https://docs.arangodb.com/latest/Manual/Appendix/ErrorCodes.html) as well as a _code_ and _statusCode_ property indicating the intended and actual HTTP status code of the response. For any other error responses (4xx/5xx status code), it will throw an _HttpError_ error with the status code indicated by the _code_ and _statusCode_ properties. diff --git a/Documentation/Books/Drivers/JS/README.md b/Documentation/Books/Drivers/JS/README.md index 42ddbcd0da..dce8cb5c43 100644 --- a/Documentation/Books/Drivers/JS/README.md +++ b/Documentation/Books/Drivers/JS/README.md @@ -6,10 +6,11 @@ The official ArangoDB low-level JavaScript client. **Note:** if you are looking for the ArangoDB JavaScript API in [Foxx](https://foxx.arangodb.com) (or the `arangosh` interactive shell) please refer to the documentation about the -[`@arangodb` module](https://docs.arangodb.com/latest/Manual/Foxx/Modules.html#the-arangodb-module) +[`@arangodb` module](https://docs.arangodb.com/latest/Manual/Foxx/Modules/index.html#the-arangodb-module) instead; specifically the `db` object exported by the `@arangodb` module. The JavaScript driver is **only** meant to be used when accessing ArangoDB from **outside** the database. * [Getting Started](GettingStarted/README.md) * [Reference](Reference/README.md) +* [Changelog](https://github.com/arangodb/arangojs/blob/master/CHANGELOG.md#readme) diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/BulkImport.md b/Documentation/Books/Drivers/JS/Reference/Collection/BulkImport.md new file mode 100644 index 0000000000..87fce4d28d --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Collection/BulkImport.md @@ -0,0 +1,101 @@ + +# Bulk importing documents + +This function implements the +[HTTP API for bulk imports](https://docs.arangodb.com/latest/HTTP/BulkImports/index.html). 
+ +## collection.import + +`async collection.import(data, [opts]): Object` + +Bulk imports the given _data_ into the collection. + +**Arguments** + +* **data**: `Array> | Array` + + The data to import. This can be an array of documents: + + ```js + [ + {key1: value1, key2: value2}, // document 1 + {key1: value1, key2: value2}, // document 2 + ... + ] + ``` + + Or it can be an array of value arrays following an array of keys. + + ```js + [ + ['key1', 'key2'], // key names + [value1, value2], // document 1 + [value1, value2], // document 2 + ... + ] + ``` + +* **opts**: `Object` (optional) If _opts_ is set, it must be an object with any + of the following properties: + + * **waitForSync**: `boolean` (Default: `false`) + + Wait until the documents have been synced to disk. + + * **details**: `boolean` (Default: `false`) + + Whether the response should contain additional details about documents that + could not be imported.false\*. + + * **type**: `string` (Default: `"auto"`) + + Indicates which format the data uses. Can be `"documents"`, `"array"` or + `"auto"`. + +If _data_ is a JavaScript array, it will be transmitted as a line-delimited JSON +stream. If _opts.type_ is set to `"array"`, it will be transmitted as regular +JSON instead. If _data_ is a string, it will be transmitted as it is without any +processing. + +For more information on the _opts_ object, see +[the HTTP API documentation for bulk imports](https://docs.arangodb.com/latest/HTTP/BulkImports/ImportingSelfContained.html). + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('users'); + +// document stream +const result = await collection.import([ + {username: 'admin', password: 'hunter2'}, + {username: 'jcd', password: 'bionicman'}, + {username: 'jreyes', password: 'amigo'}, + {username: 'ghermann', password: 'zeitgeist'} +]); +assert.equal(result.created, 4); + +// -- or -- + +// array stream with header +const result = await collection.import([ + ['username', 'password'], // keys + ['admin', 'hunter2'], // row 1 + ['jcd', 'bionicman'], // row 2 + ['jreyes', 'amigo'], + ['ghermann', 'zeitgeist'] +]); +assert.equal(result.created, 4); + +// -- or -- + +// raw line-delimited JSON array stream with header +const result = await collection.import([ + '["username", "password"]', + '["admin", "hunter2"]', + '["jcd", "bionicman"]', + '["jreyes", "amigo"]', + '["ghermann", "zeitgeist"]' +].join('\r\n') + '\r\n'); +assert.equal(result.created, 4); +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/CollectionManipulation.md b/Documentation/Books/Drivers/JS/Reference/Collection/CollectionManipulation.md new file mode 100644 index 0000000000..36bf8a778e --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Collection/CollectionManipulation.md @@ -0,0 +1,174 @@ + +# Manipulating the collection + +These functions implement +[the HTTP API for modifying collections](https://docs.arangodb.com/latest/HTTP/Collection/Modifying.html). + +## collection.create + +`async collection.create([properties]): Object` + +Creates a collection with the given _properties_ for this collection's name, +then returns the server response. + +**Arguments** + +* **properties**: `Object` (optional) + + For more information on the _properties_ object, see + [the HTTP API documentation for creating collections](https://docs.arangodb.com/latest/HTTP/Collection/Creating.html). 
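+
+  As a rough sketch, a few commonly used properties might be passed like this
+  (hypothetical values; the exact set of supported keys depends on the server
+  version and storage engine):
+
+  ```js
+  const db = new Database();
+  const collection = db.collection('some-collection');
+  // hypothetical properties; see the HTTP API docs for the keys your server supports
+  await collection.create({
+    waitForSync: false, // do not wait for disk sync on every document change
+    keyOptions: {allowUserKeys: true} // documents may provide their own _key
+  });
+  ```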
+ +**Examples** + +```js +const db = new Database(); +const collection = db.collection('potatos'); +await collection.create() +// the document collection "potatos" now exists + +// -- or -- + +const collection = db.edgeCollection('friends'); +await collection.create({ + waitForSync: true // always sync document changes to disk +}); +// the edge collection "friends" now exists +``` + +## collection.load + +`async collection.load([count]): Object` + +Tells the server to load the collection into memory. + +**Arguments** + +* **count**: `boolean` (Default: `true`) + + If set to `false`, the return value will not include the number of documents + in the collection (which may speed up the process). + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +await collection.load(false) +// the collection has now been loaded into memory +``` + +## collection.unload + +`async collection.unload(): Object` + +Tells the server to remove the collection from memory. + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +await collection.unload() +// the collection has now been unloaded from memory +``` + +## collection.setProperties + +`async collection.setProperties(properties): Object` + +Replaces the properties of the collection. + +**Arguments** + +* **properties**: `Object` + + For information on the _properties_ argument see + [the HTTP API for modifying collections](https://docs.arangodb.com/latest/HTTP/Collection/Modifying.html). + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +const result = await collection.setProperties({waitForSync: true}) +assert.equal(result.waitForSync, true); +// the collection will now wait for data being written to disk +// whenever a document is changed +``` + +## collection.rename + +`async collection.rename(name): Object` + +Renames the collection. The _Collection_ instance will automatically update its +name when the rename succeeds. + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +const result = await collection.rename('new-collection-name') +assert.equal(result.name, 'new-collection-name'); +assert.equal(collection.name, result.name); +// result contains additional information about the collection +``` + +## collection.rotate + +`async collection.rotate(): Object` + +Rotates the journal of the collection. + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +const data = await collection.rotate(); +// data.result will be true if rotation succeeded +``` + +## collection.truncate + +`async collection.truncate(): Object` + +Deletes **all documents** in the collection in the database. + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +await collection.truncate(); +// the collection "some-collection" is now empty +``` + +## collection.drop + +`async collection.drop([properties]): Object` + +Deletes the collection from the database. + +**Arguments** + +* **properties**: `Object` (optional) + + An object with the following properties: + + * **isSystem**: `Boolean` (Default: `false`) + + Whether the collection should be dropped even if it is a system collection. + + This parameter must be set to `true` when dropping a system collection. 
+ + For more information on the _properties_ object, see + [the HTTP API documentation for dropping collections](https://docs.arangodb.com/latest/HTTP/Collection/Creating.html#drops-a-collection). + **Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +await collection.drop(); +// the collection "some-collection" no longer exists +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/DocumentCollection.md b/Documentation/Books/Drivers/JS/Reference/Collection/DocumentCollection.md new file mode 100644 index 0000000000..9bceff537e --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Collection/DocumentCollection.md @@ -0,0 +1,110 @@ + +# DocumentCollection API + +The _DocumentCollection API_ extends the +[_Collection API_](README.md) with the following methods. + +## documentCollection.document + +`async documentCollection.document(documentHandle): Object` + +Retrieves the document with the given _documentHandle_ from the collection. + +**Arguments** + +* **documentHandle**: `string` + + The handle of the document to retrieve. This can be either the `_id` or the + `_key` of a document in the collection, or a document (i.e. an object with an + `_id` or `_key` property). + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('my-docs'); + +try { + const doc = await collection.document('some-key'); + // the document exists + assert.equal(doc._key, 'some-key'); + assert.equal(doc._id, 'my-docs/some-key'); +} catch (err) { + // something went wrong or + // the document does not exist +} + +// -- or -- + +try { + const doc = await collection.document('my-docs/some-key'); + // the document exists + assert.equal(doc._key, 'some-key'); + assert.equal(doc._id, 'my-docs/some-key'); +} catch (err) { + // something went wrong or + // the document does not exist +} +``` + +## documentCollection.save + +`async documentCollection.save(data, [opts]): Object` + +Creates a new document with the given _data_ and returns an object containing +the document's metadata. + +**Arguments** + +* **data**: `Object` + + The data of the new document, may include a `_key`. + +* **opts**: `Object` (optional) + + If _opts_ is set, it must be an object with any of the following properties: + + * **waitForSync**: `boolean` (Default: `false`) + + Wait until document has been synced to disk. + + * **returnNew**: `boolean` (Default: `false`) + + If set to `true`, return additionally the complete new documents under the + attribute `new` in the result. + + * **silent**: `boolean` (Default: `false`) + + If set to true, an empty object will be returned as response. No meta-data + will be returned for the created document. This option can be used to save + some network traffic. + +If a boolean is passed instead of an options object, it will be interpreted as +the _returnNew_ option. + +For more information on the _opts_ object, see +[the HTTP API documentation for working with documents](https://docs.arangodb.com/latest/HTTP/Document/WorkingWithDocuments.html). 
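+
+As a brief sketch of the boolean shorthand mentioned above (equivalent to
+passing `{returnNew: true}`):
+
+```js
+const db = new Database();
+const collection = db.collection('my-docs');
+// passing `true` instead of an options object is shorthand for {returnNew: true}
+const result = await collection.save({some: 'data'}, true);
+// result.new contains the complete newly created document
+```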
+ +**Examples** + +```js +const db = new Database(); +const collection = db.collection('my-docs'); +const data = {some: 'data'}; +const info = await collection.save(data); +assert.equal(info._id, 'my-docs/' + info._key); +const doc2 = await collection.document(info) +assert.equal(doc2._id, info._id); +assert.equal(doc2._rev, info._rev); +assert.equal(doc2.some, data.some); + +// -- or -- + +const db = new Database(); +const collection = db.collection('my-docs'); +const data = {some: 'data'}; +const opts = {returnNew: true}; +const doc = await collection.save(data, opts) +assert.equal(doc1._id, 'my-docs/' + doc1._key); +assert.equal(doc1.new.some, data.some); +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/DocumentManipulation.md b/Documentation/Books/Drivers/JS/Reference/Collection/DocumentManipulation.md new file mode 100644 index 0000000000..59ff71b12a --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Collection/DocumentManipulation.md @@ -0,0 +1,313 @@ + +# Manipulating documents + +These functions implement the +[HTTP API for manipulating documents](https://docs.arangodb.com/latest/HTTP/Document/index.html). + +## collection.replace + +`async collection.replace(documentHandle, newValue, [opts]): Object` + +Replaces the content of the document with the given _documentHandle_ with the +given _newValue_ and returns an object containing the document's metadata. + +**Note**: The _policy_ option is not available when using the driver with +ArangoDB 3.0 as it is redundant when specifying the _rev_ option. + +**Arguments** + +* **documentHandle**: `string` + + The handle of the document to replace. This can either be the `_id` or the + `_key` of a document in the collection, or a document (i.e. an object with an + `_id` or `_key` property). + +* **newValue**: `Object` + + The new data of the document. + +* **opts**: `Object` (optional) + + If _opts_ is set, it must be an object with any of the following properties: + + * **waitForSync**: `boolean` (Default: `false`) + + Wait until the document has been synced to disk. Default: `false`. + + * **rev**: `string` (optional) + + Only replace the document if it matches this revision. + + * **policy**: `string` (optional) + + Determines the behaviour when the revision is not matched: + + * if _policy_ is set to `"last"`, the document will be replaced regardless + of the revision. + * if _policy_ is set to `"error"` or not set, the replacement will fail with + an error. + +If a string is passed instead of an options object, it will be interpreted as +the _rev_ option. + +For more information on the _opts_ object, see +[the HTTP API documentation for working with documents](https://docs.arangodb.com/latest/HTTP/Document/WorkingWithDocuments.html). + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +const data = {number: 1, hello: 'world'}; +const info1 = await collection.save(data); +const info2 = await collection.replace(info1, {number: 2}); +assert.equal(info2._id, info1._id); +assert.notEqual(info2._rev, info1._rev); +const doc = await collection.document(info1); +assert.equal(doc._id, info1._id); +assert.equal(doc._rev, info2._rev); +assert.equal(doc.number, 2); +assert.equal(doc.hello, undefined); +``` + +## collection.update + +`async collection.update(documentHandle, newValue, [opts]): Object` + +Updates (merges) the content of the document with the given _documentHandle_ +with the given _newValue_ and returns an object containing the document's +metadata. 
+ +**Note**: The _policy_ option is not available when using the driver with +ArangoDB 3.0 as it is redundant when specifying the _rev_ option. + +**Arguments** + +* **documentHandle**: `string` + + Handle of the document to update. This can be either the `_id` or the `_key` + of a document in the collection, or a document (i.e. an object with an `_id` + or `_key` property). + +* **newValue**: `Object` + + The new data of the document. + +* **opts**: `Object` (optional) + + If _opts_ is set, it must be an object with any of the following properties: + + * **waitForSync**: `boolean` (Default: `false`) + + Wait until document has been synced to disk. + + * **keepNull**: `boolean` (Default: `true`) + + If set to `false`, properties with a value of `null` indicate that a + property should be deleted. + + * **mergeObjects**: `boolean` (Default: `true`) + + If set to `false`, object properties that already exist in the old document + will be overwritten rather than merged. This does not affect arrays. + + * **returnOld**: `boolean` (Default: `false`) + + If set to `true`, return additionally the complete previous revision of the + changed documents under the attribute `old` in the result. + + * **returnNew**: `boolean` (Default: `false`) + + If set to `true`, return additionally the complete new documents under the + attribute `new` in the result. + + * **ignoreRevs**: `boolean` (Default: `true`) + + By default, or if this is set to true, the _rev attributes in the given + documents are ignored. If this is set to false, then any _rev attribute + given in a body document is taken as a precondition. The document is only + updated if the current revision is the one specified. + + * **rev**: `string` (optional) + + Only update the document if it matches this revision. + + * **policy**: `string` (optional) + + Determines the behaviour when the revision is not matched: + + * if _policy_ is set to `"last"`, the document will be replaced regardless + of the revision. + * if _policy_ is set to `"error"` or not set, the replacement will fail with + an error. + +If a string is passed instead of an options object, it will be interpreted as +the _rev_ option. + +For more information on the _opts_ object, see +[the HTTP API documentation for working with documents](https://docs.arangodb.com/latest/HTTP/Document/WorkingWithDocuments.html). + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +const doc = {number: 1, hello: 'world'}; +const doc1 = await collection.save(doc); +const doc2 = await collection.update(doc1, {number: 2}); +assert.equal(doc2._id, doc1._id); +assert.notEqual(doc2._rev, doc1._rev); +const doc3 = await collection.document(doc2); +assert.equal(doc3._id, doc2._id); +assert.equal(doc3._rev, doc2._rev); +assert.equal(doc3.number, 2); +assert.equal(doc3.hello, doc.hello); +``` + +## collection.bulkUpdate + +`async collection.bulkUpdate(documents, [opts]): Object` + +Updates (merges) the content of the documents with the given _documents_ and +returns an array containing the documents' metadata. + +**Note**: This method is new in 3.0 and is available when using the driver with +ArangoDB 3.0 and higher. + +**Arguments** + +* **documents**: `Array` + + Documents to update. Each object must have either the `_id` or the `_key` + property. 
+ +* **opts**: `Object` (optional) + + If _opts_ is set, it must be an object with any of the following properties: + + * **waitForSync**: `boolean` (Default: `false`) + + Wait until document has been synced to disk. + + * **keepNull**: `boolean` (Default: `true`) + + If set to `false`, properties with a value of `null` indicate that a + property should be deleted. + + * **mergeObjects**: `boolean` (Default: `true`) + + If set to `false`, object properties that already exist in the old document + will be overwritten rather than merged. This does not affect arrays. + + * **returnOld**: `boolean` (Default: `false`) + + If set to `true`, return additionally the complete previous revision of the + changed documents under the attribute `old` in the result. + + * **returnNew**: `boolean` (Default: `false`) + + If set to `true`, return additionally the complete new documents under the + attribute `new` in the result. + + * **ignoreRevs**: `boolean` (Default: `true`) + + By default, or if this is set to true, the _rev attributes in the given + documents are ignored. If this is set to false, then any _rev attribute + given in a body document is taken as a precondition. The document is only + updated if the current revision is the one specified. + +For more information on the _opts_ object, see +[the HTTP API documentation for working with documents](https://docs.arangodb.com/latest/HTTP/Document/WorkingWithDocuments.html). + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +const doc1 = {number: 1, hello: 'world1'}; +const info1 = await collection.save(doc1); +const doc2 = {number: 2, hello: 'world2'}; +const info2 = await collection.save(doc2); +const result = await collection.bulkUpdate([ + {_key: info1._key, number: 3}, + {_key: info2._key, number: 4} +], {returnNew: true}) +``` + +## collection.remove + +`async collection.remove(documentHandle, [opts]): Object` + +Deletes the document with the given _documentHandle_ from the collection. + +**Note**: The _policy_ option is not available when using the driver with +ArangoDB 3.0 as it is redundant when specifying the _rev_ option. + +**Arguments** + +* **documentHandle**: `string` + + The handle of the document to delete. This can be either the `_id` or the + `_key` of a document in the collection, or a document (i.e. an object with an + `_id` or `_key` property). + +* **opts**: `Object` (optional) + + If _opts_ is set, it must be an object with any of the following properties: + + * **waitForSync**: `boolean` (Default: `false`) + + Wait until document has been synced to disk. + + * **rev**: `string` (optional) + + Only update the document if it matches this revision. + + * **policy**: `string` (optional) + + Determines the behaviour when the revision is not matched: + + * if _policy_ is set to `"last"`, the document will be replaced regardless + of the revision. + * if _policy_ is set to `"error"` or not set, the replacement will fail with + an error. + +If a string is passed instead of an options object, it will be interpreted as +the _rev_ option. + +For more information on the _opts_ object, see +[the HTTP API documentation for working with documents](https://docs.arangodb.com/latest/HTTP/Document/WorkingWithDocuments.html). 
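+
+For instance, a sketch of a conditional removal using the _rev_ option
+described above (assuming the document was fetched earlier):
+
+```js
+const db = new Database();
+const collection = db.collection('some-collection');
+const doc = await collection.document('some-doc');
+// only removes the document if it still has the revision fetched above
+await collection.remove('some-doc', {rev: doc._rev});
+```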
+ +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); + +await collection.remove('some-doc'); +// document 'some-collection/some-doc' no longer exists + +// -- or -- + +await collection.remove('some-collection/some-doc'); +// document 'some-collection/some-doc' no longer exists +``` + +## collection.list + +`async collection.list([type]): Array` + +Retrieves a list of references for all documents in the collection. + +**Arguments** + +* **type**: `string` (Default: `"id"`) + + The format of the document references: + + * if _type_ is set to `"id"`, each reference will be the `_id` of the + document. + * if _type_ is set to `"key"`, each reference will be the `_key` of the + document. + * if _type_ is set to `"path"`, each reference will be the URI path of the + document. diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/EdgeCollection.md b/Documentation/Books/Drivers/JS/Reference/Collection/EdgeCollection.md new file mode 100644 index 0000000000..6287285dcd --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Collection/EdgeCollection.md @@ -0,0 +1,232 @@ + +# EdgeCollection API + +The _EdgeCollection API_ extends the +[_Collection API_](README.md) with the following methods. + +## edgeCollection.edge + +`async edgeCollection.edge(documentHandle): Object` + +Retrieves the edge with the given _documentHandle_ from the collection. + +**Arguments** + +* **documentHandle**: `string` + + The handle of the edge to retrieve. This can be either the `_id` or the `_key` + of an edge in the collection, or an edge (i.e. an object with an `_id` or + `_key` property). + +**Examples** + +```js +const db = new Database(); +const collection = db.edgeCollection('edges'); + +const edge = await collection.edge('some-key'); +// the edge exists +assert.equal(edge._key, 'some-key'); +assert.equal(edge._id, 'edges/some-key'); + +// -- or -- + +const edge = await collection.edge('edges/some-key'); +// the edge exists +assert.equal(edge._key, 'some-key'); +assert.equal(edge._id, 'edges/some-key'); +``` + +## edgeCollection.save + +`async edgeCollection.save(data, [fromId, toId]): Object` + +Creates a new edge between the documents _fromId_ and _toId_ with the given +_data_ and returns an object containing the edge's metadata. + +**Arguments** + +* **data**: `Object` + + The data of the new edge. If _fromId_ and _toId_ are not specified, the _data_ + needs to contain the properties __from_ and __to_. + +* **fromId**: `string` (optional) + + The handle of the start vertex of this edge. This can be either the `_id` of a + document in the database, the `_key` of an edge in the collection, or a + document (i.e. an object with an `_id` or `_key` property). + +* **toId**: `string` (optional) + + The handle of the end vertex of this edge. This can be either the `_id` of a + document in the database, the `_key` of an edge in the collection, or a + document (i.e. an object with an `_id` or `_key` property). 
+ +**Examples** + +```js +const db = new Database(); +const collection = db.edgeCollection('edges'); +const data = {some: 'data'}; + +const info = await collection.save( + data, + 'vertices/start-vertex', + 'vertices/end-vertex' +); +assert.equal(info._id, 'edges/' + info._key); +const edge = await collection.edge(edge) +assert.equal(edge._key, info._key); +assert.equal(edge._rev, info._rev); +assert.equal(edge.some, data.some); +assert.equal(edge._from, 'vertices/start-vertex'); +assert.equal(edge._to, 'vertices/end-vertex'); + +// -- or -- + +const info = await collection.save({ + some: 'data', + _from: 'verticies/start-vertex', + _to: 'vertices/end-vertex' +}); +// ... +``` + +## edgeCollection.edges + +`async edgeCollection.edges(documentHandle): Array` + +Retrieves a list of all edges of the document with the given _documentHandle_. + +**Arguments** + +* **documentHandle**: `string` + + The handle of the document to retrieve the edges of. This can be either the + `_id` of a document in the database, the `_key` of an edge in the collection, + or a document (i.e. an object with an `_id` or `_key` property). + +**Examples** + +```js +const db = new Database(); +const collection = db.edgeCollection('edges'); +await collection.import([ + ['_key', '_from', '_to'], + ['x', 'vertices/a', 'vertices/b'], + ['y', 'vertices/a', 'vertices/c'], + ['z', 'vertices/d', 'vertices/a'] +]) +const edges = await collection.edges('vertices/a'); +assert.equal(edges.length, 3); +assert.deepEqual(edges.map(edge => edge._key), ['x', 'y', 'z']); +``` + +## edgeCollection.inEdges + +`async edgeCollection.inEdges(documentHandle): Array` + +Retrieves a list of all incoming edges of the document with the given +_documentHandle_. + +**Arguments** + +* **documentHandle**: `string` + + The handle of the document to retrieve the edges of. This can be either the + `_id` of a document in the database, the `_key` of an edge in the collection, + or a document (i.e. an object with an `_id` or `_key` property). + +**Examples** + +```js +const db = new Database(); +const collection = db.edgeCollection('edges'); +await collection.import([ + ['_key', '_from', '_to'], + ['x', 'vertices/a', 'vertices/b'], + ['y', 'vertices/a', 'vertices/c'], + ['z', 'vertices/d', 'vertices/a'] +]); +const edges = await collection.inEdges('vertices/a'); +assert.equal(edges.length, 1); +assert.equal(edges[0]._key, 'z'); +``` + +## edgeCollection.outEdges + +`async edgeCollection.outEdges(documentHandle): Array` + +Retrieves a list of all outgoing edges of the document with the given +_documentHandle_. + +**Arguments** + +* **documentHandle**: `string` + + The handle of the document to retrieve the edges of. This can be either the + `_id` of a document in the database, the `_key` of an edge in the collection, + or a document (i.e. an object with an `_id` or `_key` property). + +**Examples** + +```js +const db = new Database(); +const collection = db.edgeCollection('edges'); +await collection.import([ + ['_key', '_from', '_to'], + ['x', 'vertices/a', 'vertices/b'], + ['y', 'vertices/a', 'vertices/c'], + ['z', 'vertices/d', 'vertices/a'] +]); +const edges = await collection.outEdges('vertices/a'); +assert.equal(edges.length, 2); +assert.deepEqual(edges.map(edge => edge._key), ['x', 'y']); +``` + +## edgeCollection.traversal + +`async edgeCollection.traversal(startVertex, opts): Object` + +Performs a traversal starting from the given _startVertex_ and following edges +contained in this edge collection. 
+ +**Arguments** + +* **startVertex**: `string` + + The handle of the start vertex. This can be either the `_id` of a document in + the database, the `_key` of an edge in the collection, or a document (i.e. an + object with an `_id` or `_key` property). + +* **opts**: `Object` + + See + [the HTTP API documentation](https://docs.arangodb.com/latest/HTTP/Traversal/index.html) + for details on the additional arguments. + + Please note that while _opts.filter_, _opts.visitor_, _opts.init_, + _opts.expander_ and _opts.sort_ should be strings evaluating to well-formed + JavaScript code, it's not possible to pass in JavaScript functions directly + because the code needs to be evaluated on the server and will be transmitted + in plain text. + +**Examples** + +```js +const db = new Database(); +const collection = db.edgeCollection('edges'); +await collection.import([ + ['_key', '_from', '_to'], + ['x', 'vertices/a', 'vertices/b'], + ['y', 'vertices/b', 'vertices/c'], + ['z', 'vertices/c', 'vertices/d'] +]); +const result = await collection.traversal('vertices/a', { + direction: 'outbound', + visitor: 'result.vertices.push(vertex._key);', + init: 'result.vertices = [];' +}); +assert.deepEqual(result.vertices, ['a', 'b', 'c', 'd']); +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/Indexes.md b/Documentation/Books/Drivers/JS/Reference/Collection/Indexes.md new file mode 100644 index 0000000000..e882d6e680 --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Collection/Indexes.md @@ -0,0 +1,335 @@ + +# Manipulating indexes + +These functions implement the +[HTTP API for manipulating indexes](https://docs.arangodb.com/latest/HTTP/Indexes/index.html). + +## collection.createIndex + +`async collection.createIndex(details): Object` + +Creates an arbitrary index on the collection. + +**Arguments** + +* **details**: `Object` + + For information on the possible properties of the _details_ object, see + [the HTTP API for manipulating indexes](https://docs.arangodb.com/latest/HTTP/Indexes/WorkingWith.html). + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +const index = await collection.createIndex({type: 'cap', size: 20}); +// the index has been created with the handle `index.id` +``` + +## collection.createCapConstraint + +`async collection.createCapConstraint(size): Object` + +Creates a cap constraint index on the collection. + +**Note**: This method is not available when using the driver with ArangoDB 3.0 +and higher as cap constraints are no longer supported. + +**Arguments** + +* **size**: `Object` + + An object with any of the following properties: + + * **size**: `number` (optional) + + The maximum number of documents in the collection. + + * **byteSize**: `number` (optional) + + The maximum size of active document data in the collection (in bytes). + +If _size_ is a number, it will be interpreted as _size.size_. + +For more information on the properties of the _size_ object see +[the HTTP API for creating cap constraints](https://docs.arangodb.com/latest/HTTP/Indexes/Cap.html). 
+ +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); + +const index = await collection.createCapConstraint(20) +// the index has been created with the handle `index.id` +assert.equal(index.size, 20); + +// -- or -- + +const index = await collection.createCapConstraint({size: 20}) +// the index has been created with the handle `index.id` +assert.equal(index.size, 20); +``` + +## collection.createHashIndex + +`async collection.createHashIndex(fields, [opts]): Object` + +Creates a hash index on the collection. + +**Arguments** + +* **fields**: `Array` + + An array of names of document fields on which to create the index. If the + value is a string, it will be wrapped in an array automatically. + +* **opts**: `Object` (optional) + + Additional options for this index. If the value is a boolean, it will be + interpreted as _opts.unique_. + +For more information on hash indexes, see +[the HTTP API for hash indexes](https://docs.arangodb.com/latest/HTTP/Indexes/Hash.html). + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); + +const index = await collection.createHashIndex('favorite-color'); +// the index has been created with the handle `index.id` +assert.deepEqual(index.fields, ['favorite-color']); + +// -- or -- + +const index = await collection.createHashIndex(['favorite-color']); +// the index has been created with the handle `index.id` +assert.deepEqual(index.fields, ['favorite-color']); +``` + +## collection.createSkipList + +`async collection.createSkipList(fields, [opts]): Object` + +Creates a skiplist index on the collection. + +**Arguments** + +* **fields**: `Array` + + An array of names of document fields on which to create the index. If the + value is a string, it will be wrapped in an array automatically. + +* **opts**: `Object` (optional) + + Additional options for this index. If the value is a boolean, it will be + interpreted as _opts.unique_. + +For more information on skiplist indexes, see +[the HTTP API for skiplist indexes](https://docs.arangodb.com/latest/HTTP/Indexes/Skiplist.html). + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); + +const index = await collection.createSkipList('favorite-color') +// the index has been created with the handle `index.id` +assert.deepEqual(index.fields, ['favorite-color']); + +// -- or -- + +const index = await collection.createSkipList(['favorite-color']) +// the index has been created with the handle `index.id` +assert.deepEqual(index.fields, ['favorite-color']); +``` + +## collection.createGeoIndex + +`async collection.createGeoIndex(fields, [opts]): Object` + +Creates a geo-spatial index on the collection. + +**Arguments** + +* **fields**: `Array` + + An array of names of document fields on which to create the index. Currently, + geo indexes must cover exactly one field. If the value is a string, it will be + wrapped in an array automatically. + +* **opts**: `Object` (optional) + + An object containing additional properties of the index. + +For more information on the properties of the _opts_ object see +[the HTTP API for manipulating geo indexes](https://docs.arangodb.com/latest/HTTP/Indexes/Geo.html). 
+ +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); + +const index = await collection.createGeoIndex(['latitude', 'longitude']); +// the index has been created with the handle `index.id` +assert.deepEqual(index.fields, ['longitude', 'latitude']); + +// -- or -- + +const index = await collection.createGeoIndex('location', {geoJson: true}); +// the index has been created with the handle `index.id` +assert.deepEqual(index.fields, ['location']); +``` + +## collection.createFulltextIndex + +`async collection.createFulltextIndex(fields, [minLength]): Object` + +Creates a fulltext index on the collection. + +**Arguments** + +* **fields**: `Array` + + An array of names of document fields on which to create the index. Currently, + fulltext indexes must cover exactly one field. If the value is a string, it + will be wrapped in an array automatically. + +* **minLength** (optional): + + Minimum character length of words to index. Uses a server-specific default + value if not specified. + +For more information on fulltext indexes, see +[the HTTP API for fulltext indexes](https://docs.arangodb.com/latest/HTTP/Indexes/Fulltext.html). + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); + +const index = await collection.createFulltextIndex('description'); +// the index has been created with the handle `index.id` +assert.deepEqual(index.fields, ['description']); + +// -- or -- + +const index = await collection.createFulltextIndex(['description']); +// the index has been created with the handle `index.id` +assert.deepEqual(index.fields, ['description']); +``` + +## collection.createPersistentIndex + +`async collection.createPersistentIndex(fields, [opts]): Object` + +Creates a Persistent index on the collection. Persistent indexes are similarly +in operation to skiplist indexes, only that these indexes are in disk as opposed +to in memory. This reduces memory usage and DB startup time, with the trade-off +being that it will always be orders of magnitude slower than in-memory indexes. + +**Arguments** + +* **fields**: `Array` + + An array of names of document fields on which to create the index. + +* **opts**: `Object` (optional) + + An object containing additional properties of the index. + +For more information on the properties of the _opts_ object see +[the HTTP API for manipulating Persistent indexes](https://docs.arangodb.com/latest/HTTP/Indexes/Persistent.html). + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); + +const index = await collection.createPersistentIndex(['name', 'email']); +// the index has been created with the handle `index.id` +assert.deepEqual(index.fields, ['name', 'email']); +``` + +## collection.index + +`async collection.index(indexHandle): Object` + +Fetches information about the index with the given _indexHandle_ and returns it. + +**Arguments** + +* **indexHandle**: `string` + + The handle of the index to look up. This can either be a fully-qualified + identifier or the collection-specific key of the index. If the value is an + object, its _id_ property will be used instead. 
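+
+  For example, a small sketch of passing an index object directly, relying on
+  the behaviour described above (its _id_ property is used for the lookup):
+
+  ```js
+  const db = new Database();
+  const collection = db.collection('some-collection');
+  const index = await collection.createHashIndex('favorite-color');
+  // the index object's id property is used to identify the index
+  const result = await collection.index(index);
+  ```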
+ +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +const index = await collection.createFulltextIndex('description'); +const result = await collection.index(index.id); +assert.equal(result.id, index.id); +// result contains the properties of the index + +// -- or -- + +const result = await collection.index(index.id.split('/')[1]); +assert.equal(result.id, index.id); +// result contains the properties of the index +``` + +## collection.indexes + +`async collection.indexes(): Array` + +Fetches a list of all indexes on this collection. + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +await collection.createFulltextIndex('description') +const indexes = await collection.indexes(); +assert.equal(indexes.length, 1); +// indexes contains information about the index +``` + +## collection.dropIndex + +`async collection.dropIndex(indexHandle): Object` + +Deletes the index with the given _indexHandle_ from the collection. + +**Arguments** + +* **indexHandle**: `string` + + The handle of the index to delete. This can either be a fully-qualified + identifier or the collection-specific key of the index. If the value is an + object, its _id_ property will be used instead. + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +const index = await collection.createFulltextIndex('description'); +await collection.dropIndex(index.id); +// the index has been removed from the collection + +// -- or -- + +await collection.dropIndex(index.id.split('/')[1]); +// the index has been removed from the collection +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/README.md b/Documentation/Books/Drivers/JS/Reference/Collection/README.md new file mode 100644 index 0000000000..ab37ec19e1 --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Collection/README.md @@ -0,0 +1,115 @@ + +# Collection API + +These functions implement the +[HTTP API for manipulating collections](https://docs.arangodb.com/latest/HTTP/Collection/index.html). + +The _Collection API_ is implemented by all _Collection_ instances, regardless of +their specific type. I.e. it represents a shared subset between instances of +[_DocumentCollection_](DocumentCollection.md), +[_EdgeCollection_](EdgeCollection.md), +[_GraphVertexCollection_](../Graph/VertexCollection.md) and +[_GraphEdgeCollection_](../Graph/EdgeCollection.md). + +## Getting information about the collection + +See +[the HTTP API documentation](https://docs.arangodb.com/latest/HTTP/Collection/Getting.html) +for details. + +### collection.get + +`async collection.get(): Object` + +Retrieves general information about the collection. + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +const data = await collection.get(); +// data contains general information about the collection +``` + +### collection.properties + +`async collection.properties(): Object` + +Retrieves the collection's properties. + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +const data = await collection.properties(); +// data contains the collection's properties +``` + +### collection.count + +`async collection.count(): Object` + +Retrieves information about the number of documents in a collection. 
+ +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +const data = await collection.count(); +// data contains the collection's count +``` + +### collection.figures + +`async collection.figures(): Object` + +Retrieves statistics for a collection. + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +const data = await collection.figures(); +// data contains the collection's figures +``` + +### collection.revision + +`async collection.revision(): Object` + +Retrieves the collection revision ID. + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +const data = await collection.revision(); +// data contains the collection's revision +``` + +### collection.checksum + +`async collection.checksum([opts]): Object` + +Retrieves the collection checksum. + +**Arguments** + +* **opts**: `Object` (optional) + + For information on the possible options see + [the HTTP API for getting collection information](https://docs.arangodb.com/latest/HTTP/Collection/Getting.html). + +**Examples** + +```js +const db = new Database(); +const collection = db.collection('some-collection'); +const data = await collection.checksum(); +// data contains the collection's checksum +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Collection/SimpleQueries.md b/Documentation/Books/Drivers/JS/Reference/Collection/SimpleQueries.md new file mode 100644 index 0000000000..4ebf166d30 --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Collection/SimpleQueries.md @@ -0,0 +1,167 @@ + +# Simple queries + +These functions implement the +[HTTP API for simple queries](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html). + +## collection.all + +`async collection.all([opts]): Cursor` + +Performs a query to fetch all documents in the collection. Returns a +[new _Cursor_ instance](../Cursor.md) for the query results. + +**Arguments** + +* **opts**: `Object` (optional) + + For information on the possible options see + [the HTTP API for returning all documents](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html#return-all-documents). + +## collection.any + +`async collection.any(): Object` + +Fetches a document from the collection at random. + +## collection.byExample + +`async collection.byExample(example, [opts]): Cursor` + +Performs a query to fetch all documents in the collection matching the given +_example_. Returns a [new _Cursor_ instance](../Cursor.md) for the query results. + +**Arguments** + +* **example**: _Object_ + + An object representing an example for documents to be matched against. + +* **opts**: _Object_ (optional) + + For information on the possible options see + [the HTTP API for fetching documents by example](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html#find-documents-matching-an-example). + +## collection.firstExample + +`async collection.firstExample(example): Object` + +Fetches the first document in the collection matching the given _example_. + +**Arguments** + +* **example**: _Object_ + + An object representing an example for documents to be matched against. + +## collection.removeByExample + +`async collection.removeByExample(example, [opts]): Object` + +Removes all documents in the collection matching the given _example_. + +**Arguments** + +* **example**: _Object_ + + An object representing an example for documents to be matched against. 
+ +* **opts**: _Object_ (optional) + + For information on the possible options see + [the HTTP API for removing documents by example](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html#remove-documents-by-example). + +## collection.replaceByExample + +`async collection.replaceByExample(example, newValue, [opts]): Object` + +Replaces all documents in the collection matching the given _example_ with the +given _newValue_. + +**Arguments** + +* **example**: _Object_ + + An object representing an example for documents to be matched against. + +* **newValue**: _Object_ + + The new value to replace matching documents with. + +* **opts**: _Object_ (optional) + + For information on the possible options see + [the HTTP API for replacing documents by example](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html#replace-documents-by-example). + +## collection.updateByExample + +`async collection.updateByExample(example, newValue, [opts]): Object` + +Updates (patches) all documents in the collection matching the given _example_ +with the given _newValue_. + +**Arguments** + +* **example**: _Object_ + + An object representing an example for documents to be matched against. + +* **newValue**: _Object_ + + The new value to update matching documents with. + +* **opts**: _Object_ (optional) + + For information on the possible options see + [the HTTP API for updating documents by example](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html#update-documents-by-example). + +## collection.lookupByKeys + +`async collection.lookupByKeys(keys): Array` + +Fetches the documents with the given _keys_ from the collection. Returns an +array of the matching documents. + +**Arguments** + +* **keys**: _Array_ + + An array of document keys to look up. + +## collection.removeByKeys + +`async collection.removeByKeys(keys, [opts]): Object` + +Deletes the documents with the given _keys_ from the collection. + +**Arguments** + +* **keys**: _Array_ + + An array of document keys to delete. + +* **opts**: _Object_ (optional) + + For information on the possible options see + [the HTTP API for removing documents by keys](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html#remove-documents-by-their-keys). + +## collection.fulltext + +`async collection.fulltext(fieldName, query, [opts]): Cursor` + +Performs a fulltext query in the given _fieldName_ on the collection. + +**Arguments** + +* **fieldName**: _String_ + + Name of the field to search on documents in the collection. + +* **query**: _String_ + + Fulltext query string to search for. + +* **opts**: _Object_ (optional) + + For information on the possible options see + [the HTTP API for fulltext queries](https://docs.arangodb.com/latest/HTTP/Indexes/Fulltext.html). diff --git a/Documentation/Books/Drivers/JS/Reference/Cursor.md b/Documentation/Books/Drivers/JS/Reference/Cursor.md new file mode 100644 index 0000000000..c76a3f9ccf --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Cursor.md @@ -0,0 +1,300 @@ + +# Cursor API + +_Cursor_ instances provide an abstraction over the HTTP API's limitations. +Unless a method explicitly exhausts the cursor, the driver will only fetch as +many batches from the server as necessary. Like the server-side cursors, +_Cursor_ instances are incrementally depleted as they are read from. 
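+
+For instance, a common consumption pattern is to drain the cursor one value at
+a time (a sketch using the _hasNext_ and _next_ methods described below):
+
+```js
+const db = new Database();
+const cursor = await db.query('FOR x IN 1..100 RETURN x');
+while (cursor.hasNext()) {
+  const value = await cursor.next();
+  // batches are only fetched from the server as they are actually needed
+  console.log(value);
+}
+```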
+ +```js +const db = new Database(); +const cursor = await db.query('FOR x IN 1..5 RETURN x'); +// query result list: [1, 2, 3, 4, 5] +const value = await cursor.next(); +assert.equal(value, 1); +// remaining result list: [2, 3, 4, 5] +``` + +## cursor.count + +`cursor.count: number` + +The total number of documents in the query result. This is only available if the +`count` option was used. + +## cursor.all + +`async cursor.all(): Array` + +Exhausts the cursor, then returns an array containing all values in the cursor's +remaining result list. + +**Examples** + +```js +const cursor = await db._query('FOR x IN 1..5 RETURN x'); +const result = await cursor.all() +// result is an array containing the entire query result +assert.deepEqual(result, [1, 2, 3, 4, 5]); +assert.equal(cursor.hasNext(), false); +``` + +## cursor.next + +`async cursor.next(): Object` + +Advances the cursor and returns the next value in the cursor's remaining result +list. If the cursor has already been exhausted, returns `undefined` instead. + +**Examples** + +```js +// query result list: [1, 2, 3, 4, 5] +const val = await cursor.next(); +assert.equal(val, 1); +// remaining result list: [2, 3, 4, 5] + +const val2 = await cursor.next(); +assert.equal(val2, 2); +// remaining result list: [3, 4, 5] +``` + +## cursor.hasNext + +`cursor.hasNext(): boolean` + +Returns `true` if the cursor has more values or `false` if the cursor has been +exhausted. + +**Examples** + +```js +await cursor.all(); // exhausts the cursor +assert.equal(cursor.hasNext(), false); +``` + +## cursor.each + +`async cursor.each(fn): any` + +Advances the cursor by applying the function _fn_ to each value in the cursor's +remaining result list until the cursor is exhausted or _fn_ explicitly returns +`false`. + +Returns the last return value of _fn_. + +Equivalent to _Array.prototype.forEach_ (except async). + +**Arguments** + +* **fn**: `Function` + + A function that will be invoked for each value in the cursor's remaining + result list until it explicitly returns `false` or the cursor is exhausted. + + The function receives the following arguments: + + * **value**: `any` + + The value in the cursor's remaining result list. + + * **index**: `number` + + The index of the value in the cursor's remaining result list. + + * **cursor**: `Cursor` + + The cursor itself. + +**Examples** + +```js +const results = []; +function doStuff(value) { + const VALUE = value.toUpperCase(); + results.push(VALUE); + return VALUE; +} + +const cursor = await db.query('FOR x IN ["a", "b", "c"] RETURN x') +const last = await cursor.each(doStuff); +assert.deepEqual(results, ['A', 'B', 'C']); +assert.equal(cursor.hasNext(), false); +assert.equal(last, 'C'); +``` + +## cursor.every + +`async cursor.every(fn): boolean` + +Advances the cursor by applying the function _fn_ to each value in the cursor's +remaining result list until the cursor is exhausted or _fn_ returns a value that +evaluates to `false`. + +Returns `false` if _fn_ returned a value that evaluates to `false`, or `true` +otherwise. + +Equivalent to _Array.prototype.every_ (except async). + +**Arguments** + +* **fn**: `Function` + + A function that will be invoked for each value in the cursor's remaining + result list until it returns a value that evaluates to `false` or the cursor + is exhausted. + + The function receives the following arguments: + + * **value**: `any` + + The value in the cursor's remaining result list. + + * **index**: `number` + + The index of the value in the cursor's remaining result list. 
+
+  * **cursor**: `Cursor`
+
+    The cursor itself.
+
+**Examples**
+
+```js
+const even = value => value % 2 === 0;
+
+const cursor = await db.query('FOR x IN 2..5 RETURN x');
+const result = await cursor.every(even);
+assert.equal(result, false); // 3 is not even
+assert.equal(cursor.hasNext(), true);
+
+const value = await cursor.next();
+assert.equal(value, 4); // next value after 3
+```
+
+## cursor.some
+
+`async cursor.some(fn): boolean`
+
+Advances the cursor by applying the function _fn_ to each value in the cursor's
+remaining result list until the cursor is exhausted or _fn_ returns a value that
+evaluates to `true`.
+
+Returns `true` if _fn_ returned a value that evaluates to `true`, or `false`
+otherwise.
+
+Equivalent to _Array.prototype.some_ (except async).
+
+**Examples**
+
+```js
+const even = value => value % 2 === 0;
+
+const cursor = await db.query('FOR x IN 1..5 RETURN x');
+const result = await cursor.some(even);
+assert.equal(result, true); // 2 is even
+assert.equal(cursor.hasNext(), true);
+
+const value = await cursor.next();
+assert.equal(value, 3); // next value after 2
+```
+
+## cursor.map
+
+`async cursor.map(fn): Array`
+
+Advances the cursor by applying the function _fn_ to each value in the cursor's
+remaining result list until the cursor is exhausted.
+
+Returns an array of the return values of _fn_.
+
+Equivalent to _Array.prototype.map_ (except async).
+
+**Note**: This creates an array of all return values. It is probably a bad idea
+to do this for very large query result sets.
+
+**Arguments**
+
+* **fn**: `Function`
+
+  A function that will be invoked for each value in the cursor's remaining
+  result list until the cursor is exhausted.
+
+  The function receives the following arguments:
+
+  * **value**: `any`
+
+    The value in the cursor's remaining result list.
+
+  * **index**: `number`
+
+    The index of the value in the cursor's remaining result list.
+
+  * **cursor**: `Cursor`
+
+    The cursor itself.
+
+**Examples**
+
+```js
+const square = value => value * value;
+const cursor = await db.query('FOR x IN 1..5 RETURN x');
+const result = await cursor.map(square);
+assert.equal(result.length, 5);
+assert.deepEqual(result, [1, 4, 9, 16, 25]);
+assert.equal(cursor.hasNext(), false);
+```
+
+## cursor.reduce
+
+`async cursor.reduce(fn, [accu]): any`
+
+Exhausts the cursor by reducing the values in the cursor's remaining result list
+with the given function _fn_. If _accu_ is not provided, the first value in the
+cursor's remaining result list will be used instead (the function will not be
+invoked for that value).
+
+Equivalent to _Array.prototype.reduce_ (except async).
+
+**Arguments**
+
+* **fn**: `Function`
+
+  A function that will be invoked for each value in the cursor's remaining
+  result list until the cursor is exhausted.
+
+  The function receives the following arguments:
+
+  * **accu**: `any`
+
+    The return value of the previous call to _fn_. If this is the first call,
+    _accu_ will be set to the _accu_ value passed to _reduce_ or the first value
+    in the cursor's remaining result list.
+
+  * **value**: `any`
+
+    The value in the cursor's remaining result list.
+
+  * **index**: `number`
+
+    The index of the value in the cursor's remaining result list.
+
+  * **cursor**: `Cursor`
+
+    The cursor itself.
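+
+* **accu**: `any` (optional)
+
+  The initial value of the accumulator. If not provided, the first value in
+  the cursor's remaining result list is used instead and _fn_ is not invoked
+  for that value.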
+
+**Examples**
+
+```js
+const add = (a, b) => a + b;
+const baseline = 1000;
+
+const cursor = await db.query('FOR x IN 1..5 RETURN x');
+const result = await cursor.reduce(add, baseline);
+assert.equal(result, baseline + 1 + 2 + 3 + 4 + 5);
+assert.equal(cursor.hasNext(), false);
+
+// -- or --
+
+const result = await cursor.reduce(add);
+assert.equal(result, 1 + 2 + 3 + 4 + 5);
+assert.equal(cursor.hasNext(), false);
+```
diff --git a/Documentation/Books/Drivers/JS/Reference/Database/AqlUserFunctions.md b/Documentation/Books/Drivers/JS/Reference/Database/AqlUserFunctions.md
new file mode 100644
index 0000000000..c627bed487
--- /dev/null
+++ b/Documentation/Books/Drivers/JS/Reference/Database/AqlUserFunctions.md
@@ -0,0 +1,83 @@
+
+# Managing AQL user functions
+
+These functions implement the
+[HTTP API for managing AQL user functions](https://docs.arangodb.com/latest/HTTP/AqlUserFunctions/index.html).
+
+## database.listFunctions
+
+`async database.listFunctions(): Array`
+
+Fetches a list of all AQL user functions registered with the database.
+
+**Examples**
+
+```js
+const db = new Database();
+const functions = await db.listFunctions();
+// functions is a list of function descriptions
+```
+
+## database.createFunction
+
+`async database.createFunction(name, code): Object`
+
+Creates an AQL user function with the given _name_ and _code_ if it does not
+already exist or replaces it if a function with the same name already existed.
+
+**Arguments**
+
+* **name**: `string`
+
+  A valid AQL function name, e.g.: `"myfuncs::accounting::calculate_vat"`.
+
+* **code**: `string`
+
+  A string evaluating to a JavaScript function (not a JavaScript function
+  object).
+
+**Examples**
+
+```js
+const db = new Database();
+await db.createFunction(
+  'ACME::ACCOUNTING::CALCULATE_VAT',
+  String(function (price) {
+    return price * 0.19;
+  })
+);
+// Use the new function in an AQL query with the aql template tag:
+const cursor = await db.query(aql`
+  FOR product IN products
+  RETURN MERGE(
+    {vat: ACME::ACCOUNTING::CALCULATE_VAT(product.price)},
+    product
+  )
+`);
+// cursor is a cursor for the query result
+```
+
+## database.dropFunction
+
+`async database.dropFunction(name, [group]): Object`
+
+Deletes the AQL user function with the given name from the database.
+
+**Arguments**
+
+* **name**: `string`
+
+  The name of the user function to drop.
+
+* **group**: `boolean` (Default: `false`)
+
+  If set to `true`, all functions with a name starting with _name_ will be
+  deleted; otherwise only the function with the exact name will be deleted.
+
+**Examples**
+
+```js
+const db = new Database();
+await db.dropFunction('ACME::ACCOUNTING::CALCULATE_VAT');
+// the function no longer exists
+```
diff --git a/Documentation/Books/Drivers/JS/Reference/Database/CollectionAccess.md b/Documentation/Books/Drivers/JS/Reference/Database/CollectionAccess.md
new file mode 100644
index 0000000000..0878c53a77
--- /dev/null
+++ b/Documentation/Books/Drivers/JS/Reference/Database/CollectionAccess.md
@@ -0,0 +1,103 @@
+
+# Accessing collections
+
+These functions implement the
+[HTTP API for accessing collections](https://docs.arangodb.com/latest/HTTP/Collection/Getting.html).
+
+## database.collection
+
+`database.collection(collectionName): DocumentCollection`
+
+Returns a _DocumentCollection_ instance for the given collection name.
+
+**Arguments**
+
+* **collectionName**: `string`
+
+  Name of the collection.
+ +**Examples** + +```js +const db = new Database(); +const collection = db.collection("potatos"); +``` + +## database.edgeCollection + +`database.edgeCollection(collectionName): EdgeCollection` + +Returns an _EdgeCollection_ instance for the given collection name. + +**Arguments** + +* **collectionName**: `string` + + Name of the edge collection. + +**Examples** + +```js +const db = new Database(); +const collection = db.edgeCollection("potatos"); +``` + +## database.listCollections + +`async database.listCollections([excludeSystem]): Array` + +Fetches all collections from the database and returns an array of collection +descriptions. + +**Arguments** + +* **excludeSystem**: `boolean` (Default: `true`) + + Whether system collections should be excluded. + +**Examples** + +```js +const db = new Database(); + +const collections = await db.listCollections(); +// collections is an array of collection descriptions +// not including system collections + +// -- or -- + +const collections = await db.listCollections(false); +// collections is an array of collection descriptions +// including system collections +``` + +## database.collections + +`async database.collections([excludeSystem]): Array` + +Fetches all collections from the database and returns an array of +_DocumentCollection_ and _EdgeCollection_ instances for the collections. + +**Arguments** + +* **excludeSystem**: `boolean` (Default: `true`) + + Whether system collections should be excluded. + +**Examples** + +```js +const db = new Database(); + +const collections = await db.collections() +// collections is an array of DocumentCollection +// and EdgeCollection instances +// not including system collections + +// -- or -- + +const collections = await db.collections(false) +// collections is an array of DocumentCollection +// and EdgeCollection instances +// including system collections +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Database/DatabaseManipulation.md b/Documentation/Books/Drivers/JS/Reference/Database/DatabaseManipulation.md new file mode 100644 index 0000000000..20bb032bb6 --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Database/DatabaseManipulation.md @@ -0,0 +1,204 @@ + +# Manipulating databases + +These functions implement the +[HTTP API for manipulating databases](https://docs.arangodb.com/latest/HTTP/Database/index.html). + +## database.acquireHostList + +`async database.acquireHostList(): this` + +Updates the URL list by requesting a list of all coordinators in the cluster and adding any endpoints not initially specified in the _url_ configuration. + +For long-running processes communicating with an ArangoDB cluster it is recommended to run this method repeatedly (e.g. once per hour) to make sure new coordinators are picked up correctly and can be used for fail-over or load balancing. + +**Note**: This method can not be used when the arangojs instance was created with `isAbsolute: true`. + +## database.useDatabase + +`database.useDatabase(databaseName): this` + +Updates the _Database_ instance and its connection string to use the given +_databaseName_, then returns itself. + +**Note**: This method can not be used when the arangojs instance was created with `isAbsolute: true`. + +**Arguments** + +* **databaseName**: `string` + + The name of the database to use. + +**Examples** + +```js +const db = new Database(); +db.useDatabase("test"); +// The database instance now uses the database "test". 
+``` + +## database.useBasicAuth + +`database.useBasicAuth(username, password): this` + +Updates the _Database_ instance's `authorization` header to use Basic +authentication with the given _username_ and _password_, then returns itself. + +**Arguments** + +* **username**: `string` (Default: `"root"`) + + The username to authenticate with. + +* **password**: `string` (Default: `""`) + + The password to authenticate with. + +**Examples** + +```js +const db = new Database(); +db.useDatabase("test"); +db.useBasicAuth("admin", "hunter2"); +// The database instance now uses the database "test" +// with the username "admin" and password "hunter2". +``` + +## database.useBearerAuth + +`database.useBearerAuth(token): this` + +Updates the _Database_ instance's `authorization` header to use Bearer +authentication with the given authentication token, then returns itself. + +**Arguments** + +* **token**: `string` + + The token to authenticate with. + +**Examples** + +```js +const db = new Database(); +db.useBearerAuth("keyboardcat"); +// The database instance now uses Bearer authentication. +``` + +## database.createDatabase + +`async database.createDatabase(databaseName, [users]): Object` + +Creates a new database with the given _databaseName_. + +**Arguments** + +* **databaseName**: `string` + + Name of the database to create. + +* **users**: `Array` (optional) + + If specified, the array must contain objects with the following properties: + + * **username**: `string` + + The username of the user to create for the database. + + * **passwd**: `string` (Default: empty) + + The password of the user. + + * **active**: `boolean` (Default: `true`) + + Whether the user is active. + + * **extra**: `Object` (optional) + + An object containing additional user data. + +**Examples** + +```js +const db = new Database(); +const info = await db.createDatabase('mydb', [{username: 'root'}]); +// the database has been created +``` + +## database.get + +`async database.get(): Object` + +Fetches the database description for the active database from the server. + +**Examples** + +```js +const db = new Database(); +const info = await db.get(); +// the database exists +``` + +## database.listDatabases + +`async database.listDatabases(): Array` + +Fetches all databases from the server and returns an array of their names. + +**Examples** + +```js +const db = new Database(); +const names = await db.listDatabases(); +// databases is an array of database names +``` + +## database.listUserDatabases + +`async database.listUserDatabases(): Array` + +Fetches all databases accessible to the active user from the server and returns +an array of their names. + +**Examples** + +```js +const db = new Database(); +const names = await db.listUserDatabases(); +// databases is an array of database names +``` + +## database.dropDatabase + +`async database.dropDatabase(databaseName): Object` + +Deletes the database with the given _databaseName_ from the server. + +```js +const db = new Database(); +await db.dropDatabase('mydb'); +// database "mydb" no longer exists +``` + +## database.truncate + +`async database.truncate([excludeSystem]): Object` + +Deletes **all documents in all collections** in the active database. + +**Arguments** + +* **excludeSystem**: `boolean` (Default: `true`) + + Whether system collections should be excluded. Note that this option will be + ignored because truncating system collections is not supported anymore for + some system collections. 
+ +**Examples** + +```js +const db = new Database(); + +await db.truncate(); +// all non-system collections in this database are now empty +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Database/FoxxServices.md b/Documentation/Books/Drivers/JS/Reference/Database/FoxxServices.md new file mode 100644 index 0000000000..d532e5e618 --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Database/FoxxServices.md @@ -0,0 +1,675 @@ + +# Managing Foxx services + +## database.listServices + +`async database.listServices([excludeSystem]): Array` + +Fetches a list of all installed service. + +**Arguments** + +* **excludeSystem**: `boolean` (Default: `true`) + + Whether system services should be excluded. + +**Examples** + +```js +const services = await db.listServices(); + +// -- or -- + +const services = await db.listServices(false); +``` + +## database.installService + +`async database.installService(mount, source, [options]): Object` + +Installs a new service. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +* **source**: `Buffer | Readable | File | string` + + The service bundle to install. + +* **options**: `Object` (optional) + + An object with any of the following properties: + + * **configuration**: `Object` (optional) + + An object mapping configuration option names to values. + + * **dependencies**: `Object` (optional) + + An object mapping dependency aliases to mount points. + + * **development**: `boolean` (Default: `false`) + + Whether the service should be installed in development mode. + + * **legacy**: `boolean` (Default: `false`) + + Whether the service should be installed in legacy compatibility mode. + + This overrides the `engines` option in the service manifest (if any). + + * **setup**: `boolean` (Default: `true`) + + Whether the setup script should be executed. + +**Examples** + +```js +const source = fs.createReadStream('./my-foxx-service.zip'); +const info = await db.installService('/hello', source); + +// -- or -- + +const source = fs.readFileSync('./my-foxx-service.zip'); +const info = await db.installService('/hello', source); + +// -- or -- + +const element = document.getElementById('my-file-input'); +const source = element.files[0]; +const info = await db.installService('/hello', source); +``` + +## database.replaceService + +`async database.replaceService(mount, source, [options]): Object` + +Replaces an existing service with a new service by completely removing the old +service and installing a new service at the same mount point. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +* **source**: `Buffer | Readable | File | string` + + The service bundle to replace the existing service with. + +* **options**: `Object` (optional) + + An object with any of the following properties: + + * **configuration**: `Object` (optional) + + An object mapping configuration option names to values. + + This configuration will replace the existing configuration. + + * **dependencies**: `Object` (optional) + + An object mapping dependency aliases to mount points. + + These dependencies will replace the existing dependencies. + + * **development**: `boolean` (Default: `false`) + + Whether the new service should be installed in development mode. + + * **legacy**: `boolean` (Default: `false`) + + Whether the new service should be installed in legacy compatibility mode. + + This overrides the `engines` option in the service manifest (if any). 
+ + * **teardown**: `boolean` (Default: `true`) + + Whether the teardown script of the old service should be executed. + + * **setup**: `boolean` (Default: `true`) + + Whether the setup script of the new service should be executed. + +**Examples** + +```js +const source = fs.createReadStream('./my-foxx-service.zip'); +const info = await db.replaceService('/hello', source); + +// -- or -- + +const source = fs.readFileSync('./my-foxx-service.zip'); +const info = await db.replaceService('/hello', source); + +// -- or -- + +const element = document.getElementById('my-file-input'); +const source = element.files[0]; +const info = await db.replaceService('/hello', source); +``` + +## database.upgradeService + +`async database.upgradeService(mount, source, [options]): Object` + +Replaces an existing service with a new service while retaining the old +service's configuration and dependencies. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +* **source**: `Buffer | Readable | File | string` + + The service bundle to replace the existing service with. + +* **options**: `Object` (optional) + + An object with any of the following properties: + + * **configuration**: `Object` (optional) + + An object mapping configuration option names to values. + + This configuration will be merged into the existing configuration. + + * **dependencies**: `Object` (optional) + + An object mapping dependency aliases to mount points. + + These dependencies will be merged into the existing dependencies. + + * **development**: `boolean` (Default: `false`) + + Whether the new service should be installed in development mode. + + * **legacy**: `boolean` (Default: `false`) + + Whether the new service should be installed in legacy compatibility mode. + + This overrides the `engines` option in the service manifest (if any). + + * **teardown**: `boolean` (Default: `false`) + + Whether the teardown script of the old service should be executed. + + * **setup**: `boolean` (Default: `true`) + + Whether the setup script of the new service should be executed. + +**Examples** + +```js +const source = fs.createReadStream('./my-foxx-service.zip'); +const info = await db.upgradeService('/hello', source); + +// -- or -- + +const source = fs.readFileSync('./my-foxx-service.zip'); +const info = await db.upgradeService('/hello', source); + +// -- or -- + +const element = document.getElementById('my-file-input'); +const source = element.files[0]; +const info = await db.upgradeService('/hello', source); +``` + +## database.uninstallService + +`async database.uninstallService(mount, [options]): void` + +Completely removes a service from the database. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +* **options**: `Object` (optional) + + An object with any of the following properties: + + * **teardown**: `boolean` (Default: `true`) + + Whether the teardown script should be executed. + +**Examples** + +```js +await db.uninstallService('/my-service'); +// service was uninstalled +``` + +## database.getService + +`async database.getService(mount): Object` + +Retrieves information about a mounted service. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. 
+ +**Examples** + +```js +const info = await db.getService('/my-service'); +// info contains detailed information about the service +``` + +## database.getServiceConfiguration + +`async database.getServiceConfiguration(mount, [minimal]): Object` + +Retrieves an object with information about the service's configuration options +and their current values. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +* **minimal**: `boolean` (Default: `false`) + + Only return the current values. + +**Examples** + +```js +const config = await db.getServiceConfiguration('/my-service'); +// config contains information about the service's configuration +``` + +## database.replaceServiceConfiguration + +`async database.replaceServiceConfiguration(mount, configuration, [minimal]): +Object` + +Replaces the configuration of the given service. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +* **configuration**: `Object` + + An object mapping configuration option names to values. + +* **minimal**: `boolean` (Default: `false`) + + Only return the current values and warnings (if any). + + **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids + triggering a second request to the database. + +**Examples** + +```js +const config = {currency: 'USD', locale: 'en-US'}; +const info = await db.replaceServiceConfiguration('/my-service', config); +// info.values contains information about the service's configuration +// info.warnings contains any validation errors for the configuration +``` + +## database.updateServiceConfiguration + +`async database.updateServiceConfiguration(mount, configuration, [minimal]): +Object` + +Updates the configuration of the given service my merging the new values into +the existing ones. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +* **configuration**: `Object` + + An object mapping configuration option names to values. + +* **minimal**: `boolean` (Default: `false`) + + Only return the current values and warnings (if any). + + **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids + triggering a second request to the database. + +**Examples** + +```js +const config = {locale: 'en-US'}; +const info = await db.updateServiceConfiguration('/my-service', config); +// info.values contains information about the service's configuration +// info.warnings contains any validation errors for the configuration +``` + +## database.getServiceDependencies + +`async database.getServiceDependencies(mount, [minimal]): Object` + +Retrieves an object with information about the service's dependencies and their +current mount points. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +* **minimal**: `boolean` (Default: `false`) + + Only return the current values and warnings (if any). + +**Examples** + +```js +const deps = await db.getServiceDependencies('/my-service'); +// deps contains information about the service's dependencies +``` + +## database.replaceServiceDependencies + +`async database.replaceServiceDependencies(mount, dependencies, [minimal]): +Object` + +Replaces the dependencies for the given service. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +* **dependencies**: `Object` + + An object mapping dependency aliases to mount points. 
+ +* **minimal**: `boolean` (Default: `false`) + + Only return the current values and warnings (if any). + + **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids + triggering a second request to the database. + +**Examples** + +```js +const deps = {mailer: '/mailer-api', auth: '/remote-auth'}; +const info = await db.replaceServiceDependencies('/my-service', deps); +// info.values contains information about the service's dependencies +// info.warnings contains any validation errors for the dependencies +``` + +## database.updateServiceDependencies + +`async database.updateServiceDependencies(mount, dependencies, [minimal]): +Object` + +Updates the dependencies for the given service by merging the new values into +the existing ones. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +* **dependencies**: `Object` + + An object mapping dependency aliases to mount points. + +* **minimal**: `boolean` (Default: `false`) + + Only return the current values and warnings (if any). + + **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids + triggering a second request to the database. + +**Examples** + +```js +const deps = {mailer: '/mailer-api'}; +const info = await db.updateServiceDependencies('/my-service', deps); +// info.values contains information about the service's dependencies +// info.warnings contains any validation errors for the dependencies +``` + +## database.enableServiceDevelopmentMode + +`async database.enableServiceDevelopmentMode(mount): Object` + +Enables development mode for the given service. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +**Examples** + +```js +const info = await db.enableServiceDevelopmentMode('/my-service'); +// the service is now in development mode +// info contains detailed information about the service +``` + +## database.disableServiceDevelopmentMode + +`async database.disableServiceDevelopmentMode(mount): Object` + +Disabled development mode for the given service and commits the service state to +the database. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +**Examples** + +```js +const info = await db.disableServiceDevelopmentMode('/my-service'); +// the service is now in production mode +// info contains detailed information about the service +``` + +## database.listServiceScripts + +`async database.listServiceScripts(mount): Object` + +Retrieves a list of the service's scripts. + +Returns an object mapping each name to a more readable representation. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +**Examples** + +```js +const scripts = await db.listServiceScripts('/my-service'); +// scripts is an object listing the service scripts +``` + +## database.runServiceScript + +`async database.runServiceScript(mount, name, [scriptArg]): any` + +Runs a service script and returns the result. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +* **name**: `string` + + Name of the script to execute. + +* **scriptArg**: `any` + + Value that will be passed as an argument to the script. 
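+
+  For example, a hypothetical "send-mail" script that expects a single
+  options argument might be invoked like this (the script name and argument
+  shown here are placeholders, not part of any real service):
+
+  ```js
+  const result = await db.runServiceScript('/my-service', 'send-mail', {
+    to: 'admin@example.com'
+  });
+  // result contains the script's return value (if any)
+  ```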
+ +**Examples** + +```js +const result = await db.runServiceScript('/my-service', 'setup'); +// result contains the script's exports (if any) +``` + +## database.runServiceTests + +`async database.runServiceTests(mount, [reporter]): any` + +Runs the tests of a given service and returns a formatted report. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database + +* **options**: `Object` (optional) + + An object with any of the following properties: + + * **reporter**: `string` (Default: `default`) + + The reporter to use to process the test results. + + As of ArangoDB 3.2 the following reporters are supported: + + * **stream**: an array of event objects + * **suite**: nested suite objects with test results + * **xunit**: JSONML representation of an XUnit report + * **tap**: an array of TAP event strings + * **default**: an array of test results + + * **idiomatic**: `boolean` (Default: `false`) + + Whether the results should be converted to the apropriate `string` + representation: + + * **xunit** reports will be formatted as XML documents + * **tap** reports will be formatted as TAP streams + * **stream** reports will be formatted as JSON-LD streams + +**Examples** + +```js +const opts = {reporter: 'xunit', idiomatic: true}; +const result = await db.runServiceTests('/my-service', opts); +// result contains the XUnit report as a string +``` + +## database.downloadService + +`async database.downloadService(mount): Buffer | Blob` + +Retrieves a zip bundle containing the service files. + +Returns a `Buffer` in Node or `Blob` in the browser version. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +**Examples** + +```js +const bundle = await db.downloadService('/my-service'); +// bundle is a Buffer/Blob of the service bundle +``` + +## database.getServiceReadme + +`async database.getServiceReadme(mount): string?` + +Retrieves the text content of the service's `README` or `README.md` file. + +Returns `undefined` if no such file could be found. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +**Examples** + +```js +const readme = await db.getServiceReadme('/my-service'); +// readme is a string containing the service README's +// text content, or undefined if no README exists +``` + +## database.getServiceDocumentation + +`async database.getServiceDocumentation(mount): Object` + +Retrieves a Swagger API description object for the service installed at the +given mount point. + +**Arguments** + +* **mount**: `string` + + The service's mount point, relative to the database. + +**Examples** + +```js +const spec = await db.getServiceDocumentation('/my-service'); +// spec is a Swagger API description of the service +``` + +## database.commitLocalServiceState + +`async database.commitLocalServiceState([replace]): void` + +Writes all locally available services to the database and updates any service +bundles missing in the database. + +**Arguments** + +* **replace**: `boolean` (Default: `false`) + + Also commit outdated services. + + This can be used to solve some consistency problems when service bundles are + missing in the database or were deleted manually. 
+ +**Examples** + +```js +await db.commitLocalServiceState(); +// all services available on the coordinator have been written to the db + +// -- or -- + +await db.commitLocalServiceState(true); +// all service conflicts have been resolved in favor of this coordinator +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Database/GraphAccess.md b/Documentation/Books/Drivers/JS/Reference/Database/GraphAccess.md new file mode 100644 index 0000000000..5fddbb5e79 --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Database/GraphAccess.md @@ -0,0 +1,40 @@ + +# Accessing graphs + +These functions implement the +[HTTP API for accessing general graphs](https://docs.arangodb.com/latest/HTTP/Gharial/index.html). + +## database.graph + +`database.graph(graphName): Graph` + +Returns a _Graph_ instance representing the graph with the given graph name. + +## database.listGraphs + +`async database.listGraphs(): Array` + +Fetches all graphs from the database and returns an array of graph descriptions. + +**Examples** + +```js +const db = new Database(); +const graphs = await db.listGraphs(); +// graphs is an array of graph descriptions +``` + +## database.graphs + +`async database.graphs(): Array` + +Fetches all graphs from the database and returns an array of _Graph_ instances +for the graphs. + +**Examples** + +```js +const db = new Database(); +const graphs = await db.graphs(); +// graphs is an array of Graph instances +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Database/HttpRoutes.md b/Documentation/Books/Drivers/JS/Reference/Database/HttpRoutes.md new file mode 100644 index 0000000000..d9219f549e --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Database/HttpRoutes.md @@ -0,0 +1,38 @@ + +# Arbitrary HTTP routes + +## database.route + +`database.route([path,] [headers]): Route` + +Returns a new _Route_ instance for the given path (relative to the database) +that can be used to perform arbitrary HTTP requests. + +**Arguments** + +* **path**: `string` (optional) + + The database-relative URL of the route. + +* **headers**: `Object` (optional) + + Default headers that should be sent with each request to the route. + +If _path_ is missing, the route will refer to the base URL of the database. + +For more information on _Route_ instances see the +[_Route API_ below](../Route.md). + +**Examples** + +```js +const db = new Database(); +const myFoxxService = db.route('my-foxx-service'); +const response = await myFoxxService.post('users', { + username: 'admin', + password: 'hunter2' +}); +// response.body is the result of +// POST /_db/_system/my-foxx-service/users +// with JSON request body '{"username": "admin", "password": "hunter2"}' +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Database/Queries.md b/Documentation/Books/Drivers/JS/Reference/Database/Queries.md new file mode 100644 index 0000000000..eb62fa16a0 --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Database/Queries.md @@ -0,0 +1,126 @@ + +# Queries + +This function implements the +[HTTP API for single roundtrip AQL queries](https://docs.arangodb.com/latest/HTTP/AqlQueryCursor/QueryResults.html). + +For collection-specific queries see [simple queries](../Collection/SimpleQueries.md). + +## database.query + +`async database.query(query, [bindVars,] [opts]): Cursor` + +Performs a database query using the given _query_ and _bindVars_, then returns a +[new _Cursor_ instance](../Cursor.md) for the result list. 
+ +**Arguments** + +* **query**: `string` + + An AQL query string or a [query builder](https://npmjs.org/package/aqb) + instance. + +* **bindVars**: `Object` (optional) + + An object defining the variables to bind the query to. + +* **opts**: `Object` (optional) + + Additional parameter object that will be passed to the query API. + Possible keys are _count_ and _options_ (explained below) + +If _opts.count_ is set to `true`, the cursor will have a _count_ property set to +the query result count. +Possible key options in _opts.options_ include: _failOnWarning_, _cache_, profile or _skipInaccessibleCollections_. +For a complete list of query settings please reference the [arangodb.com documentation](https://docs.arangodb.com/latest/AQL/Invocation/WithArangosh.html#setting-options). + +If _query_ is an object with _query_ and _bindVars_ properties, those will be +used as the values of the respective arguments instead. + +**Examples** + +```js +const db = new Database(); +const active = true; + +// Using the aql template tag +const cursor = await db.query(aql` + FOR u IN _users + FILTER u.authData.active == ${active} + RETURN u.user +`); +// cursor is a cursor for the query result + +// -- or -- + +// Old-school JS with explicit bindVars: +db.query( + 'FOR u IN _users ' + + 'FILTER u.authData.active == @active ' + + 'RETURN u.user', + {active: true} +).then(function (cursor) { + // cursor is a cursor for the query result +}); +``` + +## aql + +`aql(strings, ...args): Object` + +Template string handler (aka template tag) for AQL queries. Converts a template +string to an object that can be passed to `database.query` by converting +arguments to bind variables. + +**Note**: If you want to pass a collection name as a bind variable, you need to +pass a _Collection_ instance (e.g. what you get by passing the collection name +to `db.collection`) instead. If you see the error `"array expected as operand to +FOR loop"`, you're likely passing a collection name instead of a collection +instance. + +**Examples** + +```js +const userCollection = db.collection("_users"); +const role = "admin"; + +const query = aql` + FOR user IN ${userCollection} + FILTER user.role == ${role} + RETURN user +`; + +// -- is equivalent to -- +const query = { + query: "FOR user IN @@value0 FILTER user.role == @value1 RETURN user", + bindVars: { "@value0": userCollection.name, value1: role } +}; +``` + +Note how the aql template tag automatically handles collection references +(`@@value0` instead of `@value0`) for us so you don't have to worry about +counting at-symbols. + +Because the aql template tag creates actual bindVars instead of inlining values +directly, it also avoids injection attacks via malicious parameters: + +```js +// malicious user input +const email = '" || (FOR x IN secrets REMOVE x IN secrets) || "'; + +// DON'T do this! +const query = ` + FOR user IN users + FILTER user.email == "${email}" + RETURN user +`; +// FILTER user.email == "" || (FOR x IN secrets REMOVE x IN secrets) || "" + +// instead do this! 
+const query = aql` + FOR user IN users + FILTER user.email == ${email} + RETURN user +`; +// FILTER user.email == @value0 +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Database/README.md b/Documentation/Books/Drivers/JS/Reference/Database/README.md new file mode 100644 index 0000000000..a46ddc5eed --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Database/README.md @@ -0,0 +1,106 @@ + +# Database API + +## new Database + +`new Database([config]): Database` + +Creates a new _Database_ instance. + +If _config_ is a string, it will be interpreted as _config.url_. + +**Arguments** + +* **config**: `Object` (optional) + + An object with the following properties: + + * **url**: `string | Array` (Default: `http://localhost:8529`) + + Base URL of the ArangoDB server or list of server URLs. + + **Note**: As of arangojs 6.0.0 it is no longer possible to pass + the username or password from the URL. + + If you want to use ArangoDB with authentication, see + _useBasicAuth_ or _useBearerAuth_ methods. + + If you need to support self-signed HTTPS certificates, you may have to add + your certificates to the _agentOptions_, e.g.: + + ```js + agentOptions: { + ca: [ + fs.readFileSync(".ssl/sub.class1.server.ca.pem"), + fs.readFileSync(".ssl/ca.pem") + ]; + } + ``` + + * **isAbsolute**: `boolean` (Default: `false`) + + If this option is explicitly set to `true`, the _url_ will be treated as the + absolute database path. This is an escape hatch to allow using arangojs with + database APIs exposed with a reverse proxy and makes it impossible to switch + databases with _useDatabase_ or using _acquireHostList_. + + * **arangoVersion**: `number` (Default: `30000`) + + Value of the `x-arango-version` header. This should match the lowest + version of ArangoDB you expect to be using. The format is defined as + `XYYZZ` where `X` is the major version, `Y` is the two-digit minor version + and `Z` is the two-digit bugfix version. + + **Example**: `30102` corresponds to version 3.1.2 of ArangoDB. + + **Note**: The driver will behave differently when using different major + versions of ArangoDB to compensate for API changes. Some functions are + not available on every major version of ArangoDB as indicated in their + descriptions below (e.g. _collection.first_, _collection.bulkUpdate_). + + * **headers**: `Object` (optional) + + An object with additional headers to send with every request. + + Header names should always be lowercase. If an `"authorization"` header is + provided, it will be overridden when using _useBasicAuth_ or _useBearerAuth_. + + * **agent**: `Agent` (optional) + + An http Agent instance to use for connections. + + By default a new + [`http.Agent`](https://nodejs.org/api/http.html#http_new_agent_options) (or + https.Agent) instance will be created using the _agentOptions_. + + This option has no effect when using the browser version of arangojs. + + * **agentOptions**: `Object` (Default: see below) + + An object with options for the agent. This will be ignored if _agent_ is + also provided. + + Default: `{maxSockets: 3, keepAlive: true, keepAliveMsecs: 1000}`. + Browser default: `{maxSockets: 3, keepAlive: false}`; + + The option `maxSockets` can also be used to limit how many requests + arangojs will perform concurrently. The maximum number of requests is + equal to `maxSockets * 2` with `keepAlive: true` or + equal to `maxSockets` with `keepAlive: false`. 
+ + In the browser version of arangojs this option can be used to pass + additional options to the underlying calls of the + [`xhr`](https://www.npmjs.com/package/xhr) module. + + * **loadBalancingStrategy**: `string` (Default: `"NONE"`) + + Determines the behaviour when multiple URLs are provided: + + * `NONE`: No load balancing. All requests will be handled by the first + URL in the list until a network error is encountered. On network error, + arangojs will advance to using the next URL in the list. + + * `ONE_RANDOM`: Randomly picks one URL from the list initially, then + behaves like `NONE`. + + * `ROUND_ROBIN`: Every sequential request uses the next URL in the list. diff --git a/Documentation/Books/Drivers/JS/Reference/Database/Transactions.md b/Documentation/Books/Drivers/JS/Reference/Database/Transactions.md new file mode 100644 index 0000000000..864d17d237 --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Database/Transactions.md @@ -0,0 +1,100 @@ + +# Transactions + +This function implements the +[HTTP API for transactions](https://docs.arangodb.com/latest/HTTP/Transaction/index.html). + +## database.transaction + +`async database.transaction(collections, action, [params, [options]]): +Object` + +Performs a server-side transaction and returns its return value. + +**Arguments** + +* **collections**: `Object` + + An object with the following properties: + + * **read**: `Array` (optional) + + An array of names (or a single name) of collections that will be read from + during the transaction. + + * **write**: `Array` (optional) + + An array of names (or a single name) of collections that will be written to + or read from during the transaction. + +* **action**: `string` + + A string evaluating to a JavaScript function to be executed on the server. + + **Note**: For accessing the database from within ArangoDB, see + [the documentation for the `@arangodb` module in ArangoDB](https://docs.arangodb.com/latest/Manual/Appendix/JavaScriptModules/ArangoDB.html). + +* **params**: `Object` (optional) + + Available as variable `params` when the _action_ function is being executed on + server. Check the example below. + +* **options**: `Object` (optional) + + An object with any of the following properties: + + * **lockTimeout**: `number` (optional) + + Determines how long the database will wait while attempting to gain locks on + collections used by the transaction before timing out. + + * **waitForSync**: `boolean` (optional) + + Determines whether to force the transaction to write all data to disk before returning. + + * **maxTransactionSize**: `number` (optional) + + Determines the transaction size limit in bytes. Honored by the RocksDB storage engine only. + + * **intermediateCommitCount**: `number` (optional) + + Determines the maximum number of operations after which an intermediate commit is + performed automatically. Honored by the RocksDB storage engine only. + + * **intermediateCommitSize**: `number` (optional) + + Determine the maximum total size of operations after which an intermediate commit is + performed automatically. Honored by the RocksDB storage engine only. + +If _collections_ is an array or string, it will be treated as +_collections.write_. + +Please note that while _action_ should be a string evaluating to a well-formed +JavaScript function, it's not possible to pass in a JavaScript function directly +because the function needs to be evaluated on the server and will be transmitted +in plain text. 
+ +For more information on transactions, see +[the HTTP API documentation for transactions](https://docs.arangodb.com/latest/HTTP/Transaction/index.html). + +**Examples** + +```js +const db = new Database(); +const action = String(function (params) { + // This code will be executed inside ArangoDB! + const db = require('@arangodb').db; + return db._query(aql` + FOR user IN _users + FILTER user.age > ${params.age} + RETURN u.user + `).toArray(); +}); + +const result = await db.transaction( + {read: '_users'}, + action, + {age: 12} +); +// result contains the return value of the action +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Graph/EdgeCollection.md b/Documentation/Books/Drivers/JS/Reference/Graph/EdgeCollection.md new file mode 100644 index 0000000000..a86142c595 --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Graph/EdgeCollection.md @@ -0,0 +1,252 @@ + +# GraphEdgeCollection API + +The _GraphEdgeCollection API_ extends the +[_Collection API_](../Collection/README.md) with the following methods. + +## graphEdgeCollection.remove + +`async graphEdgeCollection.remove(documentHandle): Object` + +Deletes the edge with the given _documentHandle_ from the collection. + +**Arguments** + +* **documentHandle**: `string` + + The handle of the edge to retrieve. This can be either the `_id` or the `_key` + of an edge in the collection, or an edge (i.e. an object with an `_id` or + `_key` property). + +**Examples** + +```js +const graph = db.graph('some-graph'); +const collection = graph.edgeCollection('edges'); + +await collection.remove('some-key') +// document 'edges/some-key' no longer exists + +// -- or -- + +await collection.remove('edges/some-key') +// document 'edges/some-key' no longer exists +``` + +## graphEdgeCollection.edge + +`async graphEdgeCollection.edge(documentHandle): Object` + +Retrieves the edge with the given _documentHandle_ from the collection. + +**Arguments** + +* **documentHandle**: `string` + + The handle of the edge to retrieve. This can be either the `_id` or the `_key` + of an edge in the collection, or an edge (i.e. an object with an `_id` or + `_key` property). + +**Examples** + +```js +const graph = db.graph('some-graph'); +const collection = graph.edgeCollection('edges'); + +const edge = await collection.edge('some-key'); +// the edge exists +assert.equal(edge._key, 'some-key'); +assert.equal(edge._id, 'edges/some-key'); + +// -- or -- + +const edge = await collection.edge('edges/some-key'); +// the edge exists +assert.equal(edge._key, 'some-key'); +assert.equal(edge._id, 'edges/some-key'); +``` + +## graphEdgeCollection.save + +`async graphEdgeCollection.save(data, [fromId, toId]): Object` + +Creates a new edge between the vertices _fromId_ and _toId_ with the given +_data_. + +**Arguments** + +* **data**: `Object` + + The data of the new edge. If _fromId_ and _toId_ are not specified, the _data_ + needs to contain the properties __from_ and __to_. + +* **fromId**: `string` (optional) + + The handle of the start vertex of this edge. This can be either the `_id` of a + document in the database, the `_key` of an edge in the collection, or a + document (i.e. an object with an `_id` or `_key` property). + +* **toId**: `string` (optional) + + The handle of the end vertex of this edge. This can be either the `_id` of a + document in the database, the `_key` of an edge in the collection, or a + document (i.e. an object with an `_id` or `_key` property). 
+ +**Examples** + +```js +const db = new Database(); +const graph = db.graph('some-graph'); +const collection = graph.edgeCollection('edges'); +const edge = await collection.save( + {some: 'data'}, + 'vertices/start-vertex', + 'vertices/end-vertex' +); +assert.equal(edge._id, 'edges/' + edge._key); +assert.equal(edge.some, 'data'); +assert.equal(edge._from, 'vertices/start-vertex'); +assert.equal(edge._to, 'vertices/end-vertex'); +``` + +## graphEdgeCollection.edges + +`async graphEdgeCollection.edges(documentHandle): Array` + +Retrieves a list of all edges of the document with the given _documentHandle_. + +**Arguments** + +* **documentHandle**: `string` + + The handle of the document to retrieve the edges of. This can be either the + `_id` of a document in the database, the `_key` of an edge in the collection, + or a document (i.e. an object with an `_id` or `_key` property). + +**Examples** + +```js +const db = new Database(); +const graph = db.graph('some-graph'); +const collection = graph.edgeCollection('edges'); +await collection.import([ + ['_key', '_from', '_to'], + ['x', 'vertices/a', 'vertices/b'], + ['y', 'vertices/a', 'vertices/c'], + ['z', 'vertices/d', 'vertices/a'] +]); +const edges = await collection.edges('vertices/a'); +assert.equal(edges.length, 3); +assert.deepEqual(edges.map(edge => edge._key), ['x', 'y', 'z']); +``` + +## graphEdgeCollection.inEdges + +`async graphEdgeCollection.inEdges(documentHandle): Array` + +Retrieves a list of all incoming edges of the document with the given +_documentHandle_. + +**Arguments** + +* **documentHandle**: `string` + + The handle of the document to retrieve the edges of. This can be either the + `_id` of a document in the database, the `_key` of an edge in the collection, + or a document (i.e. an object with an `_id` or `_key` property). + +**Examples** + +```js +const db = new Database(); +const graph = db.graph('some-graph'); +const collection = graph.edgeCollection('edges'); +await collection.import([ + ['_key', '_from', '_to'], + ['x', 'vertices/a', 'vertices/b'], + ['y', 'vertices/a', 'vertices/c'], + ['z', 'vertices/d', 'vertices/a'] +]); +const edges = await collection.inEdges('vertices/a'); +assert.equal(edges.length, 1); +assert.equal(edges[0]._key, 'z'); +``` + +## graphEdgeCollection.outEdges + +`async graphEdgeCollection.outEdges(documentHandle): Array` + +Retrieves a list of all outgoing edges of the document with the given +_documentHandle_. + +**Arguments** + +* **documentHandle**: `string` + + The handle of the document to retrieve the edges of. This can be either the + `_id` of a document in the database, the `_key` of an edge in the collection, + or a document (i.e. an object with an `_id` or `_key` property). + +**Examples** + +```js +const db = new Database(); +const graph = db.graph('some-graph'); +const collection = graph.edgeCollection('edges'); +await collection.import([ + ['_key', '_from', '_to'], + ['x', 'vertices/a', 'vertices/b'], + ['y', 'vertices/a', 'vertices/c'], + ['z', 'vertices/d', 'vertices/a'] +]); +const edges = await collection.outEdges('vertices/a'); +assert.equal(edges.length, 2); +assert.deepEqual(edges.map(edge => edge._key), ['x', 'y']); +``` + +## graphEdgeCollection.traversal + +`async graphEdgeCollection.traversal(startVertex, opts): Object` + +Performs a traversal starting from the given _startVertex_ and following edges +contained in this edge collection. + +**Arguments** + +* **startVertex**: `string` + + The handle of the start vertex. 
This can be either the `_id` of a document in + the database, the `_key` of an edge in the collection, or a document (i.e. an + object with an `_id` or `_key` property). + +* **opts**: `Object` + + See + [the HTTP API documentation](https://docs.arangodb.com/latest/HTTP/Traversal/index.html) + for details on the additional arguments. + + Please note that while _opts.filter_, _opts.visitor_, _opts.init_, + _opts.expander_ and _opts.sort_ should be strings evaluating to well-formed + JavaScript code, it's not possible to pass in JavaScript functions directly + because the code needs to be evaluated on the server and will be transmitted + in plain text. + +**Examples** + +```js +const db = new Database(); +const graph = db.graph('some-graph'); +const collection = graph.edgeCollection('edges'); +await collection.import([ + ['_key', '_from', '_to'], + ['x', 'vertices/a', 'vertices/b'], + ['y', 'vertices/b', 'vertices/c'], + ['z', 'vertices/c', 'vertices/d'] +]); +const result = await collection.traversal('vertices/a', { + direction: 'outbound', + visitor: 'result.vertices.push(vertex._key);', + init: 'result.vertices = [];' +}); +assert.deepEqual(result.vertices, ['a', 'b', 'c', 'd']); +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Graph/Edges.md b/Documentation/Books/Drivers/JS/Reference/Graph/Edges.md new file mode 100644 index 0000000000..efd6a8a9d9 --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Graph/Edges.md @@ -0,0 +1,165 @@ + +# Manipulating edges + +## graph.edgeCollection + +`graph.edgeCollection(collectionName): GraphEdgeCollection` + +Returns a new [_GraphEdgeCollection_ instance](EdgeCollection.md) with +the given name bound to this graph. + +**Arguments** + +* **collectionName**: `string` + + Name of the edge collection. + +**Examples** + +```js +const db = new Database(); +// assuming the collections "edges" and "vertices" exist +const graph = db.graph("some-graph"); +const collection = graph.edgeCollection("edges"); +assert.equal(collection.name, "edges"); +// collection is a GraphEdgeCollection +``` + +## graph.addEdgeDefinition + +`async graph.addEdgeDefinition(definition): Object` + +Adds the given edge definition _definition_ to the graph. + +**Arguments** + +* **definition**: `Object` + + For more information on edge definitions see + [the HTTP API for managing graphs](https://docs.arangodb.com/latest/HTTP/Gharial/Management.html). + +**Examples** + +```js +const db = new Database(); +// assuming the collections "edges" and "vertices" exist +const graph = db.graph('some-graph'); +await graph.addEdgeDefinition({ + collection: 'edges', + from: ['vertices'], + to: ['vertices'] +}); +// the edge definition has been added to the graph +``` + +## graph.replaceEdgeDefinition + +`async graph.replaceEdgeDefinition(collectionName, definition): Object` + +Replaces the edge definition for the edge collection named _collectionName_ with +the given _definition_. + +**Arguments** + +* **collectionName**: `string` + + Name of the edge collection to replace the definition of. + +* **definition**: `Object` + + For more information on edge definitions see + [the HTTP API for managing graphs](https://docs.arangodb.com/latest/HTTP/Gharial/Management.html). 
+ +**Examples** + +```js +const db = new Database(); +// assuming the collections "edges", "vertices" and "more-vertices" exist +const graph = db.graph('some-graph'); +await graph.replaceEdgeDefinition('edges', { + collection: 'edges', + from: ['vertices'], + to: ['more-vertices'] +}); +// the edge definition has been modified +``` + +## graph.removeEdgeDefinition + +`async graph.removeEdgeDefinition(definitionName, [dropCollection]): Object` + +Removes the edge definition with the given _definitionName_ form the graph. + +**Arguments** + +* **definitionName**: `string` + + Name of the edge definition to remove from the graph. + +* **dropCollection**: `boolean` (optional) + + If set to `true`, the edge collection associated with the definition will also + be deleted from the database. + +**Examples** + +```js +const db = new Database(); +const graph = db.graph('some-graph'); + +await graph.removeEdgeDefinition('edges') +// the edge definition has been removed + +// -- or -- + +await graph.removeEdgeDefinition('edges', true) +// the edge definition has been removed +// and the edge collection "edges" has been dropped +// this may have been a bad idea +``` + +## graph.traversal + +`async graph.traversal(startVertex, opts): Object` + +Performs a traversal starting from the given _startVertex_ and following edges +contained in any of the edge collections of this graph. + +**Arguments** + +* **startVertex**: `string` + + The handle of the start vertex. This can be either the `_id` of a document in + the graph or a document (i.e. an object with an `_id` property). + +* **opts**: `Object` + + See + [the HTTP API documentation](https://docs.arangodb.com/latest/HTTP/Traversal/index.html) + for details on the additional arguments. + + Please note that while _opts.filter_, _opts.visitor_, _opts.init_, + _opts.expander_ and _opts.sort_ should be strings evaluating to well-formed + JavaScript functions, it's not possible to pass in JavaScript functions + directly because the functions need to be evaluated on the server and will be + transmitted in plain text. + +**Examples** + +```js +const db = new Database(); +const graph = db.graph('some-graph'); +const collection = graph.edgeCollection('edges'); +await collection.import([ + ['_key', '_from', '_to'], + ['x', 'vertices/a', 'vertices/b'], + ['y', 'vertices/b', 'vertices/c'], + ['z', 'vertices/c', 'vertices/d'] +]) +const result = await graph.traversal('vertices/a', { + direction: 'outbound', + visitor: 'result.vertices.push(vertex._key);', + init: 'result.vertices = [];' +}); +assert.deepEqual(result.vertices, ['a', 'b', 'c', 'd']); +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Graph/README.md b/Documentation/Books/Drivers/JS/Reference/Graph/README.md new file mode 100644 index 0000000000..d2a7bda4fa --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Graph/README.md @@ -0,0 +1,71 @@ + +# Graph API + +These functions implement the +[HTTP API for manipulating graphs](https://docs.arangodb.com/latest/HTTP/Gharial/index.html). + +## graph.get + +`async graph.get(): Object` + +Retrieves general information about the graph. + +**Examples** + +```js +const db = new Database(); +const graph = db.graph('some-graph'); +const data = await graph.get(); +// data contains general information about the graph +``` + +## graph.create + +`async graph.create(properties): Object` + +Creates a graph with the given _properties_ for this graph's name, then returns +the server response. 
+ +**Arguments** + +* **properties**: `Object` + + For more information on the _properties_ object, see + [the HTTP API documentation for creating graphs](https://docs.arangodb.com/latest/HTTP/Gharial/Management.html). + +**Examples** + +```js +const db = new Database(); +const graph = db.graph('some-graph'); +const info = await graph.create({ + edgeDefinitions: [{ + collection: 'edges', + from: ['start-vertices'], + to: ['end-vertices'] + }] +}); +// graph now exists +``` + +## graph.drop + +`async graph.drop([dropCollections]): Object` + +Deletes the graph from the database. + +**Arguments** + +* **dropCollections**: `boolean` (optional) + + If set to `true`, the collections associated with the graph will also be + deleted. + +**Examples** + +```js +const db = new Database(); +const graph = db.graph('some-graph'); +await graph.drop(); +// the graph "some-graph" no longer exists +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Graph/VertexCollection.md b/Documentation/Books/Drivers/JS/Reference/Graph/VertexCollection.md new file mode 100644 index 0000000000..059225e08e --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Graph/VertexCollection.md @@ -0,0 +1,90 @@ + +# GraphVertexCollection API + +The _GraphVertexCollection API_ extends the +[_Collection API_](../Collection/README.md) with the following methods. + +## graphVertexCollection.remove + +`async graphVertexCollection.remove(documentHandle): Object` + +Deletes the vertex with the given _documentHandle_ from the collection. + +**Arguments** + +* **documentHandle**: `string` + + The handle of the vertex to retrieve. This can be either the `_id` or the + `_key` of a vertex in the collection, or a vertex (i.e. an object with an + `_id` or `_key` property). + +**Examples** + +```js +const graph = db.graph('some-graph'); +const collection = graph.vertexCollection('vertices'); + +await collection.remove('some-key') +// document 'vertices/some-key' no longer exists + +// -- or -- + +await collection.remove('vertices/some-key') +// document 'vertices/some-key' no longer exists +``` + +## graphVertexCollection.vertex + +`async graphVertexCollection.vertex(documentHandle): Object` + +Retrieves the vertex with the given _documentHandle_ from the collection. + +**Arguments** + +* **documentHandle**: `string` + + The handle of the vertex to retrieve. This can be either the `_id` or the + `_key` of a vertex in the collection, or a vertex (i.e. an object with an + `_id` or `_key` property). + +**Examples** + +```js +const graph = db.graph('some-graph'); +const collection = graph.vertexCollection('vertices'); + +const doc = await collection.vertex('some-key'); +// the vertex exists +assert.equal(doc._key, 'some-key'); +assert.equal(doc._id, 'vertices/some-key'); + +// -- or -- + +const doc = await collection.vertex('vertices/some-key'); +// the vertex exists +assert.equal(doc._key, 'some-key'); +assert.equal(doc._id, 'vertices/some-key'); +``` + +## graphVertexCollection.save + +`async graphVertexCollection.save(data): Object` + +Creates a new vertex with the given _data_. + +**Arguments** + +* **data**: `Object` + + The data of the vertex. 
+ +**Examples** + +```js +const db = new Database(); +const graph = db.graph('some-graph'); +const collection = graph.vertexCollection('vertices'); +const doc = await collection.save({some: 'data'}); +assert.equal(doc._id, 'vertices/' + doc._key); +assert.equal(doc.some, 'data'); +``` diff --git a/Documentation/Books/Drivers/JS/Reference/Graph/Vertices.md b/Documentation/Books/Drivers/JS/Reference/Graph/Vertices.md new file mode 100644 index 0000000000..6ab1e45495 --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Graph/Vertices.md @@ -0,0 +1,135 @@ + +# Manipulating vertices + +## graph.vertexCollection + +`graph.vertexCollection(collectionName): GraphVertexCollection` + +Returns a new [_GraphVertexCollection_ instance](VertexCollection.md) +with the given name for this graph. + +**Arguments** + +* **collectionName**: `string` + + Name of the vertex collection. + +**Examples** + +```js +const db = new Database(); +const graph = db.graph("some-graph"); +const collection = graph.vertexCollection("vertices"); +assert.equal(collection.name, "vertices"); +// collection is a GraphVertexCollection +``` + +## graph.listVertexCollections + +`async graph.listVertexCollections([excludeOrphans]): Array` + +Fetches all vertex collections from the graph and returns an array of collection descriptions. + +**Arguments** + +* **excludeOrphans**: `boolean` (Default: `false`) + + Whether orphan collections should be excluded. + +**Examples** + +```js +const graph = db.graph('some-graph'); + +const collections = await graph.listVertexCollections(); +// collections is an array of collection descriptions +// including orphan collections + +// -- or -- + +const collections = await graph.listVertexCollections(true); +// collections is an array of collection descriptions +// not including orphan collections +``` + +## graph.vertexCollections + +`async graph.vertexCollections([excludeOrphans]): Array` + +Fetches all vertex collections from the database and returns an array of _GraphVertexCollection_ instances for the collections. + +**Arguments** + +* **excludeOrphans**: `boolean` (Default: `false`) + + Whether orphan collections should be excluded. + +**Examples** + +```js +const graph = db.graph('some-graph'); + +const collections = await graph.vertexCollections() +// collections is an array of GraphVertexCollection +// instances including orphan collections + +// -- or -- + +const collections = await graph.vertexCollections(true) +// collections is an array of GraphVertexCollection +// instances not including orphan collections +``` + +## graph.addVertexCollection + +`async graph.addVertexCollection(collectionName): Object` + +Adds the collection with the given _collectionName_ to the graph's vertex +collections. + +**Arguments** + +* **collectionName**: `string` + + Name of the vertex collection to add to the graph. + +**Examples** + +```js +const db = new Database(); +const graph = db.graph('some-graph'); +await graph.addVertexCollection('vertices'); +// the collection "vertices" has been added to the graph +``` + +## graph.removeVertexCollection + +`async graph.removeVertexCollection(collectionName, [dropCollection]): Object` + +Removes the vertex collection with the given _collectionName_ from the graph. + +**Arguments** + +* **collectionName**: `string` + + Name of the vertex collection to remove from the graph. + +* **dropCollection**: `boolean` (optional) + + If set to `true`, the collection will also be deleted from the database. 
+ +**Examples** + +```js +const db = new Database(); +const graph = db.graph('some-graph'); +await graph.removeVertexCollection('vertices') +// collection "vertices" has been removed from the graph + +// -- or -- + +await graph.removeVertexCollection('vertices', true) +// collection "vertices" has been removed from the graph +// the collection has also been dropped from the database +// this may have been a bad idea +``` diff --git a/Documentation/Books/Drivers/JS/Reference/README.md b/Documentation/Books/Drivers/JS/Reference/README.md index fc34854a05..f06d03882a 100644 --- a/Documentation/Books/Drivers/JS/Reference/README.md +++ b/Documentation/Books/Drivers/JS/Reference/README.md @@ -1,4443 +1,27 @@ # ArangoDB JavaScript Driver - Reference -## Database API - -### new Database - -`new Database([config]): Database` - -Creates a new _Database_ instance. - -If _config_ is a string, it will be interpreted as _config.url_. - -**Arguments** - -* **config**: `Object` (optional) - - An object with the following properties: - - * **url**: `string | Array` (Default: `http://localhost:8529`) - - Base URL of the ArangoDB server or list of server URLs. - - **Note**: As of arangojs 6.0.0 it is no longer possible to pass - the username or password from the URL. - - If you want to use ArangoDB with authentication, see - _useBasicAuth_ or _useBearerAuth_ methods. - - If you need to support self-signed HTTPS certificates, you may have to add - your certificates to the _agentOptions_, e.g.: - - ```js - agentOptions: { - ca: [ - fs.readFileSync(".ssl/sub.class1.server.ca.pem"), - fs.readFileSync(".ssl/ca.pem") - ]; - } - ``` - - * **isAbsolute**: `boolean` (Default: `false`) - - If this option is explicitly set to `true`, the _url_ will be treated as the - absolute database path. This is an escape hatch to allow using arangojs with - database APIs exposed with a reverse proxy and makes it impossible to switch - databases with _useDatabase_ or using _acquireHostList_. - - * **arangoVersion**: `number` (Default: `30000`) - - Value of the `x-arango-version` header. This should match the lowest - version of ArangoDB you expect to be using. The format is defined as - `XYYZZ` where `X` is the major version, `Y` is the two-digit minor version - and `Z` is the two-digit bugfix version. - - **Example**: `30102` corresponds to version 3.1.2 of ArangoDB. - - **Note**: The driver will behave differently when using different major - versions of ArangoDB to compensate for API changes. Some functions are - not available on every major version of ArangoDB as indicated in their - descriptions below (e.g. _collection.first_, _collection.bulkUpdate_). - - * **headers**: `Object` (optional) - - An object with additional headers to send with every request. - - Header names should always be lowercase. If an `"authorization"` header is - provided, it will be overridden when using _useBasicAuth_ or _useBearerAuth_. - - * **agent**: `Agent` (optional) - - An http Agent instance to use for connections. - - By default a new - [`http.Agent`](https://nodejs.org/api/http.html#http_new_agent_options) (or - https.Agent) instance will be created using the _agentOptions_. - - This option has no effect when using the browser version of arangojs. - - * **agentOptions**: `Object` (Default: see below) - - An object with options for the agent. This will be ignored if _agent_ is - also provided. - - Default: `{maxSockets: 3, keepAlive: true, keepAliveMsecs: 1000}`. 
- Browser default: `{maxSockets: 3, keepAlive: false}`; - - The option `maxSockets` can also be used to limit how many requests - arangojs will perform concurrently. The maximum number of requests is - equal to `maxSockets * 2` with `keepAlive: true` or - equal to `maxSockets` with `keepAlive: false`. - - In the browser version of arangojs this option can be used to pass - additional options to the underlying calls of the - [`xhr`](https://www.npmjs.com/package/xhr) module. - - * **loadBalancingStrategy**: `string` (Default: `"NONE"`) - - Determines the behaviour when multiple URLs are provided: - - * `NONE`: No load balancing. All requests will be handled by the first - URL in the list until a network error is encountered. On network error, - arangojs will advance to using the next URL in the list. - - * `ONE_RANDOM`: Randomly picks one URL from the list initially, then - behaves like `NONE`. - - * `ROUND_ROBIN`: Every sequential request uses the next URL in the list. - -### Manipulating databases - -These functions implement the -[HTTP API for manipulating databases](https://docs.arangodb.com/latest/HTTP/Database/index.html). - -#### database.acquireHostList - -`async database.acquireHostList(): this` - -Updates the URL list by requesting a list of all coordinators in the cluster and adding any endpoints not initially specified in the _url_ configuration. - -For long-running processes communicating with an ArangoDB cluster it is recommended to run this method repeatedly (e.g. once per hour) to make sure new coordinators are picked up correctly and can be used for fail-over or load balancing. - -**Note**: This method can not be used when the arangojs instance was created with `isAbsolute: true`. - -#### database.useDatabase - -`database.useDatabase(databaseName): this` - -Updates the _Database_ instance and its connection string to use the given -_databaseName_, then returns itself. - -**Note**: This method can not be used when the arangojs instance was created with `isAbsolute: true`. - -**Arguments** - -* **databaseName**: `string` - - The name of the database to use. - -**Examples** - -```js -const db = new Database(); -db.useDatabase("test"); -// The database instance now uses the database "test". -``` - -#### database.useBasicAuth - -`database.useBasicAuth(username, password): this` - -Updates the _Database_ instance's `authorization` header to use Basic -authentication with the given _username_ and _password_, then returns itself. - -**Arguments** - -* **username**: `string` (Default: `"root"`) - - The username to authenticate with. - -* **password**: `string` (Default: `""`) - - The password to authenticate with. - -**Examples** - -```js -const db = new Database(); -db.useDatabase("test"); -db.useBasicAuth("admin", "hunter2"); -// The database instance now uses the database "test" -// with the username "admin" and password "hunter2". -``` - -#### database.useBearerAuth - -`database.useBearerAuth(token): this` - -Updates the _Database_ instance's `authorization` header to use Bearer -authentication with the given authentication token, then returns itself. - -**Arguments** - -* **token**: `string` - - The token to authenticate with. - -**Examples** - -```js -const db = new Database(); -db.useBearerAuth("keyboardcat"); -// The database instance now uses Bearer authentication. -``` - -#### database.createDatabase - -`async database.createDatabase(databaseName, [users]): Object` - -Creates a new database with the given _databaseName_. 
- -**Arguments** - -* **databaseName**: `string` - - Name of the database to create. - -* **users**: `Array` (optional) - - If specified, the array must contain objects with the following properties: - - * **username**: `string` - - The username of the user to create for the database. - - * **passwd**: `string` (Default: empty) - - The password of the user. - - * **active**: `boolean` (Default: `true`) - - Whether the user is active. - - * **extra**: `Object` (optional) - - An object containing additional user data. - -**Examples** - -```js -const db = new Database(); -const info = await db.createDatabase('mydb', [{username: 'root'}]); -// the database has been created -``` - -#### database.get - -`async database.get(): Object` - -Fetches the database description for the active database from the server. - -**Examples** - -```js -const db = new Database(); -const info = await db.get(); -// the database exists -``` - -#### database.listDatabases - -`async database.listDatabases(): Array` - -Fetches all databases from the server and returns an array of their names. - -**Examples** - -```js -const db = new Database(); -const names = await db.listDatabases(); -// databases is an array of database names -``` - -#### database.listUserDatabases - -`async database.listUserDatabases(): Array` - -Fetches all databases accessible to the active user from the server and returns -an array of their names. - -**Examples** - -```js -const db = new Database(); -const names = await db.listUserDatabases(); -// databases is an array of database names -``` - -#### database.dropDatabase - -`async database.dropDatabase(databaseName): Object` - -Deletes the database with the given _databaseName_ from the server. - -```js -const db = new Database(); -await db.dropDatabase('mydb'); -// database "mydb" no longer exists -``` - -#### database.truncate - -`async database.truncate([excludeSystem]): Object` - -Deletes **all documents in all collections** in the active database. - -**Arguments** - -* **excludeSystem**: `boolean` (Default: `true`) - - Whether system collections should be excluded. Note that this option will be - ignored because truncating system collections is not supported anymore for - some system collections. - -**Examples** - -```js -const db = new Database(); - -await db.truncate(); -// all non-system collections in this database are now empty -``` - -### Accessing collections - -These functions implement the -[HTTP API for accessing collections](https://docs.arangodb.com/latest/HTTP/Collection/Getting.html). - -#### database.collection - -`database.collection(collectionName): DocumentCollection` - -Returns a _DocumentCollection_ instance for the given collection name. - -**Arguments** - -* **collectionName**: `string` - - Name of the edge collection. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection("potatos"); -``` - -#### database.edgeCollection - -`database.edgeCollection(collectionName): EdgeCollection` - -Returns an _EdgeCollection_ instance for the given collection name. - -**Arguments** - -* **collectionName**: `string` - - Name of the edge collection. - -**Examples** - -```js -const db = new Database(); -const collection = db.edgeCollection("potatos"); -``` - -#### database.listCollections - -`async database.listCollections([excludeSystem]): Array` - -Fetches all collections from the database and returns an array of collection -descriptions. 
- -**Arguments** - -* **excludeSystem**: `boolean` (Default: `true`) - - Whether system collections should be excluded. - -**Examples** - -```js -const db = new Database(); - -const collections = await db.listCollections(); -// collections is an array of collection descriptions -// not including system collections - -// -- or -- - -const collections = await db.listCollections(false); -// collections is an array of collection descriptions -// including system collections -``` - -#### database.collections - -`async database.collections([excludeSystem]): Array` - -Fetches all collections from the database and returns an array of -_DocumentCollection_ and _EdgeCollection_ instances for the collections. - -**Arguments** - -* **excludeSystem**: `boolean` (Default: `true`) - - Whether system collections should be excluded. - -**Examples** - -```js -const db = new Database(); - -const collections = await db.collections() -// collections is an array of DocumentCollection -// and EdgeCollection instances -// not including system collections - -// -- or -- - -const collections = await db.collections(false) -// collections is an array of DocumentCollection -// and EdgeCollection instances -// including system collections -``` - -### Accessing graphs - -These functions implement the -[HTTP API for accessing general graphs](https://docs.arangodb.com/latest/HTTP/Gharial/index.html). - -#### database.graph - -`database.graph(graphName): Graph` - -Returns a _Graph_ instance representing the graph with the given graph name. - -#### database.listGraphs - -`async database.listGraphs(): Array` - -Fetches all graphs from the database and returns an array of graph descriptions. - -**Examples** - -```js -const db = new Database(); -const graphs = await db.listGraphs(); -// graphs is an array of graph descriptions -``` - -#### database.graphs - -`async database.graphs(): Array` - -Fetches all graphs from the database and returns an array of _Graph_ instances -for the graphs. - -**Examples** - -```js -const db = new Database(); -const graphs = await db.graphs(); -// graphs is an array of Graph instances -``` - -### Transactions - -This function implements the -[HTTP API for transactions](https://docs.arangodb.com/latest/HTTP/Transaction/index.html). - -#### database.transaction - -`async database.transaction(collections, action, [params, [options]]): -Object` - -Performs a server-side transaction and returns its return value. - -**Arguments** - -* **collections**: `Object` - - An object with the following properties: - - * **read**: `Array` (optional) - - An array of names (or a single name) of collections that will be read from - during the transaction. - - * **write**: `Array` (optional) - - An array of names (or a single name) of collections that will be written to - or read from during the transaction. - -* **action**: `string` - - A string evaluating to a JavaScript function to be executed on the server. - - **Note**: For accessing the database from within ArangoDB, see - [the documentation for the `@arangodb` module in ArangoDB](https://docs.arangodb.com/3.1/Manual/Appendix/JavaScriptModules/ArangoDB.html). - -* **params**: `Object` (optional) - - Available as variable `params` when the _action_ function is being executed on - server. Check the example below. 
- -* **options**: `Object` (optional) - - An object with any of the following properties: - - * **lockTimeout**: `number` (optional) - - Determines how long the database will wait while attemping to gain locks on - collections used by the transaction before timing out. - - * **waitForSync**: `boolean` (optional) - - Determines whether to force the transaction to write all data to disk before returning. - - * **maxTransactionSize**: `number` (optional) - - Determines the transaction size limit in bytes. Honored by the RocksDB storage engine only. - - * **intermediateCommitCount**: `number` (optional) - - Determines the maximum number of operations after which an intermediate commit is - performed automatically. Honored by the RocksDB storage engine only. - - * **intermediateCommitSize**: `number` (optional) - - Determine the maximum total size of operations after which an intermediate commit is - performed automatically. Honored by the RocksDB storage engine only. - -If _collections_ is an array or string, it will be treated as -_collections.write_. - -Please note that while _action_ should be a string evaluating to a well-formed -JavaScript function, it's not possible to pass in a JavaScript function directly -because the function needs to be evaluated on the server and will be transmitted -in plain text. - -For more information on transactions, see -[the HTTP API documentation for transactions](https://docs.arangodb.com/latest/HTTP/Transaction/index.html). - -**Examples** - -```js -const db = new Database(); -const action = String(function (params) { - // This code will be executed inside ArangoDB! - const db = require('@arangodb').db; - return db._query(aql` - FOR user IN _users - FILTER user.age > ${params.age} - RETURN u.user - `).toArray(); -}); - -const result = await db.transaction( - {read: '_users'}, - action, - {age: 12} -); -// result contains the return value of the action -``` - -### Queries - -This function implements the -[HTTP API for single roundtrip AQL queries](https://docs.arangodb.com/latest/HTTP/AqlQueryCursor/QueryResults.html). - -For collection-specific queries see [simple queries](#simple-queries). - -#### database.query - -`async database.query(query, [bindVars,] [opts]): Cursor` - -Performs a database query using the given _query_ and _bindVars_, then returns a -[new _Cursor_ instance](#cursor-api) for the result list. - -**Arguments** - -* **query**: `string` - - An AQL query string or a [query builder](https://npmjs.org/package/aqb) - instance. - -* **bindVars**: `Object` (optional) - - An object defining the variables to bind the query to. - -* **opts**: `Object` (optional) - - Additional parameter object that will be passed to the query API. - Possible keys are _count_ and _options_ (explained below) - -If _opts.count_ is set to `true`, the cursor will have a _count_ property set to -the query result count. -Possible key options in _opts.options_ include: _failOnWarning_, _cache_, profile or _skipInaccessibleCollections_. -For a complete list of query settings please reference the [arangodb.com documentation](https://docs.arangodb.com/3.3/AQL/Invocation/WithArangosh.html#setting-options). - -If _query_ is an object with _query_ and _bindVars_ properties, those will be -used as the values of the respective arguments instead. 
- -**Examples** - -```js -const db = new Database(); -const active = true; - -// Using the aql template tag -const cursor = await db.query(aql` - FOR u IN _users - FILTER u.authData.active == ${active} - RETURN u.user -`); -// cursor is a cursor for the query result - -// -- or -- - -// Old-school JS with explicit bindVars: -db.query( - 'FOR u IN _users ' + - 'FILTER u.authData.active == @active ' + - 'RETURN u.user', - {active: true} -).then(function (cursor) { - // cursor is a cursor for the query result -}); -``` - -#### aql - -`aql(strings, ...args): Object` - -Template string handler (aka template tag) for AQL queries. Converts a template -string to an object that can be passed to `database.query` by converting -arguments to bind variables. - -**Note**: If you want to pass a collection name as a bind variable, you need to -pass a _Collection_ instance (e.g. what you get by passing the collection name -to `db.collection`) instead. If you see the error `"array expected as operand to -FOR loop"`, you're likely passing a collection name instead of a collection -instance. - -**Examples** - -```js -const userCollection = db.collection("_users"); -const role = "admin"; - -const query = aql` - FOR user IN ${userCollection} - FILTER user.role == ${role} - RETURN user -`; - -// -- is equivalent to -- -const query = { - query: "FOR user IN @@value0 FILTER user.role == @value1 RETURN user", - bindVars: { "@value0": userCollection.name, value1: role } -}; -``` - -Note how the aql template tag automatically handles collection references -(`@@value0` instead of `@value0`) for us so you don't have to worry about -counting at-symbols. - -Because the aql template tag creates actual bindVars instead of inlining values -directly, it also avoids injection attacks via malicious parameters: - -```js -// malicious user input -const email = '" || (FOR x IN secrets REMOVE x IN secrets) || "'; - -// DON'T do this! -const query = ` - FOR user IN users - FILTER user.email == "${email}" - RETURN user -`; -// FILTER user.email == "" || (FOR x IN secrets REMOVE x IN secrets) || "" - -// instead do this! -const query = aql` - FOR user IN users - FILTER user.email == ${email} - RETURN user -`; -// FILTER user.email == @value0 -``` - -### Managing AQL user functions - -These functions implement the -[HTTP API for managing AQL user functions](https://docs.arangodb.com/latest/HTTP/AqlUserFunctions/index.html). - -#### database.listFunctions - -`async database.listFunctions(): Array` - -Fetches a list of all AQL user functions registered with the database. - -**Examples** - -```js -const db = new Database(); -const functions = db.listFunctions(); -// functions is a list of function descriptions -``` - -#### database.createFunction - -`async database.createFunction(name, code): Object` - -Creates an AQL user function with the given _name_ and _code_ if it does not -already exist or replaces it if a function with the same name already existed. - -**Arguments** - -* **name**: `string` - - A valid AQL function name, e.g.: `"myfuncs::accounting::calculate_vat"`. - -* **code**: `string` - - A string evaluating to a JavaScript function (not a JavaScript function - object). 
- -**Examples** - -```js -const db = new Database(); -await db.createFunction( - 'ACME::ACCOUNTING::CALCULATE_VAT', - String(function (price) { - return price * 0.19; - }) -); -// Use the new function in an AQL query with template handler: -const cursor = await db.query(aql` - FOR product IN products - RETURN MERGE( - {vat: ACME::ACCOUNTING::CALCULATE_VAT(product.price)}, - product - ) -`); -// cursor is a cursor for the query result -``` - -#### database.dropFunction - -`async database.dropFunction(name, [group]): Object` - -Deletes the AQL user function with the given name from the database. - -**Arguments** - -* **name**: `string` - - The name of the user function to drop. - -* **group**: `boolean` (Default: `false`) - - If set to `true`, all functions with a name starting with _name_ will be - deleted; otherwise only the function with the exact name will be deleted. - -**Examples** - -```js -const db = new Database(); -await db.dropFunction('ACME::ACCOUNTING::CALCULATE_VAT'); -// the function no longer exists -``` - -### Managing Foxx services - -#### database.listServices - -`async database.listServices([excludeSystem]): Array` - -Fetches a list of all installed service. - -**Arguments** - -* **excludeSystem**: `boolean` (Default: `true`) - - Whether system services should be excluded. - -**Examples** - -```js -const services = await db.listServices(); - -// -- or -- - -const services = await db.listServices(false); -``` - -#### database.installService - -`async database.installService(mount, source, [options]): Object` - -Installs a new service. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. - -* **source**: `Buffer | Readable | File | string` - - The service bundle to install. - -* **options**: `Object` (optional) - - An object with any of the following properties: - - * **configuration**: `Object` (optional) - - An object mapping configuration option names to values. - - * **dependencies**: `Object` (optional) - - An object mapping dependency aliases to mount points. - - * **development**: `boolean` (Default: `false`) - - Whether the service should be installed in development mode. - - * **legacy**: `boolean` (Default: `false`) - - Whether the service should be installed in legacy compatibility mode. - - This overrides the `engines` option in the service manifest (if any). - - * **setup**: `boolean` (Default: `true`) - - Whether the setup script should be executed. - -**Examples** - -```js -const source = fs.createReadStream('./my-foxx-service.zip'); -const info = await db.installService('/hello', source); - -// -- or -- - -const source = fs.readFileSync('./my-foxx-service.zip'); -const info = await db.installService('/hello', source); - -// -- or -- - -const element = document.getElementById('my-file-input'); -const source = element.files[0]; -const info = await db.installService('/hello', source); -``` - -#### database.replaceService - -`async database.replaceService(mount, source, [options]): Object` - -Replaces an existing service with a new service by completely removing the old -service and installing a new service at the same mount point. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. - -* **source**: `Buffer | Readable | File | string` - - The service bundle to replace the existing service with. 
- -* **options**: `Object` (optional) - - An object with any of the following properties: - - * **configuration**: `Object` (optional) - - An object mapping configuration option names to values. - - This configuration will replace the existing configuration. - - * **dependencies**: `Object` (optional) - - An object mapping dependency aliases to mount points. - - These dependencies will replace the existing dependencies. - - * **development**: `boolean` (Default: `false`) - - Whether the new service should be installed in development mode. - - * **legacy**: `boolean` (Default: `false`) - - Whether the new service should be installed in legacy compatibility mode. - - This overrides the `engines` option in the service manifest (if any). - - * **teardown**: `boolean` (Default: `true`) - - Whether the teardown script of the old service should be executed. - - * **setup**: `boolean` (Default: `true`) - - Whether the setup script of the new service should be executed. - -**Examples** - -```js -const source = fs.createReadStream('./my-foxx-service.zip'); -const info = await db.replaceService('/hello', source); - -// -- or -- - -const source = fs.readFileSync('./my-foxx-service.zip'); -const info = await db.replaceService('/hello', source); - -// -- or -- - -const element = document.getElementById('my-file-input'); -const source = element.files[0]; -const info = await db.replaceService('/hello', source); -``` - -#### database.upgradeService - -`async database.upgradeService(mount, source, [options]): Object` - -Replaces an existing service with a new service while retaining the old -service's configuration and dependencies. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. - -* **source**: `Buffer | Readable | File | string` - - The service bundle to replace the existing service with. - -* **options**: `Object` (optional) - - An object with any of the following properties: - - * **configuration**: `Object` (optional) - - An object mapping configuration option names to values. - - This configuration will be merged into the existing configuration. - - * **dependencies**: `Object` (optional) - - An object mapping dependency aliases to mount points. - - These dependencies will be merged into the existing dependencies. - - * **development**: `boolean` (Default: `false`) - - Whether the new service should be installed in development mode. - - * **legacy**: `boolean` (Default: `false`) - - Whether the new service should be installed in legacy compatibility mode. - - This overrides the `engines` option in the service manifest (if any). - - * **teardown**: `boolean` (Default: `false`) - - Whether the teardown script of the old service should be executed. - - * **setup**: `boolean` (Default: `true`) - - Whether the setup script of the new service should be executed. - -**Examples** - -```js -const source = fs.createReadStream('./my-foxx-service.zip'); -const info = await db.upgradeService('/hello', source); - -// -- or -- - -const source = fs.readFileSync('./my-foxx-service.zip'); -const info = await db.upgradeService('/hello', source); - -// -- or -- - -const element = document.getElementById('my-file-input'); -const source = element.files[0]; -const info = await db.upgradeService('/hello', source); -``` - -#### database.uninstallService - -`async database.uninstallService(mount, [options]): void` - -Completely removes a service from the database. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. 
- -* **options**: `Object` (optional) - - An object with any of the following properties: - - * **teardown**: `boolean` (Default: `true`) - - Whether the teardown script should be executed. - -**Examples** - -```js -await db.uninstallService('/my-service'); -// service was uninstalled -``` - -#### database.getService - -`async database.getService(mount): Object` - -Retrieves information about a mounted service. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. - -**Examples** - -```js -const info = await db.getService('/my-service'); -// info contains detailed information about the service -``` - -#### database.getServiceConfiguration - -`async database.getServiceConfiguration(mount, [minimal]): Object` - -Retrieves an object with information about the service's configuration options -and their current values. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. - -* **minimal**: `boolean` (Default: `false`) - - Only return the current values. - -**Examples** - -```js -const config = await db.getServiceConfiguration('/my-service'); -// config contains information about the service's configuration -``` - -#### database.replaceServiceConfiguration - -`async database.replaceServiceConfiguration(mount, configuration, [minimal]): -Object` - -Replaces the configuration of the given service. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. - -* **configuration**: `Object` - - An object mapping configuration option names to values. - -* **minimal**: `boolean` (Default: `false`) - - Only return the current values and warnings (if any). - - **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids - triggering a second request to the database. - -**Examples** - -```js -const config = {currency: 'USD', locale: 'en-US'}; -const info = await db.replaceServiceConfiguration('/my-service', config); -// info.values contains information about the service's configuration -// info.warnings contains any validation errors for the configuration -``` - -#### database.updateServiceConfiguration - -`async database.updateServiceConfiguration(mount, configuration, [minimal]): -Object` - -Updates the configuration of the given service my merging the new values into -the existing ones. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. - -* **configuration**: `Object` - - An object mapping configuration option names to values. - -* **minimal**: `boolean` (Default: `false`) - - Only return the current values and warnings (if any). - - **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids - triggering a second request to the database. - -**Examples** - -```js -const config = {locale: 'en-US'}; -const info = await db.updateServiceConfiguration('/my-service', config); -// info.values contains information about the service's configuration -// info.warnings contains any validation errors for the configuration -``` - -#### database.getServiceDependencies - -`async database.getServiceDependencies(mount, [minimal]): Object` - -Retrieves an object with information about the service's dependencies and their -current mount points. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. - -* **minimal**: `boolean` (Default: `false`) - - Only return the current values and warnings (if any). 
- -**Examples** - -```js -const deps = await db.getServiceDependencies('/my-service'); -// deps contains information about the service's dependencies -``` - -#### database.replaceServiceDependencies - -`async database.replaceServiceDependencies(mount, dependencies, [minimal]): -Object` - -Replaces the dependencies for the given service. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. - -* **dependencies**: `Object` - - An object mapping dependency aliases to mount points. - -* **minimal**: `boolean` (Default: `false`) - - Only return the current values and warnings (if any). - - **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids - triggering a second request to the database. - -**Examples** - -```js -const deps = {mailer: '/mailer-api', auth: '/remote-auth'}; -const info = await db.replaceServiceDependencies('/my-service', deps); -// info.values contains information about the service's dependencies -// info.warnings contains any validation errors for the dependencies -``` - -#### database.updateServiceDependencies - -`async database.updateServiceDependencies(mount, dependencies, [minimal]): -Object` - -Updates the dependencies for the given service by merging the new values into -the existing ones. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. - -* **dependencies**: `Object` - - An object mapping dependency aliases to mount points. - -* **minimal**: `boolean` (Default: `false`) - - Only return the current values and warnings (if any). - - **Note:** when using ArangoDB 3.2.8 or older, enabling this option avoids - triggering a second request to the database. - -**Examples** - -```js -const deps = {mailer: '/mailer-api'}; -const info = await db.updateServiceDependencies('/my-service', deps); -// info.values contains information about the service's dependencies -// info.warnings contains any validation errors for the dependencies -``` - -#### database.enableServiceDevelopmentMode - -`async database.enableServiceDevelopmentMode(mount): Object` - -Enables development mode for the given service. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. - -**Examples** - -```js -const info = await db.enableServiceDevelopmentMode('/my-service'); -// the service is now in development mode -// info contains detailed information about the service -``` - -#### database.disableServiceDevelopmentMode - -`async database.disableServiceDevelopmentMode(mount): Object` - -Disabled development mode for the given service and commits the service state to -the database. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. - -**Examples** - -```js -const info = await db.disableServiceDevelopmentMode('/my-service'); -// the service is now in production mode -// info contains detailed information about the service -``` - -#### database.listServiceScripts - -`async database.listServiceScripts(mount): Object` - -Retrieves a list of the service's scripts. - -Returns an object mapping each name to a more readable representation. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. 
- -**Examples** - -```js -const scripts = await db.listServiceScripts('/my-service'); -// scripts is an object listing the service scripts -``` - -#### database.runServiceScript - -`async database.runServiceScript(mount, name, [scriptArg]): any` - -Runs a service script and returns the result. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. - -* **name**: `string` - - Name of the script to execute. - -* **scriptArg**: `any` - - Value that will be passed as an argument to the script. - -**Examples** - -```js -const result = await db.runServiceScript('/my-service', 'setup'); -// result contains the script's exports (if any) -``` - -#### database.runServiceTests - -`async database.runServiceTests(mount, [reporter]): any` - -Runs the tests of a given service and returns a formatted report. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database - -* **options**: `Object` (optional) - - An object with any of the following properties: - - * **reporter**: `string` (Default: `default`) - - The reporter to use to process the test results. - - As of ArangoDB 3.2 the following reporters are supported: - - * **stream**: an array of event objects - * **suite**: nested suite objects with test results - * **xunit**: JSONML representation of an XUnit report - * **tap**: an array of TAP event strings - * **default**: an array of test results - - * **idiomatic**: `boolean` (Default: `false`) - - Whether the results should be converted to the apropriate `string` - representation: - - * **xunit** reports will be formatted as XML documents - * **tap** reports will be formatted as TAP streams - * **stream** reports will be formatted as JSON-LD streams - -**Examples** - -```js -const opts = {reporter: 'xunit', idiomatic: true}; -const result = await db.runServiceTests('/my-service', opts); -// result contains the XUnit report as a string -``` - -#### database.downloadService - -`async database.downloadService(mount): Buffer | Blob` - -Retrieves a zip bundle containing the service files. - -Returns a `Buffer` in Node or `Blob` in the browser version. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. - -**Examples** - -```js -const bundle = await db.downloadService('/my-service'); -// bundle is a Buffer/Blob of the service bundle -``` - -#### database.getServiceReadme - -`async database.getServiceReadme(mount): string?` - -Retrieves the text content of the service's `README` or `README.md` file. - -Returns `undefined` if no such file could be found. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. - -**Examples** - -```js -const readme = await db.getServiceReadme('/my-service'); -// readme is a string containing the service README's -// text content, or undefined if no README exists -``` - -#### database.getServiceDocumentation - -`async database.getServiceDocumentation(mount): Object` - -Retrieves a Swagger API description object for the service installed at the -given mount point. - -**Arguments** - -* **mount**: `string` - - The service's mount point, relative to the database. 
- -**Examples** - -```js -const spec = await db.getServiceDocumentation('/my-service'); -// spec is a Swagger API description of the service -``` - -#### database.commitLocalServiceState - -`async database.commitLocalServiceState([replace]): void` - -Writes all locally available services to the database and updates any service -bundles missing in the database. - -**Arguments** - -* **replace**: `boolean` (Default: `false`) - - Also commit outdated services. - - This can be used to solve some consistency problems when service bundles are - missing in the database or were deleted manually. - -**Examples** - -```js -await db.commitLocalServiceState(); -// all services available on the coordinator have been written to the db - -// -- or -- - -await db.commitLocalServiceState(true); -// all service conflicts have been resolved in favor of this coordinator -``` - -### Arbitrary HTTP routes - -#### database.route - -`database.route([path,] [headers]): Route` - -Returns a new _Route_ instance for the given path (relative to the database) -that can be used to perform arbitrary HTTP requests. - -**Arguments** - -* **path**: `string` (optional) - - The database-relative URL of the route. - -* **headers**: `Object` (optional) - - Default headers that should be sent with each request to the route. - -If _path_ is missing, the route will refer to the base URL of the database. - -For more information on _Route_ instances see the -[_Route API_ below](#route-api). - -**Examples** - -```js -const db = new Database(); -const myFoxxService = db.route('my-foxx-service'); -const response = await myFoxxService.post('users', { - username: 'admin', - password: 'hunter2' -}); -// response.body is the result of -// POST /_db/_system/my-foxx-service/users -// with JSON request body '{"username": "admin", "password": "hunter2"}' -``` - -## Cursor API - -_Cursor_ instances provide an abstraction over the HTTP API's limitations. -Unless a method explicitly exhausts the cursor, the driver will only fetch as -many batches from the server as necessary. Like the server-side cursors, -_Cursor_ instances are incrementally depleted as they are read from. - -```js -const db = new Database(); -const cursor = await db.query('FOR x IN 1..5 RETURN x'); -// query result list: [1, 2, 3, 4, 5] -const value = await cursor.next(); -assert.equal(value, 1); -// remaining result list: [2, 3, 4, 5] -``` - -### cursor.count - -`cursor.count: number` - -The total number of documents in the query result. This is only available if the -`count` option was used. - -### cursor.all - -`async cursor.all(): Array` - -Exhausts the cursor, then returns an array containing all values in the cursor's -remaining result list. - -**Examples** - -```js -const cursor = await db._query('FOR x IN 1..5 RETURN x'); -const result = await cursor.all() -// result is an array containing the entire query result -assert.deepEqual(result, [1, 2, 3, 4, 5]); -assert.equal(cursor.hasNext(), false); -``` - -### cursor.next - -`async cursor.next(): Object` - -Advances the cursor and returns the next value in the cursor's remaining result -list. If the cursor has already been exhausted, returns `undefined` instead. 
- -**Examples** - -```js -// query result list: [1, 2, 3, 4, 5] -const val = await cursor.next(); -assert.equal(val, 1); -// remaining result list: [2, 3, 4, 5] - -const val2 = await cursor.next(); -assert.equal(val2, 2); -// remaining result list: [3, 4, 5] -``` - -### cursor.hasNext - -`cursor.hasNext(): boolean` - -Returns `true` if the cursor has more values or `false` if the cursor has been -exhausted. - -**Examples** - -```js -await cursor.all(); // exhausts the cursor -assert.equal(cursor.hasNext(), false); -``` - -### cursor.each - -`async cursor.each(fn): any` - -Advances the cursor by applying the function _fn_ to each value in the cursor's -remaining result list until the cursor is exhausted or _fn_ explicitly returns -`false`. - -Returns the last return value of _fn_. - -Equivalent to _Array.prototype.forEach_ (except async). - -**Arguments** - -* **fn**: `Function` - - A function that will be invoked for each value in the cursor's remaining - result list until it explicitly returns `false` or the cursor is exhausted. - - The function receives the following arguments: - - * **value**: `any` - - The value in the cursor's remaining result list. - - * **index**: `number` - - The index of the value in the cursor's remaining result list. - - * **cursor**: `Cursor` - - The cursor itself. - -**Examples** - -```js -const results = []; -function doStuff(value) { - const VALUE = value.toUpperCase(); - results.push(VALUE); - return VALUE; -} - -const cursor = await db.query('FOR x IN ["a", "b", "c"] RETURN x') -const last = await cursor.each(doStuff); -assert.deepEqual(results, ['A', 'B', 'C']); -assert.equal(cursor.hasNext(), false); -assert.equal(last, 'C'); -``` - -### cursor.every - -`async cursor.every(fn): boolean` - -Advances the cursor by applying the function _fn_ to each value in the cursor's -remaining result list until the cursor is exhausted or _fn_ returns a value that -evaluates to `false`. - -Returns `false` if _fn_ returned a value that evaluates to `false`, or `true` -otherwise. - -Equivalent to _Array.prototype.every_ (except async). - -**Arguments** - -* **fn**: `Function` - - A function that will be invoked for each value in the cursor's remaining - result list until it returns a value that evaluates to `false` or the cursor - is exhausted. - - The function receives the following arguments: - - * **value**: `any` - - The value in the cursor's remaining result list. - - * **index**: `number` - - The index of the value in the cursor's remaining result list. - - * **cursor**: `Cursor` - - The cursor itself. - -```js -const even = value => value % 2 === 0; - -const cursor = await db.query('FOR x IN 2..5 RETURN x'); -const result = await cursor.every(even); -assert.equal(result, false); // 3 is not even -assert.equal(cursor.hasNext(), true); - -const value = await cursor.next(); -assert.equal(value, 4); // next value after 3 -``` - -### cursor.some - -`async cursor.some(fn): boolean` - -Advances the cursor by applying the function _fn_ to each value in the cursor's -remaining result list until the cursor is exhausted or _fn_ returns a value that -evaluates to `true`. - -Returns `true` if _fn_ returned a value that evalutes to `true`, or `false` -otherwise. - -Equivalent to _Array.prototype.some_ (except async). 
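
**Arguments**

* **fn**: `Function`

  A function that will be invoked for each value in the cursor's remaining
  result list until it returns a value that evaluates to `true` or the cursor
  is exhausted.

  The function receives the following arguments:

  * **value**: `any`

    The value in the cursor's remaining result list.

  * **index**: `number`

    The index of the value in the cursor's remaining result list.

  * **cursor**: `Cursor`

    The cursor itself.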
- -**Examples** - -```js -const even = value => value % 2 === 0; - -const cursor = await db.query('FOR x IN 1..5 RETURN x'); -const result = await cursor.some(even); -assert.equal(result, true); // 2 is even -assert.equal(cursor.hasNext(), true); - -const value = await cursor.next(); -assert.equal(value, 3); // next value after 2 -``` - -### cursor.map - -`cursor.map(fn): Array` - -Advances the cursor by applying the function _fn_ to each value in the cursor's -remaining result list until the cursor is exhausted. - -Returns an array of the return values of _fn_. - -Equivalent to _Array.prototype.map_ (except async). - -**Note**: This creates an array of all return values. It is probably a bad idea -to do this for very large query result sets. - -**Arguments** - -* **fn**: `Function` - - A function that will be invoked for each value in the cursor's remaining - result list until the cursor is exhausted. - - The function receives the following arguments: - - * **value**: `any` - - The value in the cursor's remaining result list. - - * **index**: `number` - - The index of the value in the cursor's remaining result list. - - * **cursor**: `Cursor` - - The cursor itself. - -**Examples** - -```js -const square = value => value * value; -const cursor = await db.query('FOR x IN 1..5 RETURN x'); -const result = await cursor.map(square); -assert.equal(result.length, 5); -assert.deepEqual(result, [1, 4, 9, 16, 25]); -assert.equal(cursor.hasNext(), false); -``` - -### cursor.reduce - -`cursor.reduce(fn, [accu]): any` - -Exhausts the cursor by reducing the values in the cursor's remaining result list -with the given function _fn_. If _accu_ is not provided, the first value in the -cursor's remaining result list will be used instead (the function will not be -invoked for that value). - -Equivalent to _Array.prototype.reduce_ (except async). - -**Arguments** - -* **fn**: `Function` - - A function that will be invoked for each value in the cursor's remaining - result list until the cursor is exhausted. - - The function receives the following arguments: - - * **accu**: `any` - - The return value of the previous call to _fn_. If this is the first call, - _accu_ will be set to the _accu_ value passed to _reduce_ or the first value - in the cursor's remaining result list. - - * **value**: `any` - - The value in the cursor's remaining result list. - - * **index**: `number` - - The index of the value in the cursor's remaining result list. - - * **cursor**: `Cursor` - - The cursor itself. - -**Examples** - -```js -const add = (a, b) => a + b; -const baseline = 1000; - -const cursor = await db.query('FOR x IN 1..5 RETURN x'); -const result = await cursor.reduce(add, baseline) -assert.equal(result, baseline + 1 + 2 + 3 + 4 + 5); -assert.equal(cursor.hasNext(), false); - -// -- or -- - -const result = await cursor.reduce(add); -assert.equal(result, 1 + 2 + 3 + 4 + 5); -assert.equal(cursor.hasNext(), false); -``` - -## Route API - -_Route_ instances provide access for arbitrary HTTP requests. This allows easy -access to Foxx services and other HTTP APIs not covered by the driver itself. - -### route.route - -`route.route([path], [headers]): Route` - -Returns a new _Route_ instance for the given path (relative to the current -route) that can be used to perform arbitrary HTTP requests. - -**Arguments** - -* **path**: `string` (optional) - - The relative URL of the route. - -* **headers**: `Object` (optional) - - Default headers that should be sent with each request to the route. 
- -If _path_ is missing, the route will refer to the base URL of the database. - -**Examples** - -```js -const db = new Database(); -const route = db.route("my-foxx-service"); -const users = route.route("users"); -// equivalent to db.route('my-foxx-service/users') -``` - -### route.get - -`async route.get([path,] [qs]): Response` - -Performs a GET request to the given URL and returns the server response. - -**Arguments** - -* **path**: `string` (optional) - - The route-relative URL for the request. If omitted, the request will be made - to the base URL of the route. - -* **qs**: `string` (optional) - - The query string for the request. If _qs_ is an object, it will be translated - to a query string. - -**Examples** - -```js -const db = new Database(); -const route = db.route('my-foxx-service'); -const response = await route.get(); -// response.body is the response body of calling -// GET _db/_system/my-foxx-service - -// -- or -- - -const response = await route.get('users'); -// response.body is the response body of calling -// GET _db/_system/my-foxx-service/users - -// -- or -- - -const response = await route.get('users', {group: 'admin'}); -// response.body is the response body of calling -// GET _db/_system/my-foxx-service/users?group=admin -``` - -### route.post - -`async route.post([path,] [body, [qs]]): Response` - -Performs a POST request to the given URL and returns the server response. - -**Arguments** - -* **path**: `string` (optional) - - The route-relative URL for the request. If omitted, the request will be made - to the base URL of the route. - -* **body**: `string` (optional) - - The response body. If _body_ is an object, it will be encoded as JSON. - -* **qs**: `string` (optional) - - The query string for the request. If _qs_ is an object, it will be translated - to a query string. - -**Examples** - -```js -const db = new Database(); -const route = db.route('my-foxx-service'); -const response = await route.post() -// response.body is the response body of calling -// POST _db/_system/my-foxx-service - -// -- or -- - -const response = await route.post('users') -// response.body is the response body of calling -// POST _db/_system/my-foxx-service/users - -// -- or -- - -const response = await route.post('users', { - username: 'admin', - password: 'hunter2' -}); -// response.body is the response body of calling -// POST _db/_system/my-foxx-service/users -// with JSON request body {"username": "admin", "password": "hunter2"} - -// -- or -- - -const response = await route.post('users', { - username: 'admin', - password: 'hunter2' -}, {admin: true}); -// response.body is the response body of calling -// POST _db/_system/my-foxx-service/users?admin=true -// with JSON request body {"username": "admin", "password": "hunter2"} -``` - -### route.put - -`async route.put([path,] [body, [qs]]): Response` - -Performs a PUT request to the given URL and returns the server response. - -**Arguments** - -* **path**: `string` (optional) - - The route-relative URL for the request. If omitted, the request will be made - to the base URL of the route. - -* **body**: `string` (optional) - - The response body. If _body_ is an object, it will be encoded as JSON. - -* **qs**: `string` (optional) - - The query string for the request. If _qs_ is an object, it will be translated - to a query string. 
- -**Examples** - -```js -const db = new Database(); -const route = db.route('my-foxx-service'); -const response = await route.put(); -// response.body is the response body of calling -// PUT _db/_system/my-foxx-service - -// -- or -- - -const response = await route.put('users/admin'); -// response.body is the response body of calling -// PUT _db/_system/my-foxx-service/users - -// -- or -- - -const response = await route.put('users/admin', { - username: 'admin', - password: 'hunter2' -}); -// response.body is the response body of calling -// PUT _db/_system/my-foxx-service/users/admin -// with JSON request body {"username": "admin", "password": "hunter2"} - -// -- or -- - -const response = await route.put('users/admin', { - username: 'admin', - password: 'hunter2' -}, {admin: true}); -// response.body is the response body of calling -// PUT _db/_system/my-foxx-service/users/admin?admin=true -// with JSON request body {"username": "admin", "password": "hunter2"} -``` - -### route.patch - -`async route.patch([path,] [body, [qs]]): Response` - -Performs a PATCH request to the given URL and returns the server response. - -**Arguments** - -* **path**: `string` (optional) - - The route-relative URL for the request. If omitted, the request will be made - to the base URL of the route. - -* **body**: `string` (optional) - - The response body. If _body_ is an object, it will be encoded as JSON. - -* **qs**: `string` (optional) - - The query string for the request. If _qs_ is an object, it will be translated - to a query string. - -**Examples** - -```js -const db = new Database(); -const route = db.route('my-foxx-service'); -const response = await route.patch(); -// response.body is the response body of calling -// PATCH _db/_system/my-foxx-service - -// -- or -- - -const response = await route.patch('users/admin'); -// response.body is the response body of calling -// PATCH _db/_system/my-foxx-service/users - -// -- or -- - -const response = await route.patch('users/admin', { - password: 'hunter2' -}); -// response.body is the response body of calling -// PATCH _db/_system/my-foxx-service/users/admin -// with JSON request body {"password": "hunter2"} - -// -- or -- - -const response = await route.patch('users/admin', { - password: 'hunter2' -}, {admin: true}); -// response.body is the response body of calling -// PATCH _db/_system/my-foxx-service/users/admin?admin=true -// with JSON request body {"password": "hunter2"} -``` - -### route.delete - -`async route.delete([path,] [qs]): Response` - -Performs a DELETE request to the given URL and returns the server response. - -**Arguments** - -* **path**: `string` (optional) - - The route-relative URL for the request. If omitted, the request will be made - to the base URL of the route. - -* **qs**: `string` (optional) - - The query string for the request. If _qs_ is an object, it will be translated - to a query string. 
- -**Examples** - -```js -const db = new Database(); -const route = db.route('my-foxx-service'); -const response = await route.delete() -// response.body is the response body of calling -// DELETE _db/_system/my-foxx-service - -// -- or -- - -const response = await route.delete('users/admin') -// response.body is the response body of calling -// DELETE _db/_system/my-foxx-service/users/admin - -// -- or -- - -const response = await route.delete('users/admin', {permanent: true}) -// response.body is the response body of calling -// DELETE _db/_system/my-foxx-service/users/admin?permanent=true -``` - -### route.head - -`async route.head([path,] [qs]): Response` - -Performs a HEAD request to the given URL and returns the server response. - -**Arguments** - -* **path**: `string` (optional) - - The route-relative URL for the request. If omitted, the request will be made - to the base URL of the route. - -* **qs**: `string` (optional) - - The query string for the request. If _qs_ is an object, it will be translated - to a query string. - -**Examples** - -```js -const db = new Database(); -const route = db.route('my-foxx-service'); -const response = await route.head(); -// response is the response object for -// HEAD _db/_system/my-foxx-service -``` - -### route.request - -`async route.request([opts]): Response` - -Performs an arbitrary request to the given URL and returns the server response. - -**Arguments** - -* **opts**: `Object` (optional) - - An object with any of the following properties: - - * **path**: `string` (optional) - - The route-relative URL for the request. If omitted, the request will be made - to the base URL of the route. - - * **absolutePath**: `boolean` (Default: `false`) - - Whether the _path_ is relative to the connection's base URL instead of the - route. - - * **body**: `string` (optional) - - The response body. If _body_ is an object, it will be encoded as JSON. - - * **qs**: `string` (optional) - - The query string for the request. If _qs_ is an object, it will be - translated to a query string. - - * **headers**: `Object` (optional) - - An object containing additional HTTP headers to be sent with the request. - - * **method**: `string` (Default: `"GET"`) - - HTTP method of this request. - -**Examples** - -```js -const db = new Database(); -const route = db.route('my-foxx-service'); -const response = await route.request({ - path: 'hello-world', - method: 'POST', - body: {hello: 'world'}, - qs: {admin: true} -}); -// response.body is the response body of calling -// POST _db/_system/my-foxx-service/hello-world?admin=true -// with JSON request body '{"hello": "world"}' -``` - -## Collection API - -These functions implement the -[HTTP API for manipulating collections](https://docs.arangodb.com/latest/HTTP/Collection/index.html). - -The _Collection API_ is implemented by all _Collection_ instances, regardless of -their specific type. I.e. it represents a shared subset between instances of -[_DocumentCollection_](#documentcollection-api), -[_EdgeCollection_](#edgecollection-api), -[_GraphVertexCollection_](#graphvertexcollection-api) and -[_GraphEdgeCollection_](#graphedgecollection-api). - -### Getting information about the collection - -See -[the HTTP API documentation](https://docs.arangodb.com/latest/HTTP/Collection/Getting.html) -for details. - -#### collection.get - -`async collection.get(): Object` - -Retrieves general information about the collection. 
- -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const data = await collection.get(); -// data contains general information about the collection -``` - -#### collection.properties - -`async collection.properties(): Object` - -Retrieves the collection's properties. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const data = await collection.properties(); -// data contains the collection's properties -``` - -#### collection.count - -`async collection.count(): Object` - -Retrieves information about the number of documents in a collection. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const data = await collection.count(); -// data contains the collection's count -``` - -#### collection.figures - -`async collection.figures(): Object` - -Retrieves statistics for a collection. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const data = await collection.figures(); -// data contains the collection's figures -``` - -#### collection.revision - -`async collection.revision(): Object` - -Retrieves the collection revision ID. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const data = await collection.revision(); -// data contains the collection's revision -``` - -#### collection.checksum - -`async collection.checksum([opts]): Object` - -Retrieves the collection checksum. - -**Arguments** - -* **opts**: `Object` (optional) - - For information on the possible options see - [the HTTP API for getting collection information](https://docs.arangodb.com/latest/HTTP/Collection/Getting.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const data = await collection.checksum(); -// data contains the collection's checksum -``` - -### Manipulating the collection - -These functions implement -[the HTTP API for modifying collections](https://docs.arangodb.com/latest/HTTP/Collection/Modifying.html). - -#### collection.create - -`async collection.create([properties]): Object` - -Creates a collection with the given _properties_ for this collection's name, -then returns the server response. - -**Arguments** - -* **properties**: `Object` (optional) - - For more information on the _properties_ object, see - [the HTTP API documentation for creating collections](https://docs.arangodb.com/latest/HTTP/Collection/Creating.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('potatos'); -await collection.create() -// the document collection "potatos" now exists - -// -- or -- - -const collection = db.edgeCollection('friends'); -await collection.create({ - waitForSync: true // always sync document changes to disk -}); -// the edge collection "friends" now exists -``` - -#### collection.load - -`async collection.load([count]): Object` - -Tells the server to load the collection into memory. - -**Arguments** - -* **count**: `boolean` (Default: `true`) - - If set to `false`, the return value will not include the number of documents - in the collection (which may speed up the process). 
- -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -await collection.load(false) -// the collection has now been loaded into memory -``` - -#### collection.unload - -`async collection.unload(): Object` - -Tells the server to remove the collection from memory. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -await collection.unload() -// the collection has now been unloaded from memory -``` - -#### collection.setProperties - -`async collection.setProperties(properties): Object` - -Replaces the properties of the collection. - -**Arguments** - -* **properties**: `Object` - - For information on the _properties_ argument see - [the HTTP API for modifying collections](https://docs.arangodb.com/latest/HTTP/Collection/Modifying.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const result = await collection.setProperties({waitForSync: true}) -assert.equal(result.waitForSync, true); -// the collection will now wait for data being written to disk -// whenever a document is changed -``` - -#### collection.rename - -`async collection.rename(name): Object` - -Renames the collection. The _Collection_ instance will automatically update its -name when the rename succeeds. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const result = await collection.rename('new-collection-name') -assert.equal(result.name, 'new-collection-name'); -assert.equal(collection.name, result.name); -// result contains additional information about the collection -``` - -#### collection.rotate - -`async collection.rotate(): Object` - -Rotates the journal of the collection. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const data = await collection.rotate(); -// data.result will be true if rotation succeeded -``` - -#### collection.truncate - -`async collection.truncate(): Object` - -Deletes **all documents** in the collection in the database. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -await collection.truncate(); -// the collection "some-collection" is now empty -``` - -#### collection.drop - -`async collection.drop([properties]): Object` - -Deletes the collection from the database. - -**Arguments** - -* **properties**: `Object` (optional) - - An object with the following properties: - - * **isSystem**: `Boolean` (Default: `false`) - - Whether the collection should be dropped even if it is a system collection. - - This parameter must be set to `true` when dropping a system collection. - - For more information on the _properties_ object, see - [the HTTP API documentation for dropping collections](https://docs.arangodb.com/3/HTTP/Collection/Creating.html#drops-a-collection). - **Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -await collection.drop(); -// the collection "some-collection" no longer exists -``` - -### Manipulating indexes - -These functions implement the -[HTTP API for manipulating indexes](https://docs.arangodb.com/latest/HTTP/Indexes/index.html). - -#### collection.createIndex - -`async collection.createIndex(details): Object` - -Creates an arbitrary index on the collection. 
- -**Arguments** - -* **details**: `Object` - - For information on the possible properties of the _details_ object, see - [the HTTP API for manipulating indexes](https://docs.arangodb.com/latest/HTTP/Indexes/WorkingWith.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const index = await collection.createIndex({type: 'cap', size: 20}); -// the index has been created with the handle `index.id` -``` - -#### collection.createCapConstraint - -`async collection.createCapConstraint(size): Object` - -Creates a cap constraint index on the collection. - -**Note**: This method is not available when using the driver with ArangoDB 3.0 -and higher as cap constraints are no longer supported. - -**Arguments** - -* **size**: `Object` - - An object with any of the following properties: - - * **size**: `number` (optional) - - The maximum number of documents in the collection. - - * **byteSize**: `number` (optional) - - The maximum size of active document data in the collection (in bytes). - -If _size_ is a number, it will be interpreted as _size.size_. - -For more information on the properties of the _size_ object see -[the HTTP API for creating cap constraints](https://docs.arangodb.com/latest/HTTP/Indexes/Cap.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); - -const index = await collection.createCapConstraint(20) -// the index has been created with the handle `index.id` -assert.equal(index.size, 20); - -// -- or -- - -const index = await collection.createCapConstraint({size: 20}) -// the index has been created with the handle `index.id` -assert.equal(index.size, 20); -``` - -#### collection.createHashIndex - -`async collection.createHashIndex(fields, [opts]): Object` - -Creates a hash index on the collection. - -**Arguments** - -* **fields**: `Array` - - An array of names of document fields on which to create the index. If the - value is a string, it will be wrapped in an array automatically. - -* **opts**: `Object` (optional) - - Additional options for this index. If the value is a boolean, it will be - interpreted as _opts.unique_. - -For more information on hash indexes, see -[the HTTP API for hash indexes](https://docs.arangodb.com/latest/HTTP/Indexes/Hash.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); - -const index = await collection.createHashIndex('favorite-color'); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ['favorite-color']); - -// -- or -- - -const index = await collection.createHashIndex(['favorite-color']); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ['favorite-color']); -``` - -#### collection.createSkipList - -`async collection.createSkipList(fields, [opts]): Object` - -Creates a skiplist index on the collection. - -**Arguments** - -* **fields**: `Array` - - An array of names of document fields on which to create the index. If the - value is a string, it will be wrapped in an array automatically. - -* **opts**: `Object` (optional) - - Additional options for this index. If the value is a boolean, it will be - interpreted as _opts.unique_. - -For more information on skiplist indexes, see -[the HTTP API for skiplist indexes](https://docs.arangodb.com/latest/HTTP/Indexes/Skiplist.html). 
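-
-As a sketch of the _opts_ shorthand described above (the collection and field
-names here are only placeholders), a unique skiplist index might be created
-like this:
-
-```js
-const db = new Database();
-const collection = db.collection('users');
-
-// a boolean passed as opts is interpreted as opts.unique
-const index = await collection.createSkipList('email', true);
-// the index has been created with the handle `index.id`
-
-// equivalent to the explicit form:
-// await collection.createSkipList(['email'], {unique: true});
-```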
- -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); - -const index = await collection.createSkipList('favorite-color') -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ['favorite-color']); - -// -- or -- - -const index = await collection.createSkipList(['favorite-color']) -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ['favorite-color']); -``` - -#### collection.createGeoIndex - -`async collection.createGeoIndex(fields, [opts]): Object` - -Creates a geo-spatial index on the collection. - -**Arguments** - -* **fields**: `Array` - - An array of names of document fields on which to create the index. Currently, - geo indexes must cover exactly one field. If the value is a string, it will be - wrapped in an array automatically. - -* **opts**: `Object` (optional) - - An object containing additional properties of the index. - -For more information on the properties of the _opts_ object see -[the HTTP API for manipulating geo indexes](https://docs.arangodb.com/latest/HTTP/Indexes/Geo.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); - -const index = await collection.createGeoIndex(['latitude', 'longitude']); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ['longitude', 'latitude']); - -// -- or -- - -const index = await collection.createGeoIndex('location', {geoJson: true}); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ['location']); -``` - -#### collection.createFulltextIndex - -`async collection.createFulltextIndex(fields, [minLength]): Object` - -Creates a fulltext index on the collection. - -**Arguments** - -* **fields**: `Array` - - An array of names of document fields on which to create the index. Currently, - fulltext indexes must cover exactly one field. If the value is a string, it - will be wrapped in an array automatically. - -* **minLength** (optional): - - Minimum character length of words to index. Uses a server-specific default - value if not specified. - -For more information on fulltext indexes, see -[the HTTP API for fulltext indexes](https://docs.arangodb.com/latest/HTTP/Indexes/Fulltext.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); - -const index = await collection.createFulltextIndex('description'); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ['description']); - -// -- or -- - -const index = await collection.createFulltextIndex(['description']); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ['description']); -``` - -#### collection.createPersistentIndex - -`async collection.createPersistentIndex(fields, [opts]): Object` - -Creates a Persistent index on the collection. Persistent indexes are similarly -in operation to skiplist indexes, only that these indexes are in disk as opposed -to in memory. This reduces memory usage and DB startup time, with the trade-off -being that it will always be orders of magnitude slower than in-memory indexes. - -**Arguments** - -* **fields**: `Array` - - An array of names of document fields on which to create the index. - -* **opts**: `Object` (optional) - - An object containing additional properties of the index. 
- -For more information on the properties of the _opts_ object see -[the HTTP API for manipulating Persistent indexes](https://docs.arangodb.com/latest/HTTP/Indexes/Persistent.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); - -const index = await collection.createPersistentIndex(['name', 'email']); -// the index has been created with the handle `index.id` -assert.deepEqual(index.fields, ['name', 'email']); -``` - -#### collection.index - -`async collection.index(indexHandle): Object` - -Fetches information about the index with the given _indexHandle_ and returns it. - -**Arguments** - -* **indexHandle**: `string` - - The handle of the index to look up. This can either be a fully-qualified - identifier or the collection-specific key of the index. If the value is an - object, its _id_ property will be used instead. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const index = await collection.createFulltextIndex('description'); -const result = await collection.index(index.id); -assert.equal(result.id, index.id); -// result contains the properties of the index - -// -- or -- - -const result = await collection.index(index.id.split('/')[1]); -assert.equal(result.id, index.id); -// result contains the properties of the index -``` - -#### collection.indexes - -`async collection.indexes(): Array` - -Fetches a list of all indexes on this collection. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -await collection.createFulltextIndex('description') -const indexes = await collection.indexes(); -assert.equal(indexes.length, 1); -// indexes contains information about the index -``` - -#### collection.dropIndex - -`async collection.dropIndex(indexHandle): Object` - -Deletes the index with the given _indexHandle_ from the collection. - -**Arguments** - -* **indexHandle**: `string` - - The handle of the index to delete. This can either be a fully-qualified - identifier or the collection-specific key of the index. If the value is an - object, its _id_ property will be used instead. - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const index = await collection.createFulltextIndex('description'); -await collection.dropIndex(index.id); -// the index has been removed from the collection - -// -- or -- - -await collection.dropIndex(index.id.split('/')[1]); -// the index has been removed from the collection -``` - -### Simple queries - -These functions implement the -[HTTP API for simple queries](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html). - -#### collection.all - -`async collection.all([opts]): Cursor` - -Performs a query to fetch all documents in the collection. Returns a -[new _Cursor_ instance](#cursor-api) for the query results. - -**Arguments** - -* **opts**: `Object` (optional) - - For information on the possible options see - [the HTTP API for returning all documents](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html#return-all-documents). - -#### collection.any - -`async collection.any(): Object` - -Fetches a document from the collection at random. - -#### collection.first - -`async collection.first([opts]): Array` - -Performs a query to fetch the first documents in the collection. Returns an -array of the matching documents. 
- -**Note**: This method is not available when using the driver with ArangoDB 3.0 -and higher as the corresponding API method has been removed. - -**Arguments** - -* **opts**: `Object` (optional) - - For information on the possible options see - [the HTTP API for returning the first documents of a collection](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html#find-documents-matching-an-example). - - If _opts_ is a number it is treated as _opts.count_. - -#### collection.last - -`async collection.last([opts]): Array` - -Performs a query to fetch the last documents in the collection. Returns an array -of the matching documents. - -**Note**: This method is not available when using the driver with ArangoDB 3.0 -and higher as the corresponding API method has been removed. - -**Arguments** - -* **opts**: `Object` (optional) - - For information on the possible options see - [the HTTP API for returning the last documents of a collection](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html#last-document-of-a-collection). - - If _opts_ is a number it is treated as _opts.count_. - -#### collection.byExample - -`async collection.byExample(example, [opts]): Cursor` - -Performs a query to fetch all documents in the collection matching the given -_example_. Returns a [new _Cursor_ instance](#cursor-api) for the query results. - -**Arguments** - -* **example**: _Object_ - - An object representing an example for documents to be matched against. - -* **opts**: _Object_ (optional) - - For information on the possible options see - [the HTTP API for fetching documents by example](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html#find-documents-matching-an-example). - -#### collection.firstExample - -`async collection.firstExample(example): Object` - -Fetches the first document in the collection matching the given _example_. - -**Arguments** - -* **example**: _Object_ - - An object representing an example for documents to be matched against. - -#### collection.removeByExample - -`async collection.removeByExample(example, [opts]): Object` - -Removes all documents in the collection matching the given _example_. - -**Arguments** - -* **example**: _Object_ - - An object representing an example for documents to be matched against. - -* **opts**: _Object_ (optional) - - For information on the possible options see - [the HTTP API for removing documents by example](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html#remove-documents-by-example). - -#### collection.replaceByExample - -`async collection.replaceByExample(example, newValue, [opts]): Object` - -Replaces all documents in the collection matching the given _example_ with the -given _newValue_. - -**Arguments** - -* **example**: _Object_ - - An object representing an example for documents to be matched against. - -* **newValue**: _Object_ - - The new value to replace matching documents with. - -* **opts**: _Object_ (optional) - - For information on the possible options see - [the HTTP API for replacing documents by example](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html#replace-documents-by-example). - -#### collection.updateByExample - -`async collection.updateByExample(example, newValue, [opts]): Object` - -Updates (patches) all documents in the collection matching the given _example_ -with the given _newValue_. - -**Arguments** - -* **example**: _Object_ - - An object representing an example for documents to be matched against. 
- -* **newValue**: _Object_ - - The new value to update matching documents with. - -* **opts**: _Object_ (optional) - - For information on the possible options see - [the HTTP API for updating documents by example](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html#update-documents-by-example). - -#### collection.lookupByKeys - -`async collection.lookupByKeys(keys): Array` - -Fetches the documents with the given _keys_ from the collection. Returns an -array of the matching documents. - -**Arguments** - -* **keys**: _Array_ - - An array of document keys to look up. - -#### collection.removeByKeys - -`async collection.removeByKeys(keys, [opts]): Object` - -Deletes the documents with the given _keys_ from the collection. - -**Arguments** - -* **keys**: _Array_ - - An array of document keys to delete. - -* **opts**: _Object_ (optional) - - For information on the possible options see - [the HTTP API for removing documents by keys](https://docs.arangodb.com/latest/HTTP/SimpleQuery/index.html#remove-documents-by-their-keys). - -#### collection.fulltext - -`async collection.fulltext(fieldName, query, [opts]): Cursor` - -Performs a fulltext query in the given _fieldName_ on the collection. - -**Arguments** - -* **fieldName**: _String_ - - Name of the field to search on documents in the collection. - -* **query**: _String_ - - Fulltext query string to search for. - -* **opts**: _Object_ (optional) - - For information on the possible options see - [the HTTP API for fulltext queries](https://docs.arangodb.com/latest/HTTP/Indexes/Fulltext.html). - -### Bulk importing documents - -This function implements the -[HTTP API for bulk imports](https://docs.arangodb.com/latest/HTTP/BulkImports/index.html). - -#### collection.import - -`async collection.import(data, [opts]): Object` - -Bulk imports the given _data_ into the collection. - -**Arguments** - -* **data**: `Array> | Array` - - The data to import. This can be an array of documents: - - ```js - [ - {key1: value1, key2: value2}, // document 1 - {key1: value1, key2: value2}, // document 2 - ... - ] - ``` - - Or it can be an array of value arrays following an array of keys. - - ```js - [ - ['key1', 'key2'], // key names - [value1, value2], // document 1 - [value1, value2], // document 2 - ... - ] - ``` - -* **opts**: `Object` (optional) If _opts_ is set, it must be an object with any - of the following properties: - - * **waitForSync**: `boolean` (Default: `false`) - - Wait until the documents have been synced to disk. - - * **details**: `boolean` (Default: `false`) - - Whether the response should contain additional details about documents that - could not be imported.false\*. - - * **type**: `string` (Default: `"auto"`) - - Indicates which format the data uses. Can be `"documents"`, `"array"` or - `"auto"`. - -If _data_ is a JavaScript array, it will be transmitted as a line-delimited JSON -stream. If _opts.type_ is set to `"array"`, it will be transmitted as regular -JSON instead. If _data_ is a string, it will be transmitted as it is without any -processing. - -For more information on the _opts_ object, see -[the HTTP API documentation for bulk imports](https://docs.arangodb.com/latest/HTTP/BulkImports/ImportingSelfContained.html). 
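-
-As a sketch of the _opts_ described above (the collection name and documents
-are only placeholders), the _waitForSync_ and _details_ flags might be passed
-like this:
-
-```js
-const db = new Database();
-const collection = db.collection('users');
-const result = await collection.import(
-  [
-    {username: 'jcd', password: 'bionicman'},
-    {username: 'jreyes', password: 'amigo'}
-  ],
-  {waitForSync: true, details: true}
-);
-assert.equal(result.created, 2);
-// with details set to true, result.details describes any documents
-// that could not be imported
-```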
- -**Examples** - -```js -const db = new Database(); -const collection = db.collection('users'); - -// document stream -const result = await collection.import([ - {username: 'admin', password: 'hunter2'}, - {username: 'jcd', password: 'bionicman'}, - {username: 'jreyes', password: 'amigo'}, - {username: 'ghermann', password: 'zeitgeist'} -]); -assert.equal(result.created, 4); - -// -- or -- - -// array stream with header -const result = await collection.import([ - ['username', 'password'], // keys - ['admin', 'hunter2'], // row 1 - ['jcd', 'bionicman'], // row 2 - ['jreyes', 'amigo'], - ['ghermann', 'zeitgeist'] -]); -assert.equal(result.created, 4); - -// -- or -- - -// raw line-delimited JSON array stream with header -const result = await collection.import([ - '["username", "password"]', - '["admin", "hunter2"]', - '["jcd", "bionicman"]', - '["jreyes", "amigo"]', - '["ghermann", "zeitgeist"]' -].join('\r\n') + '\r\n'); -assert.equal(result.created, 4); -``` - -### Manipulating documents - -These functions implement the -[HTTP API for manipulating documents](https://docs.arangodb.com/latest/HTTP/Document/index.html). - -#### collection.replace - -`async collection.replace(documentHandle, newValue, [opts]): Object` - -Replaces the content of the document with the given _documentHandle_ with the -given _newValue_ and returns an object containing the document's metadata. - -**Note**: The _policy_ option is not available when using the driver with -ArangoDB 3.0 as it is redundant when specifying the _rev_ option. - -**Arguments** - -* **documentHandle**: `string` - - The handle of the document to replace. This can either be the `_id` or the - `_key` of a document in the collection, or a document (i.e. an object with an - `_id` or `_key` property). - -* **newValue**: `Object` - - The new data of the document. - -* **opts**: `Object` (optional) - - If _opts_ is set, it must be an object with any of the following properties: - - * **waitForSync**: `boolean` (Default: `false`) - - Wait until the document has been synced to disk. Default: `false`. - - * **rev**: `string` (optional) - - Only replace the document if it matches this revision. - - * **policy**: `string` (optional) - - Determines the behaviour when the revision is not matched: - - * if _policy_ is set to `"last"`, the document will be replaced regardless - of the revision. - * if _policy_ is set to `"error"` or not set, the replacement will fail with - an error. - -If a string is passed instead of an options object, it will be interpreted as -the _rev_ option. - -For more information on the _opts_ object, see -[the HTTP API documentation for working with documents](https://docs.arangodb.com/latest/HTTP/Document/WorkingWithDocuments.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const data = {number: 1, hello: 'world'}; -const info1 = await collection.save(data); -const info2 = await collection.replace(info1, {number: 2}); -assert.equal(info2._id, info1._id); -assert.notEqual(info2._rev, info1._rev); -const doc = await collection.document(info1); -assert.equal(doc._id, info1._id); -assert.equal(doc._rev, info2._rev); -assert.equal(doc.number, 2); -assert.equal(doc.hello, undefined); -``` - -#### collection.update - -`async collection.update(documentHandle, newValue, [opts]): Object` - -Updates (merges) the content of the document with the given _documentHandle_ -with the given _newValue_ and returns an object containing the document's -metadata. 
- -**Note**: The _policy_ option is not available when using the driver with -ArangoDB 3.0 as it is redundant when specifying the _rev_ option. - -**Arguments** - -* **documentHandle**: `string` - - Handle of the document to update. This can be either the `_id` or the `_key` - of a document in the collection, or a document (i.e. an object with an `_id` - or `_key` property). - -* **newValue**: `Object` - - The new data of the document. - -* **opts**: `Object` (optional) - - If _opts_ is set, it must be an object with any of the following properties: - - * **waitForSync**: `boolean` (Default: `false`) - - Wait until document has been synced to disk. - - * **keepNull**: `boolean` (Default: `true`) - - If set to `false`, properties with a value of `null` indicate that a - property should be deleted. - - * **mergeObjects**: `boolean` (Default: `true`) - - If set to `false`, object properties that already exist in the old document - will be overwritten rather than merged. This does not affect arrays. - - * **returnOld**: `boolean` (Default: `false`) - - If set to `true`, return additionally the complete previous revision of the - changed documents under the attribute `old` in the result. - - * **returnNew**: `boolean` (Default: `false`) - - If set to `true`, return additionally the complete new documents under the - attribute `new` in the result. - - * **ignoreRevs**: `boolean` (Default: `true`) - - By default, or if this is set to true, the _rev attributes in the given - documents are ignored. If this is set to false, then any _rev attribute - given in a body document is taken as a precondition. The document is only - updated if the current revision is the one specified. - - * **rev**: `string` (optional) - - Only update the document if it matches this revision. - - * **policy**: `string` (optional) - - Determines the behaviour when the revision is not matched: - - * if _policy_ is set to `"last"`, the document will be replaced regardless - of the revision. - * if _policy_ is set to `"error"` or not set, the replacement will fail with - an error. - -If a string is passed instead of an options object, it will be interpreted as -the _rev_ option. - -For more information on the _opts_ object, see -[the HTTP API documentation for working with documents](https://docs.arangodb.com/latest/HTTP/Document/WorkingWithDocuments.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const doc = {number: 1, hello: 'world'}; -const doc1 = await collection.save(doc); -const doc2 = await collection.update(doc1, {number: 2}); -assert.equal(doc2._id, doc1._id); -assert.notEqual(doc2._rev, doc1._rev); -const doc3 = await collection.document(doc2); -assert.equal(doc3._id, doc2._id); -assert.equal(doc3._rev, doc2._rev); -assert.equal(doc3.number, 2); -assert.equal(doc3.hello, doc.hello); -``` - -#### collection.bulkUpdate - -`async collection.bulkUpdate(documents, [opts]): Object` - -Updates (merges) the content of the documents with the given _documents_ and -returns an array containing the documents' metadata. - -**Note**: This method is new in 3.0 and is available when using the driver with -ArangoDB 3.0 and higher. - -**Arguments** - -* **documents**: `Array` - - Documents to update. Each object must have either the `_id` or the `_key` - property. 
- -* **opts**: `Object` (optional) - - If _opts_ is set, it must be an object with any of the following properties: - - * **waitForSync**: `boolean` (Default: `false`) - - Wait until document has been synced to disk. - - * **keepNull**: `boolean` (Default: `true`) - - If set to `false`, properties with a value of `null` indicate that a - property should be deleted. - - * **mergeObjects**: `boolean` (Default: `true`) - - If set to `false`, object properties that already exist in the old document - will be overwritten rather than merged. This does not affect arrays. - - * **returnOld**: `boolean` (Default: `false`) - - If set to `true`, return additionally the complete previous revision of the - changed documents under the attribute `old` in the result. - - * **returnNew**: `boolean` (Default: `false`) - - If set to `true`, return additionally the complete new documents under the - attribute `new` in the result. - - * **ignoreRevs**: `boolean` (Default: `true`) - - By default, or if this is set to true, the _rev attributes in the given - documents are ignored. If this is set to false, then any _rev attribute - given in a body document is taken as a precondition. The document is only - updated if the current revision is the one specified. - -For more information on the _opts_ object, see -[the HTTP API documentation for working with documents](https://docs.arangodb.com/latest/HTTP/Document/WorkingWithDocuments.html). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); -const doc1 = {number: 1, hello: 'world1'}; -const info1 = await collection.save(doc1); -const doc2 = {number: 2, hello: 'world2'}; -const info2 = await collection.save(doc2); -const result = await collection.bulkUpdate([ - {_key: info1._key, number: 3}, - {_key: info2._key, number: 4} -], {returnNew: true}) -``` - -#### collection.remove - -`async collection.remove(documentHandle, [opts]): Object` - -Deletes the document with the given _documentHandle_ from the collection. - -**Note**: The _policy_ option is not available when using the driver with -ArangoDB 3.0 as it is redundant when specifying the _rev_ option. - -**Arguments** - -* **documentHandle**: `string` - - The handle of the document to delete. This can be either the `_id` or the - `_key` of a document in the collection, or a document (i.e. an object with an - `_id` or `_key` property). - -* **opts**: `Object` (optional) - - If _opts_ is set, it must be an object with any of the following properties: - - * **waitForSync**: `boolean` (Default: `false`) - - Wait until document has been synced to disk. - - * **rev**: `string` (optional) - - Only update the document if it matches this revision. - - * **policy**: `string` (optional) - - Determines the behaviour when the revision is not matched: - - * if _policy_ is set to `"last"`, the document will be replaced regardless - of the revision. - * if _policy_ is set to `"error"` or not set, the replacement will fail with - an error. - -If a string is passed instead of an options object, it will be interpreted as -the _rev_ option. - -For more information on the _opts_ object, see -[the HTTP API documentation for working with documents](https://docs.arangodb.com/latest/HTTP/Document/WorkingWithDocuments.html). 
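-
-For instance, the _rev_ precondition described above might be used like this
-(a sketch; the collection and document names are only placeholders):
-
-```js
-const db = new Database();
-const collection = db.collection('some-collection');
-const doc = await collection.document('some-doc');
-
-// only removes the document if it still has the revision we just fetched
-await collection.remove('some-doc', {rev: doc._rev});
-
-// passing a string instead of an options object is interpreted as the rev:
-// await collection.remove('some-doc', doc._rev);
-```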
- -**Examples** - -```js -const db = new Database(); -const collection = db.collection('some-collection'); - -await collection.remove('some-doc'); -// document 'some-collection/some-doc' no longer exists - -// -- or -- - -await collection.remove('some-collection/some-doc'); -// document 'some-collection/some-doc' no longer exists -``` - -#### collection.list - -`async collection.list([type]): Array` - -Retrieves a list of references for all documents in the collection. - -**Arguments** - -* **type**: `string` (Default: `"id"`) - - The format of the document references: - - * if _type_ is set to `"id"`, each reference will be the `_id` of the - document. - * if _type_ is set to `"key"`, each reference will be the `_key` of the - document. - * if _type_ is set to `"path"`, each reference will be the URI path of the - document. - -## DocumentCollection API - -The _DocumentCollection API_ extends the -[_Collection API_ (see above)](#collection-api) with the following methods. - -### documentCollection.document - -`async documentCollection.document(documentHandle): Object` - -Retrieves the document with the given _documentHandle_ from the collection. - -**Arguments** - -* **documentHandle**: `string` - - The handle of the document to retrieve. This can be either the `_id` or the - `_key` of a document in the collection, or a document (i.e. an object with an - `_id` or `_key` property). - -**Examples** - -```js -const db = new Database(); -const collection = db.collection('my-docs'); - -try { - const doc = await collection.document('some-key'); - // the document exists - assert.equal(doc._key, 'some-key'); - assert.equal(doc._id, 'my-docs/some-key'); -} catch (err) { - // something went wrong or - // the document does not exist -} - -// -- or -- - -try { - const doc = await collection.document('my-docs/some-key'); - // the document exists - assert.equal(doc._key, 'some-key'); - assert.equal(doc._id, 'my-docs/some-key'); -} catch (err) { - // something went wrong or - // the document does not exist -} -``` - -### documentCollection.save - -`async documentCollection.save(data, [opts]): Object` - -Creates a new document with the given _data_ and returns an object containing -the document's metadata. - -**Arguments** - -* **data**: `Object` - - The data of the new document, may include a `_key`. - -* **opts**: `Object` (optional) - - If _opts_ is set, it must be an object with any of the following properties: - - * **waitForSync**: `boolean` (Default: `false`) - - Wait until document has been synced to disk. - - * **returnNew**: `boolean` (Default: `false`) - - If set to `true`, return additionally the complete new documents under the - attribute `new` in the result. - - * **silent**: `boolean` (Default: `false`) - - If set to true, an empty object will be returned as response. No meta-data - will be returned for the created document. This option can be used to save - some network traffic. - -If a boolean is passed instead of an options object, it will be interpreted as -the _returnNew_ option. - -For more information on the _opts_ object, see -[the HTTP API documentation for working with documents](https://docs.arangodb.com/latest/HTTP/Document/WorkingWithDocuments.html). 
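-
-As a sketch of the boolean shorthand described above (the collection and data
-are only placeholders), passing `true` instead of an options object is
-interpreted as the _returnNew_ option:
-
-```js
-const db = new Database();
-const collection = db.collection('my-docs');
-
-// equivalent to collection.save({some: 'data'}, {returnNew: true})
-const result = await collection.save({some: 'data'}, true);
-assert.equal(result.new.some, 'data');
-```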
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.collection('my-docs');
-const data = {some: 'data'};
-const info = await collection.save(data);
-assert.equal(info._id, 'my-docs/' + info._key);
-const doc2 = await collection.document(info)
-assert.equal(doc2._id, info._id);
-assert.equal(doc2._rev, info._rev);
-assert.equal(doc2.some, data.some);
-
-// -- or --
-
-const db = new Database();
-const collection = db.collection('my-docs');
-const data = {some: 'data'};
-const opts = {returnNew: true};
-const doc = await collection.save(data, opts)
-assert.equal(doc._id, 'my-docs/' + doc._key);
-assert.equal(doc.new.some, data.some);
-```
-
-## EdgeCollection API
-
-The _EdgeCollection API_ extends the
-[_Collection API_ (see above)](#collection-api) with the following methods.
-
-### edgeCollection.edge
-
-`async edgeCollection.edge(documentHandle): Object`
-
-Retrieves the edge with the given _documentHandle_ from the collection.
-
-**Arguments**
-
-* **documentHandle**: `string`
-
-  The handle of the edge to retrieve. This can be either the `_id` or the `_key`
-  of an edge in the collection, or an edge (i.e. an object with an `_id` or
-  `_key` property).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.edgeCollection('edges');
-
-const edge = await collection.edge('some-key');
-// the edge exists
-assert.equal(edge._key, 'some-key');
-assert.equal(edge._id, 'edges/some-key');
-
-// -- or --
-
-const edge = await collection.edge('edges/some-key');
-// the edge exists
-assert.equal(edge._key, 'some-key');
-assert.equal(edge._id, 'edges/some-key');
-```
-
-### edgeCollection.save
-
-`async edgeCollection.save(data, [fromId, toId]): Object`
-
-Creates a new edge between the documents _fromId_ and _toId_ with the given
-_data_ and returns an object containing the edge's metadata.
-
-**Arguments**
-
-* **data**: `Object`
-
-  The data of the new edge. If _fromId_ and _toId_ are not specified, the _data_
-  needs to contain the properties __from_ and __to_.
-
-* **fromId**: `string` (optional)
-
-  The handle of the start vertex of this edge. This can be either the `_id` of a
-  document in the database, the `_key` of an edge in the collection, or a
-  document (i.e. an object with an `_id` or `_key` property).
-
-* **toId**: `string` (optional)
-
-  The handle of the end vertex of this edge. This can be either the `_id` of a
-  document in the database, the `_key` of an edge in the collection, or a
-  document (i.e. an object with an `_id` or `_key` property).
-
-**Examples**
-
-```js
-const db = new Database();
-const collection = db.edgeCollection('edges');
-const data = {some: 'data'};
-
-const info = await collection.save(
-  data,
-  'vertices/start-vertex',
-  'vertices/end-vertex'
-);
-assert.equal(info._id, 'edges/' + info._key);
-const edge = await collection.edge(info)
-assert.equal(edge._key, info._key);
-assert.equal(edge._rev, info._rev);
-assert.equal(edge.some, data.some);
-assert.equal(edge._from, 'vertices/start-vertex');
-assert.equal(edge._to, 'vertices/end-vertex');
-
-// -- or --
-
-const info = await collection.save({
-  some: 'data',
-  _from: 'vertices/start-vertex',
-  _to: 'vertices/end-vertex'
-});
-// ...
-```
-
-### edgeCollection.edges
-
-`async edgeCollection.edges(documentHandle): Array`
-
-Retrieves a list of all edges of the document with the given _documentHandle_.
-
-**Arguments**
-
-* **documentHandle**: `string`
-
-  The handle of the document to retrieve the edges of.
This can be either the - `_id` of a document in the database, the `_key` of an edge in the collection, - or a document (i.e. an object with an `_id` or `_key` property). - -**Examples** - -```js -const db = new Database(); -const collection = db.edgeCollection('edges'); -await collection.import([ - ['_key', '_from', '_to'], - ['x', 'vertices/a', 'vertices/b'], - ['y', 'vertices/a', 'vertices/c'], - ['z', 'vertices/d', 'vertices/a'] -]) -const edges = await collection.edges('vertices/a'); -assert.equal(edges.length, 3); -assert.deepEqual(edges.map(edge => edge._key), ['x', 'y', 'z']); -``` - -### edgeCollection.inEdges - -`async edgeCollection.inEdges(documentHandle): Array` - -Retrieves a list of all incoming edges of the document with the given -_documentHandle_. - -**Arguments** - -* **documentHandle**: `string` - - The handle of the document to retrieve the edges of. This can be either the - `_id` of a document in the database, the `_key` of an edge in the collection, - or a document (i.e. an object with an `_id` or `_key` property). - -**Examples** - -```js -const db = new Database(); -const collection = db.edgeCollection('edges'); -await collection.import([ - ['_key', '_from', '_to'], - ['x', 'vertices/a', 'vertices/b'], - ['y', 'vertices/a', 'vertices/c'], - ['z', 'vertices/d', 'vertices/a'] -]); -const edges = await collection.inEdges('vertices/a'); -assert.equal(edges.length, 1); -assert.equal(edges[0]._key, 'z'); -``` - -### edgeCollection.outEdges - -`async edgeCollection.outEdges(documentHandle): Array` - -Retrieves a list of all outgoing edges of the document with the given -_documentHandle_. - -**Arguments** - -* **documentHandle**: `string` - - The handle of the document to retrieve the edges of. This can be either the - `_id` of a document in the database, the `_key` of an edge in the collection, - or a document (i.e. an object with an `_id` or `_key` property). - -**Examples** - -```js -const db = new Database(); -const collection = db.edgeCollection('edges'); -await collection.import([ - ['_key', '_from', '_to'], - ['x', 'vertices/a', 'vertices/b'], - ['y', 'vertices/a', 'vertices/c'], - ['z', 'vertices/d', 'vertices/a'] -]); -const edges = await collection.outEdges('vertices/a'); -assert.equal(edges.length, 2); -assert.deepEqual(edges.map(edge => edge._key), ['x', 'y']); -``` - -### edgeCollection.traversal - -`async edgeCollection.traversal(startVertex, opts): Object` - -Performs a traversal starting from the given _startVertex_ and following edges -contained in this edge collection. - -**Arguments** - -* **startVertex**: `string` - - The handle of the start vertex. This can be either the `_id` of a document in - the database, the `_key` of an edge in the collection, or a document (i.e. an - object with an `_id` or `_key` property). - -* **opts**: `Object` - - See - [the HTTP API documentation](https://docs.arangodb.com/latest/HTTP/Traversal/index.html) - for details on the additional arguments. - - Please note that while _opts.filter_, _opts.visitor_, _opts.init_, - _opts.expander_ and _opts.sort_ should be strings evaluating to well-formed - JavaScript code, it's not possible to pass in JavaScript functions directly - because the code needs to be evaluated on the server and will be transmitted - in plain text. 
- -**Examples** - -```js -const db = new Database(); -const collection = db.edgeCollection('edges'); -await collection.import([ - ['_key', '_from', '_to'], - ['x', 'vertices/a', 'vertices/b'], - ['y', 'vertices/b', 'vertices/c'], - ['z', 'vertices/c', 'vertices/d'] -]); -const result = await collection.traversal('vertices/a', { - direction: 'outbound', - visitor: 'result.vertices.push(vertex._key);', - init: 'result.vertices = [];' -}); -assert.deepEqual(result.vertices, ['a', 'b', 'c', 'd']); -``` - -## Graph API - -These functions implement the -[HTTP API for manipulating graphs](https://docs.arangodb.com/latest/HTTP/Gharial/index.html). - -### graph.get - -`async graph.get(): Object` - -Retrieves general information about the graph. - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -const data = await graph.get(); -// data contains general information about the graph -``` - -### graph.create - -`async graph.create(properties): Object` - -Creates a graph with the given _properties_ for this graph's name, then returns -the server response. - -**Arguments** - -* **properties**: `Object` - - For more information on the _properties_ object, see - [the HTTP API documentation for creating graphs](https://docs.arangodb.com/latest/HTTP/Gharial/Management.html). - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -const info = await graph.create({ - edgeDefinitions: [{ - collection: 'edges', - from: ['start-vertices'], - to: ['end-vertices'] - }] -}); -// graph now exists -``` - -### graph.drop - -`async graph.drop([dropCollections]): Object` - -Deletes the graph from the database. - -**Arguments** - -* **dropCollections**: `boolean` (optional) - - If set to `true`, the collections associated with the graph will also be - deleted. - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -await graph.drop(); -// the graph "some-graph" no longer exists -``` - -### Manipulating vertices - -#### graph.vertexCollection - -`graph.vertexCollection(collectionName): GraphVertexCollection` - -Returns a new [_GraphVertexCollection_ instance](#graphvertexcollection-api) -with the given name for this graph. - -**Arguments** - -* **collectionName**: `string` - - Name of the vertex collection. - -**Examples** - -```js -const db = new Database(); -const graph = db.graph("some-graph"); -const collection = graph.vertexCollection("vertices"); -assert.equal(collection.name, "vertices"); -// collection is a GraphVertexCollection -``` - -#### graph.listVertexCollections - -`async graph.listVertexCollections([excludeOrphans]): Array` - -Fetches all vertex collections from the graph and returns an array of collection descriptions. - -**Arguments** - -* **excludeOrphans**: `boolean` (Default: `false`) - - Whether orphan collections should be excluded. - -**Examples** - -```js -const graph = db.graph('some-graph'); - -const collections = await graph.listVertexCollections(); -// collections is an array of collection descriptions -// including orphan collections - -// -- or -- - -const collections = await graph.listVertexCollections(true); -// collections is an array of collection descriptions -// not including orphan collections -``` - -#### graph.vertexCollections - -`async graph.vertexCollections([excludeOrphans]): Array` - -Fetches all vertex collections from the database and returns an array of _GraphVertexCollection_ instances for the collections. 
- -**Arguments** - -* **excludeOrphans**: `boolean` (Default: `false`) - - Whether orphan collections should be excluded. - -**Examples** - -```js -const graph = db.graph('some-graph'); - -const collections = await graph.vertexCollections() -// collections is an array of GraphVertexCollection -// instances including orphan collections - -// -- or -- - -const collections = await graph.vertexCollections(true) -// collections is an array of GraphVertexCollection -// instances not including orphan collections -``` - -#### graph.addVertexCollection - -`async graph.addVertexCollection(collectionName): Object` - -Adds the collection with the given _collectionName_ to the graph's vertex -collections. - -**Arguments** - -* **collectionName**: `string` - - Name of the vertex collection to add to the graph. - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -await graph.addVertexCollection('vertices'); -// the collection "vertices" has been added to the graph -``` - -#### graph.removeVertexCollection - -`async graph.removeVertexCollection(collectionName, [dropCollection]): Object` - -Removes the vertex collection with the given _collectionName_ from the graph. - -**Arguments** - -* **collectionName**: `string` - - Name of the vertex collection to remove from the graph. - -* **dropCollection**: `boolean` (optional) - - If set to `true`, the collection will also be deleted from the database. - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -await graph.removeVertexCollection('vertices') -// collection "vertices" has been removed from the graph - -// -- or -- - -await graph.removeVertexCollection('vertices', true) -// collection "vertices" has been removed from the graph -// the collection has also been dropped from the database -// this may have been a bad idea -``` - -### Manipulating edges - -#### graph.edgeCollection - -`graph.edgeCollection(collectionName): GraphEdgeCollection` - -Returns a new [_GraphEdgeCollection_ instance](#graphedgecollection-api) with -the given name bound to this graph. - -**Arguments** - -* **collectionName**: `string` - - Name of the edge collection. - -**Examples** - -```js -const db = new Database(); -// assuming the collections "edges" and "vertices" exist -const graph = db.graph("some-graph"); -const collection = graph.edgeCollection("edges"); -assert.equal(collection.name, "edges"); -// collection is a GraphEdgeCollection -``` - -#### graph.addEdgeDefinition - -`async graph.addEdgeDefinition(definition): Object` - -Adds the given edge definition _definition_ to the graph. - -**Arguments** - -* **definition**: `Object` - - For more information on edge definitions see - [the HTTP API for managing graphs](https://docs.arangodb.com/latest/HTTP/Gharial/Management.html). - -**Examples** - -```js -const db = new Database(); -// assuming the collections "edges" and "vertices" exist -const graph = db.graph('some-graph'); -await graph.addEdgeDefinition({ - collection: 'edges', - from: ['vertices'], - to: ['vertices'] -}); -// the edge definition has been added to the graph -``` - -#### graph.replaceEdgeDefinition - -`async graph.replaceEdgeDefinition(collectionName, definition): Object` - -Replaces the edge definition for the edge collection named _collectionName_ with -the given _definition_. - -**Arguments** - -* **collectionName**: `string` - - Name of the edge collection to replace the definition of. 
- -* **definition**: `Object` - - For more information on edge definitions see - [the HTTP API for managing graphs](https://docs.arangodb.com/latest/HTTP/Gharial/Management.html). - -**Examples** - -```js -const db = new Database(); -// assuming the collections "edges", "vertices" and "more-vertices" exist -const graph = db.graph('some-graph'); -await graph.replaceEdgeDefinition('edges', { - collection: 'edges', - from: ['vertices'], - to: ['more-vertices'] -}); -// the edge definition has been modified -``` - -#### graph.removeEdgeDefinition - -`async graph.removeEdgeDefinition(definitionName, [dropCollection]): Object` - -Removes the edge definition with the given _definitionName_ form the graph. - -**Arguments** - -* **definitionName**: `string` - - Name of the edge definition to remove from the graph. - -* **dropCollection**: `boolean` (optional) - - If set to `true`, the edge collection associated with the definition will also - be deleted from the database. - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); - -await graph.removeEdgeDefinition('edges') -// the edge definition has been removed - -// -- or -- - -await graph.removeEdgeDefinition('edges', true) -// the edge definition has been removed -// and the edge collection "edges" has been dropped -// this may have been a bad idea -``` - -#### graph.traversal - -`async graph.traversal(startVertex, opts): Object` - -Performs a traversal starting from the given _startVertex_ and following edges -contained in any of the edge collections of this graph. - -**Arguments** - -* **startVertex**: `string` - - The handle of the start vertex. This can be either the `_id` of a document in - the graph or a document (i.e. an object with an `_id` property). - -* **opts**: `Object` - - See - [the HTTP API documentation](https://docs.arangodb.com/latest/HTTP/Traversal/index.html) - for details on the additional arguments. - - Please note that while _opts.filter_, _opts.visitor_, _opts.init_, - _opts.expander_ and _opts.sort_ should be strings evaluating to well-formed - JavaScript functions, it's not possible to pass in JavaScript functions - directly because the functions need to be evaluated on the server and will be - transmitted in plain text. - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -const collection = graph.edgeCollection('edges'); -await collection.import([ - ['_key', '_from', '_to'], - ['x', 'vertices/a', 'vertices/b'], - ['y', 'vertices/b', 'vertices/c'], - ['z', 'vertices/c', 'vertices/d'] -]) -const result = await graph.traversal('vertices/a', { - direction: 'outbound', - visitor: 'result.vertices.push(vertex._key);', - init: 'result.vertices = [];' -}); -assert.deepEqual(result.vertices, ['a', 'b', 'c', 'd']); -``` - -## GraphVertexCollection API - -The _GraphVertexCollection API_ extends the -[_Collection API_ (see above)](#collection-api) with the following methods. - -#### graphVertexCollection.remove - -`async graphVertexCollection.remove(documentHandle): Object` - -Deletes the vertex with the given _documentHandle_ from the collection. - -**Arguments** - -* **documentHandle**: `string` - - The handle of the vertex to retrieve. This can be either the `_id` or the - `_key` of a vertex in the collection, or a vertex (i.e. an object with an - `_id` or `_key` property). 
- -**Examples** - -```js -const graph = db.graph('some-graph'); -const collection = graph.vertexCollection('vertices'); - -await collection.remove('some-key') -// document 'vertices/some-key' no longer exists - -// -- or -- - -await collection.remove('vertices/some-key') -// document 'vertices/some-key' no longer exists -``` - -### graphVertexCollection.vertex - -`async graphVertexCollection.vertex(documentHandle): Object` - -Retrieves the vertex with the given _documentHandle_ from the collection. - -**Arguments** - -* **documentHandle**: `string` - - The handle of the vertex to retrieve. This can be either the `_id` or the - `_key` of a vertex in the collection, or a vertex (i.e. an object with an - `_id` or `_key` property). - -**Examples** - -```js -const graph = db.graph('some-graph'); -const collection = graph.vertexCollection('vertices'); - -const doc = await collection.vertex('some-key'); -// the vertex exists -assert.equal(doc._key, 'some-key'); -assert.equal(doc._id, 'vertices/some-key'); - -// -- or -- - -const doc = await collection.vertex('vertices/some-key'); -// the vertex exists -assert.equal(doc._key, 'some-key'); -assert.equal(doc._id, 'vertices/some-key'); -``` - -### graphVertexCollection.save - -`async graphVertexCollection.save(data): Object` - -Creates a new vertex with the given _data_. - -**Arguments** - -* **data**: `Object` - - The data of the vertex. - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -const collection = graph.vertexCollection('vertices'); -const doc = await collection.save({some: 'data'}); -assert.equal(doc._id, 'vertices/' + doc._key); -assert.equal(doc.some, 'data'); -``` - -## GraphEdgeCollection API - -The _GraphEdgeCollection API_ extends the _Collection API_ (see above) with the -following methods. - -#### graphEdgeCollection.remove - -`async graphEdgeCollection.remove(documentHandle): Object` - -Deletes the edge with the given _documentHandle_ from the collection. - -**Arguments** - -* **documentHandle**: `string` - - The handle of the edge to retrieve. This can be either the `_id` or the `_key` - of an edge in the collection, or an edge (i.e. an object with an `_id` or - `_key` property). - -**Examples** - -```js -const graph = db.graph('some-graph'); -const collection = graph.edgeCollection('edges'); - -await collection.remove('some-key') -// document 'edges/some-key' no longer exists - -// -- or -- - -await collection.remove('edges/some-key') -// document 'edges/some-key' no longer exists -``` - -### graphEdgeCollection.edge - -`async graphEdgeCollection.edge(documentHandle): Object` - -Retrieves the edge with the given _documentHandle_ from the collection. - -**Arguments** - -* **documentHandle**: `string` - - The handle of the edge to retrieve. This can be either the `_id` or the `_key` - of an edge in the collection, or an edge (i.e. an object with an `_id` or - `_key` property). 
- -**Examples** - -```js -const graph = db.graph('some-graph'); -const collection = graph.edgeCollection('edges'); - -const edge = await collection.edge('some-key'); -// the edge exists -assert.equal(edge._key, 'some-key'); -assert.equal(edge._id, 'edges/some-key'); - -// -- or -- - -const edge = await collection.edge('edges/some-key'); -// the edge exists -assert.equal(edge._key, 'some-key'); -assert.equal(edge._id, 'edges/some-key'); -``` - -### graphEdgeCollection.save - -`async graphEdgeCollection.save(data, [fromId, toId]): Object` - -Creates a new edge between the vertices _fromId_ and _toId_ with the given -_data_. - -**Arguments** - -* **data**: `Object` - - The data of the new edge. If _fromId_ and _toId_ are not specified, the _data_ - needs to contain the properties __from_ and __to_. - -* **fromId**: `string` (optional) - - The handle of the start vertex of this edge. This can be either the `_id` of a - document in the database, the `_key` of an edge in the collection, or a - document (i.e. an object with an `_id` or `_key` property). - -* **toId**: `string` (optional) - - The handle of the end vertex of this edge. This can be either the `_id` of a - document in the database, the `_key` of an edge in the collection, or a - document (i.e. an object with an `_id` or `_key` property). - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -const collection = graph.edgeCollection('edges'); -const edge = await collection.save( - {some: 'data'}, - 'vertices/start-vertex', - 'vertices/end-vertex' -); -assert.equal(edge._id, 'edges/' + edge._key); -assert.equal(edge.some, 'data'); -assert.equal(edge._from, 'vertices/start-vertex'); -assert.equal(edge._to, 'vertices/end-vertex'); -``` - -### graphEdgeCollection.edges - -`async graphEdgeCollection.edges(documentHandle): Array` - -Retrieves a list of all edges of the document with the given _documentHandle_. - -**Arguments** - -* **documentHandle**: `string` - - The handle of the document to retrieve the edges of. This can be either the - `_id` of a document in the database, the `_key` of an edge in the collection, - or a document (i.e. an object with an `_id` or `_key` property). - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -const collection = graph.edgeCollection('edges'); -await collection.import([ - ['_key', '_from', '_to'], - ['x', 'vertices/a', 'vertices/b'], - ['y', 'vertices/a', 'vertices/c'], - ['z', 'vertices/d', 'vertices/a'] -]); -const edges = await collection.edges('vertices/a'); -assert.equal(edges.length, 3); -assert.deepEqual(edges.map(edge => edge._key), ['x', 'y', 'z']); -``` - -### graphEdgeCollection.inEdges - -`async graphEdgeCollection.inEdges(documentHandle): Array` - -Retrieves a list of all incoming edges of the document with the given -_documentHandle_. - -**Arguments** - -* **documentHandle**: `string` - - The handle of the document to retrieve the edges of. This can be either the - `_id` of a document in the database, the `_key` of an edge in the collection, - or a document (i.e. an object with an `_id` or `_key` property). 
- -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -const collection = graph.edgeCollection('edges'); -await collection.import([ - ['_key', '_from', '_to'], - ['x', 'vertices/a', 'vertices/b'], - ['y', 'vertices/a', 'vertices/c'], - ['z', 'vertices/d', 'vertices/a'] -]); -const edges = await collection.inEdges('vertices/a'); -assert.equal(edges.length, 1); -assert.equal(edges[0]._key, 'z'); -``` - -### graphEdgeCollection.outEdges - -`async graphEdgeCollection.outEdges(documentHandle): Array` - -Retrieves a list of all outgoing edges of the document with the given -_documentHandle_. - -**Arguments** - -* **documentHandle**: `string` - - The handle of the document to retrieve the edges of. This can be either the - `_id` of a document in the database, the `_key` of an edge in the collection, - or a document (i.e. an object with an `_id` or `_key` property). - -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -const collection = graph.edgeCollection('edges'); -await collection.import([ - ['_key', '_from', '_to'], - ['x', 'vertices/a', 'vertices/b'], - ['y', 'vertices/a', 'vertices/c'], - ['z', 'vertices/d', 'vertices/a'] -]); -const edges = await collection.outEdges('vertices/a'); -assert.equal(edges.length, 2); -assert.deepEqual(edges.map(edge => edge._key), ['x', 'y']); -``` - -### graphEdgeCollection.traversal - -`async graphEdgeCollection.traversal(startVertex, opts): Object` - -Performs a traversal starting from the given _startVertex_ and following edges -contained in this edge collection. - -**Arguments** - -* **startVertex**: `string` - - The handle of the start vertex. This can be either the `_id` of a document in - the database, the `_key` of an edge in the collection, or a document (i.e. an - object with an `_id` or `_key` property). - -* **opts**: `Object` - - See - [the HTTP API documentation](https://docs.arangodb.com/latest/HTTP/Traversal/index.html) - for details on the additional arguments. - - Please note that while _opts.filter_, _opts.visitor_, _opts.init_, - _opts.expander_ and _opts.sort_ should be strings evaluating to well-formed - JavaScript code, it's not possible to pass in JavaScript functions directly - because the code needs to be evaluated on the server and will be transmitted - in plain text. 
- -**Examples** - -```js -const db = new Database(); -const graph = db.graph('some-graph'); -const collection = graph.edgeCollection('edges'); -await collection.import([ - ['_key', '_from', '_to'], - ['x', 'vertices/a', 'vertices/b'], - ['y', 'vertices/b', 'vertices/c'], - ['z', 'vertices/c', 'vertices/d'] -]); -const result = await collection.traversal('vertices/a', { - direction: 'outbound', - visitor: 'result.vertices.push(vertex._key);', - init: 'result.vertices = [];' -}); -assert.deepEqual(result.vertices, ['a', 'b', 'c', 'd']); -``` +- [Database](Database/README.md) + - [Database Manipulation](Database/DatabaseManipulation.md) + - [Collection Access](Database/CollectionAccess.md) + - [Graph Access](Database/GraphAccess.md) + - [Transactions](Database/Transactions.md) + - [Queries](Database/Queries.md) + - [AQL User Functions](Database/AqlUserFunctions.md) + - [Foxx Services](Database/FoxxServices.md) + - [HTTP Routes](Database/HttpRoutes.md) +- [Cursor](Cursor.md) +- [Route](Route.md) +- [Collection](Collection/README.md) + - [Collection Manipulation](Collection/CollectionManipulation.md) + - [DocumentCollection](Collection/DocumentCollection.md) + - [EdgeCollection](Collection/EdgeCollection.md) + - [Indexes](Collection/Indexes.md) + - [Simple Queries](Collection/SimpleQueries.md) + - [Bulk Import](Collection/BulkImport.md) + - [Document Manipulation](Collection/DocumentManipulation.md) +- [Graph](Graph/README.md) + - [Vertices](Graph/Vertices.md) + - [Edges](Graph/Edges.md) + - [VertexCollection](Graph/VertexCollection.md) + - [EdgeCollection](Graph/EdgeCollection.md) diff --git a/Documentation/Books/Drivers/JS/Reference/Route.md b/Documentation/Books/Drivers/JS/Reference/Route.md new file mode 100644 index 0000000000..5c87f8ae51 --- /dev/null +++ b/Documentation/Books/Drivers/JS/Reference/Route.md @@ -0,0 +1,368 @@ + +# Route API + +_Route_ instances provide access for arbitrary HTTP requests. This allows easy +access to Foxx services and other HTTP APIs not covered by the driver itself. + +## route.route + +`route.route([path], [headers]): Route` + +Returns a new _Route_ instance for the given path (relative to the current +route) that can be used to perform arbitrary HTTP requests. + +**Arguments** + +* **path**: `string` (optional) + + The relative URL of the route. + +* **headers**: `Object` (optional) + + Default headers that should be sent with each request to the route. + +If _path_ is missing, the route will refer to the base URL of the database. + +**Examples** + +```js +const db = new Database(); +const route = db.route("my-foxx-service"); +const users = route.route("users"); +// equivalent to db.route('my-foxx-service/users') +``` + +## route.get + +`async route.get([path,] [qs]): Response` + +Performs a GET request to the given URL and returns the server response. + +**Arguments** + +* **path**: `string` (optional) + + The route-relative URL for the request. If omitted, the request will be made + to the base URL of the route. + +* **qs**: `string` (optional) + + The query string for the request. If _qs_ is an object, it will be translated + to a query string. 
+ +**Examples** + +```js +const db = new Database(); +const route = db.route('my-foxx-service'); +const response = await route.get(); +// response.body is the response body of calling +// GET _db/_system/my-foxx-service + +// -- or -- + +const response = await route.get('users'); +// response.body is the response body of calling +// GET _db/_system/my-foxx-service/users + +// -- or -- + +const response = await route.get('users', {group: 'admin'}); +// response.body is the response body of calling +// GET _db/_system/my-foxx-service/users?group=admin +``` + +## route.post + +`async route.post([path,] [body, [qs]]): Response` + +Performs a POST request to the given URL and returns the server response. + +**Arguments** + +* **path**: `string` (optional) + + The route-relative URL for the request. If omitted, the request will be made + to the base URL of the route. + +* **body**: `string` (optional) + + The response body. If _body_ is an object, it will be encoded as JSON. + +* **qs**: `string` (optional) + + The query string for the request. If _qs_ is an object, it will be translated + to a query string. + +**Examples** + +```js +const db = new Database(); +const route = db.route('my-foxx-service'); +const response = await route.post() +// response.body is the response body of calling +// POST _db/_system/my-foxx-service + +// -- or -- + +const response = await route.post('users') +// response.body is the response body of calling +// POST _db/_system/my-foxx-service/users + +// -- or -- + +const response = await route.post('users', { + username: 'admin', + password: 'hunter2' +}); +// response.body is the response body of calling +// POST _db/_system/my-foxx-service/users +// with JSON request body {"username": "admin", "password": "hunter2"} + +// -- or -- + +const response = await route.post('users', { + username: 'admin', + password: 'hunter2' +}, {admin: true}); +// response.body is the response body of calling +// POST _db/_system/my-foxx-service/users?admin=true +// with JSON request body {"username": "admin", "password": "hunter2"} +``` + +## route.put + +`async route.put([path,] [body, [qs]]): Response` + +Performs a PUT request to the given URL and returns the server response. + +**Arguments** + +* **path**: `string` (optional) + + The route-relative URL for the request. If omitted, the request will be made + to the base URL of the route. + +* **body**: `string` (optional) + + The response body. If _body_ is an object, it will be encoded as JSON. + +* **qs**: `string` (optional) + + The query string for the request. If _qs_ is an object, it will be translated + to a query string. 
+ +**Examples** + +```js +const db = new Database(); +const route = db.route('my-foxx-service'); +const response = await route.put(); +// response.body is the response body of calling +// PUT _db/_system/my-foxx-service + +// -- or -- + +const response = await route.put('users/admin'); +// response.body is the response body of calling +// PUT _db/_system/my-foxx-service/users + +// -- or -- + +const response = await route.put('users/admin', { + username: 'admin', + password: 'hunter2' +}); +// response.body is the response body of calling +// PUT _db/_system/my-foxx-service/users/admin +// with JSON request body {"username": "admin", "password": "hunter2"} + +// -- or -- + +const response = await route.put('users/admin', { + username: 'admin', + password: 'hunter2' +}, {admin: true}); +// response.body is the response body of calling +// PUT _db/_system/my-foxx-service/users/admin?admin=true +// with JSON request body {"username": "admin", "password": "hunter2"} +``` + +## route.patch + +`async route.patch([path,] [body, [qs]]): Response` + +Performs a PATCH request to the given URL and returns the server response. + +**Arguments** + +* **path**: `string` (optional) + + The route-relative URL for the request. If omitted, the request will be made + to the base URL of the route. + +* **body**: `string` (optional) + + The response body. If _body_ is an object, it will be encoded as JSON. + +* **qs**: `string` (optional) + + The query string for the request. If _qs_ is an object, it will be translated + to a query string. + +**Examples** + +```js +const db = new Database(); +const route = db.route('my-foxx-service'); +const response = await route.patch(); +// response.body is the response body of calling +// PATCH _db/_system/my-foxx-service + +// -- or -- + +const response = await route.patch('users/admin'); +// response.body is the response body of calling +// PATCH _db/_system/my-foxx-service/users + +// -- or -- + +const response = await route.patch('users/admin', { + password: 'hunter2' +}); +// response.body is the response body of calling +// PATCH _db/_system/my-foxx-service/users/admin +// with JSON request body {"password": "hunter2"} + +// -- or -- + +const response = await route.patch('users/admin', { + password: 'hunter2' +}, {admin: true}); +// response.body is the response body of calling +// PATCH _db/_system/my-foxx-service/users/admin?admin=true +// with JSON request body {"password": "hunter2"} +``` + +## route.delete + +`async route.delete([path,] [qs]): Response` + +Performs a DELETE request to the given URL and returns the server response. + +**Arguments** + +* **path**: `string` (optional) + + The route-relative URL for the request. If omitted, the request will be made + to the base URL of the route. + +* **qs**: `string` (optional) + + The query string for the request. If _qs_ is an object, it will be translated + to a query string. 
+ +**Examples** + +```js +const db = new Database(); +const route = db.route('my-foxx-service'); +const response = await route.delete() +// response.body is the response body of calling +// DELETE _db/_system/my-foxx-service + +// -- or -- + +const response = await route.delete('users/admin') +// response.body is the response body of calling +// DELETE _db/_system/my-foxx-service/users/admin + +// -- or -- + +const response = await route.delete('users/admin', {permanent: true}) +// response.body is the response body of calling +// DELETE _db/_system/my-foxx-service/users/admin?permanent=true +``` + +## route.head + +`async route.head([path,] [qs]): Response` + +Performs a HEAD request to the given URL and returns the server response. + +**Arguments** + +* **path**: `string` (optional) + + The route-relative URL for the request. If omitted, the request will be made + to the base URL of the route. + +* **qs**: `string` (optional) + + The query string for the request. If _qs_ is an object, it will be translated + to a query string. + +**Examples** + +```js +const db = new Database(); +const route = db.route('my-foxx-service'); +const response = await route.head(); +// response is the response object for +// HEAD _db/_system/my-foxx-service +``` + +## route.request + +`async route.request([opts]): Response` + +Performs an arbitrary request to the given URL and returns the server response. + +**Arguments** + +* **opts**: `Object` (optional) + + An object with any of the following properties: + + * **path**: `string` (optional) + + The route-relative URL for the request. If omitted, the request will be made + to the base URL of the route. + + * **absolutePath**: `boolean` (Default: `false`) + + Whether the _path_ is relative to the connection's base URL instead of the + route. + + * **body**: `string` (optional) + + The response body. If _body_ is an object, it will be encoded as JSON. + + * **qs**: `string` (optional) + + The query string for the request. If _qs_ is an object, it will be + translated to a query string. + + * **headers**: `Object` (optional) + + An object containing additional HTTP headers to be sent with the request. + + * **method**: `string` (Default: `"GET"`) + + HTTP method of this request. + +**Examples** + +```js +const db = new Database(); +const route = db.route('my-foxx-service'); +const response = await route.request({ + path: 'hello-world', + method: 'POST', + body: {hello: 'world'}, + qs: {admin: true} +}); +// response.body is the response body of calling +// POST _db/_system/my-foxx-service/hello-world?admin=true +// with JSON request body '{"hello": "world"}' +``` diff --git a/Documentation/Books/Drivers/Java/Reference/README.md b/Documentation/Books/Drivers/Java/Reference/README.md index d5d762a260..e03716fa1e 100644 --- a/Documentation/Books/Drivers/Java/Reference/README.md +++ b/Documentation/Books/Drivers/Java/Reference/README.md @@ -125,7 +125,7 @@ The second load balancing strategy allows to pick a random host from the configu ## Connection time to live -Since version 4.4 the driver supports setting a TTL for connections managed by the internal connection pool. Setting a TTL helps when using load balancing strategy `ROUND_ROBIN`, because as soon as a coordinator goes down, every open connection to that host will be closed and opened again with another target coordinator. As long as the driver does not have to open new connections (all connections in the pool are used) it will use only the coordinators which never went down. 
To use the downed coordinator again, when it is running again, the connections in the connection pool have to be closed and opened again with the target host mentioned by the load balancing startegy. To achieve this you can manually call `ArangoDB.shutdown` in your client code or use the TTL for connection so that a downed coordinator (which is then brought up again) will be used again after a certain time. +Since version 4.4 the driver supports setting a TTL for connections managed by the internal connection pool. Setting a TTL helps when using load balancing strategy `ROUND_ROBIN`, because as soon as a coordinator goes down, every open connection to that host will be closed and opened again with another target coordinator. As long as the driver does not have to open new connections (all connections in the pool are used) it will use only the coordinators which never went down. To use the downed coordinator again, when it is running again, the connections in the connection pool have to be closed and opened again with the target host mentioned by the load balancing strategy. To achieve this you can manually call `ArangoDB.shutdown` in your client code or use the TTL for connection so that a downed coordinator (which is then brought up again) will be used again after a certain time.  ```Java ArangoDB arango = new ArangoDB.Builder().connectionTtl(5 * 60 * 1000).build(); @@ -133,6 +133,14 @@ ArangoDB arango = new ArangoDB.Builder().connectionTtl(5 * 60 * 1000).build();  In this example all connections will be closed/reopened after 5 minutes.  +Connection TTL can be disabled by setting it to `null`: + +```Java +.connectionTtl(null) +``` + +The default TTL is `null` (no automatic connection closure). + ## configure VelocyPack serialization  Since version `4.1.11` you can extend the VelocyPack serialization by registering additional `VPackModule`s on `ArangoDB.Builder`.
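As a rough sketch of what such a registration can look like (this example is not part of the upstream docs; it assumes the optional `velocypack-module-jdk8` artifact providing `VPackJdk8Module` is on the classpath):

```Java
// Sketch only: register an additional VelocyPack module on the builder.
// VPackJdk8Module is assumed to come from com.arangodb:velocypack-module-jdk8.
ArangoDB arango = new ArangoDB.Builder()
  .registerModule(new VPackJdk8Module())
  .build();
```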
diff --git a/Documentation/Books/Drivers/SUMMARY.md b/Documentation/Books/Drivers/SUMMARY.md index 6c284b820d..6b37d23142 100644 --- a/Documentation/Books/Drivers/SUMMARY.md +++ b/Documentation/Books/Drivers/SUMMARY.md @@ -9,6 +9,31 @@ * [ArangoJS - JavaScript Driver](JS/README.md) * [Getting Started](JS/GettingStarted/README.md) * [Reference](JS/Reference/README.md) + * [Collections](JS/Reference/Collection/README.md) + * [Bulk Import](JS/Reference/Collection/BulkImport.md) + * [Altering](JS/Reference/Collection/CollectionManipulation.md) + * [Document](JS/Reference/Collection/DocumentCollection.md) + * [Working with Documents](JS/Reference/Collection/DocumentManipulation.md) + * [Edge Collections](JS/Reference/Collection/EdgeCollection.md) + * [Indices ](JS/Reference/Collection/Indexes.md) + * [Simple Queries](JS/Reference/Collection/SimpleQueries.md) + * [Databases](JS/Reference/Database/README.md) + * [Collections](JS/Reference/Database/CollectionAccess.md) + * [User defined AQL Functions](JS/Reference/Database/AqlUserFunctions.md) + * [Migrating](JS/Reference/Database/DatabaseManipulation.md) + * [foxx](JS/Reference/Database/FoxxServices.md) + * [graphs](JS/Reference/Database/GraphAccess.md) + * [routes](JS/Reference/Database/HttpRoutes.md) + * [AQL Queries](JS/Reference/Database/Queries.md) + * [Transactions](JS/Reference/Database/Transactions.md) + * [Graphs](JS/Reference/Graph/README.md) + * [Edge Collections](JS/Reference/Graph/EdgeCollection.md) + * [Edges](JS/Reference/Graph/Edges.md) + * [Vertex Collections](JS/Reference/Graph/VertexCollection.md) + * [Vertices](JS/Reference/Graph/Vertices.md) + * [Cursors](JS/Reference/Cursor.md) + * [Routes](JS/Reference/Route.md) + # https://@github.com/arangodb/spring-data.git;spring-data;docs/Drivers;;/ * [Spring Data ArangoDB](SpringData/README.md) * [Getting Started](SpringData/GettingStarted/README.md) diff --git a/Documentation/Books/Drivers/SpringData/GettingStarted/README.md b/Documentation/Books/Drivers/SpringData/GettingStarted/README.md index ccca4da0be..0f0e9440e0 100644 --- a/Documentation/Books/Drivers/SpringData/GettingStarted/README.md +++ b/Documentation/Books/Drivers/SpringData/GettingStarted/README.md @@ -1,114 +1,114 @@ -# Spring Data ArangoDB - Getting Started - -## Supported versions - -| Spring Data ArangoDB | Spring Data | ArangoDB | -|----------------------|-------------|----------------| -| 1.0.0 | 1.13.x | 3.0*, 3.1, 3.2 | -| 2.0.0 | 2.0.x | 3.0*, 3.1, 3.2 | - -Spring Data ArangoDB requires ArangoDB 3.0 or higher - which you can download [here](https://www.arangodb.com/download/) - and Java 8 or higher. - -**Note**: ArangoDB 3.0 does not support the default transport protocol [VelocyStream](https://github.com/arangodb/velocystream). A manual switch to HTTP is required. See chapter [configuration](#configuration). Also ArangoDB 3.0 does not support geospatial queries. - -## Maven - -To use Spring Data ArangoDB in your project, your build automation tool needs to be configured to include and use the Spring Data ArangoDB dependency. Example with Maven: - -```xml - - com.arangodb - arangodb-spring-data - {version} - -``` - -There is a [demonstration app](https://github.com/arangodb/spring-data-demo), which contains common use cases and examples of how to use Spring Data ArangoDB's functionality. - -## Configuration - -You can use Java to configure your Spring Data environment as show below. 
Setting up the underlying driver (`ArangoDB.Builder`) with default configuration automatically loads a properties file `arangodb.properties`, if it exists in the classpath. - -```java -@Configuration -@EnableArangoRepositories(basePackages = { "com.company.mypackage" }) -public class MyConfiguration extends AbstractArangoConfiguration { - - @Override - public ArangoDB.Builder arango() { - return new ArangoDB.Builder(); - } - - @Override - public String database() { - // Name of the database to be used - return "example-database"; - } - -} -``` - -The driver is configured with some default values: - -property-key | description | default value --------------|-------------|-------------- -arangodb.host | ArangoDB host | 127.0.0.1 -arangodb.port | ArangoDB port | 8529 -arangodb.timeout | socket connect timeout(millisecond) | 0 -arangodb.user | Basic Authentication User | -arangodb.password | Basic Authentication Password | -arangodb.useSsl | use SSL connection | false - -To customize the configuration, the parameters can be changed in the Java code. - -```java -@Override -public ArangoDB.Builder arango() { - ArangoDB.Builder arango = new ArangoDB.Builder() - .host("127.0.0.1") - .port(8429) - .user("root"); - return arango; -} -``` - -In addition you can use the *arangodb.properties* or a custom properties file to supply credentials to the driver. - -*Properties file* -``` -arangodb.host=127.0.0.1 -arangodb.port=8529 -# arangodb.hosts=127.0.0.1:8529 could be used instead -arangodb.user=root -arangodb.password= -``` - -*Custom properties file* -```java -@Override -public ArangoDB.Builder arango() { - InputStream in = MyClass.class.getResourceAsStream("my.properties"); - ArangoDB.Builder arango = new ArangoDB.Builder() - .loadProperties(in); - return arango; -} -``` - -**Note**: When using ArangoDB 3.0 it is required to set the transport protocol to HTTP and fetch the dependency `org.apache.httpcomponents:httpclient`. - -```java -@Override -public ArangoDB.Builder arango() { - ArangoDB.Builder arango = new ArangoDB.Builder() - .useProtocol(Protocol.HTTP_JSON); - return arango; -} -``` -```xml - - org.apache.httpcomponents - httpclient - 4.5.1 - -``` +# Spring Data ArangoDB - Getting Started + +## Supported versions + +| Spring Data ArangoDB | Spring Data | ArangoDB | +|----------------------|-------------|----------------| +| 1.0.0 | 1.13.x | 3.0*, 3.1, 3.2 | +| 2.0.0 | 2.0.x | 3.0*, 3.1, 3.2 | + +Spring Data ArangoDB requires ArangoDB 3.0 or higher - which you can download [here](https://www.arangodb.com/download/) - and Java 8 or higher. + +**Note**: ArangoDB 3.0 does not support the default transport protocol [VelocyStream](https://github.com/arangodb/velocystream). A manual switch to HTTP is required. See chapter [configuration](#configuration). Also ArangoDB 3.0 does not support geospatial queries. + +## Maven + +To use Spring Data ArangoDB in your project, your build automation tool needs to be configured to include and use the Spring Data ArangoDB dependency. Example with Maven: + +```xml + + com.arangodb + arangodb-spring-data + {version} + +``` + +There is a [demonstration app](https://github.com/arangodb/spring-data-demo), which contains common use cases and examples of how to use Spring Data ArangoDB's functionality. + +## Configuration + +You can use Java to configure your Spring Data environment as show below. Setting up the underlying driver (`ArangoDB.Builder`) with default configuration automatically loads a properties file `arangodb.properties`, if it exists in the classpath. 
+ +```java +@Configuration +@EnableArangoRepositories(basePackages = { "com.company.mypackage" }) +public class MyConfiguration extends AbstractArangoConfiguration { + + @Override + public ArangoDB.Builder arango() { + return new ArangoDB.Builder(); + } + + @Override + public String database() { + // Name of the database to be used + return "example-database"; + } + +} +``` + +The driver is configured with some default values: + +property-key | description | default value +-------------|-------------|-------------- +arangodb.host | ArangoDB host | 127.0.0.1 +arangodb.port | ArangoDB port | 8529 +arangodb.timeout | socket connect timeout(millisecond) | 0 +arangodb.user | Basic Authentication User | +arangodb.password | Basic Authentication Password | +arangodb.useSsl | use SSL connection | false + +To customize the configuration, the parameters can be changed in the Java code. + +```java +@Override +public ArangoDB.Builder arango() { + ArangoDB.Builder arango = new ArangoDB.Builder() + .host("127.0.0.1") + .port(8429) + .user("root"); + return arango; +} +``` + +In addition you can use the *arangodb.properties* or a custom properties file to supply credentials to the driver. + +*Properties file* +``` +arangodb.host=127.0.0.1 +arangodb.port=8529 +# arangodb.hosts=127.0.0.1:8529 could be used instead +arangodb.user=root +arangodb.password= +``` + +*Custom properties file* +```java +@Override +public ArangoDB.Builder arango() { + InputStream in = MyClass.class.getResourceAsStream("my.properties"); + ArangoDB.Builder arango = new ArangoDB.Builder() + .loadProperties(in); + return arango; +} +``` + +**Note**: When using ArangoDB 3.0 it is required to set the transport protocol to HTTP and fetch the dependency `org.apache.httpcomponents:httpclient`. + +```java +@Override +public ArangoDB.Builder arango() { + ArangoDB.Builder arango = new ArangoDB.Builder() + .useProtocol(Protocol.HTTP_JSON); + return arango; +} +``` +```xml + + org.apache.httpcomponents + httpclient + 4.5.1 + +``` diff --git a/Documentation/Books/Drivers/SpringData/README.md b/Documentation/Books/Drivers/SpringData/README.md index 52d703c0b4..ecf60e94a1 100644 --- a/Documentation/Books/Drivers/SpringData/README.md +++ b/Documentation/Books/Drivers/SpringData/README.md @@ -1,12 +1,13 @@ -# Spring Data ArangoDB - -- [Getting Started](GettingStarted/README.md) -- [Reference](Reference/README.md) - -## Learn more -* [ArangoDB](https://www.arangodb.com/) -* [Demo](https://github.com/arangodb/spring-data-demo) -* [JavaDoc 1.0.0](http://arangodb.github.io/spring-data/javadoc-1_0/index.html) -* [JavaDoc 2.0.0](http://arangodb.github.io/spring-data/javadoc-2_0/index.html) -* [JavaDoc Java driver](http://arangodb.github.io/arangodb-java-driver/javadoc-4_3/index.html) +# Spring Data ArangoDB + +- [Getting Started](GettingStarted/README.md) +- [Reference](Reference/README.md) + +## Learn more +* [ArangoDB](https://www.arangodb.com/) +* [Demo](https://github.com/arangodb/spring-data-demo) +* [JavaDoc 1.0.0](http://arangodb.github.io/spring-data/javadoc-1_0/index.html) +* [JavaDoc 2.0.0](http://arangodb.github.io/spring-data/javadoc-2_0/index.html) +* [JavaDoc Java driver](http://arangodb.github.io/arangodb-java-driver/javadoc-4_3/index.html) +* [Changelog](https://raw.githubusercontent.com/arangodb/spring-data/master/ChangeLog) diff --git a/Documentation/Books/Drivers/SpringData/Reference/README.md b/Documentation/Books/Drivers/SpringData/Reference/README.md index a9b9999752..b7f1d2154d 100644 --- 
a/Documentation/Books/Drivers/SpringData/Reference/README.md +++ b/Documentation/Books/Drivers/SpringData/Reference/README.md @@ -1,706 +1,725 @@ -# Spring Data ArangoDB - Reference - -# Template - -With `ArangoTemplate` Spring Data ArangoDB offers a central support for interactions with the database over a rich feature set. It mostly offers the features from the ArangoDB Java driver with additional exception translation from the drivers exceptions to the Spring Data access exceptions inheriting the `DataAccessException` class. -The `ArangoTemplate` class is the default implementation of the operations interface `ArangoOperations` which developers of Spring Data are encouraged to code against. - -# Repositories - -## Introduction - -Spring Data Commons provides a composable repository infrastructure which Spring Data ArangoDB is built on. These allow for interface-based composition of repositories consisting of provided default implementations for certain interfaces (like `CrudRepository`) and custom implementations for other methods. - -## Instantiating - -Instances of a Repository are created in Spring beans through the auto-wired mechanism of Spring. - -```java -public class MySpringBean { - - @Autowired - private MyRepository rep; - -} -``` - -## Return types - -The method return type for single results can be a primitive type, a domain class, `Map`, `BaseDocument`, `BaseEdgeDocument`, `Optional`, `GeoResult`. -The method return type for multiple results can additionally be `ArangoCursor`, `Iterable`, `Collection`, `List`, `Set`, `Page`, `Slice`, `GeoPage`, `GeoResults` where Type can be everything a single result can be. - -## Query methods - -Queries using [ArangoDB Query Language (AQL)](https://docs.arangodb.com/current/AQL/index.html) can be supplied with the `@Query` annotation on methods. `AqlQueryOptions` can also be passed to the driver, as an argument anywhere in the method signature. - -There are three ways of passing bind parameters to the query in the query annotation. - -Using number matching, arguments will be substituted into the query in the order they are passed to the query method. - -```java -public interface MyRepository extends Repository{ - - @Query("FOR c IN customers FILTER c.name == @0 AND c.surname == @2 RETURN c") - ArangoCursor query(String name, AqlQueryOptions options, String surname); - -} -``` - -With the `@Param` annotation, the argument will be placed in the query at the place corresponding to the value passed to the `@Param` annotation. - -```java -public interface MyRepository extends Repository{ - - @Query("FOR c IN customers FILTER c.name == @name AND c.surname == @surname RETURN c") - ArangoCursor query(@Param("name") String name, @Param("surname") String surname); - -} -``` - - In addition you can use a parameter of type `Map` annotated with `@BindVars` as your bind parameters. You can then fill the map with any parameter used in the query. (see [here](https://docs.arangodb.com/3.1/AQL/Fundamentals/BindParameters.html#bind-parameters) for more Information about Bind Parameters). - -```java -public interface MyRepository extends Repository{ - - @Query("FOR c IN customers FILTER c.name == @name AND c.surname = @surname RETURN c") - ArangoCursor query(@BindVars Map bindVars); - -} -``` - -A mixture of any of these methods can be used. Parameters with the same name from an `@Param` annotation will override those in the `bindVars`. 
- -```java -public interface MyRepository extends Repository{ - - @Query("FOR c IN customers FILTER c.name == @name AND c.surname = @surname RETURN c") - ArangoCursor query(@BindVars Map bindVars, @Param("name") String name); - -} -``` - -## Derived queries - -Spring Data ArangoDB supports queries derived from methods names by splitting it into its semantic parts and converting into AQL. The mechanism strips the prefixes `find..By`, `get..By`, `query..By`, `read..By`, `stream..By`, `count..By`, `exists..By`, `delete..By`, `remove..By` from the method and parses the rest. The By acts as a separator to indicate the start of the criteria for the query to be built. You can define conditions on entity properties and concatenate them with `And` and `Or`. - -The complete list of part types for derived methods is below, where doc is a document in the database - -Keyword | Sample | Predicate -----------|----------------|-------- -IsGreaterThan, GreaterThan, After | findByAgeGreaterThan(int age) | doc.age > age -IsGreaterThanEqual, GreaterThanEqual | findByAgeIsGreaterThanEqual(int age) | doc.age >= age -IsLessThan, LessThan, Before | findByAgeIsLessThan(int age) | doc.age < age -IsLessThanEqualLessThanEqual | findByAgeLessThanEqual(int age) | doc.age <= age -IsBetween, Between | findByAgeBetween(int lower, int upper) | lower < doc.age < upper -IsNotNull, NotNull | findByNameNotNull() | doc.name != null -IsNull, Null | findByNameNull() | doc.name == null -IsLike, Like | findByNameLike(String name) | doc.name LIKE name -IsNotLike, NotLike | findByNameNotLike(String name) | NOT(doc.name LIKE name) -IsStartingWith, StartingWith, StartsWith | findByNameStartsWith(String prefix) | doc.name LIKE prefix -IsEndingWith, EndingWith, EndsWith | findByNameEndingWith(String suffix) | doc.name LIKE suffix -Regex, MatchesRegex, Matches | findByNameRegex(String pattern) | REGEX_TEST(doc.name, name, ignoreCase) -(No Keyword) | findByFirstName(String name) | doc.name == name -IsTrue, True | findByActiveTrue() | doc.active == true -IsFalse, False | findByActiveFalse() | doc.active == false -Is, Equals | findByAgeEquals(int age) | doc.age == age -IsNot, Not | findByAgeNot(int age) | doc.age != age -IsIn, In | findByNameIn(String[] names) | doc.name IN names -IsNotIn, NotIn | findByNameIsNotIn(String[] names) | doc.name NOT IN names -IsContaining, Containing, Contains | findByFriendsContaining(String name) | name IN doc.friends -IsNotContaining, NotContaining, NotContains | findByFriendsNotContains(String name) | name NOT IN doc.friends -Exists | findByFriendNameExists() | HAS(doc.friend, name) - - -```java -public interface MyRepository extends Repository { - - // FOR c IN customers FILTER c.name == @0 RETURN c - ArangoCursor findByName(String name); - ArangoCursor getByName(String name); - - // FOR c IN customers - // FILTER c.name == @0 && c.age == @1 - // RETURN c - ArangoCursor findByNameAndAge(String name, int age); - - // FOR c IN customers - // FILTER c.name == @0 || c.age == @1 - // RETURN c - ArangoCursor findByNameOrAge(String name, int age); -} -``` - -You can apply sorting for one or multiple sort criteria by appending `OrderBy` to the method and `Asc` or `Desc` for the directions. 
- -```java -public interface MyRepository extends Repository { - - // FOR c IN customers - // FITLER c.name == @0 - // SORT c.age DESC RETURN c - ArangoCursor getByNameOrderByAgeDesc(String name); - - // FOR c IN customers - // FILTER c.name = @0 - // SORT c.name ASC, c.age DESC RETURN c - ArangoCursor findByNameOrderByNameAscAgeDesc(String name); - -} -``` - -### Geospatial queries - -Geospatial queries are a subsection of derived queries. To use a geospatial query on a collection, a geo index must exist on that collection. A geo index can be created on a field which is a two element array, corresponding to latitude and longitude coordinates. - -As a subsection of derived queries, geospatial queries support all the same return types, but also support the three return types `GeoPage, GeoResult and Georesults`. These types must be used in order to get the distance of each document as generated by the query. - -There are two kinds of geospatial query, Near and Within. Near sorts documents by distance from the given point, while within both sorts and filters documents, returning those within the given distance range or shape. - -```java -public interface MyRepository extends Repository { - - GeoResult getByLocationNear(Point point); - - GeoResults findByLocationWithinOrLocationWithin(Box box, Polygon polygon); - - //Equivalent queries - GeoResults findByLocationWithinOrLocationWithin(Point point, int distance); - GeoResults findByLocationWithinOrLocationWithin(Point point, Distance distance); - GeoResults findByLocationWithinOrLocationWithin(Circle circle); - -} -``` - -## Property expression - -Property expressions can refer only to direct and nested properties of the managed domain class. The algorithm checks the domain class for the entire expression as the property. If the check fails, the algorithm splits up the expression at the camel case parts from the right and tries to find the corresponding property. - -```java -@Document("customers") -public class Customer { - private Address address; -} - -public class Address { - private ZipCode zipCode; -} - -public interface MyRepository extends Repository { - - // 1. step: search domain class for a property "addressZipCode" - // 2. step: search domain class for "addressZip.code" - // 3. step: search domain class for "address.zipCode" - ArangoCursor findByAddressZipCode(ZipCode zipCode); -} -``` - -It is possible for the algorithm to select the wrong property if the domain class also has a property which matches the first split of the expression. To resolve this ambiguity you can use _ as a separator inside your method-name to define traversal points. - -```java -@Document("customers") -public class Customer { - private Address address; - private AddressZip addressZip; -} - -public class Address { - private ZipCode zipCode; -} - -public class AddressZip { - private String code; -} - -public interface MyRepository extends Repository { - - // 1. step: search domain class for a property "addressZipCode" - // 2. step: search domain class for "addressZip.code" - // creates query with "x.addressZip.code" - ArangoCursor findByAddressZipCode(ZipCode zipCode); - - // 1. step: search domain class for a property "addressZipCode" - // 2. step: search domain class for "addressZip.code" - // 3. 
step: search domain class for "address.zipCode" - // creates query with "x.address.zipCode" - ArangoCursor findByAddress_ZipCode(ZipCode zipCode); - -} -``` - -## Special parameter handling - -### Bind parameters - -AQL supports the usage of [bind parameters](https://docs.arangodb.com/3.1/AQL/Fundamentals/BindParameters.html) which you can define with a method parameter named `bindVars` of type `Map`. - -```java -public interface MyRepository extends Repository { - - @Query("FOR c IN customers FILTER c[@field] == @value RETURN c") - ArangoCursor query(Map bindVars); - -} - -Map bindVars = new HashMap(); -bindVars.put("field", "name"); -bindVars.put("value", "john"; - -// will execute query "FOR c IN customers FILTER c.name == "john" RETURN c" -ArangoCursor cursor = myRepo.query(bindVars); -``` - -### AQL query options - -You can set additional options for the query and the created cursor over the class `AqlQueryOptions` which you can simply define as a method parameter without a specific name. AqlQuery options can also be defined with the `@QueryOptions` annotation, as shown below. AqlQueryOptions from an annotation and those from an argument are merged if both exist, with those in the argument taking precedence. - -The `AqlQueryOptions` allows you to set the cursor time-to-life, batch-size, caching flag and several other settings. This special parameter works with both query-methods and finder-methods. Keep in mind that some options, like time-to-life, are only effective if the method return type is`ArangoCursor` or `Iterable`. - -```java -public interface MyRepository extends Repository { - - - @Query("FOR c IN customers FILTER c.name == @0 RETURN c") - Iterable query(String name, AqlQueryOptions options); - - - Iterable findByName(String name, AqlQueryOptions options); - - - @QueryOptions(maxPlans = 1000, ttl = 128) - ArangoCursor findByAddressZipCode(ZipCode zipCode); - - - @Query("FOR c IN customers FILTER c[@field] == @value RETURN c") - @QueryOptions(cache = true, ttl = 128) - ArangoCursor query(Map bindVars, AqlQueryOptions options); - -} -``` - -# Mapping - -## Introduction - -In this section we will describe the features and conventions for mapping Java objects to documents and how to override those conventions with annotation based mapping metadata. - -## Conventions - -* The Java class name is mapped to the collection name -* The non-static fields of a Java object are used as fields in the stored document -* The Java field name is mapped to the stored document field name -* All nested Java object are stored as nested objects in the stored document -* The Java class needs a constructor which meets the following criteria: - * in case of a single constructor: - * a non-parameterized constructor or - * a parameterized constructor - * in case of multiple constructors: - * a non-parameterized constructor or - * a parameterized constructor annotated with `@PersistenceConstructor` - -## Type conventions - -ArangoDB uses [VelocyPack](https://github.com/arangodb/velocypack) as it's internal storage format which supports a large number of data types. In addition Spring Data ArangoDB offers - with the underlying Java driver - built-in converters to add additional types to the mapping. 
- -Java type | VelocyPack type -----------|---------------- -java.lang.String | string -java.lang.Boolean | bool -java.lang.Integer | signed int 4 bytes, smallint -java.lang.Long | signed int 8 bytes, smallint -java.lang.Short | signed int 2 bytes, smallint -java.lang.Double | double -java.lang.Float | double -java.math.BigInteger | signed int 8 bytes, unsigned int 8 bytes -java.math.BigDecimal | double -java.lang.Number | double -java.lang.Character | string -java.util.Date | string (date-format ISO 8601) -java.sql.Date | string (date-format ISO 8601) -java.sql.Timestamp | string (date-format ISO 8601) -java.util.UUID | string -java.lang.byte[] | string (Base64) - -## Type mapping -As collections in ArangoDB can contain documents of various types, a mechanism to retrieve the correct Java class is required. The type information of properties declared in a class may not be enough to restore the original class (due to inheritance). If the declared complex type and the actual type do not match, information about the actual type is stored together with the document. This is necessary to restore the correct type when reading from the DB. Consider the following example: - -```java -public class Person { - private String name; - private Address homeAddress; - // ... - - // getters and setters omitted -} - -public class Employee extends Person { - private Address workAddress; - // ... - - // getters and setters omitted -} - -public class Address { - private final String street; - private final String number; - // ... - - public Address(String street, String number) { - this.street = street; - this.number = number; - } - - // getters omitted -} - -@Document -public class Company { - @Key - private String key; - private Person manager; - - // getters and setters omitted -} - -Employee manager = new Employee(); -manager.setName("Jane Roberts"); -manager.setHomeAddress(new Address("Park Avenue", "432/64")); -manager.setWorkAddress(new Address("Main Street", "223")); -Company comp = new Company(); -comp.setManager(manager); -``` - -The serialized document for the DB looks like this: - -```json -{ - "manager": { - "name": "Jane Roberts", - "homeAddress": { - "street": "Park Avenue", - "number": "432/64" - }, - "workAddress": { - "street": "Main Street", - "number": "223" - }, - "_class": "com.arangodb.Employee" - }, - "_class": "com.arangodb.Company" -} -``` - -Type hints are written for top-level documents (as a collection can contain different document types) as well as for every value if it's a complex type and a sub-type of the property type declared. `Map`s and `Collection`s are excluded from type mapping. Without the additional information about the concrete classes used, the document couldn't be restored in Java. The type information of the `manager` property is not enough to determine the `Employee` type. The `homeAddress` and `workAddress` properties have the same actual and defined type, thus no type hint is needed. - -### Customizing type mapping -By default, the fully qualified class name is stored in the documents as a type hint. A custom type hint can be set with the `@TypeAlias("my-alias")` annotation on an entity. Make sure that it is an unique identifier across all entities. If we would add a `TypeAlias("employee")` annotation to the `Employee` class above, it would be persisted as `"_class": "employee"`. - -The default type key is `_class` and can be changed by overriding the `typeKey()` method of the `AbstractArangoConfiguration` class. 
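For illustration only (the class name and the `"_type"` key below are made up for the example), a configuration overriding the type key might look like the following sketch; `arango()` and `database()` are the same overrides shown in the Getting Started chapter:

```java
@Configuration
@EnableArangoRepositories(basePackages = { "com.company.mypackage" })
public class MyConfiguration extends AbstractArangoConfiguration {

  // Store the type hint under "_type" instead of the default "_class".
  @Override
  public String typeKey() {
    return "_type";
  }

  @Override
  public ArangoDB.Builder arango() {
    return new ArangoDB.Builder();
  }

  @Override
  public String database() {
    return "example-database";
  }
}
```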
- -If you need to further customize the type mapping process, the `arangoTypeMapper()` method of the configuration class can be overridden. The included `DefaultArangoTypeMapper` can be customized by providing a list of [`TypeInformationMapper`](https://docs.spring.io/spring-data/commons/docs/current/api/org/springframework/data/convert/TypeInformationMapper.html)s that create aliases from types and vice versa. - -In order to fully customize the type mapping process you can provide a custom type mapper implementation by extending the `DefaultArangoTypeMapper` class. - -### Deactivating type mapping -To deactivate the type mapping process, you can return `null` from the `typeKey()` method of the `AbstractArangoConfiguration` class. No type hints are stored in the documents with this setting. If you make sure that each defined type corresponds to the actual type, you can disable the type mapping, otherwise it can lead to exceptions when reading the entities from the DB. - -## Annotations - -### Annotation overview - -annotation | level | description ------------|-------|------------ -@Document | class | marks this class as a candidate for mapping -@Edge | class | marks this class as a candidate for mapping -@Id | field | stores the field as the system field _id -@Key | field | stores the field as the system field _key -@Rev | field | stores the field as the system field _rev -@Field("alt-name") | field | stores the field with an alternative name -@Ref | field | stores the _id of the referenced document and not the nested document -@From | field | stores the _id of the referenced document as the system field _from -@To | field | stores the _id of the referenced document as the system field _to -@Relations | field | vertices which are connected over edges -@Transient | field, method, annotation | marks a field to be transient for the mapping framework, thus the property will not be persisted and not further inspected by the mapping framework -@PersistenceConstructor | constructor | marks a given constructor - even a package protected one - to use when instantiating the object from the database -@TypeAlias("alias") | class | set a type alias for the class when persisted to the DB -@HashIndex | class | describes a hash index -@HashIndexed | field | describes how to index the field -@SkiplistIndex | class | describes a skiplist index -@SkiplistIndexed | field | describes how to index the field -@PersistentIndex | class | describes a persistent index -@PersistentIndexed | field | describes how to index the field -@GeoIndex | class | describes a geo index -@GeoIndexed | field | describes how to index the field -@FulltextIndex | class | describes a fulltext index -@FulltextIndexed | field | describes how to index the field - -### Document - -The annotations `@Document` applied to a class marks this class as a candidate for mapping to the database. The most relevant parameter is `value` to specify the collection name in the database. The annotation `@Document` specifies the collection type to `DOCUMENT`. - -```java -@Document(value="persons") -public class Person { - ... -} -``` - -### Edge - -The annotations `@Edge` applied to a class marks this class as a candidate for mapping to the database. The most relevant parameter is `value` to specify the collection name in the database. The annotation `@Edge` specifies the collection type to `EDGE`. - -```java -@Edge("relations") -public class Relation { - ... 
-} -``` - -### Reference - -With the annotation `@Ref` applied on a field the nested object isn’t stored as a nested object in the document. The `_id` field of the nested object is stored in the document and the nested object has to be stored as a separate document in another collection described in the `@Document` annotation of the nested object class. To successfully persist an instance of your object the referencing field has to be null or it's instance has to provide a field with the annotation `@Id` including a valid id. - -```java -@Document(value="persons") -public class Person { - @Ref - private Address address; -} - -@Document("addresses") -public class Address { - @Id - private String id; - private String country; - private String street; -} -``` - -The database representation of `Person` in collection *persons* looks as follow: - -``` -{ - "_key" : "123", - "_id" : "persons/123", - "address" : "addresses/456" -} -``` -and the representation of `Address` in collection *addresses*: -``` -{ - "_key" : "456", - "_id" : "addresses/456", - "country" : "...", - "street" : "..." -} -``` - -Without the annotation `@Ref` at the field `address`, the stored document would look: - -``` -{ - "_key" : "123", - "_id" : "persons/123", - "address" : { - "country" : "...", - "street" : "..." - } -} -``` - -### Relations - -With the annotation `@Relations` applied on a collection or array field in a class annotated with `@Document` the nested objects are fetched from the database over a graph traversal with your current object as the starting point. The most relevant parameter is `edge`. With `edge` you define the edge collection - which should be used in the traversal - using the class type. With the parameter `depth` you can define the maximal depth for the traversal (default 1) and the parameter `direction` defines whether the traversal should follow outgoing or incoming edges (default Direction.ANY). - -```java -@Document(value="persons") -public class Person { - @Relations(edge=Relation.class, depth=1, direction=Direction.ANY) - private List friends; -} - -@Edge(name="relations") -public class Relation { - -} -``` - -### Document with From and To - -With the annotations `@From` and `@To` applied on a collection or array field in a class annotated with `@Document` the nested edge objects are fetched from the database. Each of the nested edge objects has to be stored as separate edge document in the edge collection described in the `@Edge` annotation of the nested object class with the *_id* of the parent document as field *_from* or *_to*. - -```java -@Document("persons") -public class Person { - @From - private List relations; -} - -@Edge(name="relations") -public class Relation { - ... -} -``` - -The database representation of `Person` in collection *persons* looks as follow: -``` -{ - "_key" : "123", - "_id" : "persons/123" -} -``` - -and the representation of `Relation` in collection *relations*: -``` -{ - "_key" : "456", - "_id" : "relations/456", - "_from" : "persons/123" - "_to" : ".../..." -} -{ - "_key" : "789", - "_id" : "relations/456", - "_from" : "persons/123" - "_to" : ".../..." -} -... - -``` - -### Edge with From and To - -With the annotations `@From` and `@To` applied on a field in a class annotated with `@Edge` the nested object is fetched from the database. The nested object has to be stored as a separate document in the collection described in the `@Document` annotation of the nested object class. 
The *_id* field of this nested object is stored in the fields `_from` or `_to` within the edge document. - -```java -@Edge("relations") -public class Relation { - @From - private Person c1; - @To - private Person c2; -} - -@Document(value="persons") -public class Person { - @Id - private String id; -} -``` - -The database representation of `Relation` in collection *relations* looks as follow: -``` -{ - "_key" : "123", - "_id" : "relations/123", - "_from" : "persons/456", - "_to" : "persons/789" -} -``` - -and the representation of `Person` in collection *persons*: -``` -{ - "_key" : "456", - "_id" : "persons/456", -} -{ - "_key" : "789", - "_id" : "persons/789", -} -``` - -**Note:** If you want to save an instance of `Relation`, both `Person` objects (from & to) already have to be persisted and the class `Person` needs a field with the annotation `@Id` so it can hold the persisted `_id` from the database. - -### Index and Indexed annotations - -With the `@Indexed` annotations user defined indexes can be created at a collection level by annotating single fields of a class. - -Possible `@Indexed` annotations are: -* `@HashIndexed` -* `@SkiplistIndexed` -* `@PersistentIndexed` -* `@GeoIndexed` -* `@FulltextIndexed` - -The following example creates a hash index on the field `name` and a separate hash index on the field `age`: -```java -public class Person { - @HashIndexed - private String name; - - @HashIndexed - private int age; -} -``` - -With the `@Indexed` annotations different indexes can be created on the same field. - -The following example creates a hash index and also a skiplist index on the field `name`: -```java -public class Person { - @HashIndexed - @SkiplistIndexed - private String name; -} -``` - -If the index should include multiple fields the `@Index` annotations can be used on the type instead. - -Possible `@Index` annotations are: -* `@HashIndex` -* `@SkiplistIndex` -* `@PersistentIndex` -* `@GeoIndex` -* `@FulltextIndex` - -The following example creates a single hash index on the fields `name` and `age`, note that if a field is renamed in the database with @Field, the new field name must be used in the index declaration: -```java -@HashIndex(fields = {"fullname", "age"}) -public class Person { - @Field("fullname") - private String name; - - private int age; -} -``` - -The `@Index` annotations can also be used to create an index on a nested field. - -The following example creates a single hash index on the fields `name` and `address.country`: -```java -@HashIndex(fields = {"name", "address.country"}) -public class Person { - private String name; - - private Address address; -} -``` - -The `@Index` annotations and the `@Indexed` annotations can be used at the same time in one class. - -The following example creates a hash index on the fields `name` and `age` and a separate hash index on the field `age`: -```java -@HashIndex(fields = {"name", "age"}) -public class Person { - private String name; - - @HashIndexed - private int age; -} -``` - -The `@Index` annotations can be used multiple times to create more than one index in this way. 
-
-The following example creates a hash index on the fields `name` and `age` and a separate hash index on the fields `name` and `gender`:
-```java
-@HashIndex(fields = {"name", "age"})
-@HashIndex(fields = {"name", "gender"})
-public class Person {
-  private String name;
-
-  private int age;
-
-  private Gender gender
-}
-```
+# Spring Data ArangoDB - Reference
+
+# Template
+
+With `ArangoTemplate` Spring Data ArangoDB offers central support for interactions with the database through a rich feature set. It mostly offers the features of the ArangoDB Java driver with additional exception translation from the driver's exceptions to Spring Data's access exceptions, which inherit from the `DataAccessException` class.
+The `ArangoTemplate` class is the default implementation of the operations interface `ArangoOperations`, which developers using Spring Data are encouraged to code against.
+
+# Repositories
+
+## Introduction
+
+Spring Data Commons provides a composable repository infrastructure which Spring Data ArangoDB is built on. It allows for interface-based composition of repositories, combining provided default implementations for certain interfaces (like `CrudRepository`) with custom implementations for other methods.
+
+## Instantiating
+
+Instances of a repository are created in Spring beans through Spring's auto-wiring mechanism.
+
+```java
+public class MySpringBean {
+
+  @Autowired
+  private MyRepository rep;
+
+}
+```
+
+## Return types
+
+The method return type for single results can be a primitive type, a domain class, `Map`, `BaseDocument`, `BaseEdgeDocument`, `Optional`, `GeoResult`.
+The method return type for multiple results can additionally be `ArangoCursor`, `Iterable`, `Collection`, `List`, `Set`, `Page`, `Slice`, `GeoPage`, `GeoResults`, where the element type can be anything a single result can be.
+
+## Query methods
+
+Queries using the [ArangoDB Query Language (AQL)](https://docs.arangodb.com/current/AQL/index.html) can be supplied with the `@Query` annotation on methods. `AqlQueryOptions` can also be passed to the driver, as an argument anywhere in the method signature.
+
+There are three ways of passing bind parameters to the query in the query annotation.
+
+Using number matching, arguments will be substituted into the query in the order they are passed to the query method.
+
+```java
+public interface MyRepository extends Repository{
+
+  @Query("FOR c IN customers FILTER c.name == @0 AND c.surname == @2 RETURN c")
+  ArangoCursor query(String name, AqlQueryOptions options, String surname);
+
+}
+```
+
+With the `@Param` annotation, the argument will be placed in the query at the place corresponding to the value passed to the `@Param` annotation.
+
+```java
+public interface MyRepository extends Repository{
+
+  @Query("FOR c IN customers FILTER c.name == @name AND c.surname == @surname RETURN c")
+  ArangoCursor query(@Param("name") String name, @Param("surname") String surname);
+
+}
+```
+
+In addition you can use a parameter of type `Map` annotated with `@BindVars` as your bind parameters. You can then fill the map with any parameter used in the query (see [here](https://docs.arangodb.com/3.1/AQL/Fundamentals/BindParameters.html#bind-parameters) for more information about bind parameters).
+
+```java
+public interface MyRepository extends Repository{
+
+  @Query("FOR c IN customers FILTER c.name == @name AND c.surname == @surname RETURN c")
+  ArangoCursor query(@BindVars Map bindVars);
+
+}
+```
+
+A mixture of any of these methods can be used. Parameters with the same name from an `@Param` annotation will override those in the `bindVars`.
+
+```java
+public interface MyRepository extends Repository{
+
+  @Query("FOR c IN customers FILTER c.name == @name AND c.surname == @surname RETURN c")
+  ArangoCursor query(@BindVars Map bindVars, @Param("name") String name);
+
+}
+```
+
+## Named queries
+An alternative to using the `@Query` annotation on methods is to specify the queries in a separate `.properties` file. The default path for the file is `META-INF/arango-named-queries.properties` and can be changed with the `EnableArangoRepositories#namedQueriesLocation()` setting. The entries in the properties file must adhere to the following convention: `{simple entity name}.{method name} = {query}`. Let's assume we have the following repository interface:
+
+```java
+package com.arangodb.repository;
+
+public interface CustomerRepository extends ArangoRepository {
+  Customer findByUsername(@Param("username") String username);
+}
+```
+
+The corresponding `arango-named-queries.properties` file looks like this:
+
+```properties
+Customer.findByUsername = FOR c IN customers FILTER c.username == @username RETURN c
+```
+
+The queries specified in the properties file are no different from the queries that can be defined with the `@Query` annotation. The only difference is that the queries are all in one place. If there is both a `@Query` annotation present and a named query defined for the same method, the query in the `@Query` annotation takes precedence.
+
+## Derived queries
+
+Spring Data ArangoDB supports queries derived from method names by splitting them into their semantic parts and converting them into AQL. The mechanism strips the prefixes `find..By`, `get..By`, `query..By`, `read..By`, `stream..By`, `count..By`, `exists..By`, `delete..By`, `remove..By` from the method name and parses the rest. The `By` acts as a separator to indicate the start of the criteria for the query to be built. You can define conditions on entity properties and concatenate them with `And` and `Or`.
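+
+The prefixes that do not return documents can be derived in the same way. A minimal sketch (the return types `long`, `boolean` and `void` follow the usual Spring Data conventions and are an assumption here, not taken from this reference):
+
+```java
+public interface MyRepository extends Repository<Customer, String> {
+
+  // derived count query on the name property
+  long countByName(String name);
+
+  // derived existence check on the name property
+  boolean existsByName(String name);
+
+  // derived removal of all documents matching the name property
+  void removeByName(String name);
+
+}
+```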
+
+The complete list of part types for derived methods is below, where `doc` is a document in the database:
+
+Keyword | Sample | Predicate
+----------|----------------|--------
+IsGreaterThan, GreaterThan, After | findByAgeGreaterThan(int age) | doc.age > age
+IsGreaterThanEqual, GreaterThanEqual | findByAgeIsGreaterThanEqual(int age) | doc.age >= age
+IsLessThan, LessThan, Before | findByAgeIsLessThan(int age) | doc.age < age
+IsLessThanEqual, LessThanEqual | findByAgeLessThanEqual(int age) | doc.age <= age
+IsBetween, Between | findByAgeBetween(int lower, int upper) | lower < doc.age < upper
+IsNotNull, NotNull | findByNameNotNull() | doc.name != null
+IsNull, Null | findByNameNull() | doc.name == null
+IsLike, Like | findByNameLike(String name) | doc.name LIKE name
+IsNotLike, NotLike | findByNameNotLike(String name) | NOT(doc.name LIKE name)
+IsStartingWith, StartingWith, StartsWith | findByNameStartsWith(String prefix) | doc.name LIKE prefix
+IsEndingWith, EndingWith, EndsWith | findByNameEndingWith(String suffix) | doc.name LIKE suffix
+Regex, MatchesRegex, Matches | findByNameRegex(String pattern) | REGEX_TEST(doc.name, pattern, ignoreCase)
+(No Keyword) | findByFirstName(String name) | doc.firstName == name
+IsTrue, True | findByActiveTrue() | doc.active == true
+IsFalse, False | findByActiveFalse() | doc.active == false
+Is, Equals | findByAgeEquals(int age) | doc.age == age
+IsNot, Not | findByAgeNot(int age) | doc.age != age
+IsIn, In | findByNameIn(String[] names) | doc.name IN names
+IsNotIn, NotIn | findByNameIsNotIn(String[] names) | doc.name NOT IN names
+IsContaining, Containing, Contains | findByFriendsContaining(String name) | name IN doc.friends
+IsNotContaining, NotContaining, NotContains | findByFriendsNotContains(String name) | name NOT IN doc.friends
+Exists | findByFriendNameExists() | HAS(doc.friend, name)
+
+```java
+public interface MyRepository extends Repository {
+
+  // FOR c IN customers FILTER c.name == @0 RETURN c
+  ArangoCursor findByName(String name);
+  ArangoCursor getByName(String name);
+
+  // FOR c IN customers
+  // FILTER c.name == @0 && c.age == @1
+  // RETURN c
+  ArangoCursor findByNameAndAge(String name, int age);
+
+  // FOR c IN customers
+  // FILTER c.name == @0 || c.age == @1
+  // RETURN c
+  ArangoCursor findByNameOrAge(String name, int age);
+}
+```
+
+You can apply sorting for one or multiple sort criteria by appending `OrderBy` to the method name and `Asc` or `Desc` for the directions.
+
+```java
+public interface MyRepository extends Repository {
+
+  // FOR c IN customers
+  // FILTER c.name == @0
+  // SORT c.age DESC RETURN c
+  ArangoCursor getByNameOrderByAgeDesc(String name);
+
+  // FOR c IN customers
+  // FILTER c.name == @0
+  // SORT c.name ASC, c.age DESC RETURN c
+  ArangoCursor findByNameOrderByNameAscAgeDesc(String name);
+
+}
+```
+
+### Geospatial queries
+
+Geospatial queries are a subsection of derived queries. To use a geospatial query on a collection, a geo index must exist on that collection. A geo index can be created on a field which is a two-element array, corresponding to latitude and longitude coordinates.
+
+As a subsection of derived queries, geospatial queries support all the same return types, but additionally support the three return types `GeoPage`, `GeoResult` and `GeoResults`. These types must be used in order to get the distance of each document as generated by the query.
+
+There are two kinds of geospatial query, Near and Within. Near sorts documents by distance from the given point, while Within both sorts and filters documents, returning those within the given distance range or shape.
+
+```java
+public interface MyRepository extends Repository {
+
+  GeoResult getByLocationNear(Point point);
+
+  GeoResults findByLocationWithinOrLocationWithin(Box box, Polygon polygon);
+
+  // Equivalent queries
+  GeoResults findByLocationWithinOrLocationWithin(Point point, int distance);
+  GeoResults findByLocationWithinOrLocationWithin(Point point, Distance distance);
+  GeoResults findByLocationWithinOrLocationWithin(Circle circle);
+
+}
+```
+
+## Property expression
+
+Property expressions can refer only to direct and nested properties of the managed domain class. The algorithm checks the domain class for the entire expression as a property. If the check fails, the algorithm splits up the expression at the camel case parts from the right and tries to find the corresponding property.
+
+```java
+@Document("customers")
+public class Customer {
+  private Address address;
+}
+
+public class Address {
+  private ZipCode zipCode;
+}
+
+public interface MyRepository extends Repository {
+
+  // 1. step: search domain class for a property "addressZipCode"
+  // 2. step: search domain class for "addressZip.code"
+  // 3. step: search domain class for "address.zipCode"
+  ArangoCursor findByAddressZipCode(ZipCode zipCode);
+}
+```
+
+It is possible for the algorithm to select the wrong property if the domain class also has a property which matches the first split of the expression. To resolve this ambiguity you can use `_` as a separator inside your method name to define traversal points.
+
+```java
+@Document("customers")
+public class Customer {
+  private Address address;
+  private AddressZip addressZip;
+}
+
+public class Address {
+  private ZipCode zipCode;
+}
+
+public class AddressZip {
+  private String code;
+}
+
+public interface MyRepository extends Repository {
+
+  // 1. step: search domain class for a property "addressZipCode"
+  // 2. step: search domain class for "addressZip.code"
+  // creates query with "x.addressZip.code"
+  ArangoCursor findByAddressZipCode(ZipCode zipCode);
+
+  // 1. step: search domain class for a property "addressZipCode"
+  // 2. step: search domain class for "addressZip.code"
+  // 3. step: search domain class for "address.zipCode"
+  // creates query with "x.address.zipCode"
+  ArangoCursor findByAddress_ZipCode(ZipCode zipCode);
+
+}
+```
+
+## Special parameter handling
+
+### Bind parameters
+
+AQL supports the usage of [bind parameters](https://docs.arangodb.com/3.1/AQL/Fundamentals/BindParameters.html) which you can define with a method parameter of type `Map` annotated with `@BindVars`.
+
+```java
+public interface MyRepository extends Repository {
+
+  @Query("FOR c IN customers FILTER c[@field] == @value RETURN c")
+  ArangoCursor query(Map bindVars);
+
+}
+
+Map bindVars = new HashMap();
+bindVars.put("field", "name");
+bindVars.put("value", "john");
+
+// will execute the query "FOR c IN customers FILTER c.name == "john" RETURN c"
+ArangoCursor cursor = myRepo.query(bindVars);
+```
+
+### AQL query options
+
+You can set additional options for the query and the created cursor via the class `AqlQueryOptions`, which you can simply define as a method parameter without a specific name. `AqlQueryOptions` can also be defined with the `@QueryOptions` annotation, as shown below. `AqlQueryOptions` from an annotation and those from an argument are merged if both exist, with those in the argument taking precedence.
+
+The `AqlQueryOptions` allows you to set the cursor time-to-live, batch size, caching flag and several other settings. This special parameter works with both query methods and finder methods. Keep in mind that some options, like time-to-live, are only effective if the method return type is `ArangoCursor` or `Iterable`.
+
+```java
+public interface MyRepository extends Repository {
+
+
+  @Query("FOR c IN customers FILTER c.name == @0 RETURN c")
+  Iterable query(String name, AqlQueryOptions options);
+
+
+  Iterable findByName(String name, AqlQueryOptions options);
+
+
+  @QueryOptions(maxPlans = 1000, ttl = 128)
+  ArangoCursor findByAddressZipCode(ZipCode zipCode);
+
+
+  @Query("FOR c IN customers FILTER c[@field] == @value RETURN c")
+  @QueryOptions(cache = true, ttl = 128)
+  ArangoCursor query(Map bindVars, AqlQueryOptions options);
+
+}
+```
+
+# Mapping
+
+## Introduction
+
+In this section we describe the features and conventions for mapping Java objects to documents and how to override those conventions with annotation-based mapping metadata.
+
+## Conventions
+
+* The Java class name is mapped to the collection name
+* The non-static fields of a Java object are used as fields in the stored document
+* The Java field name is mapped to the stored document field name
+* All nested Java objects are stored as nested objects in the stored document
+* The Java class needs a constructor which meets the following criteria:
+  * in case of a single constructor:
+    * a non-parameterized constructor or
+    * a parameterized constructor
+  * in case of multiple constructors:
+    * a non-parameterized constructor or
+    * a parameterized constructor annotated with `@PersistenceConstructor`
+
+## Type conventions
+
+ArangoDB uses [VelocyPack](https://github.com/arangodb/velocypack) as its internal storage format, which supports a large number of data types. In addition, Spring Data ArangoDB offers - with the underlying Java driver - built-in converters to add additional types to the mapping.
+
+Java type | VelocyPack type
+----------|----------------
+java.lang.String | string
+java.lang.Boolean | bool
+java.lang.Integer | signed int 4 bytes, smallint
+java.lang.Long | signed int 8 bytes, smallint
+java.lang.Short | signed int 2 bytes, smallint
+java.lang.Double | double
+java.lang.Float | double
+java.math.BigInteger | signed int 8 bytes, unsigned int 8 bytes
+java.math.BigDecimal | double
+java.lang.Number | double
+java.lang.Character | string
+java.util.Date | string (date-format ISO 8601)
+java.sql.Date | string (date-format ISO 8601)
+java.sql.Timestamp | string (date-format ISO 8601)
+java.util.UUID | string
+java.lang.byte[] | string (Base64)
+
+## Type mapping
+As collections in ArangoDB can contain documents of various types, a mechanism to retrieve the correct Java class is required. The type information of properties declared in a class may not be enough to restore the original class (due to inheritance). If the declared complex type and the actual type do not match, information about the actual type is stored together with the document. This is necessary to restore the correct type when reading from the DB. Consider the following example:
+
+```java
+public class Person {
+  private String name;
+  private Address homeAddress;
+  // ...
+
+  // getters and setters omitted
+}
+
+public class Employee extends Person {
+  private Address workAddress;
+  // ...
+
+  // getters and setters omitted
+}
+
+public class Address {
+  private final String street;
+  private final String number;
+  // ...
+
+  public Address(String street, String number) {
+    this.street = street;
+    this.number = number;
+  }
+
+  // getters omitted
+}
+
+@Document
+public class Company {
+  @Key
+  private String key;
+  private Person manager;
+
+  // getters and setters omitted
+}
+
+Employee manager = new Employee();
+manager.setName("Jane Roberts");
+manager.setHomeAddress(new Address("Park Avenue", "432/64"));
+manager.setWorkAddress(new Address("Main Street", "223"));
+Company comp = new Company();
+comp.setManager(manager);
+```
+
+The serialized document for the DB looks like this:
+
+```json
+{
+  "manager": {
+    "name": "Jane Roberts",
+    "homeAddress": {
+      "street": "Park Avenue",
+      "number": "432/64"
+    },
+    "workAddress": {
+      "street": "Main Street",
+      "number": "223"
+    },
+    "_class": "com.arangodb.Employee"
+  },
+  "_class": "com.arangodb.Company"
+}
+```
+
+Type hints are written for top-level documents (as a collection can contain different document types) as well as for every value if it is a complex type and a sub-type of the declared property type. `Map`s and `Collection`s are excluded from type mapping. Without the additional information about the concrete classes used, the document couldn't be restored in Java. The type information of the `manager` property is not enough to determine the `Employee` type. The `homeAddress` and `workAddress` properties have the same actual and declared type, thus no type hint is needed.
+
+### Customizing type mapping
+By default, the fully qualified class name is stored in the documents as a type hint. A custom type hint can be set with the `@TypeAlias("my-alias")` annotation on an entity. Make sure that it is a unique identifier across all entities. If we added a `@TypeAlias("employee")` annotation to the `Employee` class above, it would be persisted as `"_class": "employee"`.
+
+The default type key is `_class` and can be changed by overriding the `typeKey()` method of the `AbstractArangoConfiguration` class.
+
+If you need to further customize the type mapping process, the `arangoTypeMapper()` method of the configuration class can be overridden. The included `DefaultArangoTypeMapper` can be customized by providing a list of [`TypeInformationMapper`](https://docs.spring.io/spring-data/commons/docs/current/api/org/springframework/data/convert/TypeInformationMapper.html)s that create aliases from types and vice versa.
+
+In order to fully customize the type mapping process you can provide a custom type mapper implementation by extending the `DefaultArangoTypeMapper` class.
+
+### Deactivating type mapping
+To deactivate the type mapping process, you can return `null` from the `typeKey()` method of the `AbstractArangoConfiguration` class. No type hints are stored in the documents with this setting. You can disable the type mapping if you make sure that each declared type corresponds to the actual type; otherwise reading the entities from the DB can lead to exceptions.
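+
+A minimal configuration sketch for switching the type mapping off could look like this (the `arango()` and `database()` overrides as well as the package and database names are illustrative assumptions, not taken from this reference):
+
+```java
+@Configuration
+@EnableArangoRepositories(basePackages = { "com.arangodb.example" })
+public class MyConfiguration extends AbstractArangoConfiguration {
+
+  @Override
+  public ArangoDB.Builder arango() {
+    return new ArangoDB.Builder();
+  }
+
+  @Override
+  public String database() {
+    return "example-db";
+  }
+
+  // Returning null deactivates the type mapping: no type hint
+  // (by default "_class") is written to or read from the documents.
+  @Override
+  public String typeKey() {
+    return null;
+  }
+}
+```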
+ +## Annotations + +### Annotation overview + +annotation | level | description +-----------|-------|------------ +@Document | class | marks this class as a candidate for mapping +@Edge | class | marks this class as a candidate for mapping +@Id | field | stores the field as the system field _id +@Key | field | stores the field as the system field _key +@Rev | field | stores the field as the system field _rev +@Field("alt-name") | field | stores the field with an alternative name +@Ref | field | stores the _id of the referenced document and not the nested document +@From | field | stores the _id of the referenced document as the system field _from +@To | field | stores the _id of the referenced document as the system field _to +@Relations | field | vertices which are connected over edges +@Transient | field, method, annotation | marks a field to be transient for the mapping framework, thus the property will not be persisted and not further inspected by the mapping framework +@PersistenceConstructor | constructor | marks a given constructor - even a package protected one - to use when instantiating the object from the database +@TypeAlias("alias") | class | set a type alias for the class when persisted to the DB +@HashIndex | class | describes a hash index +@HashIndexed | field | describes how to index the field +@SkiplistIndex | class | describes a skiplist index +@SkiplistIndexed | field | describes how to index the field +@PersistentIndex | class | describes a persistent index +@PersistentIndexed | field | describes how to index the field +@GeoIndex | class | describes a geo index +@GeoIndexed | field | describes how to index the field +@FulltextIndex | class | describes a fulltext index +@FulltextIndexed | field | describes how to index the field + +### Document + +The annotations `@Document` applied to a class marks this class as a candidate for mapping to the database. The most relevant parameter is `value` to specify the collection name in the database. The annotation `@Document` specifies the collection type to `DOCUMENT`. + +```java +@Document(value="persons") +public class Person { + ... +} +``` + +### Edge + +The annotations `@Edge` applied to a class marks this class as a candidate for mapping to the database. The most relevant parameter is `value` to specify the collection name in the database. The annotation `@Edge` specifies the collection type to `EDGE`. + +```java +@Edge("relations") +public class Relation { + ... +} +``` + +### Reference + +With the annotation `@Ref` applied on a field the nested object isn’t stored as a nested object in the document. The `_id` field of the nested object is stored in the document and the nested object has to be stored as a separate document in another collection described in the `@Document` annotation of the nested object class. To successfully persist an instance of your object the referencing field has to be null or it's instance has to provide a field with the annotation `@Id` including a valid id. 
+ +```java +@Document(value="persons") +public class Person { + @Ref + private Address address; +} + +@Document("addresses") +public class Address { + @Id + private String id; + private String country; + private String street; +} +``` + +The database representation of `Person` in collection *persons* looks as follow: + +``` +{ + "_key" : "123", + "_id" : "persons/123", + "address" : "addresses/456" +} +``` +and the representation of `Address` in collection *addresses*: +``` +{ + "_key" : "456", + "_id" : "addresses/456", + "country" : "...", + "street" : "..." +} +``` + +Without the annotation `@Ref` at the field `address`, the stored document would look: + +``` +{ + "_key" : "123", + "_id" : "persons/123", + "address" : { + "country" : "...", + "street" : "..." + } +} +``` + +### Relations + +With the annotation `@Relations` applied on a collection or array field in a class annotated with `@Document` the nested objects are fetched from the database over a graph traversal with your current object as the starting point. The most relevant parameter is `edge`. With `edge` you define the edge collection - which should be used in the traversal - using the class type. With the parameter `depth` you can define the maximal depth for the traversal (default 1) and the parameter `direction` defines whether the traversal should follow outgoing or incoming edges (default Direction.ANY). + +```java +@Document(value="persons") +public class Person { + @Relations(edge=Relation.class, depth=1, direction=Direction.ANY) + private List friends; +} + +@Edge(name="relations") +public class Relation { + +} +``` + +### Document with From and To + +With the annotations `@From` and `@To` applied on a collection or array field in a class annotated with `@Document` the nested edge objects are fetched from the database. Each of the nested edge objects has to be stored as separate edge document in the edge collection described in the `@Edge` annotation of the nested object class with the *_id* of the parent document as field *_from* or *_to*. + +```java +@Document("persons") +public class Person { + @From + private List relations; +} + +@Edge(name="relations") +public class Relation { + ... +} +``` + +The database representation of `Person` in collection *persons* looks as follow: +``` +{ + "_key" : "123", + "_id" : "persons/123" +} +``` + +and the representation of `Relation` in collection *relations*: +``` +{ + "_key" : "456", + "_id" : "relations/456", + "_from" : "persons/123" + "_to" : ".../..." +} +{ + "_key" : "789", + "_id" : "relations/456", + "_from" : "persons/123" + "_to" : ".../..." +} +... + +``` + +### Edge with From and To + +With the annotations `@From` and `@To` applied on a field in a class annotated with `@Edge` the nested object is fetched from the database. The nested object has to be stored as a separate document in the collection described in the `@Document` annotation of the nested object class. The *_id* field of this nested object is stored in the fields `_from` or `_to` within the edge document. 
+ +```java +@Edge("relations") +public class Relation { + @From + private Person c1; + @To + private Person c2; +} + +@Document(value="persons") +public class Person { + @Id + private String id; +} +``` + +The database representation of `Relation` in collection *relations* looks as follow: +``` +{ + "_key" : "123", + "_id" : "relations/123", + "_from" : "persons/456", + "_to" : "persons/789" +} +``` + +and the representation of `Person` in collection *persons*: +``` +{ + "_key" : "456", + "_id" : "persons/456", +} +{ + "_key" : "789", + "_id" : "persons/789", +} +``` + +**Note:** If you want to save an instance of `Relation`, both `Person` objects (from & to) already have to be persisted and the class `Person` needs a field with the annotation `@Id` so it can hold the persisted `_id` from the database. + +### Index and Indexed annotations + +With the `@Indexed` annotations user defined indexes can be created at a collection level by annotating single fields of a class. + +Possible `@Indexed` annotations are: +* `@HashIndexed` +* `@SkiplistIndexed` +* `@PersistentIndexed` +* `@GeoIndexed` +* `@FulltextIndexed` + +The following example creates a hash index on the field `name` and a separate hash index on the field `age`: +```java +public class Person { + @HashIndexed + private String name; + + @HashIndexed + private int age; +} +``` + +With the `@Indexed` annotations different indexes can be created on the same field. + +The following example creates a hash index and also a skiplist index on the field `name`: +```java +public class Person { + @HashIndexed + @SkiplistIndexed + private String name; +} +``` + +If the index should include multiple fields the `@Index` annotations can be used on the type instead. + +Possible `@Index` annotations are: +* `@HashIndex` +* `@SkiplistIndex` +* `@PersistentIndex` +* `@GeoIndex` +* `@FulltextIndex` + +The following example creates a single hash index on the fields `name` and `age`, note that if a field is renamed in the database with @Field, the new field name must be used in the index declaration: +```java +@HashIndex(fields = {"fullname", "age"}) +public class Person { + @Field("fullname") + private String name; + + private int age; +} +``` + +The `@Index` annotations can also be used to create an index on a nested field. + +The following example creates a single hash index on the fields `name` and `address.country`: +```java +@HashIndex(fields = {"name", "address.country"}) +public class Person { + private String name; + + private Address address; +} +``` + +The `@Index` annotations and the `@Indexed` annotations can be used at the same time in one class. + +The following example creates a hash index on the fields `name` and `age` and a separate hash index on the field `age`: +```java +@HashIndex(fields = {"name", "age"}) +public class Person { + private String name; + + @HashIndexed + private int age; +} +``` + +The `@Index` annotations can be used multiple times to create more than one index in this way. 
+ +The following example creates a hash index on the fields `name` and `age` and a separate hash index on the fields `name` and `gender`: +```java +@HashIndex(fields = {"name", "age"}) +@HashIndex(fields = {"name", "gender"}) +public class Person { + private String name; + + private int age; + + private Gender gender +} +``` diff --git a/Documentation/Books/Manual/Deployment/Kubernetes/DeploymentResource.md b/Documentation/Books/Manual/Deployment/Kubernetes/DeploymentResource.md index fce0ab176c..4180ed7be2 100644 --- a/Documentation/Books/Manual/Deployment/Kubernetes/DeploymentResource.md +++ b/Documentation/Books/Manual/Deployment/Kubernetes/DeploymentResource.md @@ -217,6 +217,42 @@ This setting specifies the pull policy for the docker image to use for all Arang For possible values, see `spec.imagePullPolicy`. When not specified, the `spec.imagePullPolicy` value is used. +### `spec.sync.externalAccess.type: string` + +This setting specifies the type of `Service` that will be created to provide +access to the ArangoSync syncMasters from outside the Kubernetes cluster. +Possible values are: + +- `None` To limit access to applications running inside the Kubernetes cluster. +- `LoadBalancer` To create a `Service` of type `LoadBalancer` for the ArangoSync SyncMasters. +- `NodePort` To create a `Service` of type `NodePort` for the ArangoSync SyncMasters. +- `Auto` (default) To create a `Service` of type `LoadBalancer` and fallback to a `Service` or type `NodePort` when the + `LoadBalancer` is not assigned an IP address. + +Note that when you specify a value of `None`, a `Service` will still be created, but of type `ClusterIP`. + +### `spec.sync.externalAccess.loadBalancerIP: string` + +This setting specifies the IP used for the LoadBalancer to expose the ArangoSync SyncMasters on. +This setting is used when `spec.sync.externalAccess.type` is set to `LoadBalancer` or `Auto`. + +If you do not specify this setting, an IP will be chosen automatically by the load-balancer provisioner. + +### `spec.sync.externalAccess.nodePort: int` + +This setting specifies the port used to expose the ArangoSync SyncMasters on. +This setting is used when `spec.sync.externalAccess.type` is set to `NodePort` or `Auto`. + +If you do not specify this setting, a random port will be chosen automatically. + +### `spec.sync.externalAccess.masterEndpoint: []string` + +This setting specifies the master endpoint(s) advertised by the ArangoSync SyncMasters. +If not set, this setting defaults to: + +- If `spec.sync.externalAccess.loadBalancerIP` is set, it defaults to `https://:<8629>`. +- Otherwise it defaults to `https://:<8629>`. + ### `spec.sync.auth.jwtSecretName: string` This setting specifies the name of a kubernetes `Secret` that contains @@ -318,3 +354,16 @@ for each server of this group. This setting is not available for group `coordinators`, `syncmasters` & `syncworkers` because servers in these groups do not need persistent storage. + +### `spec..tolerations: [Toleration]` + +This setting specifies the `tolerations` for the `Pod`s created +for each server of this group. + +By default, suitable tolerations are set for the following keys with the `NoExecute` effect: + +- `node.kubernetes.io/not-ready` +- `node.kubernetes.io/unreachable` +- `node.alpha.kubernetes.io/unreachable` (will be removed in future version) + +For more information on tolerations, consult the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/). 
diff --git a/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_replace_by_example.md b/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_replace_by_example.md index efaafc2b07..b5ea155503 100644 --- a/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_replace_by_example.md +++ b/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_replace_by_example.md @@ -1,5 +1,6 @@ @startDocuBlock put_api_simple_replace_by_example @brief replaces the body of all documents of a collection that match an example + @RESTHEADER{PUT /_api/simple/replace-by-example, Replace documents by example} @RESTBODYPARAM{collection,string,required,string} diff --git a/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_update_by_example.md b/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_update_by_example.md index c518565ba7..d9e0ede133 100644 --- a/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_update_by_example.md +++ b/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_update_by_example.md @@ -1,5 +1,6 @@ @startDocuBlock put_api_simple_update_by_example @brief partially updates the body of all documents of a collection that match an example + @RESTHEADER{PUT /_api/simple/update-by-example, Update documents by example} @RESTBODYPARAM{collection,string,required,string} diff --git a/Documentation/Scripts/fetchRefs.sh b/Documentation/Scripts/fetchRefs.sh old mode 100644 new mode 100755 index d49b4afdbf..0a266cfe8f --- a/Documentation/Scripts/fetchRefs.sh +++ b/Documentation/Scripts/fetchRefs.sh @@ -46,13 +46,13 @@ for book in ${ALLBOOKS}; do export NAME=$(basename ${oneMD}) export MDSUBDIR=$(echo "${oneMD}" | sed "s;${NAME};;") export DSTDIR="../Books/${book}/${DST}/${MDSUBDIR}" - + export TOPREF=$(echo ${MDSUBDIR} | sed 's;\([a-zA-Z]*\)/;../;g') if test ! 
-d "${DSTDIR}"; then mkdir -p "${DSTDIR}" fi ( echo "" - cat "${CODIR}/${SUBDIR}/${SRC}/${oneMD}" + cat "${CODIR}/${SUBDIR}/${SRC}/${oneMD}" |sed "s;https://docs.arangodb.com/latest;../${TOPREF};g" ) > "${DSTDIR}/${NAME}" done diff --git a/js/apps/system/_admin/aardvark/APP/api-docs.json b/js/apps/system/_admin/aardvark/APP/api-docs.json index 049c810c7c..382edd1b62 100644 --- a/js/apps/system/_admin/aardvark/APP/api-docs.json +++ b/js/apps/system/_admin/aardvark/APP/api-docs.json @@ -72,7 +72,7 @@ "startVertex" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph Traversal/HTTP_API_TRAVERSAL.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph Traversal/HTTP_API_TRAVERSAL.md" }, "PostApiQueryProperties": { "properties": { @@ -85,7 +85,7 @@ "query" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/PostApiQueryProperties.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/PostApiQueryProperties.md" }, "PutApiQueryCacheProperties": { "properties": { @@ -104,7 +104,7 @@ "maxResults" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/PutApiQueryCacheProperties.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/PutApiQueryCacheProperties.md" }, "PutApiQueryProperties": { "properties": { @@ -148,7 +148,7 @@ "maxQueryStringLength" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/PutApiQueryProperties.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/PutApiQueryProperties.md" }, "RestLookupByKeys": { "properties": { @@ -169,7 +169,7 @@ "keys" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/RestLookupByKeys.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/RestLookupByKeys.md" }, "RestRemoveByKeys": { "properties": { @@ -193,7 +193,7 @@ "keys" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/RestRemoveByKeys.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/RestRemoveByKeys.md" }, "UserHandling_create": { "properties": { @@ -221,7 +221,7 @@ "passwd" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" }, "UserHandling_grantCollection": { "properties": { @@ -234,7 +234,7 @@ "grant" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" }, "UserHandling_grantDatabase": { "properties": { @@ -247,7 +247,7 @@ "grant" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" }, "UserHandling_modify": { "properties": { @@ -270,7 +270,7 @@ "passwd" ], "type": "object", - "x-filename": 
"/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" }, "UserHandling_replace": { "properties": { @@ -293,7 +293,7 @@ "passwd" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" }, "admin_echo_client_struct": { "description": "attributes of the client connection\n\n", @@ -302,7 +302,7 @@ "client" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/post_admin_echo.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/post_admin_echo.md" }, "admin_echo_server_struct": { "description": "\n", @@ -326,7 +326,7 @@ "server" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/post_admin_echo.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/post_admin_echo.md" }, "admin_statistics_figures_struct": { "description": "", @@ -480,7 +480,7 @@ "bytesReceived" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" }, "cluster_endpoints_struct": { "description": "", @@ -554,7 +554,7 @@ "indexes" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" }, "collection_figures_alive": { "description": "the currently active figures\n\n", @@ -571,7 +571,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" }, "collection_figures_compactors": { "description": "\n", @@ -588,7 +588,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" }, "collection_figures_datafiles": { "description": "Metrics regarding the datafiles\n\n", @@ -605,7 +605,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" }, "collection_figures_dead": { "description": "the items waiting to be swept away by the cleaner\n\n", @@ -627,7 +627,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" }, "collection_figures_indexes": { "description": "", @@ -644,7 +644,7 @@ 
} }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" }, "collection_figures_journals": { "description": "Metrics regarding the journal files\n\n", @@ -661,7 +661,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" }, "collection_figures_readcache": { "description": "\n", @@ -678,7 +678,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" }, "collection_figures_revisions": { "description": "\n", @@ -695,7 +695,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" }, "compactionStatus_attributes": { "description": "", @@ -710,7 +710,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" }, "delete_api_aqlfunction_rc_200": { "properties": { @@ -736,7 +736,7 @@ "deletedCount" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/delete_api_aqlfunction.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/delete_api_aqlfunction.md" }, "delete_api_aqlfunction_rc_400": { "properties": { @@ -767,7 +767,7 @@ "errorMessage" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/delete_api_aqlfunction.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/delete_api_aqlfunction.md" }, "delete_api_aqlfunction_rc_404": { "properties": { @@ -798,7 +798,7 @@ "errorMessage" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/delete_api_aqlfunction.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/delete_api_aqlfunction.md" }, "delete_api_tasks_rc_200": { "properties": { @@ -818,7 +818,7 @@ "error" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/delete_api_tasks.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/delete_api_tasks.md" }, "delete_api_tasks_rc_404": { "properties": { @@ -843,7 +843,7 @@ "errorMessage" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/delete_api_tasks.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/delete_api_tasks.md" }, "explain_options": { "description": "Options for the query\n\n", @@ -867,7 +867,7 @@ } }, "type": "object", - "x-filename": 
"/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/post_api_explain.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/post_api_explain.md" }, "general_graph_create_http_examples": { "properties": { @@ -896,7 +896,7 @@ "name" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_create_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_create_http_examples.md" }, "general_graph_edge_definition_add_http_examples": { "properties": { @@ -925,7 +925,7 @@ "to" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_definition_add_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_definition_add_http_examples.md" }, "general_graph_edge_definition_modify_http_examples": { "properties": { @@ -954,7 +954,7 @@ "to" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_definition_modify_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_definition_modify_http_examples.md" }, "get_admin_log_rc_200": { "properties": { @@ -999,7 +999,7 @@ "totalAmount" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_modules_flush.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_modules_flush.md" }, "get_admin_server_role_rc_200": { "properties": { @@ -1030,7 +1030,7 @@ "role" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_server_role.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_server_role.md" }, "get_admin_statistics_description_rc_200": { "properties": { @@ -1066,7 +1066,7 @@ "error" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics_description.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics_description.md" }, "get_admin_statistics_rc_200": { "properties": { @@ -1115,7 +1115,7 @@ "enabled" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" }, "get_admin_time_rc_200": { "properties": { @@ -1141,7 +1141,7 @@ "time" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_time.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_time.md" }, "get_api_aqlfunction_rc_200": { "properties": { @@ -1169,7 +1169,7 @@ "result" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/get_api_aqlfunction.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/get_api_aqlfunction.md" }, "get_api_aqlfunction_rc_400": { "properties": { @@ -1200,7 +1200,7 @@ 
"errorMessage" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/get_api_aqlfunction.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/get_api_aqlfunction.md" }, "get_api_cluster_endpoints_rc_200": { "properties": { @@ -1228,7 +1228,7 @@ "endpoints" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_api_cluster_endpoints.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_api_cluster_endpoints.md" }, "get_api_collection_figures_rc_200": { "properties": { @@ -1251,7 +1251,7 @@ "journalSize" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" }, "get_api_database_new": { "properties": { @@ -1271,7 +1271,7 @@ "name" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Database/get_api_database_new.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Database/get_api_database_new.md" }, "get_api_database_new_USERS": { "description": "", @@ -1315,7 +1315,7 @@ "version" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_api_return.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_api_return.md" }, "get_api_tasks_all_rc_200": { "description": "a list of all tasks\n\n", @@ -1335,7 +1335,7 @@ "name" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_engine.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_engine.md" }, "http_statistics_struct": { "description": "the numbers of requests by Verb\n\n", @@ -1395,7 +1395,7 @@ "http" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" }, "patch_api_view_link_props": { "description": "The link properties. If specified, then *properties*\nshould be a JSON object containing the following attributes:\n\n", @@ -1421,7 +1421,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" }, "patch_api_view_link_props_field_props": { "description": "The field properties. 
If specified, then *properties*\nshould be a JSON object containing the following attributes:\n\n", @@ -1448,7 +1448,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" }, "patch_api_view_links": { "description": "The set of collection names associated with the properties.\n\n", @@ -1458,7 +1458,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" }, "patch_api_view_properties_iresearch": { "properties": { @@ -1484,7 +1484,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" }, "patch_api_view_props_commit": { "description": "Commit options for regular operations.\n\n", @@ -1499,7 +1499,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" }, "patch_api_view_props_consolidation": { "description": "\n", @@ -1534,7 +1534,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" }, "patch_api_view_props_consolidation_bytes_accum": { "description": "Use empty object for default values, i.e. {}\n\n", @@ -1551,7 +1551,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" }, "patch_api_view_props_consolidation_count": { "description": "Use empty object for default values, i.e. {}\n\n", @@ -1568,7 +1568,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" }, "patch_api_view_props_consolidation_fill": { "description": "Use empty object for default values, i.e. 
{}\n\n", @@ -1585,7 +1585,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" }, "post_admin_echo_rc_200": { "properties": { @@ -1688,7 +1688,7 @@ "rawRequestBody" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/post_admin_echo.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/post_admin_echo.md" }, "post_api_aqlfunction": { "properties": { @@ -1711,7 +1711,7 @@ "code" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/post_api_aqlfunction.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/post_api_aqlfunction.md" }, "post_api_aqlfunction_rc_200": { "properties": { @@ -1731,7 +1731,7 @@ "code" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/post_api_aqlfunction.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/post_api_aqlfunction.md" }, "post_api_aqlfunction_rc_201": { "properties": { @@ -1751,7 +1751,7 @@ "code" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/post_api_aqlfunction.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/post_api_aqlfunction.md" }, "post_api_aqlfunction_rc_400": { "properties": { @@ -1782,7 +1782,7 @@ "errorMessage" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/post_api_aqlfunction.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/post_api_aqlfunction.md" }, "post_api_collection": { "properties": { @@ -1851,7 +1851,7 @@ "name" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/post_api_collection.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/post_api_collection.md" }, "post_api_collection_opts": { "description": "additional options for key generation. 
If specified, then *keyOptions*\nshould be a JSON array containing the following attributes:\n\n", @@ -1876,7 +1876,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/post_api_collection.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/post_api_collection.md" }, "post_api_cursor": { "properties": { @@ -1925,7 +1925,7 @@ "query" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Cursors/post_api_cursor.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Cursors/post_api_cursor.md" }, "post_api_cursor_opts": { "description": "key/value object with extra options for the query.\n\n", @@ -1990,7 +1990,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Cursors/post_api_cursor.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Cursors/post_api_cursor.md" }, "post_api_cursor_rc_201": { "properties": { @@ -2045,7 +2045,7 @@ "cached" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Cursors/post_api_cursor.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Cursors/post_api_cursor.md" }, "post_api_cursor_rc_400": { "properties": { @@ -2076,7 +2076,7 @@ "errorMessage" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Cursors/post_api_cursor.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Cursors/post_api_cursor.md" }, "post_api_explain": { "properties": { @@ -2100,7 +2100,7 @@ "query" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/post_api_explain.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/post_api_explain.md" }, "post_api_export": { "properties": { @@ -2147,7 +2147,7 @@ "ttl" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Bulk/post_api_export.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Bulk/post_api_export.md" }, "post_api_export_restrictions": { "description": "an object containing an array of attribute names that will be \nincluded or excluded when returning result documents.\n\nNot specifying *restrict* will by default return all attributes of each document.\n\n", @@ -2166,7 +2166,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Bulk/post_api_export.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Bulk/post_api_export.md" }, "post_api_gharial_create_opts": { "description": "a JSON object which is only useful in Enterprise version and with isSmart set to true.\nIt can contain the following attributes:\n\n", @@ -2182,7 +2182,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_create_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_create_http_examples.md" }, "post_api_index_fulltext": { "properties": { @@ -2209,7 +2209,7 @@ "minLength" ], "type": "object", - "x-filename": 
"/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Indexes/post_api_index_fulltext.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Indexes/post_api_index_fulltext.md" }, "post_api_index_geo": { "properties": { @@ -2235,7 +2235,7 @@ "geoJson" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Indexes/post_api_index_geo.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Indexes/post_api_index_geo.md" }, "post_api_index_hash": { "properties": { @@ -2273,7 +2273,7 @@ "sparse" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Indexes/post_api_index_hash.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Indexes/post_api_index_hash.md" }, "post_api_index_persistent": { "properties": { @@ -2306,7 +2306,7 @@ "sparse" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Indexes/post_api_index_persistent.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Indexes/post_api_index_persistent.md" }, "post_api_index_skiplist": { "properties": { @@ -2344,7 +2344,7 @@ "sparse" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Indexes/post_api_index_skiplist.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Indexes/post_api_index_skiplist.md" }, "post_api_new_tasks": { "properties": { @@ -2377,7 +2377,7 @@ "params" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/post_api_new_tasks.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/post_api_new_tasks.md" }, "post_api_new_tasks_rc_200": { "properties": { @@ -2435,7 +2435,7 @@ "error" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/post_api_new_tasks.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/post_api_new_tasks.md" }, "post_api_transaction": { "properties": { @@ -2482,7 +2482,7 @@ "action" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Transactions/post_api_transaction.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Transactions/post_api_transaction.md" }, "post_api_view_iresearch": { "properties": { @@ -2503,7 +2503,7 @@ "type" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/post_api_view_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/post_api_view_iresearch.md" }, "post_api_view_link_props": { "description": "The link properties. If specified, then *properties*\nshould be a JSON object containing the following attributes:\n\n", @@ -2529,7 +2529,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/put_api_view_properties_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/put_api_view_properties_iresearch.md" }, "post_api_view_link_props_field_props": { "description": "The field properties. 
If specified, then *properties*\nshould be a JSON object containing the following attributes:\n\n", @@ -2556,7 +2556,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/put_api_view_properties_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/put_api_view_properties_iresearch.md" }, "post_api_view_links": { "description": "The set of collection names associated with the properties.\n\n", @@ -2566,7 +2566,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/put_api_view_properties_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/put_api_view_properties_iresearch.md" }, "post_api_view_props": { "description": "The view properties. If specified, then *properties*\nshould be a JSON object containing the following attributes:\n\n", @@ -2594,7 +2594,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/post_api_view_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/post_api_view_iresearch.md" }, "post_api_view_props_commit": { "description": "Commit options for regular operations.\n\nCommit options for regular operations.\n\n", @@ -2614,7 +2614,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/post_api_view_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/put_api_view_properties_iresearch.md" }, "post_api_view_props_consolidation": { "description": "\n\n", @@ -2649,7 +2649,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/post_api_view_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/put_api_view_properties_iresearch.md" }, "post_api_view_props_consolidation_bytes_accum": { "description": "Use empty object for default values, i.e. {}\n\nUse empty object for default values, i.e. {}\n\n", @@ -2666,7 +2666,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/post_api_view_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/put_api_view_properties_iresearch.md" }, "post_api_view_props_consolidation_count": { "description": "Use empty object for default values, i.e. {}\n\nUse empty object for default values, i.e. {}\n\n", @@ -2683,7 +2683,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/post_api_view_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/put_api_view_properties_iresearch.md" }, "post_api_view_props_consolidation_fill": { "description": "Use empty object for default values, i.e. {}\n\nUse empty object for default values, i.e. 
{}\n\n", @@ -2700,7 +2700,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/post_api_view_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/put_api_view_properties_iresearch.md" }, "post_batch_replication": { "properties": { @@ -2714,7 +2714,7 @@ "ttl" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/post_batch_replication.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/post_batch_replication.md" }, "put_admin_loglevel": { "properties": { @@ -2876,7 +2876,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_modules_flush.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_modules_flush.md" }, "put_api_new_tasks": { "properties": { @@ -2909,7 +2909,7 @@ "params" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/put_api_new_tasks.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/put_api_new_tasks.md" }, "put_api_replication_applier_adjust": { "properties": { @@ -3032,7 +3032,7 @@ "restrictType" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/put_api_replication_applier_adjust.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/put_api_replication_applier_adjust.md" }, "put_api_replication_makeSlave": { "properties": { @@ -3141,7 +3141,7 @@ "includeSystem" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/put_api_replication_makeSlave.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/put_api_replication_makeSlave.md" }, "put_api_replication_synchronize": { "properties": { @@ -3193,7 +3193,7 @@ "password" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/put_api_replication_synchronize.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/put_api_replication_synchronize.md" }, "put_api_simple_any": { "properties": { @@ -3206,7 +3206,7 @@ "collection" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_any.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_any.md" }, "put_api_simple_by_example": { "properties": { @@ -3239,7 +3239,7 @@ "limit" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_by_example.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_by_example.md" }, "put_api_simple_first_example": { "properties": { @@ -3257,7 +3257,7 @@ "example" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_first_example.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_first_example.md" }, "put_api_simple_fulltext": { 
"properties": { @@ -3295,7 +3295,7 @@ "index" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_fulltext.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_fulltext.md" }, "put_api_simple_near": { "properties": { @@ -3338,7 +3338,7 @@ "geo" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_near.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_near.md" }, "put_api_simple_range": { "properties": { @@ -3382,7 +3382,7 @@ "skip" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_range.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_range.md" }, "put_api_simple_remove_by_example": { "properties": { @@ -3403,7 +3403,7 @@ "example" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_remove_by_example.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_remove_by_example.md" }, "put_api_simple_remove_by_example_opts": { "description": "a json object which can contains following attributes:\n\n", @@ -3418,7 +3418,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_remove_by_example.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_remove_by_example.md" }, "put_api_simple_remove_by_keys_opts": { "description": "a json object which can contains following attributes:\n\n", @@ -3437,7 +3437,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/RestRemoveByKeys.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/RestRemoveByKeys.md" }, "put_api_simple_replace_by_example": { "properties": { @@ -3463,7 +3463,7 @@ "newValue" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_replace_by_example.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_replace_by_example.md" }, "put_api_simple_replace_by_example_options": { "description": "a json object which can contain following attributes\n\n", @@ -3478,7 +3478,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_replace_by_example.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_replace_by_example.md" }, "put_api_simple_update_by_example": { "properties": { @@ -3505,7 +3505,7 @@ "newValue" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_update_by_example.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_update_by_example.md" }, "put_api_simple_update_by_example_options": { "description": "a json object which can contains following 
attributes:\n\n", @@ -3529,7 +3529,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_update_by_example.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_update_by_example.md" }, "put_api_simple_within": { "properties": { @@ -3577,7 +3577,7 @@ "geo" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_within.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_within.md" }, "put_api_simple_within_rectangle": { "properties": { @@ -3625,7 +3625,7 @@ "geo" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_within_rectangle.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_within_rectangle.md" }, "put_api_view_properties_iresearch": { "properties": { @@ -3651,7 +3651,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/put_api_view_properties_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/put_api_view_properties_iresearch.md" }, "put_batch_replication": { "properties": { @@ -3665,7 +3665,7 @@ "ttl" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/put_batch_replication.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/put_batch_replication.md" }, "put_read_all_documents": { "properties": { @@ -3682,7 +3682,7 @@ "collection" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Documents/put_read_all_documents.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Documents/put_read_all_documents.md" }, "server_statistics_struct": { "description": "statistics of the server\n\n", @@ -3710,7 +3710,7 @@ "threads" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" }, "server_threads_struct": { "description": "Statistics about the server worker threads (excluding V8 specific or jemalloc specific threads and system threads)\n\n", @@ -3737,7 +3737,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" }, "setof_statistics_struct": { "description": "total connection times\n\nthe system time \n\nthe request times\n\nthe time requests were queued waiting for processing\n\nIO Time\n\nnumber of bytes sent to the clients\n\nnumber of bytes received from the clients\n\n\n", @@ -3811,7 +3811,7 @@ "system" ], "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" }, "v8_context_struct": { "description": "Statistics 
about the V8 javascript contexts\n\n", @@ -3843,7 +3843,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" }, "version_details_struct": { "description": "an optional JSON object with additional details. This is\nreturned only if the *details* query parameter is set to *true* in the\nrequest.\n\n", @@ -3982,7 +3982,7 @@ } }, "type": "object", - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_api_return.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_api_return.md" } }, "info": { @@ -4008,7 +4008,7 @@ "Cluster" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Cluster/get_cluster_health.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Cluster/get_cluster_health.md" } }, "/_admin/clusterStatistics": { @@ -4039,7 +4039,7 @@ "Cluster" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Cluster/get_cluster_statistics.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Cluster/get_cluster_statistics.md" } }, "/_admin/database/target-version": { @@ -4056,7 +4056,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_database_version.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_database_version.md" } }, "/_admin/echo": { @@ -4092,7 +4092,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/post_admin_echo.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/post_admin_echo.md" } }, "/_admin/execute": { @@ -4127,7 +4127,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/post_admin_execute.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/post_admin_execute.md" } }, "/_admin/log": { @@ -4207,7 +4207,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_modules_flush.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_modules_flush.md" } }, "/_admin/log/level": { @@ -4227,7 +4227,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_modules_flush.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_modules_flush.md" }, "put": { "description": "\n\nModifies and returns the server's current loglevel settings.\nThe request body must be a JSON object with the log topics being the object keys\nand the log levels being the object values.\n\nThe result is a JSON object with the adjusted log topics being the object keys, and\nthe adjusted log levels being the object values.\n\nIt can set the loglevel of all facilities by only specifying the loglevel as 
string without json.\n\nPossible loglevels are:\n - FATAL - There will be no way out of this. ArangoDB will go down after this message.\n - ERROR - This is an error. you should investigate and fix it. It may harm your production.\n - WARNING - This may be serious application-wise, but we don't know.\n - INFO - Something has happened, take notice, but no drama attached.\n - DEBUG - output debug messages\n - TRACE - trace - prepare your log to be flooded - don't use in production.\n\n\n**A JSON object with these properties is required:**\n\n - **audit-service**: One of the possible loglevels.\n - **cache**: One of the possible loglevels.\n - **syscall**: One of the possible loglevels.\n - **communication**: One of the possible loglevels.\n - **audit-authentication**: One of the possible loglevels.\n - **agencycomm**: One of the possible loglevels.\n - **startup**: One of the possible loglevels.\n - **general**: One of the possible loglevels.\n - **cluster**: One of the possible loglevels.\n - **audit-view**: One of the possible loglevels.\n - **collector**: One of the possible loglevels.\n - **audit-documentation**: One of the possible loglevels.\n - **engines**: One of the possible loglevels.\n - **trx**: One of the possible loglevels.\n - **mmap**: One of the possible loglevels.\n - **agency**: One of the possible loglevels.\n - **authentication**: One of the possible loglevels.\n - **memory**: One of the possible loglevels.\n - **performance**: One of the possible loglevels.\n - **config**: One of the possible loglevels.\n - **authorization**: One of the possible loglevels.\n - **development**: One of the possible loglevels.\n - **datafiles**: One of the possible loglevels.\n - **views**: One of the possible loglevels.\n - **ldap**: One of the possible loglevels.\n - **replication**: One of the possible loglevels.\n - **threads**: One of the possible loglevels.\n - **audit-database**: One of the possible loglevels.\n - **v8**: One of the possible loglevels.\n - **ssl**: One of the possible loglevels.\n - **pregel**: One of the possible loglevels.\n - **audit-collection**: One of the possible loglevels.\n - **rocksdb**: One of the possible loglevels.\n - **supervision**: One of the possible loglevels.\n - **graphs**: One of the possible loglevels.\n - **compactor**: One of the possible loglevels.\n - **queries**: One of the possible loglevels.\n - **heartbeat**: One of the possible loglevels.\n - **requests**: One of the possible loglevels.\n\n\n", @@ -4261,7 +4261,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_modules_flush.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_modules_flush.md" } }, "/_admin/routing/reload": { @@ -4278,7 +4278,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_routing_reloads.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_routing_reloads.md" } }, "/_admin/server/availability": { @@ -4298,7 +4298,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_server_availability.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_server_availability.md" } }, "/_admin/server/id": { @@ 
-4318,7 +4318,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_server_id.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_server_id.md" } }, "/_admin/server/role": { @@ -4342,7 +4342,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_server_role.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_server_role.md" } }, "/_admin/shutdown": { @@ -4359,7 +4359,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/delete_api_shutdown.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/delete_api_shutdown.md" } }, "/_admin/statistics": { @@ -4383,7 +4383,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics.md" } }, "/_admin/statistics-description": { @@ -4407,7 +4407,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics_description.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_statistics_description.md" } }, "/_admin/status": { @@ -4424,7 +4424,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/JSF_get_admin_status.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/JSF_get_admin_status.md" } }, "/_admin/time": { @@ -4448,7 +4448,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_admin_time.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_admin_time.md" } }, "/_admin/wal/flush": { @@ -4483,7 +4483,7 @@ "wal" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/wal/put_admin_wal_flush.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/wal/put_admin_wal_flush.md" } }, "/_admin/wal/properties": { @@ -4503,7 +4503,7 @@ "wal" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/wal/get_admin_wal_properties.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/wal/get_admin_wal_properties.md" }, "put": { "description": "\n\nConfigures the behavior of the write-ahead log. 
The body of the request\nmust be a JSON object with the following attributes:\n- *allowOversizeEntries*: whether or not operations that are bigger than a\n single logfile can be executed and stored\n- *logfileSize*: the size of each write-ahead logfile\n- *historicLogfiles*: the maximum number of historic logfiles to keep\n- *reserveLogfiles*: the maximum number of reserve logfiles that ArangoDB\n allocates in the background\n- *throttleWait*: the maximum wait time that operations will wait before\n they get aborted if case of write-throttling (in milliseconds)\n- *throttleWhenPending*: the number of unprocessed garbage-collection\n operations that, when reached, will activate write-throttling. A value of\n *0* means that write-throttling will not be triggered.\n\nSpecifying any of the above attributes is optional. Not specified attributes\nwill be ignored and the configuration for them will not be modified.\n\n\n\n\n**Example:**\n \n\n
shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_admin/wal/properties <<EOF\n{ \n  \"logfileSize\" : 33554432, \n  \"allowOversizeEntries\" : true \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"allowOversizeEntries\" : true, \n  \"logfileSize\" : 33554432, \n  \"historicLogfiles\" : 10, \n  \"reserveLogfiles\" : 3, \n  \"syncInterval\" : 100, \n  \"throttleWait\" : 15000, \n  \"throttleWhenPending\" : 0 \n}\n
\n\n\n\n\n", @@ -4521,7 +4521,7 @@ "wal" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/wal/put_admin_wal_properties.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/wal/put_admin_wal_properties.md" } }, "/_admin/wal/transactions": { @@ -4541,7 +4541,7 @@ "wal" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/wal/get_admin_wal_transactions.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/wal/get_admin_wal_transactions.md" } }, "/_api/aqlfunction": { @@ -4580,7 +4580,7 @@ "AQL" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/get_api_aqlfunction.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/get_api_aqlfunction.md" }, "post": { "description": "\n**A JSON object with these properties is required:**\n\n - **isDeterministic**: an optional boolean value to indicate whether the function\n results are fully deterministic (function return value solely depends on\n the input value and return value is the same for repeated calls with same\n input). The *isDeterministic* attribute is currently not used but may be\n used later for optimisations.\n - **code**: a string representation of the function body.\n - **name**: the fully qualified name of the user functions.\n\n\n\n\n\nIn case of success, HTTP 200 is returned.\nIf the function isn't valid etc. HTTP 400 including a detailed error message will be returned.\n\n\n#### HTTP 200\n*A json document with these Properties is returned:*\n\nIf the function already existed and was replaced by the\ncall, the server will respond with *HTTP 200*.\n\n- **code**: the HTTP status code\n- **error**: boolean flag to indicate whether an error occurred (*false* in this case)\n\n\n#### HTTP 201\n*A json document with these Properties is returned:*\n\nIf the function can be registered by the server, the server will respond with\n*HTTP 201*.\n\n- **code**: the HTTP status code\n- **error**: boolean flag to indicate whether an error occurred (*false* in this case)\n\n\n#### HTTP 400\n*A json document with these Properties is returned:*\n\nIf the JSON representation is malformed or mandatory data is missing from the\nrequest, the server will respond with *HTTP 400*.\n\n- **errorMessage**: a descriptive error message\n- **errorNum**: the server error number\n- **code**: the HTTP status code\n- **error**: boolean flag to indicate whether an error occurred (*true* in this case)\n\n\n\n\n**Example:**\n \n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/aqlfunction <<EOF\n{ \n  \"name\" : \"myfunctions::temperature::celsiustofahrenheit\", \n  \"code\" : \"function (celsius) { return celsius * 1.8 + 32; }\", \n  \"isDeterministic\" : true \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : false, \n  \"code\" : 201 \n}\n
\n\n\n\n\n", @@ -4626,7 +4626,7 @@ "AQL" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/post_api_aqlfunction.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/post_api_aqlfunction.md" } }, "/_api/aqlfunction/{name}": { @@ -4680,7 +4680,7 @@ "AQL" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/delete_api_aqlfunction.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/delete_api_aqlfunction.md" } }, "/_api/batch": { @@ -4715,7 +4715,7 @@ "Bulk" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Bulk/batch_processing.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Bulk/batch_processing.md" } }, "/_api/cluster/endpoints": { @@ -4742,7 +4742,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_api_cluster_endpoints.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_api_cluster_endpoints.md" } }, "/_api/collection": { @@ -4767,7 +4767,7 @@ "Collections" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collections.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collections.md" }, "post": { "description": "\n\nCreates a new collection with a given name. The request must contain an\nobject with the following attributes.\n\n\n**A JSON object with these properties is required:**\n\n - **journalSize**: The maximal size of a journal or datafile in bytes. The value\n must be at least `1048576` (1 MiB). (The default is a configuration parameter)\n This option is meaningful for the MMFiles storage engine only.\n - **replicationFactor**: (The default is *1*): in a cluster, this attribute determines how many copies\n of each shard are kept on different DBServers. The value 1 means that only one\n copy (no synchronous replication) is kept. A value of k means that k-1 replicas\n are kept. Any two copies reside on different DBServers. Replication between them is \n synchronous, that is, every write operation to the \"leader\" copy will be replicated \n to all \"follower\" replicas, before the write operation is reported successful.\n If a server fails, this is detected automatically and one of the servers holding \n copies take over, usually without an error being reported.\n - **keyOptions**:\n - **allowUserKeys**: if set to *true*, then it is allowed to supply own key values in the\n *_key* attribute of a document. If set to *false*, then the key generator\n will solely be responsible for generating keys and supplying own key values\n in the *_key* attribute of documents is considered an error.\n - **type**: specifies the type of the key generator. The currently available generators are\n *traditional* and *autoincrement*.\n - **increment**: increment value for *autoincrement* key generator. Not used for other key\n generator types.\n - **offset**: Initial offset value for *autoincrement* key generator.\n Not used for other key generator types.\n - **name**: The name of the collection.\n - **waitForSync**: If *true* then the data is synchronized to disk before returning from a\n document create, update, replace or removal operation. 
(default: false)\n - **doCompact**: whether or not the collection will be compacted (default is *true*)\n This option is meaningful for the MMFiles storage engine only.\n - **isVolatile**: If *true* then the collection data is kept in-memory only and not made persistent.\n Unloading the collection will cause the collection data to be discarded. Stopping\n or re-starting the server will also cause full loss of data in the\n collection. Setting this option will make the resulting collection be\n slightly faster than regular collections because ArangoDB does not\n enforce any synchronization to disk and does not calculate any CRC\n checksums for datafiles (as there are no datafiles). This option\n should therefore be used for cache-type collections only, and not\n for data that cannot be re-created otherwise.\n (The default is *false*)\n This option is meaningful for the MMFiles storage engine only.\n - **shardKeys**: (The default is *[ \"_key\" ]*): in a cluster, this attribute determines\n which document attributes are used to determine the target shard for documents.\n Documents are sent to shards based on the values of their shard key attributes.\n The values of all shard key attributes in a document are hashed,\n and the hash value is used to determine the target shard.\n **Note**: Values of shard key attributes cannot be changed once set.\n This option is meaningless in a single server setup.\n - **numberOfShards**: (The default is *1*): in a cluster, this value determines the\n number of shards to create for the collection. In a single\n server setup, this option is meaningless.\n - **isSystem**: If *true*, create a system collection. In this case *collection-name*\n should start with an underscore. End users should normally create non-system\n collections only. API implementors may be required to create system\n collections in very special occasions, but normally a regular collection will do.\n (The default is *false*)\n - **type**: (The default is *2*): the type of the collection to create.\n The following values for *type* are valid:\n - *2*: document collection\n - *3*: edges collection\n - **indexBuckets**: The number of buckets into which indexes using a hash\n table are split. The default is 16 and this number has to be a\n power of 2 and less than or equal to 1024.\n For very large collections one should increase this to avoid long pauses\n when the hash table has to be initially built or resized, since buckets\n are resized individually and can be initially built in parallel. For\n example, 64 might be a sensible value for a collection with 100\n 000 000 documents. Currently, only the edge index respects this\n value, but other index types might follow in future ArangoDB versions.\n Changes (see below) are applied when the collection is loaded the next\n time.\n This option is meaningful for the MMFiles storage engine only.\n - **distributeShardsLike**: (The default is *\"\"*): in an enterprise cluster, this attribute binds\n the specifics of sharding for the newly created collection to follow that of a\n specified existing collection.\n **Note**: Using this parameter has consequences for the prototype\n collection. It can no longer be dropped, before sharding imitating\n collections are dropped. Equally, backups and restores of imitating\n collections alone will generate warnings, which can be overridden,\n about missing sharding prototype.\n\n\n\n\n\n**Example:**\n \n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF\n{ \n  \"name\" : \"testCollectionBasics\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"status\" : 3, \n  \"statusString\" : \"loaded\", \n  \"name\" : \"testCollectionBasics\", \n  \"keyOptions\" : { \n    \"type\" : \"traditional\", \n    \"allowUserKeys\" : true, \n    \"lastValue\" : 0 \n  }, \n  \"type\" : 2, \n  \"indexBuckets\" : 8, \n  \"globallyUniqueId\" : \"h1AA24B099AC2/11464\", \n  \"doCompact\" : true, \n  \"waitForSync\" : false, \n  \"id\" : \"11464\", \n  \"isSystem\" : false, \n  \"journalSize\" : 33554432, \n  \"isVolatile\" : false \n}\nshell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF\n{ \n  \"name\" : \"testCollectionEdges\", \n  \"type\" : 3 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"status\" : 3, \n  \"statusString\" : \"loaded\", \n  \"name\" : \"testCollectionEdges\", \n  \"keyOptions\" : { \n    \"type\" : \"traditional\", \n    \"allowUserKeys\" : true, \n    \"lastValue\" : 0 \n  }, \n  \"type\" : 3, \n  \"indexBuckets\" : 8, \n  \"globallyUniqueId\" : \"h1AA24B099AC2/11467\", \n  \"doCompact\" : true, \n  \"waitForSync\" : false, \n  \"id\" : \"11467\", \n  \"isSystem\" : false, \n  \"journalSize\" : 33554432, \n  \"isVolatile\" : false \n}\n
\n\n\n\n\n**Example:**\n \n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF\n{ \n  \"name\" : \"testCollectionUsers\", \n  \"keyOptions\" : { \n    \"type\" : \"autoincrement\", \n    \"increment\" : 5, \n    \"allowUserKeys\" : true \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"status\" : 3, \n  \"statusString\" : \"loaded\", \n  \"name\" : \"testCollectionUsers\", \n  \"keyOptions\" : { \n    \"type\" : \"autoincrement\", \n    \"allowUserKeys\" : true, \n    \"offset\" : 0, \n    \"increment\" : 5, \n    \"lastValue\" : 0 \n  }, \n  \"type\" : 2, \n  \"indexBuckets\" : 8, \n  \"globallyUniqueId\" : \"h1AA24B099AC2/11472\", \n  \"doCompact\" : true, \n  \"waitForSync\" : false, \n  \"id\" : \"11472\", \n  \"isSystem\" : false, \n  \"journalSize\" : 33554432, \n  \"isVolatile\" : false \n}\n
\n\n\n\n\n", @@ -4809,7 +4809,7 @@ "Collections" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/post_api_collection.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/post_api_collection.md" } }, "/_api/collection/{collection-name}": { @@ -4845,7 +4845,7 @@ "Collections" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/delete_api_collection.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/delete_api_collection.md" }, "get": { "description": "\n\nThe result is an object describing the collection with the following\nattributes:\n\n- *id*: The identifier of the collection.\n\n- *name*: The name of the collection.\n\n- *status*: The status of the collection as number.\n - 1: new born collection\n - 2: unloaded\n - 3: loaded\n - 4: in the process of being unloaded\n - 5: deleted\n - 6: loading\n\nEvery other status indicates a corrupted collection.\n\n- *type*: The type of the collection as number.\n - 2: document collection (normal case)\n - 3: edges collection\n\n- *isSystem*: If *true* then the collection is a system collection.\n\n", @@ -4869,7 +4869,7 @@ "Collections" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_name.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_name.md" } }, "/_api/collection/{collection-name}/checksum": { @@ -4912,7 +4912,7 @@ "Collections" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_checksum.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_checksum.md" } }, "/_api/collection/{collection-name}/count": { @@ -4941,7 +4941,7 @@ "Collections" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_count.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_count.md" } }, "/_api/collection/{collection-name}/figures": { @@ -4980,7 +4980,7 @@ "Collections" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_figures.md" } }, "/_api/collection/{collection-name}/load": { @@ -5009,7 +5009,7 @@ "Collections" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/put_api_collection_load.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/put_api_collection_load.md" } }, "/_api/collection/{collection-name}/loadIndexesIntoMemory": { @@ -5041,7 +5041,7 @@ "Collections" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/put_api_collection_load_indexes_in_memory.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/put_api_collection_load_indexes_in_memory.md" } }, "/_api/collection/{collection-name}/properties": { @@ -5070,7 +5070,7 @@ "Collections" ], "x-examples": [], - 
"x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_properties.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_properties.md" }, "put": { "description": "\n\nChanges the properties of a collection. Expects an object with the\nattribute(s)\n\n- *waitForSync*: If *true* then creating or changing a\n document will wait until the data has been synchronized to disk.\n\n- *journalSize*: The maximal size of a journal or datafile in bytes. \n The value must be at least `1048576` (1 MB). Note that when\n changing the journalSize value, it will only have an effect for\n additional journals or datafiles that are created. Already\n existing journals or datafiles will not be affected.\n\nOn success an object with the following attributes is returned:\n\n- *id*: The identifier of the collection.\n\n- *name*: The name of the collection.\n\n- *waitForSync*: The new value.\n\n- *journalSize*: The new value.\n\n- *status*: The status of the collection as number.\n\n- *type*: The collection type. Valid types are:\n - 2: document collection\n - 3: edges collection\n\n- *isSystem*: If *true* then the collection is a system collection.\n\n- *isVolatile*: If *true* then the collection data will be\n kept in memory only and ArangoDB will not write or sync the data\n to disk.\n\n- *doCompact*: Whether or not the collection will be compacted.\n\n- *keyOptions*: JSON object which contains key generation options:\n - *type*: specifies the type of the key generator. The currently\n available generators are *traditional* and *autoincrement*.\n - *allowUserKeys*: if set to *true*, then it is allowed to supply\n own key values in the *_key* attribute of a document. If set to\n *false*, then the key generator is solely responsible for\n generating keys and supplying own key values in the *_key* attribute\n of documents is considered an error.\n\n**Note**: except for *waitForSync*, *journalSize* and *name*, collection\nproperties **cannot be changed** once a collection is created. To rename\na collection, the rename endpoint must be used.\n\n\n\n\n**Example:**\n \n\n
shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/collection/products/properties <<EOF\n{ \n  \"waitForSync\" : true \n}\nEOF\n\nHTTP/1.1 200 OK\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\nlocation: /_api/collection/products/properties\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"status\" : 3, \n  \"statusString\" : \"loaded\", \n  \"name\" : \"products\", \n  \"keyOptions\" : { \n    \"type\" : \"traditional\", \n    \"allowUserKeys\" : true, \n    \"lastValue\" : 0 \n  }, \n  \"type\" : 2, \n  \"indexBuckets\" : 8, \n  \"globallyUniqueId\" : \"h1AA24B099AC2/11863\", \n  \"doCompact\" : true, \n  \"waitForSync\" : true, \n  \"id\" : \"11863\", \n  \"isSystem\" : false, \n  \"journalSize\" : 33554432, \n  \"isVolatile\" : false \n}\n
\n\n\n\n\n", @@ -5097,7 +5097,7 @@ "Collections" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/put_api_collection_properties.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/put_api_collection_properties.md" } }, "/_api/collection/{collection-name}/rename": { @@ -5126,7 +5126,7 @@ "Collections" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/put_api_collection_rename.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/put_api_collection_rename.md" } }, "/_api/collection/{collection-name}/revision": { @@ -5155,7 +5155,7 @@ "Collections" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/get_api_collection_revision.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/get_api_collection_revision.md" } }, "/_api/collection/{collection-name}/rotate": { @@ -5184,7 +5184,7 @@ "Collections" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/put_api_collection_rotate.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/put_api_collection_rotate.md" } }, "/_api/collection/{collection-name}/truncate": { @@ -5213,7 +5213,7 @@ "Collections" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/put_api_collection_truncate.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/put_api_collection_truncate.md" } }, "/_api/collection/{collection-name}/unload": { @@ -5242,7 +5242,7 @@ "Collections" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Collections/put_api_collection_unload.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Collections/put_api_collection_unload.md" } }, "/_api/cursor": { @@ -5289,7 +5289,7 @@ "Cursors" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Cursors/post_api_cursor.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Cursors/post_api_cursor.md" } }, "/_api/cursor/{cursor-identifier}": { @@ -5318,7 +5318,7 @@ "Cursors" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Cursors/post_api_cursor_delete.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Cursors/post_api_cursor_delete.md" }, "put": { "description": "\n\nIf the cursor is still alive, returns an object with the following\nattributes:\n\n- *id*: the *cursor-identifier*\n- *result*: a list of documents for the current batch\n- *hasMore*: *false* if this was the last batch\n- *count*: if present the total number of elements\n\nNote that even if *hasMore* returns *true*, the next call might\nstill return no documents. If, however, *hasMore* is *false*, then\nthe cursor is exhausted. Once the *hasMore* attribute has a value of\n*false*, the client can stop.\n\n\n\n\n**Example:**\n Valid request for next batch\n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR p IN products LIMIT 5 RETURN p\", \n  \"count\" : true, \n  \"batchSize\" : 2 \n}\nEOF\n\nshell> curl -X PUT --dump - http://localhost:8529/_api/cursor/12019\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"result\" : [ \n    { \n      \"_key\" : \"12013\", \n      \"_id\" : \"products/12013\", \n      \"_rev\" : \"_WnWW0km--F\", \n      \"hello4\" : \"world1\" \n    }, \n    { \n      \"_key\" : \"12003\", \n      \"_id\" : \"products/12003\", \n      \"_rev\" : \"_WnWW0km--_\", \n      \"hello1\" : \"world1\" \n    } \n  ], \n  \"hasMore\" : true, \n  \"id\" : \"12019\", \n  \"count\" : 5, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 5, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0, \n      \"httpRequests\" : 0, \n      \"executionTime\" : 0.00010180473327636719 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"cached\" : false \n}\n
\n\n\n\n\n**Example:**\n Missing identifier\n\n
shell> curl -X PUT --dump - http://localhost:8529/_api/cursor\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"expecting PUT /_api/cursor/<cursor-id>\", \n  \"code\" : 400, \n  \"errorNum\" : 400 \n}\n
\n\n\n\n\n**Example:**\n Unknown identifier\n\n
shell> curl -X PUT --dump - http://localhost:8529/_api/cursor/123123\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"cursor not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1600 \n}\n
\n\n\n\n\n", @@ -5348,7 +5348,7 @@ "Cursors" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Cursors/post_api_cursor_identifier.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Cursors/post_api_cursor_identifier.md" } }, "/_api/database": { @@ -5371,7 +5371,7 @@ "Database" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Database/get_api_database_list.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Database/get_api_database_list.md" }, "post": { "description": "\n**A JSON object with these properties is required:**\n\n - **name**: Has to contain a valid database name.\n - **users**: Has to be an array of user objects to initially create for the new database.\n User information will not be changed for users that already exist.\n If *users* is not specified or does not contain any users, a default user\n *root* will be created with an empty string password. This ensures that the\n new database will be accessible after it is created.\n Each user object can contain the following attributes:\n - **username**: Loginname of the user to be created\n - **passwd**: The user password as a string. If not specified, it will default to an empty string.\n - **active**: A flag indicating whether the user account should be activated or not.\n The default value is *true*. If set to *false*, the user won't be able to\n log into the database.\n - **extra**: A JSON object with extra user information. The data contained in *extra*\n will be stored for the user but not be interpreted further by ArangoDB.\n\n\n\n\nCreates a new database\n\nThe response is a JSON object with the attribute *result* set to *true*.\n\n**Note**: creating a new database is only possible from within the *_system* database.\n\n\n\n\n**Example:**\n Creating a database named *example*.\n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/database <<EOF\n{ \n  \"name\" : \"example\" \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : false, \n  \"code\" : 201, \n  \"result\" : true \n}\n
\n\n\n\n\n**Example:**\n Creating a database named *mydb* with two users.\n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/database <<EOF\n{ \n  \"name\" : \"mydb\", \n  \"users\" : [ \n    { \n      \"username\" : \"admin\", \n      \"passwd\" : \"secret\", \n      \"active\" : true \n    }, \n    { \n      \"username\" : \"tester\", \n      \"passwd\" : \"test001\", \n      \"active\" : false \n    } \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : false, \n  \"code\" : 201, \n  \"result\" : true \n}\n
\n\n\n\n\n", @@ -5405,7 +5405,7 @@ "Database" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Database/get_api_database_new.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Database/get_api_database_new.md" } }, "/_api/database/current": { @@ -5428,7 +5428,7 @@ "Database" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Database/get_api_database_current.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Database/get_api_database_current.md" } }, "/_api/database/user": { @@ -5448,7 +5448,7 @@ "Database" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Database/get_api_database_user.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Database/get_api_database_user.md" } }, "/_api/database/{database-name}": { @@ -5483,7 +5483,7 @@ "Database" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Database/get_api_database_delete.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Database/get_api_database_delete.md" } }, "/_api/document/{collection}": { @@ -5547,7 +5547,7 @@ "Documents" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Documents/delete_mutliple_documents.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Documents/delete_mutliple_documents.md" }, "patch": { "description": "\n\nPartially updates documents, the documents to update are specified\nby the *_key* attributes in the body objects. The body of the\nrequest must contain a JSON array of document updates with the\nattributes to patch (the patch documents). All attributes from the\npatch documents will be added to the existing documents if they do\nnot yet exist, and overwritten in the existing documents if they do\nexist there.\n\nSetting an attribute value to *null* in the patch documents will cause a\nvalue of *null* to be saved for the attribute by default.\n\nIf *ignoreRevs* is *false* and there is a *_rev* attribute in a\ndocument in the body and its value does not match the revision of\nthe corresponding document in the database, the precondition is\nviolated.\n\nIf the document exists and can be updated, then an *HTTP 201* or\nan *HTTP 202* is returned (depending on *waitForSync*, see below).\n\nOptionally, the query parameter *waitForSync* can be used to force\nsynchronization of the document replacement operation to disk even in case\nthat the *waitForSync* flag had been disabled for the entire collection.\nThus, the *waitForSync* query parameter can be used to force synchronization\nof just specific operations. To use this, set the *waitForSync* parameter\nto *true*. If the *waitForSync* parameter is not specified or set to\n*false*, then the collection's default *waitForSync* behavior is\napplied. The *waitForSync* query parameter cannot be used to disable\nsynchronization for collections that have a default *waitForSync* value\nof *true*.\n\nThe body of the response contains a JSON array of the same length\nas the input array with the information about the handle and the\nrevision of the updated documents. 
In each entry, the attribute\n*_id* contains the known *document-handle* of each updated document,\n*_key* contains the key which uniquely identifies a document in a\ngiven collection, and the attribute *_rev* contains the new document\nrevision. In case of an error or violated precondition, an error\nobject with the attribute *error* set to *true* and the attribute\n*errorCode* set to the error code is built.\n\nIf the query parameter *returnOld* is *true*, then, for each\ngenerated document, the complete previous revision of the document\nis returned under the *old* attribute in the result.\n\nIf the query parameter *returnNew* is *true*, then, for each\ngenerated document, the complete new document is returned under\nthe *new* attribute in the result.\n\nNote that if any precondition is violated or an error occurred with\nsome of the documents, the return code is still 201 or 202, but\nthe additional HTTP header *X-Arango-Error-Codes* is set, which\ncontains a map of the error codes that occurred together with their\nmultiplicities, as in: *1200:17,1205:10* which means that in 17\ncases the error 1200 \"revision conflict\" and in 10 cases the error\n1205 \"illegal document handle\" has happened.\n\n", @@ -5633,7 +5633,7 @@ "Documents" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Documents/patch_update_multiple_documents.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Documents/patch_update_multiple_documents.md" }, "post": { "description": "\n\nCreates a new document from the document given in the body, unless there\nis already a document with the *_key* given. If no *_key* is given, a new\nunique *_key* is generated automatically.\n\nThe body can be an array of documents, in which case all\ndocuments in the array are inserted with the same semantics as for a\nsingle document. The result body will contain a JSON array of the\nsame length as the input array, and each entry contains the result\nof the operation for the corresponding input. In case of an error\nthe entry is a document with attributes *error* set to *true* and\nerrorCode set to the error code that has happened.\n\nPossibly given *_id* and *_rev* attributes in the body are always ignored,\nthe URL part or the query parameter collection respectively counts.\n\nIf the document was created successfully, then the *Location* header\ncontains the path to the newly created document. The *Etag* header field\ncontains the revision of the document. Both are only set in the single\ndocument case.\n\nIf *silent* is not set to *true*, the body of the response contains a \nJSON object (single document case) with the following attributes:\n\n - *_id* contains the document handle of the newly created document\n - *_key* contains the document key\n - *_rev* contains the document revision\n\nIn the multi case the body is an array of such objects.\n\nIf the collection parameter *waitForSync* is *false*, then the call\nreturns as soon as the document has been accepted. It will not wait\nuntil the documents have been synced to disk.\n\nOptionally, the query parameter *waitForSync* can be used to force\nsynchronization of the document creation operation to disk even in\ncase that the *waitForSync* flag had been disabled for the entire\ncollection. Thus, the *waitForSync* query parameter can be used to\nforce synchronization of just this specific operations. To use this,\nset the *waitForSync* parameter to *true*. 
If the *waitForSync*\nparameter is not specified or set to *false*, then the collection's\ndefault *waitForSync* behavior is applied. The *waitForSync* query\nparameter cannot be used to disable synchronization for collections\nthat have a default *waitForSync* value of *true*.\n\nIf the query parameter *returnNew* is *true*, then, for each\ngenerated document, the complete new document is returned under\nthe *new* attribute in the result.\n\n\n\n\n**Example:**\n Create a document in a collection named *products*. Note that the\nrevision identifier might or might not be equal to the auto-generated\nkey.\n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document/products <<EOF\n{ \"Hello\": \"World\" }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\netag: \"_WnWW1Ey--_\"\nlocation: /_db/_system/_api/document/products/12271\n\n{ \n  \"_id\" : \"products/12271\", \n  \"_key\" : \"12271\", \n  \"_rev\" : \"_WnWW1Ey--_\" \n}\n
\n\n\n\n\n**Example:**\n Create a document in a collection named *products* with a collection-level\n*waitForSync* value of *false*.\n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document/products <<EOF\n{ \"Hello\": \"World\" }\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\netag: \"_WnWW1DC--_\"\nlocation: /_db/_system/_api/document/products/12259\n\n{ \n  \"_id\" : \"products/12259\", \n  \"_key\" : \"12259\", \n  \"_rev\" : \"_WnWW1DC--_\" \n}\n
\n\n\n\n\n**Example:**\n Create a document in a collection with a collection-level *waitForSync*\nvalue of *false*, but using the *waitForSync* query parameter.\n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document/products?waitForSync=true <<EOF\n{ \"Hello\": \"World\" }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\netag: \"_WnWW1G6--_\"\nlocation: /_db/_system/_api/document/products/12301\n\n{ \n  \"_id\" : \"products/12301\", \n  \"_key\" : \"12301\", \n  \"_rev\" : \"_WnWW1G6--_\" \n}\n
\n\n\n\n\n**Example:**\n Unknown collection name\n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document/products <<EOF\n{ \"Hello\": \"World\" }\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"collection or view not found: products\", \n  \"code\" : 404, \n  \"errorNum\" : 1203 \n}\n
\n\n\n\n\n**Example:**\n Illegal document\n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document/products <<EOF\n{ 1: \"World\" }\nEOF\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"VPackError error: Expecting '\\\"' or '}'\", \n  \"code\" : 400, \n  \"errorNum\" : 600 \n}\n
\n\n\n\n\n**Example:**\n Insert multiple documents:\n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document/products <<EOF\n[{\"Hello\":\"Earth\"}, {\"Hello\":\"Venus\"}, {\"Hello\":\"Mars\"}]\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n[ \n  { \n    \"_id\" : \"products/12279\", \n    \"_key\" : \"12279\", \n    \"_rev\" : \"_WnWW1Fm--_\" \n  }, \n  { \n    \"_id\" : \"products/12283\", \n    \"_key\" : \"12283\", \n    \"_rev\" : \"_WnWW1Fm--B\" \n  }, \n  { \n    \"_id\" : \"products/12285\", \n    \"_key\" : \"12285\", \n    \"_rev\" : \"_WnWW1Fm--D\" \n  } \n]\n
\n\n\n\n\n**Example:**\n Use of returnNew:\n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document/products?returnNew=true <<EOF\n{\"Hello\":\"World\"}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\netag: \"_WnWW1GO--_\"\nlocation: /_db/_system/_api/document/products/12293\n\n{ \n  \"_id\" : \"products/12293\", \n  \"_key\" : \"12293\", \n  \"_rev\" : \"_WnWW1GO--_\", \n  \"new\" : { \n    \"_key\" : \"12293\", \n    \"_id\" : \"products/12293\", \n    \"_rev\" : \"_WnWW1GO--_\", \n    \"Hello\" : \"World\" \n  } \n}\n
\n\n\n\n\n**Example:**\n \n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document/products <<EOF\n{ \"Hello\": \"World\", \"_key\" : \"lock\" }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\netag: \"_W3YeJtq--_\"\nlocation: /_db/_system/_api/document/products/lock\n\n{ \n  \"_id\" : \"products/lock\", \n  \"_key\" : \"lock\", \n  \"_rev\" : \"_W3YeJtq--_\" \n}\nshell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document/products?overwrite=true <<EOF\n{ \"Hello\": \"Universe\", \"_key\" : \"lock\" }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\netag: \"_W3YeJuG--B\"\nlocation: /_db/_system/_api/document/products/lock\n\n{ \n  \"_id\" : \"products/lock\", \n  \"_key\" : \"lock\", \n  \"_rev\" : \"_W3YeJuG--B\", \n  \"_oldRev\" : \"_W3YeJtq--_\" \n}\n
\n\n\n\n\n\n", @@ -5722,7 +5722,7 @@ "Documents" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Documents/post_create_document.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Documents/post_create_document.md" }, "put": { "description": "\n\nReplaces multiple documents in the specified collection with the\nones in the body, the replaced documents are specified by the *_key*\nattributes in the body documents.\n\nIf *ignoreRevs* is *false* and there is a *_rev* attribute in a\ndocument in the body and its value does not match the revision of\nthe corresponding document in the database, the precondition is\nviolated.\n\nIf the document exists and can be updated, then an *HTTP 201* or\nan *HTTP 202* is returned (depending on *waitForSync*, see below).\n\nOptionally, the query parameter *waitForSync* can be used to force\nsynchronization of the document replacement operation to disk even in case\nthat the *waitForSync* flag had been disabled for the entire collection.\nThus, the *waitForSync* query parameter can be used to force synchronization\nof just specific operations. To use this, set the *waitForSync* parameter\nto *true*. If the *waitForSync* parameter is not specified or set to\n*false*, then the collection's default *waitForSync* behavior is\napplied. The *waitForSync* query parameter cannot be used to disable\nsynchronization for collections that have a default *waitForSync* value\nof *true*.\n\nThe body of the response contains a JSON array of the same length\nas the input array with the information about the handle and the\nrevision of the replaced documents. In each entry, the attribute\n*_id* contains the known *document-handle* of each updated document,\n*_key* contains the key which uniquely identifies a document in a\ngiven collection, and the attribute *_rev* contains the new document\nrevision. 
In case of an error or violated precondition, an error\nobject with the attribute *error* set to *true* and the attribute\n*errorCode* set to the error code is built.\n\nIf the query parameter *returnOld* is *true*, then, for each\ngenerated document, the complete previous revision of the document\nis returned under the *old* attribute in the result.\n\nIf the query parameter *returnNew* is *true*, then, for each\ngenerated document, the complete new document is returned under\nthe *new* attribute in the result.\n\nNote that if any precondition is violated or an error occurred with\nsome of the documents, the return code is still 201 or 202, but\nthe additional HTTP header *X-Arango-Error-Codes* is set, which\ncontains a map of the error codes that occurred together with their\nmultiplicities, as in: *1200:17,1205:10* which means that in 17\ncases the error 1200 \"revision conflict\" and in 10 cases the error\n1205 \"illegal document handle\" has happened.\n\n", @@ -5794,7 +5794,7 @@ "Documents" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Documents/put_replace_multiple_documents.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Documents/put_replace_multiple_documents.md" } }, "/_api/document/{document-handle}": { @@ -5856,7 +5856,7 @@ "Documents" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Documents/delete_remove_document.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Documents/delete_remove_document.md" }, "get": { "description": "\n\nReturns the document identified by *document-handle*. The returned\ndocument contains three special attributes: *_id* containing the document\nhandle, *_key* containing key which uniquely identifies a document\nin a given collection and *_rev* containing the revision.\n\n\n\n\n**Example:**\n Use a document handle:\n\n
shell> curl --dump - http://localhost:8529/_api/document/products/12309\n\nHTTP/1.1 200 OK\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: \"_WnWW1Hq--_\"\n\n{ \n  \"_key\" : \"12309\", \n  \"_id\" : \"products/12309\", \n  \"_rev\" : \"_WnWW1Hq--_\", \n  \"hello\" : \"world\" \n}\n
\n\n\n\n\n**Example:**\n Use a document handle and an Etag:\n\n
shell> curl --header 'If-None-Match: \"_WnWW1KC--_\"' --dump - http://localhost:8529/_api/document/products/12357\n\n
\n\n\n\n\n**Example:**\n Unknown document handle:\n\n
shell> curl --dump - http://localhost:8529/_api/document/products/unknownhandle\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"collection or view not found: products\", \n  \"code\" : 404, \n  \"errorNum\" : 1203 \n}\n
\n\n\n\n\n", @@ -5901,7 +5901,7 @@ "Documents" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Documents/get_read_document.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Documents/get_read_document.md" }, "head": { "description": "\n\nLike *GET*, but only returns the header fields and not the body. You\ncan use this call to get the current revision of a document or check if\nthe document was deleted.\n\n\n\n\n**Example:**\n \n\n
shell> curl -X HEAD --dump - http://localhost:8529/_api/document/products/12348\n\n
\n\n\n\n\n\n", @@ -5946,7 +5946,7 @@ "Documents" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Documents/head_read_document_header.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Documents/head_read_document_header.md" }, "patch": { "description": "\n\nPartially updates the document identified by *document-handle*.\nThe body of the request must contain a JSON document with the\nattributes to patch (the patch document). All attributes from the\npatch document will be added to the existing document if they do not\nyet exist, and overwritten in the existing document if they do exist\nthere.\n\nSetting an attribute value to *null* in the patch document will cause a\nvalue of *null* to be saved for the attribute by default.\n\nIf the *If-Match* header is specified and the revision of the\ndocument in the database is unequal to the given revision, the\nprecondition is violated.\n\nIf *If-Match* is not given and *ignoreRevs* is *false* and there\nis a *_rev* attribute in the body and its value does not match\nthe revision of the document in the database, the precondition is\nviolated.\n\nIf a precondition is violated, an *HTTP 412* is returned.\n\nIf the document exists and can be updated, then an *HTTP 201* or\nan *HTTP 202* is returned (depending on *waitForSync*, see below),\nthe *Etag* header field contains the new revision of the document\n(in double quotes) and the *Location* header contains a complete URL\nunder which the document can be queried.\n\nOptionally, the query parameter *waitForSync* can be used to force\nsynchronization of the updated document operation to disk even in case\nthat the *waitForSync* flag had been disabled for the entire collection.\nThus, the *waitForSync* query parameter can be used to force synchronization\nof just specific operations. To use this, set the *waitForSync* parameter\nto *true*. If the *waitForSync* parameter is not specified or set to\n*false*, then the collection's default *waitForSync* behavior is\napplied. The *waitForSync* query parameter cannot be used to disable\nsynchronization for collections that have a default *waitForSync* value\nof *true*.\n\nIf *silent* is not set to *true*, the body of the response contains a JSON \nobject with the information about the handle and the revision. The attribute \n*_id* contains the known *document-handle* of the updated document, *_key* \ncontains the key which uniquely identifies a document in a given collection, \nand the attribute *_rev* contains the new document revision.\n\nIf the query parameter *returnOld* is *true*, then\nthe complete previous revision of the document\nis returned under the *old* attribute in the result.\n\nIf the query parameter *returnNew* is *true*, then\nthe complete new document is returned under\nthe *new* attribute in the result.\n\nIf the document does not exist, then a *HTTP 404* is returned and the\nbody of the response contains an error document.\n\n\n\n\n**Example:**\n Patches an existing document with new content.\n\n
shell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/document/products/12228 <<EOF\n{ \n  \"hello\" : \"world\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\netag: \"_WnWW1_u--_\"\nlocation: /_db/_system/_api/document/products/12228\n\n{ \n  \"_id\" : \"products/12228\", \n  \"_key\" : \"12228\", \n  \"_rev\" : \"_WnWW1_u--_\", \n  \"_oldRev\" : \"_WnWW1_q--_\" \n}\nshell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/document/products/12228 <<EOF\n{ \n  \"numbers\" : { \n    \"one\" : 1, \n    \"two\" : 2, \n    \"three\" : 3, \n    \"empty\" : null \n  } \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\netag: \"_WnWW1_6--_\"\nlocation: /_db/_system/_api/document/products/12228\n\n{ \n  \"_id\" : \"products/12228\", \n  \"_key\" : \"12228\", \n  \"_rev\" : \"_WnWW1_6--_\", \n  \"_oldRev\" : \"_WnWW1_u--_\" \n}\nshell> curl --dump - http://localhost:8529/_api/document/products/12228\n\nHTTP/1.1 200 OK\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: \"_WnWW1_6--_\"\n\n{ \n  \"_key\" : \"12228\", \n  \"_id\" : \"products/12228\", \n  \"_rev\" : \"_WnWW1_6--_\", \n  \"one\" : \"world\", \n  \"hello\" : \"world\", \n  \"numbers\" : { \n    \"one\" : 1, \n    \"two\" : 2, \n    \"three\" : 3, \n    \"empty\" : null \n  } \n}\nshell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/document/products/12228?keepNull=false <<EOF\n{ \n  \"hello\" : null, \n  \"numbers\" : { \n    \"four\" : 4 \n  } \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\netag: \"_WnWW1AS--_\"\nlocation: /_db/_system/_api/document/products/12228\n\n{ \n  \"_id\" : \"products/12228\", \n  \"_key\" : \"12228\", \n  \"_rev\" : \"_WnWW1AS--_\", \n  \"_oldRev\" : \"_WnWW1_6--_\" \n}\nshell> curl --dump - http://localhost:8529/_api/document/products/12228\n\nHTTP/1.1 200 OK\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: \"_WnWW1AS--_\"\n\n{ \n  \"_key\" : \"12228\", \n  \"_id\" : \"products/12228\", \n  \"_rev\" : \"_WnWW1AS--_\", \n  \"one\" : \"world\", \n  \"numbers\" : { \n    \"empty\" : null, \n    \"one\" : 1, \n    \"three\" : 3, \n    \"two\" : 2, \n    \"four\" : 4 \n  } \n}\n
\n\n\n\n\n**Example:**\n Merging attributes of an object using `mergeObjects`:\n\n
shell> curl --dump - http://localhost:8529/_api/document/products/12244\n\nHTTP/1.1 200 OK\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: \"_WnWW1Ba--_\"\n\n{ \n  \"_key\" : \"12244\", \n  \"_id\" : \"products/12244\", \n  \"_rev\" : \"_WnWW1Ba--_\", \n  \"inhabitants\" : { \n    \"china\" : 1366980000, \n    \"india\" : 1263590000, \n    \"usa\" : 319220000 \n  } \n}\nshell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/document/products/12244?mergeObjects=true <<EOF\n{ \n  \"inhabitants\" : { \n    \"indonesia\" : 252164800, \n    \"brazil\" : 203553000 \n  } \n}\nEOF\n\nshell> curl --dump - http://localhost:8529/_api/document/products/12244\n\nHTTP/1.1 200 OK\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: \"_WnWW1Bq--_\"\n\n{ \n  \"_key\" : \"12244\", \n  \"_id\" : \"products/12244\", \n  \"_rev\" : \"_WnWW1Bq--_\", \n  \"inhabitants\" : { \n    \"china\" : 1366980000, \n    \"india\" : 1263590000, \n    \"usa\" : 319220000, \n    \"indonesia\" : 252164800, \n    \"brazil\" : 203553000 \n  } \n}\nshell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/document/products/12244?mergeObjects=false <<EOF\n{ \n  \"inhabitants\" : { \n    \"pakistan\" : 188346000 \n  } \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\netag: \"_WnWW1B6--_\"\nlocation: /_db/_system/_api/document/products/12244\n\n{ \n  \"_id\" : \"products/12244\", \n  \"_key\" : \"12244\", \n  \"_rev\" : \"_WnWW1B6--_\", \n  \"_oldRev\" : \"_WnWW1Bq--_\" \n}\nshell> curl --dump - http://localhost:8529/_api/document/products/12244\n\nHTTP/1.1 200 OK\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: \"_WnWW1B6--_\"\n\n{ \n  \"_key\" : \"12244\", \n  \"_id\" : \"products/12244\", \n  \"_rev\" : \"_WnWW1B6--_\", \n  \"inhabitants\" : { \n    \"pakistan\" : 188346000 \n  } \n}\n
\n\n\n\n\n", @@ -6048,7 +6048,7 @@ "Documents" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Documents/patch_update_document.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Documents/patch_update_document.md" }, "put": { "description": "\n\nReplaces the document with handle with the one in\nthe body, provided there is such a document and no precondition is\nviolated.\n\nIf the *If-Match* header is specified and the revision of the\ndocument in the database is unequal to the given revision, the\nprecondition is violated.\n\nIf *If-Match* is not given and *ignoreRevs* is *false* and there\nis a *_rev* attribute in the body and its value does not match\nthe revision of the document in the database, the precondition is\nviolated.\n\nIf a precondition is violated, an *HTTP 412* is returned.\n\nIf the document exists and can be updated, then an *HTTP 201* or\nan *HTTP 202* is returned (depending on *waitForSync*, see below),\nthe *Etag* header field contains the new revision of the document\nand the *Location* header contains a complete URL under which the\ndocument can be queried.\n\nOptionally, the query parameter *waitForSync* can be used to force\nsynchronization of the document replacement operation to disk even in case\nthat the *waitForSync* flag had been disabled for the entire collection.\nThus, the *waitForSync* query parameter can be used to force synchronization\nof just specific operations. To use this, set the *waitForSync* parameter\nto *true*. If the *waitForSync* parameter is not specified or set to\n*false*, then the collection's default *waitForSync* behavior is\napplied. The *waitForSync* query parameter cannot be used to disable\nsynchronization for collections that have a default *waitForSync* value\nof *true*.\n\nIf *silent* is not set to *true*, the body of the response contains a JSON \nobject with the information about the handle and the revision. The attribute \n*_id* contains the known *document-handle* of the updated document, *_key* \ncontains the key which uniquely identifies a document in a given collection, \nand the attribute *_rev* contains the new document revision.\n\nIf the query parameter *returnOld* is *true*, then\nthe complete previous revision of the document\nis returned under the *old* attribute in the result.\n\nIf the query parameter *returnNew* is *true*, then\nthe complete new document is returned under\nthe *new* attribute in the result.\n\nIf the document does not exist, then a *HTTP 404* is returned and the\nbody of the response contains an error document.\n\n\n\n\n**Example:**\n Using a document handle\n\n
shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/document/products/12366 <<EOF\n{\"Hello\": \"you\"}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\netag: \"_WnWW1LG--_\"\nlocation: /_db/_system/_api/document/products/12366\n\n{ \n  \"_id\" : \"products/12366\", \n  \"_key\" : \"12366\", \n  \"_rev\" : \"_WnWW1LG--_\", \n  \"_oldRev\" : \"_WnWW1LC--_\" \n}\n
\n\n\n\n\n**Example:**\n Unknown document handle\n\n
shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/document/products/12388 <<EOF\n{}\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"document not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1202 \n}\n
\n\n\n\n\n**Example:**\n Produce a revision conflict\n\n
shell> curl -X PUT --header 'If-Match: \"_WnWW1Lu--B\"' --data-binary @- --dump - http://localhost:8529/_api/document/products/12376 <<EOF\n{\"other\":\"content\"}\nEOF\n\nHTTP/1.1 412 Precondition Failed\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: \"_WnWW1Lu--_\"\n\n{ \n  \"error\" : true, \n  \"code\" : 412, \n  \"errorNum\" : 1200, \n  \"errorMessage\" : \"precondition failed\", \n  \"_id\" : \"products/12376\", \n  \"_key\" : \"12376\", \n  \"_rev\" : \"_WnWW1Lu--_\" \n}\n
\n\n\n\n\n", @@ -6136,7 +6136,7 @@ "Documents" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Documents/put_replace_document.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Documents/put_replace_document.md" } }, "/_api/edges/{collection-id}": { @@ -6182,7 +6182,7 @@ "Graph Edges" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph Edges/get_read_in_out_edges.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph Edges/get_read_in_out_edges.md" } }, "/_api/endpoint": { @@ -6205,7 +6205,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_api_endpoint.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_api_endpoint.md" } }, "/_api/engine": { @@ -6229,7 +6229,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_engine.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_engine.md" } }, "/_api/explain": { @@ -6262,7 +6262,7 @@ "AQL" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/post_api_explain.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/post_api_explain.md" } }, "/_api/export": { @@ -6308,7 +6308,7 @@ "Bulk" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Bulk/post_api_export.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Bulk/post_api_export.md" } }, "/_api/foxx": { @@ -6333,7 +6333,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_service_list.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_service_list.md" }, "post": { "description": "\n\nInstalls the given new service at the given mount path.\n\nThe request body can be any of the following formats:\n\n- `application/zip`: a raw zip bundle containing a service\n- `application/javascript`: a standalone JavaScript file\n- `application/json`: a service definition as JSON\n- `multipart/form-data`: a service definition as a multipart form\n\nA service definition is an object or form with the following properties or fields:\n\n- *configuration*: a JSON object describing configuration values\n- *dependencies*: a JSON object describing dependency settings\n- *source*: a fully qualified URL or an absolute path on the server's file system\n\nWhen using multipart data, the *source* field can also alternatively be a file field\ncontaining either a zip bundle or a standalone JavaScript file.\n\nWhen using a standalone JavaScript file the given file will be executed\nto define our service's HTTP endpoints. 
It is the same which would be defined\nin the field `main` of the service manifest.\n\nIf *source* is a URL, the URL must be reachable from the server.\nIf *source* is a file system path, the path will be resolved on the server.\nIn either case the path or URL is expected to resolve to a zip bundle,\nJavaScript file or (in case of a file system path) directory.\n\nNote that when using file system paths in a cluster with multiple coordinators\nthe file system path must resolve to equivalent files on every coordinator.\n\n", @@ -6377,7 +6377,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_service_install.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_service_install.md" } }, "/_api/foxx/commit": { @@ -6402,7 +6402,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_commit.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_commit.md" } }, "/_api/foxx/configuration": { @@ -6427,7 +6427,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_configuration_get.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_configuration_get.md" }, "patch": { "description": "\n\nReplaces the given service's configuration.\n\nReturns an object mapping all configuration option names to their new values.\n\n", @@ -6461,7 +6461,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_configuration_update.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_configuration_update.md" }, "put": { "description": "\n\nReplaces the given service's configuration completely.\n\nReturns an object mapping all configuration option names to their new values.\n\n", @@ -6495,7 +6495,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_configuration_replace.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_configuration_replace.md" } }, "/_api/foxx/dependencies": { @@ -6520,7 +6520,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_dependencies_get.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_dependencies_get.md" }, "patch": { "description": "\n\nReplaces the given service's dependencies.\n\nReturns an object mapping all dependency names to their new mount paths.\n\n", @@ -6554,7 +6554,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_dependencies_update.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_dependencies_update.md" }, "put": { "description": "\n\nReplaces the given service's dependencies completely.\n\nReturns an object mapping all dependency names to their new mount paths.\n\n", @@ -6588,7 +6588,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_dependencies_replace.md" + "x-filename": 
"/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_dependencies_replace.md" } }, "/_api/foxx/development": { @@ -6613,7 +6613,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_development_disable.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_development_disable.md" }, "post": { "description": "\n\nPuts the service into development mode.\n\nWhile the service is running in development mode the service will be reloaded\nfrom the filesystem and its setup script (if any) will be re-executed every\ntime the service handles a request.\n\nWhen running ArangoDB in a cluster with multiple coordinators note that changes\nto the filesystem on one coordinator will not be reflected across the other\ncoordinators. This means you should treat your coordinators as inconsistent\nas long as any service is running in development mode.\n\n", @@ -6636,7 +6636,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_development_enable.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_development_enable.md" } }, "/_api/foxx/download": { @@ -6664,7 +6664,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_bundle.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_bundle.md" } }, "/_api/foxx/readme": { @@ -6692,7 +6692,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_readme.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_readme.md" } }, "/_api/foxx/scripts": { @@ -6717,7 +6717,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_scripts_list.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_scripts_list.md" } }, "/_api/foxx/scripts/{name}": { @@ -6761,7 +6761,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_scripts_run.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_scripts_run.md" } }, "/_api/foxx/service": { @@ -6793,7 +6793,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_service_uninstall.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_service_uninstall.md" }, "get": { "description": "\n\nFetches detailed information for the service at the given mount path.\n\nReturns an object with the following attributes:\n\n- *mount*: the mount path of the service\n- *path*: the local file system path of the service\n- *development*: *true* if the service is running in development mode\n- *legacy*: *true* if the service is running in 2.8 legacy compatibility mode\n- *manifest*: the normalized JSON manifest of the service\n\nAdditionally the object may contain the following attributes if they have been set on the manifest:\n\n- *name*: a string identifying the service type\n- *version*: a semver-compatible version string\n\n", @@ -6819,7 +6819,7 @@ "Foxx" ], "x-examples": [], - "x-filename": 
"/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_service_details.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_service_details.md" }, "patch": { "description": "\n\nInstalls the given new service on top of the service currently installed at the given mount path.\nThis is only recommended for switching between different versions of the same service.\n\nUnlike replacing a service, upgrading a service retains the old service's configuration\nand dependencies (if any) and should therefore only be used to migrate an existing service\nto a newer or equivalent service.\n\nThe request body can be any of the following formats:\n\n- `application/zip`: a raw zip bundle containing a service\n- `application/javascript`: a standalone JavaScript file\n- `application/json`: a service definition as JSON\n- `multipart/form-data`: a service definition as a multipart form\n\nA service definition is an object or form with the following properties or fields:\n\n- *configuration*: a JSON object describing configuration values\n- *dependencies*: a JSON object describing dependency settings\n- *source*: a fully qualified URL or an absolute path on the server's file system\n\nWhen using multipart data, the *source* field can also alternatively be a file field\ncontaining either a zip bundle or a standalone JavaScript file.\n\nWhen using a standalone JavaScript file the given file will be executed\nto define our service's HTTP endpoints. It is the same which would be defined\nin the field `main` of the service manifest.\n\nIf *source* is a URL, the URL must be reachable from the server.\nIf *source* is a file system path, the path will be resolved on the server.\nIn either case the path or URL is expected to resolve to a zip bundle,\nJavaScript file or (in case of a file system path) directory.\n\nNote that when using file system paths in a cluster with multiple coordinators\nthe file system path must resolve to equivalent files on every coordinator.\n\n", @@ -6863,7 +6863,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_service_upgrade.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_service_upgrade.md" }, "put": { "description": "\n\nRemoves the service at the given mount path from the database and file system.\nThen installs the given new service at the same mount path.\n\nThis is a slightly safer equivalent to performing an uninstall of the old service\nfollowed by installing the new service. 
The new service's main and script files\n(if any) will be checked for basic syntax errors before the old service is removed.\n\nThe request body can be any of the following formats:\n\n- `application/zip`: a raw zip bundle containing a service\n- `application/javascript`: a standalone JavaScript file\n- `application/json`: a service definition as JSON\n- `multipart/form-data`: a service definition as a multipart form\n\nA service definition is an object or form with the following properties or fields:\n\n- *configuration*: a JSON object describing configuration values\n- *dependencies*: a JSON object describing dependency settings\n- *source*: a fully qualified URL or an absolute path on the server's file system\n\nWhen using multipart data, the *source* field can also alternatively be a file field\ncontaining either a zip bundle or a standalone JavaScript file.\n\nWhen using a standalone JavaScript file the given file will be executed\nto define our service's HTTP endpoints. It is the same which would be defined\nin the field `main` of the service manifest.\n\nIf *source* is a URL, the URL must be reachable from the server.\nIf *source* is a file system path, the path will be resolved on the server.\nIn either case the path or URL is expected to resolve to a zip bundle,\nJavaScript file or (in case of a file system path) directory.\n\nNote that when using file system paths in a cluster with multiple coordinators\nthe file system path must resolve to equivalent files on every coordinator.\n\n", @@ -6914,7 +6914,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_service_replace.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_service_replace.md" } }, "/_api/foxx/swagger": { @@ -6939,7 +6939,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_swagger.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_swagger.md" } }, "/_api/foxx/tests": { @@ -6978,7 +6978,7 @@ "Foxx" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Foxx/api_foxx_tests_run.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Foxx/api_foxx_tests_run.md" } }, "/_api/gharial": { @@ -6995,7 +6995,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_list_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_list_http_examples.md" }, "post": { "description": "\n\nThe creation of a graph requires the name of the graph and a\ndefinition of its edges.\n[See also edge definitions](../../Manual/Graphs/GeneralGraphs/Management.html#edge-definitions).\n\n\n**A JSON object with these properties is required:**\n\n - **orphanCollections**: An array of additional vertex collections.\n - **edgeDefinitions**: An array of definitions for the edge\n - **name**: Name of the graph.\n - **isSmart**: Define if the created graph should be smart.\n This only has effect in Enterprise version.\n - **options**:\n - **smartGraphAttribute**: The attribute name that is used to smartly shard the vertices of a graph.\n Every vertex in this Graph has to have this attribute.\n Cannot be modified later.\n - **numberOfShards**: The number of shards that is used for every 
collection within this graph.\n Cannot be modified later.\n\n\n\n\n\n**Example:**\n \n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial <<EOF\n{ \n  \"name\" : \"myGraph\", \n  \"edgeDefinitions\" : [ \n    { \n      \"collection\" : \"edges\", \n      \"from\" : [ \n        \"startVertices\" \n      ], \n      \"to\" : [ \n        \"endVertices\" \n      ] \n    } \n  ] \n}\nEOF\n\nHTTP/1.1 202 Accepted\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: _WnWWvIG--_\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"graph\" : { \n    \"name\" : \"myGraph\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"edges\", \n        \"from\" : [ \n          \"startVertices\" \n        ], \n        \"to\" : [ \n          \"endVertices\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ ], \n    \"isSmart\" : false, \n    \"numberOfShards\" : 0, \n    \"replicationFactor\" : 1, \n    \"smartGraphAttribute\" : \"\", \n    \"_id\" : \"_graphs/myGraph\", \n    \"_rev\" : \"_WnWWvIG--_\" \n  } \n}\n
\n\n\n\n\n**Example:**\n \n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial <<EOF\n{ \n  \"name\" : \"myGraph\", \n  \"edgeDefinitions\" : [ \n    { \n      \"collection\" : \"edges\", \n      \"from\" : [ \n        \"startVertices\" \n      ], \n      \"to\" : [ \n        \"endVertices\" \n      ] \n    } \n  ], \n  \"isSmart\" : true, \n  \"options\" : { \n    \"numberOfShards\" : 9, \n    \"smartGraphAttribute\" : \"region\" \n  } \n}\nEOF\n\nHTTP/1.1 202 Accepted\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: _WnWWvJ6--_\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"graph\" : { \n    \"name\" : \"myGraph\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"edges\", \n        \"from\" : [ \n          \"startVertices\" \n        ], \n        \"to\" : [ \n          \"endVertices\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ ], \n    \"isSmart\" : false, \n    \"numberOfShards\" : 0, \n    \"replicationFactor\" : 1, \n    \"smartGraphAttribute\" : \"\", \n    \"_id\" : \"_graphs/myGraph\", \n    \"_rev\" : \"_WnWWvJ6--_\" \n  } \n}\n
\n\n\n\n\n", @@ -7026,7 +7026,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_create_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_create_http_examples.md" } }, "/_api/gharial/{graph-name}": { @@ -7065,7 +7065,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_drop_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_drop_http_examples.md" }, "get": { "description": "\n\nGets a graph from the collection *_graphs*.\nReturns the definition content of this graph.\n\n\n\n\n**Example:**\n \n\n
shell> curl --dump - http://localhost:8529/_api/gharial/myGraph\n\nHTTP/1.1 200 OK\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: _WnWWvdS--_\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"graph\" : { \n    \"name\" : \"myGraph\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"edges\", \n        \"from\" : [ \n          \"startVertices\" \n        ], \n        \"to\" : [ \n          \"endVertices\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ ], \n    \"isSmart\" : false, \n    \"numberOfShards\" : 1, \n    \"replicationFactor\" : 1, \n    \"smartGraphAttribute\" : \"\", \n    \"_id\" : \"_graphs/myGraph\", \n    \"_rev\" : \"_WnWWvdS--_\" \n  } \n}\n
\n\n\n\n\n", @@ -7092,7 +7092,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_get_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_get_http_examples.md" } }, "/_api/gharial/{graph-name}/edge": { @@ -7121,7 +7121,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_list_edge_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_list_edge_http_examples.md" }, "post": { "description": "\n\nAdds an additional edge definition to the graph.\n\nThis edge definition has to contain a *collection* and an array of\neach *from* and *to* vertex collections. An edge definition can only\nbe added if this definition is either not used in any other graph, or\nit is used with exactly the same definition. It is not possible to\nstore a definition \"e\" from \"v1\" to \"v2\" in the one graph, and \"e\"\nfrom \"v2\" to \"v1\" in the other graph.\n\n\n**A JSON object with these properties is required:**\n\n - **to** (string): One or many vertex collections that can contain target vertices.\n - **from** (string): One or many vertex collections that can contain source vertices.\n - **collection**: The name of the edge collection to be used.\n\n\n\n\n\n**Example:**\n \n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge <<EOF\n{ \n  \"collection\" : \"works_in\", \n  \"from\" : [ \n    \"female\", \n    \"male\" \n  ], \n  \"to\" : [ \n    \"city\" \n  ] \n}\nEOF\n\nHTTP/1.1 202 Accepted\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: _WnWWvBS--_\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"graph\" : { \n    \"name\" : \"social\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"relation\", \n        \"from\" : [ \n          \"female\", \n          \"male\" \n        ], \n        \"to\" : [ \n          \"female\", \n          \"male\" \n        ] \n      }, \n      { \n        \"collection\" : \"works_in\", \n        \"from\" : [ \n          \"female\", \n          \"male\" \n        ], \n        \"to\" : [ \n          \"city\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ ], \n    \"isSmart\" : false, \n    \"numberOfShards\" : 1, \n    \"replicationFactor\" : 1, \n    \"smartGraphAttribute\" : \"\", \n    \"_id\" : \"_graphs/social\", \n    \"_rev\" : \"_WnWWvBS--_\" \n  } \n}\n
\n\n\n\n", @@ -7163,7 +7163,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_definition_add_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_definition_add_http_examples.md" } }, "/_api/gharial/{graph-name}/edge/{collection-name}": { @@ -7235,7 +7235,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_create_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_create_http_examples.md" } }, "/_api/gharial/{graph-name}/edge/{collection-name}/{edge-key}": { @@ -7299,7 +7299,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_delete_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_delete_http_examples.md" }, "get": { "description": "\n\nGets an edge from the given collection.\n\n\n\n\n**Example:**\n \n\n
shell> curl --dump - http://localhost:8529/_api/gharial/social/edge/relation/10223\n\nHTTP/1.1 200 OK\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: _WnWWvai--_\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"edge\" : { \n    \"_key\" : \"10223\", \n    \"_id\" : \"relation/10223\", \n    \"_from\" : \"female/alice\", \n    \"_to\" : \"male/charly\", \n    \"_rev\" : \"_WnWWvai--_\", \n    \"type\" : \"friend\", \n    \"vertex\" : \"alice\" \n  } \n}\n
\n\n\n\n\n", @@ -7351,7 +7351,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_get_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_get_http_examples.md" }, "patch": { "description": "\n\nUpdates the data of the specific edge in the collection.\n\n\n\n\n**Example:**\n \n\n
shell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge/relation/10799 <<EOF\n{ \n  \"since\" : \"01.01.2001\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: _WnWWv4u--_\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"edge\" : { \n    \"_id\" : \"relation/10799\", \n    \"_key\" : \"10799\", \n    \"_rev\" : \"_WnWWv4u--_\", \n    \"_oldRev\" : \"_WnWWv4O--B\" \n  } \n}\n
\n\n\n\n\n", @@ -7422,7 +7422,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_modify_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_modify_http_examples.md" }, "put": { "description": "\n\nReplaces the data of an edge in the collection.\n\n\n\n\n**Example:**\n \n\n
shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge/relation/10868 <<EOF\n{ \n  \"type\" : \"divorced\", \n  \"_from\" : \"female/alice\", \n  \"_to\" : \"male/bob\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: _WnWWv8G--_\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"edge\" : { \n    \"_id\" : \"relation/10868\", \n    \"_key\" : \"10868\", \n    \"_rev\" : \"_WnWWv8G--_\", \n    \"_oldRev\" : \"_WnWWv8---L\" \n  } \n}\n
\n\n\n\n", @@ -7495,7 +7495,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_replace_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_replace_http_examples.md" } }, "/_api/gharial/{graph-name}/edge/{definition-name}": { @@ -7545,7 +7545,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_definition_remove_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_definition_remove_http_examples.md" }, "put": { "description": "\n\nChange one specific edge definition.\nThis will modify all occurrences of this definition in all graphs known to your database.\n\n\n**A JSON object with these properties is required:**\n\n - **to** (string): One or many vertex collections that can contain target vertices.\n - **from** (string): One or many vertex collections that can contain source vertices.\n - **collection**: The name of the edge collection to be used.\n\n\n\n\n\n**Example:**\n \n\n
shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge/relation <<EOF\n{ \n  \"collection\" : \"relation\", \n  \"from\" : [ \n    \"female\", \n    \"male\", \n    \"animal\" \n  ], \n  \"to\" : [ \n    \"female\", \n    \"male\", \n    \"animal\" \n  ] \n}\nEOF\n\nHTTP/1.1 202 Accepted\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: _WnWWwIa--_\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"graph\" : { \n    \"name\" : \"social\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"relation\", \n        \"from\" : [ \n          \"animal\", \n          \"female\", \n          \"male\" \n        ], \n        \"to\" : [ \n          \"animal\", \n          \"female\", \n          \"male\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ ], \n    \"isSmart\" : false, \n    \"numberOfShards\" : 1, \n    \"replicationFactor\" : 1, \n    \"smartGraphAttribute\" : \"\", \n    \"_id\" : \"_graphs/social\", \n    \"_rev\" : \"_WnWWwIa--_\" \n  } \n}\n
\n\n\n\n", @@ -7595,7 +7595,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_definition_modify_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_edge_definition_modify_http_examples.md" } }, "/_api/gharial/{graph-name}/vertex": { @@ -7624,7 +7624,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_list_vertex_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_list_vertex_http_examples.md" }, "post": { "description": "\n\nAdds a vertex collection to the set of collections of the graph. If\nthe collection does not exist, it will be created.\n\n\n\n\n**Example:**\n \n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex <<EOF\n{ \n  \"collection\" : \"otherVertices\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: _WnWWvF6--_\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"graph\" : { \n    \"name\" : \"social\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"relation\", \n        \"from\" : [ \n          \"female\", \n          \"male\" \n        ], \n        \"to\" : [ \n          \"female\", \n          \"male\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ \n      \"otherVertices\" \n    ], \n    \"isSmart\" : false, \n    \"numberOfShards\" : 1, \n    \"replicationFactor\" : 1, \n    \"smartGraphAttribute\" : \"\", \n    \"_id\" : \"_graphs/social\", \n    \"_rev\" : \"_WnWWvF6--_\" \n  } \n}\n
\n\n\n\n", @@ -7654,7 +7654,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_vertex_collection_add_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_vertex_collection_add_http_examples.md" } }, "/_api/gharial/{graph-name}/vertex/{collection-name}": { @@ -7704,7 +7704,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_vertex_collection_remove_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_vertex_collection_remove_http_examples.md" }, "post": { "description": "\n\nAdds a vertex to the given collection.\n\n\n\n\n**Example:**\n \n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex/male <<EOF\n{ \n  \"name\" : \"Francis\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: _WnWWvEO--_\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"vertex\" : { \n    \"_id\" : \"male/9743\", \n    \"_key\" : \"9743\", \n    \"_rev\" : \"_WnWWvEO--_\" \n  } \n}\n
\n\n\n\n\n", @@ -7760,7 +7760,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_vertex_create_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_vertex_create_http_examples.md" } }, "/_api/gharial/{graph-name}/vertex/{collection-name}/{vertex-key}": { @@ -7824,7 +7824,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_vertex_delete_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_vertex_delete_http_examples.md" }, "get": { "description": "\n\nGets a vertex from the given collection.\n\n\n\n\n**Example:**\n \n\n
shell> curl --dump - http://localhost:8529/_api/gharial/social/vertex/female/alice\n\nHTTP/1.1 200 OK\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: _WnWWvhO--_\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"vertex\" : { \n    \"_key\" : \"alice\", \n    \"_id\" : \"female/alice\", \n    \"_rev\" : \"_WnWWvhO--_\", \n    \"name\" : \"Alice\" \n  } \n}\n
\n\n\n\n\n", @@ -7876,7 +7876,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_vertex_get_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_vertex_get_http_examples.md" }, "patch": { "description": "\n\nUpdates the data of the specific vertex in the collection.\n\n\n\n\n**Example:**\n \n\n
shell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex/female/alice <<EOF\n{ \n  \"age\" : 26 \n}\nEOF\n\nHTTP/1.1 202 Accepted\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: _WnWWv0S--_\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"vertex\" : { \n    \"_id\" : \"female/alice\", \n    \"_key\" : \"alice\", \n    \"_rev\" : \"_WnWWv0S--_\", \n    \"_oldRev\" : \"_WnWWvzi--_\" \n  } \n}\n
\n\n\n\n\n", @@ -7956,7 +7956,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_vertex_modify_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_vertex_modify_http_examples.md" }, "put": { "description": "\n\nReplaces the data of a vertex in the collection.\n\n\n\n\n**Example:**\n \n\n
shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex/female/alice <<EOF\n{ \n  \"name\" : \"Alice Cooper\", \n  \"age\" : 26 \n}\nEOF\n\nHTTP/1.1 202 Accepted\nx-content-type-options: nosniff\ncontent-type: application/json; charset=utf-8\netag: _WnWWwNq--_\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"vertex\" : { \n    \"_id\" : \"female/alice\", \n    \"_key\" : \"alice\", \n    \"_rev\" : \"_WnWWwNq--_\", \n    \"_oldRev\" : \"_WnWWwNW--_\" \n  } \n}\n
\n\n\n\n\n", @@ -8029,7 +8029,7 @@ "Graph" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph/general_graph_vertex_replace_http_examples.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph/general_graph_vertex_replace_http_examples.md" } }, "/_api/import#document": { @@ -8126,7 +8126,7 @@ "Bulk" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Bulk/import_document.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Bulk/import_document.md" } }, "/_api/import#json": { @@ -8230,7 +8230,7 @@ "Bulk" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Bulk/import_json.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Bulk/import_json.md" } }, "/_api/index": { @@ -8255,7 +8255,7 @@ "Indexes" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Indexes/get_api_index.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Indexes/get_api_index.md" } }, "/_api/index#fulltext": { @@ -8295,7 +8295,7 @@ "Indexes" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Indexes/post_api_index_fulltext.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Indexes/post_api_index_fulltext.md" } }, "/_api/index#general": { @@ -8340,7 +8340,7 @@ "Indexes" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Indexes/post_api_index.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Indexes/post_api_index.md" } }, "/_api/index#geo": { @@ -8380,7 +8380,7 @@ "Indexes" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Indexes/post_api_index_geo.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Indexes/post_api_index_geo.md" } }, "/_api/index#hash": { @@ -8423,7 +8423,7 @@ "Indexes" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Indexes/post_api_index_hash.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Indexes/post_api_index_hash.md" } }, "/_api/index#persistent": { @@ -8466,7 +8466,7 @@ "Indexes" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Indexes/post_api_index_persistent.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Indexes/post_api_index_persistent.md" } }, "/_api/index#skiplist": { @@ -8509,7 +8509,7 @@ "Indexes" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Indexes/post_api_index_skiplist.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Indexes/post_api_index_skiplist.md" } }, "/_api/index/{index-handle}": { @@ -8538,7 +8538,7 @@ "Indexes" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Indexes/post_api_index_delete.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Indexes/post_api_index_delete.md" }, "get": { "description": "\n\nThe result is an object describing the index. 
It has at least the following\nattributes:\n\n- *id*: the identifier of the index\n\n- *type*: the index type\n\nAll other attributes are type-dependent. For example, some indexes provide\n*unique* or *sparse* flags, whereas others don't. Some indexes also provide \na selectivity estimate in the *selectivityEstimate* attribute of the result.\n\n\n\n\n**Example:**\n \n\n
shell> curl --dump - http://localhost:8529/_api/index/products/0\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"fields\" : [ \n    \"_key\" \n  ], \n  \"id\" : \"products/0\", \n  \"selectivityEstimate\" : 1, \n  \"sparse\" : false, \n  \"type\" : \"primary\", \n  \"unique\" : true, \n  \"error\" : false, \n  \"code\" : 200 \n}\n
\n\n\n\n\n", @@ -8565,7 +8565,7 @@ "Indexes" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Indexes/get_api_reads_index.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Indexes/get_api_reads_index.md" } }, "/_api/job/{job-id}": { @@ -8597,7 +8597,7 @@ "job" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/job/job_getStatusById.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/job/job_getStatusById.md" }, "put": { "description": "\n\nReturns the result of an async job identified by job-id. If the async job\nresult is present on the server, the result will be removed from the list of\nresult. That means this method can be called for each job-id once.\nThe method will return the original job result's headers and body, plus the\nadditional HTTP header x-arango-async-job-id. If this header is present,\nthen\nthe job was found and the response contains the original job's result. If\nthe header is not present, the job was not found and the response contains\nstatus information from the job manager.\n\n\n\n\n**Example:**\n Not providing a job-id:\n\n
shell> curl -X PUT --dump - http://localhost:8529/_api/job\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"bad parameter\", \n  \"code\" : 400, \n  \"errorNum\" : 400 \n}\n
\n\n\n\n\n**Example:**\n Providing a job-id for a non-existing job:\n\n
shell> curl -X PUT --dump - http://localhost:8529/_api/job/notthere\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"not found\", \n  \"code\" : 404, \n  \"errorNum\" : 404 \n}\n
\n\n\n\n\n**Example:**\n Fetching the result of an HTTP GET job:\n\n
shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\nx-content-type-options: nosniff\nx-arango-async-id: 152293662584438\ncontent-type: text/plain; charset=utf-8\n\nshell> curl -X PUT --dump - http://localhost:8529/_api/job/152293662584438\n\nHTTP/1.1 200 OK\nx-content-type-options: nosniff\nx-arango-async-id: 152293662584438\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"server\" : \"arango\", \n  \"version\" : \"3.4.devel\", \n  \"license\" : \"community\" \n}\n
\n\n\n\n\n**Example:**\n Fetching the result of an HTTP POST job that failed:\n\n
shell> curl -X PUT --header 'x-arango-async: store' --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF\n{ \n  \"name\" : \" this name is invalid \" \n}\nEOF\n\nHTTP/1.1 202 Accepted\nx-content-type-options: nosniff\nx-arango-async-id: 152293662584443\ncontent-type: text/plain; charset=utf-8\n\nshell> curl -X PUT --dump - http://localhost:8529/_api/job/152293662584443\n\nHTTP/1.1 400 Bad Request\nx-content-type-options: nosniff\nx-arango-async-id: 152293662584443\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"expected PUT /_api/collection/<collection-name>/<action>\", \n  \"code\" : 400, \n  \"errorNum\" : 400 \n}\n
\n\n\n\n\n", @@ -8627,7 +8627,7 @@ "job" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/job/job_fetch_result.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/job/job_fetch_result.md" } }, "/_api/job/{job-id}/cancel": { @@ -8659,7 +8659,7 @@ "job" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/job/job_cancel.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/job/job_cancel.md" } }, "/_api/job/{type}": { @@ -8698,7 +8698,7 @@ "job" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/job/job_delete.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/job/job_delete.md" }, "get": { "description": "\n\nReturns the list of ids of async jobs with a specific status (either done or\npending).\nThe list can be used by the client to get an overview of the job system\nstatus and\nto retrieve completed job results later.\n\n\n\n\n**Example:**\n Fetching the list of done jobs:\n\n
shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\nx-content-type-options: nosniff\nx-arango-async-id: 152293662584448\ncontent-type: text/plain; charset=utf-8\n\nshell> curl --dump - http://localhost:8529/_api/job/done\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n[ \n  \"152293662584448\" \n]\n
\n\n\n\n\n**Example:**\n Fetching the list of pending jobs:\n\n
shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\nx-content-type-options: nosniff\nx-arango-async-id: 152293662584453\ncontent-type: text/plain; charset=utf-8\n\nshell> curl --dump - http://localhost:8529/_api/job/pending\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n[ ]\n
\n\n\n\n\n**Example:**\n Querying the status of a pending job:\n(we create a sleep job therefore...)\n\n
shell> curl -X POST --header 'x-arango-async: store' --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF\n{ \n  \"collections\" : { \n    \"read\" : [ \n      \"_frontend\" \n    ] \n  }, \n  \"action\" : \"function () {require('internal').sleep(15.0);}\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\nx-content-type-options: nosniff\nx-arango-async-id: 152293662584458\ncontent-type: text/plain; charset=utf-8\n\nshell> curl --dump - http://localhost:8529/_api/job/pending\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n[ \n  \"152293662584458\" \n]\nshell> curl -X DELETE --dump - http://localhost:8529/_api/job/152293662584458\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"result\" : true \n}\n
\n\n\n\n\n", @@ -8732,7 +8732,7 @@ "job" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/job/job_getByType.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/job/job_getByType.md" } }, "/_api/query": { @@ -8762,7 +8762,7 @@ "AQL" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/PostApiQueryProperties.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/PostApiQueryProperties.md" } }, "/_api/query-cache": { @@ -8782,7 +8782,7 @@ "AQL" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/DeleteApiQueryCache.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/DeleteApiQueryCache.md" } }, "/_api/query-cache/properties": { @@ -8802,7 +8802,7 @@ "AQL" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/GetApiQueryCacheProperties.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/GetApiQueryCacheProperties.md" }, "put": { "description": "\n\nAfter the properties have been changed, the current set of properties will\nbe returned in the HTTP response.\n\nNote: changing the properties may invalidate all results in the cache.\nThe global properties for AQL query cache.\nThe properties need to be passed in the attribute *properties* in the body\nof the HTTP request. *properties* needs to be a JSON object with the following\nproperties:\n\n\n**A JSON object with these properties is required:**\n\n - **mode**: the mode the AQL query cache should operate in. Possible values are *off*, *on* or *demand*.\n - **maxResults**: the maximum number of query results that will be stored per database-specific cache.\n\n\n", @@ -8830,7 +8830,7 @@ "AQL" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/PutApiQueryCacheProperties.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/PutApiQueryCacheProperties.md" } }, "/_api/query/current": { @@ -8850,7 +8850,7 @@ "AQL" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/GetApiQueryCurrent.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/GetApiQueryCurrent.md" } }, "/_api/query/properties": { @@ -8870,7 +8870,7 @@ "AQL" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/GetApiQueryProperties.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/GetApiQueryProperties.md" }, "put": { "description": "\n**A JSON object with these properties is required:**\n\n - **maxSlowQueries**: The maximum number of slow queries to keep in the list\n of slow queries. If the list of slow queries is full, the oldest entry in\n it will be discarded when additional slow queries occur.\n - **slowQueryThreshold**: The threshold value for treating a query as slow. A\n query with a runtime greater or equal to this threshold value will be\n put into the list of slow queries when slow query tracking is enabled.\n The value for *slowQueryThreshold* is specified in seconds.\n - **enabled**: If set to *true*, then queries will be tracked. 
If set to\n *false*, neither queries nor slow queries will be tracked.\n - **maxQueryStringLength**: The maximum query string length to keep in the list of queries.\n Query strings can have arbitrary lengths, and this property\n can be used to save memory in case very long query strings are used. The\n value is specified in bytes.\n - **trackSlowQueries**: If set to *true*, then slow queries will be tracked\n in the list of slow queries if their runtime exceeds the value set in\n *slowQueryThreshold*. In order for slow queries to be tracked, the *enabled*\n property must also be set to *true*.\n - **trackBindVars**: If set to *true*, then the bind variables used in queries will be tracked \n along with queries.\n\n\n\n\nThe properties need to be passed in the attribute *properties* in the body\nof the HTTP request. *properties* needs to be a JSON object.\n\nAfter the properties have been changed, the current set of properties will\nbe returned in the HTTP response.\n\n", @@ -8898,7 +8898,7 @@ "AQL" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/PutApiQueryProperties.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/PutApiQueryProperties.md" } }, "/_api/query/slow": { @@ -8918,7 +8918,7 @@ "AQL" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/DeleteApiQuerySlow.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/DeleteApiQuerySlow.md" }, "get": { "description": "\n\nReturns an array containing the last AQL queries that are finished and\nhave exceeded the slow query threshold in the selected database.\nThe maximum amount of queries in the list can be controlled by setting\nthe query tracking property `maxSlowQueries`. 
The threshold for treating\na query as *slow* can be adjusted by setting the query tracking property\n`slowQueryThreshold`.\n\nEach query is a JSON object with the following attributes:\n\n- *id*: the query's id\n\n- *query*: the query string (potentially truncated)\n\n- *bindVars*: the bind parameter values used by the query\n\n- *started*: the date and time when the query was started\n\n- *runTime*: the query's total run time \n\n- *state*: the query's current execution state (will always be \"finished\"\n for the list of slow queries)\n\n", @@ -8936,7 +8936,7 @@ "AQL" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/GetApiQuerySlow.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/GetApiQuerySlow.md" } }, "/_api/query/{query-id}": { @@ -8968,7 +8968,7 @@ "AQL" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/AQL/DeleteApiQueryKill.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/AQL/DeleteApiQueryKill.md" } }, "/_api/replication/applier-config": { @@ -8991,7 +8991,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/put_api_replication_applier.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/put_api_replication_applier.md" }, "put": { "description": "\n**A JSON object with these properties is required:**\n\n - **username**: an optional ArangoDB username to use when connecting to the endpoint.\n - **includeSystem**: whether or not system collection operations will be applied\n - **endpoint**: the logger server to connect to (e.g. \"tcp://192.168.173.13:8529\"). The endpoint must be specified.\n - **verbose**: if set to *true*, then a log line will be emitted for all operations \n performed by the replication applier. This should be used for debugging replication\n problems only.\n - **connectTimeout**: the timeout (in seconds) when attempting to connect to the\n endpoint. This value is used for each connection attempt.\n - **autoResync**: whether or not the slave should perform a full automatic resynchronization\n with the master in case the master cannot serve log data requested by the\n slave,\n or when the replication is started and no tick value can be found.\n - **database**: the name of the database on the endpoint. If not specified, defaults to the current local database name.\n - **idleMinWaitTime**: the minimum wait time (in seconds) that the applier will intentionally idle\n before fetching more log data from the master in case the master has\n already sent all its log data. This wait time can be used to control the\n frequency with which the replication applier sends HTTP log fetch requests\n to the master in case there is no write activity on the master.\n This value will be ignored if set to *0*.\n - **requestTimeout**: the timeout (in seconds) for individual requests to the endpoint.\n - **requireFromPresent**: if set to *true*, then the replication applier will check\n at start whether the start tick from which it starts or resumes replication is\n still present on the master. If not, then there would be data loss. If \n *requireFromPresent* is *true*, the replication applier will abort with an\n appropriate error message. 
If set to *false*, then the replication applier will\n still start, and ignore the data loss.\n - **idleMaxWaitTime**: the maximum wait time (in seconds) that the applier will intentionally idle \n before fetching more log data from the master in case the master has \n already sent all its log data and there have been previous log fetch attempts\n that resulted in no more log data. This wait time can be used to control the\n maximum frequency with which the replication applier sends HTTP log fetch\n requests to the master in case there is no write activity on the master for\n longer periods. This configuration value will only be used if the option\n *adaptivePolling* is set to *true*.\n This value will be ignored if set to *0*.\n - **restrictCollections** (string): the array of collections to include or exclude,\n based on the setting of *restrictType*\n - **restrictType**: the configuration for *restrictCollections*; Has to be either *include* or *exclude*\n - **initialSyncMaxWaitTime**: the maximum wait time (in seconds) that the initial synchronization will\n wait for a response from the master when fetching initial collection data.\n This wait time can be used to control after what time the initial\n synchronization\n will give up waiting for a response and fail. This value is relevant even\n for continuous replication when *autoResync* is set to *true* because this\n may re-start the initial synchronization when the master cannot provide\n log data the slave requires.\n This value will be ignored if set to *0*.\n - **maxConnectRetries**: the maximum number of connection attempts the applier\n will make in a row. If the applier cannot establish a connection to the\n endpoint in this number of attempts, it will stop itself.\n - **autoStart**: whether or not to auto-start the replication applier on\n (next and following) server starts\n - **adaptivePolling**: if set to *true*, the replication applier will fall\n to sleep for an increasingly long period in case the logger server at the\n endpoint does not have any more replication events to apply. Using\n adaptive polling is thus useful to reduce the amount of work for both the\n applier and the logger server for cases when there are only infrequent\n changes. The downside is that when using adaptive polling, it might take\n longer for the replication applier to detect that there are new replication\n events on the logger server.\n Setting *adaptivePolling* to false will make the replication applier\n contact the logger server in a constant interval, regardless of whether\n the logger server provides updates frequently or seldom.\n - **password**: the password to use when connecting to the endpoint.\n - **connectionRetryWaitTime**: the time (in seconds) that the applier will intentionally idle before\n it retries connecting to the master in case of connection problems.\n This value will be ignored if set to *0*.\n - **autoResyncRetries**: number of resynchronization retries that will be performed in a row when\n automatic resynchronization is enabled and kicks in. Setting this to *0*\n will\n effectively disable *autoResync*. Setting it to some other value will limit\n the number of retries that are performed. This helps preventing endless\n retries\n in case resynchronizations always fail.\n - **chunkSize**: the requested maximum size for log transfer packets that\n is used when the endpoint is contacted.\n\n\n\n\nSets the configuration of the replication applier. The configuration can\nonly be changed while the applier is not running. 
The updated configuration\nwill be saved immediately but only become active with the next start of the\napplier.\n\nIn case of success, the body of the response is a JSON object with the updated\nconfiguration.\n\n\n\n\n**Example:**\n \n\n
shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/replication/applier-config <<EOF\n{ \n  \"endpoint\" : \"tcp://127.0.0.1:8529\", \n  \"username\" : \"replicationApplier\", \n  \"password\" : \"applier1234@foxx\", \n  \"chunkSize\" : 4194304, \n  \"autoStart\" : false, \n  \"adaptivePolling\" : true \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"endpoint\" : \"tcp://127.0.0.1:8529\", \n  \"database\" : \"_system\", \n  \"username\" : \"replicationApplier\", \n  \"requestTimeout\" : 600, \n  \"connectTimeout\" : 10, \n  \"ignoreErrors\" : 0, \n  \"maxConnectRetries\" : 100, \n  \"lockTimeoutRetries\" : 0, \n  \"sslProtocol\" : 0, \n  \"chunkSize\" : 4194304, \n  \"skipCreateDrop\" : false, \n  \"autoStart\" : false, \n  \"adaptivePolling\" : true, \n  \"autoResync\" : false, \n  \"autoResyncRetries\" : 2, \n  \"includeSystem\" : true, \n  \"requireFromPresent\" : false, \n  \"verbose\" : false, \n  \"incremental\" : false, \n  \"restrictType\" : \"\", \n  \"restrictCollections\" : [ ], \n  \"connectionRetryWaitTime\" : 15, \n  \"initialSyncMaxWaitTime\" : 300, \n  \"idleMinWaitTime\" : 1, \n  \"idleMaxWaitTime\" : 2.5, \n  \"force32mode\" : false \n}\n
\n\n\n\n\n", @@ -9025,7 +9025,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/put_api_replication_applier_adjust.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/put_api_replication_applier_adjust.md" } }, "/_api/replication/applier-start": { @@ -9059,7 +9059,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/put_api_replication_applier_start.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/put_api_replication_applier_start.md" } }, "/_api/replication/applier-state": { @@ -9082,7 +9082,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/get_api_replication_applier_state.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/get_api_replication_applier_state.md" } }, "/_api/replication/applier-stop": { @@ -9105,7 +9105,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/put_api_replication_applier_stop.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/put_api_replication_applier_stop.md" } }, "/_api/replication/batch": { @@ -9138,7 +9138,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/post_batch_replication.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/post_batch_replication.md" } }, "/_api/replication/batch/{id}": { @@ -9170,7 +9170,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/delete_batch_replication.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/delete_batch_replication.md" }, "put": { "description": "\n**A JSON object with these properties is required:**\n\n - **ttl**: the time-to-live for the new batch (in seconds)\n\n\n\n\nExtends the ttl of an existing dump batch, using the batch's id and\nthe provided ttl value.\n\nIf the batch's ttl can be extended successfully, the response is empty.\n\n**Note**: on a coordinator, this request must have the query parameter\n*DBserver* which must be an ID of a DBserver.\nThe very same request is forwarded synchronously to that DBserver.\nIt is an error if this attribute is not bound in the coordinator case.\n\n", @@ -9209,7 +9209,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/put_batch_replication.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/put_batch_replication.md" } }, "/_api/replication/clusterInventory": { @@ -9240,7 +9240,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/get_api_replication_cluster_inventory.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/get_api_replication_cluster_inventory.md" } }, "/_api/replication/dump": { @@ -9329,7 +9329,7 @@ "Replication" ], "x-examples": [], - "x-filename": 
"/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/get_api_replication_dump.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/get_api_replication_dump.md" } }, "/_api/replication/inventory": { @@ -9360,7 +9360,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/put_api_replication_inventory.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/put_api_replication_inventory.md" } }, "/_api/replication/logger-first-tick": { @@ -9386,7 +9386,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/get_api_replication_logger_first_tick.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/get_api_replication_logger_first_tick.md" } }, "/_api/replication/logger-follow": { @@ -9447,7 +9447,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/get_api_replication_logger_returns.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/get_api_replication_logger_returns.md" } }, "/_api/replication/logger-state": { @@ -9470,7 +9470,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/get_api_replication_logger_return_state.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/get_api_replication_logger_return_state.md" } }, "/_api/replication/logger-tick-ranges": { @@ -9496,7 +9496,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/get_api_replication_logger_tick_ranges.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/get_api_replication_logger_tick_ranges.md" } }, "/_api/replication/make-slave": { @@ -9535,7 +9535,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/put_api_replication_makeSlave.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/put_api_replication_makeSlave.md" } }, "/_api/replication/server-id": { @@ -9558,7 +9558,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/put_api_replication_serverID.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/put_api_replication_serverID.md" } }, "/_api/replication/sync": { @@ -9597,7 +9597,7 @@ "Replication" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Replication/put_api_replication_synchronize.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Replication/put_api_replication_synchronize.md" } }, "/_api/simple/all": { @@ -9632,7 +9632,7 @@ "Simple Queries" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_all.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_all.md" } }, "/_api/simple/all-keys": { @@ -9669,7 +9669,7 @@ "Documents" ], 
"x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Documents/put_read_all_documents.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Documents/put_read_all_documents.md" } }, "/_api/simple/any": { @@ -9702,7 +9702,7 @@ "Simple Queries" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_any.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_any.md" } }, "/_api/simple/by-example": { @@ -9735,7 +9735,7 @@ "Simple Queries" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_by_example.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_by_example.md" } }, "/_api/simple/first-example": { @@ -9768,7 +9768,7 @@ "Simple Queries" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_first_example.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_first_example.md" } }, "/_api/simple/fulltext": { @@ -9801,7 +9801,7 @@ "Simple Queries" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_fulltext.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_fulltext.md" } }, "/_api/simple/lookup-by-keys": { @@ -9834,7 +9834,7 @@ "Simple Queries" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/RestLookupByKeys.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/RestLookupByKeys.md" } }, "/_api/simple/near": { @@ -9867,7 +9867,7 @@ "Simple Queries" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_near.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_near.md" } }, "/_api/simple/range": { @@ -9900,7 +9900,7 @@ "Simple Queries" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_range.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_range.md" } }, "/_api/simple/remove-by-example": { @@ -9933,7 +9933,7 @@ "Simple Queries" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_remove_by_example.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_remove_by_example.md" } }, "/_api/simple/remove-by-keys": { @@ -9966,7 +9966,7 @@ "Simple Queries" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/RestRemoveByKeys.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/RestRemoveByKeys.md" } }, "/_api/simple/replace-by-example": { @@ -9999,7 +9999,7 @@ "Simple Queries" ], "x-examples": [], - "x-filename": 
"/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_replace_by_example.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_replace_by_example.md" } }, "/_api/simple/update-by-example": { @@ -10032,7 +10032,7 @@ "Simple Queries" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_update_by_example.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_update_by_example.md" } }, "/_api/simple/within": { @@ -10065,7 +10065,7 @@ "Simple Queries" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_within.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_within.md" } }, "/_api/simple/within-rectangle": { @@ -10098,7 +10098,7 @@ "Simple Queries" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_within_rectangle.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Simple Queries/put_api_simple_within_rectangle.md" } }, "/_api/tasks": { @@ -10135,7 +10135,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/post_api_new_tasks.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/post_api_new_tasks.md" } }, "/_api/tasks/": { @@ -10159,7 +10159,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_api_tasks_all.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_api_tasks_all.md" } }, "/_api/tasks/{id}": { @@ -10199,7 +10199,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/delete_api_tasks.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/delete_api_tasks.md" }, "get": { "description": "\n\nfetches one existing task on the server specified by *id*\n\n\n#### HTTP 200\n*A json document with these Properties is returned:*\n\nThe requested task\n\n- **name**: The fully qualified name of the user function\n- **created**: The timestamp when this task was created\n- **database**: the database this task belongs to\n- **period**: this task should run each `period` seconds\n- **command**: the javascript function for this dask\n- **offset**: time offset in seconds from the created timestamp\n- **type**: What type of task is this [ `periodic`, `timed`]\n - periodic are tasks that repeat periodically\n - timed are tasks that execute once at a specific time\n- **id**: A string identifying the task\n\n\n\n\n**Example:**\n Fetching a single task by its id\n\n
shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/tasks <<EOF\n{\"id\":\"testTask\",\"command\":\"console.log('Hello from task!');\",\"offset\":10000}\nEOF\n\nshell> curl --dump - http://localhost:8529/_api/tasks/testTask\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"id\" : \"testTask\", \n  \"name\" : \"user-defined task\", \n  \"created\" : 1522936640.674124, \n  \"type\" : \"timed\", \n  \"offset\" : 10000, \n  \"command\" : \"(function (params) { console.log('Hello from task!'); } )(params);\", \n  \"database\" : \"_system\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n
\n\n\n\n\n**Example:**\n Trying to fetch a non-existing task\n\n
shell> curl --dump - http://localhost:8529/_api/tasks/non-existing-task\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"error\" : true, \n  \"code\" : 404, \n  \"errorNum\" : 1852, \n  \"errorMessage\" : \"task not found\" \n}\n
\n\n\n\n\n", @@ -10230,7 +10230,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_api_tasks.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_api_tasks.md" }, "put": { "description": "\n**A JSON object with these properties is required:**\n\n - **params**: The parameters to be passed into command\n - **offset**: Number of seconds initial delay \n - **command**: The JavaScript code to be executed\n - **name**: The name of the task\n - **period**: number of seconds between the executions\n\n\n\n\nregisters a new task with the specified id\n\n\n\n\n**Example:**\n \n\n
shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/tasks/sampleTask <<EOF\n{ \n  \"id\" : \"SampleTask\", \n  \"name\" : \"SampleTask\", \n  \"command\" : \"(function(params) { require('@arangodb').print(params); })(params)\", \n  \"params\" : { \n    \"foo\" : \"bar\", \n    \"bar\" : \"foo\" \n  }, \n  \"period\" : 2 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"id\" : \"sampleTask\", \n  \"name\" : \"SampleTask\", \n  \"created\" : 1522936640.6792622, \n  \"type\" : \"periodic\", \n  \"period\" : 2, \n  \"offset\" : 0, \n  \"command\" : \"(function (params) { (function(params) { require('@arangodb').print(params); })(params) } )(params);\", \n  \"database\" : \"_system\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n
\n\n\n\n\n", @@ -10263,7 +10263,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/put_api_new_tasks.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/put_api_new_tasks.md" } }, "/_api/transaction": { @@ -10299,7 +10299,7 @@ "Transactions" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Transactions/post_api_transaction.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Transactions/post_api_transaction.md" } }, "/_api/traversal": { @@ -10335,7 +10335,7 @@ "Graph Traversal" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Graph Traversal/HTTP_API_TRAVERSAL.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Graph Traversal/HTTP_API_TRAVERSAL.md" } }, "/_api/user": { @@ -10374,7 +10374,7 @@ "User Management" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" } }, "/_api/user/": { @@ -10397,7 +10397,7 @@ "User Management" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" } }, "/_api/user/{user}": { @@ -10432,7 +10432,7 @@ "User Management" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" }, "get": { "description": "\n\nFetches data about the specified user. You can fetch information about\nyourself or you need the *Administrate* server access level in order to\nexecute this REST call.\n\n\n\n\n**Example:**\n \n\n
shell> curl --dump - http://localhost:8529/_api/user/admin@myapp\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"user\" : \"admin@myapp\", \n  \"active\" : true, \n  \"extra\" : { \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n
\n\n\n\n\n\n\n\n", @@ -10465,7 +10465,7 @@ "User Management" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" }, "patch": { "description": "\n**A JSON object with these properties is required:**\n\n - **passwd**: The user password as a string. Specifying a password is mandatory, but\n the empty string is allowed for passwords\n - **active**: An optional flag that specifies whether the user is active. If not\n specified, this will default to true\n - **extra**: An optional JSON object with arbitrary extra data about the user.\n\n\n\n\nPartially updates the data of an existing user. The name of an existing user\nmust be specified in *user*. You need server access level *Administrate* in\norder to execute this REST call. Additionally, a user can change his/her own\ndata.\n\n\n\n\n**Example:**\n \n\n
shell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/user/admin@myapp <<EOF\n{ \n  \"passwd\" : \"secure\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"user\" : \"admin@myapp\", \n  \"active\" : true, \n  \"extra\" : { \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n
\n\n\n\n\n\n\n\n\n\n", @@ -10510,7 +10510,7 @@ "User Management" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" }, "put": { "description": "\n**A JSON object with these properties is required:**\n\n - **passwd**: The user password as a string. Specifying a password is mandatory, but\n the empty string is allowed for passwords\n - **active**: An optional flag that specifies whether the user is active. If not\n specified, this will default to true\n - **extra**: An optional JSON object with arbitrary extra data about the user.\n\n\n\n\nReplaces the data of an existing user. The name of an existing user must be\nspecified in *user*. You need server access level *Administrate* in order to\nexecute this REST call. Additionally, a user can change his/her own data.\n\n\n\n\n**Example:**\n \n\n
shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/user/admin@myapp <<EOF\n{ \n  \"passwd\" : \"secure\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"user\" : \"admin@myapp\", \n  \"active\" : true, \n  \"extra\" : { \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n
\n\n\n\n\n\n\n\n", @@ -10555,7 +10555,7 @@ "User Management" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" } }, "/_api/user/{user}/database/": { @@ -10597,7 +10597,7 @@ "User Management" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" } }, "/_api/user/{user}/database/{database}": { @@ -10640,7 +10640,7 @@ "User Management" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" } }, "/_api/user/{user}/database/{database}/{collection}": { @@ -10691,7 +10691,7 @@ "User Management" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" } }, "/_api/user/{user}/database/{dbname}": { @@ -10728,7 +10728,7 @@ "User Management" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" }, "put": { "description": "\n**A JSON object with these properties is required:**\n\n - **grant**: Use \"rw\" to set the database access level to *Administrate* .\n Use \"ro\" to set the database access level to *Access*.\n Use \"none\" to set the database access level to *No access*.\n\n\n\n\nSets the database access levels for the database *dbname* of user *user*. You\nneed the *Administrate* server access level in order to execute this REST\ncall.\n\n\n\n\n**Example:**\n \n\n
shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/user/admin@myapp/database/_system <<EOF\n{ \n  \"grant\" : \"rw\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"_system\" : \"rw\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n
\n\n\n\n\n\n\n\n", @@ -10778,7 +10778,7 @@ "User Management" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" } }, "/_api/user/{user}/database/{dbname}/{collection}": { @@ -10823,7 +10823,7 @@ "User Management" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" }, "put": { "description": "\n**A JSON object with these properties is required:**\n\n - **grant**: Use \"rw\" to set the collection level access to *Read/Write*.\n Use \"ro\" to set the collection level access to *Read Only*.\n Use \"none\" to set the collection level access to *No access*.\n\n\n\n\nSets the collection access level for the *collection* in the database *dbname*\nfor user *user*. You need the *Administrate* server access level in order to\nexecute this REST call.\n\n\n\n\n**Example:**\n \n\n
shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/user/admin@myapp/database/_system/reports <<EOF\n{ \n  \"grant\" : \"rw\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"_system/reports\" : \"rw\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n
\n\n\n\n\n\n\n\n", @@ -10881,7 +10881,7 @@ "User Management" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/User Management/README.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/User Management/README.md" } }, "/_api/version": { @@ -10913,7 +10913,7 @@ "Administration" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Administration/get_api_return.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Administration/get_api_return.md" } }, "/_api/view": { @@ -10930,7 +10930,7 @@ "Views" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/get_api_views.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/get_api_views.md" } }, "/_api/view#arangosearch": { @@ -10960,7 +10960,7 @@ "Views" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/post_api_view_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/post_api_view_iresearch.md" } }, "/_api/view/{view-name}": { @@ -10989,7 +10989,7 @@ "Views" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/delete_api_view.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/delete_api_view.md" }, "get": { "description": "\n\nThe result is an object describing the view with the following\nattributes:\n\n- *id*: The identifier of the view.\n\n- *name*: The name of the view.\n\n- *type*: The type of the view as string\n - arangosearch: ArangoSearch view\n\n- *properties* : The properties of the view.\n\n", @@ -11013,7 +11013,7 @@ "Views" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/get_api_view_name.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/get_api_view_name.md" } }, "/_api/view/{view-name}/properties": { @@ -11042,7 +11042,7 @@ "Views" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/get_api_view_properties.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/get_api_view_properties.md" } }, "/_api/view/{view-name}/properties#ArangoSearch": { @@ -11080,7 +11080,7 @@ "Views" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/patch_api_view_properties_iresearch.md" }, "put": { "description": "\n**A JSON object with these properties is required:**\n\n - **locale**: The default locale used for queries on analyzed string values (default: *C*).\n - **commit**:\n - **consolidate**:\n - **count**:\n - **threshold**: Consolidate IFF {threshold} > segment_docs{valid} / (all_segment_docs{valid} / #segments) (default: 0.85)\n - **segmentThreshold**: Apply consolidation policy IFF {segmentThreshold} >= #segments (default: 300, to disable use: 0)\n - **bytes**:\n - **threshold**: Consolidate IFF {threshold} > segment_bytes / (all_segment_bytes / #segments) (default: 0.85)\n - **segmentThreshold**: Apply consolidation policy IFF {segmentThreshold} >= #segments (default: 300, to disable 
use: 0)\n - **bytes_accum**:\n - **threshold**: Consolidate IFF {threshold} > (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes (default: 0.85)\n - **segmentThreshold**: Apply consolidation policy IFF {segmentThreshold} >= #segments (default: 300, to disable use: 0)\n - **fill**:\n - **threshold**: Consolidate IFF {threshold} > #segment_docs{valid} / (#segment_docs{valid} + #segment_docs{removed}) (default: 0.85)\n - **segmentThreshold**: Apply consolidation policy IFF {segmentThreshold} >= #segments (default: 300, to disable use: 0)\n - **commitIntervalMsec**: Wait at least this many milliseconds between committing index data changes and\n making them visible to queries (default: 60000, to disable use: 0).\n For the case where there are a lot of inserts/updates, a lower value will cause the index not to account for them until commit, and\n memory usage would continue to grow.\n For the case where there are a few inserts/updates, a higher value will impact performance and waste disk space for each\n commit call without any added benefits.\n - **cleanupIntervalStep**: Wait at least this many commits between removing unused files in the data directory (default: 10, \n to disable use: 0).\n For the case where the consolidation policies merge segments often (i.e. a lot of commit+consolidate), a lower value will cause a\n lot of disk space to be wasted.\n For the case where the consolidation policies rarely merge segments (i.e. few inserts/deletes), a higher value will impact\n performance without any added benefits.\n - **threadMaxTotal**: Maximum total number of threads (>0) for single-run tasks (default: 5).\n For the case where there are a lot of parallelizable tasks and an abundance of resources, a lower value would limit performance.\n For the case where CPU/memory resources are limited, a higher value will negatively impact performance.\n - **threadMaxIdle**: Maximum number of idle threads for single-run tasks (default: 5).\n For the case where there are a lot of short-lived asynchronous tasks, a lower value will cause a lot of thread creation/deletion calls.\n For the case where there are no short-lived asynchronous tasks, a higher value will only waste memory.\n - **links**:\n - **[collection-name]**:\n - **analyzers** (string): The list of analyzers to be used for indexing of string values (default: [\"identity\"]).\n - **[field-name]**:\n - **analyzers** (string): The list of analyzers to be used for indexing of string values (default: [\"identity\"]).\n - **[field-name]**: Specify properties for nested fields here\n - **includeAllFields**: The flag determines whether or not to index all fields on a particular level of depth (default: false).\n - **trackListPositions**: The flag determines whether or not values in lists should be treated separately (default: false).\n - **includeAllFields**: The flag determines whether or not to index all fields on a particular level of depth (default: false).\n - **trackListPositions**: The flag determines whether or not values in lists should be treated separately (default: false).\n\n\n\n\nChanges the properties of a view.\n\nOn success an object with the following attributes is returned:\n- *id*: The identifier of the view.\n- *name*: The name of the view.\n- *type*: The view type. Valid types are:\n - arangosearch: ArangoSearch view\n- *properties*: The updated properties of the view.\n\n\n\n\n**Example:**\n \n\n
shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/view/products/properties <<EOF\n{ \n  \"threadMaxIdle\" : 10 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-content-type-options: nosniff\n\n{ \n  \"id\" : \"12656\", \n  \"name\" : \"products\", \n  \"type\" : \"arangosearch\" \n}\n
\n\n\n\n\n", @@ -11116,7 +11116,7 @@ "Views" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/put_api_view_properties_iresearch.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/put_api_view_properties_iresearch.md" } }, "/_api/view/{view-name}/rename": { @@ -11145,7 +11145,7 @@ "Views" ], "x-examples": [], - "x-filename": "/var/lib/jenkins/workspace/RELEASE__BuildFrontend/Documentation/DocuBlocks/Rest/Views/put_api_view_rename.md" + "x-filename": "/home/willi/src/devel2/Documentation/DocuBlocks/Rest/Views/put_api_view_rename.md" } } },