
Merge branch 'devel' of ssh://github.com/ArangoDB/ArangoDB into devel

Max Neunhoeffer 2016-06-24 10:43:00 +02:00
commit 7da5ad642b
79 changed files with 1721 additions and 1515 deletions

View File

@ -252,6 +252,8 @@ class Builder {
// get a const reference to the Builder's Buffer object
std::shared_ptr<Buffer<uint8_t>> const& buffer() const { return _buffer; }
// steal the Builder's Buffer object. afterwards the Builder
// is unusable
std::shared_ptr<Buffer<uint8_t>> steal() {
// After a steal the Builder is broken!
std::shared_ptr<Buffer<uint8_t>> res = _buffer;
@ -259,7 +261,7 @@ class Builder {
_pos = 0;
return res;
}
uint8_t const* data() const {
return _buffer.get()->data();
}
@ -283,6 +285,7 @@ class Builder {
void clear() {
_pos = 0;
_stack.clear();
_keyWritten = false;
}
// Return a pointer to the start of the result:
@ -332,14 +335,17 @@ class Builder {
// Add a subvalue into an object from a Value:
uint8_t* add(std::string const& attrName, Value const& sub);
uint8_t* add(char const* attrName, Value const& sub);
uint8_t* add(char const* attrName, size_t attrLength, Value const& sub);
// Add a subvalue into an object from a Slice:
uint8_t* add(std::string const& attrName, Slice const& sub);
uint8_t* add(char const* attrName, Slice const& sub);
uint8_t* add(char const* attrName, size_t attrLength, Slice const& sub);
// Add a subvalue into an object from a ValuePair:
uint8_t* add(std::string const& attrName, ValuePair const& sub);
uint8_t* add(char const* attrName, ValuePair const& sub);
uint8_t* add(char const* attrName, size_t attrLength, ValuePair const& sub);
// Add all subkeys and subvalues into an object from an ObjectIterator
// and intentionally leave the object open
@ -365,6 +371,7 @@ class Builder {
}
}
try {
checkKeyIsString(Slice(sub).isString());
auto oldPos = _pos;
reserveSpace(1 + sizeof(void*));
// store pointer. this doesn't need to be portable
@ -627,6 +634,11 @@ private:
template <typename T>
uint8_t* addInternal(char const* attrName, T const& sub) {
return addInternal<T>(attrName, strlen(attrName), sub);
}
template <typename T>
uint8_t* addInternal(char const* attrName, size_t attrLength, T const& sub) {
bool haveReported = false;
if (!_stack.empty()) {
ValueLength& tos = _stack.back();
@ -640,8 +652,6 @@ private:
haveReported = true;
}
ValueLength attrLength = strlen(attrName);
try {
if (options->attributeTranslator != nullptr) {
// check if a translation for the attribute name exists

View File

@ -42,6 +42,12 @@
#include "velocypack/Value.h"
#include "velocypack/ValueType.h"
#ifndef VELOCYPACK_XXHASH
#ifndef VELOCYPACK_FASTHASH
#define VELOCYPACK_XXHASH
#endif
#endif
#ifdef VELOCYPACK_XXHASH
// forward for XXH64 function declared elsewhere
extern "C" unsigned long long XXH64(void const*, size_t, unsigned long long);

View File

@ -55,6 +55,8 @@ using VPackObjectIterator = arangodb::velocypack::ObjectIterator;
using VPackBuilder = arangodb::velocypack::Builder;
using VPackObjectBuilder = arangodb::velocypack::ObjectBuilder;
using VPackArrayBuilder = arangodb::velocypack::ArrayBuilder;
using VPackBuilderNonDeleter = arangodb::velocypack::BuilderNonDeleter;
using VPackBuilderContainer = arangodb::velocypack::BuilderContainer;
#endif
#endif
@ -149,6 +151,13 @@ using VPackSlimBuffer = arangodb::velocypack::SliceContainer;
#endif
#endif
#ifdef VELOCYPACK_VALIDATOR_H
#ifndef VELOCYPACK_ALIAS_VALIDATOR
#define VELOCYPACK_ALIAS_VALIDATOR
using VPackValidator = arangodb::velocypack::Validator;
#endif
#endif
#ifdef VELOCYPACK_VALUE_H
#ifndef VELOCYPACK_ALIAS_VALUE
#define VELOCYPACK_ALIAS_VALUE

View File

@ -162,7 +162,6 @@ static inline int64_t toInt64(uint64_t v) noexcept {
// specified length, starting at the specified byte offset
template <typename T>
static inline T readInteger(uint8_t const* start, ValueLength length) noexcept {
VELOCYPACK_ASSERT(length > 0);
uint64_t value = 0;
uint64_t x = 0;
uint8_t const* end = start + length;

View File

@ -33,7 +33,7 @@
#include "velocypack/Sink.h"
using namespace arangodb::velocypack;
std::string Builder::toString() const {
Options options;
options.prettyPrint = true;
@ -799,6 +799,10 @@ uint8_t* Builder::add(char const* attrName, Value const& sub) {
return addInternal<Value>(attrName, sub);
}
uint8_t* Builder::add(char const* attrName, size_t attrLength, Value const& sub) {
return addInternal<Value>(attrName, attrLength, sub);
}
uint8_t* Builder::add(std::string const& attrName, ValuePair const& sub) {
return addInternal<ValuePair>(attrName, sub);
}
@ -807,6 +811,10 @@ uint8_t* Builder::add(char const* attrName, ValuePair const& sub) {
return addInternal<ValuePair>(attrName, sub);
}
uint8_t* Builder::add(char const* attrName, size_t attrLength, ValuePair const& sub) {
return addInternal<ValuePair>(attrName, attrLength, sub);
}
uint8_t* Builder::add(std::string const& attrName, Slice const& sub) {
return addInternal<Slice>(attrName, sub);
}
@ -814,6 +822,10 @@ uint8_t* Builder::add(std::string const& attrName, Slice const& sub) {
uint8_t* Builder::add(char const* attrName, Slice const& sub) {
return addInternal<Slice>(attrName, sub);
}
uint8_t* Builder::add(char const* attrName, size_t attrLength, Slice const& sub) {
return addInternal<Slice>(attrName, attrLength, sub);
}
// Add all subkeys and subvalues into an object from an ObjectIterator
// and intentionally leave the object open

View File

@ -1 +1,42 @@
!CHAPTER Common Errors
!SECTION String concatenation
In AQL, strings must be concatenated using the [CONCAT()](Functions/String.md#concat)
function. Joining them together with the `+` operator is not supported. Especially
as a JavaScript programmer, it is easy to fall into this trap:
```js
RETURN "foo" + "bar" // [ 0 ]
RETURN "foo" + 123 // [ 123 ]
RETURN "123" + 200 // [ 323 ]
```
The arithmetic plus operator expects numbers as operands and will try to implicitly
cast them to numbers if they are of a different type. `"foo"` and `"bar"` are cast
to `0` and then added together (the result is still zero). If an actual number is
added, that number is returned (adding zero does not change the result). If the
string is a valid string representation of a number, it is cast to that number.
Thus, adding `"123"` and `200` results in the two numbers being added, yielding `323`.
To concatenate elements (with implicit casting to string for non-string values), do:
```js
RETURN CONCAT("foo", "bar") // [ "foobar" ]
RETURN CONCAT("foo", 123) // [ "foo123" ]
RETURN CONCAT("123", 200) // [ "123200" ]
```
<!--
Rename to Error Sources?
Include article about parameter injection from cookbook?
Quote marks around bind parameter placeholders
https://github.com/arangodb/arangodb/issues/1634#issuecomment-167808660
FILTER HAS(doc, "attr") instead of FILTER doc.attr / FILTER doc.attr != null
collection ... not found error, e.g. access of variable after COLLECT (no longer existing)
-->

View File

@ -54,39 +54,11 @@ FOR u IN users
```json
[
{
"age": 37,
"users": [
"John",
"Sophia"
]
},
{
"age": 36,
"users": [
"Fred",
"Emma"
]
},
{
"age": 34,
"users": [
"Madison"
]
},
{
"age": 33,
"users": [
"Chloe",
"Michael"
]
},
{
"age": 32,
"users": [
"Alexander"
]
}
{ "age": 37, "users": [ "John", "Sophia" ] },
{ "age": 36, "users": [ "Fred", "Emma" ] },
{ "age": 34, "users": [ "Madison" ] },
{ "age": 33, "users": [ "Chloe", "Michael" ] },
{ "age": 32, "users": [ "Alexander" ] }
]
```
@ -132,30 +104,12 @@ FOR u IN users
```json
[
{
"ageGroup": 35,
"gender": "f"
},
{
"ageGroup": 35,
"gender": "m"
},
{
"ageGroup": 30,
"gender": "f"
},
{
"ageGroup": 30,
"gender": "m"
},
{
"ageGroup": 25,
"gender": "f"
},
{
"ageGroup": 25,
"gender": "m"
}
{ "ageGroup": 35, "gender": "f" },
{ "ageGroup": 35, "gender": "m" },
{ "ageGroup": 30, "gender": "f" },
{ "ageGroup": 30, "gender": "m" },
{ "ageGroup": 25, "gender": "f" },
{ "ageGroup": 25, "gender": "m" }
]
```
@ -180,36 +134,12 @@ FOR u IN users
```json
[
{
"ageGroup": 35,
"gender": "f",
"numUsers": 2
},
{
"ageGroup": 35,
"gender": "m",
"numUsers": 2
},
{
"ageGroup": 30,
"gender": "f",
"numUsers": 4
},
{
"ageGroup": 30,
"gender": "m",
"numUsers": 4
},
{
"ageGroup": 25,
"gender": "f",
"numUsers": 2
},
{
"ageGroup": 25,
"gender": "m",
"numUsers": 2
}
{ "ageGroup": 35, "gender": "f", "numUsers": 2 },
{ "ageGroup": 35, "gender": "m", "numUsers": 2 },
{ "ageGroup": 30, "gender": "f", "numUsers": 4 },
{ "ageGroup": 30, "gender": "m", "numUsers": 4 },
{ "ageGroup": 25, "gender": "f", "numUsers": 2 },
{ "ageGroup": 25, "gender": "m", "numUsers": 2 }
]
```

View File

@ -64,8 +64,8 @@ CONCAT_SEPARATOR(", ", [ "foo", "bar", "baz" ])
CONCAT_SEPARATOR(", ", [ "foo", [ "b", "a", "r" ], "baz" ])
// [ "foo, b,a,r, baz" ]
/* "1-2-3-4-5" */
CONCAT_SEPARATOR("-", [1, 2, 3, null], [4, null, 5])
// "1-2-3-4-5"
```
!SUBSECTION CONTAINS()

View File

@ -160,6 +160,9 @@ FOR vertex[, edge[, path]]
Instead of `GRAPH graphName` you may specify a list of edge collections. Vertex
collections are determined by the edges in the edge collections. The rest of the
behavior is similar to the named version.
If the same edge collection is specified multiple times, it behaves as if it
were specified only once. Specifying the same edge collection more than once is
only allowed if the occurrences do not use conflicting traversal directions.
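For illustration, a minimal sketch of such a traversal (the edge collection
names `edges1` and `edges2` and the start vertex are made up):

```js
FOR v, e IN 1..2 OUTBOUND "vertices/start" edges1, edges2
  RETURN { vertex: v, edge: e }
```

The vertex collections to load documents from are inferred from the edges
found in `edges1` and `edges2`.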
!SUBSECTION Traversing in mixed directions

View File

@ -10,7 +10,8 @@
</a>
</div>
<select class="arangodb-version-switcher">
<option value="devel">VERSION_NUMBER</option>
<option value="devel">devel</option>
<option value="3.0">v3.0</option>
<option value="2.8">v2.8</option>
<option value="2.7">v2.7</option>
<option value="2.6">v2.6</option>
@ -25,16 +26,16 @@
</div>
<ul class="arangodb-navmenu">
<li>
<a href="BASE_PATH/Manual/index.html">Manual</a>
<a href="#" data-book="Manual">Manual</a>
</li>
<li class="active-tab">
<a href="BASE_PATH/AQL/index.html">AQL</a>
<a href="#" data-book="AQL">AQL</a>
</li>
<li>
<a href="BASE_PATH/HTTP/index.html">HTTP</a>
<a href="#" data-book="HTTP">HTTP</a>
</li>
<li>
<a href="https://docs.arangodb.com/cookbook">Cookbook</a>
<a href="#" data-book="cookbook">Cookbook</a>
</li>
<li class="socialIcons">
<a href="https://github.com/ArangoDB/ArangoDB/issues" target="blank" name="github">

View File

@ -35,8 +35,29 @@ function appendHeader() {
};
addGoogleSrc();
$("#version-switcher").on("change", function(e) {
window.location.href = "https://docs.arangodb.com/" + e.target.value;
$(".arangodb-navmenu a:lt(4)").on("click", function(e) {
e.preventDefault();
var urlSplit = gitbook.state.root.split("/");
urlSplit.pop(); // ""
urlSplit.pop(); // e.g. "Manual"
window.location.href = urlSplit.join("/") + "/" + e.target.getAttribute("data-book") + "/index.html";
});
var bookVersion = gitbook.state.root.match(/\/(\d\.\d|devel)\//);
if (bookVersion) {
$(".arangodb-version-switcher").val(bookVersion[1]);
}
$(".arangodb-version-switcher").on("change", function(e) {
var urlSplit = gitbook.state.root.split("/");
if (urlSplit.length == 6) {
urlSplit.pop(); // ""
var currentBook = urlSplit.pop(); // e.g. "Manual"
urlSplit.pop(); // e.g. "3.0"
window.location.href = urlSplit.join("/") + "/" + e.target.value + "/" + currentBook + "/";
} else {
window.location.href = "https://docs.arangodb.com/" + e.target.value;
}
});
});

View File

@ -10,7 +10,8 @@
</a>
</div>
<select class="arangodb-version-switcher">
<option value="devel">VERSION_NUMBER</option>
<option value="devel">devel</option>
<option value="3.0">v3.0</option>
<option value="2.8">v2.8</option>
<option value="2.7">v2.7</option>
<option value="2.6">v2.6</option>
@ -25,16 +26,16 @@
</div>
<ul class="arangodb-navmenu">
<li>
<a href="BASE_PATH/Manual/index.html">Manual</a>
<a href="#" data-book="Manual">Manual</a>
</li>
<li>
<a href="BASE_PATH/AQL/index.html">AQL</a>
<a href="#" data-book="AQL">AQL</a>
</li>
<li class="active-tab">
<a href="BASE_PATH/HTTP/index.html">HTTP</a>
<a href="#" data-book="HTTP">HTTP</a>
</li>
<li>
<a href="https://docs.arangodb.com/cookbook">Cookbook</a>
<a href="#" data-book="cookbook">Cookbook</a>
</li>
<li class="socialIcons">
<a href="https://github.com/ArangoDB/ArangoDB/issues" target="blank" name="github">

View File

@ -35,8 +35,29 @@ function appendHeader() {
};
addGoogleSrc();
$(".arangodb-navmenu a:lt(4)").on("click", function(e) {
e.preventDefault();
var urlSplit = gitbook.state.root.split("/");
urlSplit.pop(); // ""
urlSplit.pop(); // e.g. "Manual"
window.location.href = urlSplit.join("/") + "/" + e.target.getAttribute("data-book") + "/index.html";
});
var bookVersion = gitbook.state.root.match(/\/(\d\.\d|devel)\//);
if (bookVersion) {
$(".arangodb-version-switcher").val(bookVersion[1]);
}
$(".arangodb-version-switcher").on("change", function(e) {
window.location.href = "https://docs.arangodb.com/" + e.target.value;
var urlSplit = gitbook.state.root.split("/");
if (urlSplit.length == 6) {
urlSplit.pop(); // ""
var currentBook = urlSplit.pop(); // e.g. "Manual"
urlSplit.pop(); // e.g. "3.0"
window.location.href = urlSplit.join("/") + "/" + e.target.value + "/" + currentBook + "/";
} else {
window.location.href = "https://docs.arangodb.com/" + e.target.value;
}
});
});

View File

@ -226,5 +226,5 @@ Note that while a slave has only partly executed a transaction from the master,
it will hold a write lock on the collections involved in the transaction.
You may also want to check the master and slave states via the HTTP APIs
(see [HTTP Interface for Replication](../../../HTTP/Replications/index.html)).
(see [HTTP Interface for Replication](../../../../HTTP/Replications/index.html)).

View File

@ -2,7 +2,8 @@
!SUBSECTION Recommended major upgrade procedure
*TODO*
To upgrade an existing ArangoDB 2.x to 3.0 please use the procedure described
[here](../../Administration/Upgrading/Upgrading30.md).
!SUBSECTION Recommended minor upgrade procedure

View File

@ -37,8 +37,8 @@ arangodump --server.database _system --server.username myuser --server.password
The dumps produced by `arangodump` can now be imported into ArangoDB 3.0 using
the 3.0 version of `arangorestore`:
# in 3.0
```
# in 3.0
arangorestore --server.database _system --input-directory dump-system
arangorestore --server.database mydb --input-directory dump-mydb
...
@ -109,3 +109,16 @@ require("@arangodb/users").remove("myuser");
require("@arangodb/users").all();
```
!SECTION Foxx applications
The dump/restore procedure described above will not export and re-import Foxx applications.
In order to move these from 2.8 to 3.0, Foxx applications should be exported as zip files
via the 2.8 web interface.
The zip files can then be uploaded in the "Services" section in the ArangoDB 3.0 web interface.
Applications may need to be adjusted manually to run in 3.0. Please consult the
[migration guide for Foxx apps](../../Foxx/Migrating2x/README.md).
An alternative way of moving Foxx apps into 3.0 is to copy the source directory of a 2.8 Foxx
application manually into the 3.0 Foxx apps directory for the target database (which is normally
`/var/lib/arangodb3-apps/_db/<dbname>/` but the exact location is platform-specific).
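As a sketch, on a typical Linux installation this could look like the following;
the 2.8 apps path and the database and app names here are assumptions, not
canonical values:

```
# paths and names are illustrative only
cp -r /var/lib/arangodb-apps/_db/mydb/myapp \
      /var/lib/arangodb3-apps/_db/mydb/myapp
```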

View File

@ -1 +0,0 @@
!CHAPTER Modeling Relationships

View File

@ -2,7 +2,7 @@
ArangoDB 3 continues to support Foxx services written for ArangoDB 2.8 by running them in a special legacy compatibility mode that provides access to some of the modules and APIs no longer provided in 3.0 and beyond.
**Note:** Legacy compatibility mode is strictly intended as a temporary stop-gap solution for supporting existing services while [upgrading to ArangoDB 3.0](Migrating2x.md) and should not be considered a permanent feature of ArangoDB or Foxx.
**Note:** Legacy compatibility mode is strictly intended as a temporary stop-gap solution for supporting existing services while [upgrading to ArangoDB 3.0](Migrating2x/README.md) and should not be considered a permanent feature of ArangoDB or Foxx.
In order to mark an existing service as a legacy service, just make sure the following attribute is defined in the service manifest:
@ -67,4 +67,4 @@ The `@arangodb/foxx` module also provides the same exports as in 2.8, namely:
Any feature not supported in 2.8 will also not work in legacy compatibility mode. When migrating from an older version of ArangoDB it is a good idea to migrate to ArangoDB 2.8 first for an easier upgrade path.
Additionally please note the differences laid out in the section titled ["Migrating from pre-2.8"](Migrating2x.md#migrating-from-pre28) in the migration guide.
Additionally please note the differences laid out in the chapter [*Migrating from pre-2.8*](Migrating2x/Wayback.md) in the migration guide.

View File

@ -1,895 +0,0 @@
!CHAPTER Migrating 2.x services to 3.0
When migrating services from older versions of ArangoDB it is generally recommended you make sure they work in [legacy compatibility mode](LegacyMode.md), which can also serve as a stop-gap solution.
This chapter outlines the major differences in the Foxx API between ArangoDB 2.8 and ArangoDB 3.0.
!SECTION Migrating from pre-2.8
When migrating from a version older than ArangoDB 2.8 please note that starting with ArangoDB 2.8 the behaviour of the `require` function more closely mimics the behaviour observed in Node.js and module bundlers for browsers, e.g.:
In a file `/routes/examples.js` (relative to the root folder of the service):
* `require('./my-module')` will be resolved by trying the following in order:
1. `/routes/my-module` (relative to service root)
2. `/routes/my-module.js` (relative to service root)
3. `/routes/my-module.json` (relative to service root)
4. `/routes/my-module/index.js` (relative to service root)
5. `/routes/my-module/index.json` (relative to service root)
* `require('lodash')` will be resolved by trying the following in order:
1. `/routes/node_modules/lodash` (relative to service root)
2. `/node_modules/lodash` (relative to service root)
3. ArangoDB module `lodash`
4. Node compatibility module `lodash`
5. Bundled NPM module `lodash`
* `require('/abs/path')` will be resolved by trying the following in order:
1. `/abs/path` (relative to file system root)
2. `/abs/path.js` (relative to file system root)
3. `/abs/path.json` (relative to file system root)
4. `/abs/path/index.js` (relative to file system root)
5. `/abs/path/index.json` (relative to file system root)
This behaviour is incompatible with the source code generated by the Foxx generator in the web interface before ArangoDB 2.8.
**Note:** The `org/arangodb` module is aliased to the new name `@arangodb` in ArangoDB 3.0.0 and the `@arangodb` module was aliased to the old name `org/arangodb` in ArangoDB 2.8.0. Either one will work in 2.8 and 3.0 but outside of legacy services you should use `@arangodb` going forward.
!SUBSECTION Foxx queue
In ArangoDB 2.6 Foxx introduced a new way to define queued jobs using Foxx scripts to replace the function-based job type definitions which were causing problems when restarting the server. The function-based jobs have been removed in 2.7 and are no longer supported at all.
!SUBSECTION CoffeeScript
ArangoDB 3.0 no longer provides built-in support for CoffeeScript source files, even in legacy compatibility mode. If you want to use an alternative language like CoffeeScript, make sure to pre-compile the raw source files to JavaScript and use the compiled JavaScript files in the service.
!SUBSECTION The request module
The `@arangodb/request` module, when used with the `json` option, previously overwrote the string in the `body` property of the response object with the parsed JSON body. In 2.8 this was changed so that the parsed JSON body is added as the `json` property of the response object, in addition to overwriting the `body` property. In 3.0 and later (including legacy compatibility mode) the `body` property is no longer overwritten; use the `json` property instead. Note that this only affects code using the `json` option when making the request.
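A minimal sketch of the affected pattern (the endpoint URL is made up):

```js
'use strict';
const request = require('@arangodb/request');
const res = request({ url: 'http://example.com/api/items', json: true });
// 2.8: the parsed object was available as both res.body and res.json.
// 3.0: res.body remains the raw string; use the parsed res.json instead.
const data = res.json;
```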
!SUBSECTION Bundled NPM modules
The bundled NPM modules have been upgraded and may include backwards-incompatible changes, especially the API of `joi` has changed several times. If in doubt you should bundle your own versions of these modules to ensure specific versions will be used.
The utility module `lodash` is now available and should be used instead of `underscore`, but both modules will continue to be provided.
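A one-line sketch of the preferred import going forward:

```js
// prefer lodash over underscore in new code; both remain available
const _ = require('lodash');
```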
!SECTION Manifest
Many of the fields that were required in ArangoDB 2.x are now optional and can be safely omitted.
To avoid compatibility problems with future versions of ArangoDB you should always specify the `engines` field, e.g.:
```json
{
"engines": {
"arangodb": "^3.0.0"
}
}
```
!SUBSECTION Controllers & exports
Previously Foxx distinguished between `exports` and `controllers`, each of which could be specified as an object. In ArangoDB 3.0 these have been merged into a single `main` field specifying an entry file.
The easiest way to migrate services using multiple exports and/or controllers is to create a separate entry file that imports these files:
Old (manifest.json):
```json
{
"exports": {
"doodads": "doodads.js",
"dingbats": "dingbats.js"
},
"controllers": {
"/doodads": "routes/doodads.js",
"/dingbats": "routes/dingbats.js",
"/": "routes/root.js"
}
}
```
New (manifest.json):
```json
{
"main": "index.js"
}
```
New (index.js):
```js
'use strict';
module.context.use('/doodads', require('./routes/doodads'));
module.context.use('/dingbats', require('./routes/dingbats'));
module.context.use('/', require('./routes/root'));
module.exports = {
doodads: require('./doodads'),
dingbats: require('./dingbats')
};
```
!SUBSECTION Index redirect
If you previously did not define the `defaultDocument` field, please note that in ArangoDB 3.0 the field will no longer default to the value `index.html` when omitted:
Old:
```json
{
// no defaultDocument
}
```
New:
```json
{
"defaultDocument": "index.html"
}
```
This also means it is no longer necessary to specify the `defaultDocument` field with an empty value to prevent the redirect and be able to serve requests at the `/` (root) path of the mount point:
Old:
```json
{
"defaultDocument": ""
}
```
New:
```json
{
// no defaultDocument
}
```
!SUBSECTION Assets
The `assets` field is no longer supported in ArangoDB 3.0 outside of legacy compatibility mode.
If you previously used the field to serve individual files as-is you can simply use the `files` field instead:
Old:
```json
{
"assets": {
"client.js": {
"files": ["assets/client.js"],
"contentType": "application/javascript"
}
}
}
```
New:
```json
{
"files": {
"client.js": {
"path": "assets/client.js",
"type": "application/javascript"
}
}
}
```
If you relied on being able to specify multiple files that should be concatenated, you will have to use build tools outside of ArangoDB to prepare these files accordingly.
!SUBSECTION Root element
The `rootElement` field is no longer supported and has been removed entirely.
If your controllers relied on this field being available you need to adjust your schemas and routes to be able to handle the full JSON structure of incoming documents.
!SUBSECTION System services
The `isSystem` field is no longer supported. The presence or absence of the field had no effect in most recent versions of ArangoDB 2.x and has now been removed entirely.
!SECTION The application context
The global `applicationContext` variable available in Foxx modules has been replaced with the `context` attribute of the `module` variable. For consistency it is now referred to as the *service* context throughout this documentation.
Some methods of the service context have changed in ArangoDB 3.0:
* `fileName()` now behaves like `path()` did in ArangoDB 2.x
* `path()` has been removed (use `fileName()` instead)
* `foxxFileName()` has been removed (use `fileName()` instead)
Additionally the `version` and `name` attributes have been removed and can now only be accessed via the `manifest` attribute (as `manifest.version` and `manifest.name`). Note that the corresponding manifest fields are now optional and may be omitted.
The `options` attribute has also been removed as it should be considered an implementation detail. You should instead access the `dependencies` and `configuration` attributes directly.
The internal `_prefix` attribute (which was an alias for `basePath`) and the internal `comment` and `clearComments` methods (which were used by the magical documentation comments in ArangoDB 2.x) have also been removed.
The internal `_service` attribute (which provides access to the service itself) has been renamed to `service`.
!SECTION Repositories and models
Previously Foxx was heavily built around the concept of repositories and models, which provided complex but rarely necessary abstractions on top of ArangoDB collections and documents. In ArangoDB 3.0 these have been removed entirely.
!SUBSECTION Repositories vs collections
Repositories mostly wrapped methods that already existed on ArangoDB collection objects and primarily dealt with converting between plain ArangoDB documents and Foxx model instances. In ArangoDB 3.0 you can simply use these collections directly and treat documents as plain JavaScript objects.
Old:
```js
'use strict';
const Foxx = require('org/arangodb/foxx');
const myRepo = new Foxx.Repository(
applicationContext.collection('myCollection'),
{model: Foxx.Model}
);
// ...
const models = myRepo.byExample({color: 'green'});
res.json(models.map(function (model) {
return model.forClient();
}));
```
New:
```js
'use strict';
const myDocs = module.context.collection('myCollection');
// ...
const docs = myDocs.byExample({color: 'green'});
res.json(docs);
```
!SUBSECTION Schema validation
The main purpose of models in ArangoDB 2.x was to validate incoming data using joi schemas. In more recent versions of ArangoDB 2.x it was already possible to pass these schemas directly in most places where a model was expected as an argument. The only difference is that schemas should now be considered the default.
If you previously relied on the automatic validation of Foxx model instances when setting attributes or instantiating models from untrusted data, you can simply use the schema's `validate` method directly.
Old:
```js
'use strict';
const joi = require('joi');
const mySchema = {
name: joi.string().required(),
size: joi.number().required()
};
const Foxx = require('org/arangodb/foxx');
const MyModel = Foxx.Model.extend({schema: mySchema});
// ...
const model = new MyModel(req.json());
if (!model.isValid) {
res.status(400);
res.write('Bad request');
return;
}
```
New:
```js
'use strict';
const joi = require('joi');
// Note this is now wrapped in a joi.object()
const mySchema = joi.object({
name: joi.string().required(),
size: joi.number().required()
}).required();
// ...
const result = mySchema.validate(req.body);
if (result.error) {
res.status(400);
res.write('Bad request');
return;
}
```
!SUBSECTION Migrating models
While most use cases for models can now be replaced with plain joi schemas, there is still the concept of a "model" in Foxx in ArangoDB 3.0 although it is quite different from Foxx models in ArangoDB 2.x.
A model in Foxx now refers to a plain JavaScript object with an optional `schema` attribute and the optional methods `forClient` and `fromClient`. Models can be used instead of plain joi schemas to define request and response bodies but there are no model "instances" in ArangoDB 3.0.
Old:
```js
'use strict';
const _ = require('underscore');
const joi = require('joi');
const Foxx = require('org/arangodb/foxx');
const MyModel = Foxx.Model.extend({
schema: {
name: joi.string().required(),
size: joi.number().required()
},
forClient () {
return _.omit(this.attributes, ['_key', '_id', '_rev']);
}
});
// ...
ctrl.get(/* ... */)
.bodyParam('body', {type: MyModel});
```
New:
```js
'use strict';
const _ = require('lodash');
const joi = require('joi');
const MyModel = {
schema: joi.object({
name: joi.string().required(),
size: joi.number().required()
}).required(),
forClient (data) {
return _.omit(data, ['_key', '_id', '_rev']);
}
};
// ...
router.get(/* ... */)
.body(MyModel);
```
!SUBSECTION Triggers
When saving, updating, replacing or deleting models in ArangoDB 2.x using the repository methods the repository and model would fire events that could be subscribed to in order to perform side-effects.
Note that even in 2.x these events would not fire when using queries or manipulating documents in any other way than using the specific repository methods that operated on individual documents.
This behaviour is no longer available in ArangoDB 3.0 but can be emulated by using an `EventEmitter` directly if it is not possible to solve the problem differently:
Old:
```js
'use strict';
const Foxx = require('org/arangodb/foxx');
const MyModel = Foxx.Model.extend({
// ...
}, {
afterRemove () {
console.log(this.get('name'), 'was removed');
}
});
// ...
const model = myRepo.firstExample({name: 'myName'});
myRepo.remove(model);
// -> "myName was removed successfully"
```
New:
```js
'use strict';
const EventEmitter = require('events');
const emitter = new EventEmitter();
emitter.on('afterRemove', function (doc) {
console.log(doc.name, 'was removed');
});
// ...
const doc = myDocs.firstExample({name: 'myName'});
myDocs.remove(doc);
emitter.emit('afterRemove', doc);
// -> "myName was removed successfully"
```
Or simply:
```js
'use strict';
function afterRemove(doc) {
console.log(doc.name, 'was removed');
}
// ...
const doc = myDocs.firstExample({name: 'myName'});
myDocs.remove(doc);
afterRemove(doc);
// -> "myName was removed successfully"
```
!SECTION Controllers vs routers
Foxx Controllers have been replaced with [routers](Router/README.md). This is more than a cosmetic change as there are significant differences in behaviour:
Controllers were automatically mounted when the file defining them was executed. Routers need to be explicitly mounted using the `module.context.use` method. Routers can also be exported, imported and even nested. This makes it easier to split up complex routing trees across multiple files.
Old:
```js
'use strict';
const Foxx = require('org/arangodb/foxx');
const ctrl = new Foxx.Controller(applicationContext);
ctrl.get('/hello', function (req, res) {
// ...
});
```
New:
```js
'use strict';
const createRouter = require('org/arangodb/foxx/router');
const router = createRouter();
// If you are importing this file from your entry file ("main"):
module.exports = router;
// Otherwise: module.context.use(router);
router.get('/hello', function (req, res) {
// ...
});
```
Some general changes in behaviour that might trip you up:
* When specifying path parameters with schemas Foxx will now ignore the route if the schema does not match (i.e. `/hello/foxx` will no longer match `/hello/:num` if `num` specifies a schema that doesn't match the value `"foxx"`). With controllers this could previously result in users seeing a 400 (bad request) error when they should instead be served a 404 (not found) response.
* When a request is made with an HTTP verb not supported by an endpoint, Foxx will now respond with a 405 (method not allowed) error with an appropriate `Allow` header listing the supported HTTP verbs for that endpoint.
* Foxx will no longer parse your JSDoc comments to generate route documentation (use the `summary` and `description` methods of the endpoint instead).
* The `apiDocumentation` method now lives on the service context and behaves slightly differently.
* There is no router equivalent for the `activateAuthentication` and `activateSessions` methods. Instead you should use the session middleware (see the section on sessions below).
* There is no `del` alias for the `delete` method on routers. It has always been safe to use keywords as method names in Foxx, so the use of this alias was already discouraged before.
* The `allRoutes` proxy is no longer available on routers but can easily be replaced with middleware or child routers.
!SUBSECTION Before, after and around
The `before`, `after` and `around` methods can easily be replaced by middleware:
Old:
```js
let start;
ctrl.before(function (req, res) {
start = Date.now();
});
ctrl.after(function (req, res) {
console.log('Request handled in ', (Date.now() - start), 'ms');
});
```
New:
```js
router.use(function (req, res, next) {
let start = Date.now();
next();
console.log('Request handled in ', (Date.now() - start), 'ms');
});
```
Note that unlike `around`, middleware receives the `next` function as the *third* argument (the "opts" argument has no equivalent).
!SUBSECTION The request context
When defining a route on a controller the controller would return an object called *request context*. Routers return a similar object called *endpoint*. Routers also return endpoints when mounting child routers or middleware, as does the `use` method of the service context.
The main differences between the new endpoints and the objects returned by controllers in previous versions of ArangoDB are:
* `bodyParam` is now simply called `body`; it is no longer necessary or possible to give the body a name and the request body will not show up in the request parameters. It's also possible to specify a MIME type.
* `body`, `queryParam` and `pathParam` now take positional arguments instead of an object. For specifics see the [endpoint documentation](Router/Endpoints.md).
* `notes` is now called `description` and takes a single string argument.
* `onlyIf` and `onlyIfAuthenticated` are no longer available; they can be emulated with middleware if necessary:
Old:
```js
ctrl.get(/* ... */)
.onlyIf(function (req) {
if (!req.user) {
throw new Error('Not authenticated!');
}
});
```
New:
```js
router.use(function (req, res, next) {
if (!req.arangoUser) {
res.throw(403, 'Not authenticated!');
}
next();
});
router.get(/* ... */);
```
!SUBSECTION Error handling
The `errorResponse` method provided by controller request contexts has no equivalent in router endpoints. If you want to handle specific error types with specific status codes you need to catch them explicitly, either in the route or in a middleware:
Old:
```js
ctrl.get('/puppies', function (req, res) {
// Exception is thrown here
})
.errorResponse(TooManyPuppiesError, 400, 'Something went wrong!');
```
New:
```js
router.get('/puppies', function (req, res) {
try {
// Exception is thrown here
} catch (e) {
if (!(e instanceof TooManyPuppiesError)) {
throw e;
}
res.throw(400, 'Something went wrong!');
}
})
// The "error" method merely documents the meaning
// of the status code and has no other effect.
.error(400, 'Thrown if there are too many puppies.');
```
Note that errors created with `http-errors` are still handled by Foxx intelligently. In fact `res.throw` is just a helper method for creating and throwing these errors.
!SUBSECTION Request objects
The names of some attributes of the request object have been adjusted to more closely align with those of the corresponding methods on the endpoint objects and established conventions in other JavaScript frameworks:
* `req.urlParameters` is now called `req.pathParams`
* `req.parameters` is now called `req.queryParams`
* `req.params()` is now called `req.param()`
* `req.requestType` is now called `req.method`
* `req.compatibility` is now called `req.arangoVersion`
* `req.user` is now called `req.arangoUser`
Some attributes have been removed or changed:
* `req.cookies` has been removed entirely (use `req.cookie(name)`)
* `req.requestBody` has been removed entirely (see below)
* `req.suffix` is now a string rather than an array
Additionally the `req.server` and `req.client` attributes are no longer available. The information is now exposed in a way that can (optionally) transparently handle proxy forwarding headers:
* `req.hostname` defaults to `req.server.address`
* `req.port` defaults to `req.server.port`
* `req.remoteAddress` defaults to `req.client.address`
* `req.remotePort` defaults to `req.client.port`
Finally, the `req.cookie` method now takes the `signed` options directly.
Old:
```js
const sid = req.cookie('sid', {
signed: {
secret: 'keyboardcat',
algorithm: 'sha256'
}
});
```
New:
```js
const sid = req.cookie('sid', {
secret: 'keyboardcat',
algorithm: 'sha256'
});
```
!SUBSUBSECTION Request bodies
`req.body` is no longer a method and no longer automatically parses JSON request bodies unless a request body was defined for the endpoint. `req.rawBody` now corresponds to the `req.rawBodyBuffer` of ArangoDB 2.x and is also no longer a method.
Old:
```js
ctrl.post('/', function (req, res) {
const data = req.body();
// ...
});
```
New:
```js
router.post('/', function (req, res) {
const data = req.body;
// ...
})
.body(['json']);
```
Or simply:
```js
const joi = require('joi');
router.post('/', function (req, res) {
const data = req.body;
// ...
})
.body(joi.object().optional());
```
!SUBSUBSECTION Multipart requests
The `req.requestParts` method has been removed entirely. If you need to accept multipart request bodies, you can simply define the request body using a multipart MIME type like `multipart/form-data`:
Old:
```js
ctrl.post('/', function (req, res) {
const parts = req.requestParts();
// ...
});
```
New:
```js
router.post('/', function (req, res) {
const parts = req.body;
// ...
})
.body(['multipart/form-data']);
```
!SUBSECTION Response objects
The response object has a lot of new methods in ArangoDB 3.0 but otherwise remains similar to the response object of previous versions:
The `res.send` method behaves very differently from how the method with the same name behaved in ArangoDB 2.x: the conversion now takes the response body definition of the route into account. There is a new method `res.write` that implements the old behaviour.
Note that consecutive calls to `res.write` will append to the response body rather than replacing it the way `res.send` does.
The `res.contentType` property is also no longer available. If you want to set the MIME type of the response body to an explicit value you should set the `content-type` header instead:
Old:
```js
res.contentType = 'application/json';
res.body = JSON.stringify(results);
```
New:
```js
res.set('content-type', 'application/json');
res.body = JSON.stringify(results);
```
Or simply:
```js
// sets the content type to JSON
// if it has not already been set
res.json(results);
```
The `res.cookie` method now takes the `signed` options as part of the regular options object.
Old:
```js
res.cookie('sid', 'abcdef', {
ttl: 60 * 60,
signed: {
secret: 'keyboardcat',
algorithm: 'sha256'
}
});
```
New:
```js
res.cookie('sid', 'abcdef', {
ttl: 60 * 60,
secret: 'keyboardcat',
algorithm: 'sha256'
});
```
!SUBSECTION Dependency injection
There is no equivalent of the `addInjector` method available in ArangoDB 2.x controllers. Most use cases can be solved by simply using plain variables but if you need something more flexible you can also use middleware:
Old:
```js
ctrl.addInjector('magicNumber', function () {
return Math.random();
});
ctrl.get('/', function (req, res, injected) {
res.json(injected.magicNumber);
});
```
New:
```js
function magicMiddleware(name) {
return {
register () {
let magic;
return function (req, res, next) {
if (!magic) {
magic = Math.random();
}
req[name] = magic;
next();
};
}
};
}
router.use(magicMiddleware('magicNumber'));
router.get('/', function (req, res) {
res.json(req.magicNumber);
});
```
Or simply:
```js
const magicNumber = Math.random();
router.get('/', function (req, res) {
res.json(magicNumber);
});
```
!SECTION Sessions
The `ctrl.activateSessions` method and the related `util-sessions-local` Foxx service have been replaced with the [Foxx sessions](Sessions/README.md) middleware. It is no longer possible to use the built-in session storage but you can simply pass in any document collection directly.
Old:
```js
const localSessions = applicationContext.dependencies.localSessions;
const sessionStorage = localSessions.sessionStorage;
ctrl.activateSessions({
sessionStorage: sessionStorage,
cookie: {secret: 'keyboardcat'}
});
ctrl.destroySession('/logout', function (req, res) {
res.json({message: 'Goodbye!'});
});
```
New:
```js
const sessionMiddleware = require('@arangodb/foxx/sessions');
const cookieTransport = require('@arangodb/foxx/sessions/transports/cookie');
router.use(sessionMiddleware({
storage: module.context.collection('sessions'),
transport: cookieTransport('keyboardcat')
}));
router.post('/logout', function (req, res) {
req.sessionStorage.clear(req.session);
res.json({message: 'Goodbye!'});
});
```
!SECTION Auth and OAuth2
The `util-simple-auth` and `util-oauth2` Foxx services have been replaced with the [Foxx auth](Auth.md) and Foxx OAuth2<!-- TODO (link to docs) --> modules. It is no longer necessary to install these services as dependencies in order to use the functionality.
Old:
```js
'use strict';
const auth = applicationContext.dependencies.simpleAuth;
// ...
const valid = auth.verifyPassword(authData, password);
```
New:
```js
'use strict';
const createAuth = require('@arangodb/foxx/auth');
const auth = createAuth(); // Use default configuration
// ...
const valid = auth.verifyPassword(authData, password);
```
!SECTION Foxx queries
The `createQuery` method has been removed. It can be trivially replaced with plain JavaScript functions and direct calls to [the `db._query` method](Modules.md):
Old:
```js
'use strict';
const Foxx = require('org/arangodb/foxx');
const query = Foxx.createQuery({
query: 'FOR u IN _users SORT u.user ASC RETURN u[@propName]',
params: ['propName'],
transform: function (results, uppercase) {
return (
uppercase
? results[0].toUpperCase()
: results[0].toLowerCase()
);
}
});
query('user', true);
```
New:
```js
'use strict';
const db = require('@arangodb').db;
const aql = require('@arangodb').aql;
function query(propName, uppercase) {
const results = db._query(aql`
FOR u IN _users
SORT u.user ASC
RETURN u[${propName}]
`).toArray();
return (
uppercase
? results[0].toUpperCase()
: results[0].toLowerCase()
);
}
query('user', true);
```
!SECTION Other changes
The `console` object in later versions of ArangoDB 2.x implemented a special Foxx console API and would optionally log messages to a collection. ArangoDB 3.0 restores the original behaviour where `console` is the same object available from the [console module](../Appendix/JavaScriptModules/Console.md).

View File

@ -0,0 +1,26 @@
!CHAPTER Auth and OAuth2
The `util-simple-auth` and `util-oauth2` Foxx services have been replaced with the [Foxx auth](../Auth.md) and Foxx OAuth2<!-- TODO (link to docs) --> modules. It is no longer necessary to install these services as dependencies in order to use the functionality.
Old:
```js
'use strict';
const auth = applicationContext.dependencies.simpleAuth;
// ...
const valid = auth.verifyPassword(authData, password);
```
New:
```js
'use strict';
const createAuth = require('@arangodb/foxx/auth');
const auth = createAuth(); // Use default configuration
// ...
const valid = auth.verifyPassword(authData, password);
```

View File

@ -0,0 +1,17 @@
!CHAPTER The application context
The global `applicationContext` variable available in Foxx modules has been replaced with the `context` attribute of the `module` variable. For consistency it is now referred to as the *service* context throughout this documentation.
Some methods of the service context have changed in ArangoDB 3.0:
* `fileName()` now behaves like `path()` did in ArangoDB 2.x
* `path()` has been removed (use `fileName()` instead)
* `foxxFileName()` has been removed (use `fileName()` instead)
Additionally the `version` and `name` attributes have been removed and can now only be accessed via the `manifest` attribute (as `manifest.version` and `manifest.name`). Note that the corresponding manifest fields are now optional and may be omitted.
The `options` attribute has also been removed as it should be considered an implementation detail. You should instead access the `dependencies` and `configuration` attributes directly.
The internal `_prefix` attribute (which was an alias for `basePath`) and the internal `comment` and `clearComments` methods (which were used by the magical documentation comments in ArangoDB 2.x) have also been removed.
The internal `_service` attribute (which provides access to the service itself) has been renamed to `service`.
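A minimal before/after sketch of the renamed file-name helpers (the file name
is made up):

```js
// 2.x: resolve a path inside the service directory
// const dataFile = applicationContext.foxxFileName('data.json');
// 3.0: fileName() on the service context replaces both
// foxxFileName() and the old path()
const dataFile = module.context.fileName('data.json');
```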

View File

@ -0,0 +1,37 @@
!CHAPTER The request context
When defining a route on a controller the controller would return an object called *request context*. Routers return a similar object called *endpoint*. Routers also return endpoints when mounting child routers or middleware, as does the `use` method of the service context.
The main differences between the new endpoints and the objects returned by controllers in previous versions of ArangoDB are:
* `bodyParam` is now simply called `body`; it is no longer necessary or possible to give the body a name and the request body will not show up in the request parameters. It's also possible to specify a MIME type.
* `body`, `queryParam` and `pathParam` now take positional arguments instead of an object. For specifics see the [endpoint documentation](../../Router/Endpoints.md).
* `notes` is now called `description` and takes a single string argument.
* `onlyIf` and `onlyIfAuthenticated` are no longer available; they can be emulated with middleware if necessary:
Old:
```js
ctrl.get(/* ... */)
.onlyIf(function (req) {
if (!req.user) {
throw new Error('Not authenticated!');
}
});
```
New:
```js
router.use(function (req, res, next) {
if (!req.arangoUser) {
res.throw(403, 'Not authenticated!');
}
next();
});
router.get(/* ... */);
```

View File

@ -0,0 +1,32 @@
!CHAPTER Error handling
The `errorResponse` method provided by controller request contexts has no equivalent in router endpoints. If you want to handle specific error types with specific status codes you need to catch them explicitly, either in the route or in a middleware:
Old:
```js
ctrl.get('/puppies', function (req, res) {
// Exception is thrown here
})
.errorResponse(TooManyPuppiesError, 400, 'Something went wrong!');
```
New:
```js
router.get('/puppies', function (req, res) {
try {
// Exception is thrown here
} catch (e) {
if (!(e instanceof TooManyPuppiesError)) {
throw e;
}
res.throw(400, 'Something went wrong!');
}
})
// The "error" method merely documents the meaning
// of the status code and has no other effect.
.error(400, 'Thrown if there are too many puppies.');
```
Note that errors created with `http-errors` are still handled by Foxx intelligently. In fact `res.throw` is just a helper method for creating and throwing these errors.

View File

@ -0,0 +1,50 @@
!CHAPTER Dependency injection
There is no equivalent of the `addInjector` method available in ArangoDB 2.x controllers. Most use cases can be solved by simply using plain variables but if you need something more flexible you can also use middleware:
Old:
```js
ctrl.addInjector('magicNumber', function () {
return Math.random();
});
ctrl.get('/', function (req, res, injected) {
res.json(injected.magicNumber);
});
```
New:
```js
function magicMiddleware(name) {
return {
register () {
let magic;
return function (req, res, next) {
if (!magic) {
magic = Math.random();
}
req[name] = magic;
next();
};
}
};
}
router.use(magicMiddleware('magicNumber'));
router.get('/', function (req, res) {
res.json(req.magicNumber);
});
```
Or simply:
```js
const magicNumber = Math.random();
router.get('/', function (req, res) {
res.json(magicNumber);
});
```

View File

@ -0,0 +1,27 @@
!CHAPTER Before, after and around
The `before`, `after` and `around` methods can easily be replaced by middleware:
Old:
```js
let start;
ctrl.before(function (req, res) {
start = Date.now();
});
ctrl.after(function (req, res) {
console.log('Request handled in ', (Date.now() - start), 'ms');
});
```
New:
```js
router.use(function (req, res, next) {
let start = Date.now();
next();
console.log('Request handled in ', (Date.now() - start), 'ms');
});
```
Note that unlike `around`, middleware receives the `next` function as the *third* argument (the "opts" argument has no equivalent).

View File

@ -0,0 +1,48 @@
!CHAPTER Controllers vs routers
Foxx Controllers have been replaced with [routers](../../Router/README.md). This is more than a cosmetic change as there are significant differences in behaviour:
Controllers were automatically mounted when the file defining them was executed. Routers need to be explicitly mounted using the `module.context.use` method. Routers can also be exported, imported and even nested. This makes it easier to split up complex routing trees across multiple files.
Old:
```js
'use strict';
const Foxx = require('org/arangodb/foxx');
const ctrl = new Foxx.Controller(applicationContext);
ctrl.get('/hello', function (req, res) {
// ...
});
```
New:
```js
'use strict';
const createRouter = require('org/arangodb/foxx/router');
const router = createRouter();
// If you are importing this file from your entry file ("main"):
module.exports = router;
// Otherwise: module.context.use(router);
router.get('/hello', function (req, res) {
// ...
});
```
Some general changes in behaviour that might trip you up:
* When specifying path parameters with schemas Foxx will now ignore the route if the schema does not match (i.e. `/hello/foxx` will no longer match `/hello/:num` if `num` specifies a schema that doesn't match the value `"foxx"`). With controllers this could previously result in users seeing a 400 (bad request) error when they should instead be served a 404 (not found) response.
* When a request is made with an HTTP verb not supported by an endpoint, Foxx will now respond with a 405 (method not allowed) error with an appropriate `Allow` header listing the supported HTTP verbs for that endpoint.
* Foxx will no longer parse your JSDoc comments to generate route documentation (use the `summary` and `description` methods of the endpoint instead).
* The `apiDocumentation` method now lives on the service context and behaves slightly differently.
* There is no router equivalent for the `activateAuthentication` and `activateSessions` methods. Instead you should use the session middleware (see the Foxx sessions middleware documentation).
* There is no `del` alias for the `delete` method on routers. It has always been safe to use keywords as method names in Foxx, so the use of this alias was already discouraged before.
* The `allRoutes` proxy is no longer available on routers but can easily be replaced with middleware or child routers, as sketched below.
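A minimal sketch of a middleware-based replacement (the header name is made up):

```js
// runs for every route registered on this router, which covers
// the typical use cases of the old allRoutes proxy
router.use(function (req, res, next) {
  res.set('x-served-by', 'my-service');
  next();
});
```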

View File

@ -0,0 +1,113 @@
!CHAPTER Request objects
The names of some attributes of the request object have been adjusted to more closely align with those of the corresponding methods on the endpoint objects and established conventions in other JavaScript frameworks:
* `req.urlParameters` is now called `req.pathParams`
* `req.parameters` is now called `req.queryParams`
* `req.params()` is now called `req.param()`
* `req.requestType` is now called `req.method`
* `req.compatibility` is now called `req.arangoVersion`
* `req.user` is now called `req.arangoUser`
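A compact sketch of the renames (attribute names taken from the list above):

```js
const id = req.pathParams.id;          // 2.x: req.urlParameters.id
const search = req.queryParams.search; // 2.x: req.parameters.search
const user = req.arangoUser;           // 2.x: req.user
```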
Some attributes have been removed or changed:
* `req.cookies` has been removed entirely (use `req.cookie(name)`)
* `req.requestBody` has been removed entirely (see below)
* `req.suffix` is now a string rather than an array
Additionally the `req.server` and `req.client` attributes are no longer available. The information is now exposed in a way that can (optionally) transparently handle proxy forwarding headers:
* `req.hostname` defaults to `req.server.address`
* `req.port` defaults to `req.server.port`
* `req.remoteAddress` defaults to `req.client.address`
* `req.remotePort` defaults to `req.client.port`
Finally, the `req.cookie` method now takes the `signed` options directly.
Old:
```js
const sid = req.cookie('sid', {
signed: {
secret: 'keyboardcat',
algorithm: 'sha256'
}
});
```
New:
```js
const sid = req.cookie('sid', {
secret: 'keyboardcat',
algorithm: 'sha256'
});
```
!SECTION Request bodies
`req.body` is no longer a method and no longer automatically parses JSON request bodies unless a request body was defined for the endpoint. `req.rawBody` now corresponds to the `req.rawBodyBuffer` of ArangoDB 2.x and is also no longer a method.
Old:
```js
ctrl.post('/', function (req, res) {
const data = req.body();
// ...
});
```
New:
```js
router.post('/', function (req, res) {
const data = req.body;
// ...
})
.body(['json']);
```
Or simply:
```js
const joi = require('joi');
router.post('/', function (req, res) {
const data = req.body;
// ...
})
.body(joi.object().optional());
```
!SECTION Multipart requests
The `req.requestParts` method has been removed entirely. If you need to accept multipart request bodies, you can simply define the request body using a multipart MIME type like `multipart/form-data`:
Old:
```js
ctrl.post('/', function (req, res) {
const parts = req.requestParts();
// ...
});
```
New:
```js
router.post('/', function (req, res) {
const parts = req.body;
// ...
})
.body(['multipart/form-data']);
```

View File

@ -0,0 +1,55 @@
!CHAPTER Response objects
The response object has a lot of new methods in ArangoDB 3.0 but otherwise remains similar to the response object of previous versions:
The `res.send` method behaves very differently from how the method with the same name behaved in ArangoDB 2.x: the conversion now takes the response body definition of the route into account. There is a new method `res.write` that implements the old behaviour.
Note that consecutive calls to `res.write` will append to the response body rather than replacing it the way `res.send` does.
The `res.contentType` property is also no longer available. If you want to set the MIME type of the response body to an explicit value you should set the `content-type` header instead:
Old:
```js
res.contentType = 'application/json';
res.body = JSON.stringify(results);
```
New:
```js
res.set('content-type', 'application/json');
res.body = JSON.stringify(results);
```
Or simply:
```js
// sets the content type to JSON
// if it has not already been set
res.json(results);
```
The `res.cookie` method now takes the `signed` options as part of the regular options object.
Old:
```js
res.cookie('sid', 'abcdef', {
ttl: 60 * 60,
signed: {
secret: 'keyboardcat',
algorithm: 'sha256'
}
});
```
New:
```js
res.cookie('sid', 'abcdef', {
ttl: 60 * 60,
secret: 'keyboardcat',
algorithm: 'sha256'
});
```

View File

@ -0,0 +1,138 @@
!CHAPTER Manifest
Many of the fields that were required in ArangoDB 2.x are now optional and can be safely omitted.
To avoid compatibility problems with future versions of ArangoDB you should always specify the `engines` field, e.g.:
```json
{
"engines": {
"arangodb": "^3.0.0"
}
}
```
!SECTION Controllers & exports
Previously Foxx distinguished between `exports` and `controllers`, each of which could be specified as an object. In ArangoDB 3.0 these have been merged into a single `main` field specifying an entry file.
The easiest way to migrate services using multiple exports and/or controllers is to create a separate entry file that imports these files:
Old (manifest.json):
```json
{
"exports": {
"doodads": "doodads.js",
"dingbats": "dingbats.js"
},
"controllers": {
"/doodads": "routes/doodads.js",
"/dingbats": "routes/dingbats.js",
"/": "routes/root.js"
}
}
```
New (manifest.json):
```json
{
"main": "index.js"
}
```
New (index.js):
```js
'use strict';
module.context.use('/doodads', require('./routes/doodads'));
module.context.use('/dingbats', require('./routes/dingbats'));
module.context.use('/', require('./routes/root'));
module.exports = {
doodads: require('./doodads'),
dingbats: require('./dingbats')
};
```
!SECTION Index redirect
If you previously did not define the `defaultDocument` field, please note that in ArangoDB 3.0 the field will no longer default to the value `index.html` when omitted:
Old:
```json
{
// no defaultDocument
}
```
New:
```json
{
"defaultDocument": "index.html"
}
```
This also means it is no longer necessary to specify the `defaultDocument` field with an empty value to prevent the redirect and be able to serve requests at the `/` (root) path of the mount point:
Old:
```json
{
"defaultDocument": ""
}
```
New:
```json
{
// no defaultDocument
}
```
!SECTION Assets
The `assets` field is no longer supported in ArangoDB 3.0 outside of legacy compatibility mode.
If you previously used the field to serve individual files as-is you can simply use the `files` field instead:
Old:
```json
{
"assets": {
"client.js": {
"files": ["assets/client.js"],
"contentType": "application/javascript"
}
}
}
```
New:
```json
{
"files": {
"client.js": {
"path": "assets/client.js",
"type": "application/javascript"
}
}
}
```
If you relied on being able to specify multiple files that should be concatenated, you will have to use build tools outside of ArangoDB to prepare these files accordingly.
!SECTION Root element
The `rootElement` field is no longer supported and has been removed entirely.
If your controllers relied on this field being available you need to adjust your schemas and routes to be able to handle the full JSON structure of incoming documents.
!SECTION System services
The `isSystem` field is no longer supported. The presence or absence of the field had no effect in most recent versions of ArangoDB 2.x and has now been removed entirely.

View File

@ -0,0 +1,46 @@
!CHAPTER Foxx queries
The `createQuery` method has been removed. It can be trivially replaced with plain JavaScript functions and direct calls to [the `db._query` method](../Modules.md):
Old:
```js
'use strict';
const Foxx = require('org/arangodb/foxx');
const query = Foxx.createQuery({
query: 'FOR u IN _users SORT u.user ASC RETURN u[@propName]',
params: ['propName'],
transform: function (results, uppercase) {
return (
uppercase
? results[0].toUpperCase()
: results[0].toLowerCase()
);
}
});
query('user', true);
```
New:
```js
'use strict';
const db = require('@arangodb').db;
const aql = require('@arangodb').aql;
function query(propName, uppercase) {
const results = db._query(aql`
FOR u IN _users
SORT u.user ASC
RETURN u[${propName}]
`).toArray();
return (
uppercase
? results[0].toUpperCase()
: results[0].toLowerCase()
);
}
query('user', true);
```

View File

@ -0,0 +1,9 @@
!CHAPTER Migrating 2.x services to 3.0
When migrating services from older versions of ArangoDB, it is generally recommended that you first make sure they work in [legacy compatibility mode](../LegacyMode.md), which can also serve as a stop-gap solution.
This chapter outlines the major differences in the Foxx API between ArangoDB 2.8 and ArangoDB 3.0.
!SECTION General changes
The `console` object in later versions of ArangoDB 2.x implemented a special Foxx console API and would optionally log messages to a collection. ArangoDB 3.0 restores the original behaviour where `console` is the same object available from the [console module](../../Appendix/JavaScriptModules/Console.md).

View File

@ -0,0 +1,199 @@
!CHAPTER Repositories and models
Previously Foxx was heavily built around the concept of repositories and models, which provided complex but rarely necessary abstractions on top of ArangoDB collections and documents. In ArangoDB 3.0 these have been removed entirely.
!SECTION Repositories vs collections
Repositories mostly wrapped methods that already existed on ArangoDB collection objects and primarily dealt with converting between plain ArangoDB documents and Foxx model instances. In ArangoDB 3.0 you can simply use these collections directly and treat documents as plain JavaScript objects.
Old:
```js
'use strict';
const Foxx = require('org/arangodb/foxx');
const myRepo = new Foxx.Repository(
applicationContext.collection('myCollection'),
{model: Foxx.Model}
);
// ...
const models = myRepo.byExample({color: 'green'});
res.json(models.map(function (model) {
return model.forClient();
}));
```
New:
```js
'use strict';
const myDocs = module.context.collection('myCollection');
// ...
const docs = myDocs.byExample({color: 'green'});
res.json(docs);
```
!SECTION Schema validation
The main purpose of models in ArangoDB 2.x was to validate incoming data using joi schemas. In more recent versions of ArangoDB 2.x it was already possible to pass these schemas directly in most places where a model was expected as an argument. The only difference is that schemas should now be considered the default.
If you previously relied on the automatic validation of Foxx model instances when setting attributes or instantiating models from untrusted data, you can simply use the schema's `validate` method directly.
Old:
```js
'use strict';
const joi = require('joi');
const mySchema = {
name: joi.string().required(),
size: joi.number().required()
};
const Foxx = require('org/arangodb/foxx');
const MyModel = Foxx.Model.extend({schema: mySchema});
// ...
const model = new MyModel(req.json());
if (!model.isValid) {
res.status(400);
res.write('Bad request');
return;
}
```
New:
```js
'use strict';
const joi = require('joi');
// Note this is now wrapped in a joi.object()
const mySchema = joi.object({
name: joi.string().required(),
size: joi.number().required()
}).required();
// ...
const result = mySchema.validate(req.body);
if (result.error) {
res.status(400);
res.write('Bad request');
return;
}
```
!SECTION Migrating models
While most use cases for models can now be replaced with plain joi schemas, there is still the concept of a "model" in Foxx in ArangoDB 3.0 although it is quite different from Foxx models in ArangoDB 2.x.
A model in Foxx now refers to a plain JavaScript object with an optional `schema` attribute and the optional methods `forClient` and `fromClient`. Models can be used instead of plain joi schemas to define request and response bodies, but there are no model "instances" in ArangoDB 3.0.
Old:
```js
'use strict';
const _ = require('underscore');
const joi = require('joi');
const Foxx = require('org/arangodb/foxx');
const MyModel = Foxx.Model.extend({
schema: {
name: joi.string().required(),
size: joi.number().required()
},
forClient () {
return _.omit(this.attributes, ['_key', '_id', '_rev']);
}
});
// ...
ctrl.get(/* ... */)
.bodyParam('body', {type: MyModel});
```
New:
```js
'use strict';
const _ = require('lodash');
const joi = require('joi');
const MyModel = {
schema: joi.object({
name: joi.string().required(),
size: joi.number().required()
}).required(),
forClient (data) {
return _.omit(data, ['_key', '_id', '_rev']);
}
};
// ...
router.get(/* ... */)
.body(MyModel);
```
!SECTION Triggers
When saving, updating, replacing or deleting models in ArangoDB 2.x using the repository methods, the repository and model would fire events that could be subscribed to in order to perform side-effects.
Note that even in 2.x these events would not fire when using queries or manipulating documents in any other way than using the specific repository methods that operated on individual documents.
This behaviour is no longer available in ArangoDB 3.0 but can be emulated by using an `EventEmitter` directly if it is not possible to solve the problem differently:
Old:
```js
'use strict';
const Foxx = require('org/arangodb/foxx');
const MyModel = Foxx.Model.extend({
// ...
}, {
afterRemove () {
console.log(this.get('name'), 'was removed');
}
});
// ...
const model = myRepo.firstExample({name: 'myName'});
myRepo.remove(model);
// -> "myName was removed successfully"
```
New:
```js
'use strict';
const EventEmitter = require('events');
const emitter = new EventEmitter();
emitter.on('afterRemove', function (doc) {
console.log(doc.name, 'was removed');
});
// ...
const doc = myDocs.firstExample({name: 'myName'});
myDocs.remove(doc);
emitter.emit('afterRemove', doc);
// -> "myName was removed successfully"
```
Or simply:
```js
'use strict';
function afterRemove(doc) {
console.log(doc.name, 'was removed');
}
// ...
const doc = myDocs.firstExample({name: 'myName'});
myDocs.remove(doc);
afterRemove(doc);
// -> "myName was removed successfully"
```

View File

@ -0,0 +1,34 @@
!CHAPTER Sessions
The `ctrl.activateSessions` method and the related `util-sessions-local` Foxx service have been replaced with the [Foxx sessions](../Sessions/README.md) middleware. It is no longer possible to use the built-in session storage but you can simply pass in any document collection directly.
Old:
```js
const localSessions = applicationContext.dependencies.localSessions;
const sessionStorage = localSessions.sessionStorage;
ctrl.activateSessions({
sessionStorage: sessionStorage,
cookie: {secret: 'keyboardcat'}
});
ctrl.destroySession('/logout', function (req, res) {
res.json({message: 'Goodbye!'});
});
```
New:
```js
const sessionMiddleware = require('@arangodb/foxx/sessions');
const cookieTransport = require('@arangodb/foxx/sessions/transports/cookie');
router.use(sessionMiddleware({
storage: module.context.collection('sessions'),
transport: cookieTransport('keyboardcat')
}));
router.post('/logout', function (req, res) {
req.sessionStorage.clear(req.session);
res.json({message: 'Goodbye!'});
});
```

View File

@ -0,0 +1,51 @@
!CHAPTER Migrating from pre-2.8
When migrating from a version older than ArangoDB 2.8 please note that starting with ArangoDB 2.8 the behaviour of the `require` function more closely mimics the behaviour observed in Node.js and module bundlers for browsers, e.g.:
In a file `/routes/examples.js` (relative to the root folder of the service):
* `require('./my-module')` is resolved by trying the following locations in order:
1. `/routes/my-module` (relative to service root)
2. `/routes/my-module.js` (relative to service root)
3. `/routes/my-module.json` (relative to service root)
4. `/routes/my-module/index.js` (relative to service root)
5. `/routes/my-module/index.json` (relative to service root)
* `require('lodash')` is resolved by trying the following in order:
1. `/routes/node_modules/lodash` (relative to service root)
2. `/node_modules/lodash` (relative to service root)
3. ArangoDB module `lodash`
4. Node compatibility module `lodash`
5. Bundled NPM module `lodash`
* `require('/abs/path')` is resolved by trying the following in order:
1. `/abs/path` (relative to file system root)
2. `/abs/path.js` (relative to file system root)
3. `/abs/path.json` (relative to file system root)
4. `/abs/path/index.js` (relative to file system root)
5. `/abs/path/index.json` (relative to file system root)
This behaviour is incompatible with the source code generated by the Foxx generator in the web interface before ArangoDB 2.8.
**Note:** The `org/arangodb` module is aliased to the new name `@arangodb` in ArangoDB 3.0.0 and the `@arangodb` module was aliased to the old name `org/arangodb` in ArangoDB 2.8.0. Either one will work in 2.8 and 3.0 but outside of legacy services you should use `@arangodb` going forward.
!SECTION Foxx queue
In ArangoDB 2.6 Foxx introduced a new way to define queued jobs using Foxx scripts, replacing the function-based job type definitions that caused problems when restarting the server. Function-based jobs were removed in ArangoDB 2.7 and are no longer supported at all.
!SECTION CoffeeScript
ArangoDB 3.0 no longer provides built-in support for CoffeeScript source files, even in legacy compatibility mode. If you want to use an alternative language like CoffeeScript, make sure to pre-compile the raw source files to JavaScript and use the compiled JavaScript files in the service.
!SECTION The request module
The `@arangodb/request` module when used with the `json` option previously overwrote the string in the `body` property of the response object with the parsed JSON body. In 2.8 this was changed: the parsed JSON body is added as the `json` property of the response object in addition to overwriting the `body` property. In 3.0 and later (including legacy compatibility mode) the `body` property is no longer overwritten; use the `json` property to access the parsed body instead. Note that this only affects code using the `json` option when making the request.
!SECTION Bundled NPM modules
The bundled NPM modules have been upgraded and may include backwards-incompatible changes; in particular the API of `joi` has changed several times. If in doubt you should bundle your own versions of these modules to ensure specific versions will be used.
The utility module `lodash` is now available and should be used instead of `underscore`, but both modules will continue to be provided.

View File

@ -17,19 +17,19 @@ graph databases can handle an arbitrary number of these hops over edge collectio
Also edges in one edge collection may point to several vertex collections.
It's common to have attributes attached to edges, e.g. a *label* naming this interconnection.
Edges have a direction, with their relations `_from` and `_to` pointing *from* one document *to* another document stored in vertex collections.
In queries you can define in which directions the edge relations may be followed (`OUTBOUND`: `_from` → `_to`, `INBOUND`: `_to` → `_from`, `ANY`: `_from` ↔ `_to`).
In queries you can define in which directions the edge relations may be followed (`OUTBOUND`: `_from` → `_to`, `INBOUND`: `_from` ← `_to`, `ANY`: `_from` ↔ `_to`).
!SUBSECTION Named Graphs
Named graphs are completely managed by ArangoDB, and thus also [visible in the web interface](../Administration/WebInterface/Graphs.md).
They use the full spectrum of ArangoDB's graph features. You may access them via several interfaces.
- [AQL Graph Operations](../../AQL/Graphs/index.html) with several flavors:
- [AQL Graph Operations](../../AQL/Graphs/index.html) with several flavors:
- [AQL Traversals](../../AQL/Graphs/Traversals.html) on both named and anonymous graphs
- [AQL Shortest Path](../../AQL/Graphs/ShortestPath.html) on both named and anonymous graphs
- [The JavaScript General Graph implementation, as you may use it in Foxx Services](GeneralGraphs/README.md)
* [Graph Management](GeneralGraphs/Management.md); creating & manipulating graph definitions; inserting, updating and deleting vertices and edges in graphs
* [Graph Functions](GeneralGraphs/Functions.md) for working with edges and vertices, to analyze them and their relations
- [Graph Management](GeneralGraphs/Management.md); creating & manipulating graph definitions; inserting, updating and deleting vertices and edges in graphs
- [Graph Functions](GeneralGraphs/Functions.md) for working with edges and vertices, to analyze them and their relations
- [the RESTful General Graph interface](../../HTTP/Gharial/index.html) used to implement graph management in client drivers
!SUBSUBSECTION Manipulating collections of named graphs with regular document functions
@ -75,6 +75,7 @@ A set of persons knowing each other:
The *knows* graph consists of one *vertex collection* `persons` connected via one *edge collection* `knows`.
It will contain five persons *Alice*, *Bob*, *Charlie*, *Dave* and *Eve*.
We will have the following directed relations:
- *Alice* knows *Bob*
- *Bob* knows *Charlie*
- *Bob* knows *Dave*
@ -174,7 +175,7 @@ The above referenced chapters describe the various APIs of ArangoDBs graph engin
- [Traversing a graph in full depth](https://docs.arangodb.com/cookbook/Graph/FulldepthTraversal.html)
- [Search for vertices of special type connecting a given subgraph](https://docs.arangodb.com/cookbook/Graph/FindingConnectedVerticesForSubgraphs.html)
- [Using an example vertex with the java driver](https://docs.arangodb.com/cookbook/Graph/JavaDriverGraphExampleVertex.html)
- [Retrieving documents from ArangoDB without knowing the structure](https://docs.arangodb.com/cookbook/JavaDriverBaseDocument.html)
- [Retrieving documents from ArangoDB without knowing the structure](https://docs.arangodb.com/cookbook/Graph/JavaDriverBaseDocument.html)
- [Using a custom visitor from node.js](https://docs.arangodb.com/cookbook/Graph/CustomVisitorFromNodeJs.html)
- [AQL Example Queries on an Actors and Movies Database](https://docs.arangodb.com/cookbook/Graph/ExampleActorsAndMovies.html)

View File

@ -10,7 +10,8 @@
</a>
</div>
<select class="arangodb-version-switcher">
<option value="devel">VERSION_NUMBER</option>
<option value="devel">devel</option>
<option value="3.0">v3.0</option>
<option value="2.8">v2.8</option>
<option value="2.7">v2.7</option>
<option value="2.6">v2.6</option>
@ -25,16 +26,16 @@
</div>
<ul class="arangodb-navmenu">
<li class="active-tab">
<a href="BASE_PATH/Manual/index.html">Manual</a>
<a href="#" data-book="Manual">Manual</a>
</li>
<li>
<a href="BASE_PATH/AQL/index.html">AQL</a>
<a href="#" data-book="AQL">AQL</a>
</li>
<li>
<a href="BASE_PATH/HTTP/index.html">HTTP</a>
<a href="#" data-book="HTTP">HTTP</a>
</li>
<li>
<a href="https://docs.arangodb.com/cookbook">Cookbook</a>
<a href="#" data-book="cookbook">Cookbook</a>
</li>
<li class="socialIcons">
<a href="https://github.com/ArangoDB/ArangoDB/issues" target="blank" name="github">

View File

@ -283,12 +283,6 @@ The operations for storing and retrieving cluster state information are now much
expensive from an ArangoDB cluster node perspective, which in turn allows for faster
cluster operations that need to fetch or update the overall cluster state.
TODO
!SECTION Synchronous replication
TODO
!SECTION `_from` and `_to` attributes of edges are updatable and usable in indexes
In ArangoDB prior to 3.0 the attributes `_from` and `_to` of edges were treated

View File

@ -40,7 +40,7 @@
* [Collection Names](DataModeling/NamingConventions/CollectionNames.md)
* [Document Keys](DataModeling/NamingConventions/DocumentKeys.md)
* [Attribute Names](DataModeling/NamingConventions/AttributeNames.md)
* [Modeling Relationships](DataModeling/ModelingRelationships.md)
# * [Modeling Relationships](DataModeling/ModelingRelationships.md)
#
* [Indexing](Indexing/README.md)
* [Index Basics](Indexing/IndexBasics.md)
@ -84,7 +84,21 @@
* [Serving files](Foxx/Assets.md)
# * [Writing tests](Foxx/Testing.md)
* [Scripts and queued jobs](Foxx/Scripts.md)
* [Migrating 2.x services](Foxx/Migrating2x.md)
* [Migrating 2.x services](Foxx/Migrating2x/README.md)
* [Migrating from pre-2.8](Foxx/Migrating2x/Wayback.md)
* [manifest.json](Foxx/Migrating2x/Manifest.md)
* [applicationContext](Foxx/Migrating2x/Context.md)
* [Repositories and Models](Foxx/Migrating2x/Repositories.md)
* [Controllers](Foxx/Migrating2x/Controllers/README.md)
* [Request context](Foxx/Migrating2x/Controllers/Endpoints.md)
* [Error handling](Foxx/Migrating2x/Controllers/Errors.md)
* [Before/After/Around](Foxx/Migrating2x/Controllers/Middleware.md)
* [Request object](Foxx/Migrating2x/Controllers/Request.md)
* [Response object](Foxx/Migrating2x/Controllers/Response.md)
* [Dependency Injection](Foxx/Migrating2x/Controllers/IoC.md)
* [Sessions](Foxx/Migrating2x/Sessions.md)
* [Auth and OAuth2](Foxx/Migrating2x/Auth.md)
* [Foxx Queries](Foxx/Migrating2x/Queries.md)
* [Legacy compatibility mode](Foxx/LegacyMode.md)
* [User management](Foxx/Users.md)
* [Related modules](Foxx/Modules.md)

View File

@ -35,8 +35,29 @@ function appendHeader() {
};
addGoogleSrc();
$(".arangodb-navmenu a:lt(4)").on("click", function(e) {
e.preventDefault();
var urlSplit = gitbook.state.root.split("/");
urlSplit.pop(); // ""
urlSplit.pop(); // e.g. "Manual"
window.location.href = urlSplit.join("/") + "/" + e.target.getAttribute("data-book") + "/index.html";
});
var bookVersion = gitbook.state.root.match(/\/(\d\.\d|devel)\//);
if (bookVersion) {
$(".arangodb-version-switcher").val(bookVersion[1]);
}
$(".arangodb-version-switcher").on("change", function(e) {
window.location.href = "https://docs.arangodb.com/" + e.target.value;
var urlSplit = gitbook.state.root.split("/");
if (urlSplit.length == 6) {
urlSplit.pop(); // ""
var currentBook = urlSplit.pop(); // e.g. "Manual"
urlSplit.pop(); // e.g. "3.0"
window.location.href = urlSplit.join("/") + "/" + e.target.value + "/" + currentBook + "/";
} else {
window.location.href = "https://docs.arangodb.com/" + e.target.value;
}
});
});

View File

@ -11,6 +11,7 @@ ArangoDB
2.6: [![Build Status](https://secure.travis-ci.org/arangodb/arangodb.png?branch=2.6)](http://travis-ci.org/arangodb/arangodb)
2.7: [![Build Status](https://secure.travis-ci.org/arangodb/arangodb.png?branch=2.7)](http://travis-ci.org/arangodb/arangodb)
2.8: [![Build Status](https://secure.travis-ci.org/arangodb/arangodb.png?branch=2.8)](http://travis-ci.org/arangodb/arangodb)
3.0: [![Build Status](https://secure.travis-ci.org/arangodb/arangodb.png?branch=3.0)](http://travis-ci.org/arangodb/arangodb)
Master: [![Build Status](https://secure.travis-ci.org/arangodb/arangodb.png?branch=master)](http://travis-ci.org/arangodb/arangodb)
@ -44,7 +45,7 @@ Microservice Example
By extending the HTTP API with user code written in JavaScript, ArangoDB can be turned into a strict schema-enforcing persistence engine.
Next step, bundle your Foxx application as a [docker container](https://docs.arangodb.com/cookbook/UsingArangoDBNodeJSDocker.html) and get it running in the cloud.
Next step, bundle your Foxx application as a [docker container](https://docs.arangodb.com/cookbook/Cloud/NodeJsDocker.html) and get it running in the cloud.
Other features of ArangoDB include:
@ -60,36 +61,34 @@ Other features of ArangoDB include:
* **Replication** and **Sharding**: set up the database in a master-slave configuration or spread bigger datasets across multiple servers
* It is **open source** (Apache License 2.0)
For more in-depth information read the [design goals of ArangoDB](http://www.arangodb.com/2012/03/07/avocadodbs-design-objectives)
For more in-depth information read the [design goals of ArangoDB](https://www.arangodb.com/2012/03/07/avocadodbs-design-objectives)
Latest Release - ArangoDB 2.8
-----------------
Latest Release - ArangoDB 3.0
-----------------------------
The [What's new in ArangoDB 2.8](https://docs.arangodb.com/NewFeatures/NewFeatures28.html) can be found in the documentation.
The [What's new in ArangoDB 3.0](https://docs.arangodb.com/3.0/Manual/ReleaseNotes/NewFeatures30.html) can be found in the documentation.
**AQL Graph Traversals / Pattern Matching**: AQL offers a new feature to traverse over a graph without writing JavaScript functions but with all the other features you know from AQL. For this purpose, a special version of `FOR variable-name IN expression` has been introduced.
The added **Array Indexes** are a major improvement to ArangoDB that you will love and never want to miss again. Hash indexes and skiplist indexes can now be defined for array values as well, so it's freaking fast to access documents by individual array values.
Additionally, there is a cool new **aggregation feature** that was added after the beta releases. AQL introduces the keyword `AGGREGATE` for use in `AQL COLLECT` statements. Using `AGGREGATE` allows more efficient aggregation (incrementally while building the groups) than previous versions of AQL, which built group aggregates afterwards from the total of all group values.
**Optimizer improvements**: The AQL query optimizer can now use indexes if multiple filter conditions on attributes of the same collection are combined with logical ORs, and if the usage of indexes would completely cover these conditions.
ArangoDB 2.8 now has an automatic **deadlock detection** for transactions. A deadlock is a situation in which two or more concurrent operations (user transactions or AQL queries) try to access the same resources (collections, documents) and need to wait for the others to finish, but none of them can make any progress.
**Foxx Improvements**
The module resolution used by `require` now behaves more like in node.js. The `org/arangodb/request` module now returns response bodies for error responses by default. The old behavior of not returning bodies for error responses can be re-enabled by explicitly setting the option `returnBodyOnError` to `false`.
Key features of the 3.0 release are:
- use of VelocyPack as internal storage format
- AQL improvements
- much better cluster state management
- synchronous replication (master/master)
- unified APIs for CRUD operations
- persistent indexes
- upgraded version of V8
- new web admin interface
- Foxx improvements
- logging improvements
- improved documentation
More Information
----------------
Please check the [Installation Manual](https://docs.arangodb.com/Installing/) for installation and compilation instructions.
Please check the [Installation Manual](https://docs.arangodb.com/latest/Manual/GettingStarted/Installing/) for installation and compilation instructions.
The [User Manual](https://docs.arangodb.com/FirstSteps/) has an introductory chapter showing the basic operations of ArangoDB.
The [User Manual](https://docs.arangodb.com/latest/Manual/GettingStarted/) has an introductory chapter showing the basic operations of ArangoDB.
Stay in Contact

View File

@ -352,6 +352,17 @@ bool Store::check(VPackSlice const& slice) const {
if (op.value.getBool() ? found : !found) {
return false;
}
} else if (oper == "in") { // in
if (found) {
if (node.slice().isArray()) {
for (auto const& i : VPackArrayIterator(node.slice())) {
if (i == op.value) {
return true;
}
}
}
}
return false;
}
}
} else {

View File

@ -742,6 +742,10 @@ void AqlValue::toVelocyPack(AqlTransaction* trx,
bool resolveExternals) const {
switch (type()) {
case VPACK_SLICE_POINTER:
if (!resolveExternals && isMasterPointer()) {
builder.addExternal(_data.pointer);
break;
} // fallthrough intentional
case VPACK_INLINE:
case VPACK_MANAGED: {
if (resolveExternals) {
@ -785,10 +789,13 @@ AqlValue AqlValue::materialize(AqlTransaction* trx, bool& hasCopied, bool resolv
}
case DOCVEC:
case RANGE: {
VPackBuilder builder;
bool shouldDelete = true;
ConditionalDeleter<VPackBuffer<uint8_t>> deleter(shouldDelete);
std::shared_ptr<VPackBuffer<uint8_t>> buffer(new VPackBuffer<uint8_t>, deleter);
VPackBuilder builder(buffer);
toVelocyPack(trx, builder, resolveExternals);
hasCopied = true;
return AqlValue(builder);
return AqlValue(buffer.get(), shouldDelete);
}
}
@ -801,6 +808,11 @@ AqlValue AqlValue::materialize(AqlTransaction* trx, bool& hasCopied, bool resolv
AqlValue AqlValue::clone() const {
switch (type()) {
case VPACK_SLICE_POINTER: {
if (isMasterPointer()) {
// copy from master pointer. this will not copy the data
return AqlValue(_data.pointer, AqlValueFromMasterPointer());
}
// copy from regular pointer. this may copy the data
return AqlValue(_data.pointer);
}
case VPACK_INLINE: {
@ -871,7 +883,6 @@ void AqlValue::destroy() {
VPackSlice AqlValue::slice() const {
switch (type()) {
case VPACK_SLICE_POINTER: {
return VPackSlice(_data.pointer);
}
case VPACK_INLINE: {
@ -901,7 +912,10 @@ AqlValue AqlValue::CreateFromBlocks(
arangodb::AqlTransaction* trx, std::vector<AqlItemBlock*> const& src,
std::vector<std::string> const& variableNames) {
VPackBuilder builder;
bool shouldDelete = true;
ConditionalDeleter<VPackBuffer<uint8_t>> deleter(shouldDelete);
std::shared_ptr<VPackBuffer<uint8_t>> buffer(new VPackBuffer<uint8_t>, deleter);
VPackBuilder builder(buffer);
builder.openArray();
for (auto const& current : src) {
@ -929,7 +943,7 @@ AqlValue AqlValue::CreateFromBlocks(
}
builder.close();
return AqlValue(builder);
return AqlValue(buffer.get(), shouldDelete);
}
/// @brief create an AqlValue from a vector of AqlItemBlock*s
@ -937,7 +951,11 @@ AqlValue AqlValue::CreateFromBlocks(
arangodb::AqlTransaction* trx, std::vector<AqlItemBlock*> const& src,
arangodb::aql::RegisterId expressionRegister) {
VPackBuilder builder;
bool shouldDelete = true;
ConditionalDeleter<VPackBuffer<uint8_t>> deleter(shouldDelete);
std::shared_ptr<VPackBuffer<uint8_t>> buffer(new VPackBuffer<uint8_t>, deleter);
VPackBuilder builder(buffer);
builder.openArray();
for (auto const& current : src) {
@ -947,7 +965,7 @@ AqlValue AqlValue::CreateFromBlocks(
}
builder.close();
return AqlValue(builder);
return AqlValue(buffer.get(), shouldDelete);
}
/// @brief 3-way comparison for AqlValue objects

View File

@ -28,6 +28,7 @@
#include "Basics/Common.h"
#include "Aql/Range.h"
#include "Aql/types.h"
#include "Basics/ConditionalDeleter.h"
#include "Basics/VelocyPackHelper.h"
#include "VocBase/document-collection.h"
@ -45,6 +46,10 @@ class AqlTransaction;
namespace aql {
class AqlItemBlock;
// no-op struct used only in an internal API to signal we want
// to construct from a master pointer!
struct AqlValueFromMasterPointer {};
struct AqlValue final {
friend struct std::hash<arangodb::aql::AqlValue>;
friend struct std::equal_to<arangodb::aql::AqlValue>;
@ -93,32 +98,30 @@ struct AqlValue final {
_data.internal[0] = '\x00';
setType(AqlValueType::VPACK_INLINE);
}
// construct from document
explicit AqlValue(TRI_doc_mptr_t const* mptr) {
_data.pointer = mptr->vpack();
setType(AqlValueType::VPACK_SLICE_POINTER);
TRI_ASSERT(VPackSlice(_data.pointer).isObject());
// construct from mptr, not copying!
AqlValue(uint8_t const* pointer, AqlValueFromMasterPointer const&) {
setPointer<true>(pointer);
TRI_ASSERT(!VPackSlice(_data.pointer).isExternal());
}
// construct from pointer
// construct from pointer, not copying!
explicit AqlValue(uint8_t const* pointer) {
// we must get rid of Externals first here, because all
// methods that use VPACK_SLICE_POINTER expect its contents
// to be non-Externals
if (*pointer == '\x1d') {
// an external
_data.pointer = VPackSlice(pointer).resolveExternals().begin();
setPointer<false>(VPackSlice(pointer).resolveExternals().begin());
} else {
_data.pointer = pointer;
setPointer<false>(pointer);
}
setType(AqlValueType::VPACK_SLICE_POINTER);
TRI_ASSERT(!VPackSlice(_data.pointer).isExternal());
}
// construct from docvec, taking over its ownership
explicit AqlValue(std::vector<AqlItemBlock*>* docvec) {
TRI_ASSERT(docvec != nullptr);
_data.docvec = docvec;
setType(AqlValueType::DOCVEC);
}
@ -130,8 +133,9 @@ struct AqlValue final {
setType(AqlValueType::VPACK_INLINE);
}
// construct from char* and length
// construct from char* and length, copying the string
AqlValue(char const* value, size_t length) {
TRI_ASSERT(value != nullptr);
if (length == 0) {
// empty string
_data.internal[0] = 0x40;
@ -158,7 +162,7 @@ struct AqlValue final {
}
}
// construct from std::string
// construct from std::string, copying the string
explicit AqlValue(std::string const& value) {
if (value.empty()) {
// empty string
@ -187,24 +191,50 @@ struct AqlValue final {
}
}
// construct from Buffer, potentially taking over its ownership
// (by adjusting the boolean passed)
AqlValue(arangodb::velocypack::Buffer<uint8_t>* buffer, bool& shouldDelete) {
TRI_ASSERT(buffer != nullptr);
TRI_ASSERT(shouldDelete); // here, the Buffer is still owned by the caller
// intentionally do not resolve externals here
// if (slice.isExternal()) {
// // recursively resolve externals
// slice = slice.resolveExternals();
// }
if (buffer->length() < sizeof(_data.internal)) {
// Use inline value
memcpy(_data.internal, buffer->data(), static_cast<size_t>(buffer->length()));
setType(AqlValueType::VPACK_INLINE);
} else {
// Use managed buffer, simply reuse the pointer and adjust the original
// Buffer's deleter
_data.buffer = buffer;
setType(AqlValueType::VPACK_MANAGED);
shouldDelete = false; // adjust deletion control variable
}
}
// construct from Buffer, taking over its ownership
explicit AqlValue(arangodb::velocypack::Buffer<uint8_t>* buffer) {
TRI_ASSERT(buffer != nullptr);
_data.buffer = buffer;
setType(AqlValueType::VPACK_MANAGED);
}
// construct from Builder
// construct from Builder, copying contents
explicit AqlValue(arangodb::velocypack::Builder const& builder) {
TRI_ASSERT(builder.isClosed());
initFromSlice(builder.slice());
}
// construct from Builder, copying contents
explicit AqlValue(arangodb::velocypack::Builder const* builder) {
TRI_ASSERT(builder->isClosed());
initFromSlice(builder->slice());
}
// construct from Slice
// construct from Slice, copying contents
explicit AqlValue(arangodb::velocypack::Slice const& slice) {
initFromSlice(slice);
}
@ -227,8 +257,8 @@ struct AqlValue final {
/// @brief whether or not the value must be destroyed
inline bool requiresDestruction() const noexcept {
AqlValueType t = type();
return (t == VPACK_MANAGED || t == DOCVEC || t == RANGE);
auto t = type();
return (t != VPACK_SLICE_POINTER && t != VPACK_INLINE);
}
/// @brief whether or not the value is empty / none
@ -243,6 +273,11 @@ struct AqlValue final {
inline bool isPointer() const noexcept {
return type() == VPACK_SLICE_POINTER;
}
/// @brief whether or not the value is a master pointer
inline bool isMasterPointer() const noexcept {
return isPointer() && (_data.internal[sizeof(_data.internal) - 2] == 1);
}
/// @brief whether or not the value is a range
inline bool isRange() const noexcept {
@ -406,13 +441,22 @@ struct AqlValue final {
setType(AqlValueType::VPACK_MANAGED);
}
}
/// @brief sets the value type
inline void setType(AqlValueType type) noexcept {
_data.internal[sizeof(_data.internal) - 1] = type;
}
};
template<bool isMasterPointer>
inline void setPointer(uint8_t const* pointer) {
_data.pointer = pointer;
// we use the byte at (size - 2) to distinguish between data pointing to database
// documents (size[-2] == 1) and other data(size[-2] == 0)
_data.internal[sizeof(_data.internal) - 2] = isMasterPointer ? 1 : 0;
_data.internal[sizeof(_data.internal) - 1] = AqlValueType::VPACK_SLICE_POINTER;
}
};
class AqlValueGuard {
public:
AqlValueGuard() = delete;

View File

@ -628,6 +628,10 @@ AqlValue Expression::executeSimpleExpressionArray(
size_t const n = node->numMembers();
if (n == 0) {
return AqlValue(VelocyPackHelper::EmptyArrayValue());
}
TransactionBuilderLeaser builder(trx);
builder->openArray();
@ -659,6 +663,12 @@ AqlValue Expression::executeSimpleExpressionObject(
// this will not create a copy
return AqlValue(node->computeValue().begin());
}
size_t const n = node->numMembers();
if (n == 0) {
return AqlValue(VelocyPackHelper::EmptyObjectValue());
}
// unordered map to make object keys unique afterwards
std::unordered_map<std::string, size_t> uniqueKeyValues;
@ -668,7 +678,6 @@ AqlValue Expression::executeSimpleExpressionObject(
TransactionBuilderLeaser builder(trx);
builder->openObject();
size_t const n = node->numMembers();
for (size_t i = 0; i < n; ++i) {
auto member = node->getMemberUnchecked(i);
@ -805,12 +814,14 @@ AqlValue Expression::executeSimpleExpressionReference(
mustDestroy = false;
auto v = static_cast<Variable const*>(node->getData());
TRI_ASSERT(v != nullptr);
{
auto it = _variables.find(v);
if (it != _variables.end()) {
return AqlValue((*it).second.begin()); // use only pointer to data
mustDestroy = true;
return AqlValue(VPackSlice((*it).second.begin()));
}
}

View File

@ -30,6 +30,7 @@
#include "Aql/Function.h"
#include "Aql/Query.h"
#include "Basics/ConditionalDeleter.h"
#include "Basics/Exceptions.h"
#include "Basics/ScopeGuard.h"
#include "Basics/StringBuffer.h"
@ -618,19 +619,18 @@ static AqlValue MergeParameters(arangodb::aql::Query* query,
VPackFunctionParameters const& parameters,
char const* funcName,
bool recursive) {
VPackBuilder builder;
size_t const n = parameters.size();
if (n == 0) {
builder.openObject();
builder.close();
return AqlValue(builder);
return AqlValue(arangodb::basics::VelocyPackHelper::EmptyObjectValue());
}
// use the first argument as the preliminary result
AqlValue initial = ExtractFunctionParameterValue(trx, parameters, 0);
AqlValueMaterializer materializer(trx);
VPackSlice initialSlice = materializer.slice(initial, false);
VPackBuilder builder;
if (initial.isArray() && n == 1) {
// special case: a single array parameter
@ -645,9 +645,7 @@ static AqlValue MergeParameters(arangodb::aql::Query* query,
for (auto const& it : VPackArrayIterator(initialSlice)) {
if (!it.isObject()) {
RegisterInvalidArgumentWarning(query, funcName);
builder.clear();
builder.add(VPackValue(VPackValueType::Null));
return AqlValue(builder);
return AqlValue(arangodb::basics::VelocyPackHelper::NullValue());
}
try {
builder = arangodb::basics::VelocyPackHelper::merge(builder.slice(), it, false,
@ -1000,11 +998,13 @@ AqlValue Functions::Length(arangodb::aql::Query* query,
length = static_cast<size_t>(fpconv_dtoa(tmp, buffer));
}
} else if (value.isString()) {
length = TRI_CharLengthUtf8String(value.slice().copyString().c_str());
VPackValueLength l;
char const* p = value.slice().getString(l);
length = TRI_CharLengthUtf8String(p, l);
} else if (value.isObject()) {
length = static_cast<size_t>(value.length());
}
builder->add(VPackValue(static_cast<double>(length)));
builder->add(VPackValue(static_cast<uint64_t>(length)));
return AqlValue(builder.get());
}
@ -1801,7 +1801,7 @@ AqlValue Functions::Md5(arangodb::aql::Query* query,
arangodb::rest::SslInterface::sslHEX(hash, 16, p, length);
return AqlValue(std::string(hex, 32));
return AqlValue(&hex[0], 32);
}
/// @brief function SHA1
@ -1828,7 +1828,7 @@ AqlValue Functions::Sha1(arangodb::aql::Query* query,
arangodb::rest::SslInterface::sslHEX(hash, 20, p, length);
return AqlValue(std::string(hex, 40));
return AqlValue(&hex[0], 40);
}
/// @brief function HASH

View File

@ -534,7 +534,7 @@ AqlItemBlock* IndexBlock::getSome(size_t atLeast, size_t atMost) {
auto doc = _documents[_posInDocs++];
TRI_ASSERT(doc.isExternal());
res->setValue(j, static_cast<arangodb::aql::RegisterId>(curRegs),
AqlValue(doc.resolveExternal().begin()));
AqlValue(doc.resolveExternal().begin(), AqlValueFromMasterPointer()));
// No harm done, if the setValue throws!
}
}

View File

@ -580,7 +580,7 @@ void TraversalBlock::neighbors(std::string const& startVertex) {
continue;
}
_vertices.emplace_back(AqlValue(mptr.vpack()));
_vertices.emplace_back(AqlValue(mptr.vpack(), AqlValueFromMasterPointer()));
}
}

View File

@ -171,6 +171,8 @@ TraversalNode::TraversalNode(ExecutionPlan* plan, size_t id,
"invalid traversal depth");
}
std::unordered_map<std::string, TRI_edge_direction_e> seenCollections;
if (graph->type == NODE_TYPE_COLLECTION_LIST) {
size_t edgeCollectionCount = graph->numMembers();
_graphJson = arangodb::basics::Json(arangodb::basics::Json::Array,
@ -180,16 +182,32 @@ TraversalNode::TraversalNode(ExecutionPlan* plan, size_t id,
// List of edge collection names
for (size_t i = 0; i < edgeCollectionCount; ++i) {
auto col = graph->getMember(i);
TRI_edge_direction_e dir = TRI_EDGE_ANY;
if (col->type == NODE_TYPE_DIRECTION) {
// We have a collection with special direction.
TRI_edge_direction_e dir = parseDirection(col->getMember(0));
_directions.emplace_back(dir);
dir = parseDirection(col->getMember(0));
col = col->getMember(1);
} else {
_directions.emplace_back(baseDirection);
dir = baseDirection;
}
std::string eColName = col->getString();
// now do some uniqueness checks for the specified collections
auto it = seenCollections.find(eColName);
if (it != seenCollections.end()) {
if ((*it).second != dir) {
std::string msg("conflicting directions specified for collection '" +
std::string(eColName));
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_ARANGO_COLLECTION_TYPE_INVALID,
msg);
}
// do not re-add the same collection!
continue;
}
seenCollections.emplace(eColName, dir);
auto eColType = resolver->getCollectionTypeCluster(eColName);
if (eColType != TRI_COL_TYPE_EDGE) {
std::string msg("collection type invalid for collection '" +
@ -198,8 +216,10 @@ TraversalNode::TraversalNode(ExecutionPlan* plan, size_t id,
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_ARANGO_COLLECTION_TYPE_INVALID,
msg);
}
_directions.emplace_back(dir);
_graphJson.add(arangodb::basics::Json(eColName));
_edgeColls.push_back(eColName);
_edgeColls.emplace_back(eColName);
}
} else {
if (_edgeColls.empty()) {
@ -252,6 +272,7 @@ TraversalNode::TraversalNode(ExecutionPlan* plan, size_t id,
// Parse options node
}
/// @brief Internal constructor to clone the node.
TraversalNode::TraversalNode(
ExecutionPlan* plan, size_t id, TRI_vocbase_t* vocbase,
std::vector<std::string> const& edgeColls, Variable const* inVariable,

View File

@ -366,64 +366,6 @@ AgencyConnectionOptions AgencyComm::_globalConnectionOptions = {
10 // numRetries
};
////////////////////////////////////////////////////////////////////////////////
/// @brief constructs an agency comm locker
////////////////////////////////////////////////////////////////////////////////
AgencyCommLocker::AgencyCommLocker(std::string const& key,
std::string const& type,
double ttl, double timeout)
: _key(key), _type(type), _isLocked(false) {
AgencyComm comm;
_vpack = std::make_shared<VPackBuilder>();
try {
_vpack->add(VPackValue(type));
} catch (...) {
return;
}
if (comm.lock(key, ttl, timeout, _vpack->slice())) {
_isLocked = true;
}
}
////////////////////////////////////////////////////////////////////////////////
/// @brief destroys an agency comm locker
////////////////////////////////////////////////////////////////////////////////
AgencyCommLocker::~AgencyCommLocker() {
unlock();
}
////////////////////////////////////////////////////////////////////////////////
/// @brief unlocks the lock
////////////////////////////////////////////////////////////////////////////////
void AgencyCommLocker::unlock() {
if (_isLocked) {
AgencyComm comm;
updateVersion(comm);
if (comm.unlock(_key, _vpack->slice(), 0.0)) {
_isLocked = false;
}
}
}
////////////////////////////////////////////////////////////////////////////////
/// @brief update a lock version in the agency
////////////////////////////////////////////////////////////////////////////////
bool AgencyCommLocker::updateVersion(AgencyComm& comm) {
if (_type != "WRITE") {
return true;
}
AgencyCommResult result = comm.increment(_key + "/Version");
return result.successful();
}
////////////////////////////////////////////////////////////////////////////////
/// @brief cleans up all connections
////////////////////////////////////////////////////////////////////////////////

View File

@ -476,53 +476,8 @@ struct AgencyCommResult {
bool _connected;
};
class AgencyCommLocker {
public:
//////////////////////////////////////////////////////////////////////////////
/// @brief constructs an agency comm locker
///
/// The keys mentioned in this class are all not yet encoded.
//////////////////////////////////////////////////////////////////////////////
AgencyCommLocker(std::string const&, std::string const&, double = 0.0, double = 0.0);
//////////////////////////////////////////////////////////////////////////////
/// @brief destroys an agency comm locker
//////////////////////////////////////////////////////////////////////////////
~AgencyCommLocker();
public:
//////////////////////////////////////////////////////////////////////////////
/// @brief return whether the locking was successful
//////////////////////////////////////////////////////////////////////////////
bool successful() const { return _isLocked; }
//////////////////////////////////////////////////////////////////////////////
/// @brief unlocks the lock
//////////////////////////////////////////////////////////////////////////////
void unlock();
private:
//////////////////////////////////////////////////////////////////////////////
/// @brief update a lock version in the agency
//////////////////////////////////////////////////////////////////////////////
bool updateVersion(AgencyComm&);
private:
std::string const _key;
std::string const _type;
std::shared_ptr<arangodb::velocypack::Builder> _vpack;
bool _isLocked;
};
class AgencyComm {
friend struct AgencyCommResult;
friend class AgencyCommLocker;
public:

View File

@ -859,7 +859,16 @@ int ClusterInfo::createDatabaseCoordinator(std::string const& name,
_agencyCallbackRegistry->registerCallback(agencyCallback);
TRI_DEFER(_agencyCallbackRegistry->unregisterCallback(agencyCallback));
res = ac.casValue("Plan/Databases/" + name, slice, false, 0.0, realTimeout);
AgencyOperation newVal(
"Plan/Databases/" + name, AgencyValueOperationType::SET, slice);
AgencyOperation incrementVersion(
"Plan/Version", AgencySimpleOperationType::INCREMENT_OP);
AgencyPrecondition precondition(
"Plan/Databases/" + name, AgencyPrecondition::EMPTY, true);
AgencyWriteTransaction trx({newVal, incrementVersion}, precondition);
res = ac.sendTransactionWithFailover(trx, realTimeout);
if (!res.successful()) {
if (res._statusCode ==
(int)arangodb::GeneralResponse::ResponseCode::PRECONDITION_FAILED) {
@ -1506,149 +1515,147 @@ int ClusterInfo::ensureIndexCoordinator(
// It is possible that between the fetching of the planned collections
// and the write lock we acquire below something has changed. Therefore
// we first get the previous value and then do a compare and swap operation.
auto collectionBuilder = std::make_shared<VPackBuilder>();
{
AgencyCommLocker locker("Plan", "WRITE");
if (!locker.successful()) {
return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_LOCK_PLAN, errorMsg);
std::shared_ptr<CollectionInfo> c =
getCollection(databaseName, collectionID);
// Note that nobody is removing this collection in the plan, since
// we hold the write lock in the agency, therefore it does not matter
// that getCollection fetches the read lock and releases it before
// we get it again.
//
READ_LOCKER(readLocker, _planProt.lock);
if (c->empty()) {
return setErrormsg(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND, errorMsg);
}
auto collectionBuilder = std::make_shared<VPackBuilder>();
std::shared_ptr<VPackBuilder> tmp = std::make_shared<VPackBuilder>();
tmp->add(c->getIndexes());
MUTEX_LOCKER(guard, numberOfShardsMutex);
{
std::shared_ptr<CollectionInfo> c =
getCollection(databaseName, collectionID);
// Note that nobody is removing this collection in the plan, since
// we hold the write lock in the agency, therefore it does not matter
// that getCollection fetches the read lock and releases it before
// we get it again.
//
READ_LOCKER(readLocker, _planProt.lock);
if (c->empty()) {
return setErrormsg(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND, errorMsg);
}
std::shared_ptr<VPackBuilder> tmp = std::make_shared<VPackBuilder>();
tmp->add(c->getIndexes());
MUTEX_LOCKER(guard, numberOfShardsMutex);
{
numberOfShards = c->numberOfShards();
}
VPackSlice const indexes = tmp->slice();
if (indexes.isArray()) {
VPackSlice const type = slice.get("type");
if (!type.isString()) {
return setErrormsg(TRI_ERROR_INTERNAL, errorMsg);
}
for (auto const& other : VPackArrayIterator(indexes)) {
if (arangodb::basics::VelocyPackHelper::compare(
type, other.get("type"), false) != 0) {
// compare index types first. they must match
continue;
}
TRI_ASSERT(other.isObject());
bool isSame = compare(slice, other);
if (isSame) {
// found an existing index...
{
// Copy over all elements in slice.
VPackObjectBuilder b(&resultBuilder);
for (auto const& entry : VPackObjectIterator(other)) {
resultBuilder.add(entry.key.copyString(), entry.value);
}
resultBuilder.add("isNewlyCreated", VPackValue(false));
}
return setErrormsg(TRI_ERROR_NO_ERROR, errorMsg);
}
}
}
// no existing index found.
if (!create) {
TRI_ASSERT(resultBuilder.isEmpty());
return setErrormsg(TRI_ERROR_NO_ERROR, errorMsg);
}
// now create a new index
collectionBuilder->add(c->getSlice());
numberOfShards = c->numberOfShards();
}
VPackSlice const collectionSlice = collectionBuilder->slice();
if (!collectionSlice.isObject()) {
return setErrormsg(TRI_ERROR_CLUSTER_AGENCY_STRUCTURE_INVALID, errorMsg);
}
try {
VPackObjectBuilder b(&newBuilder);
// Create a new collection VPack with the new Index
for (auto const& entry : VPackObjectIterator(collectionSlice)) {
TRI_ASSERT(entry.key.isString());
std::string key = entry.key.copyString();
if (key == "indexes") {
TRI_ASSERT(entry.value.isArray());
newBuilder.add(key, VPackValue(VPackValueType::Array));
// Copy over all indexes known so far
for (auto const& idx : VPackArrayIterator(entry.value)) {
newBuilder.add(idx);
}
VPackSlice const indexes = tmp->slice();
if (indexes.isArray()) {
VPackSlice const type = slice.get("type");
if (!type.isString()) {
return setErrormsg(TRI_ERROR_INTERNAL, errorMsg);
}
for (auto const& other : VPackArrayIterator(indexes)) {
if (arangodb::basics::VelocyPackHelper::compare(
type, other.get("type"), false) != 0) {
// compare index types first. they must match
continue;
}
TRI_ASSERT(other.isObject());
bool isSame = compare(slice, other);
if (isSame) {
// found an existing index...
{
VPackObjectBuilder ob(&newBuilder);
// Add the new index ignoring "id"
for (auto const& e : VPackObjectIterator(slice)) {
TRI_ASSERT(e.key.isString());
std::string tmpkey = e.key.copyString();
if (tmpkey != "id") {
newBuilder.add(tmpkey, e.value);
}
// Copy over all elements in slice.
VPackObjectBuilder b(&resultBuilder);
for (auto const& entry : VPackObjectIterator(other)) {
resultBuilder.add(entry.key.copyString(), entry.value);
}
newBuilder.add("id", VPackValue(idString));
resultBuilder.add("isNewlyCreated", VPackValue(false));
}
newBuilder.close(); // the array
} else {
// Plain copy everything else
newBuilder.add(key, entry.value);
return setErrormsg(TRI_ERROR_NO_ERROR, errorMsg);
}
}
} catch (...) {
return setErrormsg(TRI_ERROR_OUT_OF_MEMORY, errorMsg);
}
AgencyCommResult result;
result = ac.casValue(key, collection, newBuilder.slice(), 0.0, 0.0);
if (!result.successful()) {
return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION_IN_PLAN,
errorMsg);
// no existing index found.
if (!create) {
TRI_ASSERT(resultBuilder.isEmpty());
return setErrormsg(TRI_ERROR_NO_ERROR, errorMsg);
}
// now create a new index
collectionBuilder->add(c->getSlice());
}
// reload our own cache:
VPackSlice const collectionSlice = collectionBuilder->slice();
if (!collectionSlice.isObject()) {
return setErrormsg(TRI_ERROR_CLUSTER_AGENCY_STRUCTURE_INVALID, errorMsg);
}
try {
VPackObjectBuilder b(&newBuilder);
// Create a new collection VPack with the new Index
for (auto const& entry : VPackObjectIterator(collectionSlice)) {
TRI_ASSERT(entry.key.isString());
std::string key = entry.key.copyString();
if (key == "indexes") {
TRI_ASSERT(entry.value.isArray());
newBuilder.add(key, VPackValue(VPackValueType::Array));
// Copy over all indexes known so far
for (auto const& idx : VPackArrayIterator(entry.value)) {
newBuilder.add(idx);
}
{
VPackObjectBuilder ob(&newBuilder);
// Add the new index ignoring "id"
for (auto const& e : VPackObjectIterator(slice)) {
TRI_ASSERT(e.key.isString());
std::string tmpkey = e.key.copyString();
if (tmpkey != "id") {
newBuilder.add(tmpkey, e.value);
}
}
newBuilder.add("id", VPackValue(idString));
}
newBuilder.close(); // the array
} else {
// Plain copy everything else
newBuilder.add(key, entry.value);
}
}
} catch (...) {
return setErrormsg(TRI_ERROR_OUT_OF_MEMORY, errorMsg);
}
AgencyOperation newValue(
key, AgencyValueOperationType::SET, newBuilder.slice());
AgencyOperation incrementVersion(
"Plan/Version", AgencySimpleOperationType::INCREMENT_OP);
AgencyPrecondition oldValue(key, AgencyPrecondition::VALUE, collection);
AgencyWriteTransaction trx ({newValue, incrementVersion}, oldValue);
AgencyCommResult result = ac.sendTransactionWithFailover(trx, 0.0);
if (!result.successful()) {
return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION_IN_PLAN,
errorMsg);
}
loadPlan();
TRI_ASSERT(numberOfShards > 0);
{
CONDITION_LOCKER(locker, agencyCallback->_cv);
while (true) {
if (dbServerResult >= 0) {
return dbServerResult;
}
if (TRI_microtime() > endTime) {
return setErrormsg(TRI_ERROR_CLUSTER_TIMEOUT, errorMsg);
}
agencyCallback->executeByCallbackOrTimeout(interval);
}
}
return setErrormsg(TRI_ERROR_CLUSTER_TIMEOUT, errorMsg);
}
@ -1759,85 +1766,84 @@ int ClusterInfo::dropIndexCoordinator(std::string const& databaseName,
// It is possible that between the fetching of the planned collections
// and the write lock we acquire below something has changed. Therefore
// we first get the previous value and then do a compare and swap operation.
VPackSlice indexes;
{
AgencyCommLocker locker("Plan", "WRITE");
if (!locker.successful()) {
return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_LOCK_PLAN, errorMsg);
std::shared_ptr<CollectionInfo> c =
getCollection(databaseName, collectionID);
READ_LOCKER(readLocker, _planProt.lock);
if (c->empty()) {
return setErrormsg(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND, errorMsg);
}
VPackSlice indexes;
{
std::shared_ptr<CollectionInfo> c =
getCollection(databaseName, collectionID);
READ_LOCKER(readLocker, _planProt.lock);
if (c->empty()) {
return setErrormsg(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND, errorMsg);
}
indexes = c->getIndexes();
if (!indexes.isArray()) {
// no indexes present, so we can't delete our index
return setErrormsg(TRI_ERROR_ARANGO_INDEX_NOT_FOUND, errorMsg);
}
MUTEX_LOCKER(guard, numberOfShardsMutex);
numberOfShards = c->numberOfShards();
}
bool found = false;
VPackBuilder newIndexes;
{
VPackArrayBuilder newIndexesArrayBuilder(&newIndexes);
// mop: eh....do we need a flag to mark it invalid until cache is renewed?
// TRI_DeleteObjectJson(TRI_UNKNOWN_MEM_ZONE, collectionJson, "indexes");
for (auto const& indexSlice: VPackArrayIterator(indexes)) {
VPackSlice id = indexSlice.get("id");
VPackSlice type = indexSlice.get("type");
if (!id.isString() || !type.isString()) {
continue;
}
if (idString == id.copyString()) {
// found our index, ignore it when copying
found = true;
std::string const typeString = type.copyString();
if (typeString == "primary" || typeString == "edge") {
return setErrormsg(TRI_ERROR_FORBIDDEN, errorMsg);
}
continue;
}
newIndexes.add(indexSlice);
}
}
if (!found) {
indexes = c->getIndexes();
if (!indexes.isArray()) {
// no indexes present, so we can't delete our index
return setErrormsg(TRI_ERROR_ARANGO_INDEX_NOT_FOUND, errorMsg);
}
VPackBuilder newCollectionBuilder;
{
VPackObjectBuilder newCollectionObjectBuilder(&newCollectionBuilder);
for (auto const& property: VPackObjectIterator(previous)) {
if (property.key.copyString() == "indexes") {
newCollectionBuilder.add(property.key.copyString(), newIndexes.slice());
} else {
newCollectionBuilder.add(property.key.copyString(), property.value);
}
MUTEX_LOCKER(guard, numberOfShardsMutex);
numberOfShards = c->numberOfShards();
}
bool found = false;
VPackBuilder newIndexes;
{
VPackArrayBuilder newIndexesArrayBuilder(&newIndexes);
// mop: eh....do we need a flag to mark it invalid until cache is renewed?
// TRI_DeleteObjectJson(TRI_UNKNOWN_MEM_ZONE, collectionJson, "indexes");
for (auto const& indexSlice: VPackArrayIterator(indexes)) {
VPackSlice id = indexSlice.get("id");
VPackSlice type = indexSlice.get("type");
if (!id.isString() || !type.isString()) {
continue;
}
}
AgencyCommResult result =
ac.casValue(key, previous, newCollectionBuilder.slice(), 0.0, 0.0);
if (!result.successful()) {
return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION_IN_PLAN,
errorMsg);
if (idString == id.copyString()) {
// found our index, ignore it when copying
found = true;
std::string const typeString = type.copyString();
if (typeString == "primary" || typeString == "edge") {
return setErrormsg(TRI_ERROR_FORBIDDEN, errorMsg);
}
continue;
}
newIndexes.add(indexSlice);
}
}
if (!found) {
return setErrormsg(TRI_ERROR_ARANGO_INDEX_NOT_FOUND, errorMsg);
}
VPackBuilder newCollectionBuilder;
{
VPackObjectBuilder newCollectionObjectBuilder(&newCollectionBuilder);
for (auto const& property: VPackObjectIterator(previous)) {
if (property.key.copyString() == "indexes") {
newCollectionBuilder.add(property.key.copyString(), newIndexes.slice());
} else {
newCollectionBuilder.add(property.key.copyString(), property.value);
}
}
}
AgencyOperation newVal(
key, AgencyValueOperationType::SET, newCollectionBuilder.slice());
AgencyOperation incrementVersion(
"Plan/Version", AgencySimpleOperationType::INCREMENT_OP);
AgencyPrecondition prec(key, AgencyPrecondition::VALUE, previous);
AgencyWriteTransaction trx ({newVal, incrementVersion}, prec);
AgencyCommResult result = ac.sendTransactionWithFailover(trx, 0.0);
if (!result.successful()) {
return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION_IN_PLAN,
errorMsg);
}
// load our own cache:
loadPlan();

View File

@ -24,6 +24,7 @@
#include "ClusterMethods.h"
#include "Basics/conversions.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringRef.h"
#include "Basics/StringUtils.h"
#include "Basics/tri-strings.h"
#include "Basics/VelocyPackHelper.h"
@ -878,7 +879,7 @@ int deleteDocumentOnCoordinator(
VPackSlice const node, VPackValueLength const index) -> int {
// Sort out the _key attribute and identify the shard responsible for it.
std::string _key(Transaction::extractKeyPart(node));
StringRef _key(Transaction::extractKeyPart(node));
ShardID shardID;
if (_key.empty()) {
// We have invalid input at this point.
@ -892,7 +893,7 @@ int deleteDocumentOnCoordinator(
bool usesDefaultShardingAttributes;
int error = ci->getResponsibleShard(
collid, arangodb::basics::VelocyPackHelper::EmptyObjectValue(), true,
shardID, usesDefaultShardingAttributes, _key);
shardID, usesDefaultShardingAttributes, _key.toString());
if (error == TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND) {
return TRI_ERROR_CLUSTER_SHARD_GONE;

View File

@ -931,19 +931,12 @@ bool ServerState::storeRole(RoleEnum role) {
if (isClusterRole(role)) {
AgencyComm comm;
AgencyCommResult result;
std::unique_ptr<AgencyCommLocker> locker;
locker.reset(new AgencyCommLocker("Current", "WRITE"));
if (!locker->successful()) {
return false;
}
if (role == ServerState::ROLE_COORDINATOR) {
VPackBuilder builder;
try {
builder.add(VPackValue("none"));
} catch (...) {
locker->unlock();
LOG(FATAL) << "out of memory"; FATAL_ERROR_EXIT();
}
@ -952,7 +945,6 @@ bool ServerState::storeRole(RoleEnum role) {
comm.setValue("Current/Coordinators/" + _id, builder.slice(), 0.0);
if (!result.successful()) {
locker->unlock();
LOG(FATAL) << "unable to register coordinator in agency"; FATAL_ERROR_EXIT();
}
} else if (role == ServerState::ROLE_PRIMARY) {
@ -960,7 +952,6 @@ bool ServerState::storeRole(RoleEnum role) {
try {
builder.add(VPackValue("none"));
} catch (...) {
locker->unlock();
LOG(FATAL) << "out of memory"; FATAL_ERROR_EXIT();
}
@ -969,7 +960,6 @@ bool ServerState::storeRole(RoleEnum role) {
comm.setValue("Current/DBServers/" + _id, builder.slice(), 0.0);
if (!result.successful()) {
locker->unlock();
LOG(FATAL) << "unable to register db server in agency"; FATAL_ERROR_EXIT();
}
} else if (role == ServerState::ROLE_SECONDARY) {
@ -978,20 +968,22 @@ bool ServerState::storeRole(RoleEnum role) {
try {
builder.add(VPackValue(keyName));
} catch (...) {
locker->unlock();
LOG(FATAL) << "out of memory"; FATAL_ERROR_EXIT();
}
std::string myId (
"Current/DBServers/" + ServerState::instance()->getPrimaryId());
AgencyOperation addMe(
myId, AgencyValueOperationType::SET, builder.slice());
AgencyOperation incrementVersion(
"Plan/Version", AgencySimpleOperationType::INCREMENT_OP);
AgencyPrecondition precondition(myId, AgencyPrecondition::EMPTY, true);
AgencyWriteTransaction trx({addMe, incrementVersion}, precondition);
// register server
AgencyCommResult result = comm.casValue(
"Current/DBServers/" + ServerState::instance()->getPrimaryId(),
builder.slice(),
true,
0.0,
0.0);
AgencyCommResult result = comm.sendTransactionWithFailover(trx, 0.0);
if (!result.successful()) {
locker->unlock();
// mop: fail gracefully (allow retry)
return false;
}

View File

@ -57,9 +57,10 @@ bool IndexIteratorContext::isCluster() const {
return arangodb::ServerState::instance()->isRunningInCluster();
}
int IndexIteratorContext::resolveId(char const* handle, TRI_voc_cid_t& cid,
int IndexIteratorContext::resolveId(char const* handle, size_t length,
TRI_voc_cid_t& cid,
char const*& key) const {
char const* p = strchr(handle, TRI_DOCUMENT_HANDLE_SEPARATOR_CHR);
char const* p = static_cast<char const*>(memchr(handle, TRI_DOCUMENT_HANDLE_SEPARATOR_CHR, length));
if (p == nullptr || *p == '\0') {
return TRI_ERROR_ARANGO_DOCUMENT_HANDLE_BAD;
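The switch from strchr to memchr matters here because the handle is length-delimited (an AQL string value), not NUL-terminated. A minimal sketch of the bounded split, with hypothetical names and assuming the handle separator is '/':

#include <cstring>

// hypothetical helper, not from this commit: locate the separator in a
// length-delimited document handle "collection/key" without a NUL terminator
static bool splitHandle(char const* handle, size_t length, size_t& slashPos) {
  void const* p = memchr(handle, '/', length);  // never reads past `length`
  if (p == nullptr) {
    return false;                               // not a valid handle
  }
  slashPos = static_cast<char const*>(p) - handle;
  return true;
}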

View File

@ -48,7 +48,7 @@ struct IndexIteratorContext {
bool isCluster() const;
int resolveId(char const*, TRI_voc_cid_t&, char const*&) const;
int resolveId(char const*, size_t, TRI_voc_cid_t&, char const*&) const;
TRI_vocbase_t* vocbase;
mutable CollectionNameResolver const* resolver;

View File

@ -39,7 +39,7 @@
using namespace arangodb;
static inline uint64_t HashKey(void* userData, uint8_t const* key) {
static inline uint64_t HashKey(void*, uint8_t const* key) {
// we can use a fast hash function here, as index values are restricted to strings
return VPackSlice(key).hashString();
}
@ -466,8 +466,7 @@ IndexIterator* PrimaryIndex::createIterator(
arangodb::aql::AstNode const* attrNode,
std::vector<arangodb::aql::AstNode const*> const& valNodes) const {
// _key or _id?
bool const isId =
(strcmp(attrNode->getStringValue(), TRI_VOC_ATTRIBUTE_ID) == 0);
bool const isId = (attrNode->getString() == StaticStrings::IdString);
// only leave the valid elements in the vector
auto keys = std::make_unique<VPackBuilder>();
@ -486,7 +485,7 @@ IndexIterator* PrimaryIndex::createIterator(
// correct collection (i.e. _collection)
TRI_voc_cid_t cid;
char const* key;
int res = context->resolveId(valNode->getStringValue(), cid, key);
int res = context->resolveId(valNode->getStringValue(), valNode->getStringLength(), cid, key);
if (res != TRI_ERROR_NO_ERROR) {
continue;
@ -516,7 +515,7 @@ IndexIterator* PrimaryIndex::createIterator(
} else {
keys->openArray();
keys->openObject();
keys->add(TRI_SLICE_KEY_EQUAL, VPackValue(valNode->getStringValue()));
keys->add(TRI_SLICE_KEY_EQUAL, VPackValuePair(valNode->getStringValue(), valNode->getStringLength(), VPackValueType::String));
keys->close();
keys->close();
}
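VPackValuePair lets a Builder ingest a string directly from a pointer/length pair, so AST string values, which carry an explicit length instead of a NUL terminator, no longer need an intermediate std::string. A hedged sketch of the pattern (key name and buffer contents are illustrative):

// add a length-delimited, non-NUL-terminated string under an object key
char const buf[] = {'a', 'b', 'c'};     // 3 bytes, no terminator
VPackBuilder keys;
keys.openArray();
keys.openObject();
keys.add("eq", VPackValuePair(buf, sizeof(buf), VPackValueType::String));
keys.close();                           // close the object
keys.close();                           // close the array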

View File

@ -423,7 +423,7 @@ bool RestImportHandler::createFromJson(std::string const& type) {
}
// now find end of line
char const* pos = strchr(ptr, '\n');
char const* pos = static_cast<char const*>(memchr(ptr, '\n', end - ptr));
char const* oldPtr = nullptr;
std::shared_ptr<VPackBuilder> builder;
@ -580,7 +580,7 @@ bool RestImportHandler::createFromKeyValueList() {
char const* bodyEnd = current + bodyStr.size();
// process header
char const* next = strchr(current, '\n');
char const* next = static_cast<char const*>(memchr(current, '\n', bodyEnd - current));
if (next == nullptr) {
generateError(GeneralResponse::ResponseCode::BAD,

View File

@ -392,7 +392,7 @@ void RestSimpleHandler::lookupByKeys(VPackSlice const& slice) {
if (!e->isEdgeAccess && !e->matchesCheck(&trx, tmp)) {
add = false;
std::string _id = trx.extractIdString(tmp);
filteredIds.emplace_back(_id);
filteredIds.emplace_back(std::move(_id));
break;
}
}
@ -430,8 +430,7 @@ void RestSimpleHandler::lookupByKeys(VPackSlice const& slice) {
}
}
auto transactionContext = std::make_shared<StandaloneTransactionContext>(_vocbase);
auto customTypeHandler = transactionContext->orderCustomTypeHandler();
auto customTypeHandler = queryResult.context->orderCustomTypeHandler();
VPackOptions options = VPackOptions::Defaults; // copy defaults
options.customTypeHandler = customTypeHandler.get();

View File

@ -60,7 +60,7 @@ UpgradeFeature::UpgradeFeature(
void UpgradeFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
options->addSection("database", "Configure the database");
options->addOldOption("upgrade", "--database.auto-upgrade");
options->addOldOption("upgrade", "database.auto-upgrade");
options->addOption("--database.auto-upgrade",
"perform a database upgrade if necessary",

View File

@ -630,24 +630,24 @@ rocksdb::Transaction* Transaction::rocksTransaction() {
/// @brief extract the _key attribute from a slice
////////////////////////////////////////////////////////////////////////////////
std::string Transaction::extractKeyPart(VPackSlice const slice) {
StringRef Transaction::extractKeyPart(VPackSlice const slice) {
// extract _key
if (slice.isObject()) {
VPackSlice k = slice.get(StaticStrings::KeyString);
if (!k.isString()) {
return StaticStrings::Empty; // fail
return StringRef(); // fail
}
return k.copyString();
return StringRef(k);
}
if (slice.isString()) {
std::string key = slice.copyString();
StringRef key(slice);
size_t pos = key.find('/');
if (pos == std::string::npos) {
return key;
}
return key.substr(pos + 1);
}
return StaticStrings::Empty;
return StringRef();
}
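Returning a StringRef instead of a std::string removes a heap allocation per extracted key; the ref points into the slice's own memory and therefore must not outlive it. A hedged usage sketch (the handle value is illustrative):

VPackBuilder b;
b.add(VPackValue("users/12345"));                  // a full document handle
StringRef key = Transaction::extractKeyPart(b.slice());
// key references the "12345" suffix inside b's buffer; nothing is copied
TRI_ASSERT(key.toString() == "12345");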
//////////////////////////////////////////////////////////////////////////////
@ -975,18 +975,18 @@ void Transaction::extractKeyAndRevFromDocument(VPackSlice slice,
void Transaction::buildDocumentIdentity(TRI_document_collection_t* document,
VPackBuilder& builder,
TRI_voc_cid_t cid,
std::string const& key,
StringRef const& key,
VPackSlice const rid,
VPackSlice const oldRid,
TRI_doc_mptr_t const* oldMptr,
TRI_doc_mptr_t const* newMptr) {
builder.openObject();
if (ServerState::isRunningInCluster(_serverRole)) {
builder.add(StaticStrings::IdString, VPackValue(resolver()->getCollectionName(cid) + "/" + key));
builder.add(StaticStrings::IdString, VPackValue(resolver()->getCollectionName(cid) + "/" + key.toString()));
} else {
builder.add(StaticStrings::IdString, VPackValue(document->_info.name() + "/" + key));
builder.add(StaticStrings::IdString, VPackValue(document->_info.name() + "/" + key.toString()));
}
builder.add(StaticStrings::KeyString, VPackValue(key));
builder.add(StaticStrings::KeyString, VPackValuePair(key.data(), key.length(), VPackValueType::String));
TRI_ASSERT(!rid.isNone());
builder.add(StaticStrings::RevString, rid);
if (!oldRid.isNone()) {
@ -1325,7 +1325,7 @@ int Transaction::documentFastPath(std::string const& collectionName,
orderDitch(cid); // will throw when it fails
std::string key(Transaction::extractKeyPart(value));
StringRef key(Transaction::extractKeyPart(value));
if (key.empty()) {
return TRI_ERROR_ARANGO_DOCUMENT_HANDLE_BAD;
}
@ -1411,7 +1411,7 @@ OperationResult Transaction::documentCoordinator(std::string const& collectionNa
auto resultBody = std::make_shared<VPackBuilder>();
if (!value.isArray()) {
std::string key(Transaction::extractKeyPart(value));
StringRef key(Transaction::extractKeyPart(value));
if (key.empty()) {
return OperationResult(TRI_ERROR_ARANGO_DOCUMENT_KEY_BAD);
}
@ -1455,7 +1455,7 @@ OperationResult Transaction::documentLocal(std::string const& collectionName,
auto workOnOneDocument = [&](VPackSlice const value, bool isMultiple) -> int {
TIMER_START(TRANSACTION_DOCUMENT_EXTRACT);
std::string key(Transaction::extractKeyPart(value));
StringRef key(Transaction::extractKeyPart(value));
if (key.empty()) {
return TRI_ERROR_ARANGO_DOCUMENT_HANDLE_BAD;
}
@ -1652,8 +1652,7 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
TRI_ASSERT(mptr.vpack() != nullptr);
std::string keyString
= VPackSlice(mptr.vpack()).get(StaticStrings::KeyString).copyString();
StringRef keyString(VPackSlice(mptr.vpack()).get(StaticStrings::KeyString));
TIMER_START(TRANSACTION_INSERT_BUILD_DOCUMENT_IDENTITY);
@ -2001,7 +2000,7 @@ OperationResult Transaction::modifyLocal(
if (res == TRI_ERROR_ARANGO_CONFLICT) {
// on a conflict, still return the document identity
if ((!options.silent || doingSynchronousReplication) && !isBabies) {
std::string key = newVal.get(StaticStrings::KeyString).copyString();
StringRef key(newVal.get(StaticStrings::KeyString));
buildDocumentIdentity(document, resultBuilder, cid, key, actualRevision,
VPackSlice(),
options.returnOld ? &previous : nullptr, nullptr);
@ -2014,7 +2013,7 @@ OperationResult Transaction::modifyLocal(
TRI_ASSERT(mptr.vpack() != nullptr);
if (!options.silent || doingSynchronousReplication) {
std::string key = newVal.get(StaticStrings::KeyString).copyString();
StringRef key(newVal.get(StaticStrings::KeyString));
buildDocumentIdentity(document, resultBuilder, cid, key,
mptr.revisionIdAsSlice(), actualRevision,
options.returnOld ? &previous : nullptr ,
@ -2238,15 +2237,14 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
auto workOnOneDocument = [&](VPackSlice value, bool isBabies) -> int {
VPackSlice actualRevision;
TRI_doc_mptr_t previous;
std::string key;
std::shared_ptr<VPackBuilder> builder;
TransactionBuilderLeaser builder(this);
StringRef key;
if (value.isString()) {
key = value.copyString();
key = value;
size_t pos = key.find('/');
if (pos != std::string::npos) {
key = key.substr(pos + 1);
builder = std::make_shared<VPackBuilder>();
builder->add(VPackValue(key));
builder->add(VPackValuePair(key.data(), key.length(), VPackValueType::String));
value = builder->slice();
}
} else if (value.isObject()) {
@ -2254,7 +2252,7 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
if (!keySlice.isString()) {
return TRI_ERROR_ARANGO_DOCUMENT_HANDLE_BAD;
}
key = value.get(StaticStrings::KeyString).copyString();
key = keySlice;
} else {
return TRI_ERROR_ARANGO_DOCUMENT_HANDLE_BAD;
}

View File

@ -26,6 +26,7 @@
#include "Basics/Common.h"
#include "Basics/Exceptions.h"
#include "Basics/StringRef.h"
#include "Cluster/ServerState.h"
#include "Utils/OperationOptions.h"
#include "Utils/OperationResult.h"
@ -301,7 +302,7 @@ class Transaction {
/// @brief extract the _key attribute from a slice
//////////////////////////////////////////////////////////////////////////////
static std::string extractKeyPart(VPackSlice const);
static StringRef extractKeyPart(VPackSlice const);
//////////////////////////////////////////////////////////////////////////////
/// @brief extract the _id attribute from a slice, and convert it into a
@ -649,7 +650,7 @@ class Transaction {
void buildDocumentIdentity(TRI_document_collection_t* document,
VPackBuilder& builder,
TRI_voc_cid_t cid,
std::string const& key,
StringRef const& key,
VPackSlice const rid,
VPackSlice const oldRid,
TRI_doc_mptr_t const* oldMptr,

View File

@ -1196,7 +1196,7 @@ static void JS_ExecuteAqlJson(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_THROW_EXCEPTION(res);
}
}
TRI_GET_GLOBALS();
arangodb::aql::Query query(true, vocbase, queryBuilder, options,
arangodb::aql::PART_MAIN);
@ -1208,14 +1208,12 @@ static void JS_ExecuteAqlJson(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_THROW_EXCEPTION_FULL(queryResult.code, queryResult.details);
}
auto transactionContext =
std::make_shared<StandaloneTransactionContext>(vocbase);
// return the array value as it is. this is a performance optimization
v8::Handle<v8::Object> result = v8::Object::New(isolate);
if (queryResult.result != nullptr) {
result->ForceSet(TRI_V8_ASCII_STRING("json"),
TRI_VPackToV8(isolate, queryResult.result->slice(),
transactionContext->getVPackOptions()));
queryResult.context->getVPackOptions()));
}
if (queryResult.stats != nullptr) {
VPackSlice stats = queryResult.stats->slice();

View File

@ -134,10 +134,10 @@ aql::AqlValue SingleServerTraverser::fetchVertexData(std::string const& id) {
uint8_t const* p = mptr.vpack();
_vertices.emplace(id, p);
return aql::AqlValue(p);
return aql::AqlValue(p, aql::AqlValueFromMasterPointer());
}
return aql::AqlValue((*it).second);
return aql::AqlValue((*it).second, aql::AqlValueFromMasterPointer());
}
bool SingleServerTraverser::VertexGetter::getVertex(std::string const& edge,

View File

@ -3175,11 +3175,16 @@ arangodb::Index* TRI_EnsureFulltextIndexDocumentCollection(
int TRI_document_collection_t::read(Transaction* trx, std::string const& key,
TRI_doc_mptr_t* mptr, bool lock) {
return read(trx, StringRef(key.c_str(), key.size()), mptr, lock);
}
int TRI_document_collection_t::read(Transaction* trx, StringRef const& key,
TRI_doc_mptr_t* mptr, bool lock) {
TRI_ASSERT(mptr != nullptr);
mptr->setVPack(nullptr);
TransactionBuilderLeaser builder(trx);
builder->add(VPackValue(key));
builder->add(VPackValuePair(key.data(), key.size(), VPackValueType::String));
VPackSlice slice = builder->slice();
{
@ -4115,7 +4120,7 @@ int TRI_document_collection_t::newObjectForInsert(
return TRI_ERROR_ARANGO_OUT_OF_KEYS;
}
uint8_t* where = builder.add(StaticStrings::KeyString,
VPackValue(keyString));
VPackValue(keyString));
s = VPackSlice(where); // point to newly built value, the string
} else if (!s.isString()) {
return TRI_ERROR_ARANGO_DOCUMENT_KEY_BAD;
@ -4266,7 +4271,7 @@ void TRI_document_collection_t::mergeObjectsForUpdate(
fromSlice = it.value();
} else if (key == StaticStrings::ToString) {
toSlice = it.value();
}
} // else do nothing
} else {
// regular attribute
newValues.emplace(std::move(key), it.value());

View File

@ -26,6 +26,7 @@
#include "Basics/Common.h"
#include "Basics/ReadWriteLock.h"
#include "Basics/StringRef.h"
#include "Cluster/ClusterInfo.h"
#include "VocBase/collection.h"
#include "VocBase/DatafileHelper.h"
@ -184,8 +185,8 @@ struct TRI_document_collection_t : public TRI_collection_t {
// function that is called to garbage-collect the collection's indexes
int (*cleanupIndexes)(struct TRI_document_collection_t*);
int read(arangodb::Transaction*, std::string const&,
TRI_doc_mptr_t*, bool);
int read(arangodb::Transaction*, std::string const&, TRI_doc_mptr_t*, bool);
int read(arangodb::Transaction*, arangodb::StringRef const&, TRI_doc_mptr_t*, bool);
int insert(arangodb::Transaction*, arangodb::velocypack::Slice const,
TRI_doc_mptr_t*, arangodb::OperationOptions&, TRI_voc_tick_t&, bool);
int update(arangodb::Transaction*, arangodb::velocypack::Slice const,

View File

@ -33,6 +33,7 @@
#include "Basics/Exceptions.h"
#include "Basics/FileUtils.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringRef.h"
#include "Basics/VelocyPackHelper.h"
#include "Basics/WriteLocker.h"
#include "Basics/conversions.h"
@ -2322,12 +2323,12 @@ void TRI_SanitizeObject(VPackSlice const slice, VPackBuilder& builder) {
TRI_ASSERT(slice.isObject());
VPackObjectIterator it(slice);
while (it.valid()) {
std::string key(it.key().copyString());
StringRef key(it.key());
if (key.empty() || key[0] != '_' ||
(key != StaticStrings::KeyString &&
key != StaticStrings::IdString &&
key != StaticStrings::RevString)) {
builder.add(std::move(key), it.value());
builder.add(key.data(), key.size(), it.value());
}
it.next();
}
@ -2342,14 +2343,14 @@ void TRI_SanitizeObjectWithEdges(VPackSlice const slice, VPackBuilder& builder)
TRI_ASSERT(slice.isObject());
VPackObjectIterator it(slice);
while (it.valid()) {
std::string key(it.key().copyString());
StringRef key(it.key());
if (key.empty() || key[0] != '_' ||
(key != StaticStrings::KeyString &&
key != StaticStrings::IdString &&
key != StaticStrings::RevString &&
key != StaticStrings::FromString &&
key != StaticStrings::ToString)) {
builder.add(std::move(key), it.value());
builder.add(key.data(), key.length(), it.value());
}
it.next();
}

View File

@ -49,17 +49,18 @@ function agencyTestSuite () {
// Wait for multi-host agency to have elected a leader
if (agencyServers.length > 1) {
require("internal").print("Waiting for the agency to get ready ... ");
while (true) {
var res = request({url: agencyServers[whoseTurn] + "/_api/agency/config",
method: "GET", followRedirects: true, body: "",
headers: {"Content-Type": "application/json"}});
wait(1);
wait(0.25);
res.bodyParsed = JSON.parse(res.body);
require("internal").print("Leadership election going on ... ");
if (res.bodyParsed.leaderId >= 0 && res.bodyParsed.leaderId < nagents) {
whoseTurn = res.bodyParsed.leaderId;
require("internal").print("Agents elected " + res.bodyParsed.leaderId +
" leader in term " + res.bodyParsed.term + ".");
require("internal").print(
"Agent " + res.bodyParsed.leaderId + " was elected leader in term "
+ res.bodyParsed.term + ".");
break;
}
}

View File

@ -804,6 +804,9 @@ function ahuacatlNumericFunctionsTestSuite () {
////////////////////////////////////////////////////////////////////////////////
testAsin : function () {
if (require("internal").platform === "solaris") {
return;
}
var values = [
[-999999999,null],
[-1000,null],
@ -877,6 +880,9 @@ function ahuacatlNumericFunctionsTestSuite () {
////////////////////////////////////////////////////////////////////////////////
testAcos : function () {
if (require("internal").platform === "solaris") {
return;
}
var values = [
[-999999999,null],
[-1000,null],

View File

@ -2113,8 +2113,40 @@ function multiEdgeDirectionSuite () {
result = db._query(item.q2, bindVars2).toArray();
assertEqual(result, item.res);
});
}
},
testDuplicationCollections: function () {
var queries = [
[ "FOR x IN ANY @start @@ec, INBOUND @@ec RETURN x", false ],
[ "FOR x IN ANY @start @@ec, OUTBOUND @@ec RETURN x", false ],
[ "FOR x IN ANY @start @@ec, ANY @@ec RETURN x", true ],
[ "FOR x IN INBOUND @start @@ec, INBOUND @@ec RETURN x", true ],
[ "FOR x IN INBOUND @start @@ec, OUTBOUND @@ec RETURN x", false ],
[ "FOR x IN INBOUND @start @@ec, ANY @@ec RETURN x", false ],
[ "FOR x IN OUTBOUND @start @@ec, INBOUND @@ec RETURN x", false ],
[ "FOR x IN OUTBOUND @start @@ec, OUTBOUND @@ec RETURN x", true ],
[ "FOR x IN OUTBOUND @start @@ec, ANY @@ec RETURN x", false ]
];
var bindVars = {
"@ec": en,
start: vertex.A
};
queries.forEach(function (query) {
if (query[1]) {
// should work
db._query(query[0], bindVars).toArray();
} else {
// should fail
try {
db._query(query[0], bindVars).toArray();
fail();
} catch (e) {
assertEqual(e.errorNum, errors.ERROR_ARANGO_COLLECTION_TYPE_INVALID.code);
}
}
});
}
};
}

View File

@ -0,0 +1,48 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGODB_BASICS_CONDITIONAL_DELETER_H
#define ARANGODB_BASICS_CONDITIONAL_DELETER_H 1
#include "Basics/Common.h"
namespace arangodb {
/// @brief a custom deleter that deletes an object only if the condition is true
/// to be used in conjunction with unique or shared ptrs when ownership needs to
/// be transferred
template<typename T>
struct ConditionalDeleter {
explicit ConditionalDeleter(bool& condition) : condition(condition) {}
void operator()(T* object) {
if (condition) {
delete object;
}
}
bool& condition;
};
}
#endif
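A short usage sketch for the new deleter (the surrounding code is hypothetical): ownership is handed off by flipping the referenced flag, after which the smart pointer's destructor becomes a no-op.

#include <memory>

struct Buffer {};                          // stand-in payload type

void example() {
  bool owned = true;                       // we own the buffer initially
  std::shared_ptr<Buffer> guard(
      new Buffer(), arangodb::ConditionalDeleter<Buffer>(owned));
  // ... hand guard.get() to a consumer that takes over ownership ...
  owned = false;                           // the deleter is now a no-op
}                                          // guard is destroyed; nothing is deleted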

View File

@ -79,11 +79,7 @@ int Utf8Helper::compareUtf8(char const* left, size_t leftLength,
char const* right, size_t rightLength) const {
TRI_ASSERT(left != nullptr);
TRI_ASSERT(right != nullptr);
if (!_coll) {
LOG(ERR) << "no Collator in Utf8Helper::compareUtf8()!";
return (strcmp(left, right));
}
TRI_ASSERT(_coll);
UErrorCode status = U_ZERO_ERROR;
int result =
@ -91,7 +87,7 @@ int Utf8Helper::compareUtf8(char const* left, size_t leftLength,
StringPiece(right, (int32_t)rightLength), status);
if (U_FAILURE(status)) {
LOG(ERR) << "error in Collator::compareUTF8(...): " << u_errorName(status);
return (strcmp(left, right));
return (strncmp(left, right, leftLength < rightLength ? leftLength : rightLength));
}
return result;
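One caveat about the bounded fallback, noted here as an observation rather than something the commit addresses: strncmp stops after n bytes, so when one input is a strict prefix of the other, the fallback reports equality even though the lengths differ.

#include <cstring>

char const shorter[] = {'f', 'o', 'o'};                 // 3 bytes, no NUL
char const longer[]  = {'f', 'o', 'o', 'b', 'a', 'r'};  // 6 bytes, no NUL
int r = strncmp(shorter, longer, 3);  // min(3, 6) == 3 bytes compared
// r == 0 here, although the two strings are not equal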
@ -550,28 +546,6 @@ bool Utf8Helper::matches(RegexMatcher* matcher, char const* value,
return (result ? true : false);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief compare two utf8 strings
////////////////////////////////////////////////////////////////////////////////
int TRI_compare_utf8(char const* left, char const* right) {
TRI_ASSERT(left != nullptr);
TRI_ASSERT(right != nullptr);
return Utf8Helper::DefaultUtf8Helper.compareUtf8(left, right);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief compare two utf8 strings
////////////////////////////////////////////////////////////////////////////////
int TRI_compare_utf8(char const* left, size_t leftLength, char const* right,
size_t rightLength) {
TRI_ASSERT(left != nullptr);
TRI_ASSERT(right != nullptr);
return Utf8Helper::DefaultUtf8Helper.compareUtf8(left, leftLength, right,
rightLength);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief Lowercase the characters in a UTF-8 string (implemented in
/// Basic/Utf8Helper.cpp)

View File

@ -186,21 +186,29 @@ char* TRI_normalize_utf16_to_NFC(TRI_memory_zone_t* zone, uint16_t const* utf16,
size_t inLength, size_t* outLength);
////////////////////////////////////////////////////////////////////////////////
/// @brief compare two utf8 strings (implemented in Basic/Utf8Helper.cpp)
/// @brief compare two utf8 strings
////////////////////////////////////////////////////////////////////////////////
int TRI_compare_utf8(char const* left, char const* right);
static inline int TRI_compare_utf8(char const* left, char const* right) {
TRI_ASSERT(left != nullptr);
TRI_ASSERT(right != nullptr);
return arangodb::basics::Utf8Helper::DefaultUtf8Helper.compareUtf8(left, right);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief compare two utf8 strings (implemented in Basic/Utf8Helper.cpp)
/// @brief compare two utf8 strings
////////////////////////////////////////////////////////////////////////////////
int TRI_compare_utf8(char const* left, size_t leftLength, char const* right,
size_t rightLength);
static inline int TRI_compare_utf8(char const* left, size_t leftLength,
char const* right, size_t rightLength) {
TRI_ASSERT(left != nullptr);
TRI_ASSERT(right != nullptr);
return arangodb::basics::Utf8Helper::DefaultUtf8Helper.compareUtf8(left, leftLength,
right, rightLength);
}
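Inlining the two wrappers removes a function call on a hot comparison path without changing any call sites. A hedged usage sketch (the results assume the default ICU collator's ordering):

int a = TRI_compare_utf8("abc", "abd");       // NUL-terminated overload
char const* l = "abc";
char const* r = "abd";
int b = TRI_compare_utf8(l, 3, r, 3);         // length-bounded overload
// both a and b are negative: "abc" collates before "abd"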
////////////////////////////////////////////////////////////////////////////////
/// @brief Lowercase the characters in a UTF-8 string (implemented in
/// Basic/Utf8Helper.cpp)
/// @brief Lowercase the characters in a UTF-8 string
////////////////////////////////////////////////////////////////////////////////
char* TRI_tolower_utf8(TRI_memory_zone_t* zone, char const* src,

View File

@ -1240,8 +1240,8 @@ char* TRI_UnescapeUtf8String(TRI_memory_zone_t* zone, char const* in,
////////////////////////////////////////////////////////////////////////////////
size_t TRI_CharLengthUtf8String(char const* in) {
unsigned char* p = (unsigned char*)in;
size_t length = 0;
unsigned char const* p = reinterpret_cast<unsigned char const*>(in);
size_t chars = 0;
while (*p) {
unsigned char c = *p;
@ -1256,15 +1256,46 @@ size_t TRI_CharLengthUtf8String(char const* in) {
} else if (c < 248) {
p += 4;
} else {
printf("invalid utf\n");
// invalid UTF-8 sequence
break;
}
++length;
++chars;
}
return length;
return chars;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief determine the number of characters in a UTF-8 string
////////////////////////////////////////////////////////////////////////////////
size_t TRI_CharLengthUtf8String(char const* in, size_t length) {
unsigned char const* p = reinterpret_cast<unsigned char const*>(in);
unsigned char const* e = p + length;
size_t chars = 0;
while (p < e) {
unsigned char c = *p;
if (c < 128) {
// single byte
p++;
} else if (c < 224) {
p += 2;
} else if (c < 240) {
p += 3;
} else if (c < 248) {
p += 4;
} else {
// invalid UTF-8 sequence
break;
}
++chars;
}
return chars;
}
////////////////////////////////////////////////////////////////////////////////
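A worked example of the byte-length vs. character-count distinction the new length-bounded overload handles (assuming valid UTF-8 input):

#include <cstring>

char const* s = "m\xc3\xbcller";   // "müller": 'ü' is the two-byte sequence 0xC3 0xBC
size_t bytes = strlen(s);                           // == 7
size_t chars = TRI_CharLengthUtf8String(s, bytes);  // == 6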

View File

@ -260,6 +260,12 @@ char* TRI_UnescapeUtf8String(TRI_memory_zone_t*, char const* in,
size_t TRI_CharLengthUtf8String(char const*);
////////////////////////////////////////////////////////////////////////////////
/// @brief determine the number of characters in a UTF-8 string
////////////////////////////////////////////////////////////////////////////////
size_t TRI_CharLengthUtf8String(char const*, size_t);
////////////////////////////////////////////////////////////////////////////////
/// @brief get the string end position for a leftmost prefix of a UTF-8 string
/// e.g. when specifying (müller, 2), the return value will be a pointer to the

View File

@ -438,7 +438,7 @@ void SimpleHttpClient::setUserNamePassword(std::string const& prefix,
std::string value =
arangodb::basics::StringUtils::encodeBase64(username + ":" + password);
_pathToBasicAuth.push_back(make_pair(prefix, value));
_pathToBasicAuth.push_back(std::make_pair(prefix, value));
}
////////////////////////////////////////////////////////////////////////////////
@ -501,12 +501,15 @@ void SimpleHttpClient::setRequest(
HttpRequest::appendMethod(method, &_writeBuffer);
// append location
std::string l(location);
std::string const* l = &location;
std::string appended;
if (location.empty() || location[0] != '/') {
l = "/" + location;
appended = "/" + location;
l = &appended;
}
_writeBuffer.appendText(l);
_writeBuffer.appendText(*l);
// append protocol
_writeBuffer.appendText(TRI_CHAR_LENGTH_PAIR(" HTTP/1.1\r\n"));
@ -538,13 +541,12 @@ void SimpleHttpClient::setRequest(
if (!_pathToBasicAuth.empty()) {
std::string foundPrefix;
std::string foundValue;
std::vector<std::pair<std::string, std::string>>::iterator i =
_pathToBasicAuth.begin();
auto i = _pathToBasicAuth.begin();
for (; i != _pathToBasicAuth.end(); ++i) {
std::string& f = i->first;
if (l.find(f) == 0) {
if (l->find(f) == 0) {
// f is prefix of l
if (f.length() > foundPrefix.length()) {
foundPrefix = f;

View File

@ -98,7 +98,11 @@ class SimpleHttpResult {
//////////////////////////////////////////////////////////////////////////////
void setHttpReturnMessage(std::string const& message) {
this->_returnMessage = message;
_returnMessage = message;
}
void setHttpReturnMessage(std::string&& message) {
_returnMessage = std::move(message);
}
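The added rvalue overload lets a caller that is finished with its string hand the buffer over without a copy. A hedged call-site sketch (result and readStatusLine are hypothetical):

std::string msg = readStatusLine();           // hypothetical producer
result.setHttpReturnMessage(std::move(msg));  // buffer is moved, not copied
// msg is left in a valid but unspecified state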
//////////////////////////////////////////////////////////////////////////////

View File

@ -245,6 +245,10 @@ function main(argv) {
}
}
if (res.exit != 0) {
throw("generating examples failed!");
}
return 0;
}

View File

@ -39,7 +39,9 @@ ${ARANGOSH} \
-- \
"$@"
if test $? -eq 0; then
rc=$?
if test $rc -eq 0; then
echo "removing ${LOGFILE} ${DBDIR}"
rm -rf ${LOGFILE} ${DBDIR} arangosh.examples.js
else
@ -47,4 +49,4 @@ else
cat ${LOGFILE}
fi
echo Server has terminated.
exit $rc

View File

@ -1,6 +1,6 @@
#!/bin/bash
python \
exec python \
`pwd`/utils/generateSwagger.py \
`pwd` \
`pwd`/js/apps/system/_admin/aardvark/APP/api-docs \