Merge branch 'devel' of https://github.com/arangodb/arangodb into devel
CHANGELOG
@@ -1,3 +1,22 @@
v3.0.0-rc3 (????-??-??)
-----------------------

* renamed various Foxx errors to no longer refer to Foxx services as apps

* adjusted various error messages in Foxx to be more informative

* specifying "files" in a Foxx manifest to be mounted at the service root
  no longer results in 404s when trying to access non-file routes

* undeclared path parameters in Foxx no longer break the service

* trusted reverse proxy support is now handled more consistently

* ArangoDB request compatibility and user are now exposed in Foxx

* all bundled NPM modules have been upgraded to their latest versions


v3.0.0-rc2 (2016-06-12)
-----------------------
@@ -430,12 +430,19 @@ option(USE_JEMALLOC
   ${JEMALLOC_DEFAULT_VALUE}
 )

+set(FORCE_JEMALLOC_LIB "" CACHE STRING "force this library command to link jemalloc")
+
 if (USE_JEMALLOC)
   if (NOT JEMALLOC_FOUND)
     message(FATAL_ERROR "jemalloc build was requested but jemalloc not found")
   endif ()
   add_definitions("-DARANGODB_HAVE_JEMALLOC=1")
-  set(SYS_LIBS ${SYS_LIBS} ${JEMALLOC_jemalloc_LIBRARY})
+
+  if (FORCE_JEMALLOC_LIB STREQUAL "")
+    set(SYS_LIBS ${SYS_LIBS} ${JEMALLOC_jemalloc_LIBRARY})
+  else ()
+    set(SYS_LIBS ${SYS_LIBS} ${FORCE_JEMALLOC_LIB})
+  endif ()
 endif ()

################################################################################
@@ -1,15 +1,15 @@
-<div id="header">
-  <div id="logo">
+<div class="arangodb-header">
+  <div class="arangodb-logo">
     <a href="https://arangodb.com/">
       <img src="https://docs.arangodb.com/assets/arangodb_logo_2016.png">
     </a>
   </div>
-  <div id="logo-small">
+  <div class="arangodb-logo-small">
     <a href="https://arangodb.com/">
       <img src="https://docs.arangodb.com/assets/arangodb_logo_small_2016.png">
     </a>
   </div>
-  <select id="version-switcher">
+  <select class="arangodb-version-switcher">
     <option value="devel">VERSION_NUMBER</option>
     <option value="2.8">v2.8</option>
     <option value="2.7">v2.7</option>

@@ -23,7 +23,7 @@
   <div class="google-search">
     <gcse:searchbox-only></gcse:searchbox-only>
   </div>
-  <ul id="navmenu">
+  <ul class="arangodb-navmenu">
     <li>
       <a href="BASE_PATH/Manual/index.html">Manual</a>
     </li>
@@ -92,22 +92,22 @@ body {
   margin-top: 48px;
 }

-#logo, #logo-small {
+.arangodb-logo, .arangodb-logo-small {
   display: inline;
   float: left;
   padding-top: 10px;
   margin-left: 5%;
 }

-#logo img {
+.arangodb-logo img {
   height: 23px;
 }

-#logo-small {
+.arangodb-logo-small {
   display: none;
 }

-#version-switcher {
+.arangodb-version-switcher {
   width: 62px;
   height: 44px;
   margin-left: 24px;

@@ -119,37 +119,37 @@ body {
   border: 0;
 }

-#version-switcher option {
+.arangodb-version-switcher option {
   background-color: white;
   color: black;
 }


-#header {
+.arangodb-header {
   position: fixed;
   width: 100%;
   height: 48px;
   z-index: 1;
 }

-#header .socialIcons-googlegroups a img {
+.arangodb-header .socialIcons-googlegroups a img {
   position: relative;
   height: 14px;
   top: 3px;
 }

-#navmenu {
+.arangodb-navmenu {
   display: block;
   float: right;
   margin: 0px;
 }

-#navmenu li {
+.arangodb-navmenu li {
   display: block;
   float: left;
 }

-#navmenu li a {
+.arangodb-navmenu li a {
   display: block;
   float: left;
   padding: 0 10px;

@@ -161,14 +161,14 @@ body {
   font-family: Roboto, Helvetica, sans-serif;
 }

-#navmenu li.active-tab a, #navmenu li a:hover {
+.arangodb-navmenu li.active-tab a, .arangodb-navmenu li a:hover {
   background-color: #88A049 !important;
 }

/** simple responsive updates **/

@media screen and (max-width: 1100px) {
-  #logo {
+  .arangodb-logo {
     margin-left: 10px;
   }

@@ -190,29 +190,29 @@ body {
     width: 130px !important;
   }

-  #navmenu {
+  .arangodb-navmenu {
     padding: 0;
   }

-  #navmenu li a {
+  .arangodb-navmenu li a {
     font-size: 15px;
     padding: 0 7px;
   }

-  #logo {
+  .arangodb-logo {
     display: none;
   }

-  #logo-small {
+  .arangodb-logo-small {
     display: inline;
     margin-left: 10px;
   }

-  #logo-small img {
+  .arangodb-logo-small img {
     height: 20px;
   }

-  #version-switcher {
+  .arangodb-version-switcher {
     margin: 0;
     width: 50px;
   }

@@ -220,7 +220,7 @@ body {
 }

@media screen and (max-width: 480px) {
-  #version-switcher {
+  .arangodb-version-switcher {
     display: none;
   }

@@ -244,7 +244,7 @@ body {
 }

@media screen and (max-width: 330px) {
-  #logo-small, .google-search {
+  .arangodb-logo-small, .google-search {
     display: none;
   }
}
@@ -14,7 +14,7 @@ function appendHeader() {

function rerenderNavbar() {
-  $('#header').remove();
+  $('.arangodb-header').remove();
  appendHeader();
  renderGoogleSearch();
};

@@ -35,7 +35,7 @@ function appendHeader() {
  };
  addGoogleSrc();

-  $("#version-switcher").on("change", function(e) {
+  $(".arangodb-version-switcher").on("change", function(e) {
    window.location.href = "https://docs.arangodb.com/" + e.target.value;
  });
@@ -2,27 +2,16 @@

Replication allows you to *replicate* data onto another machine. It forms the base of all disaster recovery and failover features ArangoDB offers.

-ArangoDB offers asynchronous and synchronous replication which both have their pros and cons. Both modes may and should be combined in a real world scenario and be applied in the usecase where the excel most. We will describe pros and cons of each of them in the following sections.
+ArangoDB offers asynchronous and synchronous replication, which both have their pros and cons. Both modes may and should be combined in a real-world scenario and be applied in the use case where they excel most.
+
+We will describe the pros and cons of each of them in the following sections.

!SUBSECTION Synchronous replication

-Synchronous replication only works in in a cluster and is typically used for mission critical data which must be accessible at all times. Synchronous replication generally stores a copy of the data on another host and keeps it in sync. Essentially when storing data after enabling synchronous replication the cluster will wait for all replicas to write all the data before greenlighting the write operation to the client. This makes writing naturally slower. However it will enabled the cluster to immediately fail over to a replica whenever an outage has been detected.
+Synchronous replication only works in a cluster and is typically used for mission-critical data which must be accessible at all times. Synchronous replication generally stores a copy of the data on another host and keeps it in sync. Essentially, when storing data after enabling synchronous replication, the cluster will wait for all replicas to write all the data before greenlighting the write operation to the client. This makes writing naturally slower. However, it will enable the cluster to immediately fail over to a replica whenever an outage has been detected.

Synchronous replication is organized in a way that every shard has a leader and n followers. The number of followers can be controlled using the `replicationFactor` whenever you create a collection.

!SUBSECTION Asynchronous replication

-Asynchronous replication does not necessarily need a cluster to operate and is configured on a per-database level, meaning that
-different databases in the same ArangoDB instance can have different replication
-settings. Replication must be turned on explicitly before it becomes active for a
-database.
-
-In a typical master-slave replication setup, clients direct *all* their write
-operations for a specific database to the master. The master database is the only
-place to connect to when making any insertions/updates/deletions.
-
-The master database will log all write operations in its write-ahead log.
-Any number of slaves can then connect to the master database and fetch data from the
-master database's write-ahead log. The slaves then can apply all the events from the log in
-the same order locally. After that, they will have the same state of data as the master
-database.
+In ArangoDB any write operation will be logged to the write-ahead log. When using asynchronous replication, slaves will connect to a master and apply all the events from the log in the same order locally. After that, they will have the same state of data as the master database.
@@ -0,0 +1,15 @@
!CHAPTER Configuration

!SUBSECTION Requirements

Synchronous replication requires an operational ArangoDB cluster.

!SUBSECTION Enabling synchronous replication

Synchronous replication can be enabled per collection. When creating a collection you can specify the number of replicas using *replicationFactor*. The default is `1`, which effectively *disables* synchronous replication.

Example:

```
127.0.0.1:8530@_system> db._create("test", {"replicationFactor": 3})
```

From now on, any write operation will require 2 replicas to report success.
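To verify the setting afterwards you can inspect the collection properties. A minimal sketch (assuming an arangosh session connected to a cluster coordinator, and that `properties()` reports the configured replication factor back):

```js
// create a collection whose data is kept on three replicas in total
db._create("test", {"replicationFactor": 3});

// inspect the collection's properties; replicationFactor should be 3
db.test.properties();
```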
@ -4,7 +4,7 @@
|
|||
|
||||
Synchronous replication can be configured per collection via the property *replicationFactor*. Synchronous replication requires a cluster to operate.
|
||||
|
||||
Whenever you specify a *replicationFactor* greater than 1 when creating a collection synchronous replication is activated. The cluster will determine suitable *leaders* and *followers* for every requested shard (*numberOfShards*) within the cluster. For every shard in a collection only the leader will be asked. Using *synchronous replication* alone will guarantee consistent high availabilty at the cost of reduced performance (due to every request having to be executed on the followers). Combining it with [Sharding.md](sharding) will counteract that issue.
|
||||
Whenever you specify a *replicationFactor* greater than 1 when creating a collection, synchronous replication will be activated for this collection. The cluster will determine suitable *leaders* and *followers* for every requested shard (*numberOfShards*) within the cluster. When requesting data of a shard only the current leader will be asked whereas followers will only keep their copy in sync. Using *synchronous replication* alone will guarantee consistency and high availabilty at the cost of reduced performance (due to every write-request having to be executed on the followers). Combining it with [Sharding.md](sharding) will counteract that issue.
|
||||
|
||||
In a cluster synchronous replication will be managed by the *coordinators* for the client. The data will always be stored on *primaries*.
|
||||
|
||||
|
@@ -31,7 +31,7 @@ The following example will give you an idea of how synchronous operation has been

!SUBSECTION Automatic failover

-Whenever the leader of a shard is failing and there is a query trying to access data of that shard the coordinator will continue retrying to contact the leader until it timeouts. Every 15 seconds the internal cluster supervision will validate cluster health. If the leader didn't come back in time the supervision will reorganize the cluster. The coordinator will then contact the new leader.
+Whenever the leader of a shard fails and there is a query trying to access data of that shard, the coordinator will keep trying to contact the leader until it times out. Every 15 seconds the internal cluster supervision will validate cluster health. If the leader doesn't come back in time, the supervision will reorganize the cluster. The coordinator will then contact the new leader.

The process is best outlined using an example:
@@ -1,3 +1,3 @@
!CHAPTER Synchronous Replication

-At its core synchronous replication will replicate write operations to multiple hosts. This feature is only available when operating ArangoDB in a cluster. Whenever a executes a write operation it will only be reported to be successful if it was carried out on all replicas. In contrast to multi master replication known from other systems ArangoDB's synchronous operation guarantees a consistent state across the cluster.
+At its core synchronous replication will replicate write operations to multiple hosts. This feature is only available when operating ArangoDB in a cluster. Whenever a coordinator executes a synchronously replicated write operation, it will only be reported to be successful if it was carried out on all replicas. In contrast to multi-master replication known from other systems, ArangoDB's synchronous operation guarantees a consistent state across the cluster.
@@ -1,58 +0,0 @@
!CHAPTER Authentication in a cluster

In this section we describe, how authentication in a cluster is done
properly. For experiments it is possible to run the cluster completely
unauthorized by using the option *--server.disable-authentication true*
on the command line or the corresponding entry in the configuration
file. However, for production use, this is not desirable.

You can turn on authentication in the cluster by switching it on in the
configuration of your dispatchers. When you now use the planner and
kickstarter to create and launch a cluster, the *arangod* processes in
your cluster will automatically run with authentication, exactly as the
dispatchers themselves. However, the cluster will have a sharded
collection *_users* with one shard containing only the user *root* with
an empty password. We emphasize that this sharded cluster-wide
collection is different from the *_users* collections in each
dispatcher!

The coordinators in your cluster will use this cluster-wide sharded collection
to authenticate HTTP requests. If you add users using the usual methods
via a coordinator, you will in fact change the cluster-wide
collection *_users* and thus all coordinators will eventually see the
new users and authenticate against them. "Eventually" means that they
might need a few seconds to notice the change in user setup and update
their user cache.

The DBservers will have their authentication switched on as well.
However, they do not use the cluster-wide *_users* collection for
authentication, because the idea is, that the outside clients do not talk
to the DBservers directly, but always go via the coordinators. For the
cluster-internal communication between coordinators and DBservers (in
both directions), we use a simpler setup: There are two new
configuration options *cluster.username* and *cluster.password*, which
default to *root* and the empty password *""*. If you want to deviate
from this default you have to change these two configuration options
in all configuration files on all machines in the cluster. This just
means that you have to set these two options to the same values in all
configuration files *arangod.conf* in all dispatchers, since the
coordinators and DBservers will simply inherit this configuration file
from the dispatcher that has launched them.

Let us summarize what you have to do, to enable authentication in a cluster:

1. Set *server.disable-authentication* to *false* in all configuration
   files of all dispatchers (this is already the default).
2. Put the same values for *cluster.username* and *cluster.password*
   in the very same configuration files of all dispatchers.
3. Create users via the usual interface on the coordinators
   (initially after the cluster launch there will be a single user *root*
   with empty password).

Please note, that in Version 2.0 of ArangoDB you can already configure the
endpoints of the coordinators to use SSL. However, this is not yet conveniently
supported in the planner, kickstarter and in the graphical cluster
management tools. We will fix this in the next version.

Please also consider the comments in the following section about
firewall setup.
@@ -1,42 +0,0 @@
!CHAPTER Recommended firewall setup

This section is intended for people who run a cluster in production
systems.

The whole idea of the cluster setup is that the coordinators serve HTTP
requests to the outside world and that all other processes (DBservers
and agency) are only available from within the cluster itself.
Therefore, in a production environment, one has to put the whole cluster
behind a firewall and only open the ports to the coordinators to the
client processes.

Note however that for the asynchronous cluster-internal communication,
the DBservers perform HTTP requests to the coordinators, which means
that the coordinators must also be reachable from within the cluster.

Furthermore, it is of the utmost importance to hide the agent processes of
the agency behind the firewall, since, at least at this stage, requests
to them are completely unauthorized. Leaving their ports exposed to
the outside world, endangers all data in the cluster, because everybody
on the internet could make the cluster believe that, for example, you wanted
your databases dropped! This weakness will be alleviated in future versions,
because we will replace *etcd* by our own specialized agency
implementation, which will allow for authentication.

A further comment applies to the dispatchers. Usually you will open the
HTTP endpoints of your dispatchers to the outside world and switch on
authentication for them. This is necessary to contact them from the
outside, in the cluster launch phase. However, actually you only
need to contact one of them, who will then in turn contact the others
using cluster-internal communication. You can even get away with closing
access to all dispatchers to the outside world, provided the machine
running your browser is within the cluster network and does not have to
go through the firewall to contact the dispatchers. It is important to
be aware that anybody who can reach a dispatcher and can authorize
himself to it can launch arbitrary processes on the machine on which
the dispatcher runs!

Therefore we recommend to use SSL endpoints with user/password
authentication on the dispatchers *and* to block access to them in
the firewall. You then have to launch the cluster using an *arangosh*
or browser running within the cluster.
@@ -1,16 +1,24 @@
!CHAPTER Sharding

-Sharding allows to use multiple machines to run a cluster of ArangoDB
+ArangoDB organizes its collection data in shards. Sharding allows the use of multiple machines to run a cluster of ArangoDB
instances that together constitute a single database. This enables
you to store much more data, since ArangoDB distributes the data
automatically to the different servers. In many situations one can
also reap a benefit in data throughput, again because the load can
be distributed to multiple machines.

-In a cluster there are essentially two types of processes: "DBservers"
-and "coordinators". The former actually store the data, the latter
-expose the database to the outside world. The clients talk to the
-coordinators exactly as they would talk to a single ArangoDB instance
-via the REST interface. The coordinators know about the configuration of
-the cluster and automatically forward the incoming requests to the
-right DBservers.
+Shards are configured per collection, so multiple shards of data together form the collection as a whole. To determine in which shard the data is to be stored, ArangoDB performs a hash across the values. By default this hash is created from the *_key* attribute.
+
+To configure the number of shards:
+
+```
+127.0.0.1:8529@_system> db._create("sharded_collection", {"numberOfShards": 4});
+```
+
+To configure the hashing:
+
+```
+127.0.0.1:8529@_system> db._create("sharded_collection", {"numberOfShards": 4, "shardKeys": ["country"]});
+```
+
+This would be useful to keep the data of every country in one shard, which results in better performance for queries working on a per-country basis. You can also specify multiple `shardKeys`.
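For illustration, a minimal sketch of a compound shard key (the collection and attribute names here are made up for this example):

```js
// distribute documents by country and city instead of _key;
// documents sharing both values end up in the same shard
db._create("customers", {
  "numberOfShards": 4,
  "shardKeys": ["country", "city"]
});

// the target shard is derived from a hash over both attributes
db.customers.save({ country: "DE", city: "Cologne", name: "Jane" });
```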
@@ -1,151 +0,0 @@
!CHAPTER Status of the implementation

This version 2.0 of ArangoDB contains the first usable implementation
of the sharding extensions. However, not all planned features are
included in this release. In particular, automatic fail-over is fully
prepared in the architecture but is not yet implemented. If you use
Version 2.0 in cluster mode in a production system, you have to
organize failure recovery manually. This is why, at this stage with
Version 2.0 we do not yet recommend to use the cluster mode in
production systems. If you really need this feature now, please contact
us.

This section provides an overview over the implemented and future
features.

In normal single instance mode, ArangoDB works as usual
with the same performance and functionality as in previous releases.

In cluster mode, the following things are implemented in version 2.0
and work:

* All basic CRUD operations for single documents and edges work
  essentially with good performance.
* One can use sharded collections and can configure the number of
  shards for each such collection individually. In particular, one
  can have fully sharded collections as well as cluster-wide available
  collections with only a single shard. After creation, these
  differences are transparent to the client.
* Creating and dropping cluster-wide databases works.
* Creating, dropping and modifying cluster-wide collections all work.
  Since these operations occur seldom, we will only improve their
  performance in a future release, when we will have our own
  implementation of the agency as well as a cluster-wide event managing
  system (see road map for release 2.3).
* Sharding in a collection, can be configured to use hashing
  on arbitrary properties of the documents in the collection.
* Creating and dropping indices on sharded collections works. Please
  note that an index on a sharded collection is not a global index
  but only leads to a local index of the same type on each shard.
* All SimpleQueries work. Again, we will improve the performance in
  future releases, when we revisit the AQL query optimizer
  (see road map for release 2.2).
* AQL queries work, but with relatively bad performance. Also, if the
  result of a query on a sharded collection is large, this can lead
  to an out of memory situation on the coordinator handling the
  request. We will improve this situation when we revisit the AQL
  query optimizer (see road map for release 2.2).
* Authentication on the cluster works with the method known from
  single ArangoDB instances on the coordinators. A new cluster-internal
  authorization scheme has been created. See below for hints on a
  sensible firewall and authorization setup.
* Most standard API calls of the REST interface work on the cluster
  as usual, with a few exceptions, which do no longer make sense on
  a cluster or are harder to implement. See below for details.


The following does not yet work, but is planned for future releases (see
road map):

* Transactions can be run, but do not behave like transactions. They
  simply execute but have no atomicity or isolation in version 2.0.
  See the road map for version 2.X.
* Data-modification AQL queries are not executed atomically or isolated.
  If a data-modification AQL query fails for one shard, it might be
  rolled back there, but still complete on other shards.
* Data-modification AQL queries require a *_key* attribute in documents
  in order to operate. If a different shard key is chosen for a collection,
  specifying the *_key* attribute is currently still required. This
  restriction might be lifted in a future release.
* We plan to revise the AQL optimizer for version 2.2. This is
  necessary since for efficient queries in cluster mode we have to
  do as much as possible of the filtering and sorting on the
  individual DBservers rather than on the coordinator.
* Our software architecture is fully prepared for replication, automatic
  fail-over and recovery of a cluster, which will be implemented
  for version 2.3 (see our road map).
* This setup will at the same time, allow for hot swap and in-service
  maintenance and scaling of a cluster. However, in version 2.0 the
  cluster layout is static and no redistribution of data between the
  DBservers or moving of shards between servers is possible.
* At this stage the sharding of an [edge collection](../../Appendix/Glossary.md#edge-collection) is independent of
  the sharding of the corresponding vertex collection in a graph.
  For version 2.2 we plan to synchronize the two, to allow for more
  efficient graph traversal functions in large, sharded graphs. We
  will also do research on distributed algorithms for graphs and
  implement new algorithms in ArangoDB. However, at this stage, all
  graph traversal algorithms are executed on the coordinator and
  this means relatively poor performance since every single edge
  step leads to a network exchange.
* In version 2.0 the import API is broken for sharded collections.
  It will appear to work but will in fact silently fail. Fixing this
  is on the road map for version 2.1.
* In version 2.0 the *arangodump* and *arangorestore* programs
  can not be used talking to a coordinator to directly backup
  sharded collections. At this stage, one has to backup the
  DBservers individually using *arangodump* and *arangorestore*
  on them. The coordinators themselves do not hold any state and
  therefore do not need backup. Do not forget to backup the meta
  data stored in the agency because this is essential to access
  the sharded collections. These limitations will be fixed in
  version 2.1.
* In version 2.0 the replication API (*/_api/replication*)
  does not work on coordinators. This is intentional, since the
  plan is to organize replication with automatic fail-over directly
  on the DBservers, which is planned for version 2.3.
* The *db.<collection>.rotate()* method for sharded collections is not
  yet implemented, but will be supported from version 2.1 onwards.
* The *db.<collection>.rename()* method for sharded collections is not
  yet implemented, but will be supported from version 2.1 onwards.
* The *db.<collection>.checksum()* method for sharded collections is
  not yet implemented, but will be supported from version 2.1
  onwards.

The following restrictions will probably stay, for cluster mode, even in
future versions. This is, because they are difficult or even impossible
to implement efficiently:

* Custom key generators with the *keyOptions* property in the
  *_create* method for collections are not supported. We plan
  to improve this for version 2.1 (see road map). However, due to the
  distributed nature of a sharded collection, not everything that is
  possible in the single instance situation will be possible on a
  cluster. For example the auto-increment feature in a cluster with
  multiple DBservers and coordinators would have to lock the whole
  collection centrally for every document creation, which
  essentially defeats the performance purpose of sharding.
* Unique constraints on non-sharding keys are unsupported. The reason
  for this is that we do not plan to have global indices for sharded
  collections. Therefore, there is no single authority that could
  efficiently decide whether or not the unique constraint is
  satisfied by a new document. The only possibility would be to have
  a central locking mechanism and use heavy communication for every
  document creation to ensure the unique constraint.
* The method *db.<collection>.revision()* for a sharded collection
  returns the highest revision number from all shards. However,
  revision numbers are assigned per shard, so this is not guaranteed
  to be the revision of the latest inserted document. Again,
  maintaining a global revision number over all shards is very
  difficult to maintain efficiently.
* Contrary to the situation in a single instance, objects representing
  sharded collections are broken after their database is dropped.
  In a future version they might report that they are broken, but
  it is not feasible and not desirable to retain the cluster database
  in the background until all collection objects are garbage
  collected.
* In a cluster, the automatic creation of collections on a call to
  *db._save(ID)* is not supported. This is because one would have no
  way to specify the number or distribution of shards for the newly
  created collection. Therefore we will not offer this feature for
  cluster mode.
@@ -0,0 +1,15 @@
!CHAPTER Cluster

The cluster section displays statistics about the general cluster performance.


Statistics:

- Available and missing coordinators
- Available and missing database servers
- Memory usage (percent)
- Current connections
- Data (bytes)
- HTTP (bytes)
- Average request time (seconds)
@@ -1,11 +1,19 @@
!CHAPTER Collections

The collections section displays all available collections. From here you can create new collections and jump into a collection for details (click on a collection tile).


-* A: Toggle filter properties
-* B: Search collection by name
-* C: Filter properties
-* D: Create a new collection
-* E: Collection type
-* F: Collection state(unloaded, loaded, ...)
-* G: Collection name
+Functions:
+
+- A: Toggle filter properties
+- B: Search collection by name
+- D: Create collection
+- C: Filter properties
+- H: Show collection details (click tile)
+
+Information:
+
+- E: Collection type
+- F: Collection state (unloaded, loaded, ...)
+- G: Collection name
@@ -0,0 +1,6 @@
!CHAPTER Dashboard

The *Dashboard* tab provides statistics which are polled regularly from the
ArangoDB server.

@@ -0,0 +1,9 @@
!CHAPTER Graphs

The *Graphs* tab provides a viewer facility for graph data stored in ArangoDB. It
allows browsing ArangoDB graphs stored in the *_graphs* system collection or a
graph consisting of an arbitrary vertex and [edge collection](../../Appendix/Glossary.md#edge-collection).

Please note that the graph viewer requires client-side SVG and that you need a
browser capable of rendering it. In particular, Internet Explorer versions older
than 9 are likely to not support this.
@@ -0,0 +1,38 @@
!CHAPTER Nodes

The nodes section offers two subviews.

!SECTION Overview

The overview shows available and missing coordinators and database servers.


Functions:

- Coordinator dashboard: clicking on a coordinator will open a statistics dashboard.

Information (coordinators / database servers):

- Name
- Endpoint
- Last Heartbeat
- Status
- Health

!SECTION Shards

The shard section displays all available sharded collections.


Functions:

- Move shard: clicking on a shard will open a move-shard dialog. Shards can be transferred to all available database servers, except the leading database server or servers already holding a follower.
- Rebalance shards: a new database server will not have any shards. With the rebalance functionality the cluster will start to rebalance shards, including moving them onto empty database servers.

Information (collection):

- Shard
- Leader (green state: sync is complete)
- Followers
(binary screenshot assets: 1 image replaced, 6 images added)
@@ -1,6 +1,8 @@
!CHAPTER Module "actions"

-The action module provides the infrastructure for defining HTTP actions.
+The action module provides the infrastructure for defining low-level HTTP actions.
+
+If you want to define HTTP endpoints in ArangoDB you should probably use the [Foxx microservice framework](../../Foxx/README.md) instead.

!SECTION Basics
!SUBSECTION Error message
@@ -1,5 +1,7 @@
!CHAPTER Console Module

+`require('console')`
+
The implementation follows the CommonJS specification
[Console](http://wiki.commonjs.org/wiki/Console).
@@ -0,0 +1,319 @@
!CHAPTER Crypto Module

`const crypto = require('@arangodb/crypto')`

The crypto module provides implementations of various hashing algorithms as well as cryptography-related functions.

!SECTION Nonces

These functions deal with cryptographic nonces.

!SUBSECTION createNonce

`crypto.createNonce(): string`

Creates a cryptographic nonce.

Returns the created nonce.

!SUBSECTION checkAndMarkNonce

`crypto.checkAndMarkNonce(nonce): void`

Checks and marks a nonce.

**Arguments**

* **nonce**: `string`

  The nonce to check and mark.

Returns nothing.
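A typical round trip might look as follows (a minimal sketch; nonces are single-use values, so each nonce should only be checked and marked once):

```js
const crypto = require('@arangodb/crypto');

// issue a nonce, e.g. to embed in a form or challenge
const nonce = crypto.createNonce();

// later: validate the nonce and mark it as used
crypto.checkAndMarkNonce(nonce);
```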
!SECTION Random values

The following functions deal with generating random values.

!SUBSECTION rand

`crypto.rand(): number`

Generates a random integer that may be positive, negative or even zero.

Returns the generated number.

!SUBSECTION genRandomAlphaNumbers

`crypto.genRandomAlphaNumbers(length): string`

Generates a string of random alphabetical characters and digits.

**Arguments**

* **length**: `number`

  The length of the string to generate.

Returns the generated string.

!SUBSECTION genRandomNumbers

`crypto.genRandomNumbers(length): string`

Generates a string of random digits.

**Arguments**

* **length**: `number`

  The length of the string to generate.

Returns the generated string.

!SUBSECTION genRandomSalt

`crypto.genRandomSalt(length): string`

Generates a string of random (printable) ASCII characters.

**Arguments**

* **length**: `number`

  The length of the string to generate.

Returns the generated string.
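For illustration (a minimal sketch; the shown results are made up, as the output is random):

```js
const crypto = require('@arangodb/crypto');

crypto.genRandomAlphaNumbers(8); // e.g. "a9Bc3dE7"
crypto.genRandomNumbers(6);      // e.g. "493027"
crypto.genRandomSalt(16);        // 16 random printable ASCII characters
```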
!SECTION JSON Web Tokens (JWT)

These methods implement the JSON Web Token standard.

!SUBSECTION jwtEncode

`crypto.jwtEncode(key, message, algorithm): string`

Generates a JSON Web Token for the given message.

**Arguments**

* **key**: `string | null`

  The secret cryptographic key to be used to sign the message using the given algorithm.
  Note that this function will raise an error if the key is omitted but the algorithm expects a key,
  and also if the algorithm does not expect a key but a key is provided (e.g. when using `"none"`).

* **message**: `string`

  Message to be encoded as JWT. Note that the message will only be base64-encoded and signed, not encrypted.
  Do not store sensitive information in tokens unless they will only be handled by trusted parties.

* **algorithm**: `string`

  Name of the algorithm to use for signing the message, e.g. `"HS512"`.

Returns the JSON Web Token.

!SUBSECTION jwtDecode

`crypto.jwtDecode(key, token, noVerify): string | null`

**Arguments**

* **key**: `string | null`

  The secret cryptographic key that was used to sign the message using the algorithm indicated by the token.
  Note that this function will raise an error if the key is omitted but the algorithm expects a key.

  If the algorithm does not expect a key but a key is provided, the token will fail to verify.

* **token**: `string`

  The token to decode.

  Note that the function will raise an error if the token is malformed (e.g. does not have exactly three segments).

* **noVerify**: `boolean` (Default: `false`)

  Whether verification should be skipped. If this is set to `true` the signature of the token will not be verified.
  Otherwise the function will raise an error if the signature can not be verified using the given key.

Returns the decoded JSON message or `null` if no token is provided.
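A round trip using both functions (a minimal sketch; the key and payload are made up for this example):

```js
const crypto = require('@arangodb/crypto');

// sign a payload with a shared secret using HMAC-SHA512
const token = crypto.jwtEncode('my-secret-key', '{"sub": "alice"}', 'HS512');

// decoding with the same key verifies the signature and
// returns the original message
const message = crypto.jwtDecode('my-secret-key', token);
// message === '{"sub": "alice"}'
```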
!SUBSECTION jwtAlgorithms

A helper object containing the supported JWT algorithms. Each attribute name corresponds to a JWT `alg` and the value is an object with `sign` and `verify` methods.

!SUBSECTION jwtCanonicalAlgorithmName

`crypto.jwtCanonicalAlgorithmName(name): string`

A helper function that translates a JWT `alg` value found in a JWT header into the canonical name of the algorithm in `jwtAlgorithms`. Raises an error if no algorithm with a matching name is found.

**Arguments**

* **name**: `string`

  Algorithm name to look up.

Returns the canonical name for the algorithm.

!SECTION Hashing algorithms

!SUBSECTION md5

`crypto.md5(message): string`

Hashes the given message using the MD5 algorithm.

**Arguments**

* **message**: `string`

  The message to hash.

Returns the cryptographic hash.

!SUBSECTION sha1

`crypto.sha1(message): string`

Hashes the given message using the SHA-1 algorithm.

**Arguments**

* **message**: `string`

  The message to hash.

Returns the cryptographic hash.

!SUBSECTION sha224

`crypto.sha224(message): string`

Hashes the given message using the SHA-224 algorithm.

**Arguments**

* **message**: `string`

  The message to hash.

Returns the cryptographic hash.

!SUBSECTION sha256

`crypto.sha256(message): string`

Hashes the given message using the SHA-256 algorithm.

**Arguments**

* **message**: `string`

  The message to hash.

Returns the cryptographic hash.

!SUBSECTION sha384

`crypto.sha384(message): string`

Hashes the given message using the SHA-384 algorithm.

**Arguments**

* **message**: `string`

  The message to hash.

Returns the cryptographic hash.

!SUBSECTION sha512

`crypto.sha512(message): string`

Hashes the given message using the SHA-512 algorithm.

**Arguments**

* **message**: `string`

  The message to hash.

Returns the cryptographic hash.

!SECTION Miscellaneous

!SUBSECTION constantEquals

`crypto.constantEquals(str1, str2): boolean`

Compares two strings.
This function iterates over the entire length of both strings
and can help make certain timing attacks harder.

**Arguments**

* **str1**: `string`

  The first string to compare.

* **str2**: `string`

  The second string to compare.

Returns `true` if the strings are equal, `false` otherwise.

!SUBSECTION pbkdf2

`crypto.pbkdf2(salt, password, iterations, keyLength): string`

Generates a PBKDF2-HMAC-SHA1 hash of the given password.

**Arguments**

* **salt**: `string`

  The cryptographic salt to hash the password with.

* **password**: `string`

  The message or password to hash.

* **iterations**: `number`

  The number of iterations.
  This should be a very high number.
  OWASP recommended 64000 iterations in 2012 and recommends doubling that number every two years.

  When using PBKDF2 for password hashes it is also recommended to add a random value
  (typically between 0 and 32000) to that number that is different for each user.

* **keyLength**: `number`

  The key length.

Returns the cryptographic hash.
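For illustration, a minimal password-hashing sketch combining `genRandomSalt`, `pbkdf2` and `constantEquals` (the iteration count is just an example; see the OWASP note above):

```js
const crypto = require('@arangodb/crypto');

// hash a new password with a fresh random salt
const salt = crypto.genRandomSalt(16);
const iterations = 64000;
const hash = crypto.pbkdf2(salt, 'correct horse battery staple', iterations, 32);

// verify a login attempt: recompute with the stored salt and
// compare in constant time to make timing attacks harder
const attempt = crypto.pbkdf2(salt, 'correct horse battery staple', iterations, 32);
crypto.constantEquals(hash, attempt); // true
```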
!SUBSECTION hmac

`crypto.hmac(key, message, algorithm): string`

Generates an HMAC hash of the given message.

**Arguments**

* **key**: `string`

  The cryptographic key to use to hash the message.

* **message**: `string`

  The message to hash.

* **algorithm**: `string`

  The name of the algorithm to use.

Returns the cryptographic hash.
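For illustration (a minimal sketch; `"sha256"` is assumed here to be among the accepted algorithm names):

```js
const crypto = require('@arangodb/crypto');

// plain hash of a message
crypto.sha256('hello'); // hex-encoded SHA-256 digest

// keyed hash of the same message
crypto.hmac('my-secret-key', 'hello', 'sha256');
```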
@@ -1,4 +1,6 @@
-!CHAPTER Module "fs"
+!CHAPTER Filesystem Module
+
+`require('@arangodb/fs')`

The implementation tries to follow the CommonJS specification
[Filesystem/A/0](http://wiki.commonjs.org/wiki/Filesystem/A/0) where possible.
@@ -1,23 +0,0 @@
!CHAPTER Module "process"

mimic parts of nodejs process module

!SUBSECTION env

env maps the systems environment to an associative array.

<!--
these are copied from node, but don't serve any specific purpose over here
x! SUB SECTION domain


x! SUB SECTION _events

x! SUB SECTION argv

x! SUB SECTION stdout

x! SUB SECTION CMD

x! SUB SECTION nextTick
-->
@@ -1,216 +1,132 @@
!CHAPTER JavaScript Modules

-!SUBSECTION Introduction to JavaScript Modules
+ArangoDB uses a Node.js compatible module system. You can use the function *require* in order to load a module or library. It returns the exported variables and functions of the module.

-The ArangoDB uses a [CommonJS](http://wiki.commonjs.org/wiki)
-compatible module and package concept. You can use the function *require* in
-order to load a module or package. It returns the exported variables and
-functions of the module or package.
+The global variables `global`, `process`, `console`, `Buffer`, `__filename` and `__dirname` are available throughout ArangoDB and Foxx.

-There are some extensions to the CommonJS concept to allow ArangoDB to load
-Node.js modules as well.
+!SECTION Node compatibility modules

-!SECTION CommonJS Modules
+ArangoDB supports a number of modules for compatibility with Node.js, including:

-Unfortunately, the JavaScript libraries are just in the process of being
-standardized. CommonJS has defined some important modules. ArangoDB implements
-the following
-
-util: 4.1.0 (modified)
-
-- [console](Console.md) is a well known logging facility to all the JavaScript developers.
+* [assert](http://nodejs.org/api/assert.html) implements basic assertion and testing functions.
+
+* [buffer](http://nodejs.org/api/buffer.html) implements a binary data type for JavaScript.
+
+* [console](Console.md) is a well known logging facility to all the JavaScript developers.
+  ArangoDB implements most of the [Console API](http://wiki.commonjs.org/wiki/Console),
+  with the exceptions of *profile* and *count*.

-- [fs](FileSystem.md) provides a file system API for the manipulation of paths, directories,
-  files, links, and the construction of file streams. ArangoDB implements
-  most [Filesystem/A](http://wiki.commonjs.org/wiki/Filesystem/A) functions.
+* [events](http://nodejs.org/api/events.html) implements an event emitter.

-- Modules are implemented according to
-  [Modules/1.1.1](http://wiki.commonjs.org/wiki/Modules)
+* [module](http://nodejs.org/api/modules.html) provides direct access to the module system.

-!SUBSECTION ArangoDB Specific Modules
+* [path](http://nodejs.org/api/path.html) implements functions dealing with filenames and paths.

-A lot of the modules, however, are ArangoDB specific. These modules
-are described in the following chapters.
+* [punycode](http://nodejs.org/api/punycode.html) implements conversion functions for [punycode](http://en.wikipedia.org/wiki/Punycode) encoding.

-!SUBSECTION Node Modules
+* [querystring](http://nodejs.org/api/querystring.html) provides utilities for dealing with query strings.

-ArangoDB also supports some [node](http://www.nodejs.org) modules.
+* [stream](http://nodejs.org/api/stream.html) provides a streaming interface.

-- [assert](http://nodejs.org/api/assert.html) implements
-  assertion and testing functions.
+* [string_decoder](https://nodejs.org/api/string_decoder.html) implements logic for decoding buffers into strings.

-- [buffer](http://nodejs.org/api/buffer.html) implements
-  a binary data type for JavaScript.
+* [url](http://nodejs.org/api/url.html) provides utilities for URL resolution and parsing.

-- [path](http://nodejs.org/api/path.html) implements
-  functions dealing with filenames and paths.
+* [util](http://nodejs.org/api/util.html) provides general utility functions like `format` and `inspect`.

-- [punycode](http://nodejs.org/api/punycode.html) implements
-  conversion functions for
-  [punycode](http://en.wikipedia.org/wiki/Punycode) encoding.
+Additionally ArangoDB provides partial implementations for the following modules:

-- [querystring](http://nodejs.org/api/querystring.html)
-  provides utilities for dealing with query strings.
+* `net`:
+  only `isIP`, `isIPv4` and `isIPv6`.

-- [stream](http://nodejs.org/api/stream.html)
-  provides a streaming interface.
+* `process`:
+  only `env` and `cwd`;
+  stubs for `argv`, `stdout.isTTY`, `stdout.write`, `nextTick`.

-- [url](http://nodejs.org/api/url.html)
-  has utilities for URL resolution and parsing.
+* `timers`:
+  stubs for `setImmediate`, `setTimeout`, `setInterval`, `clearImmediate`, `clearTimeout`, `clearInterval` and `ref`.

-!SUBSECTION Bundled NPM Modules
+* `tty`:
+  only `isatty` (always returns `false`).

-The following [NPM modules](https://npmjs.org) are preinstalled.
+* `vm`:
+  only `runInThisContext`.

-- [aqb](https://github.com/arangodb/aqbjs)
-  is the ArangoDB Query Builder and can be used to construct
-  AQL queries with a chaining JavaScript API.
+The following Node.js modules are not available at all:
+`child_process`,
+`cluster`,
+`constants`,
+`crypto` (but see `@arangodb/crypto` below),
+`dgram`,
+`dns`,
+`domain`,
+`fs` (but see `@arangodb/fs` below),
+`http`,
+`https`,
+`os`,
+`sys`,
+`tls`,
+`v8`,
+`zlib`.

-- [error-stack-parser](http://www.stacktracejs.com)
+!SECTION ArangoDB Specific Modules

-- [expect.js](https://github.com/Automattic/expect.js)
+There are a large number of ArangoDB-specific modules using the `@arangodb` namespace, mostly for internal use by ArangoDB itself. The following, however, are noteworthy:

-- [extendible](https://github.com/3rd-Eden/extendible)
+* [@arangodb/fs](FileSystem.md) provides a file system API for the manipulation of paths, directories, files, links, and the construction of file streams. ArangoDB implements most [Filesystem/A](http://wiki.commonjs.org/wiki/Filesystem/A) functions.

-- [foxx_generator](https://github.com/moonglum/foxx_generator)
+* [@arangodb/crypto](Crypto.md) provides various cryptography functions including hashing algorithms.

-- [http-errors](https://github.com/jshttp/http-errors)
+* [@arangodb/foxx](../../Foxx/README.md) is the namespace providing the various building blocks of the Foxx microservice framework.

-- [i (inflect)](https://github.com/pksunkara/inflect)
+!SECTION Bundled NPM Modules

-- [joi](https://github.com/hapijs/joi)
-  is a validation library that is used throughout the Foxx framework.
+The following [NPM modules](https://www.npmjs.com) are preinstalled:

-- [js-yaml](https://github.com/nodeca/js-yaml)
+* [aqb](https://github.com/arangodb/aqbjs)
+  is the ArangoDB Query Builder and can be used to construct AQL queries with a chaining JavaScript API.

-- [minimatch](https://github.com/isaacs/minimatch)
+* [chai](http://chaijs.com)
+  is a full-featured assertion library for writing JavaScript tests.

-- [qs](https://github.com/hapijs/qs)
-  provides utilities for dealing with query strings using a different format
-  than the **querystring** module.
+* [dedent](https://github.com/dmnd/dedent)
+  is a simple utility function for formatting multi-line strings.

-- [ramda](http://ramdajs.com)
+* [error-stack-parser](http://www.stacktracejs.com)
+  parses stacktraces into a more useful format.

-- [semver](https://github.com/npm/node-semver)
+<!-- * [expect.js](https://github.com/Automattic/expect.js) (only for legacy tests) -->

-- [sinon](http://sinonjs.org)
+<!-- * [extendible](https://github.com/3rd-Eden/extendible) (only for legacy mode) -->

-- [underscore](http://underscorejs.org)
+* [graphql-sync](https://github.com/arangodb/graphql-sync)
+  is an ArangoDB-compatible GraphQL server/schema implementation.

-!SUBSECTION Installing NPM Modules
+* [highlight.js](https://highlightjs.org)
+  is an HTML syntax highlighter.

-You can install additional modules using `npm install`. Note the following
-limitations in ArangoDB's compatibility with node or browser modules:
+* [i (inflect)](https://github.com/pksunkara/inflect)
+  is a utility library for inflecting (e.g. pluralizing) words.

-- modules must be implemented in pure JavaScript (no native extensions)
-- modules must be strictly synchronous (e.g. no setTimeout or promises)
-- only a subset of node's built-in modules are supported (see above)
-- the same limitations apply to each module's dependencies
+* [joi](https://github.com/hapijs/joi)
+  is a validation library that is supported throughout the Foxx framework.

-!SUBSECTION require
+* [js-yaml](https://github.com/nodeca/js-yaml)
+  is a JavaScript implementation of the YAML data format (a partial superset of JSON).

-`require(path)`
+* [lodash](https://lodash.com)
+  is a utility belt for JavaScript providing various useful helper functions.

-*require* checks if the module or package specified by *path* has already
-been loaded. If not, the content of the file is executed in a new
-context. Within the context you can use the global variable *exports* in
-order to export variables and functions. This variable is returned by
-*require*.
+* [minimatch](https://github.com/isaacs/minimatch)
+  is a glob matcher for matching wildcards in file paths.

-Assume that your module file is *test1.js* and contains
+* [node-semver](https://github.com/npm/node-semver)
+  is a utility library for handling semver version numbers.

-```js
-exports.func1 = function() {
-  print("2");
-};
-
-exports.const1 = 4;
-```
-
-Then you can use *require* to load the file and access the exports.
-
-```js
-unix> ./arangosh
-arangosh> var test1 = require("test1");
-
-arangosh> test1.const1;
-4
-
-arangosh> test1.func1();
-2
-```
-
-*require* follows the specification
-[Modules/1.1.1](http://wiki.commonjs.org/wiki/Modules/1.1.1).
-
-*require* will inject two variables into the context of the required code:
-
-- `__filename`: contains the name of the required file/module
-
-- `__dirname`: contains the directory name of the required file/module
-
-The values in `__filename` and `__dirname` can be used for generic debugging and for
-creating filename relative to the required file, e.g.
-
-@startDocuBlockInline MODJS_fsDir
-@EXAMPLE_ARANGOSH_OUTPUT{MODJS_fsDir}
-var files = require("fs");
-relativeFile = files.join(__dirname, "scripts", "test.js");
-@END_EXAMPLE_ARANGOSH_OUTPUT
-@endDocuBlock MODJS_fsDir
-
-*require* can also be used to load JSON data. If the name of the required module ends
|
||||
with *.json*, *require* will run a `JSON.parse()` on the file/module contents and return
|
||||
the result.
|
||||
|
||||
Note: the purpose of *require* is to load modules or packages. It cannot be used to load
|
||||
arbitrary JavaScript files.
|
||||
|
||||
|
||||
!SECTION Modules Path versus Modules Collection
|
||||
|
||||
ArangoDB comes with predefined modules defined in the file-system under the path
|
||||
specified by *startup.startup-directory*. In a standard installation this
|
||||
point to the system share directory. Even if you are an administrator of
|
||||
ArangoDB you might not have write permissions to this location. On the other
|
||||
hand, in order to deploy some extension for ArangoDB, you might need to install
|
||||
additional JavaScript modules. This would require you to become root and copy
|
||||
the files into the share directory. In order to ease the deployment of
|
||||
extensions, ArangoDB uses a second mechanism to look up JavaScript modules.
|
||||
|
||||
JavaScript modules can either be stored in the filesystem as regular file or in
|
||||
the database collection *_modules*.
|
||||
|
||||
If you execute
|
||||
|
||||
```js
|
||||
require("com/example/extension")
|
||||
```
|
||||
then ArangoDB will try to locate the corresponding JavaScript as file as
|
||||
follows
|
||||
|
||||
- There is a cache for the results of previous *require* calls. First of
|
||||
all ArangoDB checks if *com/example/extension* is already in the modules
|
||||
cache. If it is, the export object for this module is returned. No further
|
||||
JavaScript is executed.
|
||||
|
||||
- ArangoDB will then check, if there is a file called **com/example/extension.js** in the system search path. If such a file exists, it is executed in a new module context and the value of *exports* object is returned. This value is also stored in the module cache.
|
||||
|
||||
- If no file can be found, ArangoDB will check if the collection *_modules*
|
||||
contains a document of the form
|
||||
|
||||
```js
|
||||
{
|
||||
path: "/com/example/extension",
|
||||
content: "...."
|
||||
}
|
||||
```
|
||||
|
||||
**Note**: The leading */* is important - even if you call *require* without a
|
||||
leading */*. If such a document exists, then the value of the *content*
|
||||
attribute must contain the JavaScript code of the module. This string is
|
||||
executed in a new module context and the value of *exports* object is
|
||||
returned. This value is also stored in the module cache.
|
||||
* [qs](https://github.com/hapijs/qs)
|
||||
provides utilities for dealing with query strings using a different format than the **querystring** module.
|
||||
|
||||
* [sinon](http://sinonjs.org)
|
||||
is a mocking library for writing test stubs, mocks and spies.
|
||||
|
|
|
@ -32,13 +32,14 @@ The following methods exist on the collection object (returned by *db.name*):
* [collection.closedRange(attribute, left, right)](../../DataModeling/Documents/DocumentMethods.md#closed-range)
* [collection.document(object)](../../DataModeling/Documents/DocumentMethods.md#document)
* [collection.documents(keys)](../../DataModeling/Documents/DocumentMethods.md#lookup-by-keys)
* [collection.edges(vertex-id)](../../DataModeling/Documents/DocumentMethods.md#misc)
* [collection.edges(vertex-id)](../../DataModeling/Documents/DocumentMethods.md#edges)
* [collection.exists(object)](../../DataModeling/Documents/DocumentMethods.md#exists)
* [collection.firstExample(example)](../../DataModeling/Documents/DocumentMethods.md#first-example)
* [collection.inEdges(vertex-id)](../../DataModeling/Documents/DocumentMethods.md#misc)
* [collection.inEdges(vertex-id)](../../DataModeling/Documents/DocumentMethods.md#edges)
* [collection.insert(data)](../../DataModeling/Documents/DocumentMethods.md#insert)
* [collection.edges(vertices)](../../DataModeling/Documents/DocumentMethods.md#edges)
* [collection.iterate(iterator,options)](../../DataModeling/Documents/DocumentMethods.md#misc)
* [collection.outEdges(vertex-id)](../../DataModeling/Documents/DocumentMethods.md#misc)
* [collection.outEdges(vertex-id)](../../DataModeling/Documents/DocumentMethods.md#edges)
* [collection.queryByExample(example)](../../DataModeling/Documents/DocumentMethods.md#query-by-example)
* [collection.range(attribute, left, right)](../../DataModeling/Documents/DocumentMethods.md#range)
* [collection.remove(selector)](../../DataModeling/Documents/DocumentMethods.md#remove)
@ -1,4 +1,4 @@
!SECTION Serverside db-Object implementation
!SECTION Server-side db-Object implementation

We [already talked about the arangosh db object implementation](../GettingStarted/Arangosh.md). Now a little more about the server version; the following examples won't work properly in arangosh.
@ -353,48 +353,48 @@ only an error object.
Returns the document for a document-handle:

@startDocuBlockInline documentsCollectionNameValid
@EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionNameValid}
@startDocuBlockInline documentsCollectionNameValidPlain
@EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionNameValidPlain}
~ db._create("example");
~ var myid = db.example.insert({_key: "2873916"});
db.example.document("example/2873916");
~ db._drop("example");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock documentsCollectionNameValid
@endDocuBlock documentsCollectionNameValidPlain

Returns the document for a document-key:

@ startDocuBlockInline documentsCollectionNameValidByKey
@ EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionNameValidByKey}
@startDocuBlockInline documentsCollectionNameValidByKey
@EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionNameValidByKey}
~ db._create("example");
~ var myid = db.example.insert({_key: "2873916"});
db.example.document("2873916");
~ db._drop("example");
@ END_EXAMPLE_ARANGOSH_OUTPUT
@ endDocuBlock documentsCollectionNameValidByKey
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock documentsCollectionNameValidByKey

Returns the document for an object:

@ startDocuBlockInline documentsCollectionNameValidByObject
@ EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionNameValidByObject}
@startDocuBlockInline documentsCollectionNameValidByObject
@EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionNameValidByObject}
~ db._create("example");
~ var myid = db.example.insert({_key: "2873916"});
db.example.document({_id: "example/2873916"});
~ db._drop("example");
@ END_EXAMPLE_ARANGOSH_OUTPUT
@ endDocuBlock documentsCollectionNameValidByObject
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock documentsCollectionNameValidByObject

Returns the document for an array of two keys:

@ startDocuBlockInline documentsCollectionNameValidMulti
@ EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionNameValidMulti}
@startDocuBlockInline documentsCollectionNameValidMulti
@EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionNameValidMulti}
~ db._create("example");
~ var myid = db.example.insert({_key: "2873916"});
~ var myid = db.example.insert({_key: "2873917"});
db.example.document(["2873916","2873917"]);
~ db._drop("example");
@ END_EXAMPLE_ARANGOSH_OUTPUT
@ endDocuBlock documentsCollectionNameValidMulti
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock documentsCollectionNameValidMulti

An error is raised if the document is unknown:
@ -565,23 +565,23 @@ multiple documents with one call.
**Examples**

@startDocuBlockInline documentsCollectionInsert
@EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionInsert}
@startDocuBlockInline documentsCollectionInsertSingle
@EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionInsertSingle}
~ db._create("example");
db.example.insert({ Hello : "World" });
db.example.insert({ Hello : "World" }, {waitForSync: true});
~ db._drop("example");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock documentsCollectionInsert
@endDocuBlock documentsCollectionInsertSingle

@ startDocuBlockInline documentsCollectionInsertMulti
@ EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionInsertMulti}
@startDocuBlockInline documentsCollectionInsertMulti
@EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionInsertMulti}
~ db._create("example");
db.example.insert([{ Hello : "World" }, {Hello: "there"']);
db.example.insert([{ Hello : "World" }, {Hello: "there"}])
db.example.insert([{ Hello : "World" }, {}], {waitForSync: true});
~ db._drop("example");
@ END_EXAMPLE_ARANGOSH_OUTPUT
@ endDocuBlock documentsCollectionInsertMulti
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock documentsCollectionInsertMulti
@ -1143,7 +1143,6 @@ an object with the following sub-attributes:
!SUBSECTION Collection type

`collection.type()`

Returns the type of a collection. Possible values are:
@ -1153,7 +1152,6 @@ Returns the type of a collection. Possible values are:
!SUBSECTION Get the Version of ArangoDB

`db._version()`

Returns the server version string. Note that this is not the version of the
@ -1169,28 +1167,99 @@ database.
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock dbVersion

!SUBSECTION Edges

Edges are normal documents that always contain a `_from` and a `_to`
attribute. Therefore, you can use the document methods to operate on
edges. The following methods, however, are specific to edges.

`edge-collection.edges(vertex)`

The *edges* operator finds all edges starting from (outbound) or ending
in (inbound) *vertex*.

`edge-collection.edges(vertices)`

The *edges* operator finds all edges starting from (outbound) or ending
in (inbound) a document from *vertices*, which must be a list of documents
or document handles.

@startDocuBlockInline EDGCOL_02_Relation
@EXAMPLE_ARANGOSH_OUTPUT{EDGCOL_02_Relation}
db._create("vertex");
db._createEdgeCollection("relation");
~ var myGraph = {};
myGraph.v1 = db.vertex.insert({ name : "vertex 1" });
myGraph.v2 = db.vertex.insert({ name : "vertex 2" });
| myGraph.e1 = db.relation.insert(myGraph.v1, myGraph.v2,
{ label : "knows"});
db._document(myGraph.e1);
db.relation.edges(myGraph.e1._id);
~ db._drop("relation");
~ db._drop("vertex");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock EDGCOL_02_Relation

`edge-collection.inEdges(vertex)`

The *inEdges* operator finds all edges ending in (inbound) *vertex*.

`edge-collection.inEdges(vertices)`

The *inEdges* operator finds all edges ending in (inbound) a document from
*vertices*, which must be a list of documents or document handles.

**Examples**

@startDocuBlockInline EDGCOL_02_inEdges
@EXAMPLE_ARANGOSH_OUTPUT{EDGCOL_02_inEdges}
db._create("vertex");
db._createEdgeCollection("relation");
~ var myGraph = {};
myGraph.v1 = db.vertex.insert({ name : "vertex 1" });
myGraph.v2 = db.vertex.insert({ name : "vertex 2" });
| myGraph.e1 = db.relation.insert(myGraph.v1, myGraph.v2,
{ label : "knows"});
db._document(myGraph.e1);
db.relation.inEdges(myGraph.v1._id);
db.relation.inEdges(myGraph.v2._id);
~ db._drop("relation");
~ db._drop("vertex");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock EDGCOL_02_inEdges

`edge-collection.outEdges(vertex)`

The *outEdges* operator finds all edges starting from (outbound)
*vertex*.

`edge-collection.outEdges(vertices)`

The *outEdges* operator finds all edges starting from (outbound) a document
from *vertices*, which must be a list of documents or document handles.

**Examples**

@startDocuBlockInline EDGCOL_02_outEdges
@EXAMPLE_ARANGOSH_OUTPUT{EDGCOL_02_outEdges}
db._create("vertex");
db._createEdgeCollection("relation");
~ var myGraph = {};
myGraph.v1 = db.vertex.insert({ name : "vertex 1" });
myGraph.v2 = db.vertex.insert({ name : "vertex 2" });
| myGraph.e1 = db.relation.insert(myGraph.v1, myGraph.v2,
{ label : "knows"});
db._document(myGraph.e1);
db.relation.outEdges(myGraph.v1._id);
db.relation.outEdges(myGraph.v2._id);
~ db._drop("relation");
~ db._drop("vertex");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock EDGCOL_02_outEdges

!SUBSECTION Misc

`collection.edges(vertex-id)`

Returns all edges connected to the vertex specified by *vertex-id*.

`collection.inEdges(vertex-id)`

Returns inbound edges connected to the vertex specified by *vertex-id*.

`collection.outEdges(vertex-id)`

Returns outbound edges connected to the vertex specified by *vertex-id*.

`collection.iterate(iterator, options)`

Iterates over some elements of the collection and applies the function
@ -1205,10 +1274,8 @@ as second argument.
- *probability* (optional, default all): a number between *0* and
  *1*. Documents are chosen with this probability.

**Examples**

@startDocuBlockInline accessViaGeoIndex
@EXAMPLE_ARANGOSH_OUTPUT{accessViaGeoIndex}
~db._create("example")
@ -1 +1,3 @@
!CHAPTER Graphs, Vertices & Edges

Graphs, vertices & edges are defined in the [Graphs](../Graphs/README.md) chapter in detail.
@ -1,31 +1,47 @@
!CHAPTER Foxx at a glance

Each Foxx service is defined by a [JSON manifest](Manifest.md) specifying the
entry point, any scripts defined by the service, possible configuration
options and Foxx dependencies, as well as other metadata. Within a service,
these options are exposed as the [service context](Context.md).
Each Foxx service is defined by a [JSON manifest](Manifest.md) specifying the entry point, any scripts defined by the service, possible configuration options and Foxx dependencies, as well as other metadata. Within a service, these options are exposed as the [service context](Context.md).

At the heart of the Foxx framework lies the [Foxx Router](Router/README.md)
which is used to define HTTP endpoints. A service can access the database
either directly from its context using prefixed collections or the
[ArangoDB database API](Modules.md).
At the heart of the Foxx framework lies the [Foxx Router](Router/README.md) which is used to define HTTP endpoints. A service can access the database either directly from its context using prefixed collections or the [ArangoDB database API](Modules.md).

While Foxx is primarily designed to be used to access the database itself,
ArangoDB also provides an [API to make HTTP requests](Modules.md) to
external services.
While Foxx is primarily designed to be used to access the database itself, ArangoDB also provides an [API to make HTTP requests](Modules.md) to external services.

Finally, [scripts](Scripts.md) can be used to perform one-off tasks, which
can also be scheduled to be performed asynchronously using the built-in
job queue.
Finally, [scripts](Scripts.md) can be used to perform one-off tasks, which can also be scheduled to be performed asynchronously using the built-in job queue.

!SECTION How does it work

Foxx services consist of JavaScript code running in the V8 JavaScript runtime embedded inside ArangoDB. Each service is mounted in each available V8 context (the number of contexts can be adjusted in the ArangoDB configuration). Incoming requests are distributed across these contexts automatically.

If you're coming from another JavaScript environment like Node.js this is similar to running multiple Node.js processes behind a load balancer: you should not rely on server-side state (other than the database itself) between different requests as there is no way of making sure consecutive requests will be handled in the same context.

Because the JavaScript code is running inside the database, another difference is that all Foxx and ArangoDB APIs are purely synchronous and should be considered blocking. This is especially important for transactions, which in ArangoDB can execute arbitrary code but may have to lock entire collections (effectively preventing any data from being written) until the code has completed.
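For example, a server-side transaction blocks until its `action` function returns (a sketch; the collection name is made up):

```js
const db = require('@arangodb').db;

// the collections named here may be locked for the duration of the action
db._executeTransaction({
  collections: {write: ['mycollection']},
  action: function () {
    const db = require('@arangodb').db;
    db.mycollection.insert({value: 1});
  }
});
```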
For information on how this affects interoperability with third-party JavaScript modules written for other JavaScript environments see [the chapter on dependencies](./Dependencies.md).

!SECTION Development mode

TODO
Development mode allows you to make changes to deployed services in-place directly from the database server's file system without downloading and re-uploading the service bundle.

You can toggle development mode from the web interface in the service's settings tab. Once activated the service's file system path will be shown in the info tab.

<!-- TODO (Add link to relevant aardvark docs) -->

Once enabled, the service's source files and manifest will be re-evaluated every time a route of the service is accessed, effectively re-deploying the service on every request. As the name indicates this is intended to be used strictly during development and is most definitely a bad idea on production servers.

Also note that if you are serving static files as part of your service, accessing these files from a browser may also trigger a re-deployment of the service. Finally, making HTTP requests to a service running in development mode from within the service (i.e. using the `@arangodb/request` module to access the service itself) is probably not a good idea either.

!SECTION Foxx store

TODO
The Foxx store provides access to a number of ready-to-use official and community-maintained Foxx services you can install with a single click, including example services and wrappers for external SaaS tools like transactional e-mail services, bug loggers or analytics trackers.

You can find the Foxx store in the web interface by using the "Add Service" button in the service list.

<!-- TODO (Add link to relevant aardvark docs) -->

!SECTION Cluster-Foxx

TODO
When running ArangoDB as a cluster the Foxx services will run on each coordinator. Installing, upgrading and uninstalling services on any coordinator will automatically affect the other coordinators, making deployments as easy as in single-server mode. However, this means there are some limitations:

You should avoid any kind of file system state beyond the deployed service bundle itself. Don't write data to the file system or encode any expectations of the file system state other than the files in the service folder that were installed as part of the service (e.g. file uploads or custom log files).

Additionally, development mode is not supported in cluster mode: development mode is intended to allow modifying the service's code and seeing the effect of those changes in real time, but as the service is deployed to multiple coordinators the different copies of the service would become inconsistent, leading to unpredictable behaviour. It is recommended you either redeploy services when making changes to code running in a cluster or use development mode on a single-server installation.
@ -1,3 +1,68 @@
!CHAPTER Foxx configuration

TODO
Foxx services can define configuration parameters to make them more re-usable.

The `configuration` object maps names to configuration parameters:

* The key is the name under which the parameter will be available
  on the [service context's](Context.md) `configuration` property.

* The value is a parameter definition.

The parameter definition can have the following properties:

* **description**: `string`

  Human readable description of the parameter.

* **type**: `string` (Default: `"string"`)

  Type of the configuration parameter. Supported values are:

  * `"integer"` or `"int"`:
    any finite integer number.

  * `"boolean"` or `"bool"`:
    the values `true` or `false`.

  * `"number"`:
    any finite decimal or integer number.

  * `"string"`:
    any string value.

  * `"json"`:
    any well-formed JSON value.

  * `"password"`:
    like *string* but will be displayed as a masked input field in the web frontend.

* **default**: `any`

  Default value of the configuration parameter.

* **required**: (Default: `true`)

  Whether the parameter is required.

If the configuration has parameters that do not specify a default value, you need to configure the service before it becomes active. In the meantime a fallback service will be mounted that responds to all requests with an HTTP 500 status code indicating a server-side error.

The configuration parameters of a mounted service can be adjusted from the web interface by clicking the *Configuration* button in the service details.

<!-- TODO (Link to admin docs) -->

**Examples**

```json
"configuration": {
  "currency": {
    "description": "Currency symbol to use for prices in the shop.",
    "default": "$",
    "type": "string"
  },
  "secretKey": {
    "description": "Secret key to use for signing session tokens.",
    "type": "password"
  }
}
```
@ -16,8 +16,84 @@ Make sure to include the actual `node_modules` folder in your Foxx service bundl
!SUBSECTION Compatibility caveats

TODO Note about async etc
Unlike JavaScript in browsers or Node.js, the JavaScript environment in ArangoDB is synchronous. This means any modules that depend on asynchronous behaviour like promises or `setTimeout` will not behave correctly in ArangoDB or Foxx. Additionally, unlike Node.js, ArangoDB does not support native extensions. All modules have to be implemented in pure JavaScript.

While ArangoDB provides a lot of compatibility code to support modules written for Node.js, some Node.js built-in modules cannot be provided by ArangoDB. For a closer look at the Node.js modules ArangoDB does or does not provide check out the [appendix on JavaScript modules](../Appendix/JavaScriptModules/README.md).

Also note that these restrictions apply not only to the modules you wish to install but also to the dependencies of those modules. As a rule of thumb: modules written to work in Node.js and the browser that do not rely on async behaviour should generally work; modules that rely on network or filesystem I/O or make heavy use of async behaviour most likely will not.

!SECTION Foxx dependencies

TODO
Foxx dependencies can be declared in a [service's manifest](Manifest.md) using the `provides` and `dependencies` fields:

* `provides` lists the dependencies a given service provides, i.e. which APIs it claims to be compatible with

* `dependencies` lists the dependencies a given service uses, i.e. which APIs its dependencies need to be compatible with

A dependency name should generally use the same format as a namespaced (org-scoped) NPM module, e.g. `@foxx/sessions`.

Dependency names refer to the external JavaScript API of a service rather than specific services implementing those APIs. Some dependency names defined by officially maintained services are:

* `@foxx/auth` (version `1.0.0`)
* `@foxx/api-keys` (version `1.0.0`)
* `@foxx/bugsnag` (versions `1.0.0` and `2.0.0`)
* `@foxx/mailgun` (versions `1.0.0` and `2.0.0`)
* `@foxx/postageapp` (versions `1.0.0` and `2.0.0`)
* `@foxx/postmark` (versions `1.0.0` and `2.0.0`)
* `@foxx/sendgrid` (versions `1.0.0` and `2.0.0`)
* `@foxx/oauth2` (versions `1.0.0` and `2.0.0`)
* `@foxx/segment-io` (versions `1.0.0` and `2.0.0`)
* `@foxx/sessions` (versions `1.0.0` and `2.0.0`)
* `@foxx/users` (versions `1.0.0`, `2.0.0` and `3.0.0`)

A `provides` definition maps each provided dependency's name to the provided version:

```json
"provides": {
  "@foxx/auth": "1.0.0"
}
```

A `dependencies` definition maps the local alias of a given dependency against its name and the supported version range (either as a JSON object or a shorthand string):

```json
"dependencies": {
  "mySessions": "@foxx/sessions:^2.0.0",
  "myAuth": {
    "name": "@foxx/auth",
    "version": "^1.0.0",
    "required": false
  }
}
```

Dependencies can be configured from the web interface in a service's settings tab using the "Dependencies" button.

<!-- TODO (Add link to relevant aardvark docs) -->

The value for each dependency should be the database-relative mount path of the service (including the leading slash). In order to be usable as the dependency of another service both services need to be mounted in the same database. A service can be used to provide multiple dependencies for the same service (as long as the expected JavaScript APIs don't conflict).

A service that has unconfigured required dependencies cannot be used until all of its dependencies have been configured.

It is possible to specify the mount path of a service that does not actually declare the dependency as provided. There is currently no validation beyond the manifest formats.

When a service uses another mounted service as a dependency the dependency's `main` entry file's `exports` object becomes available in the `module.context.dependencies` object of the other service:

**Examples**

Service A and Service B are mounted in the same database.
Service B has a dependency with the local alias `"greeter"`.
The dependency is configured to use the mount path of Service A.

```js
// Entry file of Service A
module.exports = {
  sayHi () {
    return 'Hello';
  }
};

// Somewhere in Service B
const greeter = module.context.dependencies.greeter;
res.write(greeter.sayHi());
```
@ -63,31 +63,31 @@ The object returned by the router's methods provides additional methods to attac
!SECTION Try it out

At this point you can upload the service folder as a zip archive from the web interface using the *Services* tab:
At this point you can upload the service folder as a zip archive from the web interface using the *Services* tab.

![Screenshot of the Services tab with no services listed]()
<!-- TODO [Screenshot of the Services tab with no services listed] -->

Click *Add Service* then pick the *Zip* option in the dialog. You will need to provide a *mount path*, which is the URL prefix at which the service will be mounted (e.g. `/getting-started`):
Click *Add Service* then pick the *Zip* option in the dialog. You will need to provide a *mount path*, which is the URL prefix at which the service will be mounted (e.g. `/getting-started`).

![Screenshot of the Add Service dialog with the Zip tab active]()
<!-- TODO [Screenshot of the Add Service dialog with the Zip tab active] -->

Once you have picked the zip archive using the file picker, the upload should begin immediately and your service should be installed. Otherwise press the *Install* button and wait for the dialog to disappear and the service to show up in the service list:
Once you have picked the zip archive using the file picker, the upload should begin immediately and your service should be installed. Otherwise press the *Install* button and wait for the dialog to disappear and the service to show up in the service list.

![Screenshot of the Services tab with the getting-started service listed]()
<!-- TODO [Screenshot of the Services tab with the getting-started service listed] -->

Click anywhere on the card with your mount path on the label to open the service's details:
Click anywhere on the card with your mount path on the label to open the service's details.

![Screenshot of the details for the getting-started service]()
<!-- TODO [Screenshot of the details for the getting-started service] -->

In the API documentation you should see the route we defined earlier (`/hello-world`) with the word `GET` next to it indicating the HTTP method it supports and the `summary` we provided on the right. By clicking on the route's path you can open the documentation for the route:
In the API documentation you should see the route we defined earlier (`/hello-world`) with the word `GET` next to it indicating the HTTP method it supports and the `summary` we provided on the right. By clicking on the route's path you can open the documentation for the route.

![Screenshot of the API docs with the hello-world route open]()
<!-- TODO [Screenshot of the API docs with the hello-world route open] -->

Note that the `description` we provided appears in the generated documentation as well as the description we added to the `response` (which should correctly indicate the content type `text/plain`, i.e. plain text).

Click the *Try it out!* button to send a request to the route and you should see an example request with the service's response: "Hello World!":
Click the *Try it out!* button to send a request to the route and you should see an example request with the service's response: "Hello World!".

![Screenshot of the API docs after the request]()
<!-- TODO [Screenshot of the API docs after the request] -->

Congratulations! You have just created, installed and used your first Foxx service.
@ -118,7 +118,7 @@ The path parameters are accessible from the `pathParams` property of the request
Note that routes with path parameters that fail to validate for the request URL will be skipped as if they wouldn't exist. This allows you to define multiple routes that are only distinguished by the schemas of their path parameters (e.g. a route taking only numeric parameters and one taking any string as a fallback).

![Screenshot of the API docs after a request to /hello/world]()
<!-- TODO [Screenshot of the API docs after a request to /hello/world] -->

Let's take this further and create a route that takes a JSON request body:
@ -150,7 +150,7 @@ Because we're passing a schema to the `response` method we don't need to explici
The `body` method works the same way as the `response` method except the schema will be used to validate the request body. If the request body can't be parsed as JSON or doesn't match the schema, Foxx will reject the request with an appropriate error response.

![Screenshot of the API docs after a request with an array of numbers]()
<!-- TODO [Screenshot of the API docs after a request with an array of numbers] -->

!SECTION Creating collections
@ -191,9 +191,9 @@ Next we need to tell our service about the script by adding it to the manifest f
The only thing that has changed is that we added a "scripts" field specifying the path of the setup script we just wrote.

Go back to the web interface and update the service with our new code, then check the *Collections* tab. If everything worked right, you should see a new collection called "myFoxxCollection":
Go back to the web interface and update the service with our new code, then check the *Collections* tab. If everything worked right, you should see a new collection called "myFoxxCollection".

![Screenshot of the Collections tab with "myFoxxCollection" in the list]()
<!-- TODO [Screenshot of the Collections tab with "myFoxxCollection" in the list] -->

!SECTION Accessing collections
@ -288,7 +288,7 @@ const keys = db._query(
You now know how to create a Foxx service from scratch, how to handle user input and how to access the database from within your Foxx service to store, retrieve and query data you store inside ArangoDB. This should allow you to build meaningful APIs for your own applications but there are many more things you can do with Foxx:

* Need to go faster? Turn on [development mode](README.md) and hack on your code right on the server.
* Need to go faster? Turn on [development mode](AtAGlance.md) and hack on your code right on the server.

* Concerned about security? You could add [authentication](Auth.md) to your service to protect access to the data before it even leaves the database.
@ -298,4 +298,4 @@ You now know how to create a Foxx service from scratch, how to handle user input
* Tired of reinventing the wheel? Learn about [dependencies](Dependencies.md).

* Everything broken? You can [write tests](Testing.md) to make sure your logic remains sound.
<!-- TODO * Everything broken? You can [write tests](Testing.md) to make sure your logic remains sound. -->
@ -65,16 +65,6 @@ The `@arangodb/foxx` module also provides the same exports as in 2.8, namely:
* `getExports` and `requireApp` from `@arangodb/foxx/manager`
* `queues` from `@arangodb/foxx/queues`

!SECTION Incompatibilities with 2.8 and earlier
Any feature not supported in 2.8 will also not work in legacy compatibility mode. When migrating from an older version of ArangoDB it is a good idea to migrate to ArangoDB 2.8 first for an easier upgrade path.

As built-in support for CoffeeScript was removed in 3.0 any service using CoffeeScript source files directly will not work in legacy compatibility mode. If you want to use an alternative language like CoffeeScript, make sure to pre-compile the raw source files to JavaScript and use the compiled JavaScript files in the service.

The `@arangodb/request` module when used with the `json` option previously overwrote the string in the `body` property of the response object with the parsed JSON body. In 2.8 this was changed so the parsed JSON body is added as the `json` property of the response object in addition to overwriting the `body` property. In 3.0 and later (including legacy compatibility mode) the `body` property is no longer overwritten and you must use the `json` property instead. Note that this only affects code using the `json` option when making the request.

The "magical comments" that could be used to supply documentation for controllers in ArangoDB 2.x are no longer supported and need to be replaced with calls to the controller's `description` and `summary` methods.

The utility module `lodash` is now available and should be used instead of `underscore`, but both modules will continue to be provided.

Additionally, any feature not supported in 2.8 will also not work in legacy compatibility mode. When migrating from an older version of ArangoDB it is a good idea to migrate to ArangoDB 2.8 first for an easier upgrade path.

**Note:** The `org/arangodb` module is aliased to the new name `@arangodb` in ArangoDB 3.0.0 and the `@arangodb` module was aliased to the old name `org/arangodb` in ArangoDB 2.8.0. Either one will work in 2.8 and 3.0 but outside of legacy services you should use `@arangodb` going forward.
Additionally please note the differences laid out in the section titled ["Migrating from pre-2.8"](Migrating2x.md#migrating-from-pre28) in the migration guide.
@ -75,7 +75,7 @@ Every service comes with a `manifest.json` file providing metadata. The followin
* **tests**: `string` or `Array<string>` (optional)

  A path or list of paths of JavaScript [tests](Testing.md) provided for this service.
  A path or list of paths of JavaScript tests provided for this service. <!-- TODO link to Testing.md -->

Additionally manifests can provide the following metadata:
@ -4,7 +4,57 @@ When migrating services from older versions of ArangoDB it is generally recommen
This chapter outlines the major differences in the Foxx API between ArangoDB 2.8 and ArangoDB 3.0.

Additionally please note the differences laid out in the section titled ["Incompatibilities with 2.8 and earlier"](LegacyMode.md#incompatibilities-with-28-and-earlier) in the chapter covering the legacy compatibility mode.
!SECTION Migrating from pre-2.8

When migrating from a version older than ArangoDB 2.8 please note that starting with ArangoDB 2.8 the behaviour of the `require` function more closely mimics the behaviour observed in Node.js and module bundlers for browsers, e.g.:

In a file `/routes/examples.js` (relative to the root folder of the service):

* `require('./my-module')` will be resolved by trying the following paths in order:

  1. `/routes/my-module` (relative to service root)
  2. `/routes/my-module.js` (relative to service root)
  3. `/routes/my-module.json` (relative to service root)
  4. `/routes/my-module/index.js` (relative to service root)
  5. `/routes/my-module/index.json` (relative to service root)

* `require('lodash')` will be resolved by trying the following sources in order:

  1. `/routes/node_modules/lodash` (relative to service root)
  2. `/node_modules/lodash` (relative to service root)
  3. ArangoDB module `lodash`
  4. Node compatibility module `lodash`
  5. Bundled NPM module `lodash`

* `require('/abs/path')` will be resolved by trying the following paths in order:

  1. `/abs/path` (relative to file system root)
  2. `/abs/path.js` (relative to file system root)
  3. `/abs/path.json` (relative to file system root)
  4. `/abs/path/index.js` (relative to file system root)
  5. `/abs/path/index.json` (relative to file system root)

This behaviour is incompatible with the source code generated by the Foxx generator in the web interface before ArangoDB 2.8.
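As a sketch, the rules above applied inside the hypothetical `/routes/examples.js`:

```js
// tried as /routes/my-module, /routes/my-module.js, /routes/my-module.json,
// /routes/my-module/index.js, /routes/my-module/index.json (in that order)
const myModule = require('./my-module');

// tried in /routes/node_modules and /node_modules first, then the
// ArangoDB, Node compatibility and bundled NPM versions of lodash
const _ = require('lodash');
```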
**Note:** The `org/arangodb` module is aliased to the new name `@arangodb` in ArangoDB 3.0.0 and the `@arangodb` module was aliased to the old name `org/arangodb` in ArangoDB 2.8.0. Either one will work in 2.8 and 3.0 but outside of legacy services you should use `@arangodb` going forward.

!SUBSECTION Foxx queue

In ArangoDB 2.6 Foxx introduced a new way to define queued jobs using Foxx scripts to replace the function-based job type definitions, which were causing problems when restarting the server. The function-based jobs have been removed in 2.7 and are no longer supported at all.

!SUBSECTION CoffeeScript

ArangoDB 3.0 no longer provides built-in support for CoffeeScript source files, even in legacy compatibility mode. If you want to use an alternative language like CoffeeScript, make sure to pre-compile the raw source files to JavaScript and use the compiled JavaScript files in the service.

!SUBSECTION The request module

The `@arangodb/request` module when used with the `json` option previously overwrote the string in the `body` property of the response object with the parsed JSON body. In 2.8 this was changed so the parsed JSON body is added as the `json` property of the response object in addition to overwriting the `body` property. In 3.0 and later (including legacy compatibility mode) the `body` property is no longer overwritten and you must use the `json` property instead. Note that this only affects code using the `json` option when making the request.
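A short sketch of the difference (the URL is a placeholder):

```js
const request = require('@arangodb/request');
const response = request.get('http://service.example/data', {json: true});

// 2.8: the parsed object also replaced response.body
// 3.0 and later: response.body stays a string, use the json property
const data = response.json;
```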
!SUBSECTION Bundled NPM Modules

The bundled NPM modules have been upgraded and may include backwards-incompatible changes, especially the API of `joi` has changed several times. If in doubt you should bundle your own versions of these modules to ensure specific versions will be used.

The utility module `lodash` is now available and should be used instead of `underscore`, but both modules will continue to be provided.

!SECTION Manifest
@ -527,12 +577,14 @@ The names of some attributes of the request object have been adjusted to more cl
* `req.user` is now called `req.arangoUser`

Some attributes have been removed:
Some attributes have been removed or changed:

* `req.cookies` has been removed entirely (use `req.cookie(name)`)

* `req.requestBody` has been removed entirely (see below)

* `req.suffix` is now a string rather than an array

Additionally the `req.server` and `req.client` attributes are no longer available. The information is now exposed in a way that can (optionally) transparently handle proxy forwarding headers:

* `req.hostname` defaults to `req.server.address`
@ -766,7 +818,7 @@ router.post('/logout', function (req, res) {
!SECTION Auth and OAuth2

The `util-simple-auth` and `util-oauth2` Foxx services have been replaced with the [Foxx auth](Auth.md) and [Foxx OAuth2](OAuth2.md) modules. It is no longer necessary to install these services as dependencies in order to use the functionality.
The `util-simple-auth` and `util-oauth2` Foxx services have been replaced with the [Foxx auth](Auth.md) and Foxx OAuth2<!-- TODO (link to docs) --> modules. It is no longer necessary to install these services as dependencies in order to use the functionality.

Old:
@ -1,27 +1,80 @@
!CHAPTER Related modules

TODO
These are some of the modules outside of Foxx you will find useful when writing Foxx services.

Additionally there are modules providing some level of compatibility with Node.js as well as a number of bundled NPM modules (like lodash and joi). For more information on these modules see [the JavaScript modules appendix](../Appendix/JavaScriptModules/README.md).

!SECTION The `@arangodb` module

TODO
This module provides access to various ArangoDB internals as well as three of the most important exports necessary to work with the database in Foxx:

!SUBSECTION The `db` object

TODO
`require('@arangodb').db`

The `db` object represents the current database and lets you access collections and run queries. For more information see the [db object reference](../Appendix/References/DBObject.html).

**Examples**

```js
const db = require('@arangodb').db;

// query results are returned as a cursor, not an array
const thirteen = db._query('RETURN 5 + 8').next();
```

!SUBSECTION The `aql` template string handler

TODO
`require('@arangodb').aql`

The `aql` function is a JavaScript template string handler. It can be used to write complex AQL queries as multi-line strings without having to worry about bindVars and the distinction between collections and regular parameters.

To use it just prefix a JavaScript template string (the ones with backticks instead of quotes) with its import name (e.g. `aql`) and pass in variables like you would with a regular template string. The string will automatically be converted into an object with `query` and `bindVars` attributes which you can pass directly to `db._query` to execute. If you pass in a collection it will be automatically recognized as a collection reference and handled accordingly.

To find out more about AQL see the [AQL documentation](../../AQL/README.md).

**Examples**

```js
const db = require('@arangodb').db;
const aql = require('@arangodb').aql;

const filterValue = 23;
const mydata = db._collection('mydata');
const result = db._query(aql`
  FOR d IN ${mydata}
  FILTER d.num > ${filterValue}
  RETURN d
`); // result is a query cursor; use result.toArray() or result.next() to consume it
```

!SUBSECTION The `errors` object

TODO
`require('@arangodb').errors`

This object provides useful objects for each error code ArangoDB might use in `ArangoError` errors. This is helpful when trying to catch specific errors raised by ArangoDB, e.g. when trying to access a document that does not exist. Each object has a `code` property corresponding to the `errorNum` found on `ArangoError` errors.

For a complete list of the error names and codes you may encounter see the [appendix on error codes](../Appendix/ErrorCodes.md).

**Examples**

```js
const errors = require('@arangodb').errors;
const mydata = require('@arangodb').db._collection('mydata');

// inside a Foxx route handler, where res is available
try {
  mydata.document('does-not-exist');
} catch (e) {
  if (e.isArangoError && e.errorNum === errors.ERROR_ARANGO_DOCUMENT_NOT_FOUND.code) {
    res.throw(404, 'Document does not exist');
  }
  res.throw(500, 'Something went wrong', e);
}
```

!SECTION The `@arangodb/request` module

TODO
`require('@arangodb/request')`

This module provides a function for making HTTP requests to external services. Note that while this allows communicating with third-party services it may affect database performance by blocking Foxx requests as ArangoDB waits for the remote service to respond. If you routinely make requests to slow external services and are not directly interested in the response it is probably a better idea to delegate the actual request/response cycle to a gateway service running outside ArangoDB.

You can find a full description of this module [in the request module appendix](../Appendix/JavaScriptModules/Request.md).
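A minimal sketch of a blocking request (the URL is a placeholder and the `statusCode` check is an assumption based on the module mirroring the Node.js request API; see the appendix linked above for the full description):

```js
const request = require('@arangodb/request');

// blocks the current V8 context until the remote service responds
const response = request.get('http://service.example/status', {json: true});
if (response.statusCode === 200) {
  // with the json option the parsed body is available as response.json
  const status = response.json;
}
```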
!SECTION The `@arangodb/general-graph` module

TODO
`require('@arangodb/general-graph')`

This module provides access to ArangoDB graph definitions and various low-level graph operations in JavaScript. For more complex queries it is probably better to use AQL but this module can be useful in your setup and teardown scripts to create and destroy graph definitions.

For more information see the [chapter on the general graph module](../Graphs/GeneralGraphs/README.md).
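A sketch of how a setup script might use it (graph and collection names are made up; `_create` and `_relation` are part of the module described in the chapter linked above):

```js
const graphModule = require('@arangodb/general-graph');

// define a graph with a single edge definition:
// "knows" edges connect vertices from "persons" to "persons"
const graph = graphModule._create('social', [
  graphModule._relation('knows', 'persons', 'persons')
]);
```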
@ -1,6 +1,8 @@
!CHAPTER Endpoints

TODO
Endpoints are returned by the `use`, `all` and HTTP verb (e.g. `get`, `post`) methods of [routers](./README.md) as well as the `use` method of the [service context](../Context.md). They can be used to attach metadata to mounted routes, middleware and child routers that affects how requests and responses are processed or provides API documentation.

Endpoints should only be used to invoke the following methods. Endpoint methods can be chained together (each method returns the endpoint itself).
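For example, a route definition might chain endpoint methods like this (a sketch; the route, header and schemas are made up and assume the `joi` module):

```js
const joi = require('joi');

router.post('/things', function (req, res) {
  res.send(req.body);
})
.header('x-session-id', joi.string().optional(), 'Optional session ID.')
.body(joi.object().required(), 'The thing to echo back.');
```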
!SECTION header
@ -8,22 +10,24 @@ TODO
Defines a request header recognized by the endpoint. Any additional non-defined headers will be treated as optional string values. The definitions will also be shown in the route details in the API documentation.

If the endpoint is a child router, all routes of that router will use this header definition unless overridden. If the endpoint is a middleware, this method has no effect.
If the endpoint is a child router, all routes of that router will use this header definition unless overridden.

**Arguments**

* **name**: `string`

  TODO
  Name of the header. This should be considered case insensitive as all header names will be converted to lowercase.

* **schema**: `Schema` (optional)

  TODO
  A schema describing the format of the header value. This can be a joi schema or anything that has a compatible `validate` method.

  The value of this header will be set to the `value` property of the validation result. A validation failure will result in an automatic 400 (Bad Request) error response.

* **description**: `string` (optional)

  TODO
  A human readable string that will be shown in the API documentation.

Returns the endpoint.
@ -40,21 +44,23 @@ router.get(/* ... */)
Defines a path parameter recognized by the endpoint. Path parameters are expected to be filled as part of the endpoint's mount path. Any additional non-defined path parameters will be treated as optional string values. The definitions will also be shown in the route details in the API documentation.

If the endpoint is a child router, all routes of that router will use this header definition unless overridden. If the endpoint is a middleware, this method has no effect.
If the endpoint is a child router, all routes of that router will use this parameter definition unless overridden.

**Arguments**

* **name**: `string`

  TODO
  Name of the parameter.

* **schema**: `Schema` (optional)

  TODO
  A schema describing the format of the parameter. This can be a joi schema or anything that has a compatible `validate` method.

  The value of this parameter will be set to the `value` property of the validation result. A validation failure will result in the route failing to match and being ignored (resulting in a 404 (Not Found) error response if no other routes match).

* **description**: `string` (optional)

  TODO
  A human readable string that will be shown in the API documentation.

Returns the endpoint.
@ -71,21 +77,23 @@ router.get('/some/:num/here', /* ... */)
Defines a query parameter recognized by the endpoint. Any additional non-defined query parameters will be treated as optional string values. The definitions will also be shown in the route details in the API documentation.

If the endpoint is a child router, all routes of that router will use this header definition unless overridden. If the endpoint is a middleware, this method has no effect.
If the endpoint is a child router, all routes of that router will use this parameter definition unless overridden.

**Arguments**

* **name**: `string`

  TODO
  Name of the parameter.

* **schema**: `Schema` (optional)

  TODO
  A schema describing the format of the parameter. This can be a joi schema or anything that has a compatible `validate` method.

  The value of this parameter will be set to the `value` property of the validation result. A validation failure will result in an automatic 400 (Bad Request) error response.

* **description**: `string` (optional)

  TODO
  A human readable string that will be shown in the API documentation.

Returns the endpoint.
@ -100,21 +108,41 @@ router.get(/* ... */)
|
|||
|
||||
`endpoint.body([model], [mimes], [description]): this`
|
||||
|
||||
TODO
|
||||
Defines the request body recognized by the endpoint. There can only be one request body definition per endpoint. The definition will also be shown in the route details in the API documentation.
|
||||
|
||||
If the endpoint is a child router, all routes of that router will use this body definition unless overridden. If the endpoint is a middleware, the request body will only be parsed once (i.e. the MIME types of the route matching the same request will be ignored but the body will still be validated again).
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **model**: `Model | Schema` (optional)
|
||||
* **model**: `Model | Schema | null` (optional)
|
||||
|
||||
TODO
|
||||
A model or joi schema describing the request body. A validation failure will result in an automatic 400 (Bad Request) error response.
|
||||
|
||||
If the value is a model with a `fromClient` method, that method will be applied to the parsed request body.
|
||||
|
||||
If the value is a schema or a model with a schema, the schema will be used to validate the request body and the `value` property of the validation result of the parsed request body will be used instead of the parsed request body itself.
|
||||
|
||||
If the value is a model or a schema and the MIME type has been omitted, the MIME type will default to JSON instead.
|
||||
|
||||
If the value is explicitly set to `null`, no request body will be expected.
|
||||
|
||||
If the value is an array containing exactly one model or schema, the request body will be treated as an array of items matching that model or schema.
|
||||
|
||||
* **mimes**: `Array<string>` (optional)
|
||||
|
||||
TODO
|
||||
An array of MIME types the route supports.
|
||||
|
||||
Common non-MIME aliases like "json" or "html" are also supported and will be expanded to the appropriate MIME type (e.g. "application/json" and "text/html").
|
||||
|
||||
If the MIME type is recognized by Foxx the request body will be parsed into the appropriate structure before being validated. Currently only JSON, `application/x-www-form-urlencoded` and multipart formats are supported in this way.
|
||||
|
||||
If the MIME type indicated in the request headers does not match any of the supported MIME types, the first MIME type in the list will be used instead.
|
||||
|
||||
Failure to parse the request body will result in an automatic 400 (Bad Request) error response.
|
||||
|
||||
* **description**: `string` (optional)
|
||||
|
||||
TODO
|
||||
A human readable string that will be shown in the API documentation.
|
||||
|
||||
Returns the endpoint.
|
||||
|
||||
|
@ -127,6 +155,9 @@ router.post('/expects/some/json', /* ... */)
|
|||
'This implies JSON.'
|
||||
);
|
||||
|
||||
router.post('/expects/nothing', /* ... */)
|
||||
.body(null); // No body allowed
|
||||
|
||||
router.post('/expects/some/plaintext', /* ... */)
|
||||
.body(['text/plain'], 'This body will be a string.');
|
||||
```
|
||||
|
|
|
@ -1,3 +1,62 @@
|
|||
!CHAPTER Middleware
|
||||
|
||||
TODO
|
||||
Middleware in Foxx refers to functions that can be mounted like routes and can manipulate the request and response objects before and after the route itself is invoked. They can also be used to control access or to provide common logic like logging etc. Unlike routes, middleware is mounted with the `use` method, like a router.
|
||||
|
||||
Instead of a function, the `use` method can also accept an object with a `register` function. That function will be passed the endpoint the middleware will be mounted at and must return the actual middleware function. This allows manipulating the endpoint before creating the middleware (e.g. to document headers, request bodies, path parameters or query parameters).
|
||||
|
||||
**Examples**
|
||||
|
||||
Restrict access to ArangoDB-authenticated users:
|
||||
|
||||
```js
|
||||
module.context.use(function (req, res, next) {
|
||||
if (!req.arangoUser) {
|
||||
res.throw(401, 'Not authenticated with ArangoDB');
|
||||
}
|
||||
next();
|
||||
});
|
||||
```
|
||||
|
||||
Trivial logging middleware:
|
||||
|
||||
```js
|
||||
module.context.use(function (req, res, next) {
|
||||
const start = Date.now();
|
||||
try {
|
||||
next();
|
||||
} finally {
|
||||
console.log(`Handled request in ${Date.now() - start}ms`);
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
More complex example for header-based sessions:
|
||||
|
||||
```js
|
||||
const joi = require('joi');
const sessions = module.context.collection('sessions');
|
||||
module.context.use({
|
||||
register (endpoint) {
|
||||
endpoint.header('x-session-id', joi.string().optional(), 'The session ID.');
|
||||
return function (req, res, next) {
|
||||
const sid = req.get('x-session-id');
|
||||
if (sid) {
|
||||
try {
|
||||
req.session = sessions.document(sid);
|
||||
} catch (e) {
|
||||
delete req.headers['x-session-id'];
|
||||
}
|
||||
}
|
||||
next();
|
||||
if (req.session) {
|
||||
if (req.session._rev) {
|
||||
sessions.replace(req.session, req.session);
|
||||
res.set('x-session-id', req.session._key);
|
||||
} else {
|
||||
const meta = sessions.save(req.session);
|
||||
res.set('x-session-id', meta._key);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
});
|
||||
```
|
||||
|
|
|
@ -2,15 +2,17 @@
|
|||
|
||||
`const createRouter = require('@arangodb/foxx/router');`
|
||||
|
||||
TODO
|
||||
Routers let you define routes that extend ArangoDB's HTTP API with custom endpoints.
|
||||
|
||||
Routers need to be mounted to expose their HTTP routes. See [service context](../Context.md).
|
||||
Routers need to be mounted using the `use` method of a [service context](../Context.md) to expose their HTTP routes at a service's mount path.
|
||||
|
||||
You can pass routers between services mounted in the same database [as dependencies](../Dependencies.md). You can even nest routers within each other.
|
||||
|
||||
!SECTION Creating a router
|
||||
|
||||
`createRouter(): Router`
|
||||
|
||||
This returns a new, clean Router object that has not yet been mounted in the service and can be exported like any other object.
|
||||
This returns a new, clean router object that has not yet been mounted in the service and can be exported like any other object.
|
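A minimal sketch of creating a router and mounting it in the service's main file:

```js
'use strict';
const createRouter = require('@arangodb/foxx/router');
const router = createRouter();

// Expose the router's routes at the service's mount path.
module.context.use(router);
```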
||||
|
||||
!SECTION Request handlers
|
||||
|
||||
|
@ -26,7 +28,7 @@ This returns a new, clean Router object that has not yet been mounted in the ser
|
|||
|
||||
`router.all([path], handler, [name]): Endpoint`
|
||||
|
||||
TODO
|
||||
These methods let you specify routes on the router. The `all` method defines a route that will match any supported HTTP verb, the other methods define routes that only match the HTTP verb with the same name.
|
||||
|
||||
**Arguments**
|
||||
|
||||
|
@ -50,26 +52,26 @@ TODO
|
|||
|
||||
A name that can be used to generate URLs for the endpoint. For more information see the `reverse` method of the [request object](Request.md).
|
||||
|
||||
TODO
|
||||
Returns an [Endpoint](Endpoints.md) for the route.
|
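For illustration, a hedged sketch of defining request handlers (the handler signature follows the middleware examples elsewhere in this documentation):

```js
// Only matches GET requests to /hello.
router.get('/hello', function (req, res) {
  res.json({hello: 'world'});
});

// Matches any supported HTTP verb for /echo-method.
router.all('/echo-method', function (req, res) {
  res.send(`You used the ${req.method} method.`);
});
```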
||||
|
||||
!SECTION Mounting child routers and middleware
|
||||
|
||||
`router.use([path], handler, [name]): Endpoint`
|
||||
|
||||
TODO
|
||||
The `use` method lets you mount a child router or middleware at a given path.
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **path**: `string` (optional)
|
||||
|
||||
TODO
|
||||
The path of the request handler relative to the base path the router is mounted at. If omitted, the request handler will handle requests to the base path of the router. For information on defining dynamic routes see the section on path parameters in the [chapter on router endpoints](Endpoints.md).
|
||||
|
||||
* **handler**: `Router | Middleware`
|
||||
|
||||
TODO
|
||||
An unmounted router object or a [middleware](Middleware.md).
|
||||
|
||||
* **name**: `string` (optional)
|
||||
|
||||
A name that can be used to generate URLs for endpoints of this router. For more information see the `reverse` method of the [request object](Request.md). Has no effect if *handler* is a middleware.
|
||||
|
||||
TODO
|
||||
Returns an [Endpoint](Endpoints.md) for the middleware or child router.
|
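A short sketch of nesting a child router (names are illustrative):

```js
const createRouter = require('@arangodb/foxx/router');
const childRouter = createRouter();

childRouter.get('/status', function (req, res) {
  res.json({ok: true});
});

// All routes of childRouter are now reachable below /child.
router.use('/child', childRouter);
```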
||||
|
|
|
@ -289,3 +289,21 @@ const ranges = req.range(100);
|
|||
console.log(ranges); // [{start: 40, end: 80}]
|
||||
console.log(ranges.type); // "bytes"
|
||||
```
|
||||
|
||||
!SECTION reverse
|
||||
|
||||
`req.reverse(name, [params]): string`
|
||||
|
||||
Generates a URL for the route with the given name, using the given parameters. The route must have been given a name when it was defined (see the *name* argument of the router's request handler methods).
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **name**: `string`
|
||||
|
||||
Name of the route to generate a URL for.
|
||||
|
||||
* **params**: `object` (optional)
|
||||
|
||||
An object with properties matching the path and query parameters the named route expects.
|
||||
|
||||
Returns the generated URL.
|
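A hedged sketch (the route name `'detail'` and its parameter are illustrative):

```js
router.get('/items/:id', function (req, res) {
  // Generate a URL for this route, e.g. to expose a self link.
  res.json({self: req.reverse('detail', {id: req.pathParams.id})});
}, 'detail');
```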
||||
|
|
|
@ -1,3 +1,533 @@
|
|||
!CHAPTER Foxx scripts and queued jobs
|
||||
|
||||
TODO
|
||||
Foxx lets you define scripts that can be executed as part of the installation and removal process, invoked manually or scheduled to run at a later time using the job queue.
|
||||
|
||||
To register your script, just add a `scripts` section to your [service manifest](Manifest.md):
|
||||
|
||||
```json
|
||||
{
|
||||
...
|
||||
"scripts": {
|
||||
"setup": "scripts/setup.js",
|
||||
"send-mail": "scripts/send-mail.js"
|
||||
}
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
The scripts you define in your service manifest can be invoked from the web interface in the service's settings page with the "Scripts" dropdown.
|
||||
|
||||
<!-- TODO (Link to admin docs) -->
|
||||
|
||||
You can also use the scripts as queued jobs:
|
||||
|
||||
```js
|
||||
'use strict';
|
||||
const queues = require('@arangodb/foxx/queues');
|
||||
queues.get('default').push(
|
||||
{mount: '/my-service-mount-point', name: 'send-mail'},
|
||||
{to: 'user@example.com', body: 'Hello'}
|
||||
);
|
||||
```
|
||||
|
||||
!SECTION Script arguments and return values
|
||||
|
||||
If the script was invoked with any arguments, you can access them using the `module.context.argv` array.
|
||||
|
||||
To return data from your script, you can assign the data to `module.exports` as usual. Please note that this data will be converted to JSON.
|
||||
|
||||
Any errors raised by the script will be handled depending on how the script was invoked:
|
||||
|
||||
* if the script was invoked from the HTTP API (e.g. using the web interface), it will return an error response using the exception's `statusCode` property (or 500 if not specified).
|
||||
* if the script was invoked from a Foxx job queue, the job's failure counter will be incremented and the job will be rescheduled or marked as failed if no attempts remain.
|
||||
|
||||
**Examples**
|
||||
|
||||
Let's say you want to define a script that takes two numeric values and returns the result of multiplying them:
|
||||
|
||||
```js
|
||||
'use strict';
|
||||
const assert = require('assert');
|
||||
const argv = module.context.argv;
|
||||
|
||||
assert.equal(argv.length, 2, 'Expected exactly two arguments');
|
||||
assert.equal(typeof argv[0], 'number', 'Expected first argument to be a number');
|
||||
assert.equal(typeof argv[1], 'number', 'Expected second argument to be a number');
|
||||
|
||||
module.exports = argv[0] * argv[1];
|
||||
```
|
||||
|
||||
!SECTION Lifecycle Scripts
|
||||
|
||||
Foxx recognizes lifecycle scripts if they are defined and will invoke them during the installation, update and removal of the service.
|
||||
|
||||
The following scripts are currently recognized as lifecycle scripts:
|
||||
|
||||
!SUBSECTION Setup Script
|
||||
|
||||
The setup script will be executed without arguments during the installation of your Foxx service.
|
||||
|
||||
The setup script is typically used to create collections your service needs or insert seed data like initial administrative user accounts and so on.
|
||||
|
||||
**Examples**
|
||||
|
||||
```js
|
||||
'use strict';
|
||||
const db = require('@arangodb').db;
|
||||
const textsCollectionName = module.context.collectionName('texts');
|
||||
// `textsCollectionName` is now the prefixed name of this service's "texts" collection.
|
||||
// e.g. "example_texts" if the service has been mounted at `/example`
|
||||
|
||||
if (db._collection(textsCollectionName) === null) {
|
||||
const collection = db._create(textsCollectionName);
|
||||
|
||||
collection.save({text: 'entry 1 from collection texts'});
|
||||
collection.save({text: 'entry 2 from collection texts'});
|
||||
collection.save({text: 'entry 3 from collection texts'});
|
||||
} else {
|
||||
console.log(`collection ${textsCollectionName} already exists. Leaving it untouched.`);
|
||||
}
|
||||
```
|
||||
|
||||
!SUBSECTION Teardown Script
|
||||
|
||||
The teardown script will be executed without arguments during the removal of your Foxx service.
|
||||
|
||||
It can also optionally be executed before upgrading a service.
|
||||
|
||||
This script typically removes the collections and/or documents created by your service's setup script.
|
||||
|
||||
**Examples**
|
||||
|
||||
```js
|
||||
'use strict';
|
||||
const db = require('@arangodb').db;
|
||||
|
||||
const textsCollection = module.context.collection('texts');
|
||||
|
||||
if (textsCollection) {
|
||||
textsCollection.drop();
|
||||
}
|
||||
```
|
||||
|
||||
!SECTION Queues
|
||||
|
||||
Foxx allows defining job queues that let you perform slow or expensive actions asynchronously. These queues can be used to send e-mails, call external APIs or perform other actions that you do not want to perform directly or want to retry on failure.
|
||||
|
||||
@startDocuBlock foxxQueues
|
||||
|
||||
Please note that Foxx job queues are database-specific. Queues and jobs are always relative to the database in which they are created or accessed.
|
||||
|
||||
@startDocuBlock foxxQueuesPollInterval
|
||||
|
||||
For the low-level functionality see [the chapter on the task management module](../Appendix/ModuleTasks/README.md).
|
||||
|
||||
!SUBSECTION Creating or updating a queue
|
||||
|
||||
`queues.create(name, [maxWorkers]): Queue`
|
||||
|
||||
Returns the queue for the given name. If the queue does not exist, a new queue with the given name will be created. If a queue with the given name already exists and *maxWorkers* is set, the queue's maximum number of workers will be updated.
|
||||
The queue will be created in the current database.
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **name**: `string`
|
||||
|
||||
Name of the queue to create.
|
||||
|
||||
* **maxWorkers**: `number` (Default: `1`)
|
||||
|
||||
The maximum number of workers.
|
||||
|
||||
**Examples**
|
||||
|
||||
```js
|
||||
// Create a queue with the default number of workers (i.e. one)
|
||||
const queue1 = queues.create("my-queue");
|
||||
// Create a queue with a given number of workers
|
||||
const queue2 = queues.create("another-queue", 2);
|
||||
// Update the number of workers of an existing queue
|
||||
const queue3 = queues.create("my-queue", 10);
|
||||
// queue1 and queue3 refer to the same queue
|
||||
assertEqual(queue1, queue3);
|
||||
```
|
||||
|
||||
!SUBSECTION Fetching an existing queue
|
||||
|
||||
`queues.get(name): Queue`
|
||||
|
||||
Returns the queue for the given name. If the queue does not exist an exception is thrown instead.
|
||||
|
||||
The queue will be looked up in the current database.
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **name**: `string`
|
||||
|
||||
Name of the queue to fetch.
|
||||
|
||||
**Examples**
|
||||
|
||||
If the queue does not yet exist an exception is thrown:
|
||||
|
||||
```js
|
||||
queues.get("some-queue");
|
||||
// Error: Queue does not exist: some-queue
|
||||
// at ...
|
||||
```
|
||||
|
||||
Otherwise the queue will be returned:
|
||||
|
||||
```js
|
||||
const queue1 = queues.create("some-queue");
|
||||
const queue2 = queues.get("some-queue");
|
||||
assertEqual(queue1, queue2);
|
||||
```
|
||||
|
||||
!SUBSECTION Deleting a queue
|
||||
|
||||
`queues.delete(name): boolean`
|
||||
|
||||
Returns `true` if the queue was deleted successfully. If the queue did not exist, it returns `false` instead.
|
||||
The queue will be looked up and deleted in the current database.
|
||||
|
||||
When a queue is deleted, jobs on that queue will no longer be executed.
|
||||
|
||||
Deleting a queue will not delete any jobs on that queue.
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **name**: `string`
|
||||
|
||||
Name of the queue to delete.
|
||||
|
||||
**Examples**
|
||||
|
||||
```js
|
||||
const queue = queues.create("my-queue");
|
||||
queues.delete("my-queue"); // true
|
||||
queues.delete("my-queue"); // false
|
||||
```
|
||||
|
||||
!SUBSECTION Adding a job to a queue
|
||||
|
||||
`queue.push(script, data, [opts]): string`
|
||||
|
||||
The job will be added to the specified queue in the current database.
|
||||
|
||||
Returns the job id.
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **script**: `object`
|
||||
|
||||
A job type definition, consisting of an object with the following properties:
|
||||
|
||||
* **name**: `string`
|
||||
|
||||
Name of the script that will be invoked.
|
||||
|
||||
* **mount**: `string`
|
||||
|
||||
Mount path of the service that defines the script.
|
||||
|
||||
* **backOff**: `Function | number` (Default: `1000`)
|
||||
|
||||
Either a function that takes the number of times the job has failed before as input and returns the number of milliseconds to wait before trying the job again, or the delay to be used to calculate an [exponential back-off](https://en.wikipedia.org/wiki/Exponential_backoff), or `0` for no delay.
|
||||
|
||||
* **maxFailures**: `number | Infinity` (Default: `0`)
|
||||
|
||||
Number of times a single run of a job will be re-tried before it is marked as `"failed"`. A negative value or `Infinity` means that the job will be re-tried on failure indefinitely.
|
||||
|
||||
* **schema**: `Schema` (optional)
|
||||
|
||||
Schema to validate a job's data against before enqueuing the job.
|
||||
|
||||
* **preprocess**: `Function` (optional)
|
||||
|
||||
Function to pre-process a job's (validated) data before serializing it in the queue.
|
||||
|
||||
* **repeatTimes**: `number` (Default: `0`)
|
||||
|
||||
If set to a positive number, the job will be repeated this many times (not counting recovery when using *maxFailures*). If set to a negative number or `Infinity`, the job will be repeated indefinitely. If set to `0` the job will not be repeated.
|
||||
|
||||
* **repeatUntil**: `number | Date` (optional)
|
||||
|
||||
If the job is set to automatically repeat, this can be set to a timestamp in milliseconds (or `Date` instance) after which the job will no longer repeat. Setting this value to zero, a negative value or `Infinity` has no effect.
|
||||
|
||||
* **repeatDelay**: `number` (Default: `0`)
|
||||
|
||||
If the job is set to automatically repeat, this can be set to a non-negative number of milliseconds by which the job will be delayed before it is started again.
|
||||
|
||||
* **data**: `any`
|
||||
|
||||
Job data of the job; must be serializable to JSON.
|
||||
|
||||
* **opts**: `object` (optional)
|
||||
|
||||
Object with any of the following properties:
|
||||
|
||||
* **success**: `Function` (optional)
|
||||
|
||||
Function to be called after the job has been completed successfully.
|
||||
|
||||
* **failure**: `Function` (optional)
|
||||
|
||||
Function to be called after the job has failed too many times.
|
||||
|
||||
* **delayUntil**: `number | Date` (Default: `Date.now()`)
|
||||
|
||||
Timestamp in milliseconds (or `Date` instance) until which the execution of the job should be delayed.
|
||||
|
||||
* **backOff**: `Function | number` (Default: `1000`)
|
||||
|
||||
See *script.backOff*.
|
||||
|
||||
* **maxFailures**: `number | Infinity` (Default: `0`)
|
||||
|
||||
See *script.maxFailures*.
|
||||
|
||||
* **repeatTimes**: `number` (Default: `0`)
|
||||
|
||||
See *script.repeatTimes*.
|
||||
|
||||
* **repeatUntil**: `number | Date` (optional)
|
||||
|
||||
See *script.repeatUntil*.
|
||||
|
||||
* **repeatDelay**: `number` (Default: `0`)
|
||||
|
||||
See *script.repeatDelay*.
|
||||
|
||||
Note that if you pass a function for the *backOff* calculation, *success* callback or *failure* callback options, the function will be serialized to the database as a string and therefore must not rely on any external scope or external variables.
|
||||
|
||||
When the job is set to automatically repeat, the *failure* callback will only be executed when a run of the job has failed more than *maxFailures* times. Note that if the job fails and *maxFailures* is set, it will be rescheduled according to the *backOff* until it has either failed too many times or completed successfully before being scheduled according to the *repeatDelay* again. Recovery attempts by *maxFailures* do not count towards *repeatTimes*.
|
||||
|
||||
The *success* and *failure* callbacks receive the following arguments:
|
||||
|
||||
* **result**: `any`
|
||||
|
||||
The return value of the script for the current run of the job.
|
||||
|
||||
* **jobData**: `any`
|
||||
|
||||
The data passed to this method.
|
||||
|
||||
* **job**: `object`
|
||||
|
||||
ArangoDB document representing the job's current state.
|
||||
|
||||
**Examples**
|
||||
|
||||
Let's say we have a service mounted at `/mailer` that provides a script called `send-mail`:
|
||||
|
||||
```js
|
||||
'use strict';
|
||||
const queues = require('@arangodb/foxx/queues');
|
||||
const queue = queues.create('my-queue');
|
||||
queue.push(
|
||||
{mount: '/mailer', name: 'send-mail'},
|
||||
{to: 'hello@example.com', body: 'Hello world'}
|
||||
);
|
||||
```
|
||||
|
||||
This will *not* work, because `log` was defined outside the callback function (the callback must be serializable to a string):
|
||||
|
||||
```js
|
||||
// WARNING: THIS DOES NOT WORK!
|
||||
'use strict';
|
||||
const queues = require('@arangodb/foxx/queues');
|
||||
const queue = queues.create('my-queue');
|
||||
const log = require('console').log; // outside the callback's function scope
|
||||
queue.push(
|
||||
{mount: '/mailer', name: 'send-mail'},
|
||||
{to: 'hello@example.com', body: 'Hello world'},
|
||||
{success: function () {
|
||||
log('Yay!'); // throws 'log is not defined'
|
||||
}}
|
||||
);
|
||||
```
|
||||
|
||||
Here's an example of a job that will be executed every 5 seconds until tomorrow:
|
||||
|
||||
```js
|
||||
'use strict';
|
||||
const queues = require('@arangodb/foxx').queues;
|
||||
const queue = queues.create('my-queue');
|
||||
queue.push(
|
||||
{mount: '/mailer', name: 'send-mail'},
|
||||
{to: 'hello@example.com', body: 'Hello world'},
|
||||
{
|
||||
repeatTimes: Infinity,
|
||||
repeatUntil: Date.now() + (24 * 60 * 60 * 1000),
|
||||
repeatDelay: 5 * 1000
|
||||
}
|
||||
);
|
||||
```
|
||||
|
||||
!SUBSECTION Fetching a job from the queue
|
||||
|
||||
`queue.get(jobId): Job`
|
||||
|
||||
Creates a proxy object representing a job with the given job id.
|
||||
|
||||
The job will be looked up in the specified queue in the current database.
|
||||
|
||||
Returns the job for the given jobId. Properties of the job object will be fetched whenever they are referenced and cannot be modified.
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **jobId**: `string`
|
||||
|
||||
The id of the job to create a proxy object for.
|
||||
|
||||
**Examples**
|
||||
```js
|
||||
const jobId = queue.push({mount: '/logger', name: 'log'}, 'Hello World!');
|
||||
const job = queue.get(jobId);
|
||||
assertEqual(job.id, jobId);
|
||||
```
|
||||
|
||||
!SUBSECTION Deleting a job from the queue
|
||||
|
||||
`queue.delete(jobId): boolean`
|
||||
|
||||
Deletes a job with the given job id.
|
||||
The job will be looked up and deleted in the specified queue in the current database.
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **jobId**: `string`
|
||||
|
||||
The id of the job to delete.
|
||||
|
||||
Returns `true` if the job was deleted successfully. If the job did not exist it returns `false` instead.
|
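**Examples**

```js
const jobId = queue.push({mount: '/logger', name: 'log'}, 'Hello World!');
queue.delete(jobId); // true
queue.delete(jobId); // false
```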
||||
|
||||
!SUBSECTION Fetching an array of jobs in a queue
|
||||
|
||||
**Examples**
|
||||
|
||||
```js
|
||||
const logScript = {mount: '/logger', name: 'log'};
|
||||
queue.push(logScript, 'Hello World!', {delayUntil: Date.now() + 50});
|
||||
assertEqual(queue.pending(logScript).length, 1);
|
||||
// 50 ms later...
|
||||
assertEqual(queue.pending(logScript).length, 0);
|
||||
assertEqual(queue.progress(logScript).length, 1);
|
||||
// even later...
|
||||
assertEqual(queue.progress(logScript).length, 0);
|
||||
assertEqual(queue.complete(logScript).length, 1);
|
||||
```
|
||||
|
||||
!SUBSUBSECTION Fetching an array of pending jobs in a queue
|
||||
|
||||
`queue.pending([script]): Array<string>`
|
||||
|
||||
Returns an array of job ids of jobs in the given queue with the status `"pending"`, optionally filtered by the given job type.
|
||||
The jobs will be looked up in the specified queue in the current database.
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **script**: `object` (optional)
|
||||
|
||||
An object with the following properties:
|
||||
|
||||
* **name**: `string`
|
||||
|
||||
Name of the script.
|
||||
|
||||
* **mount**: `string`
|
||||
|
||||
Mount path of the service defining the script.
|
||||
|
||||
!SUBSUBSECTION Fetching an array of jobs that are currently in progress
|
||||
|
||||
`queue.progress([script]): Array<string>`
|
||||
|
||||
Returns an array of job ids of jobs in the given queue with the status `"progress"`, optionally filtered by the given job type.
|
||||
The jobs will be looked up in the specified queue in the current database.
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **script**: `object` (optional)
|
||||
|
||||
An object with the following properties:
|
||||
|
||||
* **name**: `string`
|
||||
|
||||
Name of the script.
|
||||
|
||||
* **mount**: `string`
|
||||
|
||||
Mount path of the service defining the script.
|
||||
|
||||
!SUBSUBSECTION Fetching an array of completed jobs in a queue
|
||||
|
||||
`queue.complete([script]): Array<string>`
|
||||
|
||||
Returns an array of job ids of jobs in the given queue with the status `"complete"`, optionally filtered by the given job type.
|
||||
The jobs will be looked up in the specified queue in the current database.
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **script**: `object` (optional)
|
||||
|
||||
An object with the following properties:
|
||||
|
||||
* **name**: `string`
|
||||
|
||||
Name of the script.
|
||||
|
||||
* **mount**: `string`
|
||||
|
||||
Mount path of the service defining the script.
|
||||
|
||||
!SUBSUBSECTION Fetching an array of failed jobs in a queue
|
||||
|
||||
`queue.failed([script]): Array<string>`
|
||||
|
||||
Returns an array of job ids of jobs in the given queue with the status `"failed"`, optionally filtered by the given job type.
|
||||
The jobs will be looked up in the specified queue in the current database.
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **script**: `object` (optional)
|
||||
|
||||
An object with the following properties:
|
||||
|
||||
* **name**: `string`
|
||||
|
||||
Name of the script.
|
||||
|
||||
* **mount**: `string`
|
||||
|
||||
Mount path of the service defining the script.
|
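**Examples**

A sketch of inspecting jobs that have permanently failed (the script name and mount path are illustrative):

```js
const failedIds = queue.failed({mount: '/mailer', name: 'send-mail'});
failedIds.forEach(function (jobId) {
  console.log(`Job ${jobId} failed permanently`);
});
```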
||||
|
||||
!SUBSUBSECTION Fetching an array of all jobs in a queue
|
||||
|
||||
`queue.all([script]): Array<string>`
|
||||
|
||||
Returns an array of job ids of all jobs in the given queue, optionally filtered by the given job type.
|
||||
The jobs will be looked up in the specified queue in the current database.
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **script**: `object` (optional)
|
||||
|
||||
An object with the following properties:
|
||||
|
||||
* **name**: `string`
|
||||
|
||||
Name of the script.
|
||||
|
||||
* **mount**: `string`
|
||||
|
||||
Mount path of the service defining the script.
|
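**Examples**

```js
// List every job for the log script, regardless of status.
const logScript = {mount: '/logger', name: 'log'};
console.log(`${queue.all(logScript).length} job(s) on this queue`);
```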
||||
|
||||
!SUBSECTION Aborting a job
|
||||
|
||||
`job.abort(): void`
|
||||
|
||||
Aborts a non-completed job.
|
||||
|
||||
Sets a job's status to `"failed"` if it is not already `"complete"`, without calling the job's *onFailure* callback.
|
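**Examples**

A sketch: enqueue a delayed job, then abort it before it has a chance to run.

```js
const jobId = queue.push(
  {mount: '/logger', name: 'log'},
  'Hello World!',
  {delayUntil: Date.now() + 60000}
);
queue.get(jobId).abort();
```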
||||
|
|
|
@ -1,12 +1,14 @@
|
|||
!CHAPTER Session Transports
|
||||
|
||||
TODO
|
||||
Session transports are used by the sessions middleware to store and retrieve session identifiers in requests and responses. Session transports must implement the `get` and/or `set` methods and can optionally implement the `clear` method.
|
||||
|
||||
!SECTION get
|
||||
|
||||
`transport.get(request): string`
|
||||
`transport.get(request): string | null`
|
||||
|
||||
TODO
|
||||
Retrieves a session identifier from a request object.
|
||||
|
||||
If present this method will automatically be invoked for each transport until a transport returns a session identifier.
|
||||
|
||||
**Arguments**
|
||||
|
||||
|
@ -14,17 +16,23 @@ TODO
|
|||
|
||||
[Request object](../../Router/Request.md) to extract a session identifier from.
|
||||
|
||||
TODO
|
||||
Returns the session identifier or `null` if the transport can not find a session identifier in the request.
|
||||
|
||||
**Examples**
|
||||
|
||||
TODO
|
||||
```js
|
||||
get(req) {
|
||||
return req.get('x-session-id') || null;
|
||||
}
|
||||
```
|
||||
|
||||
!SECTION set
|
||||
|
||||
`transport.set(response, sid): void`
|
||||
|
||||
TODO
|
||||
Attaches a session identifier to a response object.
|
||||
|
||||
If present this method will automatically be invoked at the end of a request regardless of whether the session was modified or not.
|
||||
|
||||
**Arguments**
|
||||
|
||||
|
@ -36,17 +44,24 @@ TODO
|
|||
|
||||
Session identifier to attach to the response.
|
||||
|
||||
TODO
|
||||
Returns nothing.
|
||||
|
||||
**Examples**
|
||||
|
||||
TODO
|
||||
```js
|
||||
set(res, sid) {
|
||||
res.set('x-session-id', sid);
|
||||
}
|
||||
```
|
||||
|
||||
!SECTION clear
|
||||
|
||||
`transport.clear(response): void`
|
||||
|
||||
TODO
|
||||
Attaches a payload indicating that the session has been cleared to the response object.
|
||||
This can be used to clear a session cookie when the session has been destroyed (e.g. during logout).
|
||||
|
||||
If present this method will automatically be invoked instead of `set` when the `req.session` attribute was removed by the route handler.
|
||||
|
||||
**Arguments**
|
||||
|
||||
|
@ -54,8 +69,4 @@ TODO
|
|||
|
||||
Response object to remove the session identifier from.
|
||||
|
||||
TODO
|
||||
|
||||
**Examples**
|
||||
|
||||
TODO
|
||||
Returns nothing.
|
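**Examples**

A minimal sketch in the style of the `get` and `set` examples above; overwriting the header with an empty value is an assumption of this sketch, not prescribed behavior:

```js
clear(res) {
  res.set('x-session-id', '');
}
```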
||||
|
|
|
@ -1,3 +0,0 @@
|
|||
!CHAPTER Testing Foxx services
|
||||
|
||||
TODO
|
|
@ -3,4 +3,4 @@
|
|||
Setting up a cluster can be an intimidating task. You have to deal with
|
||||
firewalls, ports, different types of machines, and the like. ArangoDB
|
||||
is prepared to deal with all kinds of different setups and
|
||||
requirements.
|
||||
requirements. Check out the [Deployment](../../Deployment/README.md) chapter.
|
||||
|
|
|
@ -3,222 +3,13 @@
|
|||
The following sections describe how to compile and build the ArangoDB from
|
||||
scratch. ArangoDB will compile on most Linux and Mac OS X systems. We assume
|
||||
that you use the GNU C/C++ compiler or clang/clang++ to compile the
|
||||
source. ArangoDB has been tested with the GNU C/C++ compiler and clang/clang++,
|
||||
but should be able to compile with any Posix-compliant, C++11-enabled compiler.
|
||||
Please let us know whether you successfully compiled it with another C/C++
|
||||
compiler.
|
||||
source. ArangoDB has been tested with these compilers, but should be able to
|
||||
compile with any Posix-compliant, C++11-enabled compiler. Please let us know
|
||||
whether you successfully compiled it with another C/C++ compiler.
|
||||
|
||||
By default, cloning the GitHub repository will check out **devel**. This version
|
||||
contains the development version of the ArangoDB. Use this branch if you want
|
||||
to make changes to the ArangoDB source.
|
||||
|
||||
!SECTION Devel Version
|
||||
|
||||
Note: a separate [blog
|
||||
article](http://jsteemann.github.io/blog/2014/10/16/how-to-compile-arangodb-from-source/)
|
||||
is available that describes how to compile ArangoDB from source on Ubuntu.
|
||||
|
||||
!SUBSECTION Basic System Requirements
|
||||
|
||||
Verify that your system contains
|
||||
|
||||
* git (to obtain the sources)
|
||||
* a modern, C++11-capable C/C++ compiler with full regex support:
|
||||
* GNU "gcc" and "g++" version 4.9.0 or higher
|
||||
* "clang" and "clang++" version 3.6 or higher
|
||||
* Visual C++ 2015 [(see the "compiling under windows" cookbook for more details)](https://docs.arangodb.com/cookbook/Compiling/Windows30.html)
|
||||
* cmake
|
||||
* GNU make
|
||||
* Python, version 2 (needed by gyp for V8)
|
||||
* the OpenSSL library, version 1.0.1g or higher (development package)
|
||||
* jemalloc or tcmalloc development packages
|
||||
* the GNU scanner generator FLEX, at least version 2.5.35 (optional)
|
||||
* the GNU parser generator BISON, at least version 2.4 (optional)
|
||||
|
||||
Most Linux systems already supply RPMs or DPKGs for these packages.
|
||||
Some older distributions, for example Ubuntu 12.04 or CentOS 5, provide only very outdated
|
||||
versions of compilers, FLEX and BISON. In that case you need to compile
|
||||
newer versions of the programs and/or libraries.
|
||||
|
||||
!SUBSECTION Download the Source
|
||||
|
||||
Download the latest source using ***git***:
|
||||
|
||||
unix> git clone git://github.com/arangodb/arangodb.git
|
||||
|
||||
This will automatically clone the **devel** branch.
|
||||
|
||||
Note: if you only plan to compile ArangoDB locally and do not want to modify or push
|
||||
any changes, you can speed up cloning substantially by using the *--single-branch* and
|
||||
*--depth* parameters for the clone command as follows:
|
||||
|
||||
unix> git clone --single-branch --depth 1 git://github.com/arangodb/arangodb.git
|
||||
|
||||
!SUBSECTION Setup
|
||||
|
||||
Switch into the ArangoDB directory
|
||||
|
||||
unix> cd ArangoDB
|
||||
unix> mkdir build
|
||||
unix> cd build
|
||||
|
||||
In order to generate the build environment please execute
|
||||
|
||||
unix> cmake ..
|
||||
|
||||
to set up the makefiles. This will check the various system characteristics and
|
||||
installed libraries. If you installed the compiler in a non-standard location, you may need to specify it:
|
||||
|
||||
cmake -DCMAKE_C_COMPILER=/opt/bin/gcc -DCMAKE_CXX_COMPILER=/opt/bin/g++ ..
|
||||
|
||||
If you compile on Mac OS X, you should add the following options to the cmake command:
|
||||
|
||||
cmake .. -DOPENSSL_ROOT_DIR=/usr/local/opt/openssl -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_OSX_DEPLOYMENT_TARGET=10.11
|
||||
|
||||
If you also plan to make changes to the source code of ArangoDB, you should compile with the `Debug` target.
|
||||
The `Debug` target enables additional sanity checks etc. which would slow down production binaries.
|
||||
|
||||
Other options valuable for development:
|
||||
|
||||
-DARANGODB_ENABLE_MAINTAINER_MODE
|
||||
|
||||
Needed if you plan to make changes to the AQL language (which is implemented using the lexer and parser
|
||||
files `arangod/Aql/grammar.y` and `arangod/Aql/tokens.ll`); in that case your system has to provide the tools FLEX and BISON.
|
||||
|
||||
-DARANGODB_ENABLE_BACKTRACE
|
||||
|
||||
(requires the maintainer mode) Attaches C++ stacktraces to your exceptions.
|
||||
This can be useful to locate more quickly the place where an exception or an assertion was thrown.
|
||||
|
||||
|
||||
scripts. It allows running ArangoDB directly from the compile directory, without the
|
||||
need for a *make install* command or specifying many configuration parameters.
|
||||
When used, you can start ArangoDB using this command:
|
||||
|
||||
bin/arangod /tmp/database-dir
|
||||
|
||||
ArangoDB will then automatically use the configuration from file *etc/relative/arangod.conf*.
|
||||
|
||||
-DUSE_FAILURE_TESTS
|
||||
|
||||
This option activates additional code in the server that intentionally makes the
|
||||
server crash or misbehave (e.g. by pretending the system ran out of
|
||||
memory). This option is useful for writing tests.
|
||||
|
||||
By default the libc allocator is chosen. If your system offers jemalloc, it will be
|
||||
preferred over tcmalloc and the system allocator.
|
||||
|
||||
!SUBSUBSECTION shared memory
|
||||
Gyp is used as the makefile generator by V8. Gyp requires shared memory to be available,
|
||||
which may not be the case if you e.g. compile in a chroot. You can make it available like this:
|
||||
|
||||
none /opt/chroots/ubuntu_precise_x64/dev/shm tmpfs rw,nosuid,nodev,noexec 0 2
|
||||
devpts /opt/chroots/ubuntu_precise_x64/dev/pts devpts gid=5,mode=620 0 0
|
||||
|
||||
|
||||
!SUBSECTION Compile
|
||||
|
||||
Compile the programs (server, client, utilities) by executing
|
||||
|
||||
make
|
||||
|
||||
This will compile ArangoDB and create a binary of the server in
|
||||
|
||||
./bin/arangod
|
||||
|
||||
!SUBSECTION Test
|
||||
|
||||
Create an empty directory
|
||||
|
||||
unix> mkdir /tmp/database-dir
|
||||
|
||||
Check the binary by starting it using the command line.
|
||||
|
||||
unix> ./bin/arangod -c etc/relative/arangod.conf --server.endpoint tcp://127.0.0.1:8529 /tmp/database-dir
|
||||
|
||||
This will start up ArangoDB and listen for HTTP requests on port 8529 bound
|
||||
to IP address 127.0.0.1. You should see the startup messages similar to the
|
||||
following:
|
||||
|
||||
```
|
||||
2016-06-01T12:47:29Z [29266] INFO ArangoDB xxx ...
|
||||
2016-06-01T12:47:29Z [29266] INFO using endpoint 'tcp://127.0.0.1:8529' for non-encrypted requests
|
||||
2016-06-01T12:47:30Z [29266] INFO Authentication is turned on
|
||||
2016-06-01T12:47:30Z [29266] INFO ArangoDB (version xxx) is ready for business. Have fun!
|
||||
```
|
||||
|
||||
If it fails with a message about the database directory, please make sure the
|
||||
database directory you specified exists and can be written into.
|
||||
|
||||
Use your favorite browser to access the URL
|
||||
|
||||
http://127.0.0.1:8529/_api/version
|
||||
|
||||
This should produce a JSON object like
|
||||
|
||||
{"server" : "arango", "version" : "..."}
|
||||
|
||||
as result.
|
||||
|
||||
!SUBSECTION Re-building ArangoDB after an update
|
||||
|
||||
To stay up-to-date with changes made in the main ArangoDB repository, you will
|
||||
need to pull the changes from it and re-run `make`.
|
||||
|
||||
Normally, this will be as simple as follows:
|
||||
|
||||
unix> git pull
|
||||
unix> make
|
||||
|
||||
From time to time there will be bigger structural changes in ArangoDB, which may
|
||||
render the old Makefiles invalid. Should this be the case and `make` complains
|
||||
about missing files etc., the following commands should fix it:
|
||||
|
||||
|
||||
unix> rm -f CMakeCache.txt
|
||||
unix> cmake ..
|
||||
unix> make
|
||||
|
||||
In order to reset everything and also recompile all 3rd party libraries, issue
|
||||
the following commands:
|
||||
|
||||
unix> git checkout -- .
|
||||
unix> cd ..; rm -rf build; mkdir build; cd build
|
||||
|
||||
This will clean up ArangoDB and the 3rd party libraries, and rebuild everything.
|
||||
|
||||
Sometimes you can get away with the less intrusive commands.
|
||||
|
||||
!SUBSECTION Install
|
||||
|
||||
Install everything by executing
|
||||
|
||||
make install
|
||||
|
||||
You must be root to do this or at least have write permission to the
|
||||
corresponding directories.
|
||||
|
||||
The server will by default be installed in
|
||||
|
||||
/usr/local/sbin/arangod
|
||||
|
||||
The configuration file will be installed in
|
||||
|
||||
/usr/local/etc/arangodb/arangod.conf
|
||||
|
||||
The database will be installed in
|
||||
|
||||
/usr/local/var/lib/arangodb
|
||||
|
||||
The ArangoShell will be installed in
|
||||
|
||||
/usr/local/bin/arangosh
|
||||
|
||||
**Note:** The installation directory will be different if you use one of the
|
||||
`precompiled` packages. Please check the default locations of your operating
|
||||
system, e. g. `/etc` and `/var/lib`.
|
||||
|
||||
When upgrading from a previous version of ArangoDB, please make sure you inspect
|
||||
ArangoDB's log file after an upgrade. It may also be necessary to start ArangoDB
|
||||
with the *--database.upgrade* parameter once to perform required upgrade or
|
||||
initialization tasks.
|
||||
Please check out the [cookbook](https://docs.arangodb.com/cookbook) on how to
|
||||
compile ArangoDB.
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
easily install ArangoDB using yum, aptitude, urpmi or zypper.
|
||||
- Alternatively, see [Compiling](Compiling.md) if you want to build ArangoDB
|
||||
yourself.
|
||||
- Start up the database server
|
||||
- Start up the database server.
|
||||
|
||||
Normally, this is done by executing the following command:
|
||||
|
||||
|
@ -23,6 +23,12 @@ To stop the server you can use the following command:
|
|||
The exact commands depend on your Linux distribution.
|
||||
You may require root privileges to execute these commands.
|
||||
|
||||
!SECTION Linux Mint
|
||||
|
||||
Please use the corresponding Ubuntu or Debian packages.
|
||||
|
||||
!SECTION Non-Standard Installation
|
||||
|
||||
If you compiled ArangoDB from source and did not use any installation
|
||||
package – or are using non-default locations and/or multiple ArangoDB
|
||||
instances on the same host – you may want to start the server process
|
||||
|
@ -60,8 +66,3 @@ make sure to start the server once with the *--database.upgrade* option.
|
|||
|
||||
Note that you may have to enable logging first. If you start the server
|
||||
in a shell, you should see errors logged there as well.
|
||||
|
||||
|
||||
!SUBSECTION Linux Mint
|
||||
|
||||
Please use the corresponding Ubuntu or Debian packages.
|
||||
|
|
|
@ -50,14 +50,14 @@ also need to update homebrew:
|
|||
|
||||
!SECTION Graphical App
|
||||
In case you are not using homebrew, we also provide a graphical app. You can
|
||||
download it from [here](https://www.arangodb.com/install).
|
||||
download it from [here](https://www.arangodb.com/download).
|
||||
|
||||
Choose *Mac OS X*. Download and install the application *ArangoDB* in
|
||||
your application folder.
|
||||
|
||||
!SECTION Command-Line App
|
||||
In case you are not using homebrew, we also provide a command-line app. You can
|
||||
download it from [here](https://www.arangodb.com/install).
|
||||
download it from [here](https://www.arangodb.com/download).
|
||||
|
||||
Choose *Mac OS X*. Download and install the application *ArangoDB-CLI*
|
||||
in your application folder.
|
||||
|
|
|
@ -5,4 +5,4 @@ homebrew on MacOS X. You can find packages for various operation systems at our
|
|||
[install](https://www.arangodb.com/download) section, including installers
|
||||
for Windows.
|
||||
|
||||
How to do that in detail is described the subchapters of this section.
|
||||
How to do that in detail is described in the subchapters of this section.
|
||||
|
|
|
@ -7,14 +7,18 @@ that ArangoDB has been installed in the location *<ROOTDIR>*.
|
|||
You have to be careful when choosing an installation directory. You need either
|
||||
write permission to this directory or you need to modify the config file for the
|
||||
server process. In the latter case the database directory and the Foxx directory
|
||||
has to be writable by the user.
|
||||
have to be writable by the user.
|
||||
|
||||
Installing for a single user: Select a different directory during
|
||||
installation. For example *C:\Users\<Username>\ArangoDB* or *C:\ArangoDB*.
|
||||
!SUBSECTION Single User Installation
|
||||
|
||||
Installing for multiple users: Keep the default directory. After the
|
||||
installation edit the file *<ROOTDIR>\etc\ArangoDB\arangod.conf*. Adjust the
|
||||
*directory* and *app-path* so that these paths point into your home directory.
|
||||
Select a different directory during installation. For example
|
||||
*C:\Users\<Username>\ArangoDB* or *C:\ArangoDB*.
|
||||
|
||||
!SUBSECTION Multiple Users Installation
|
||||
|
||||
Keep the default directory. After the installation edit the file
|
||||
*<ROOTDIR>\etc\ArangoDB\arangod.conf*. Adjust the *directory*
|
||||
and *app-path* so that these paths point into your home directory.
|
||||
|
||||
[database]
|
||||
directory = @HOMEDRIVE@\@HOMEPATH@\arangodb\databases
|
||||
|
@ -24,8 +28,10 @@ installation edit the file *<ROOTDIR>\etc\ArangoDB\arangod.conf*. Adjust t
|
|||
|
||||
Create the directories for each user that wants to use ArangoDB.
|
||||
|
||||
Installing as Service: Keep the default directory. After the installation open
|
||||
a command line as administrator (search for *cmd* and right click *run as
|
||||
!SUBSECTION Service Installation
|
||||
|
||||
Keep the default directory. After the installation open a command line
|
||||
as administrator (search for *cmd* and right click *run as
|
||||
administrator*).
|
||||
|
||||
cmd> arangod --install-service
|
||||
|
@ -40,22 +46,15 @@ option.
|
|||
file = @ROOTDIR@\var\log\arangodb\arangod.log
|
||||
|
||||
|
||||
!SUBSECTION Client, Server and Lock-Files
|
||||
!SECTION Starting
|
||||
|
||||
Please note that ArangoDB consists of a database server and client tools. If you
|
||||
start the server, it will place a (read-only) lock file to prevent accidental
|
||||
access to the data. The server will attempt to remove this lock file when it is
|
||||
started to see if the lock is still valid - this is in case the installation did
|
||||
not proceed correctly or if the server terminated unexpectedly.
|
||||
If you installed ArangoDB as a service it is automatically started.
|
||||
|
||||
!SUBSECTION Starting
|
||||
|
||||
To start an ArangoDB server instance with networking enabled, use the executable
|
||||
*arangod.exe* located in *<ROOTDIR>\bin*. This will use the configuration
|
||||
file *arangod.conf* located in *<ROOTDIR>\etc\arangodb*, which you can adjust
|
||||
to your needs and use the data directory *<ROOTDIR>\var\lib\arangodb*. This
|
||||
is the place where all your data (databases and collections) will be stored
|
||||
by default.
|
||||
Otherwise, use the executable *arangod.exe* located in
|
||||
*<ROOTDIR>\bin*. This will use the configuration file *arangod.conf*
|
||||
located in *<ROOTDIR>\etc\arangodb*, which you can adjust to your needs
|
||||
and use the data directory *<ROOTDIR>\var\lib\arangodb*. This is the place
|
||||
where all your data (databases and collections) will be stored by default.
|
||||
|
||||
Please check the output of the *arangod.exe* executable before going on. If the
|
||||
server started successfully, you should see a line `ArangoDB is ready for
|
||||
|
@ -68,21 +67,13 @@ page:
|
|||
|
||||
http://127.0.0.1:8529/
|
||||
|
||||
To check if your installation was successful, click the *Collection* tab and
|
||||
open the configuration. Select the *System* type. If the installation was
|
||||
successful, then the page should display a few system collections.
|
||||
|
||||
Try to add a new collection and then add some documents to this new collection.
|
||||
If you have succeeded in creating a new collection and inserting one or more
|
||||
documents, then your installation is working correctly.
|
||||
|
||||
!SUBSECTION Advanced Starting
|
||||
!SECTION Advanced Starting
|
||||
|
||||
If you want to provide your own start scripts, you can set the environment
|
||||
variable *ARANGODB_CONFIG_PATH*. This variable should point to a directory
|
||||
containing the configuration files.
|
||||
|
||||
!SUBSECTION Using the Client
|
||||
!SECTION Using the Client
|
||||
|
||||
To connect to an already running ArangoDB server instance, there is a shell
|
||||
*arangosh.exe* located in *<ROOTDIR>\bin*. This starts a shell which can be
|
||||
|
@ -97,27 +88,7 @@ the *arangod.exe* executable.
|
|||
*<ROOTDIR>\etc\arangodb\*. Please adjust this to your needs if you want to
|
||||
use different connection settings etc.
|
||||
|
||||
!SUBSECTION 32bit
|
||||
|
||||
If you have an EXISTING database, then please note that currently a 32 bit
|
||||
version of ArangoDB is NOT compatible with a 64 bit version. This means that
|
||||
if you have a database created with a 32 bit version of ArangoDB it may
|
||||
become corrupted if you execute a 64 bit version of ArangoDB against the same
|
||||
database, and vice versa.
|
||||
|
||||
!SUBSECTION Upgrading
|
||||
|
||||
To upgrade an EXISTING database created with a previous version of ArangoDB,
|
||||
please execute the server *arangod.exe* with the option
|
||||
*--database.upgrade*. Otherwise starting ArangoDB may fail with errors.
|
||||
|
||||
Note that there is no harm in running the upgrade. So you should run this
|
||||
batch file if you are unsure of the database version you are using.
|
||||
|
||||
You should always check the output for errors to see if the upgrade was
|
||||
completed successfully.
|
||||
|
||||
!SUBSECTION Uninstalling
|
||||
!SECTION Uninstalling
|
||||
|
||||
To uninstall the Arango server application you can use the Windows control panel
|
||||
(as you would normally uninstall an application). Note however, that any data
|
||||
|
@ -125,7 +96,7 @@ files created by the Arango server will remain as well as the *<ROOTDIR>*
|
|||
directory. To complete the uninstallation process, remove the data files and
|
||||
the *<ROOTDIR>* directory manually.
|
||||
|
||||
!SUBSECTION Limitations for Cygwin
|
||||
!SECTION Limitations for Cygwin
|
||||
|
||||
Please note some important limitations when running ArangoDB under Cygwin:
|
||||
ArangoDB can be started from within a Cygwin terminal, but pressing
|
||||
|
|
|
@ -99,7 +99,6 @@ operate stable and fast no matter how your data looks like.
|
|||
!SECTION ArangoDB programs
|
||||
|
||||
The ArangoDB package comes with the following programs:
|
||||
<!-- TODO: the next link has gone away, bent it over. -->
|
||||
- `arangod`: The [ArangoDB database daemon](../Administration/Configuration/Arangod.md).
|
||||
This server program is intended to run as a daemon process and to serve the
|
||||
various clients' connections to the server via TCP / HTTP.
|
||||
|
|
|
@ -30,151 +30,5 @@ Other fields can be updated as in default collection.
|
|||
|
||||
!SECTION Working with Edges
|
||||
|
||||
!SUBSECTION Insert
|
||||
<!-- arangod/V8Server/v8-collection.cpp -->
|
||||
|
||||
|
||||
saves a new edge document
|
||||
`edge-collection.insert(from, to, document)`
|
||||
|
||||
Saves a new edge and returns the document-handle. *from* and *to*
|
||||
must be documents or document references.
|
||||
|
||||
`edge-collection.insert(from, to, document, waitForSync)`
|
||||
|
||||
The optional *waitForSync* parameter can be used to force
|
||||
synchronization of the document creation operation to disk even if
|
||||
the *waitForSync* flag has been disabled for the entire collection.
|
||||
Thus, the *waitForSync* parameter can be used to force synchronization
|
||||
of just specific operations. To use this, set the *waitForSync* parameter
|
||||
to *true*. If the *waitForSync* parameter is not specified or set to
|
||||
*false*, then the collection's default *waitForSync* behavior is
|
||||
applied. The *waitForSync* parameter cannot be used to disable
|
||||
synchronization for collections that have a default *waitForSync* value
|
||||
of *true*.
|
||||
|
||||
|
||||
**Examples**
|
||||
|
||||
|
||||
@startDocuBlockInline EDGCOL_01_SaveEdgeCol
|
||||
@EXAMPLE_ARANGOSH_OUTPUT{EDGCOL_01_SaveEdgeCol}
|
||||
db._create("vertex");
|
||||
db._createEdgeCollection("relation");
|
||||
v1 = db.vertex.insert({ name : "vertex 1" });
|
||||
v2 = db.vertex.insert({ name : "vertex 2" });
|
||||
e1 = db.relation.insert(v1, v2, { label : "knows" });
|
||||
db._document(e1);
|
||||
~ db._drop("relation");
|
||||
~ db._drop("vertex");
|
||||
@END_EXAMPLE_ARANGOSH_OUTPUT
|
||||
@endDocuBlock EDGCOL_01_SaveEdgeCol
|
||||
|
||||
|
||||
|
||||
!SUBSECTION Edges
|
||||
<!-- arangod/V8Server/v8-query.cpp -->
|
||||
|
||||
|
||||
selects all edges for a set of vertices
|
||||
`edge-collection.edges(vertex)`
|
||||
|
||||
The *edges* operator finds all edges starting from (outbound) or ending
|
||||
in (inbound) *vertex*.
|
||||
|
||||
`edge-collection.edges(vertices)`
|
||||
|
||||
The *edges* operator finds all edges starting from (outbound) or ending
|
||||
in (inbound) a document from *vertices*, which must be a list of documents
|
||||
or document handles.
|
||||
|
||||
@startDocuBlockInline EDGCOL_02_Relation
|
||||
@EXAMPLE_ARANGOSH_OUTPUT{EDGCOL_02_Relation}
|
||||
db._create("vertex");
|
||||
db._createEdgeCollection("relation");
|
||||
~ var myGraph = {};
|
||||
myGraph.v1 = db.vertex.insert({ name : "vertex 1" });
|
||||
myGraph.v2 = db.vertex.insert({ name : "vertex 2" });
|
||||
| myGraph.e1 = db.relation.insert(myGraph.v1, myGraph.v2,
|
||||
{ label : "knows"});
|
||||
db._document(myGraph.e1);
|
||||
db.relation.edges(myGraph.e1._id);
|
||||
~ db._drop("relation");
|
||||
~ db._drop("vertex");
|
||||
@END_EXAMPLE_ARANGOSH_OUTPUT
|
||||
@endDocuBlock EDGCOL_02_Relation
|
||||
|
||||
|
||||
|
||||
!SUBSECTION InEdges
|
||||
<!-- arangod/V8Server/v8-query.cpp -->
|
||||
|
||||
|
||||
selects all inbound edges
|
||||
`edge-collection.inEdges(vertex)`
|
||||
|
||||
The *inEdges* operator finds all edges ending in (inbound) *vertex*.
|
||||
|
||||
`edge-collection.inEdges(vertices)`
|
||||
|
||||
The *inEdges* operator finds all edges ending in (inbound) a document from
|
||||
*vertices*, which must be a list of documents or document handles.
|
||||
|
||||
|
||||
**Examples**
|
||||
|
||||
@startDocuBlockInline EDGCOL_02_inEdges
|
||||
@EXAMPLE_ARANGOSH_OUTPUT{EDGCOL_02_inEdges}
|
||||
db._create("vertex");
|
||||
db._createEdgeCollection("relation");
|
||||
~ var myGraph = {};
|
||||
myGraph.v1 = db.vertex.insert({ name : "vertex 1" });
|
||||
myGraph.v2 = db.vertex.insert({ name : "vertex 2" });
|
||||
| myGraph.e1 = db.relation.insert(myGraph.v1, myGraph.v2,
|
||||
{ label : "knows"});
|
||||
db._document(myGraph.e1);
|
||||
db.relation.inEdges(myGraph.v1._id);
|
||||
db.relation.inEdges(myGraph.v2._id);
|
||||
~ db._drop("relation");
|
||||
~ db._drop("vertex");
|
||||
@END_EXAMPLE_ARANGOSH_OUTPUT
|
||||
@endDocuBlock EDGCOL_02_inEdges
|
||||
|
||||
|
||||
|
||||
!SUBSECTION OutEdges
|
||||
<!-- arangod/V8Server/v8-query.cpp -->
|
||||
|
||||
|
||||
selects all outbound edges
|
||||
`edge-collection.outEdges(vertex)`
|
||||
|
||||
The *outEdges* operator finds all edges starting from (outbound)
|
||||
*vertex*.
|
||||
|
||||
`edge-collection.outEdges(vertices)`
|
||||
|
||||
The *outEdges* operator finds all edges starting from (outbound) a document
|
||||
from *vertices*, which must be a list of documents or document handles.
|
||||
|
||||
|
||||
**Examples**
|
||||
|
||||
@startDocuBlockInline EDGCOL_02_outEdges
|
||||
@EXAMPLE_ARANGOSH_OUTPUT{EDGCOL_02_outEdges}
|
||||
db._create("vertex");
|
||||
db._createEdgeCollection("relation");
|
||||
~ var myGraph = {};
|
||||
myGraph.v1 = db.vertex.insert({ name : "vertex 1" });
|
||||
myGraph.v2 = db.vertex.insert({ name : "vertex 2" });
|
||||
| myGraph.e1 = db.relation.insert(myGraph.v1, myGraph.v2,
|
||||
{ label : "knows"});
|
||||
db._document(myGraph.e1);
|
||||
db.relation.outEdges(myGraph.v1._id);
|
||||
db.relation.outEdges(myGraph.v2._id);
|
||||
~ db._drop("relation");
|
||||
~ db._drop("vertex");
|
||||
@END_EXAMPLE_ARANGOSH_OUTPUT
|
||||
@endDocuBlock EDGCOL_02_outEdges
|
||||
|
||||
|
||||
Edges are normal [documents](../../DataModeling/Documents/DocumentMethods.md)
|
||||
that always contain a `_from` and a `_to` attribute.
|
||||
|
|
|
@ -1,15 +1,15 @@
|
|||
<div id="header">
|
||||
<div id="logo">
|
||||
<div class="arangodb-header">
|
||||
<div class="arangodb-logo">
|
||||
<a href="https://arangodb.com/">
|
||||
<img src="https://docs.arangodb.com/assets/arangodb_logo_2016.png">
|
||||
</a>
|
||||
</div>
|
||||
<div id="logo-small">
|
||||
<div class="arangodb-logo-small">
|
||||
<a href="https://arangodb.com/">
|
||||
<img src="https://docs.arangodb.com/assets/arangodb_logo_small_2016.png">
|
||||
</a>
|
||||
</div>
|
||||
<select id="version-switcher">
|
||||
<select class="arangodb-version-switcher">
|
||||
<option value="devel">VERSION_NUMBER</option>
|
||||
<option value="2.8">v2.8</option>
|
||||
<option value="2.7">v2.7</option>
|
||||
|
@ -23,7 +23,7 @@
|
|||
<div class="google-search">
|
||||
<gcse:searchbox-only></gcse:searchbox-only>
|
||||
</div>
|
||||
<ul id="navmenu">
|
||||
<ul class="arangodb-navmenu">
|
||||
<li class="active-tab">
|
||||
<a href="BASE_PATH/Manual/index.html">Manual</a>
|
||||
</li>
|
||||
|
|
|
@ -6,30 +6,28 @@ The documentation introduces ArangoDB for you as a user, developer and administr
|
|||
|
||||
New and eager to try it out? Start right away with our beginner's guide: [Getting Started](GettingStarted/README.md)
|
||||
|
||||
!SUBSECTION Overview
|
||||
|
||||
ArangoDB is a multi-model, open-source database with flexible data models for documents, graphs, and key-values. Build high performance applications using a convenient SQL-like query language or JavaScript extensions. Use ACID transactions if you require them. Scale horizontally and vertically with a few mouse clicks.
|
||||
|
||||
Key features include:
|
||||
|
||||
* **Schema-free schemata** let you combine the space efficiency of MySQL with the performance power of NoSQL
|
||||
* Use ArangoDB as an **application server** and fuse your application and database together for maximal throughput
|
||||
* JavaScript for all: **no language zoo**, you can use one language from your browser to your back-end
|
||||
* ArangoDB is **multi-threaded** - exploit the power of all your cores
|
||||
* **Flexible data modeling**: model your data as a combination of key-value pairs, documents or graphs - perfect for social relations
|
||||
* Free **index choice**: use the correct index for your problem, be it a skip list or a fulltext search
|
||||
* Installing ArangoDB on a [**cluster**](Deployment/README.md) is as easy as installing an app on your mobile
|
||||
* [**Flexible data modeling**](DataModeling/README.md): model your data as a combination of key-value pairs, documents or graphs - perfect for social relations
|
||||
* [**Powerful query language**](../AQL/index.html) (AQL) to retrieve and modify data
|
||||
* Use ArangoDB as an [**application server**](Foxx/README.md) and fuse your application and database together for maximal throughput
|
||||
* [**Transactions**](Transactions/README.md): run queries on multiple documents or collections with optional transactional consistency and isolation
|
||||
* [**Replication** and **Sharding**](Administration/README.md): set up the database in a master-slave configuration or spread bigger datasets across multiple servers
|
||||
* Configurable **durability**: let the application decide if it needs more durability or more performance
|
||||
* No-nonsense storage: ArangoDB uses all of the power of **modern storage hardware**, like SSD and large caches
|
||||
* **Powerful query language** (AQL) to retrieve and modify data
|
||||
* **Transactions**: run queries on multiple documents or collections with optional transactional consistency and isolation
|
||||
* **Replication** and **Sharding**: set up the database in a master-slave configuration or spread bigger datasets across multiple servers
|
||||
* JavaScript for all: **no language zoo**, you can use one language from your browser to your back-end
|
||||
* It is **open source** (Apache License 2.0)
|
||||
|
||||
In this documentation you can inform yourself about all the functions, features and programs ArangoDB provides for you.
|
||||
Features are illustrated with interactive usage examples; you can cut'n'paste them into [arangosh](Administration/Arangosh/README.md) to try them out.
|
||||
The HTTP REST API is demonstrated with cut'n'paste recipes intended to be used with [cURL](http://curl.haxx.se).
|
||||
Drivers may provide their own examples based on these JavaScript examples to improve understandability for their respective users.
|
||||
E.g. for the [Java driver](https://github.com/arangodb/arangodb-java-driver#learn-more) some of the samples are re-implemented.
|
||||
!SUBSECTION Structure of the Documentation
|
||||
|
||||
You can also go to our [cookbook](https://docs.arangodb.com/cookbook) and look through some recipes to learn more about ArangoDB-specific problems and solutions.
|
||||
In this documentation you can inform yourself about all the functions, features and programs ArangoDB provides for you. There are four handbooks: this manual, which describes ArangoDB and its features in detail; the [AQL handbook](../AQL/index.html), which explains ArangoDB's query language AQL; the [HTTP handbook](../HTTP/index.html), which describes the internal API of ArangoDB that is used to communicate with clients (in general it will be of interest to driver developers; if you use any of the existing drivers for the language of your choice, you can skip it); and our [cookbook](https://docs.arangodb.com/cookbook), a collection of recipes about ArangoDB-specific problems and solutions.
|
||||
|
||||
Features are illustrated with interactive usage examples; you can cut'n'paste them into [arangosh](Administration/Arangosh/README.md) to try them out. The HTTP [REST API](../HTTP/index.html) for driver developers is demonstrated with cut'n'paste recipes intended to be used with [cURL](http://curl.haxx.se). Drivers may provide their own examples based on these JavaScript examples to improve understandability for their respective users; e.g. for the [Java driver](https://github.com/arangodb/arangodb-java-driver#learn-more) some of the samples are re-implemented.
|
||||
|
||||
!SUBSECTION Community
|
||||
|
||||
|
|
|
@ -18,8 +18,9 @@
|
|||
# * [Coming from MongoDB](GettingStarted/ComingFromMongoDb.md) #TODO
|
||||
#
|
||||
* [Scalability](Scalability/README.md)
|
||||
* [Cluster](Scalability/Cluster.md)
|
||||
# * [Joins](Scalability/Joins.md)
|
||||
* [Architecture](Scalability/Architecture.md)
|
||||
* [Data models](Scalability/DataModels.md)
|
||||
* [Limitations](Scalability/Limitations.md)
|
||||
#
|
||||
* [Data model & modeling](DataModeling/README.md)
|
||||
# * [Collections](FirstSteps/CollectionsAndDocuments.md) #TODO
|
||||
|
@ -82,14 +83,14 @@
|
|||
* [Cookie transport](Foxx/Sessions/Transports/Cookie.md)
|
||||
* [Header transport](Foxx/Sessions/Transports/Header.md)
|
||||
* [Serving files](Foxx/Assets.md)
|
||||
* [Writing tests](Foxx/Testing.md)
|
||||
# * [Writing tests](Foxx/Testing.md)
|
||||
* [Scripts and queued jobs](Foxx/Scripts.md)
|
||||
* [Migrating 2.x services](Foxx/Migrating2x.md)
|
||||
* [Legacy compatibility mode](Foxx/LegacyMode.md)
|
||||
* [User management](Foxx/Users.md)
|
||||
* [Related modules](Foxx/Modules.md)
|
||||
* [Authentication](Foxx/Auth.md)
|
||||
* [OAuth 2.0](Foxx/OAuth2.md)
|
||||
# * [OAuth 2.0](Foxx/OAuth2.md)
|
||||
* [Transactions](Transactions/README.md)
|
||||
* [Transaction invocation](Transactions/TransactionInvocation.md)
|
||||
* [Passing parameters](Transactions/Passing.md)
|
||||
|
@ -140,14 +141,18 @@
|
|||
* [Replication Limitations](Administration/Replication/Asynchronous/Limitations.md)
|
||||
* [Synchronous Replication](Administration/Replication/Synchronous/README.md)
|
||||
* [Implementation](Administration/Replication/Synchronous/Implementation.md)
|
||||
* [Configuration](Administration/Replication/Synchronous/Configuration.md)
|
||||
* [Sharding](Administration/Sharding/README.md)
|
||||
* [Implementation](Administration/Sharding/StatusOfImplementation.md)
|
||||
* [Authentication](Administration/Sharding/Authentication.md)
|
||||
* [Firewall setup](Administration/Sharding/FirewallSetup.md)
|
||||
# * [Authentication](Administration/Sharding/Authentication.md)
|
||||
# * [Firewall setup](Administration/Sharding/FirewallSetup.md)
|
||||
* [Web Interface](Administration/WebInterface/README.md)
|
||||
* [AQL Editor](Administration/WebInterface/AqlEditor.md)
|
||||
* [Queries](Administration/WebInterface/AqlEditor.md)
|
||||
* [Collections](Administration/WebInterface/Collections.md)
|
||||
* [Graph Viewer](Administration/WebInterface/GraphViewer.md)
|
||||
* [Cluster](Administration/WebInterface/Cluster.md)
|
||||
* [Nodes](Administration/WebInterface/Nodes.md)
|
||||
* [Dashboard](Administration/WebInterface/Dashboard.md)
|
||||
* [Graphs](Administration/WebInterface/Graphs.md)
|
||||
* [Graph Viewer](Administration/WebInterface/GraphViewer.md)
|
||||
#
|
||||
* [Troubleshooting](Troubleshooting/README.md)
|
||||
* [arangod](Troubleshooting/Arangod.md)
|
||||
|
@ -184,8 +189,8 @@
|
|||
* [collection](Appendix/References/CollectionObject.md)
|
||||
* [JavaScript Modules](Appendix/JavaScriptModules/README.md)
|
||||
* [console](Appendix/JavaScriptModules/Console.md)
|
||||
* [crypto](Appendix/JavaScriptModules/Crypto.md)
|
||||
* [fs](Appendix/JavaScriptModules/FileSystem.md)
|
||||
* [process](Appendix/JavaScriptModules/Process.md)
|
||||
* [request](Appendix/JavaScriptModules/Request.md)
|
||||
* [actions](Appendix/JavaScriptModules/Actions.md)
|
||||
* [queries](Appendix/JavaScriptModules/Queries.md)
|
||||
|
|
|
@ -0,0 +1,270 @@
|
|||
!SECTION Architecture
|
||||
|
||||
The cluster architecture of ArangoDB is a CP master/master model with no
|
||||
single point of failure. With "CP" we mean that in the presence of a
|
||||
network partition, the database prefers internal consistency over
|
||||
availability. With "master/master" we mean that clients can send their
|
||||
requests to an arbitrary node, and experience the same view on the
|
||||
database regardless. "No single point of failure" means that the cluster
|
||||
can continue to serve requests, even if one machine fails completely.
|
||||
|
||||
In this way, ArangoDB has been designed as a distributed multi-model
|
||||
database. This section gives a short outline of the cluster architecture and
|
||||
how the above features and capabilities are achieved.
|
||||
|
||||
!SUBSECTION Structure of an ArangoDB cluster
|
||||
|
||||
An ArangoDB cluster consists of a number of ArangoDB instances
|
||||
which talk to each other over the network. They play different roles,
|
||||
which will be explained in detail below. The current configuration
|
||||
of the cluster is held in the "Agency", which is a highly available,
|
||||
resilient key/value store based on an odd number of ArangoDB instances.
|
||||
|
||||
!SUBSUBSECTION Cluster ID
|
||||
|
||||
Every non-Agency ArangoDB instance in a cluster is assigned a unique
|
||||
ID during its startup. Using its ID a node is identifiable
|
||||
throughout the cluster. All cluster operations will communicate
|
||||
via this ID.
|
||||
|
||||
For the various instances in an ArangoDB cluster there are 4 distinct
|
||||
roles: Agents, Coordinators, Primary and Secondary DBservers. In the
|
||||
following sections we will shed light on each of them.
|
||||
|
||||
!SUBSUBSECTION Agents
|
||||
|
||||
One or multiple Agents form the Agency in an ArangoDB cluster. The
|
||||
Agency is the central place to store the configuration in a cluster. It
|
||||
performs leader elections and provides other synchronisation services for
|
||||
the whole cluster. Without the Agency none of the other components can
|
||||
operate.
|
||||
|
||||
While generally invisible to the outside it is the heart of the
|
||||
cluster. As such, fault tolerance is of course a must-have for the
|
||||
Agency. To achieve that, the Agents use the [Raft Consensus
|
||||
Algorithm](https://raft.github.io/). The algorithm formally guarantees
|
||||
conflict free configuration management within the ArangoDB cluster.
|
||||
|
||||
At its core the Agency manages a big configuration tree. It supports
|
||||
transactional read and write operations on this tree, and other servers
|
||||
can subscribe to HTTP callbacks for all changes to the tree.
|
||||
|
||||
!SUBSUBSECTION Coordinators
|
||||
|
||||
Coordinators should be accessible from the outside. These are the ones
|
||||
the clients talk to. They will coordinate cluster tasks like
|
||||
executing queries and running Foxx services. They know where the
|
||||
data is stored and will optimize where to run user supplied queries or
|
||||
parts thereof. Coordinators are stateless and can thus easily be shut down
|
||||
and restarted as needed.
|
||||
|
||||
!SUBSUBSECTION Primary DBservers
|
||||
|
||||
Primary DBservers are the ones where the data is actually hosted. They
|
||||
host shards of data; with synchronous replication, a primary may be
|
||||
either leader or follower for a shard.
|
||||
|
||||
They should not be accessed from the outside but indirectly through the
|
||||
coordinators. They may also execute queries in part or as a whole when
|
||||
asked by a coordinator.
|
||||
|
||||
!SUBSUBSECTION Secondaries
|
||||
|
||||
Secondary DBservers are asynchronous replicas of primaries. For each
|
||||
primary, there can be one or more secondaries. Since the replication
|
||||
works asynchronously (eventual consistency), it does not
|
||||
impede the performance of the primaries. On the other hand, their
|
||||
replica of the data can be slightly out of date. The secondaries are
|
||||
perfectly suitable for backups as they don't interfere with the normal
|
||||
cluster operation.
|
||||
|
||||
!SUBSECTION Sharding
|
||||
|
||||
Using the roles outlined above an ArangoDB cluster is able to distribute
|
||||
data in so-called shards across multiple primaries. From the outside
|
||||
this process is fully transparent and as such we achieve the goals of
|
||||
what other systems call "master-master replication". In an ArangoDB
|
||||
cluster you talk to any coordinator and whenever you read or write data
|
||||
it will automatically figure out where the data is stored (read) or to
|
||||
be stored (write). The information about the shards is shared across the
|
||||
coordinators using the Agency.
|
||||
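
To make this concrete, here is a minimal arangosh sketch, run against a
coordinator; the collection name and shard count are made up for this
illustration:

```js
// Create a collection that is split into 4 shards, distributed
// across the primary DBservers by the "_key" attribute.
var users = db._create("users", {
  numberOfShards: 4,
  shardKeys: ["_key"]
});

// The coordinator transparently routes this write to the shard
// (and thus the DBserver) responsible for the key "alice".
users.insert({ _key: "alice", age: 32 });

// Reads are routed the same way; the client never needs to know
// on which DBserver the document lives.
users.document("alice");
```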
|
||||
!SUBSECTION Many sensible configurations
|
||||
|
||||
This architecture is very flexible and thus allows many configurations,
|
||||
which are suitable for different usage scenarios:
|
||||
|
||||
1. The default configuration is to run exactly one coordinator and
|
||||
one primary DBserver on each machine. This achieves the classical
|
||||
master/master setup, since there is a perfect symmetry between the
|
||||
different nodes, clients can equally well talk to any one of the
|
||||
coordinators and all expose the same view to the datastore.
|
||||
2. One can deploy more coordinators than DBservers. This is a sensible
|
||||
approach if one needs a lot of CPU power for the Foxx services,
|
||||
because they run on the coordinators.
|
||||
3. One can deploy more DBservers than coordinators if more data capacity
|
||||
   is needed and query performance is less of a bottleneck.
|
||||
4. One can deploy a coordinator on each machine where an application
|
||||
server (e.g. a node.js server) runs, and the Agents and DBservers
|
||||
on a separate set of machines elsewhere. This avoids a network hop
|
||||
between the application server and the database and thus decreases
|
||||
latency. Essentially, this moves some of the database distribution
|
||||
logic to the machine where the client runs.
|
||||
|
||||
These four shall suffice for now. The important piece of information here
|
||||
is that the coordinator layer can be scaled and deployed independently
|
||||
from the DBserver layer.
|
||||
|
||||
!SUBSECTION Replication
|
||||
|
||||
ArangoDB offers two ways of data replication within a cluster: synchronous
|
||||
and asynchronous. In this section we explain some details and highlight
|
||||
the advantages and disadvantages of each.
|
||||
|
||||
!SUBSUBSECTION Synchronous replication with automatic failover
|
||||
|
||||
Synchronous replication works on a per-shard basis. One configures for
|
||||
each collection how many copies of each shard are kept in the cluster.
|
||||
At any given time, one of the copies is declared to be the "leader" and
|
||||
all other replicas are "followers". Write operations for this shard
|
||||
are always sent to the DBserver which happens to hold the leader copy,
|
||||
which in turn replicates the changes to all followers before the operation
|
||||
is considered to be done and reported back to the coordinator.
|
||||
Read operations are all served by the server holding the leader copy,
|
||||
which makes it possible to provide snapshot semantics for complex transactions.
|
||||
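
As a sketch of how this is configured, assuming the collection-level
`replicationFactor` option of the 3.0 cluster API (names and numbers are
invented for the example):

```js
// Keep 3 copies of each of the 2 shards: one leader and two
// followers. A write is only reported back to the coordinator
// after all followers have confirmed it.
var c = db._create("importantData", {
  numberOfShards: 2,
  replicationFactor: 3
});
```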
|
||||
If a DBserver fails that holds a follower copy of a shard, then the leader
|
||||
can no longer synchronize its changes to that follower. After a short timeout
|
||||
(3 seconds), the leader gives up on the follower, declares it to be
|
||||
out of sync, and continues service without the follower. When the server
|
||||
with the follower copy comes back, it automatically resynchronizes its
|
||||
data with the leader and synchronous replication is restored.
|
||||
|
||||
If a DBserver fails that holds a leader copy of a shard, then the leader
|
||||
can no longer serve any requests. It will no longer send a heartbeat to
|
||||
the Agency. Therefore, a supervision process running in the Raft leader
|
||||
of the Agency can take the necessary action (after 15 seconds of missing
|
||||
heartbeats), namely to promote one of the servers that hold in-sync
|
||||
replicas of the shard to leader for that shard. This involves a
|
||||
reconfiguration in the Agency, and as a result coordinators
|
||||
now contact a different DBserver for requests to this shard. Service
|
||||
resumes. The other surviving replicas automatically resynchronize their
|
||||
data with the new leader. When the DBserver with the original leader
|
||||
copy comes back, it notices that it now holds a follower replica,
|
||||
resynchronizes its data with the new leader and order is restored.
|
||||
|
||||
All shard data synchronizations are done in an incremental way, such that
|
||||
resynchronizations are quick. This technology makes it possible to move shards
|
||||
(follower and leader ones) between DBservers without service interruptions.
|
||||
Therefore, an ArangoDB cluster can move all the data on a specific DBserver
|
||||
to other DBservers and then shut down that server in a controlled way.
|
||||
This makes it possible to scale down an ArangoDB cluster without service interruption,
|
||||
loss of fault tolerance or data loss. Furthermore, one can rebalance the
|
||||
distribution of the shards, either manually or automatically.
|
||||
|
||||
All these operations can be triggered via a REST/JSON API or via the
|
||||
graphical web UI. All failover operations are completely handled within
|
||||
the ArangoDB cluster.
|
||||
|
||||
Obviously, synchronous replication involves increased latency for
|
||||
write operations, simply because there is one more network hop within the
|
||||
cluster for every request. Therefore the user can set the replication factor
|
||||
to 1, which means that only one copy of each shard is kept, thereby
|
||||
switching off synchronous replication. This is a suitable setting for
|
||||
less important or easily recoverable data for which low latency write
|
||||
operations matter.
|
||||
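
For example (a sketch with an invented collection name):

```js
// replicationFactor 1 keeps a single copy per shard, i.e. no
// synchronous replication: lower write latency, no fault tolerance.
var sessions = db._create("sessions", {
  numberOfShards: 4,
  replicationFactor: 1
});
```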
|
||||
!SUBSUBSECTION Asynchronous replication with automatic failover
|
||||
|
||||
Asynchronous replication works differently, in that it is organised
|
||||
using primary and secondary DBservers. Each secondary server replicates
|
||||
all the data held on a primary by polling in an asynchronous way. This
|
||||
process has very little impact on the performance of the primary. The
|
||||
disadvantage is that there is a delay between the confirmation of a
|
||||
write operation that is sent to the client and the actual replication of
|
||||
the data. If the master server fails during this delay, then committed
|
||||
and confirmed data can be lost.
|
||||
|
||||
Nevertheless, we also offer automatic failover with this setup. Contrary
|
||||
to the synchronous case, here the failover management is done from outside
|
||||
the ArangoDB cluster. In a future version we might move this management
|
||||
into the supervision process in the Agency, but as of now, the management
|
||||
is done via the Mesos framework scheduler for ArangoDB (see below).
|
||||
|
||||
The granularity of the replication is a whole ArangoDB instance with
|
||||
all data that resides on that instance, which means that
|
||||
you need twice as many instances as without asynchronous replication.
|
||||
Synchronous replication is more flexible in that respect: you can have
|
||||
smaller and larger instances, and if one fails, the data can be rebalanced
|
||||
across the remaining ones.
|
||||
|
||||
!SUBSECTION Microservices and zero administration
|
||||
|
||||
The design and capabilities of ArangoDB are geared towards usage in
|
||||
modern microservice architectures of applications. With the
|
||||
[Foxx services](../Foxx/README.md) it is very easy to deploy a
|
||||
data-centric microservice within an ArangoDB cluster.
|
||||
|
||||
Alternatively, one can deploy multiple instances of ArangoDB within the
|
||||
same project. One part of the project might need a scalable document
|
||||
store, another might need a graph database, and yet another might need
|
||||
the full power of a multi-model database actually mixing the various
|
||||
data models. There are enormous efficiency benefits to be reaped by
|
||||
being able to use a single technology for various roles in a project.
|
||||
|
||||
To simplify the life of devops in such a scenario, we try as much as
|
||||
possible to use a zero administration approach for ArangoDB. A running
|
||||
ArangoDB cluster is resilient against failures and essentially repairs
|
||||
itself in case of temporary failures. See the next section for further
|
||||
capabilities in this direction.
|
||||
|
||||
!SUBSECTION Apache Mesos integration
|
||||
|
||||
For the distributed setup, we use the Apache Mesos infrastructure by default.
|
||||
ArangoDB is a fully certified package for the Mesosphere DC/OS and can thus
|
||||
be deployed essentially with a few mouse clicks or a single command, once
|
||||
you have an existing DC/OS cluster. But even on a plain Apache Mesos cluster
|
||||
one can deploy ArangoDB via Marathon with a single API call and some JSON
|
||||
configuration.
|
||||
|
||||
The advantage of this approach is that we can not only implement the
|
||||
initial deployment, but also the later management of automatic
|
||||
replacement of failed instances and the scaling of the ArangoDB cluster
|
||||
(triggered manually or even automatically). Since all manipulations are
|
||||
either via the graphical web UI or via JSON/REST calls, one can even
|
||||
implement autoscaling very easily.
|
||||
|
||||
A DC/OS cluster is a very natural environment to deploy microservice
|
||||
architectures, since it is so convenient to deploy various services,
|
||||
including potentially multiple ArangoDB cluster instances within the
|
||||
same DC/OS cluster. The built-in service discovery makes it extremely
|
||||
simple to connect the various microservices and Mesos automatically
|
||||
takes care of the distribution and deployment of the various tasks.
|
||||
|
||||
As of June 2016, we offer Apache Mesos integration; later there will
|
||||
be integration with other cluster management infrastructures. See the
|
||||
[Deployment](../Deployment/README.md) chapter and its subsections for
|
||||
instructions.
|
||||
|
||||
It is possible to deploy an ArangoDB cluster by simply launching a bunch of
|
||||
Docker containers with the right command line options to link them up,
|
||||
or even on a single machine starting multiple ArangoDB processes. In that
|
||||
case, synchronous replication will work within the deployed ArangoDB cluster,
|
||||
and automatic failover in the sense that the duties of a failed server will
|
||||
automatically be assigned to another, surviving one. However, since the
|
||||
ArangoDB cluster cannot within itself launch additional instances, replacement
|
||||
of failed nodes is not automatic and scaling up and down has to be managed
|
||||
manually. This is why we do not recommend this setup for production
|
||||
deployment.
|
||||
|
||||
!SUBSECTION Authentication
|
||||
|
||||
As of version 3.0 ArangoDB authentication is **NOT** supported within a
|
||||
cluster. You **HAVE** to properly secure your cluster to the outside.
|
||||
Most setups will have a secured data center anyway and ArangoDB will
|
||||
be accessed from the outside via an application. To this application
|
||||
only the coordinators need to be made available. If you want to isolate
|
||||
even further you can install a reverse proxy like haproxy or nginx in
|
||||
front of the coordinators (that will also allow easy access from the
|
||||
application).
|
||||
|
|
@ -1,35 +0,0 @@
|
|||
!CHAPTER Cluster Scalability
|
||||
|
||||
ArangoDB has been designed as a distributed multi-model database. In this chapter we will give a short outline of the cluster architecture.
|
||||
|
||||
!SUBSECTION Cluster ID
|
||||
|
||||
Every node in a cluster is assigned a uniquely generated ID during its startup. Using this ID, a node is identifiable throughout the cluster. All cluster operations will communicate via this ID.
|
||||
|
||||
!SUBSECTION Roles in an ArangoDB cluster
|
||||
|
||||
In an ArangoDB cluster there are 4 distinct roles: Agents, Coordinators, Primaries, Secondaries. In the following sections we will shed light on each of them.
|
||||
|
||||
!SUBSUBSECTION Agents
|
||||
|
||||
One or multiple Agents form the Agency in an ArangoDB cluster. The Agency is the central place to store the configuration in a cluster. Without it none of the other components can operate. While generally invisible to the outside it is the heart of the cluster. Being such a critical component of the cluster, the Agents use the [Raft Consensus Algorithm](https://raft.github.io/) at their core. The algorithm formally guarantees conflict-free configuration management within the ArangoDB cluster.
|
||||
|
||||
At its core the Agency manages a big configuration tree. It supports transactional read and write operations on this tree.
|
||||
|
||||
!SUBSUBSECTION Coordinators
|
||||
|
||||
Coordinators should be accessible from the outside. These are the ones the clients should talk to. They will coordinate cluster tasks like executing queries and running Foxx applications. They know where the data is stored and will optimize where to run user-supplied queries or parts thereof.
|
||||
|
||||
!SUBSUBSECTION Primaries
|
||||
|
||||
Primaries are the ones where the data is actually hosted. They host shards of data; with synchronous replication, a Primary may be either leader or follower for a shard.
|
||||
|
||||
They should not be accessed from the outside but indirectly through the coordinator. They may also execute queries in part or as a whole when asked by a coordinator.
|
||||
|
||||
!SUBSUBSECTION Secondaries
|
||||
|
||||
Secondaries are asynchronous followers of primaries. They are perfectly suitable for backups as they don't interfere with the normal cluster operation.
|
||||
|
||||
!SUBSECTION Sharding
|
||||
|
||||
Using the roles outlined above an ArangoDB cluster is able to distribute data in so called shards across multiple primaries. From the outside this process is fully transparent and as such we achieve the goals of what other systems call "master-master replication". In an ArangoDB cluster you talk to any coordinator and whenever you read or write data it will automatically figure out where the data is stored (read) or to be stored (write). The information about the shards is shared across the coordinators using the Agency.
|
|
@ -0,0 +1,35 @@
|
|||
!SECTION Different data models and scalability
|
||||
|
||||
In this section we discuss scalability in the context of the different
|
||||
data models supported by ArangoDB.
|
||||
|
||||
!SUBSECTION Key/value pairs
|
||||
|
||||
The key/value store data model is the easiest to scale. In ArangoDB,
|
||||
this is implemented in the sense that a document collection always has
|
||||
a primary key `_key` attribute and in the absence of further secondary
|
||||
indexes the document collection behaves like a simple key/value store.
|
||||
|
||||
The only operations that are possible in this context are single key
|
||||
lookups and key/value pair insertions and updates. If `_key` is the
|
||||
only sharding attribute then the sharding is done with respect to the
|
||||
primary key and all these operations scale linearly. If the sharding is
|
||||
done using different shard keys, then a lookup of a single key involves
|
||||
asking all shards and thus does not scale linearly.
|
||||
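
A minimal arangosh sketch of this key/value usage pattern (collection
name and keys are illustrative):

```js
// With "_key" as the (default) shard key, each of these operations
// touches exactly one shard and thus scales linearly with the
// number of DBservers.
var store = db._create("kvstore", { numberOfShards: 4 });
store.insert({ _key: "user:1", value: "alice" });  // key/value insertion
store.update("user:1", { value: "bob" });          // key/value update
store.document("user:1");                          // single key lookup
```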
|
||||
!SUBSECTION Document store
|
||||
|
||||
For the document store case even in the presence of secondary indexes
|
||||
essentially the same arguments apply, since an index for a sharded
|
||||
collection is simply the same as a local index for each shard. Therefore,
|
||||
single document operations still scale linearly with the size of the
|
||||
cluster, unless a special sharding configuration makes lookups or
|
||||
write operations more expensive.
|
||||
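
For instance, a secondary index is created the same way as on a single
server; conceptually, each shard then maintains its own local part of the
index (a sketch with invented names):

```js
var orders = db._create("orders", { numberOfShards: 4 });

// Each shard builds and maintains a local hash index on "customer".
orders.ensureHashIndex("customer");

// A lookup by the indexed attribute is fanned out to the shards,
// each of which answers from its local index.
orders.byExample({ customer: "alice" }).toArray();
```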
|
||||
!SUBSECTION Complex queries and joins
|
||||
|
||||
TODO
|
||||
|
||||
!SUBSECTION Graph database
|
||||
|
||||
TODO
|
|
@ -0,0 +1,13 @@
|
|||
!SECTION Limitations
|
||||
|
||||
ArangoDB has no built-in limitations to horizontal scalability. The
|
||||
central resilient Agency will easily sustain hundreds of DBservers
|
||||
and coordinators, and the usual database operations work completely
|
||||
decentrally and do not require assistance from the Agency.
|
||||
|
||||
Likewise, the supervision process in the Agency can easily deal
|
||||
with lots of servers, since none of its activities are performance
|
||||
critical.
|
||||
|
||||
Obviously, an ArangoDB cluster is limited by the available resources
|
||||
of CPU, memory, disk and network bandwidth and latency.
|
|
@ -1,37 +1,23 @@
|
|||
!CHAPTER Scalability
|
||||
|
||||
Text for possible later reuse:
|
||||
ArangoDB is a distributed database supporting multiple data models,
|
||||
and can thus be scaled horizontally, that is, by using many servers,
|
||||
typically based on commodity hardware. This approach not only delivers
|
||||
performance as well as capacity improvements, but also achieves
|
||||
resilience by means of replication and automatic failover. Furthermore,
|
||||
one can build systems that scale their capacity dynamically up and down
|
||||
automatically according to demand.
|
||||
|
||||
For single instance setups we provide binary packages for various Linux
|
||||
distributions, for Mac OSX and for Windows, as well as Docker images.
|
||||
Installation is mostly straightforward using the standard package managers
|
||||
or deployment strategies. See also
|
||||
[our download page](https://www.arangodb.com/download/).
|
||||
Obviously, one can also scale ArangoDB vertically, that is, by using
|
||||
ever larger servers. However, this has the disadvantage that the
|
||||
costs grow faster than linearly with the size of the server, and
|
||||
none of the resilience and dynamic capabilities can be achieved
|
||||
in this way.
|
||||
|
||||
For the distributed setup, we use the Apache Mesos infrastructure by default.
|
||||
ArangoDB is a fully certified package for the Mesosphere DC/OS and can thus
|
||||
be deployed essentially with a few mouse clicks or a single command, once
|
||||
you have an existing DC/OS cluster. But even on a plain Apache Mesos cluster
|
||||
one can deploy ArangoDB via Marathon with a single API call and some JSON
|
||||
configuration.
|
||||
In this chapter we explain the distributed architecture of ArangoDB and
|
||||
discuss its scalability features and limitations:
|
||||
|
||||
The advantage of this approach is that we can not only implement the
|
||||
initial deployment, but also the later management of automatic
|
||||
replacement of failed instances and the scaling of the ArangoDB cluster
|
||||
(triggered manually or even automatically).
|
||||
- [ArangoDB's distributed architecture](Architecture.md)
|
||||
- [Different data models and scalability](DataModels.md)
|
||||
- [Limitations](Limitations.md)
|
||||
|
||||
As of June 2016, we offer Apache Mesos integration; later there will be
|
||||
integration with other cluster management infrastructures.
|
||||
|
||||
It is possible to deploy an ArangoDB cluster by simply launching a bunch of
|
||||
Docker containers with the right command line options to link them up,
|
||||
or even on a single machine starting multiple ArangoDB processes. In that
|
||||
case, synchronous replication will work within the deployed ArangoDB cluster,
|
||||
and automatic failover in the sense that the duties of a failed server will
|
||||
automatically be assigned to another, surviving one. However, since the
|
||||
ArangoDB cluster cannot within itself launch additional instances, replacement
|
||||
of failed nodes is not automatic and scaling up and down has to be managed
|
||||
manually. This is why we do not recommend this setup for production
|
||||
deployment.
|
||||
|
||||
TODO: add references to the Deployment chapter.
|
||||
|
|
|
@ -92,22 +92,22 @@ body {
|
|||
margin-top: 48px;
|
||||
}
|
||||
|
||||
#logo, #logo-small {
|
||||
.arangodb-logo, .arangodb-logo-small {
|
||||
display: inline;
|
||||
float: left;
|
||||
padding-top: 10px;
|
||||
margin-left:5%;
|
||||
}
|
||||
|
||||
#logo img {
|
||||
.arangodb-logo img {
|
||||
height: 23px;
|
||||
}
|
||||
|
||||
#logo-small {
|
||||
.arangodb-logo-small {
|
||||
display: none;
|
||||
}
|
||||
|
||||
#version-switcher {
|
||||
.arangodb-version-switcher {
|
||||
width: 62px;
|
||||
height: 44px;
|
||||
margin-left: 24px;
|
||||
|
@ -119,37 +119,37 @@ body {
|
|||
border: 0;
|
||||
}
|
||||
|
||||
#version-switcher option {
|
||||
.arangodb-version-switcher option {
|
||||
background-color: white;
|
||||
color: black;
|
||||
}
|
||||
|
||||
|
||||
#header {
|
||||
.arangodb-header {
|
||||
position: fixed;
|
||||
width: 100%;
|
||||
height: 48px;
|
||||
z-index: 1;
|
||||
}
|
||||
|
||||
#header .socialIcons-googlegroups a img {
|
||||
.arangodb-header .socialIcons-googlegroups a img {
|
||||
position: relative;
|
||||
height: 14px;
|
||||
top: 3px;
|
||||
}
|
||||
|
||||
#navmenu {
|
||||
.arangodb-navmenu {
|
||||
display: block;
|
||||
float: right;
|
||||
margin: 0px;
|
||||
}
|
||||
|
||||
#navmenu li {
|
||||
.arangodb-navmenu li {
|
||||
display: block;
|
||||
float: left;
|
||||
}
|
||||
|
||||
#navmenu li a {
|
||||
.arangodb-navmenu li a {
|
||||
display: block;
|
||||
float: left;
|
||||
padding: 0 10px;
|
||||
|
@ -161,14 +161,14 @@ body {
|
|||
font-family: Roboto, Helvetica, sans-serif;
|
||||
}
|
||||
|
||||
#navmenu li.active-tab a, #navmenu li a:hover {
|
||||
.arangodb-navmenu li.active-tab a, .arangodb-navmenu li a:hover {
|
||||
background-color: #88A049 !important;
|
||||
}
|
||||
|
||||
/** simple responsive updates **/
|
||||
|
||||
@media screen and (max-width: 1100px) {
|
||||
#logo {
|
||||
.arangodb-logo {
|
||||
margin-left: 10px;
|
||||
}
|
||||
|
||||
|
@ -190,29 +190,29 @@ body {
|
|||
width: 130px !important;
|
||||
}
|
||||
|
||||
#navmenu {
|
||||
.arangodb-navmenu {
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
#navmenu li a {
|
||||
.arangodb-navmenu li a {
|
||||
font-size: 15px;
|
||||
padding: 0 7px;
|
||||
}
|
||||
|
||||
#logo {
|
||||
.arangodb-logo {
|
||||
display: none;
|
||||
}
|
||||
|
||||
#logo-small {
|
||||
.arangodb-logo-small {
|
||||
display: inline;
|
||||
margin-left: 10px;
|
||||
}
|
||||
|
||||
#logo-small img {
|
||||
.arangodb-logo-small img {
|
||||
height: 20px;
|
||||
}
|
||||
|
||||
#version-switcher {
|
||||
.arangodb-version-switcher {
|
||||
margin: 0;
|
||||
width: 50px;
|
||||
}
|
||||
|
@ -220,7 +220,7 @@ body {
|
|||
}
|
||||
|
||||
@media screen and (max-width: 480px) {
|
||||
#version-switcher {
|
||||
.arangodb-version-switcher {
|
||||
display: none;
|
||||
}
|
||||
|
||||
|
@ -244,7 +244,7 @@ body {
|
|||
}
|
||||
|
||||
@media screen and (max-width: 330px) {
|
||||
#logo-small, .google-search {
|
||||
.arangodb-logo-small, .google-search {
|
||||
display: none;
|
||||
}
|
||||
}
|
|
@ -14,7 +14,7 @@ function appendHeader() {
|
|||
|
||||
|
||||
function rerenderNavbar() {
|
||||
$('#header').remove();
|
||||
$('.arangodb-header').remove();
|
||||
appendHeader();
|
||||
renderGoogleSearch();
|
||||
};
|
||||
|
@ -35,7 +35,7 @@ function appendHeader() {
|
|||
};
|
||||
addGoogleSrc();
|
||||
|
||||
$("#version-switcher").on("change", function(e) {
|
||||
$(".arangodb-version-switcher").on("change", function(e) {
|
||||
window.location.href = "https://docs.arangodb.com/" + e.target.value;
|
||||
});
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@ arangosh> db._query(<span class="hljs-string">`FOR i IN 1..100
|
|||
<span class="hljs-string">"scannedFull"</span> : <span class="hljs-number">0</span>,
|
||||
<span class="hljs-string">"scannedIndex"</span> : <span class="hljs-number">0</span>,
|
||||
<span class="hljs-string">"filtered"</span> : <span class="hljs-number">0</span>,
|
||||
<span class="hljs-string">"executionTime"</span> : <span class="hljs-number">0.0010228157043457031</span>
|
||||
<span class="hljs-string">"executionTime"</span> : <span class="hljs-number">0.0004038810729980469</span>
|
||||
},
|
||||
<span class="hljs-string">"warnings"</span> : [ ]
|
||||
}
|
||||
|
|
|
@ -7,15 +7,15 @@ arangosh> c.getExtra();
|
|||
<span class="hljs-string">"scannedFull"</span> : <span class="hljs-number">0</span>,
|
||||
<span class="hljs-string">"scannedIndex"</span> : <span class="hljs-number">0</span>,
|
||||
<span class="hljs-string">"filtered"</span> : <span class="hljs-number">0</span>,
|
||||
<span class="hljs-string">"executionTime"</span> : <span class="hljs-number">0.00018596649169921875</span>
|
||||
<span class="hljs-string">"executionTime"</span> : <span class="hljs-number">0.00007510185241699219</span>
|
||||
},
|
||||
<span class="hljs-string">"profile"</span> : {
|
||||
<span class="hljs-string">"initializing"</span> : <span class="hljs-number">0.0000059604644775390625</span>,
|
||||
<span class="hljs-string">"parsing"</span> : <span class="hljs-number">0.00003910064697265625</span>,
|
||||
<span class="hljs-string">"optimizing ast"</span> : <span class="hljs-number">0.0000059604644775390625</span>,
|
||||
<span class="hljs-string">"instantiating plan"</span> : <span class="hljs-number">0.000025987625122070312</span>,
|
||||
<span class="hljs-string">"optimizing plan"</span> : <span class="hljs-number">0.000053882598876953125</span>,
|
||||
<span class="hljs-string">"executing"</span> : <span class="hljs-number">0.00006914138793945312</span>
|
||||
<span class="hljs-string">"initializing"</span> : <span class="hljs-number">0.0000019073486328125</span>,
|
||||
<span class="hljs-string">"parsing"</span> : <span class="hljs-number">0.000014066696166992188</span>,
|
||||
<span class="hljs-string">"optimizing ast"</span> : <span class="hljs-number">0.0000040531158447265625</span>,
|
||||
<span class="hljs-string">"instantiating plan"</span> : <span class="hljs-number">0.0000069141387939453125</span>,
|
||||
<span class="hljs-string">"optimizing plan"</span> : <span class="hljs-number">0.000029087066650390625</span>,
|
||||
<span class="hljs-string">"executing"</span> : <span class="hljs-number">0.0000247955322265625</span>
|
||||
},
|
||||
<span class="hljs-string">"warnings"</span> : [ ]
|
||||
}
|
||||
|
|
|
@ -13,7 +13,7 @@ arangosh> db._query(<span class="hljs-string">`
|
|||
<span class="hljs-string">"scannedFull"</span> : <span class="hljs-number">0</span>,
|
||||
<span class="hljs-string">"scannedIndex"</span> : <span class="hljs-number">0</span>,
|
||||
<span class="hljs-string">"filtered"</span> : <span class="hljs-number">0</span>,
|
||||
<span class="hljs-string">"executionTime"</span> : <span class="hljs-number">0.0011429786682128906</span>
|
||||
<span class="hljs-string">"executionTime"</span> : <span class="hljs-number">0.0004699230194091797</span>
|
||||
},
|
||||
<span class="hljs-string">"warnings"</span> : [ ]
|
||||
}
|
||||
|
@ -31,7 +31,7 @@ arangosh> db._query({
|
|||
<span class="hljs-string">"scannedFull"</span> : <span class="hljs-number">0</span>,
|
||||
<span class="hljs-string">"scannedIndex"</span> : <span class="hljs-number">0</span>,
|
||||
<span class="hljs-string">"filtered"</span> : <span class="hljs-number">0</span>,
|
||||
<span class="hljs-string">"executionTime"</span> : <span class="hljs-number">0.0011050701141357422</span>
|
||||
<span class="hljs-string">"executionTime"</span> : <span class="hljs-number">0.00047087669372558594</span>
|
||||
},
|
||||
<span class="hljs-string">"warnings"</span> : [ ]
|
||||
}
|
||||
|
|
|
@ -29,7 +29,7 @@ arangosh> stmt.explain();
|
|||
},
|
||||
<span class="hljs-string">"indexes"</span> : [
|
||||
{
|
||||
<span class="hljs-string">"id"</span> : <span class="hljs-string">"10"</span>,
|
||||
<span class="hljs-string">"id"</span> : <span class="hljs-string">"11"</span>,
|
||||
<span class="hljs-string">"type"</span> : <span class="hljs-string">"hash"</span>,
|
||||
<span class="hljs-string">"fields"</span> : [
|
||||
<span class="hljs-string">"user"</span>
|
||||
|
|
|
@ -7,8 +7,8 @@ shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial
|
|||
EOF
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">202</span> Accepted
|
||||
etag: <span class="hljs-number">8005</span>
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
etag: <span class="hljs-number">8005</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
|
|
|
@ -12,8 +12,8 @@ shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial
|
|||
EOF
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">202</span> Accepted
|
||||
etag: <span class="hljs-number">8029</span>
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
etag: <span class="hljs-number">8029</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
|
|
|
@ -5,8 +5,8 @@ shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial
|
|||
EOF
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">202</span> Accepted
|
||||
etag: <span class="hljs-number">8128</span>
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
etag: <span class="hljs-number">8128</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
|
|
|
@ -5,8 +5,8 @@ shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial
|
|||
EOF
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">202</span> Accepted
|
||||
etag: <span class="hljs-number">8153</span>
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
etag: <span class="hljs-number">8153</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
|
|
|
@ -16,8 +16,8 @@ shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial
|
|||
EOF
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">202</span> Accepted
|
||||
etag: <span class="hljs-number">8214</span>
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
etag: <span class="hljs-number">8214</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
shell> curl -X DELETE --dump - http://localhost:8529/_api/gharial/social/edge/relation
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">202</span> Accepted
|
||||
etag: <span class="hljs-number">8421</span>
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
etag: <span class="hljs-number">8421</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
shell> curl --dump - http://localhost:8529/_api/gharial/social/edge/relation/aliceAndBob
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">200</span> OK
|
||||
etag: <span class="hljs-number">8497</span>
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
etag: <span class="hljs-number">8497</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
shell> curl --dump - http://localhost:8529/_api/gharial/myGraph
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">200</span> OK
|
||||
etag: <span class="hljs-number">8533</span>
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
etag: <span class="hljs-number">8533</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
shell> curl --dump - http://localhost:8529/_api/gharial/social/vertex/female/alice
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">200</span> OK
|
||||
etag: <span class="hljs-number">8562</span>
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
etag: <span class="hljs-number">8562</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
|
|
|
@ -5,8 +5,8 @@ shell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/gharia
|
|||
EOF
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">202</span> Accepted
|
||||
etag: <span class="hljs-number">8898</span>
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
etag: <span class="hljs-number">8898</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
|
|
|
@ -5,8 +5,8 @@ shell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/gharia
|
|||
EOF
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">202</span> Accepted
|
||||
etag: <span class="hljs-number">8957</span>
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
etag: <span class="hljs-number">8957</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
|
|
|
@ -7,8 +7,8 @@ shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/gharial/
|
|||
EOF
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">202</span> Accepted
|
||||
etag: <span class="hljs-number">9015</span>
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
etag: <span class="hljs-number">9015</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
shell> curl -X DELETE --dump - http://localhost:8529/_api/gharial/social/vertex/otherVertices
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">202</span> Accepted
|
||||
etag: <span class="hljs-number">9073</span>
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
etag: <span class="hljs-number">9073</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
|
|
|
@ -15,8 +15,8 @@ shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/gharial/
|
|||
EOF
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">202</span> Accepted
|
||||
etag: <span class="hljs-number">9156</span>
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
etag: <span class="hljs-number">9156</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
|
|
|
@ -6,8 +6,8 @@ shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/gharial/
|
|||
EOF
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">202</span> Accepted
|
||||
etag: <span class="hljs-number">9253</span>
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
etag: <span class="hljs-number">9253</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
|
|
|
@ -6,7 +6,7 @@ EOF
|
|||
|
||||
HTTP/1.1 202 Accepted
|
||||
content-type: text/plain; charset=utf-8
|
||||
x-arango-async-id: 146581063364867
|
||||
x-arango-async-id: 146616976397868
|
||||
|
||||
shell> curl --dump - http://localhost:8529/_api/job/pending
|
||||
|
||||
|
@ -14,9 +14,9 @@ HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">200</span> O
|
|||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
|
||||
[
|
||||
<span class="hljs-string">"146581063364867"</span>
|
||||
<span class="hljs-string">"146616976397868"</span>
|
||||
]
|
||||
shell> curl -X PUT --dump - http://localhost:8529/_api/job/146581063364867/cancel
|
||||
shell> curl -X PUT --dump - http://localhost:8529/_api/job/146616976397868/cancel
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">200</span> OK
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
|
@ -30,5 +30,5 @@ HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">200</span> O
|
|||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
|
||||
[
|
||||
<span class="hljs-string">"146581063364867"</span>
|
||||
<span class="hljs-string">"146616976397868"</span>
|
||||
]
|
||||
|
|
|
@ -2,7 +2,7 @@ shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:85
|
|||
|
||||
HTTP/1.1 202 Accepted
|
||||
content-type: text/plain; charset=utf-8
|
||||
x-arango-async-id: 146581063364873
|
||||
x-arango-async-id: 146616976397874
|
||||
|
||||
shell> curl -X DELETE --dump - http://localhost:8529/_api/job/all
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@ shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:85
|
|||
|
||||
HTTP/1.1 202 Accepted
|
||||
content-type: text/plain; charset=utf-8
|
||||
x-arango-async-id: 146581063364877
|
||||
x-arango-async-id: 146616976397878
|
||||
|
||||
shell> curl --dump - http://localhost:8529/_admin/time
|
||||
|
||||
|
@ -10,11 +10,11 @@ HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">200</span> O
|
|||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"time"</span> : <span class="hljs-number">1465810635.698738</span>,
|
||||
<span class="hljs-string">"time"</span> : <span class="hljs-number">1466169779.749448</span>,
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">false</span>,
|
||||
<span class="hljs-string">"code"</span> : <span class="hljs-number">200</span>
|
||||
}
|
||||
shell> curl -X DELETE --dump - http://localhost:8529/_api/job/expired?stamp=1465810635.698738
|
||||
shell> curl -X DELETE --dump - http://localhost:8529/_api/job/expired?stamp=1466169779.749448
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">200</span> OK
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
|
|
|
@ -2,9 +2,9 @@ shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:85
|
|||
|
||||
HTTP/1.1 202 Accepted
|
||||
content-type: text/plain; charset=utf-8
|
||||
x-arango-async-id: 146581063364884
|
||||
x-arango-async-id: 146616976397885
|
||||
|
||||
shell> curl -X DELETE --dump - http://localhost:8529/_api/job/146581063364884
|
||||
shell> curl -X DELETE --dump - http://localhost:8529/_api/job/146616976397885
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">200</span> OK
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
|
|
|
@ -2,13 +2,13 @@ shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:85
|
|||
|
||||
HTTP/1.1 202 Accepted
|
||||
content-type: text/plain; charset=utf-8
|
||||
x-arango-async-id: 146581063364894
|
||||
x-arango-async-id: 146616976397895
|
||||
|
||||
shell> curl -X PUT --dump - http://localhost:8529/_api/job/146581063364894
|
||||
shell> curl -X PUT --dump - http://localhost:8529/_api/job/146616976397895
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">200</span> OK
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
x-arango-<span class="hljs-keyword">async</span>-id: <span class="hljs-number">146581063364894</span>
|
||||
x-arango-<span class="hljs-keyword">async</span>-id: <span class="hljs-number">146616976397895</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"server"</span> : <span class="hljs-string">"arango"</span>,
|
||||
|
|
|
@ -6,13 +6,13 @@ EOF
|
|||
|
||||
HTTP/1.1 202 Accepted
|
||||
content-type: text/plain; charset=utf-8
|
||||
x-arango-async-id: 146581063364898
|
||||
x-arango-async-id: 146616976397899
|
||||
|
||||
shell> curl -X PUT --dump - http://localhost:8529/_api/job/146581063364898
|
||||
shell> curl -X PUT --dump - http://localhost:8529/_api/job/146616976397899
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">400</span> Bad Request
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
x-arango-<span class="hljs-keyword">async</span>-id: <span class="hljs-number">146581063364898</span>
|
||||
x-arango-<span class="hljs-keyword">async</span>-id: <span class="hljs-number">146616976397899</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"error"</span> : <span class="hljs-literal">true</span>,
|
||||
|
|
|
@ -2,7 +2,7 @@ shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:85
|
|||
|
||||
HTTP/1.1 202 Accepted
|
||||
content-type: text/plain; charset=utf-8
|
||||
x-arango-async-id: 146581063364902
|
||||
x-arango-async-id: 146616976397903
|
||||
|
||||
shell> curl --dump - http://localhost:8529/_api/job/done
|
||||
|
||||
|
@ -10,5 +10,5 @@ HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">200</span> O
|
|||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
|
||||
[
|
||||
<span class="hljs-string">"146581063364902"</span>
|
||||
<span class="hljs-string">"146616976397903"</span>
|
||||
]
|
||||
|
|
|
@ -2,7 +2,7 @@ shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:85
|
|||
|
||||
HTTP/1.1 202 Accepted
|
||||
content-type: text/plain; charset=utf-8
|
||||
x-arango-async-id: 146581063364906
|
||||
x-arango-async-id: 146616976397907
|
||||
|
||||
shell> curl --dump - http://localhost:8529/_api/job/pending
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@ shell> curl --header 'x-arango-async: store' --dump - http://localhost:8529/_adm
|
|||
|
||||
HTTP/1.1 202 Accepted
|
||||
content-type: text/plain; charset=utf-8
|
||||
x-arango-async-id: 146581063364910
|
||||
x-arango-async-id: 146616976397911
|
||||
|
||||
shell> curl --dump - http://localhost:8529/_api/job/pending
|
||||
|
||||
|
@ -10,9 +10,9 @@ HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">200</span> O
|
|||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
|
||||
[
|
||||
<span class="hljs-string">"146581063364910"</span>
|
||||
<span class="hljs-string">"146616976397911"</span>
|
||||
]
|
||||
shell> curl -X DELETE --dump - http://localhost:8529/_api/job/146581063364910
|
||||
shell> curl -X DELETE --dump - http://localhost:8529/_api/job/146616976397911
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">200</span> OK
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
|
|
|
@ -2,13 +2,13 @@ shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:85
|
|||
|
||||
HTTP/1.1 202 Accepted
|
||||
content-type: text/plain; charset=utf-8
|
||||
x-arango-async-id: 146581063364915
|
||||
x-arango-async-id: 146616976397916
|
||||
|
||||
shell> curl -X PUT --dump - http://localhost:8529/_api/job/146581063364915
|
||||
shell> curl -X PUT --dump - http://localhost:8529/_api/job/146616976397916
|
||||
|
||||
HTTP/<span class="hljs-number">1.1</span> <span class="hljs-number">200</span> OK
|
||||
content-type: application/json; charset=utf<span class="hljs-number">-8</span>
|
||||
x-arango-<span class="hljs-keyword">async</span>-id: <span class="hljs-number">146581063364915</span>
|
||||
x-arango-<span class="hljs-keyword">async</span>-id: <span class="hljs-number">146616976397916</span>
|
||||
|
||||
{
|
||||
<span class="hljs-string">"server"</span> : <span class="hljs-string">"arango"</span>,
|
||||
|
|
|
@ -2,9 +2,9 @@ shell> curl --header 'x-arango-async: store' --dump - http://localhost:8529/_adm
|
|||
|
||||
HTTP/1.1 202 Accepted
|
||||
content-type: text/plain; charset=utf-8
|
||||
x-arango-async-id: 146581063364919
|
||||
x-arango-async-id: 146616976397920
|
||||
|
||||
shell> curl --dump - http://localhost:8529/_api/job/146581063364919
|
||||
shell> curl --dump - http://localhost:8529/_api/job/146616976397920
|
||||
|
||||
HTTP/1.1 204 No Content
|
||||
content-type: text/plain; charset=utf-8
|
||||
|
|