mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of github.com:arangodb/arangodb into devel
This commit is contained in:
commit
65c82ee38e
|
@ -1,6 +1,6 @@
|
|||
!CHAPTER Filesystem Module
|
||||
|
||||
`require('@arangodb/fs')`
|
||||
`require('fs')`
|
||||
|
||||
The implementation tries to follow the CommonJS specification where possible.
|
||||
[Filesystem/A/0](http://wiki.commonjs.org/wiki/Filesystem/A/0).
|
||||
|
|
|
@ -8,8 +8,6 @@ The global variables `global`, `process`, `console`, `Buffer`, `__filename` and
|
|||
|
||||
ArangoDB supports a number of modules for compatibility with Node.js, including:
|
||||
|
||||
util: 4.1.0 (modified)
|
||||
|
||||
* [assert](http://nodejs.org/api/assert.html) implements basic assertion and testing functions.
|
||||
|
||||
* [buffer](http://nodejs.org/api/buffer.html) implements a binary data type for JavaScript.
|
||||
|
@ -20,6 +18,8 @@ util: 4.1.0 (modified)
|
|||
|
||||
* [events](http://nodejs.org/api/events.html) implements an event emitter.
|
||||
|
||||
* [fs](FileSystem.md) provides a file system API for the manipulation of paths, directories, files, links, and the construction of file streams. ArangoDB implements most [Filesystem/A](http://wiki.commonjs.org/wiki/Filesystem/A) functions.
|
||||
|
||||
* [module](http://nodejs.org/api/modules.html) provides direct access to the module system.
|
||||
|
||||
* [path](http://nodejs.org/api/path.html) implements functions dealing with filenames and paths.
|
||||
|
@ -62,7 +62,6 @@ The following Node.js modules are not available at all:
|
|||
`dgram`,
|
||||
`dns`,
|
||||
`domain`,
|
||||
`fs` (but see `@arangodb/fs` below),
|
||||
`http`,
|
||||
`https`,
|
||||
`os`,
|
||||
|
@ -75,8 +74,6 @@ The following Node.js modules are not available at all:
|
|||
|
||||
There are a large number of ArangoDB-specific modules using the `@arangodb` namespace, mostly for internal use by ArangoDB itself. The following however are noteworthy:
|
||||
|
||||
* [@arangodb/fs](FileSystem.md) provides a file system API for the manipulation of paths, directories, files, links, and the construction of file streams. ArangoDB implements most [Filesystem/A](http://wiki.commonjs.org/wiki/Filesystem/A) functions.
|
||||
|
||||
* [@arangodb/crypto](Crypto.md) provides various cryptography functions including hashing algorithms.
|
||||
|
||||
* [@arangodb/foxx](../../Foxx/README.md) is the namespace providing the various building blocks of the Foxx microservice framework.
|
||||
|
|
|
@ -0,0 +1,64 @@
|
|||
!SECTION Launching an ArangoDB cluster on multiple machines
|
||||
|
||||
Essentially, one can use the method from [the previous
|
||||
section](Local.md) to start an ArangoDB cluster on multiple machines as
|
||||
well. The only change is that one has to replace all local addresses `127.0.0.1` by the actual IP address of the corresponding server.
|
||||
|
||||
If we assume that you want to start your ArangoDB cluster on three different machines with the IP addresses
|
||||
|
||||
```
|
||||
192.168.1.1
|
||||
192.168.1.2
|
||||
192.168.1.3
|
||||
```
|
||||
|
||||
then the commands you have to use are (you can use host names if they can be resolved to IP addresses on all machines):
|
||||
|
||||
On 192.168.1.1:
|
||||
|
||||
```
|
||||
build/bin/arangod --server.endpoint tcp://0.0.0.0:5001 --server.authentication false --agency.id 0 --agency.size 3 --agency.supervision true agency1 &
|
||||
```
|
||||
|
||||
On 192.168.1.2:
|
||||
|
||||
```
|
||||
build/bin/arangod --server.endpoint tcp://0.0.0.0:5002 --server.authentication false --agency.id 1 --agency.size 3 --agency.supervision true agency2 &
|
||||
```
|
||||
|
||||
On 192.168.1.3:
|
||||
|
||||
```
|
||||
build/bin/arangod --server.endpoint tcp://0.0.0.0:5003 --server.authentication false --agency.id 2 --agency.size 3 --agency.endpoint tcp://192.168.1.1:5001 --agency.endpoint tcp://192.168.1.2:5002 --agency.endpoint tcp://192.168.1.3:5003 --agency.notify true --agency.supervision true agency3 &
|
||||
```
|
||||
|
||||
On 192.168.1.1:
|
||||
```
|
||||
build/bin/arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8529 --cluster.my-address tcp://192.168.1.1:8529 --cluster.my-local-info db1 --cluster.my-role PRIMARY --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5002 --cluster.agency-endpoint tcp://192.168.1.3:5003 primary1 &
|
||||
```
|
||||
|
||||
On 192.168.1.2:
|
||||
```
|
||||
build/bin/arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8530 --cluster.my-address tcp://192.168.1.2:8530 --cluster.my-local-info db2 --cluster.my-role PRIMARY --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5002 --cluster.agency-endpoint tcp://192.168.1.3:5003 primary2 &
|
||||
```
|
||||
|
||||
On 192.168.1.3:
|
||||
```
|
||||
build/bin/arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8531 --cluster.my-address tcp://192.168.1.3:8531 --cluster.my-local-info coord1 --cluster.my-role COORDINATOR --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5002 --cluster.agency-endpoint tcp://192.168.1.3:5003 coordinator &
|
||||
```
|
||||
|
||||
Obviously, it would no longer be necessary to use different port numbers on different servers. We have chosen to keep all port numbers in comparison to the local setup to minimize the necessary changes.
|
||||
|
||||
If you want to setup secondaries, the following commands will do the job:
|
||||
|
||||
On 192.168.1.2:
|
||||
|
||||
curl -f -X PUT --data '{"primary": "DBServer001", "oldSecondary": "none", "newSecondary": "Secondary001"}' -H "Content-Type: application/json" http://192.168.1.3:8531/_admin/cluster/replaceSecondary && arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8629 --cluster.my-id Secondary001 --cluster.my-address tcp://192.168.1.2:8629 --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5002 --cluster.agency-endpoint tcp://192.168.1.3:5003 secondary1 &
|
||||
|
||||
On 192.168.1.1:
|
||||
|
||||
curl -f -X PUT --data '{"primary": "DBServer002", "oldSecondary": "none", "newSecondary": "Secondary002"}' -H "Content-Type: application/json" http://192.168.1.3:8531/_admin/cluster/replaceSecondary && arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8630 --cluster.my-id Secondary002 --cluster.my-address tcp://192.168.1.1:8630 --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5002 --cluster.agency-endpoint tcp://192.168.1.3:5003 secondary2 &
|
||||
|
||||
Note that we have started the `Secondary002` on the same machine as `DBServer001` and `Secondary001` on the same machine as `DBServer002` to avoid that a complete pair is lost when a machine fails. Furthermore, note that ArangoDB does not yet perform automatic failover to the secondary, if a primary fails. This only works in the Apache Mesos setting. For synchronous replication, automatic failover always works and you do not need to setup secondaries for this.
|
||||
|
||||
After having swallowed these longish commands, we hope that you appreciate the simplicity of the setup with Apache Mesos and DC/OS.
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
!SUBSECTION Networking
|
||||
|
||||
A bit of extra care has to be invested in how Docker isolates its network. By default it fully isolates the network and by doing so an endpoint like `--server.endpoint tcp://0.0.0.0:8529` will only bind to all interfaces inside the Docker container which does not include any external interface on the host machine. This may be sufficient if you just want to access it locally but in case you want to expose it to the outside you must facilitate Dockers port forwarding using the `-p` command line option. Be sure to check the [official Docker documentation](https://docs.docker.com/engine/reference/run/).
|
||||
A bit of extra care has to be invested due to the way in which Docker isolates its network. By default it fully isolates the network and by doing so an endpoint like `--server.endpoint tcp://0.0.0.0:8529` will only bind to all interfaces inside the Docker container which does not include any external interface on the host machine. This may be sufficient if you just want to access it locally but in case you want to expose it to the outside you must facilitate Docker's port forwarding using the `-p` command line option. Be sure to check the [official Docker documentation](https://docs.docker.com/engine/reference/run/).
|
||||
|
||||
To simply make ArangoDB available on all host interfaces on port 8529:
|
||||
|
||||
|
@ -12,12 +12,12 @@ Another possibility is to start Docker via network mode `host`. This is possible
|
|||
|
||||
!SUBSUBSECTION Docker and Cluster tasks
|
||||
|
||||
To start the cluster via Docker is basically the same as starting [locally](Local.md). However just like with the single networking image we will face networking issues. You can simply use the `-p` flag to make the individual task available on the host machine or you could use Docker's [links](https://docs.docker.com/engine/reference/run/) to enable task intercommunication.
|
||||
To start the cluster via Docker is basically the same as starting [locally](Local.md) or on [multiple machines](Distributed.md). However just like with the single networking image we will face networking issues. You can simply use the `-p` flag to make the individual task available on the host machine or you could use Docker's [links](https://docs.docker.com/engine/reference/run/) to enable task intercommunication.
|
||||
|
||||
Please note that there are some flags that specify how ArangoDB can reach a task from the outside. These are very important and built for this exact usecase. An example configuration might look like this:
|
||||
|
||||
```
|
||||
docker run -e ARANGO_NO_AUTH=1 -p 192.168.1.1:10000:8529 arangodb arangod --server.endpoint tcp://0.0.0.0:8529 --cluster.my-address tcp://192.168.1.1:10000 --cluster.my-local-info db1 --cluster.my-role PRIMARY --cluster.agency-endpoint tcp://192.168.1.1:5001
|
||||
docker run -e ARANGO_NO_AUTH=1 -p 192.168.1.1:10000:8529 arangodb/arangodb arangod --server.endpoint tcp://0.0.0.0:8529 --cluster.my-address tcp://192.168.1.1:10000 --cluster.my-local-info db1 --cluster.my-role PRIMARY --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5002 --cluster.agency-endpoint tcp://192.168.1.3:5003
|
||||
```
|
||||
|
||||
This will start a primary DB server within a Docker container with an isolated network. Within the Docker container it will bind on all interfaces (this will be 127.0.0.1:8529 and some internal Docker ip on port 8529). By supplying `-p 192.168.1.1:10000:8529` we are establishing a port forwarding from our local IP (192.168.1.1 in this example) to port 8529 inside the container. Within the command we are telling arangod how it can be reached from the outside `--cluster.my-address tcp://192.168.1.1:10000`. This information will be forwarded to the agency so that the other tasks in your cluster can see how this particular DBServer may be reached.
|
||||
This will start a primary DB server within a Docker container with an isolated network. Within the Docker container it will bind to all interfaces (this will be 127.0.0.1:8529 and some internal Docker ip on port 8529). By supplying `-p 192.168.1.1:10000:8529` we are establishing a port forwarding from our local IP (192.168.1.1 port 10000 in this example) to port 8529 inside the container. Within the command we are telling arangod how it can be reached from the outside `--cluster.my-address tcp://192.168.1.1:10000`. This information will be forwarded to the agency so that the other tasks in your cluster can see how this particular DBServer may be reached.
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
!SECTION Launching a local ArangoDB cluster for testing
|
||||
!SECTION Launching an ArangoDB cluster for testing
|
||||
|
||||
An ArangoDB cluster consists of several running tasks which form the cluster. ArangoDB itself won't start or monitor any of these tasks. So it will need some kind of supervisor which is monitoring and starting these tasks. For production usage we recommend using Apache Mesos as the cluster supervisor.
|
||||
|
||||
However starting a cluster locally is possible and a very easy method to get a first impression of what an ArangoDB cluster looks like.
|
||||
However starting a cluster manually is possible and is a very easy method to get a first impression of what an ArangoDB cluster looks like.
|
||||
|
||||
The easiest way to start a local cluster for testing purposes is to run `scripts/startLocalCluster.sh` from a clone of the [source repository](https://github.com/ArangoDB/ArangoDB) after compiling ArangoDB from source (see instructions in the file `README_maintainers.md` in the repository). This will start 1 Agency, 2 DBServers and 1 Coordinator. To stop the cluster issue `scripts/stopLocalCluster.sh`.
|
||||
|
||||
This section will discuss the required parameters for every role in an ArangoDB cluster. Be sure to read the [Architecture](../Scalability/README.md) documentation to get a basic understanding of the underlying architecture and the involved roles in an ArangoDB cluster.
|
||||
This section will discuss the required parameters for every role in an ArangoDB cluster. Be sure to read the [Architecture](../Scalability/Architecture.md) documentation to get a basic understanding of the underlying architecture and the involved roles in an ArangoDB cluster.
|
||||
|
||||
In the following sections we will go through the relevant options per role.
|
||||
|
||||
|
@ -21,34 +21,51 @@ To start up the agency in its fault tolerant mode set the `--agency.size` to `3`
|
|||
So in summary this is what your startup might look like:
|
||||
|
||||
```
|
||||
build/bin/arangod --server.endpoint tcp://127.0.0.1:5001 --server.authentication false --agency.id 0 --agency.size 3 agency1 &
|
||||
build/bin/arangod --server.endpoint tcp://127.0.0.1:5002 --server.authentication false --agency.id 1 --agency.size 3 agency2 &
|
||||
build/bin/arangod --server.endpoint tcp://127.0.0.1:5003 --server.authentication false --agency.id 2 --agency.size 3 --agency.endpoint tcp://127.0.0.1:5001 --agency.endpoint tcp://127.0.0.1:5002 --agency.endpoint tcp://127.0.0.1:5003 --agency.notify true agency3 &
|
||||
build/bin/arangod --server.endpoint tcp://0.0.0.0:5001 --server.authentication false --agency.id 0 --agency.size 3 --agency.supervision true agency1 &
|
||||
build/bin/arangod --server.endpoint tcp://0.0.0.0:5002 --server.authentication false --agency.id 1 --agency.size 3 --agency.supervision true agency2 &
|
||||
build/bin/arangod --server.endpoint tcp://0.0.0.0:5003 --server.authentication false --agency.id 2 --agency.size 3 --agency.endpoint tcp://127.0.0.1:5001 --agency.endpoint tcp://127.0.0.1:5002 --agency.endpoint tcp://127.0.0.1:5003 --agency.notify true --agency.supervision true agency3 &
|
||||
```
|
||||
|
||||
Note in particular that the endpoint descriptions given under `--agency.endpoint` must not use the IP address `0.0.0.0` because they must contain an actual address that can be routed to the corresponding server. The `0.0.0.0` in `--server.endpoint` simply means that the server binds itself to all available network devices with all available IP addresses.
|
||||
|
||||
If you are happy with a single agent, then simply use a single command like this:
|
||||
```
|
||||
build/bin/arangod --server.endpoint tcp://0.0.0.0:5001 --server.authentication false --agency.id 0 --agency.size 1 --agency.endpoint tcp://127.0.0.1:5001 --agency.supervision true agency1 &
|
||||
```
|
||||
|
||||
Furthermore, in the following sections when `--cluster.agency-endpoint` is used multiple times to specify all three agent addresses, just use a single option `--cluster.agency-endpoint tcp://127.0.0.1:5001` instead.
|
||||
|
||||
|
||||
!SUBSECTION Coordinators and DBServers
|
||||
|
||||
These two roles share a common set of relevant options. First you should specify the role using `--cluster.my-role`. This can either be `PRIMARY` (a database server) or `COORDINATOR`. Both also need some unique information with which they will register in the agency. This could for example be some combination of host name and port or whatever you have at hand. However it must be unique for each instance and be provided as value for `--cluster.my-local-info`. Furthermore provide the external endpoint (IP and port) of the task via `--cluster.my-address`.
|
||||
These two roles share a common set of relevant options. First you should specify the role using `--cluster.my-role`. This can either be `PRIMARY` (a database server) or `COORDINATOR`. Both need some unique information with which they will register in the agency, too. This could for example be some combination of host name and port or whatever you have at hand. However it must be unique for each instance and be provided as value for `--cluster.my-local-info`. Furthermore provide the external endpoint (IP and port) of the task via `--cluster.my-address`.
|
||||
|
||||
The following is a full-example of what it might look like:
|
||||
|
||||
```
|
||||
build/bin/arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8529 --cluster.my-address tcp://127.0.0.1:8529 --cluster.my-local-info db1 --cluster.my-role PRIMARY --cluster.agency-endpoint tcp://127.0.0.1:5001 primary1 &
|
||||
build/bin/arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8530 --cluster.my-address tcp://127.0.0.1:8530 --cluster.my-local-info db2 --cluster.my-role PRIMARY --cluster.agency-endpoint tcp://127.0.0.1:5001 primary2 &
|
||||
build/bin/arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8531 --cluster.my-address tcp://127.0.0.1:8531 --cluster.my-local-info coord1 --cluster.my-role COORDINATOR --cluster.agency-endpoint tcp://127.0.0.1:5001 coordinator &
|
||||
build/bin/arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8529 --cluster.my-address tcp://127.0.0.1:8529 --cluster.my-local-info db1 --cluster.my-role PRIMARY --cluster.agency-endpoint tcp://127.0.0.1:5001 --cluster.agency-endpoint tcp://127.0.0.1:5002 --cluster.agency-endpoint tcp://127.0.0.1:5003 primary1 &
|
||||
build/bin/arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8530 --cluster.my-address tcp://127.0.0.1:8530 --cluster.my-local-info db2 --cluster.my-role PRIMARY --cluster.agency-endpoint tcp://127.0.0.1:5001 --cluster.agency-endpoint tcp://127.0.0.1:5002 --cluster.agency-endpoint tcp://127.0.0.1:5003 primary2 &
|
||||
build/bin/arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8531 --cluster.my-address tcp://127.0.0.1:8531 --cluster.my-local-info coord1 --cluster.my-role COORDINATOR --cluster.agency-endpoint tcp://127.0.0.1:5001 --cluster.agency-endpoint tcp://127.0.0.1:5002 --cluster.agency-endpoint tcp://127.0.0.1:5003 coordinator &
|
||||
```
|
||||
|
||||
Upon registering with the agency during startup the cluster will assign an ID to every task. Instead of starting with `--cluster.my-local-info` you can start with this ID from now on. The generated ID will be printed out to the log or can be accessed via the http API by calling `http://server-address/_admin/server/id`.
|
||||
Note in particular that the endpoint descriptions given under `--cluster.my-address` and `--cluster.agency-endpoint` must not use the IP address `0.0.0.0` because they must contain an actual address that can be routed to the corresponding server. The `0.0.0.0` in `--server.endpoint` simply means that the server binds itself to all available network devices with all available IP addresses.
|
||||
|
||||
Upon registering with the agency during startup the cluster will assign an ID to every task. The generated ID will be printed out to the log or can be accessed via the http API by calling `http://server-address/_admin/server/id`.
|
||||
Should you ever have to restart a task, simply reuse the same value for `--cluster.my-local-info` and the same ID will be picked.
|
||||
|
||||
You have now launched a complete ArangoDB cluster and can contact its coordinator at the endpoint `tcp://127.0.0.1:8531`, which means that you can reach the web UI under `http://127.0.0.1:8531`.
|
||||
|
||||
|
||||
!SUBSECTION Secondaries
|
||||
|
||||
Secondaries need a bit more work. Secondaries need to have some primary assigned. To do that there is a special route. To register a Secondary you must first find out the Server-ID of the primary server. Then generate your own ID for the secondary (put it into "newSecondary") you are about to start and call one of the coordinators like this:
|
||||
Secondaries need a bit more work. Secondaries need to have some primary assigned. To do that there is a special route. To register a secondary you must first find out the Server-ID of the primary server. Then generate your own ID for the secondary you are about to start and call one of the coordinators like this (replace the value of "newSecondary" in the command):
|
||||
|
||||
curl -f -X PUT --data '{"primary": "DBServer1", "oldSecondary": "none", "newSecondary": "Secondary1"}' -H "Content-Type: application/json" http://localhost:8530/_admin/cluster/replaceSecondary
|
||||
curl -f -X PUT --data '{"primary": "DBServer001", "oldSecondary": "none", "newSecondary": "Secondary001"}' -H "Content-Type: application/json" http://127.0.0.1:8531/_admin/cluster/replaceSecondary
|
||||
|
||||
If that call was successful you can start the secondary. Instead of providing `--cluster.my-local-info` you should now provide the ID used in the curl call above via `--cluster.my-id`. You can omit the `--cluster.my-role` in this case. The secondary will find out from the agency about its role.
|
||||
|
||||
To sum it up:
|
||||
|
||||
curl -f -X PUT --data '{"primary": "DBServer1", "oldSecondary": "none", "newSecondary": "Secondary1"}' -H "Content-Type: application/json" http://localhost:8530/_admin/cluster/replaceSecondary && arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8629 --cluster.my-id Secondary1 --cluster.agency-endpoint tcp://127.0.0.1:5001 secondary1 &
|
||||
curl -f -X PUT --data '{"primary": "DBServer2", "oldSecondary": "none", "newSecondary": "Secondary2"}' -H "Content-Type: application/json" http://localhost:8530/_admin/cluster/replaceSecondary && arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8630 --cluster.my-id Secondary2 --cluster.agency-endpoint tcp://127.0.0.1:5001 secondary2 &
|
||||
curl -f -X PUT --data '{"primary": "DBServer001", "oldSecondary": "none", "newSecondary": "Secondary001"}' -H "Content-Type: application/json" http://127.0.0.1:8531/_admin/cluster/replaceSecondary && arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8629 --cluster.my-id Secondary001 --cluster.my-address tcp://127.0.0.1:8629 --cluster.agency-endpoint tcp://127.0.0.1:5001 --cluster.agency-endpoint tcp://127.0.0.1:5002 --cluster.agency-endpoint tcp://127.0.0.1:5003 secondary1 &
|
||||
curl -f -X PUT --data '{"primary": "DBServer002", "oldSecondary": "none", "newSecondary": "Secondary002"}' -H "Content-Type: application/json" http://127.0.0.1:8531/_admin/cluster/replaceSecondary && arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8630 --cluster.my-id Secondary002 --cluster.my-address tcp://127.0.0.1:8630 --cluster.agency-endpoint tcp://127.0.0.1:5001 --cluster.agency-endpoint tcp://127.0.0.1:5002 --cluster.agency-endpoint tcp://127.0.0.1:5003 secondary2 &
|
||||
|
||||
|
|
|
@ -8,15 +8,17 @@ DC/OS is the recommended way to install a cluster as it eases much of the proces
|
|||
|
||||
!SUBSUBSECTION Installing
|
||||
|
||||
First prepare a DC/OS cluster by going to https://dcos.io and following
|
||||
the instructions there.
|
||||
|
||||
DC/OS comes with its own package management. Packages can be installed from the so called "Universe". As an official DC/OS partner ArangoDB can be installed from there straight away.
|
||||
|
||||
1. Installing via DC/OS UI
|
||||
|
||||
1. Go to https://dcos.io and prepare a cluster
|
||||
2. Open your browser and go to the DC/OS admin interface
|
||||
3. Open the "Universe" tab
|
||||
4. Locate arangodb and hit "Install Package"
|
||||
5. Press "Install Package"
|
||||
1. Open your browser and go to the DC/OS admin interface
|
||||
2. Open the "Universe" tab
|
||||
3. Locate arangodb and hit "Install Package"
|
||||
4. Press "Install Package"
|
||||
|
||||
2. Installing via the DC/OS command line
|
||||
|
||||
|
@ -142,7 +144,7 @@ Carefully review the settings (especially the IPs and the resources). Then you c
|
|||
|
||||
curl -X POST -H "Content-Type: application/json" http://url-of-marathon/v2/apps -d @arangodb3.json
|
||||
|
||||
Alternatively use the web interface of Marathon to deploy ArangoDB.
|
||||
Alternatively use the web interface of Marathon to deploy ArangoDB. It has a JSON mode and you can use the above configuration file.
|
||||
|
||||
!SUBSUBSECTION Deinstallation via Marathon
|
||||
|
||||
|
|
|
@ -1,7 +1,15 @@
|
|||
!CHAPTER Deployment
|
||||
|
||||
In this chapter we describe various possibilities to deploy ArangoDB.
|
||||
In particular for the cluster mode, there are different ways
|
||||
and we want to highlight their advantages and disadvantages.
|
||||
We even document in detail, how to set up a cluster by simply starting
|
||||
various ArangoDB processes on different machines, either directly
|
||||
or using Docker containers.
|
||||
|
||||
- [Single instance](Single.md)
|
||||
- [Cluster: Local test setup](Local.md)
|
||||
- [Cluster: DC/OS, Apache Mesos and Marathon](Mesos.md)
|
||||
- [Cluster: Test setup on a local machine](Local.md)
|
||||
- [Cluster: Starting processes on different machines](Distributed.md)
|
||||
- [Cluster: Launching an ArangoDB cluster using Docker containers](Docker.md)
|
||||
|
||||
|
|
|
@ -38,6 +38,6 @@ There are three options:
|
|||
|
||||
To get going quickly:
|
||||
|
||||
`docker run -e ARANGO_RANDOM_ROOT_PASSWORD=1 arangodb`
|
||||
`docker run -e ARANGO_RANDOM_ROOT_PASSWORD=1 arangodb/arangodb`
|
||||
|
||||
For an in depth guide about Docker and ArangoDB please check the official documentation: https://hub.docker.com/r/_/arangodb/
|
||||
For an in-depth guide about Docker and ArangoDB please check the official documentation: https://hub.docker.com/r/arangodb/arangodb/. Note that we are using the image `arangodb/arangodb` here which is always the most current one. There is also the "official" one called `arangodb` whose documentation is here: https://hub.docker.com/_/arangodb/
|
||||
|
|
|
@ -115,8 +115,9 @@
|
|||
#
|
||||
* [Deployment](Deployment/README.md)
|
||||
* [Single instance](Deployment/Single.md)
|
||||
* [Cluster: Mesos, DC/OS](Deployment/Mesos.md)
|
||||
* [Cluster: Local test](Deployment/Local.md)
|
||||
* [Cluster: DC/OS](Deployment/Mesos.md)
|
||||
* [Cluster: Processes](Deployment/Distributed.md)
|
||||
* [Cluster: Docker](Deployment/Docker.md)
|
||||
#
|
||||
* [Administration](Administration/README.md)
|
||||
|
|
|
@ -46,6 +46,8 @@ Agent::Agent(config_t const& config)
|
|||
: Thread("Agent"),
|
||||
_config(config),
|
||||
_lastCommitIndex(0),
|
||||
_spearhead(this),
|
||||
_readDB(this),
|
||||
_nextCompationAfter(_config.compactionStepSize) {
|
||||
_state.configure(this);
|
||||
_constituent.configure(this);
|
||||
|
@ -238,12 +240,12 @@ bool Agent::recvAppendEntriesRPC(term_t term,
|
|||
|
||||
// 2. Reply false if log does not contain an entry at prevLogIndex
|
||||
// whose term matches prevLogTerm ($5.3)
|
||||
if (!_state.find(prevIndex, prevTerm)) {
|
||||
/*if (!_state.find(prevIndex, prevTerm)) {
|
||||
LOG_TOPIC(WARN, Logger::AGENCY)
|
||||
<< "Unable to find matching entry to previous entry (index,term) = ("
|
||||
<< prevIndex << "," << prevTerm << ")";
|
||||
// return false;
|
||||
}
|
||||
}*/
|
||||
|
||||
// 3. If an existing entry conflicts with a new one (same index
|
||||
// but different terms), delete the existing entry and all that
|
||||
|
@ -349,8 +351,8 @@ bool Agent::load() {
|
|||
reportIn(id(), _state.lastLog().index);
|
||||
|
||||
LOG_TOPIC(DEBUG, Logger::AGENCY) << "Starting spearhead worker.";
|
||||
_spearhead.start(this);
|
||||
_readDB.start(this);
|
||||
_spearhead.start();
|
||||
_readDB.start();
|
||||
|
||||
LOG_TOPIC(DEBUG, Logger::AGENCY) << "Starting constituent personality.";
|
||||
auto queryRegistry = QueryRegistryFeature::QUERY_REGISTRY;
|
||||
|
|
|
@ -193,22 +193,26 @@ void Constituent::follow(term_t t) {
|
|||
/// Become leader
|
||||
void Constituent::lead(std::vector<bool> const& votes) {
|
||||
|
||||
MUTEX_LOCKER(guard, _castLock);
|
||||
{
|
||||
MUTEX_LOCKER(guard, _castLock);
|
||||
|
||||
if (_role != LEADER) {
|
||||
std::stringstream ss;
|
||||
ss << _id << ": Converted to leader in term " << _term << " with votes (";
|
||||
for (auto const& vote : votes) {
|
||||
ss << vote;
|
||||
if (_role == LEADER) {
|
||||
return;
|
||||
}
|
||||
ss << ")";
|
||||
|
||||
LOG_TOPIC(DEBUG, Logger::AGENCY) << ss.str();
|
||||
_agent->lead(); // We need to rebuild spear_head and read_db;
|
||||
|
||||
_role = LEADER;
|
||||
_leaderID = _id;
|
||||
}
|
||||
|
||||
std::stringstream ss;
|
||||
ss << _id << ": Converted to leader in term " << _term << " with votes (";
|
||||
for (auto const& vote : votes) {
|
||||
ss << vote;
|
||||
}
|
||||
ss << ")";
|
||||
|
||||
_role = LEADER;
|
||||
_leaderID = _id;
|
||||
LOG_TOPIC(DEBUG, Logger::AGENCY) << ss.str();
|
||||
_agent->lead(); // We need to rebuild spear_head and read_db;
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -180,6 +180,8 @@ std::vector<VPackSlice> State::slices(
|
|||
std::vector<VPackSlice> slices;
|
||||
MUTEX_LOCKER(mutexLocker, _logLock);
|
||||
|
||||
|
||||
|
||||
if (start < _log.front().index) { // no start specified
|
||||
start = _log.front().index;
|
||||
}
|
||||
|
@ -260,12 +262,26 @@ bool State::createCollection(std::string const& name) {
|
|||
}
|
||||
|
||||
bool State::loadCollections(TRI_vocbase_t* vocbase, bool waitForSync) {
|
||||
_vocbase = vocbase;
|
||||
|
||||
_vocbase = vocbase;
|
||||
_options.waitForSync = waitForSync;
|
||||
_options.silent = true;
|
||||
|
||||
return loadPersisted();
|
||||
if (loadPersisted()) {
|
||||
if (_log.empty()) {
|
||||
std::shared_ptr<Buffer<uint8_t>> buf = std::make_shared<Buffer<uint8_t>>();
|
||||
VPackSlice value = arangodb::basics::VelocyPackHelper::EmptyObjectValue();
|
||||
buf->append(value.startAs<char const>(), value.byteSize());
|
||||
_log.push_back(log_t(arangodb::consensus::index_t(0), term_t(0),
|
||||
arangodb::consensus::id_t(0), buf));
|
||||
persist(
|
||||
0, 0, (std::numeric_limits<arangodb::consensus::id_t>::max)(), value);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
|
||||
}
|
||||
|
||||
bool State::loadPersisted() {
|
||||
|
|
|
@ -106,7 +106,8 @@ inline static bool endpointPathFromUrl(
|
|||
|
||||
|
||||
/// Ctor with name
|
||||
Store::Store(std::string const& name) : Thread(name), _node(name, this) {}
|
||||
Store::Store(Agent* agent, std::string const& name)
|
||||
: Thread(name), _agent(agent), _node(name, this) {}
|
||||
|
||||
|
||||
/// Copy ctor
|
||||
|
@ -514,13 +515,6 @@ bool Store::start() {
|
|||
}
|
||||
|
||||
|
||||
// Start thread with agent
|
||||
bool Store::start(Agent* agent) {
|
||||
_agent = agent;
|
||||
return start();
|
||||
}
|
||||
|
||||
|
||||
// Work ttls and callbacks
|
||||
void Store::run() {
|
||||
CONDITION_LOCKER(guard, _cv);
|
||||
|
|
|
@ -37,7 +37,7 @@ class Agent;
|
|||
class Store : public arangodb::Thread {
|
||||
public:
|
||||
/// @brief Construct with name
|
||||
explicit Store(std::string const& name = "root");
|
||||
explicit Store(Agent* agent, std::string const& name = "root");
|
||||
|
||||
/// @brief Destruct
|
||||
virtual ~Store();
|
||||
|
@ -69,9 +69,6 @@ class Store : public arangodb::Thread {
|
|||
/// @brief Start thread
|
||||
bool start();
|
||||
|
||||
/// @brief Start thread with access to agent
|
||||
bool start(Agent*);
|
||||
|
||||
/// @brief Set name
|
||||
void name(std::string const& name);
|
||||
|
||||
|
|
|
@ -199,6 +199,9 @@ function indexSuite() {
|
|||
|
||||
collection.drop();
|
||||
|
||||
if (internal.coverage || internal.valgrind) {
|
||||
internal.wait(2, false);
|
||||
}
|
||||
try {
|
||||
collection.index(idx.id);
|
||||
fail();
|
||||
|
|
Loading…
Reference in New Issue