diff --git a/Documentation/Books/AQL/Graphs/Traversals.md b/Documentation/Books/AQL/Graphs/Traversals.md index e03c526f06..88217cb38b 100644 --- a/Documentation/Books/AQL/Graphs/Traversals.md +++ b/Documentation/Books/AQL/Graphs/Traversals.md @@ -277,7 +277,7 @@ the vertex IDs: @startDocuBlockInline GRAPHTRAV_02_traverse_all_a @EXAMPLE_AQL{GRAPHTRAV_02_traverse_all_a} @DATASET{traversalGraph} - FOR v IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' + FOR v IN 1..3 OUTBOUND 'circles/A' GRAPH 'traversalGraph' RETURN v._key @END_EXAMPLE_AQL @endDocuBlock GRAPHTRAV_02_traverse_all_a diff --git a/Documentation/Books/Manual/Architecture/DeploymentModes/MasterSlave/Architecture.md b/Documentation/Books/Manual/Architecture/DeploymentModes/MasterSlave/Architecture.md index 7292c97f1d..42922247bf 100644 --- a/Documentation/Books/Manual/Architecture/DeploymentModes/MasterSlave/Architecture.md +++ b/Documentation/Books/Manual/Architecture/DeploymentModes/MasterSlave/Architecture.md @@ -70,7 +70,6 @@ and maximum tick values per logfile: require("@arangodb/replication").logger.tickRanges(); - ### Replication Applier **Purpose** diff --git a/Documentation/Books/Manual/Deployment/ActiveFailover/ManualStart.md b/Documentation/Books/Manual/Deployment/ActiveFailover/ManualStart.md index f6feab0b6a..4878d700b2 100644 --- a/Documentation/Books/Manual/Deployment/ActiveFailover/ManualStart.md +++ b/Documentation/Books/Manual/Deployment/ActiveFailover/ManualStart.md @@ -40,31 +40,31 @@ So in summary these are the commands to start an _Agency_ of size 3: ``` arangod --server.endpoint tcp://0.0.0.0:5001 \ - --agency.my-address=tcp://127.0.0.1:5001 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://127.0.0.1:5001 \ - --agency.supervision true \ - --database.directory agent1 & + --agency.my-address=tcp://127.0.0.1:5001 \ + --server.authentication false \ + --agency.activate true \ + --agency.size 3 \ + --agency.endpoint tcp://127.0.0.1:5001 \ + --agency.supervision true \ + --database.directory agent1 & arangod --server.endpoint tcp://0.0.0.0:5002 \ - --agency.my-address=tcp://127.0.0.1:5002 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://127.0.0.1:5001 \ - --agency.supervision true \ - --database.directory agent2 & + --agency.my-address=tcp://127.0.0.1:5002 \ + --server.authentication false \ + --agency.activate true \ + --agency.size 3 \ + --agency.endpoint tcp://127.0.0.1:5001 \ + --agency.supervision true \ + --database.directory agent2 & arangod --server.endpoint tcp://0.0.0.0:5003 \ - --agency.my-address=tcp://127.0.0.1:5003 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://127.0.0.1:5001 \ - --agency.supervision true \ - --database.directory agent3 & + --agency.my-address=tcp://127.0.0.1:5003 \ + --server.authentication false \ + --agency.activate true \ + --agency.size 3 \ + --agency.endpoint tcp://127.0.0.1:5001 \ + --agency.supervision true \ + --database.directory agent3 & ``` ### Single Server Test Instances @@ -73,25 +73,25 @@ To start the two single server instances, you can use the following commands: ``` arangod --server.authentication false \ - --server.endpoint tcp://127.0.0.1:6001 \ - --cluster.my-address tcp://127.0.0.1:6001 \ - --cluster.my-role SINGLE \ - --cluster.agency-endpoint tcp://127.0.0.1:5001 \ - --cluster.agency-endpoint tcp://127.0.0.1:5002 \ - --cluster.agency-endpoint tcp://127.0.0.1:5003 \ - 
--replication.automatic-failover true \ - --database.directory singleserver6001 & + --server.endpoint tcp://127.0.0.1:6001 \ + --cluster.my-address tcp://127.0.0.1:6001 \ + --cluster.my-role SINGLE \ + --cluster.agency-endpoint tcp://127.0.0.1:5001 \ + --cluster.agency-endpoint tcp://127.0.0.1:5002 \ + --cluster.agency-endpoint tcp://127.0.0.1:5003 \ + --replication.automatic-failover true \ + --database.directory singleserver6001 & arangod --server.authentication false \ - --server.endpoint tcp://127.0.0.1:6002 \ - --cluster.my-address tcp://127.0.0.1:6002 \ - --cluster.my-role SINGLE \ - --cluster.agency-endpoint tcp://127.0.0.1:5001 \ - --cluster.agency-endpoint tcp://127.0.0.1:5002 \ - --cluster.agency-endpoint tcp://127.0.0.1:5003 \ - --replication.automatic-failover true \ - --database.directory singleserver6002 & -``` + --server.endpoint tcp://127.0.0.1:6002 \ + --cluster.my-address tcp://127.0.0.1:6002 \ + --cluster.my-role SINGLE \ + --cluster.agency-endpoint tcp://127.0.0.1:5001 \ + --cluster.agency-endpoint tcp://127.0.0.1:5002 \ + --cluster.agency-endpoint tcp://127.0.0.1:5003 \ + --replication.automatic-failover true \ + --database.directory singleserver6002 & +``` Multiple Machines ----------------- @@ -127,39 +127,39 @@ On 192.168.1.1: ``` arangod --server.endpoint tcp://0.0.0.0:8531 \ - --agency.my-address tcp://192.168.1.1:8531 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.supervision true \ - --database.directory agent + --agency.my-address tcp://192.168.1.1:8531 \ + --server.authentication false \ + --agency.activate true \ + --agency.size 3 \ + --agency.supervision true \ + --database.directory agent ``` On 192.168.1.2: ``` arangod --server.endpoint tcp://0.0.0.0:8531 \ - --agency.my-address tcp://192.168.1.2:8531 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.supervision true \ - --database.directory agent + --agency.my-address tcp://192.168.1.2:8531 \ + --server.authentication false \ + --agency.activate true \ + --agency.size 3 \ + --agency.supervision true \ + --database.directory agent ``` On 192.168.1.3: ``` arangod --server.endpoint tcp://0.0.0.0:8531 \ - --agency.my-address tcp://192.168.1.3:8531 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://192.168.1.1:8531 \ - --agency.endpoint tcp://192.168.1.2:8531 \ - --agency.endpoint tcp://192.168.1.3:8531 \ - --agency.supervision true \ - --database.directory agent + --agency.my-address tcp://192.168.1.3:8531 \ + --server.authentication false \ + --agency.activate true \ + --agency.size 3 \ + --agency.endpoint tcp://192.168.1.1:8531 \ + --agency.endpoint tcp://192.168.1.2:8531 \ + --agency.endpoint tcp://192.168.1.3:8531 \ + --agency.supervision true \ + --database.directory agent ``` ### Single Server Instances @@ -168,14 +168,14 @@ On 192.168.1.1: ``` arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8529 \ - --cluster.my-address tcp://192.168.1.1:8529 \ - --cluster.my-role SINGLE \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --replication.automatic-failover true \ - --database.directory singleserver & + --server.endpoint tcp://0.0.0.0:8529 \ + --cluster.my-address tcp://192.168.1.1:8529 \ + --cluster.my-role SINGLE \ + --cluster.agency-endpoint tcp://192.168.1.1:8531 \ + --cluster.agency-endpoint tcp://192.168.1.2:8531 \ + 
--cluster.agency-endpoint tcp://192.168.1.3:8531 \ + --replication.automatic-failover true \ + --database.directory singleserver & ``` On 192.168.1.2: @@ -185,14 +185,14 @@ instance: ``` arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8529 \ - --cluster.my-address tcp://192.168.1.2:8529 \ - --cluster.my-role SINGLE \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --replication.automatic-failover true \ - --database.directory singleserver & + --server.endpoint tcp://0.0.0.0:8529 \ + --cluster.my-address tcp://192.168.1.2:8529 \ + --cluster.my-role SINGLE \ + --cluster.agency-endpoint tcp://192.168.1.1:8531 \ + --cluster.agency-endpoint tcp://192.168.1.2:8531 \ + --cluster.agency-endpoint tcp://192.168.1.3:8531 \ + --replication.automatic-failover true \ + --database.directory singleserver & ``` **Note:** in the above commands, you can use host names, if they can be resolved, @@ -220,13 +220,13 @@ An example configuration might look like this: ``` docker run -e ARANGO_NO_AUTH=1 -p 192.168.1.1:10000:8529 arangodb/arangodb arangod \ - --server.endpoint tcp://0.0.0.0:8529\ - --cluster.my-address tcp://192.168.1.1:10000 \ - --cluster.my-role SINGLE \ - --cluster.agency-endpoint tcp://192.168.1.1:9001 \ - --cluster.agency-endpoint tcp://192.168.1.2:9001 \ - --cluster.agency-endpoint tcp://192.168.1.3:9001 \ - --replication.automatic-failover true + --server.endpoint tcp://0.0.0.0:8529\ + --cluster.my-address tcp://192.168.1.1:10000 \ + --cluster.my-role SINGLE \ + --cluster.agency-endpoint tcp://192.168.1.1:9001 \ + --cluster.agency-endpoint tcp://192.168.1.2:9001 \ + --cluster.agency-endpoint tcp://192.168.1.3:9001 \ + --replication.automatic-failover true ``` This will start a single server within a Docker container with an isolated network. 
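As a quick check for any of the Active Failover variants shown above, you can ask either single server instance which endpoint currently acts as leader. The snippet below is a minimal sketch, assuming the local test setup from the first example (ports 6001 and 6002, authentication disabled); it relies on the `/_api/cluster/endpoints` route, which is expected to report the reachable endpoints with the current leader first.

```
# Query either instance for the current endpoint list;
# the leader is expected to appear as the first entry.
curl -s http://127.0.0.1:6001/_api/cluster/endpoints
curl -s http://127.0.0.1:6002/_api/cluster/endpoints
```

For the multi-machine and Docker setups the same check applies; only the host and port in the URL change.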
diff --git a/Documentation/Books/Manual/Deployment/Cluster/ManualStart.md b/Documentation/Books/Manual/Deployment/Cluster/ManualStart.md index f8b0971be4..dc91e0474a 100644 --- a/Documentation/Books/Manual/Deployment/Cluster/ManualStart.md +++ b/Documentation/Books/Manual/Deployment/Cluster/ManualStart.md @@ -41,31 +41,31 @@ So in summary these are the commands to start an _Agency_ of size 3: ``` arangod --server.endpoint tcp://0.0.0.0:5001 \ - --agency.my-address=tcp://127.0.0.1:5001 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://127.0.0.1:5001 \ - --agency.supervision true \ - --database.directory agent1 & + --agency.my-address=tcp://127.0.0.1:5001 \ + --server.authentication false \ + --agency.activate true \ + --agency.size 3 \ + --agency.endpoint tcp://127.0.0.1:5001 \ + --agency.supervision true \ + --database.directory agent1 & arangod --server.endpoint tcp://0.0.0.0:5002 \ - --agency.my-address=tcp://127.0.0.1:5002 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://127.0.0.1:5001 \ - --agency.supervision true \ - --database.directory agent2 & + --agency.my-address=tcp://127.0.0.1:5002 \ + --server.authentication false \ + --agency.activate true \ + --agency.size 3 \ + --agency.endpoint tcp://127.0.0.1:5001 \ + --agency.supervision true \ + --database.directory agent2 & arangod --server.endpoint tcp://0.0.0.0:5003 \ - --agency.my-address=tcp://127.0.0.1:5003 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://127.0.0.1:5001 \ - --agency.supervision true \ - --database.directory agent3 & + --agency.my-address=tcp://127.0.0.1:5003 \ + --server.authentication false \ + --agency.activate true \ + --agency.size 3 \ + --agency.endpoint tcp://127.0.0.1:5001 \ + --agency.supervision true \ + --database.directory agent3 & ``` ### Local Test DBServers and Coordinators @@ -82,46 +82,46 @@ The following is a full example of what it might look like. 
``` arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:6001 \ - --cluster.my-address tcp://127.0.0.1:6001 \ - --cluster.my-role DBSERVER \ - --cluster.agency-endpoint tcp://127.0.0.1:5001 \ - --cluster.agency-endpoint tcp://127.0.0.1:5002 \ - --cluster.agency-endpoint tcp://127.0.0.1:5003 \ - --database.directory dbserver1 & + --server.endpoint tcp://0.0.0.0:6001 \ + --cluster.my-address tcp://127.0.0.1:6001 \ + --cluster.my-role DBSERVER \ + --cluster.agency-endpoint tcp://127.0.0.1:5001 \ + --cluster.agency-endpoint tcp://127.0.0.1:5002 \ + --cluster.agency-endpoint tcp://127.0.0.1:5003 \ + --database.directory dbserver1 & arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:6002 \ - --cluster.my-address tcp://127.0.0.1:6002 \ - --cluster.my-role DBSERVER \ - --cluster.agency-endpoint tcp://127.0.0.1:5001 \ - --cluster.agency-endpoint tcp://127.0.0.1:5002 \ - --cluster.agency-endpoint tcp://127.0.0.1:5003 \ - --database.directory dbserver2 & + --server.endpoint tcp://0.0.0.0:6002 \ + --cluster.my-address tcp://127.0.0.1:6002 \ + --cluster.my-role DBSERVER \ + --cluster.agency-endpoint tcp://127.0.0.1:5001 \ + --cluster.agency-endpoint tcp://127.0.0.1:5002 \ + --cluster.agency-endpoint tcp://127.0.0.1:5003 \ + --database.directory dbserver2 & ``` **Coordinators:** ``` arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:7001 \ - --cluster.my-address tcp://127.0.0.1:7001 \ - --cluster.my-role COORDINATOR \ - --cluster.agency-endpoint tcp://127.0.0.1:5001 \ - --cluster.agency-endpoint tcp://127.0.0.1:5002 \ - --cluster.agency-endpoint tcp://127.0.0.1:5003 \ - --database.directory coordinator1 & + --server.endpoint tcp://0.0.0.0:7001 \ + --cluster.my-address tcp://127.0.0.1:7001 \ + --cluster.my-role COORDINATOR \ + --cluster.agency-endpoint tcp://127.0.0.1:5001 \ + --cluster.agency-endpoint tcp://127.0.0.1:5002 \ + --cluster.agency-endpoint tcp://127.0.0.1:5003 \ + --database.directory coordinator1 & ``` ``` arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:7002 \ - --cluster.my-address tcp://127.0.0.1:7002 \ - --cluster.my-role COORDINATOR \ - --cluster.agency-endpoint tcp://127.0.0.1:5001 \ - --cluster.agency-endpoint tcp://127.0.0.1:5002 \ - --cluster.agency-endpoint tcp://127.0.0.1:5003 \ - --database.directory coordinator2 & + --server.endpoint tcp://0.0.0.0:7002 \ + --cluster.my-address tcp://127.0.0.1:7002 \ + --cluster.my-role COORDINATOR \ + --cluster.agency-endpoint tcp://127.0.0.1:5001 \ + --cluster.agency-endpoint tcp://127.0.0.1:5002 \ + --cluster.agency-endpoint tcp://127.0.0.1:5003 \ + --database.directory coordinator2 & ``` Note in particular that the endpoint descriptions given under `--cluster.my-address` @@ -172,39 +172,39 @@ On 192.168.1.1: ``` arangod --server.endpoint tcp://0.0.0.0:8531 \ - --agency.my-address tcp://192.168.1.1:8531 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.supervision true \ - --database.directory agent + --agency.my-address tcp://192.168.1.1:8531 \ + --server.authentication false \ + --agency.activate true \ + --agency.size 3 \ + --agency.supervision true \ + --database.directory agent ``` On 192.168.1.2: ``` arangod --server.endpoint tcp://0.0.0.0:8531 \ - --agency.my-address tcp://192.168.1.2:8531 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.supervision true \ - --database.directory agent + --agency.my-address tcp://192.168.1.2:8531 \ + --server.authentication 
false \ + --agency.activate true \ + --agency.size 3 \ + --agency.supervision true \ + --database.directory agent ``` On 192.168.1.3: ``` arangod --server.endpoint tcp://0.0.0.0:8531 \ - --agency.my-address tcp://192.168.1.3:8531 \ - --server.authentication false \ - --agency.activate true \ - --agency.size 3 \ - --agency.endpoint tcp://192.168.1.1:8531 \ - --agency.endpoint tcp://192.168.1.2:8531 \ - --agency.endpoint tcp://192.168.1.3:8531 \ - --agency.supervision true \ - --database.directory agent + --agency.my-address tcp://192.168.1.3:8531 \ + --server.authentication false \ + --agency.activate true \ + --agency.size 3 \ + --agency.endpoint tcp://192.168.1.1:8531 \ + --agency.endpoint tcp://192.168.1.2:8531 \ + --agency.endpoint tcp://192.168.1.3:8531 \ + --agency.supervision true \ + --database.directory agent ``` ### DBServers @@ -217,39 +217,39 @@ On 192.168.1.1: ``` arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8530 \ - --cluster.my-address tcp://192.168.1.1:8530 \ - --cluster.my-role DBSERVER \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory dbserver & + --server.endpoint tcp://0.0.0.0:8530 \ + --cluster.my-address tcp://192.168.1.1:8530 \ + --cluster.my-role DBSERVER \ + --cluster.agency-endpoint tcp://192.168.1.1:8531 \ + --cluster.agency-endpoint tcp://192.168.1.2:8531 \ + --cluster.agency-endpoint tcp://192.168.1.3:8531 \ + --database.directory dbserver & ``` On 192.168.1.2: ``` arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8530 \ - --cluster.my-address tcp://192.168.1.2:8530 \ - --cluster.my-role DBSERVER \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory dbserver & + --server.endpoint tcp://0.0.0.0:8530 \ + --cluster.my-address tcp://192.168.1.2:8530 \ + --cluster.my-role DBSERVER \ + --cluster.agency-endpoint tcp://192.168.1.1:8531 \ + --cluster.agency-endpoint tcp://192.168.1.2:8531 \ + --cluster.agency-endpoint tcp://192.168.1.3:8531 \ + --database.directory dbserver & ``` On 192.168.1.3: ``` sudo arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8530 \ - --cluster.my-address tcp://192.168.1.3:8530 \ - --cluster.my-role DBSERVER \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory dbserver & + --server.endpoint tcp://0.0.0.0:8530 \ + --cluster.my-address tcp://192.168.1.3:8530 \ + --cluster.my-role DBSERVER \ + --cluster.agency-endpoint tcp://192.168.1.1:8531 \ + --cluster.agency-endpoint tcp://192.168.1.2:8531 \ + --cluster.agency-endpoint tcp://192.168.1.3:8531 \ + --database.directory dbserver & ``` ### Coordinators @@ -258,39 +258,39 @@ On 192.168.1.1: ``` arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8529 \ - --cluster.my-address tcp://192.168.1.1:8529 \ - --cluster.my-role COORDINATOR \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory coordinator & + --server.endpoint tcp://0.0.0.0:8529 \ + --cluster.my-address tcp://192.168.1.1:8529 \ + --cluster.my-role COORDINATOR \ + --cluster.agency-endpoint tcp://192.168.1.1:8531 \ + 
--cluster.agency-endpoint tcp://192.168.1.2:8531 \ + --cluster.agency-endpoint tcp://192.168.1.3:8531 \ + --database.directory coordinator & ``` On 192.168.1.2: ``` arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8529 \ - --cluster.my-address tcp://192.168.1.2:8529 \ - --cluster.my-role COORDINATOR \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory coordinator & + --server.endpoint tcp://0.0.0.0:8529 \ + --cluster.my-address tcp://192.168.1.2:8529 \ + --cluster.my-role COORDINATOR \ + --cluster.agency-endpoint tcp://192.168.1.1:8531 \ + --cluster.agency-endpoint tcp://192.168.1.2:8531 \ + --cluster.agency-endpoint tcp://192.168.1.3:8531 \ + --database.directory coordinator & ``` On 192.168.1.3: ``` arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8529 \ - --cluster.my-address tcp://192.168.1.3:8529 \ - --cluster.my-role COORDINATOR \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory coordinator & + --server.endpoint tcp://0.0.0.0:8529 \ + --cluster.my-address tcp://192.168.1.3:8529 \ + --cluster.my-role COORDINATOR \ + --cluster.agency-endpoint tcp://192.168.1.1:8531 \ + --cluster.agency-endpoint tcp://192.168.1.2:8531 \ + --cluster.agency-endpoint tcp://192.168.1.3:8531 \ + --database.directory coordinator & ``` **Note:** in the above commands, you can use host names, if they can be resolved, @@ -304,22 +304,22 @@ On 192.168.1.4: ``` arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8530 \ - --cluster.my-address tcp://192.168.4.1:8530 \ - --cluster.my-role DBSERVER \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory dbserver & - + --server.endpoint tcp://0.0.0.0:8530 \ + --cluster.my-address tcp://192.168.4.1:8530 \ + --cluster.my-role DBSERVER \ + --cluster.agency-endpoint tcp://192.168.1.1:8531 \ + --cluster.agency-endpoint tcp://192.168.1.2:8531 \ + --cluster.agency-endpoint tcp://192.168.1.3:8531 \ + --database.directory dbserver & + arangod --server.authentication=false \ - --server.endpoint tcp://0.0.0.0:8529 \ - --cluster.my-address tcp://192.168.1.4:8529 \ - --cluster.my-role COORDINATOR \ - --cluster.agency-endpoint tcp://192.168.1.1:8531 \ - --cluster.agency-endpoint tcp://192.168.1.2:8531 \ - --cluster.agency-endpoint tcp://192.168.1.3:8531 \ - --database.directory coordinator & + --server.endpoint tcp://0.0.0.0:8529 \ + --cluster.my-address tcp://192.168.1.4:8529 \ + --cluster.my-role COORDINATOR \ + --cluster.agency-endpoint tcp://192.168.1.1:8531 \ + --cluster.agency-endpoint tcp://192.168.1.2:8531 \ + --cluster.agency-endpoint tcp://192.168.1.3:8531 \ + --database.directory coordinator & ``` Manual Start in Docker @@ -344,12 +344,12 @@ An example configuration might look like this: ``` docker run -e ARANGO_NO_AUTH=1 -p 192.168.1.1:10000:8530 arangodb/arangodb arangod \ - --server.endpoint tcp://0.0.0.0:8530 \ - --cluster.my-address tcp://192.168.1.1:10000 \ - --cluster.my-role DBSERVER \ - --cluster.agency-endpoint tcp://192.168.1.1:9001 \ - --cluster.agency-endpoint tcp://192.168.1.2:9001 \ - --cluster.agency-endpoint tcp://192.168.1.3:9001 + --server.endpoint tcp://0.0.0.0:8530 \ + --cluster.my-address 
tcp://192.168.1.1:10000 \ + --cluster.my-role DBSERVER \ + --cluster.agency-endpoint tcp://192.168.1.1:9001 \ + --cluster.agency-endpoint tcp://192.168.1.2:9001 \ + --cluster.agency-endpoint tcp://192.168.1.3:9001 ``` This will start a _DBServer_ within a Docker container with an isolated network. diff --git a/Documentation/Books/Manual/Deployment/SingleInstance/ManualStart.md b/Documentation/Books/Manual/Deployment/SingleInstance/ManualStart.md index 96a41b762e..128a78985b 100644 --- a/Documentation/Books/Manual/Deployment/SingleInstance/ManualStart.md +++ b/Documentation/Books/Manual/Deployment/SingleInstance/ManualStart.md @@ -11,7 +11,7 @@ We will assume that your IP is 127.0.0.1 and that the port 8529 is free: ``` arangod --server.endpoint tcp://0.0.0.0:8529 \ - --database.directory standalone & + --database.directory standalone & ``` Manual Start in Docker @@ -36,7 +36,7 @@ An example configuration might look like this: ``` docker run -e ARANGO_NO_AUTH=1 -p 192.168.1.1:10000:8529 arangodb/arangodb arangod \ - --server.endpoint tcp://0.0.0.0:8529\ + --server.endpoint tcp://0.0.0.0:8529\ ``` This will start a single server within a Docker container with an isolated network. diff --git a/Documentation/Books/Manual/Indexing/Hash.md b/Documentation/Books/Manual/Indexing/Hash.md index 9f6f20598f..dd4893d982 100644 --- a/Documentation/Books/Manual/Indexing/Hash.md +++ b/Documentation/Books/Manual/Indexing/Hash.md @@ -118,7 +118,8 @@ details, including the index-identifier, is returned. @endDocuBlock ensureHashIndexArray - +``` -For more information see [Creating Indexes in Background](IndexBasics.md#creating-indexes-in-background) +For more information see "Creating Indexes in Background" in the [Index basics](IndexBasics.md) page. +###} Ensure uniqueness of relations in edge collections -------------------------------------------------- diff --git a/Documentation/Books/Manual/Indexing/IndexBasics.md b/Documentation/Books/Manual/Indexing/IndexBasics.md index 42591d21a1..a9a36d7006 100644 --- a/Documentation/Books/Manual/Indexing/IndexBasics.md +++ b/Documentation/Books/Manual/Indexing/IndexBasics.md @@ -22,13 +22,15 @@ are covered by an edge collection's edge index automatically. Using the system attribute `_id` in user-defined indexes is not possible, but indexing `_key`, `_rev`, `_from`, and `_to` is. - +creating indexes in "background". The collection remains available during index creation; +see the section "Creating Indexes in Background" for more information. +###} ArangoDB provides the following index types: @@ -549,7 +551,7 @@ based on the costs it estimates, even if a vertex centric index might in fact be faster. Vertex centric indexes are more likely to be chosen for highly connected graphs and with RocksDB storage engine. - +Building an index is always a write-heavy operation (internally), so it is a good idea to build indexes +during times of lower load. +###} diff --git a/Documentation/Books/Manual/Indexing/Skiplist.md b/Documentation/Books/Manual/Indexing/Skiplist.md index 656e93dfb7..b235252e8b 100644 --- a/Documentation/Books/Manual/Indexing/Skiplist.md +++ b/Documentation/Books/Manual/Indexing/Skiplist.md @@ -187,8 +187,9 @@ and will match. - +For more information see "Creating Indexes in Background" in the [Index basics](IndexBasics.md) page.
+###} diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures34.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures34.md index 0596c80a70..9c7225549c 100644 --- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures34.md +++ b/Documentation/Books/Manual/ReleaseNotes/NewFeatures34.md @@ -757,18 +757,18 @@ are used later in the query. The optimizer will add automatic *KEEP* clauses to the COLLECT statement then if possible. For example, the query - + FOR doc1 IN collection1 FOR doc2 IN collection2 - COLLECT x = doc1.x INTO g - RETURN { x, all: g[*].doc1.y } + COLLECT x = doc1.x INTO g + RETURN { x, all: g[*].doc1.y } will automatically be turned into - + FOR doc1 IN collection1 FOR doc2 IN collection2 - COLLECT x = doc1.x INTO g KEEP doc1 - RETURN { x, all: g[*].doc1.y } + COLLECT x = doc1.x INTO g KEEP doc1 + RETURN { x, all: g[*].doc1.y } This prevents variable `doc2` from being temporarily stored in the variable `g`, which saves processing time and memory, especially for big result sets. diff --git a/Documentation/Books/Manual/Upgrading/GeneralInfo/README.md b/Documentation/Books/Manual/Upgrading/GeneralInfo/README.md index 5621f165b2..20446bba58 100644 --- a/Documentation/Books/Manual/Upgrading/GeneralInfo/README.md +++ b/Documentation/Books/Manual/Upgrading/GeneralInfo/README.md @@ -54,7 +54,7 @@ Upgrade Paths - Examples: - To upgrade from 3.2 to 3.3, first upgrade your 3.2 installation to 3.2.latest. - To upgrade from 3.3 to 3.4, first upgrade your 3.3 installation to 3.3.latest. - + ### Additional Notes Regarding Rolling Upgrades In addition to the paragraph above, rolling upgrades via the tool _Starter_ are supported, diff --git a/Documentation/DocuBlocks/Rest/AQL/post_api_aqlfunction.md b/Documentation/DocuBlocks/Rest/AQL/post_api_aqlfunction.md index 67114b7aa3..2a5cffc464 100644 --- a/Documentation/DocuBlocks/Rest/AQL/post_api_aqlfunction.md +++ b/Documentation/DocuBlocks/Rest/AQL/post_api_aqlfunction.md @@ -74,7 +74,7 @@ a descriptive error message var body = { name: "myfunctions::temperature::celsiustofahrenheit", code : "function (celsius) { return celsius * 1.8 + 32; }", - isDeterministic: true + isDeterministic: true }; var response = logCurlRequest('POST', url, body);