mirror of https://gitee.com/bigwinds/arangodb

Merge branch 'devel' of https://github.com/arangodb/arangodb into devel

# Conflicts:
#	scripts/startLocalCluster.sh

This commit is contained in: commit 730ea4755e
@@ -98,10 +98,6 @@ set(CMAKE_USE_LIBSSH2 OFF CACHE type BOOL)
set(CMAKE_USE_OPENSSL ON CACHE type BOOL)
# mop: super important...if this is off curl will not handle request timeouts < 1000ms
set(ENABLE_THREADED_RESOLVER ON CACHE type BOOL)
# bugfix for HAVE_POSIX_STRERROR_R define on cross compiling.
if (CROSS_COMPILING)
  add_definitions("-DHAVE_POSIX_STRERROR_R=1")
endif()
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/curl/curl-7.50.3)

################################################################################

@@ -0,0 +1,42 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief Library to build up VPack documents.
///
/// DISCLAIMER
///
/// Copyright 2015 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Max Neunhoeffer
/// @author Jan Steemann
/// @author Copyright 2015, ArangoDB GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

#ifndef VELOCYPACK_UTF8HELPER_H
#define VELOCYPACK_UTF8HELPER_H 1

#include "velocypack/velocypack-common.h"

namespace arangodb {
namespace velocypack {

struct Utf8Helper {
  static bool isValidUtf8(uint8_t const* p, ValueLength len);
};

}
}

#endif

@@ -151,6 +151,13 @@ using VPackSlimBuffer = arangodb::velocypack::SliceContainer;
#endif
#endif

#ifdef VELOCYPACK_UTF8HELPER_H
#ifndef VELOCYPACK_ALIAS_UTF8HELPER
#define VELOCYPACK_ALIAS_UTF8HELPER
using VPackUtf8Helper = arangodb::velocypack::Utf8Helper;
#endif
#endif

#ifdef VELOCYPACK_VALIDATOR_H
#ifndef VELOCYPACK_ALIAS_VALIDATOR
#define VELOCYPACK_ALIAS_VALIDATOR

@@ -0,0 +1,69 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief Library to build up VPack documents.
///
/// DISCLAIMER
///
/// Copyright 2015 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Max Neunhoeffer
/// @author Jan Steemann
/// @author Copyright 2015, ArangoDB GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

#include "velocypack/velocypack-common.h"
#include "velocypack/Utf8Helper.h"

using namespace arangodb::velocypack;

namespace {

static constexpr uint8_t ValidChar = 0;
static constexpr uint8_t InvalidChar = 1;

static const uint8_t states[] = {
  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 00..1f
  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 20..3f
  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 40..5f
  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 60..7f
  1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, // 80..9f
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, // a0..bf
  8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, // c0..df
  0xa,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x4,0x3,0x3, // e0..ef
  0xb,0x6,0x6,0x6,0x5,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8, // f0..ff
  0x0,0x1,0x2,0x3,0x5,0x8,0x7,0x1,0x1,0x1,0x4,0x6,0x1,0x1,0x1,0x1, // s0..s0
  1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,0,1,0,1,1,1,1,1,1, // s1..s2
  1,2,1,1,1,1,1,2,1,2,1,1,1,1,1,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1, // s3..s4
  1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,1,1,1,3,1,3,1,1,1,1,1,1, // s5..s6
  1,3,1,1,1,1,1,3,1,3,1,1,1,1,1,1,1,3,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // s7..s8
};

}

bool Utf8Helper::isValidUtf8(uint8_t const* p, ValueLength len) {
  uint8_t const* end = p + len;

  uint8_t state = ValidChar;
  while (p < end) {
    state = states[256 + state * 16 + states[*p]];
    if (state == InvalidChar) {
      return false;
    }
    ++p;
  }

  return (state == ValidChar);
}

@@ -28,6 +28,7 @@
#include "velocypack/Validator.h"
#include "velocypack/Exception.h"
#include "velocypack/Slice.h"
#include "velocypack/Utf8Helper.h"
#include "velocypack/ValueType.h"

using namespace arangodb::velocypack;

@@ -88,11 +89,23 @@ bool Validator::validate(uint8_t const* ptr, size_t length, bool isSubPart) const
      }

      case ValueType::String: {
        uint8_t const* p;
        ValueLength len;
        if (head == 0xbfU) {
          // long UTF-8 string. must be at least 9 bytes long so we
          // can read the entire string length safely
          validateBufferLength(1 + 8, length, true);
          p = ptr + 1 + 8;
          len = readInteger<ValueLength>(p, 8);
        } else {
          p = ptr + 1;
          len = head - 0x40U;
        }
        validateBufferLength(length - (p - ptr), length, true);

        if (options->validateUtf8Strings && !Utf8Helper::isValidUtf8(p, len)) {
          throw Exception(Exception::InvalidUtf8Sequence);
        }
        break;
      }

CHANGELOG (34 changes)
@@ -5,7 +5,6 @@ devel

      db._query("FOR i IN 1..100000 SORT i RETURN i", {}, { options: { memoryLimit: 100000 } });

* added convenience function to create vertex-centric indexes.
  Usage: `db.collection.ensureVertexCentricIndex("label", {type: "hash", direction: "outbound"})`
  That will create an index that can be used on OUTBOUND with filtering on the

@@ -26,7 +25,16 @@ edge attribute `label`.

* process.stdout.isTTY now returns `true` in arangosh and when running arangod with the `--console` flag

v3.1.3 (xxxx-xx-xx)
v3.1.4 (XXXX-XX-XX)
-------------------

* fixed issue #2211

* fixed issue #2204

v3.1.3 (2016-12-02)
-------------------

* fix a traversal bug when using skiplist indexes:

@@ -39,7 +47,8 @@ v3.1.3 (xxxx-xx-xx)

* fix endless loop when trying to create a collection with replicationFactor: -1

v3.1.2 (20XX-XX-XX)
v3.1.2 (2016-11-24)
-------------------

* added support for descriptions field in Foxx dependencies

@@ -48,8 +57,9 @@ v3.1.2 (20XX-XX-XX)
  Now they state correctly how many documents were fetched from the index and how many
  have been filtered.

* Prevent uniform shard distribution when replicationFactor == numServers

v3.1.1 (XXXX-XX-XX)
v3.1.1 (2016-11-15)
-------------------

* fixed issue #2176

@@ -343,13 +353,27 @@ v3.1.0 (2016-10-29)
* fixed issue #2156


v3.0.12 (XXXX-XX-XX)
v3.0.13 (XXXX-XX-XX)
--------------------

* fixed issue #2210


v3.0.12 (2016-11-23)
--------------------

* fixed issue #2176

* fixed issue #2168

* fixed issues #2149, #2159

* fixed error reporting for issue #2158

* fixed assembly linkage bug in CRC4 module

* added support for descriptions field in Foxx dependencies


v3.0.11 (2016-11-08)
--------------------

@@ -284,6 +284,18 @@ FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph'
It is guaranteed that at least one, but potentially more edges fulfill the condition.
All of the above filters can be defined on vertices in the exact same way.

### Filtering on the path vs. filtering on vertices or edges

Filtering on the path influences the iteration on your graph. If certain conditions
aren't met, the traversal may stop continuing along this path.

In contrast, filters on vertices or edges only express whether you're interested in the actual values of these
documents. Thus, they influence the list of returned documents (if you return v or e), similar
to specifying a non-null `min` value. If you specify a min value of 2, the traversal over the first
two nodes of these paths still has to be executed - you just won't see them in your result array.

Filters on vertices or edges behave similarly - the traverser has to walk along these nodes, since
you may be interested in documents further down the path.
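As a rough sketch in arangosh (the graph and the `theTruth` edge attribute come from the example above; the concrete filter values are only illustrative):

```js
// a path filter: branches whose first edge fails the condition
// are not followed any further
db._query("FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' " +
          "FILTER p.edges[0].theTruth == true RETURN v");

// a vertex filter: every path is still walked, the filter only
// decides which vertices end up in the result
db._query("FOR v, e, p IN 1..5 OUTBOUND 'circles/A' GRAPH 'traversalGraph' " +
          "FILTER v._key != 'A' RETURN v");
```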

### Examples

@@ -14,13 +14,6 @@
<option value="3.1">v3.1</option>
<option value="3.0">v3.0</option>
<option value="2.8">v2.8</option>
<option value="2.7">v2.7</option>
<option value="2.6">v2.6</option>
<option value="2.5">v2.5</option>
<option value="2.4">v2.4</option>
<option value="2.3">v2.3</option>
<option value="2.2">v2.2</option>
<option value="2.1">v2.1</option>
</select>
<div class="google-search">
<gcse:searchbox-only></gcse:searchbox-only>

@@ -103,7 +103,7 @@ allowed to use:
    @EXAMPLE_ARANGOSH_OUTPUT{02_workWithAQL_memoryLimit}
    |db._query(
    |  'FOR i IN 1..100000 SORT i RETURN i', {}, {
    |  options: { memoryLimit: 100000 }
    |  memoryLimit: 100000
    }).toArray();
    @END_EXAMPLE_ARANGOSH_OUTPUT
    @endDocuBlock 02_workWithAQL_memoryLimit

@@ -1,10 +1,10 @@
{
  "gitbook": "^2.6.7",
  "gitbook": "^3.2.2",
  "title": "ArangoDB VERSION_NUMBER AQL Documentation",
  "author": "ArangoDB GmbH",
  "description": "Official AQL manual for ArangoDB - the multi-model NoSQL database",
  "language": "en",
  "plugins":["-search", "-sharing", "toggle-chapters", "addcssjs", "anchorjs", "add-header", "piwik"],
  "plugins":["-search", "-lunr", "-sharing", "toggle-chapters", "addcssjs", "anchorjs", "piwik", "sitemap-general"],
  "pdf": {
    "fontSize": 12,
    "toc": true,

@@ -23,12 +23,12 @@
    "js": ["styles/header.js"],
    "css": ["styles/header.css"]
  },
  "add-header": {
    "BASE_PATH": "https://docs.arangodb.com/devel"
  },
  "piwik": {
    "URL": "www.arangodb.com/piwik/",
    "siteId": 12
  },
  "sitemap-general": {
    "prefix": "https://docs.arangodb.com/devel/AQL/"
  }
}

@@ -1,5 +1,6 @@
// Try to set the version number early, jQuery not available yet
document.addEventListener("DOMContentLoaded", function(event) {
  if (!gitbook.state.root) return;
  var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//);
  var switcher = document.getElementsByClassName("arangodb-version-switcher")[0];
  if (bookVersion) {

@@ -15,25 +16,81 @@ window.localStorage.removeItem(":keyword");
$(document).ready(function() {

function appendHeader() {
/*

var div = document.createElement('div');
div.innerHTML = '<header id="header" class="header absolute"><div class="wrap"><div class="clearfix" style="width:100%;"><div id="logo"><a href="https://docs.arangodb.com/"><img src="https://docs.arangodb.com/assets/arangodb_logo.png"></a></div><div class="arangodb_version">VERSION_NUMBER</div><div class="google-search"><gcse:searchbox-only></div><ul id="navmenu"><li><a href="https://tst.arangodb.com/simran/all-in-one/">Docs</a></li><li><a href="https://docs.arangodb.com/cookbook">Cookbook</a></li><li class="socialIcons"><a href="https://github.com/ArangoDB/ArangoDB/issues" target="blank" name="github"><i title="GitHub" class="fa fa-github"></i></a></li><li class="socialIcons"><a href="http://stackoverflow.com/questions/tagged/arangodb" target="blank" name="stackoverflow"><i title="Stackoverflow" class="fa fa-stack-overflow"></i></a></li><li class="socialIcons socialIcons-googlegroups"><a href="https://groups.google.com/forum/#!forum/arangodb" target="blank" name="google groups"><img title="Google Groups" alt="Google Groups" src="https://docs.arangodb.com/assets/googlegroupsIcon.png" style="height:14px"></img></a></li></ul></div></div></header>';
div.innerHTML = '<div class="arangodb-header">\n' +
'  <div class="arangodb-logo">\n' +
'    <a href="https://arangodb.com/">\n' +
'      <img src="https://docs.arangodb.com/assets/arangodb_logo_2016.png">\n' +
'    </a>\n' +
'  </div>\n' +
'  <div class="arangodb-logo-small">\n' +
'    <a href="https://arangodb.com/">\n' +
'      <img src="https://docs.arangodb.com/assets/arangodb_logo_small_2016.png">\n' +
'    </a>\n' +
'  </div>\n' +
'  <select class="arangodb-version-switcher">\n' +
'    <option value="devel">devel</option>\n' +
'    <option value="3.1">v3.1</option>\n' +
'    <option value="3.0">v3.0</option>\n' +
'    <option value="2.8">v2.8</option>\n' +
'  </select>\n' +
'  <div class="google-search">\n' +
'    <gcse:searchbox-only></gcse:searchbox-only>\n' +
'  </div>\n' +
'  <ul class="arangodb-navmenu">\n' +
'    <li>\n' +
'      <a href="#" data-book="Manual">Manual</a>\n' +
'    </li>\n' +
'    <li class="active-tab">\n' +
'      <a href="#" data-book="AQL">AQL</a>\n' +
'    </li>\n' +
'    <li>\n' +
'      <a href="#" data-book="HTTP">HTTP</a>\n' +
'    </li>\n' +
'    <li>\n' +
'      <a href="#" data-book="cookbook">Cookbook</a>\n' +
'    </li>\n' +
'    <li class="downloadIcon" title="Download">\n' +
'      <a href="https://www.arangodb.com/download-arangodb-docs" target="_blank">\n' +
'        <i class="fa fa-download"></i>\n' +
'      </a>\n' +
'    </li>\n' +
'    <li class="socialIcons" title="GitHub">\n' +
'      <a href="https://github.com/ArangoDB/ArangoDB/issues" target="_blank">\n' +
'        <i class="fa fa-github"></i>\n' +
'      </a>\n' +
'    </li>\n' +
'    <li class="socialIcons" title="StackOverflow">\n' +
'      <a href="http://stackoverflow.com/questions/tagged/arangodb" target="_blank">\n' +
'        <i class="fa fa-stack-overflow"></i>\n' +
'      </a>\n' +
'    </li>\n' +
'    <li class="socialIcons socialIcons-googlegroups" title="Google Groups">\n' +
'      <a href="https://groups.google.com/forum/#!forum/arangodb" target="_blank">\n' +
'        <img alt="Google Groups" src="https://docs.arangodb.com/assets/googlegroupsIcon.png" />\n' +
'      </a>\n' +
'    </li>\n' +
'    <li class="socialIcons" title="Slack">\n' +
'      <a href="https://slack.arangodb.com" target="_blank">\n' +
'        <i class="fa fa-slack"></i>\n' +
'      </a>\n' +
'    </li>\n' +
'  </ul>\n' +
'</div>\n';

$('.book').before(div.innerHTML);
*/

};


function rerenderNavbar() {
  $('#header').remove();
  appendHeader();
  renderGoogleSearch();
};

function renderGoogleSearch() {
};
//render header
//rerenderNavbar();
rerenderNavbar();
function addGoogleSrc() {
  var cx = '010085642145132923492:6ymjhhr677k';
  var gcse = document.createElement('script');

@@ -68,7 +125,10 @@ function appendHeader() {
  if (urlSplit.length == 6) {
    urlSplit.pop(); // ""
    var currentBook = urlSplit.pop(); // e.g. "Manual"
    urlSplit.pop() // e.g. "3.0"
    var version = urlSplit.pop() // e.g. "3.0"
    if (version < "2.9") {
      currentBook = "Users";
    }
    window.location.href = urlSplit.join("/") + "/" + e.target.value + "/" + currentBook + "/";
  } else {
    window.location.href = "https://docs.arangodb.com/" + e.target.value;

@@ -14,13 +14,6 @@
<option value="3.1">v3.1</option>
<option value="3.0">v3.0</option>
<option value="2.8">v2.8</option>
<option value="2.7">v2.7</option>
<option value="2.6">v2.6</option>
<option value="2.5">v2.5</option>
<option value="2.4">v2.4</option>
<option value="2.3">v2.3</option>
<option value="2.2">v2.2</option>
<option value="2.1">v2.1</option>
</select>
<div class="google-search">
<gcse:searchbox-only></gcse:searchbox-only>

@@ -1,10 +1,10 @@
{
  "gitbook": "^2.6.7",
  "gitbook": "^3.2.2",
  "title": "ArangoDB VERSION_NUMBER HTTP API Documentation",
  "author": "ArangoDB GmbH",
  "description": "Official HTTP API manual for ArangoDB - the multi-model NoSQL database",
  "language": "en",
  "plugins":["-search", "-sharing", "toggle-chapters", "addcssjs", "anchorjs", "add-header", "piwik"],
  "plugins":["-search", "-lunr", "-sharing", "toggle-chapters", "addcssjs", "anchorjs", "piwik", "sitemap-general"],
  "pdf": {
    "fontSize": 12,
    "toc": true,

@@ -24,8 +24,11 @@
    "css": ["styles/header.css"]
  },
  "piwik": {
    "URL": "www.arangodb.com/piwik/",
    "siteId": 12
  },
  "sitemap-general": {
    "prefix": "https://docs.arangodb.com/devel/HTTP/"
  }
}

@@ -1,5 +1,6 @@
// Try to set the version number early, jQuery not available yet
document.addEventListener("DOMContentLoaded", function(event) {
  if (!gitbook.state.root) return;
  var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//);
  var switcher = document.getElementsByClassName("arangodb-version-switcher")[0];
  if (bookVersion) {

@@ -15,25 +16,81 @@ window.localStorage.removeItem(":keyword");
$(document).ready(function() {

function appendHeader() {
/*

var div = document.createElement('div');
div.innerHTML = '<header id="header" class="header absolute"><div class="wrap"><div class="clearfix" style="width:100%;"><div id="logo"><a href="https://docs.arangodb.com/"><img src="https://docs.arangodb.com/assets/arangodb_logo.png"></a></div><div class="arangodb_version">VERSION_NUMBER</div><div class="google-search"><gcse:searchbox-only></div><ul id="navmenu"><li><a href="https://tst.arangodb.com/simran/all-in-one/">Docs</a></li><li><a href="https://docs.arangodb.com/cookbook">Cookbook</a></li><li class="socialIcons"><a href="https://github.com/ArangoDB/ArangoDB/issues" target="blank" name="github"><i title="GitHub" class="fa fa-github"></i></a></li><li class="socialIcons"><a href="http://stackoverflow.com/questions/tagged/arangodb" target="blank" name="stackoverflow"><i title="Stackoverflow" class="fa fa-stack-overflow"></i></a></li><li class="socialIcons socialIcons-googlegroups"><a href="https://groups.google.com/forum/#!forum/arangodb" target="blank" name="google groups"><img title="Google Groups" alt="Google Groups" src="https://docs.arangodb.com/assets/googlegroupsIcon.png" style="height:14px"></img></a></li></ul></div></div></header>';
div.innerHTML = '<div class="arangodb-header">\n' +
'  <div class="arangodb-logo">\n' +
'    <a href="https://arangodb.com/">\n' +
'      <img src="https://docs.arangodb.com/assets/arangodb_logo_2016.png">\n' +
'    </a>\n' +
'  </div>\n' +
'  <div class="arangodb-logo-small">\n' +
'    <a href="https://arangodb.com/">\n' +
'      <img src="https://docs.arangodb.com/assets/arangodb_logo_small_2016.png">\n' +
'    </a>\n' +
'  </div>\n' +
'  <select class="arangodb-version-switcher">\n' +
'    <option value="devel">devel</option>\n' +
'    <option value="3.1">v3.1</option>\n' +
'    <option value="3.0">v3.0</option>\n' +
'    <option value="2.8">v2.8</option>\n' +
'  </select>\n' +
'  <div class="google-search">\n' +
'    <gcse:searchbox-only></gcse:searchbox-only>\n' +
'  </div>\n' +
'  <ul class="arangodb-navmenu">\n' +
'    <li>\n' +
'      <a href="#" data-book="Manual">Manual</a>\n' +
'    </li>\n' +
'    <li>\n' +
'      <a href="#" data-book="AQL">AQL</a>\n' +
'    </li>\n' +
'    <li class="active-tab">\n' +
'      <a href="#" data-book="HTTP">HTTP</a>\n' +
'    </li>\n' +
'    <li>\n' +
'      <a href="#" data-book="cookbook">Cookbook</a>\n' +
'    </li>\n' +
'    <li class="downloadIcon" title="Download">\n' +
'      <a href="https://www.arangodb.com/download-arangodb-docs" target="_blank">\n' +
'        <i class="fa fa-download"></i>\n' +
'      </a>\n' +
'    </li>\n' +
'    <li class="socialIcons" title="GitHub">\n' +
'      <a href="https://github.com/ArangoDB/ArangoDB/issues" target="_blank">\n' +
'        <i class="fa fa-github"></i>\n' +
'      </a>\n' +
'    </li>\n' +
'    <li class="socialIcons" title="StackOverflow">\n' +
'      <a href="http://stackoverflow.com/questions/tagged/arangodb" target="_blank">\n' +
'        <i class="fa fa-stack-overflow"></i>\n' +
'      </a>\n' +
'    </li>\n' +
'    <li class="socialIcons socialIcons-googlegroups" title="Google Groups">\n' +
'      <a href="https://groups.google.com/forum/#!forum/arangodb" target="_blank">\n' +
'        <img alt="Google Groups" src="https://docs.arangodb.com/assets/googlegroupsIcon.png" />\n' +
'      </a>\n' +
'    </li>\n' +
'    <li class="socialIcons" title="Slack">\n' +
'      <a href="https://slack.arangodb.com" target="_blank">\n' +
'        <i class="fa fa-slack"></i>\n' +
'      </a>\n' +
'    </li>\n' +
'  </ul>\n' +
'</div>\n';

$('.book').before(div.innerHTML);
*/

};


function rerenderNavbar() {
  $('.arangodb-header').remove();
  appendHeader();
  renderGoogleSearch();
};

function renderGoogleSearch() {
};
//render header
//rerenderNavbar();
rerenderNavbar();
function addGoogleSrc() {
  var cx = '010085642145132923492:fixi4yzeiz8';
  var gcse = document.createElement('script');

@@ -68,7 +125,10 @@ function appendHeader() {
  if (urlSplit.length == 6) {
    urlSplit.pop(); // ""
    var currentBook = urlSplit.pop(); // e.g. "Manual"
    urlSplit.pop() // e.g. "3.0"
    var version = urlSplit.pop() // e.g. "3.0"
    if (version < "2.9") {
      currentBook = "Users";
    }
    window.location.href = urlSplit.join("/") + "/" + e.target.value + "/" + currentBook + "/";
  } else {
    window.location.href = "https://docs.arangodb.com/" + e.target.value;

@@ -121,9 +121,9 @@ book-check-markdown-leftovers:
		echo "${RESET}"; \
		exit 1; \
	fi
	@if test "`find books/$(NAME) -name '*.html' -exec grep '\.md\"' {} \; | grep -v data-filepath | wc -l`" -gt 0; then \
	@if test "`find books/$(NAME) -name '*.html' -exec grep '\.md\"[ />]' {} \; | grep -v data-filepath | wc -l`" -gt 0; then \
		echo "${ERR_COLOR}"; \
		find books/$(NAME) -name '*.html' -exec grep '\.md"' {} \; -print | grep -v data-filepath; \
		find books/$(NAME) -name '*.html' -exec grep '\.md"[ />]' {} \; -print | grep -v data-filepath; \
		echo "found dangling markdown links; see the list above "; \
		echo "${RESET}"; \
		exit 1; \

@@ -13,6 +13,14 @@ Synchronous replication only works in a cluster and is typically used for mis

Synchronous replication is organized in a way that every shard has a leader and r-1 followers. The number of followers can be controlled using the `replicationFactor` whenever you create a collection; the `replicationFactor` is the total number of copies being kept, that is, it is one plus the number of followers.
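For example, the following arangosh call (collection name and values are made up for illustration) creates a collection whose shards each have one leader and two followers:

```js
// replicationFactor: 3 keeps three copies of every shard,
// i.e. one leader plus two followers
db._create("test", { numberOfShards: 4, replicationFactor: 3 });
```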

### Satellite collections

Satellite collections are synchronously replicated collections having a dynamic replicationFactor.
They will replicate all data to all database servers, allowing the database servers to join data
locally instead of doing heavy network operations.

Satellite collections are an enterprise-only feature.

### Asynchronous replication

In ArangoDB any write operation will be logged to the write-ahead log. When using asynchronous replication, slaves will connect to a master and apply all the events from the log in the same order locally. After that, they will have the same state of data as the master database.

@@ -0,0 +1,134 @@
Satellite Collections
=====================

Satellite Collections are an *Enterprise* only feature. When doing joins in an
ArangoDB cluster, data has to be exchanged between different servers.

Joins will be executed on a coordinator. It will prepare an execution plan
and execute it. When executing, the coordinator will contact all shards of the
starting point of the join and ask for their data. The database servers carrying
out this operation will load all their local data and then ask the cluster for
the other part of the join. This again will be distributed to all involved shards
of this join part.

In sum this results in a lot of network traffic and slow results, depending on the
amount of data that has to be sent throughout the cluster.

Satellite collections are collections that are intended to address this issue.

They will facilitate the synchronous replication and replicate all their data
to all database servers that are part of the cluster.

This enables the database servers to execute that part of any join locally.

This greatly improves performance for such joins, at the cost of increased
storage requirements and poorer write performance on this data.

To create a satellite collection, set the *replicationFactor* of this collection
to "satellite".

Using arangosh:

    arangosh> db._create("satellite", {"replicationFactor": "satellite"});

### A full example

    arangosh> var explain = require("@arangodb/aql/explainer").explain
    arangosh> db._create("satellite", {"replicationFactor": "satellite"})
    arangosh> db._create("nonsatellite", {numberOfShards: 8})
    arangosh> db._create("nonsatellite2", {numberOfShards: 8})

Let's analyse a normal join not involving satellite collections:

```
arangosh> explain("FOR doc in nonsatellite FOR doc2 in nonsatellite2 RETURN 1")

Query string:
 FOR doc in nonsatellite FOR doc2 in nonsatellite2 RETURN 1

Execution plan:
 Id   NodeType                  Site   Est.   Comment
  1   SingletonNode             DBS       1   * ROOT
  4   CalculationNode           DBS       1     - LET #2 = 1   /* json expression */   /* const assignment */
  2   EnumerateCollectionNode   DBS       0     - FOR doc IN nonsatellite   /* full collection scan */
 12   RemoteNode                COOR      0     - REMOTE
 13   GatherNode                COOR      0     - GATHER
  6   ScatterNode               COOR      0     - SCATTER
  7   RemoteNode                DBS       0     - REMOTE
  3   EnumerateCollectionNode   DBS       0     - FOR doc2 IN nonsatellite2   /* full collection scan */
  8   RemoteNode                COOR      0     - REMOTE
  9   GatherNode                COOR      0     - GATHER
  5   ReturnNode                COOR      0     - RETURN #2

Indexes used:
 none

Optimization rules applied:
 Id   RuleName
  1   move-calculations-up
  2   scatter-in-cluster
  3   remove-unnecessary-remote-scatter
```

The query on the `nonsatellite` collection will fan out via the
coordinator to its shards. In sum 8 shards will open 8 connections
to the coordinator asking for the results of the `nonsatellite2` join. The coordinator
will fan out to the 8 shards of `nonsatellite2`. So there will be quite some
network traffic.

Let's now have a look at the same using satellite collections:

```
arangosh> db._query("FOR doc in nonsatellite FOR doc2 in satellite RETURN 1")

Query string:
 FOR doc in nonsatellite FOR doc2 in satellite RETURN 1

Execution plan:
 Id   NodeType                  Site   Est.   Comment
  1   SingletonNode             DBS       1   * ROOT
  4   CalculationNode           DBS       1     - LET #2 = 1   /* json expression */   /* const assignment */
  2   EnumerateCollectionNode   DBS       0     - FOR doc IN nonsatellite   /* full collection scan */
  3   EnumerateCollectionNode   DBS       0     - FOR doc2 IN satellite   /* full collection scan, satellite */
  8   RemoteNode                COOR      0     - REMOTE
  9   GatherNode                COOR      0     - GATHER
  5   ReturnNode                COOR      0     - RETURN #2

Indexes used:
 none

Optimization rules applied:
 Id   RuleName
  1   move-calculations-up
  2   scatter-in-cluster
  3   remove-unnecessary-remote-scatter
  4   remove-satellite-joins
```

In this scenario all shards of `nonsatellite` will be contacted. However,
as the join is a satellite join, all shards can do the join locally,
as the data is replicated to all servers, reducing the network overhead
dramatically.

### Caveats

The cluster will automatically keep all satellite collections on all servers in sync
by facilitating the synchronous replication. This means that writes will be executed
on the leader only, and this server will coordinate replication to the followers.
If a follower doesn't answer in time (due to network problems, temporary shutdown etc.)
it may be removed as a follower. This is being reported to the Agency.

The follower (once back in business) will then periodically check the Agency and know
that it is out of sync. It will then automatically catch up. This may take a while,
depending on how much data has to be synced. When doing a join involving the satellite
you can specify how long the DBServer is allowed to wait for sync until the query
is being aborted.
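A sketch of what that could look like from arangosh, reusing the collections from the example above (assuming the option is accepted among the cursor options as described in the HTTP API; the 15-second value is arbitrary, the default being 60.0):

```js
// give the DBServers at most 15 seconds to get the satellite
// collection in sync before the query is aborted
db._query("FOR doc IN nonsatellite FOR doc2 IN satellite RETURN 1", {}, {
  options: { satelliteSyncWait: 15 }
});
```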

Check [Accessing Cursors](https://docs.arangodb.com/3.1/HTTP/AqlQueryCursor/AccessingCursors.html) for
details.

During a network failure there is also a minimal chance that a query was properly
distributed to the DBServers but that a previous satellite write could not be
replicated to a follower and the leader dropped the follower. The follower however
only checks every few seconds if it is really in sync, so it might indeed deliver
stale results.

@@ -138,7 +138,13 @@ to the [naming conventions](../NamingConventions/README.md).

If a server fails, this is detected automatically and one of the
servers holding copies takes over, usually without an error being
reported.

When using the *Enterprise* version of ArangoDB the replicationFactor
may be set to "satellite", making the collection locally joinable
on every database server. This reduces the number of network hops
dramatically when using joins in AQL, at the cost of reduced write
performance on these collections.
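In arangosh this looks as follows (mirroring the satellite example from the replication documentation in this commit):

```js
// "satellite" instead of a numeric replicationFactor replicates the
// collection to every database server, making it locally joinable
db._create("satellite", { "replicationFactor": "satellite" });
```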

`db._create(collection-name, properties, type)`

@@ -3,7 +3,7 @@ Mac OS X

The preferred method for installing ArangoDB under Mac OS X is
[homebrew](#homebrew). However, in case you are not using homebrew, we
provide a [command-line app](#commandline-app) or [graphical
provide a [command-line app](#command-line-app) or [graphical
app](#graphical-app) which contains all the executables.

Homebrew

@@ -58,7 +58,7 @@ download it from [here](https://www.arangodb.com/download).
Choose *Mac OS X*. Download and install the application *ArangoDB* in
your application folder.

Command-Line App
Command line App
----------------
In case you are not using homebrew, we also provide a command-line app. You can
download it from [here](https://www.arangodb.com/download).

@@ -428,7 +428,7 @@ not always have to be an array of objects:
Now let's do something crazy: for every document in the users collection,
iterate over all user documents again and return user pairs, e.g. John and Katie.
We can use a loop inside a loop for this to get the cross product (every possible
combination of all user records, 3 * 3 = 9). We don't want pairings like *John +
combination of all user records, 3 \* 3 = 9). We don't want pairings like *John +
John* however, so let's eliminate them with a filter condition:

```js

|
|||
<option value="3.1">v3.1</option>
|
||||
<option value="3.0">v3.0</option>
|
||||
<option value="2.8">v2.8</option>
|
||||
<option value="2.7">v2.7</option>
|
||||
<option value="2.6">v2.6</option>
|
||||
<option value="2.5">v2.5</option>
|
||||
<option value="2.4">v2.4</option>
|
||||
<option value="2.3">v2.3</option>
|
||||
<option value="2.2">v2.2</option>
|
||||
<option value="2.1">v2.1</option>
|
||||
</select>
|
||||
<div class="google-search">
|
||||
<gcse:searchbox-only></gcse:searchbox-only>
|
||||
|
|
|
@ -269,7 +269,7 @@ The geo index stores two-dimensional coordinates. It can be created on either tw
|
|||
separate document attributes (latitude and longitude) or a single array attribute that
|
||||
contains both latitude and longitude. Latitude and longitude must be numeric values.
|
||||
|
||||
Th geo index provides operations to find documents with coordinates nearest to a given
|
||||
The geo index provides operations to find documents with coordinates nearest to a given
|
||||
comparison coordinate, and to find documents with coordinates that are within a specifiable
|
||||
radius around a comparison coordinate.
|
|
|||
* [Synchronous Replication](Administration/Replication/Synchronous/README.md)
|
||||
* [Implementation](Administration/Replication/Synchronous/Implementation.md)
|
||||
* [Configuration](Administration/Replication/Synchronous/Configuration.md)
|
||||
* [Satellite Collections](Administration/Replication/Synchronous/Satellites/README.md)
|
||||
* [Sharding](Administration/Sharding/README.md)
|
||||
# * [Authentication](Administration/Sharding/Authentication.md)
|
||||
# * [Firewall setup](Administration/Sharding/FirewallSetup.md)
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
{
|
||||
"gitbook": "^2.6.7",
|
||||
"gitbook": "^3.2.2",
|
||||
"title": "ArangoDB VERSION_NUMBER Documentation",
|
||||
"author": "ArangoDB GmbH",
|
||||
"description": "Official manual for ArangoDB - the multi-model NoSQL database",
|
||||
"language": "en",
|
||||
"plugins":["-search", "-sharing", "toggle-chapters", "addcssjs", "anchorjs", "add-header", "piwik"],
|
||||
"plugins":["-search", "-lunr", "-sharing", "toggle-chapters", "addcssjs", "anchorjs", "piwik", "sitemap-general"],
|
||||
"pdf": {
|
||||
"fontSize": 12,
|
||||
"toc": true,
|
||||
|
@ -24,8 +24,11 @@
|
|||
"css": ["styles/header.css"]
|
||||
},
|
||||
"piwik": {
|
||||
"URL": "www.arangodb.com/piwik/",
|
||||
"siteId": 12
|
||||
"URL": "www.arangodb.com/piwik/",
|
||||
"siteId": 12
|
||||
},
|
||||
"sitemap-general": {
|
||||
"prefix": "https://docs.arangodb.com/devel/Manual/"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Try to set the version number early, jQuery not available yet
|
||||
document.addEventListener("DOMContentLoaded", function(event) {
|
||||
if (!gitbook.state.root) return;
|
||||
var bookVersion = gitbook.state.root.match(/\/(\d\.\d(\.\d)?|devel)\//);
|
||||
var switcher = document.getElementsByClassName("arangodb-version-switcher")[0];
|
||||
if (bookVersion) {
|
||||
|
@ -15,25 +16,81 @@ window.localStorage.removeItem(":keyword");
|
|||
$(document).ready(function() {
|
||||
|
||||
function appendHeader() {
|
||||
/*
|
||||
var div = document.createElement('div');
|
||||
div.innerHTML = '<header id="header" class="header absolute"><div class="wrap"><div class="clearfix" style="width:100%;"><div id="logo"><a href="https://docs.arangodb.com/"><img src="https://docs.arangodb.com/assets/arangodb_logo.png"></a></div><div class="arangodb_version">VERSION_NUMBER</div><div class="google-search"><gcse:searchbox-only></div><ul id="navmenu"><li><a href="https://tst.arangodb.com/simran/all-in-one/">Docs</a></li><li><a href="https://docs.arangodb.com/cookbook">Cookbook</a></li><li class="socialIcons"><a href="https://github.com/ArangoDB/ArangoDB/issues" target="blank" name="github"><i title="GitHub" class="fa fa-github"></i></a></li><li class="socialIcons"><a href="http://stackoverflow.com/questions/tagged/arangodb" target="blank" name="stackoverflow"><i title="Stackoverflow" class="fa fa-stack-overflow"></i></a></li><li class="socialIcons socialIcons-googlegroups"><a href="https://groups.google.com/forum/#!forum/arangodb" target="blank" name="google groups"><img title="Google Groups" alt="Google Groups" src="https://docs.arangodb.com/assets/googlegroupsIcon.png" style="height:14px"></img></a></li></ul></div></div></header>';
|
||||
|
||||
$('.book').before(div.innerHTML);
|
||||
*/
|
||||
var div = document.createElement('div');
|
||||
div.innerHTML = '<div class="arangodb-header">\n' +
|
||||
' <div class="arangodb-logo">\n' +
|
||||
' <a href="https://arangodb.com/">\n' +
|
||||
' <img src="https://docs.arangodb.com/assets/arangodb_logo_2016.png">\n' +
|
||||
' </a>\n' +
|
||||
' </div>\n' +
|
||||
' <div class="arangodb-logo-small">\n' +
|
||||
' <a href="https://arangodb.com/">\n' +
|
||||
' <img src="https://docs.arangodb.com/assets/arangodb_logo_small_2016.png">\n' +
|
||||
' </a>\n' +
|
||||
' </div>\n' +
|
||||
' <select class="arangodb-version-switcher">\n' +
|
||||
' <option value="devel">devel</option>\n' +
|
||||
' <option value="3.1">v3.1</option>\n' +
|
||||
' <option value="3.0">v3.0</option>\n' +
|
||||
' <option value="2.8">v2.8</option>\n' +
|
||||
' </select>\n' +
|
||||
' <div class="google-search">\n' +
|
||||
' <gcse:searchbox-only></gcse:searchbox-only>\n' +
|
||||
' </div>\n' +
|
||||
' <ul class="arangodb-navmenu">\n' +
|
||||
' <li class="active-tab">\n' +
|
||||
' <a href="#" data-book="Manual">Manual</a>\n' +
|
||||
' </li>\n' +
|
||||
' <li>\n' +
|
||||
' <a href="#" data-book="AQL">AQL</a>\n' +
|
||||
' </li>\n' +
|
||||
' <li>\n' +
|
||||
' <a href="#" data-book="HTTP">HTTP</a>\n' +
|
||||
' </li>\n' +
|
||||
' <li>\n' +
|
||||
' <a href="#" data-book="cookbook">Cookbook</a>\n' +
|
||||
' </li>\n' +
|
||||
' <li class="downloadIcon" title="Download">\n' +
|
||||
' <a href="https://www.arangodb.com/download-arangodb-docs" target="_blank">\n' +
|
||||
' <i class="fa fa-download"></i>\n' +
|
||||
' </a>\n' +
|
||||
' </li>\n' +
|
||||
' <li class="socialIcons" title="GitHub">\n' +
|
||||
' <a href="https://github.com/ArangoDB/ArangoDB/issues" target="_blank">\n' +
|
||||
' <i class="fa fa-github"></i>\n' +
|
||||
' </a>\n' +
|
||||
' </li>\n' +
|
||||
' <li class="socialIcons" title="StackOverflow">\n' +
|
||||
' <a href="http://stackoverflow.com/questions/tagged/arangodb" target="_blank">\n' +
|
||||
' <i class="fa fa-stack-overflow"></i>\n' +
|
||||
' </a>\n' +
|
||||
' </li>\n' +
|
||||
' <li class="socialIcons socialIcons-googlegroups" title="Google Groups">\n' +
|
||||
' <a href="https://groups.google.com/forum/#!forum/arangodb" target="_blank">\n' +
|
||||
' <img alt="Google Groups" src="https://docs.arangodb.com/assets/googlegroupsIcon.png" />\n' +
|
||||
' </a>\n' +
|
||||
' </li>\n' +
|
||||
' <li class="socialIcons" title="Slack">\n' +
|
||||
' <a href="https://slack.arangodb.com" target="_blank">\n' +
|
||||
' <i class="fa fa-slack"></i>\n' +
|
||||
' </a>\n' +
|
||||
' </li>\n' +
|
||||
' </ul>\n' +
|
||||
'</div>\n';
|
||||
|
||||
$('.book').before(div.innerHTML);
|
||||
|
||||
};
|
||||
|
||||
|
||||
function rerenderNavbar() {
|
||||
$('.arangodb-header').remove();
|
||||
appendHeader();
|
||||
renderGoogleSearch();
|
||||
};
|
||||
|
||||
function renderGoogleSearch() {
|
||||
};
|
||||
//render header
|
||||
//rerenderNavbar();
|
||||
rerenderNavbar();
|
||||
function addGoogleSrc() {
|
||||
var cx = '010085642145132923492:djexw6vlsgo';
|
||||
var gcse = document.createElement('script');
|
||||
|
@ -68,7 +125,10 @@ function appendHeader() {
|
|||
if (urlSplit.length == 6) {
|
||||
urlSplit.pop(); // ""
|
||||
var currentBook = urlSplit.pop(); // e.g. "Manual"
|
||||
urlSplit.pop() // e.g. "3.0"
|
||||
var version = urlSplit.pop() // e.g. "3.0"
|
||||
if (version < "2.9") {
|
||||
currentBook = "Users";
|
||||
}
|
||||
window.location.href = urlSplit.join("/") + "/" + e.target.value + "/" + currentBook + "/";
|
||||
} else {
|
||||
window.location.href = "https://docs.arangodb.com/" + e.target.value;
|
||||
|
|
|
@ -73,6 +73,12 @@ If set to *true*, then the additional query profiling information will be return
|
|||
in the sub-attribute *profile* of the *extra* return attribute if the query result
|
||||
is not served from the query cache.
|
||||
|
||||
@RESTSTRUCT{satelliteSyncWait,JSF_post_api_cursor_opts,boolean,optional,}
|
||||
This *enterprise* parameter allows to configure how long a DBServer will have time
|
||||
to bring the satellite collections involved in the query into sync.
|
||||
The default value is *60.0* (seconds). When the max time has been reached the query
|
||||
will be stopped.
|

@RESTDESCRIPTION
The query details include the query string plus optional query options and
bind parameters. These values need to be passed in a JSON representation in

|
|||
The maximal amount of documents to return. The *skip*
|
||||
is applied before the *limit* restriction. (optional)
|
||||
|
||||
@RESTBODYPARAM{batchSize,integer,optional,int64}
|
||||
maximum number of result documents to be transferred from
|
||||
the server to the client in one roundtrip. If this attribute is
|
||||
not set, a server-controlled default value will be used. A *batchSize* value of
|
||||
*0* is disallowed.
|

@RESTDESCRIPTION

This will find all documents matching a given example.

|
|||
CLEAN_IT=1
|
||||
shift
|
||||
;;
|
||||
--cxArmV8)
|
||||
ARMV8=1
|
||||
CXGCC=1
|
||||
--xcArm)
|
||||
shift
|
||||
;;
|
||||
--cxArmV7)
|
||||
ARMV7=1
|
||||
CXGCC=1
|
||||
TOOL_PREFIX=$1
|
||||
XCGCC=1
|
||||
shift
|
||||
;;
|
||||
|
||||
|
@ -388,20 +384,21 @@ elif [ "$CLANG36" == 1 ]; then
|
|||
CC=/usr/bin/clang-3.6
|
||||
CXX=/usr/bin/clang++-3.6
|
||||
CXXFLAGS="${CXXFLAGS} -std=c++11"
|
||||
elif [ "${CXGCC}" = 1 ]; then
|
||||
elif [ "${XCGCC}" = 1 ]; then
|
||||
USE_JEMALLOC=0
|
||||
if [ "${ARMV8}" = 1 ]; then
|
||||
export TOOL_PREFIX=aarch64-linux-gnu
|
||||
BUILD_DIR="${BUILD_DIR}-ARMV8"
|
||||
elif [ "${ARMV7}" = 1 ]; then
|
||||
export TOOL_PREFIX=aarch64-linux-gnu
|
||||
BUILD_DIR="${BUILD_DIR}-ARMV7"
|
||||
else
|
||||
echo "Unknown CX-Compiler!"
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
BUILD_DIR="${BUILD_DIR}-`basename ${TOOL_PREFIX}`"
|
||||
|
||||
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DCROSS_COMPILING=true" # -DCMAKE_LIBRARY_ARCHITECTURE=${TOOL_PREFIX} "
|
||||
# tell cmake we're cross compiling:
|
||||
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DCROSS_COMPILING=true -DCMAKE_SYSTEM_NAME=Linux"
|
||||
# -DCMAKE_LIBRARY_ARCHITECTURE=${TOOL_PREFIX} "
|
||||
# these options would be evaluated using TRY_RUN(), which obviously doesn't work:
|
||||
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DHAVE_POLL_FINE_EXITCODE=0"
|
||||
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DHAVE_GLIBC_STRERROR_R=0"
|
||||
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DHAVE_GLIBC_STRERROR_R__TRYRUN_OUTPUT=TRUE"
|
||||
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DHAVE_POSIX_STRERROR_R=1"
|
||||
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DHAVE_POSIX_STRERROR_R__TRYRUN_OUTPUT=FALSE"
|
||||
|
||||
export CXX=$TOOL_PREFIX-g++
|
||||
export AR=$TOOL_PREFIX-ar
|
||||
export RANLIB=$TOOL_PREFIX-ranlib
|
||||
|
@ -409,13 +406,9 @@ elif [ "${CXGCC}" = 1 ]; then
|
|||
export LD=$TOOL_PREFIX-g++
|
||||
export LINK=$TOOL_PREFIX-g++
|
||||
export STRIP=$TOOL_PREFIX-strip
|
||||
|
||||
# we need ARM LD:
|
||||
GOLD=0;
|
||||
|
||||
# tell cmake we're cross compiling:
|
||||
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DCROSS_COMPILING=true"
|
||||
|
||||
# V8's mksnapshot won't work - ignore it:
|
||||
MAKE_PARAMS="${MAKE_PARAMS} -i"
|
||||
fi
|
||||
|
@@ -470,9 +463,11 @@ if [ -z "${MSVC}" ]; then
    if [ ! -f ${STRIP} ] ; then
        STRIP=`which strip`
    fi
    CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DCMAKE_STRIP=${STRIP}"
        export STRIP
    fi
    if test -n "${STRIP}"; then
        CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DCMAKE_STRIP=${STRIP}"
    fi
fi

CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} ${MAINTAINER_MODE}"

|
|||
GITARGS=devel
|
||||
fi
|
||||
echo "I'm on Branch: ${GITARGS}"
|
||||
export FINAL_PULL="git pull"
|
||||
fi
|
||||
# clean up if we're commanded to:
|
||||
if test -d enterprise -a ${CLEAN_IT} -eq 1; then
|
||||
|
@ -527,7 +523,7 @@ if test -n "${ENTERPRISE_GIT_URL}" ; then
|
|||
if test ! -d enterprise; then
|
||||
git clone ${ENTERPRISE_GIT_URL} enterprise
|
||||
fi
|
||||
(cd enterprise; git checkout master; git fetch --tags; git pull --all; git checkout ${GITARGS} )
|
||||
(cd enterprise; git checkout master; git fetch --tags; git pull --all; git checkout ${GITARGS}; ${FINAL_PULL} )
|
||||
fi
|
||||
|
||||
|
||||
|
@ -568,20 +564,27 @@ if test -n "${TARGET_DIR}"; then
|
|||
${PACKAGE_MAKE} copy_packages
|
||||
${PACKAGE_MAKE} clean_packages
|
||||
else
|
||||
# we re-use a generic cpack tarball:
|
||||
${PACKAGE_MAKE} TGZ_package
|
||||
PKG_NAME=`grep CPACK_PACKAGE_FILE_NAME CPackConfig.cmake |sed -e 's;".$;;' -e 's;.*";;'`
|
||||
|
||||
|
||||
TARFILE=arangodb-`uname`${TAR_SUFFIX}.tar.gz
|
||||
TARFILE_TMP=`pwd`/arangodb.tar.$$
|
||||
|
||||
mkdir -p ${dir}
|
||||
trap "rm -rf ${TARFILE_TMP}" EXIT
|
||||
|
||||
mkdir -p ${dir}
|
||||
|
||||
(cd _CPack_Packages/*/TGZ/${PKG_NAME}/; rm -rf ${dir}/share/arangodb3/js; tar -c -f ${TARFILE_TMP} *)
|
||||
|
||||
(cd ${SOURCE_DIR}
|
||||
|
||||
touch 3rdParty/.keepme
|
||||
touch arangod/.keepme
|
||||
touch arangosh/.keepme
|
||||
|
||||
tar -c -f ${TARFILE_TMP} \
|
||||
VERSION utils scripts etc/relative UnitTests Documentation js \
|
||||
|
||||
tar -u -f ${TARFILE_TMP} \
|
||||
VERSION utils scripts etc/relative etc/testing UnitTests Documentation js \
|
||||
lib/Basics/errors.dat \
|
||||
3rdParty/.keepme \
|
||||
arangod/.keepme \
|
||||
|
|
|
@ -3,7 +3,7 @@ set -ex
|
|||
|
||||
SCRIPT_DIR=`dirname $0`
|
||||
SRC_DIR="${SCRIPT_DIR}/../"
|
||||
ENTERPRISE_SRC_DIR=${SRC_DIR}/enterprise
|
||||
ENTERPRISE_SRC_DIR=${SRC_DIR}enterprise
|
||||
|
||||
FORCE_TAG=0
|
||||
TAG=1
|
||||
|
@@ -113,7 +113,7 @@ else
    fi
    echo "I'm on Branch: ${GITARGS}"
fi
(cd enterprise; git checkout master; git fetch --tags; git pull --all; git checkout ${GITARGS} )
(cd enterprise; git checkout master; git fetch --tags; git pull --all; git checkout ${GITARGS}; git pull )


@@ -220,7 +220,7 @@ if [ "$TAG" == "1" ]; then
    fi

    cd ${ENTERPRISE_SRC_DIR}
    git commit -m "release version $VERSION enterprise" -a
    git commit --allow-empty -m "release version $VERSION enterprise" -a
    git push

    if test "${FORCE_TAG}" == 0; then

|
|||
/// @brief test fasthash64
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
BOOST_AUTO_TEST_CASE (tst_fasthash64_uint64) {
|
||||
uint64_t value;
|
||||
|
||||
value = 0;
|
||||
BOOST_CHECK_EQUAL((uint64_t) 606939172421154273ULL, fasthash64(&value, sizeof(value), 0x12345678));
|
||||
BOOST_CHECK_EQUAL((uint64_t) 606939172421154273ULL, fasthash64_uint64(value, 0x12345678));
|
||||
|
||||
value = 1;
|
||||
BOOST_CHECK_EQUAL((uint64_t) 2986466439906256014ULL, fasthash64(&value, sizeof(value), 0x12345678));
|
||||
BOOST_CHECK_EQUAL((uint64_t) 2986466439906256014ULL, fasthash64_uint64(value, 0x12345678));
|
||||
|
||||
value = 123456;
|
||||
BOOST_CHECK_EQUAL((uint64_t) 10846706210321519612ULL, fasthash64(&value, sizeof(value), 0x12345678));
|
||||
BOOST_CHECK_EQUAL((uint64_t) 10846706210321519612ULL, fasthash64_uint64(value, 0x12345678));
|
||||
|
||||
value = 123456789012345ULL;
|
||||
BOOST_CHECK_EQUAL((uint64_t) 11872028338155052138ULL, fasthash64(&value, sizeof(value), 0x12345678));
|
||||
BOOST_CHECK_EQUAL((uint64_t) 11872028338155052138ULL, fasthash64_uint64(value, 0x12345678));
|
||||
|
||||
value = 0xffffff000000ULL;
|
||||
BOOST_CHECK_EQUAL((uint64_t) 5064027312035038651ULL, fasthash64(&value, sizeof(value), 0x12345678));
|
||||
BOOST_CHECK_EQUAL((uint64_t) 5064027312035038651ULL, fasthash64_uint64(value, 0x12345678));
|
||||
|
||||
value = 0xffffffffffffULL;
|
||||
BOOST_CHECK_EQUAL((uint64_t) 12472603196990564371ULL, fasthash64(&value, sizeof(value), 0x12345678));
|
||||
BOOST_CHECK_EQUAL((uint64_t) 12472603196990564371ULL, fasthash64_uint64(value, 0x12345678));
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief test fasthash64
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
BOOST_AUTO_TEST_CASE (tst_fasthash64) {
|
||||
std::string buffer;
|
||||
|
||||
|
|
|
@ -598,6 +598,34 @@ describe ArangoDB do
|
|||
doc.parsed_response['cached'].should eq(false)
|
||||
end
|
||||
|
||||
it "calls wrong export API" do
|
||||
cmd = api
|
||||
body = "{ \"query\" : \"FOR u IN #{@cn} LIMIT 5 RETURN u.n\", \"count\" : true, \"batchSize\" : 2 }"
|
||||
doc = ArangoDB.log_post("#{prefix}-create-wrong-api", cmd, :body => body)
|
||||
|
||||
doc.code.should eq(201)
|
||||
doc.headers['content-type'].should eq("application/json; charset=utf-8")
|
||||
doc.parsed_response['error'].should eq(false)
|
||||
doc.parsed_response['code'].should eq(201)
|
||||
doc.parsed_response['id'].should be_kind_of(String)
|
||||
doc.parsed_response['id'].should match(@reId)
|
||||
doc.parsed_response['hasMore'].should eq(true)
|
||||
doc.parsed_response['count'].should eq(5)
|
||||
doc.parsed_response['result'].length.should eq(2)
|
||||
doc.parsed_response['cached'].should eq(false)
|
||||
|
||||
id = doc.parsed_response['id']
|
||||
|
||||
cmd = "/_api/export/#{id}"
|
||||
doc = ArangoDB.log_put("#{prefix}-create-wrong-api", cmd)
|
||||
|
||||
doc.code.should eq(404)
|
||||
doc.headers['content-type'].should eq("application/json; charset=utf-8")
|
||||
doc.parsed_response['error'].should eq(true)
|
||||
doc.parsed_response['code'].should eq(404)
|
||||
doc.parsed_response['errorNum'].should eq(1600)
|
||||
end
|
||||
|
||||
it "creates a query that survives memory limit constraints" do
|
||||
cmd = api
|
||||
body = "{ \"query\" : \"FOR i IN 1..10000 SORT i RETURN i\", \"memoryLimit\" : 10000000, \"batchSize\": 10 }"
|
||||
|
|
|
@ -678,7 +678,34 @@ describe ArangoDB do
|
|||
doc.parsed_response['count'].should eq(2000)
|
||||
doc.parsed_response['result'].length.should eq(2000)
|
||||
end
|
||||
|
||||
it "calls wrong cursor API" do
|
||||
cmd = api + "?collection=#{@cn}"
|
||||
body = "{ \"count\" : true, \"batchSize\" : 100, \"flush\" : true }"
|
||||
doc = ArangoDB.log_post("#{prefix}-limit-return", cmd, :body => body)
|
||||
|
||||
doc.code.should eq(201)
|
||||
doc.headers['content-type'].should eq("application/json; charset=utf-8")
|
||||
doc.parsed_response['error'].should eq(false)
|
||||
doc.parsed_response['code'].should eq(201)
|
||||
doc.parsed_response['id'].should be_kind_of(String)
|
||||
doc.parsed_response['id'].should match(@reId)
|
||||
doc.parsed_response['hasMore'].should eq(true)
|
||||
doc.parsed_response['count'].should eq(2000)
|
||||
doc.parsed_response['result'].length.should eq(100)
|
||||
|
||||
id = doc.parsed_response['id']
|
||||
|
||||
# intentionally wrong
|
||||
cmd = "/_api/cursor/#{id}"
|
||||
doc = ArangoDB.log_put("#{prefix}-return-cont", cmd)
|
||||
|
||||
doc.code.should eq(404)
|
||||
doc.headers['content-type'].should eq("application/json; charset=utf-8")
|
||||
doc.parsed_response['error'].should eq(true)
|
||||
doc.parsed_response['code'].should eq(404)
|
||||
doc.parsed_response['errorNum'].should eq(1600)
|
||||
end
|
||||
end
|
||||
|
||||
################################################################################
|
||||
|
|
|
@ -32,7 +32,34 @@ AddFollower::AddFollower(Node const& snapshot, Agent* agent,
                         std::string const& prefix, std::string const& database,
                         std::string const& collection,
                         std::string const& shard,
                         std::string const& newFollower)
                         std::initializer_list<std::string> const& newFollower)
    : Job(snapshot, agent, jobId, creator, prefix),
      _database(database),
      _collection(collection),
      _shard(shard),
      _newFollower(newFollower) {
  try {
    JOB_STATUS js = status();

    if (js == TODO) {
      start();
    } else if (js == NOTFOUND) {
      if (create()) {
        start();
      }
    }
  } catch (std::exception const& e) {
    LOG_TOPIC(WARN, Logger::AGENCY) << e.what() << __FILE__ << __LINE__;
    finish("Shards/" + _shard, false, e.what());
  }
}

AddFollower::AddFollower(Node const& snapshot, Agent* agent,
                         std::string const& jobId, std::string const& creator,
                         std::string const& prefix, std::string const& database,
                         std::string const& collection,
                         std::string const& shard,
                         std::vector<std::string> const& newFollower)
    : Job(snapshot, agent, jobId, creator, prefix),
      _database(database),
      _collection(collection),

@ -73,6 +100,16 @@ bool AddFollower::create() {
  TRI_ASSERT(current[0].isString());
#endif

  size_t sub = 0;
  auto const& myClones = clones(_snapshot, _database, _collection, _shard);
  if (!myClones.empty()) {
    for (auto const& clone : myClones) {
      AddFollower(_snapshot, _agent, _jobId + "-" + std::to_string(sub++),
                  _jobId, _agencyPrefix, _database, clone.collection,
                  clone.shard, _newFollower);
    }
  }

  _jb = std::make_shared<Builder>();
  _jb->openArray();
  _jb->openObject();

@ -85,7 +122,13 @@ bool AddFollower::create() {
  _jb->add("database", VPackValue(_database));
  _jb->add("collection", VPackValue(_collection));
  _jb->add("shard", VPackValue(_shard));
  _jb->add("newFollower", VPackValue(_newFollower));
  _jb->add(VPackValue("newFollower"));
  {
    VPackArrayBuilder b(_jb.get());
    for (auto const& i : _newFollower) {
      _jb->add(VPackValue(i));
    }
  }
  _jb->add("jobId", VPackValue(_jobId));
  _jb->add("timeCreated", VPackValue(now));

@ -118,7 +161,7 @@ bool AddFollower::start() {

  for (auto const& srv : VPackArrayIterator(current)) {
    TRI_ASSERT(srv.isString());
    if (srv.copyString() == _newFollower) {
    if (srv.copyString() == _newFollower.front()) {
      finish("Shards/" + _shard, false,
             "newFollower must not be already holding the shard.");
      return false;

@ -126,7 +169,7 @@ bool AddFollower::start() {
  }
  for (auto const& srv : VPackArrayIterator(planned)) {
    TRI_ASSERT(srv.isString());
    if (srv.copyString() == _newFollower) {
    if (srv.copyString() == _newFollower.front()) {
      finish("Shards/" + _shard, false,
             "newFollower must not be planned for shard already.");
      return false;

@ -182,7 +225,9 @@ bool AddFollower::start() {
  for (auto const& srv : VPackArrayIterator(planned)) {
    pending.add(srv);
  }
  pending.add(VPackValue(_newFollower));
  for (auto const& i : _newFollower) {
    pending.add(VPackValue(i));
  }
  pending.close();

  // --- Increment Plan/Version

@ -213,7 +258,7 @@ bool AddFollower::start() {

  if (res.accepted && res.indices.size() == 1 && res.indices[0]) {
    LOG_TOPIC(INFO, Logger::AGENCY)
        << "Pending: Addfollower " + _newFollower + " to shard " + _shard;
        << "Pending: Addfollower " << _newFollower << " to shard " << _shard;
    return true;
  }

@ -229,8 +274,12 @@ JOB_STATUS AddFollower::status() {
  try {
    _database = _snapshot(pos[status] + _jobId + "/database").getString();
    _collection = _snapshot(pos[status] + _jobId + "/collection").getString();
    _newFollower =
        _snapshot(pos[status] + _jobId + "/newFollower").getString();
    for (auto const& i :
         VPackArrayIterator(
             _snapshot(pos[status] + _jobId + "/newFollower").getArray())) {
      _newFollower.push_back(i.copyString());
    }
    _shard = _snapshot(pos[status] + _jobId + "/shard").getString();
  } catch (std::exception const& e) {
    std::stringstream err;

@ -247,7 +296,7 @@ JOB_STATUS AddFollower::status() {

  Slice current = _snapshot(curPath).slice();
  for (auto const& srv : VPackArrayIterator(current)) {
    if (srv.copyString() == _newFollower) {
    if (srv.copyString() == _newFollower.front()) {
      if (finish("Shards/" + _shard)) {
        return FINISHED;
      }

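The hunks above change AddFollower's `newFollower` from one server id string to a list, and the persisted job record's "newFollower" field from a string to an array. A minimal standalone sketch of that payload shape, assuming made-up server ids and a hypothetical `makeJobEntry` helper standing in for the VPackBuilder code:

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical helper: serialize the follower list the way the job record
// now stores it, i.e. as a JSON array instead of a single string.
std::string makeJobEntry(std::vector<std::string> const& newFollower) {
  std::ostringstream out;
  out << "{\"newFollower\":[";
  for (size_t i = 0; i < newFollower.size(); ++i) {
    if (i > 0) out << ",";
    out << '"' << newFollower[i] << '"';
  }
  out << "]}";
  return out.str();
}

int main() {
  // One job can now add several followers to a shard at once.
  std::cout << makeJobEntry({"PRMR-0001", "PRMR-0002"}) << "\n";
}
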
@ -33,14 +33,26 @@ namespace consensus {
struct AddFollower : public Job {

  AddFollower (Node const& snapshot,
               Agent* agent,
               std::string const& jobId,
               std::string const& creator,
               std::string const& prefix,
               std::string const& database = std::string(),
               std::string const& collection = std::string(),
               std::string const& shard = std::string(),
               std::string const& newFollower = std::string());
               Agent* agent,
               std::string const& jobId,
               std::string const& creator,
               std::string const& prefix,
               std::string const& database,
               std::string const& collection,
               std::string const& shard,
               std::initializer_list<std::string> const&);

  AddFollower (Node const& snapshot,
               Agent* agent,
               std::string const& jobId,
               std::string const& creator,
               std::string const& prefix,
               std::string const& database = std::string(),
               std::string const& collection = std::string(),
               std::string const& shard = std::string(),
               std::vector<std::string> const& newFollowers = {});

  virtual ~AddFollower ();

@ -51,7 +63,7 @@ struct AddFollower : public Job {
  std::string _database;
  std::string _collection;
  std::string _shard;
  std::string _newFollower;
  std::vector<std::string> _newFollower;

};

@ -124,7 +124,6 @@ void AgencyPrecondition::toGeneralBuilder(VPackBuilder& builder) const {
AgencyOperation::AgencyOperation(std::string const& key)
    : _key(AgencyCommManager::path(key)), _opType() {
  _opType.type = AgencyOperationType::Type::READ;
  LOG(WARN) << _opType.toString();
}

AgencyOperation::AgencyOperation(std::string const& key,

@ -575,8 +575,9 @@ query_t Agent::lastAckedAgo() const {
trans_ret_t Agent::transact(query_t const& queries) {
  arangodb::consensus::index_t maxind = 0;  // maximum write index

  if (!_constituent.leading()) {
    return trans_ret_t(false, _constituent.leaderID());
  auto leader = _constituent.leaderID();
  if (leader != id()) {
    return trans_ret_t(false, leader);
  }

  // Apply to spearhead and get indices for log entries

@ -635,8 +636,9 @@ write_ret_t Agent::write(query_t const& query) {
  std::vector<bool> applied;
  std::vector<index_t> indices;

  if (!_constituent.leading()) {
    return write_ret_t(false, _constituent.leaderID());
  auto leader = _constituent.leaderID();
  if (leader != id()) {
    return write_ret_t(false, leader);
  }

  // Apply to spearhead and get indices for log entries

@ -668,8 +670,10 @@ write_ret_t Agent::write(query_t const& query) {

/// Read from store
read_ret_t Agent::read(query_t const& query) {
  if (!_constituent.leading()) {
    return read_ret_t(false, _constituent.leaderID());

  auto leader = _constituent.leaderID();
  if (leader != id()) {
    return read_ret_t(false, leader);
  }

  MUTEX_LOCKER(mutexLocker, _ioLock);

@ -700,6 +704,13 @@ void Agent::run() {

    // Leader working only
    if (leading()) {

      // Really leading?
      if (challengeLeadership()) {
        _constituent.candidate();
      }

      // Don't panic
      _appendCV.wait(1000);

      // Append entries to followers

@ -910,6 +921,8 @@ void Agent::notifyInactive() const {
  out.add("id", VPackValue(id()));
  out.add("active", _config.activeToBuilder()->slice());
  out.add("pool", _config.poolToBuilder()->slice());
  out.add("min ping", VPackValue(_config.minPing()));
  out.add("max ping", VPackValue(_config.maxPing()));
  out.close();

  for (auto const& p : pool) {

@ -954,6 +967,12 @@ void Agent::notify(query_t const& message) {
  if (!slice.hasKey("pool") || !slice.get("pool").isObject()) {
    THROW_ARANGO_EXCEPTION(TRI_ERROR_AGENCY_INFORM_MUST_CONTAIN_POOL);
  }
  if (!slice.hasKey("min ping") || !slice.get("min ping").isNumber()) {
    THROW_ARANGO_EXCEPTION(TRI_ERROR_AGENCY_INFORM_MUST_CONTAIN_POOL);
  }
  if (!slice.hasKey("max ping") || !slice.get("max ping").isNumber()) {
    THROW_ARANGO_EXCEPTION(TRI_ERROR_AGENCY_INFORM_MUST_CONTAIN_POOL);
  }

  _config.update(message);
  _state.persistActiveAgents(_config.activeToBuilder(),

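The transact/write/read hunks above all apply the same change: instead of asking "am I leading?", the agent reads the current leader id once and compares it to its own id, so the decision and the redirect hint returned to the caller come from one consistent snapshot. A standalone sketch of the pattern, with illustrative names in place of the agent types:

#include <iostream>
#include <string>

struct WriteResult {
  bool accepted;
  std::string redirectTo;  // which agent to contact if not accepted
};

WriteResult write(std::string const& myId, std::string const& leaderId) {
  if (leaderId != myId) {
    // Same leader value used for the check and the redirect answer.
    return {false, leaderId};
  }
  return {true, myId};
}

int main() {
  auto r = write("agent-A", "agent-B");
  std::cout << r.accepted << " redirect to " << r.redirectTo << "\n";
}
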
@ -345,13 +345,15 @@ void config_t::update(query_t const& message) {
  VPackSlice slice = message->slice();
  std::map<std::string, std::string> pool;
  bool changed = false;
  for (auto const& p : VPackObjectIterator(slice.get("pool"))) {
  for (auto const& p : VPackObjectIterator(slice.get(poolStr))) {
    pool[p.key.copyString()] = p.value.copyString();
  }
  std::vector<std::string> active;
  for (auto const& a : VPackArrayIterator(slice.get("active"))) {
  for (auto const& a : VPackArrayIterator(slice.get(activeStr))) {
    active.push_back(a.copyString());
  }
  double minPing = slice.get(minPingStr).getDouble();
  double maxPing = slice.get(maxPingStr).getDouble();
  WRITE_LOCKER(writeLocker, _lock);
  if (pool != _pool) {
    _pool = pool;

@ -361,6 +363,14 @@ void config_t::update(query_t const& message) {
    _active = active;
    changed = true;
  }
  if (minPing != _minPing) {
    _minPing = minPing;
    changed = true;
  }
  if (maxPing != _maxPing) {
    _maxPing = maxPing;
    changed = true;
  }
  if (changed) {
    ++_version;
  }

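The config_t::update() change above extends a simple pattern: compare each incoming field against the stored one and bump a version counter only if anything actually changed. A standalone sketch with simplified stand-in types:

#include <cstdint>
#include <iostream>

struct Config {
  double minPing = 0.5;
  double maxPing = 2.5;
  uint64_t version = 0;

  void update(double newMin, double newMax) {
    bool changed = false;
    if (newMin != minPing) { minPing = newMin; changed = true; }
    if (newMax != maxPing) { maxPing = newMax; changed = true; }
    if (changed) { ++version; }  // version moves only on real change
  }
};

int main() {
  Config c;
  c.update(0.5, 2.5);  // no-op: version stays 0
  c.update(1.0, 5.0);  // real change: version becomes 1
  std::cout << c.version << "\n";
}
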
@ -243,7 +243,7 @@ bool CleanOutServer::start() {

bool CleanOutServer::scheduleMoveShards() {

  std::vector<std::string> servers = availableServers();
  std::vector<std::string> servers = availableServers(_snapshot);

  // Minimum 1 DB server must remain
  if (servers.size() == 1) {

@ -303,6 +303,11 @@ bool Constituent::checkLeader(term_t term, std::string id, index_t prevLogIndex,
/// @brief Vote
bool Constituent::vote(term_t termOfPeer, std::string id, index_t prevLogIndex,
                       term_t prevLogTerm) {

  if (!_agent->ready()) {
    return false;
  }

  TRI_ASSERT(_vocbase != nullptr);

  LOG_TOPIC(TRACE, Logger::AGENCY)

@ -66,7 +66,19 @@ bool FailedFollower::create() {
    << "Todo: failed Follower for " + _shard + " from " + _from + " to " + _to;

  std::string path = _agencyPrefix + toDoPrefix + _jobId;
  std::string planPath =
      planColPrefix + _database + "/" + _collection + "/shards";

  size_t sub = 0;
  auto const& myClones = clones(_snapshot, _database, _collection, _shard);
  if (!myClones.empty()) {
    for (auto const& clone : myClones) {
      FailedFollower(_snapshot, _agent, _jobId + "-" + std::to_string(sub++),
                     _jobId, _agencyPrefix, _database, clone.collection,
                     clone.shard, _from, _to);
    }
  }

  _jb = std::make_shared<Builder>();
  _jb->openArray();
  _jb->openObject();

@ -182,16 +194,10 @@ bool FailedFollower::start() {

  pending.close();

  // Precondition
  // --- Check that Current servers are as we expect

  // Preconditions
  pending.openObject();
  /* pending.add(_agencyPrefix + curPath, VPackValue(VPackValueType::Object));
  pending.add("old", current.slice());
  pending.close();
  */

  // --- Check if shard is not blocked
  // --- Check if shard is not blocked by other job
  pending.add(_agencyPrefix + blockedShardsPrefix + _shard,
              VPackValue(VPackValueType::Object));
  pending.add("oldEmpty", VPackValue(true));

@ -130,14 +130,23 @@ bool FailedServer::start() {
      auto cdatabase = current.at(database.first)->children();

      for (auto const& collptr : database.second->children()) {
        Node const& collection = *(collptr.second);
        auto const& collection = *(collptr.second);

        if (!cdatabase.find(collptr.first)->second->children().empty()) {
          Node const& collection = *(collptr.second);
          Node const& replicationFactor = collection("replicationFactor");

          auto const& collection = *(collptr.second);
          auto const& replicationFactor = collection("replicationFactor");

          if (replicationFactor.slice().getUInt() > 1) {
            auto available = availableServers();

            bool isClone = false;
            try {  // Clone
              if (!collection("distributeShardsLike").slice().copyString().empty()) {
                isClone = true;
              }
            } catch (...) {}  // Not clone

            auto available = availableServers(_snapshot);

            for (auto const& shard : collection("shards").children()) {

@ -167,7 +176,7 @@ bool FailedServer::start() {
              ++pos;
            }

            if (found && available.size() > 0) {
            if (found && !available.empty() && !isClone) {
              auto randIt = available.begin();
              std::advance(randIt, std::rand() % available.size());
              FailedFollower(

@ -51,22 +51,22 @@ Inception::~Inception() { shutdown(); }
void Inception::gossip() {

  LOG_TOPIC(INFO, Logger::AGENCY) << "Entering gossip phase ...";
  using namespace std::chrono;

  auto s = std::chrono::system_clock::now();
  std::chrono::seconds timeout(3600);
  auto startTime = system_clock::now();
  seconds timeout(3600);
  size_t j = 0;
  bool complete = false;
  long waitInterval = 250000;

  CONDITION_LOCKER(guard, _cv);

  while (!this->isStopping() && !_agent->isStopping()) {

    config_t config = _agent->config();  // get a copy of conf
    size_t version = config.version();
    auto const config = _agent->config();  // get a copy of conf
    auto const version = config.version();

    // Build gossip message
    query_t out = std::make_shared<Builder>();
    auto out = std::make_shared<Builder>();
    out->openObject();
    out->add("endpoint", VPackValue(config.endpoint()));
    out->add("id", VPackValue(config.id()));

@ -77,7 +77,7 @@ void Inception::gossip() {
    out->close();
    out->close();

    std::string path = privApiPrefix + "gossip";
    auto const path = privApiPrefix + "gossip";

    // gossip peers
    for (auto const& p : config.gossipPeers()) {

@ -101,6 +101,7 @@ void Inception::gossip() {
    }

    // pool entries
    bool complete = true;
    for (auto const& pair : config.pool()) {
      if (pair.second != config.endpoint()) {
        {

@ -109,7 +110,8 @@ void Inception::gossip() {
            continue;
          }
        }
        std::string clientid = config.id() + std::to_string(j++);
        complete = false;
        auto const clientid = config.id() + std::to_string(j++);
        auto hf =
            std::make_unique<std::unordered_map<std::string, std::string>>();
        LOG_TOPIC(DEBUG, Logger::AGENCY) << "Sending gossip message: "

@ -129,11 +131,10 @@ void Inception::gossip() {
        _agent->startConstituent();
        break;
      }
      complete = true;
    }

    // Timed out? :(
    if ((std::chrono::system_clock::now() - s) > timeout) {
    if ((system_clock::now() - startTime) > timeout) {
      if (config.poolComplete()) {
        LOG_TOPIC(DEBUG, Logger::AGENCY) << "Stopping active gossiping!";
      } else {

@ -145,176 +146,131 @@ void Inception::gossip() {

    // don't panic just yet
    _cv.wait(waitInterval);
    waitInterval *= 2;

  }

}


// @brief Active agency from persisted database
bool Inception::activeAgencyFromPersistence() {

  LOG_TOPIC(INFO, Logger::AGENCY) << "Found persisted agent pool ...";

  auto myConfig = _agent->config();
  std::string const path = pubApiPrefix + "config";

  // Can only be done responsibly, if we are complete
  if (myConfig.poolComplete()) {

    // Contact hosts on pool in hopes of finding a leader Id
    for (auto const& pair : myConfig.pool()) {

      if (pair.first != myConfig.id()) {

        auto comres = arangodb::ClusterComm::instance()->syncRequest(
            myConfig.id(), 1, pair.second, rest::RequestType::GET, path,
            std::string(), std::unordered_map<std::string, std::string>(), 1.0);

        if (comres->status == CL_COMM_SENT) {

          auto body = comres->result->getBodyVelocyPack();
          auto theirConfig = body->slice();

          std::string leaderId;

          // LeaderId in configuration?
          try {
            leaderId = theirConfig.get("leaderId").copyString();
          } catch (std::exception const& e) {
            LOG_TOPIC(DEBUG, Logger::AGENCY)
                << "Failed to get leaderId from " << pair.second << ": "
                << e.what();
          }

          if (leaderId != "") {  // Got leaderId. Let's go do it.

            try {
              LOG_TOPIC(DEBUG, Logger::AGENCY)
                  << "Found active agency with leader " << leaderId
                  << " at endpoint "
                  << theirConfig.get("configuration").get(
                         "pool").get(leaderId).copyString();
            } catch (std::exception const& e) {
              LOG_TOPIC(DEBUG, Logger::AGENCY)
                  << "Failed to get leaderId from " << pair.second << ": "
                  << e.what();
            }

            auto agency = std::make_shared<Builder>();
            agency->openObject();
            agency->add("term", theirConfig.get("term"));
            agency->add("id", VPackValue(leaderId));
            agency->add("active", theirConfig.get("configuration").get("active"));
            agency->add("pool", theirConfig.get("configuration").get("pool"));
            agency->close();
            _agent->notify(agency);

            return true;

          } else {  // No leaderId. Move on.

            LOG_TOPIC(DEBUG, Logger::AGENCY)
                << "Failed to get leaderId from " << pair.second;

          }

        }

      }

      if (waitInterval < 2500000) {  // 2.5s
        waitInterval *= 2;
      }
    }

    return false;
  }

}


bool Inception::restartingActiveAgent() {

  auto myConfig = _agent->config();
  std::string const path = pubApiPrefix + "config";
  LOG_TOPIC(INFO, Logger::AGENCY) << "Restarting agent from persistence ...";

  auto s = std::chrono::system_clock::now();
  std::chrono::seconds timeout(60);
  using namespace std::chrono;

  // Can only be done responsibly, if we are complete
  if (myConfig.poolComplete()) {
  auto const path = pubApiPrefix + "config";
  auto const myConfig = _agent->config();
  auto const startTime = system_clock::now();
  auto pool = myConfig.pool();
  auto active = myConfig.active();
  auto const& clientId = myConfig.id();
  auto const majority = (myConfig.size() + 1) / 2;

  seconds const timeout(3600);

  CONDITION_LOCKER(guard, _cv);

  long waitInterval(500000);

  active.erase(
      std::remove(active.begin(), active.end(), myConfig.id()), active.end());

  while (!this->isStopping() && !_agent->isStopping()) {

    auto pool = myConfig.pool();
    auto active = myConfig.active();
    active.erase(
        std::remove(active.begin(), active.end(), ""), active.end());

    CONDITION_LOCKER(guard, _cv);

    long waitInterval(500000);

    while (!this->isStopping() && !_agent->isStopping()) {

      active.erase(
          std::remove(active.begin(), active.end(), myConfig.id()), active.end());
      active.erase(
          std::remove(active.begin(), active.end(), ""), active.end());

    if (active.empty()) {
      return true;
    }
    if (active.size() < majority) {
      LOG_TOPIC(INFO, Logger::AGENCY)
          << "Found majority of agents in agreement over active pool. "
             "Finishing startup sequence.";
      return true;
    }

    for (auto& p : pool) {

    for (auto& i : active) {
      if (p.first != myConfig.id() && p.first != "") {

      if (i != myConfig.id() && i != "") {

        auto clientId = myConfig.id();
        auto comres = arangodb::ClusterComm::instance()->syncRequest(
            clientId, 1, pool.at(i), rest::RequestType::GET, path, std::string(),
            std::unordered_map<std::string, std::string>(), 2.0);

        if (comres->status == CL_COMM_SENT) {
        auto comres = arangodb::ClusterComm::instance()->syncRequest(
            clientId, 1, p.second, rest::RequestType::GET, path, std::string(),
            std::unordered_map<std::string, std::string>(), 2.0);

        if (comres->status == CL_COMM_SENT) {
          try {

          try {

            auto theirActive = comres->result->getBodyVelocyPack()->
                slice().get("configuration").get("active").toJson();
            auto myActive = myConfig.activeToBuilder()->toJson();

            auto const theirConfigVP = comres->result->getBodyVelocyPack();
            auto const& theirConfig = theirConfigVP->slice();
            auto const& theirLeaderId = theirConfig.get("leaderId").copyString();
            auto const& tcc = theirConfig.get("configuration");

            // Known leader. We are done.
            if (!theirLeaderId.empty()) {
              LOG_TOPIC(INFO, Logger::AGENCY) <<
                  "Found active RAFTing agency led by " << theirLeaderId <<
                  ". Finishing startup sequence.";
              auto agency = std::make_shared<Builder>();
              agency->openObject();
              agency->add("term", theirConfig.get("term"));
              agency->add("id", VPackValue(theirLeaderId));
              agency->add("active", tcc.get("active"));
              agency->add("pool", tcc.get("pool"));
              agency->add("min ping", tcc.get("min ping"));
              agency->add("max ping", tcc.get("max ping"));
              agency->close();
              _agent->notify(agency);
              return true;
            }

            auto const theirActive = tcc.get("active").toJson();
            auto const myActive = myConfig.activeToBuilder()->toJson();
            auto i = std::find(active.begin(), active.end(), p.first);

            if (i != active.end()) {
              if (theirActive != myActive) {
                LOG_TOPIC(FATAL, Logger::AGENCY)
                    << "Assumed active RAFT peer and I disagree on active membership."
                    << "Administrative intervention needed.";
                    << "Assumed active RAFT peer and I disagree on active membership:";
                LOG_TOPIC(FATAL, Logger::AGENCY)
                    << "Their active list is " << theirActive;
                LOG_TOPIC(FATAL, Logger::AGENCY)
                    << "My active list is " << myActive;
                FATAL_ERROR_EXIT();
                return false;
              } else {
                i = "";
                *i = "";
              }

            } catch (std::exception const& e) {
              LOG_TOPIC(FATAL, Logger::AGENCY)
                  << "Assumed active RAFT peer has no active agency list: " << e.what()
                  << "Administrative intervention needed.";
              FATAL_ERROR_EXIT();
              return false;
            }
          }
        }


          } catch (std::exception const& e) {
            LOG_TOPIC(FATAL, Logger::AGENCY)
                << "Assumed active RAFT peer has no active agency list: "
                << e.what() << "Administrative intervention needed.";
            FATAL_ERROR_EXIT();
            return false;
          }
        }
      }

      // Timed out? :(
      if ((std::chrono::system_clock::now() - s) > timeout) {
        if (myConfig.poolComplete()) {
          LOG_TOPIC(DEBUG, Logger::AGENCY) << "Joined complete pool!";
        } else {
          LOG_TOPIC(ERR, Logger::AGENCY)
              << "Failed to find complete pool of agents. Giving up!";
        }
        break;
      }

      _cv.wait(waitInterval);
      waitInterval *= 2;

    }

    // Timed out? :(
    if ((system_clock::now() - startTime) > timeout) {
      if (myConfig.poolComplete()) {
        LOG_TOPIC(DEBUG, Logger::AGENCY) << "Joined complete pool!";
      } else {
        LOG_TOPIC(ERR, Logger::AGENCY)
            << "Failed to find complete pool of agents. Giving up!";
      }
      break;
    }

    _cv.wait(waitInterval);
    if (waitInterval < 2500000) {  // 2.5s
      waitInterval *= 2;
    }

  }

  return false;

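Both loops above share one wait strategy: a condition-variable wait whose interval doubles each round but is capped (2.5 seconds in the code), under an overall deadline. A standalone sketch of that capped exponential backoff, simplified to plain sleeps with a short demo deadline and no agent types:

#include <chrono>
#include <iostream>
#include <thread>

int main() {
  using namespace std::chrono;
  auto const start = steady_clock::now();
  auto const deadline = start + seconds(2);  // short deadline for the demo
  microseconds waitInterval(100000);

  while (steady_clock::now() < deadline) {
    // ... probe peers here ...
    std::this_thread::sleep_for(waitInterval);
    if (waitInterval < microseconds(2500000)) {  // cap at 2.5s, as above
      waitInterval *= 2;
    }
    std::cout << "next wait " << waitInterval.count() << "us\n";
  }
}
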
@ -368,7 +324,7 @@ bool Inception::estimateRAFTInterval() {
  auto config = _agent->config();

  auto myid = _agent->id();
  double to = 0.25;
  auto to = duration<double, std::milli>(1.0);  //

  for (size_t i = 0; i < nrep; ++i) {
    for (auto const& peer : config.pool()) {

@ -383,7 +339,7 @@ bool Inception::estimateRAFTInterval() {
          2.0, true);
      }
    }
    std::this_thread::sleep_for(std::chrono::duration<double, std::milli>(to));
    std::this_thread::sleep_for(to);
    to *= 1.01;
  }

@ -512,11 +468,6 @@ bool Inception::estimateRAFTInterval() {
}


// @brief Active agency from persisted database
bool Inception::activeAgencyFromCommandLine() {
  return false;
}

// @brief Thread main
void Inception::run() {
  while (arangodb::rest::RestHandlerFactory::isMaintenance() &&

@ -528,23 +479,25 @@ void Inception::run() {
  }

  config_t config = _agent->config();
  // 1. If active agency, do as you're told
  if (config.startup() == "persistence" && activeAgencyFromPersistence()) {
    _agent->ready(true);

  // Are we starting from persisted pool?
  if (config.startup() == "persistence") {
    if (restartingActiveAgent()) {
      LOG_TOPIC(INFO, Logger::AGENCY) << "Activating agent.";
      _agent->ready(true);
    } else {
      LOG_TOPIC(FATAL, Logger::AGENCY)
          << "Unable to restart with persisted pool. Fatal exit.";
      FATAL_ERROR_EXIT();
      // FATAL ERROR
    }
    return;
  }

  // 2. If we think that we used to be active agent
  if (!_agent->ready() && restartingActiveAgent()) {
    _agent->ready(true);
  }

  // 3. Else gossip
  config = _agent->config();
  if (!_agent->ready() && !config.poolComplete()) {
    gossip();
  }
  // Gossip
  gossip();

  // 4. If still incomplete bail out :(
  // No complete pool after gossip?
  config = _agent->config();
  if (!_agent->ready() && !config.poolComplete()) {
    LOG_TOPIC(FATAL, Logger::AGENCY)

@ -552,12 +505,13 @@ void Inception::run() {
    FATAL_ERROR_EXIT();
  }

  // 5. If command line RAFT timings have not been set explicitly
  // Try good estimate of RAFT time limits
  // If command line RAFT timings have not been set explicitly
  // Try good estimate of RAFT time limits
  if (!config.cmdLineTimings()) {
    estimateRAFTInterval();
  }

  LOG_TOPIC(INFO, Logger::AGENCY) << "Activating agent.";
  _agent->ready(true);

}

@ -69,15 +69,9 @@ public:

 private:

  /// @brief Find active agency from persistence
  bool activeAgencyFromPersistence();

  /// @brief We are a restarting active RAFT agent
  bool restartingActiveAgent();

  /// @brief Find active agency from command line
  bool activeAgencyFromCommandLine();

  /// @brief Try to estimate good RAFT min/max timeouts
  bool estimateRAFTInterval();

@ -143,12 +143,12 @@ bool Job::finish(std::string const& type, bool success,
}


std::vector<std::string> Job::availableServers() const {
std::vector<std::string> Job::availableServers(Node const& snapshot) {

  std::vector<std::string> ret;

  // Get servers from plan
  Node::Children const& dbservers = _snapshot(plannedServers).children();
  Node::Children const& dbservers = snapshot(plannedServers).children();
  for (auto const& srv : dbservers) {
    ret.push_back(srv.first);
  }

@ -156,7 +156,7 @@ std::vector<std::string> Job::availableServers() const {
  // Remove cleaned servers from list
  try {
    for (auto const& srv :
         VPackArrayIterator(_snapshot(cleanedPrefix).slice())) {
         VPackArrayIterator(snapshot(cleanedPrefix).slice())) {
      ret.erase(
          std::remove(ret.begin(), ret.end(), srv.copyString()),
          ret.end());

@ -167,7 +167,7 @@ std::vector<std::string> Job::availableServers() const {
  // Remove failed servers from list
  try {
    for (auto const& srv :
         VPackArrayIterator(_snapshot(failedServersPrefix).slice())) {
         VPackArrayIterator(snapshot(failedServersPrefix).slice())) {
      ret.erase(
          std::remove(ret.begin(), ret.end(), srv.copyString()),
          ret.end());

@ -177,3 +177,38 @@ std::vector<std::string> Job::availableServers() const {
  return ret;

}

std::vector<Job::shard_t> Job::clones(
    Node const& snapshot, std::string const& database,
    std::string const& collection, std::string const& shard) {

  std::vector<shard_t> ret;

  std::string databasePath = planColPrefix + database,
              planPath = databasePath + "/" + collection + "/shards";

  auto myshards = snapshot(planPath).children();
  auto steps = std::distance(myshards.begin(), myshards.find(shard));

  for (const auto& colptr : snapshot(databasePath).children()) {  // collections

    auto const col = *colptr.second;
    auto const otherCollection = colptr.first;

    try {
      std::string const& prototype =
          col("distributeShardsLike").slice().copyString();
      if (otherCollection != collection && prototype == collection) {
        auto othershards = col("shards").children();
        auto opos = othershards.begin();
        std::advance(opos, steps);
        auto const& otherShard = opos->first;
        ret.push_back(shard_t(otherCollection, otherShard));
      }
    } catch (...) {}

  }

  return ret;

}

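Job::clones() above relies on an alignment invariant: a collection created with distributeShardsLike mirrors its prototype's shard list, so the clone of the i-th shard of the prototype is the i-th shard of the follower collection. A standalone sketch of that index arithmetic, where std::map stands in for Node::Children (both iterate in sorted key order) and the shard ids are made up:

#include <iostream>
#include <iterator>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> protoShards = {{"s100", 0}, {"s101", 0}};
  std::map<std::string, int> cloneShards = {{"s200", 0}, {"s201", 0}};

  std::string const shard = "s101";
  // Position of the shard in the prototype's sorted shard map ...
  auto steps = std::distance(protoShards.begin(), protoShards.find(shard));

  // ... is the position of its clone in the follower's shard map.
  auto opos = cloneShards.begin();
  std::advance(opos, steps);
  std::cout << "clone of " << shard << " is " << opos->first << "\n";  // s201
}
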
@ -93,6 +93,13 @@ struct JobCallback {
};

struct Job {

  struct shard_t {
    std::string collection;
    std::string shard;
    shard_t (std::string const& c, std::string const& s) :
      collection(c), shard(s) {}
  };

  Job(Node const& snapshot, Agent* agent, std::string const& jobId,
      std::string const& creator, std::string const& agencyPrefix);

@ -109,7 +116,12 @@ struct Job {

  virtual bool start() = 0;

  virtual std::vector<std::string> availableServers() const;
  static std::vector<std::string> availableServers(
      const arangodb::consensus::Node&);

  static std::vector<shard_t> clones(
      Node const& snap, std::string const& db, std::string const& col,
      std::string const& shrd);

  Node const _snapshot;
  Agent* _agent;

@ -781,3 +781,15 @@ std::string Node::getString() const {
  }
  return slice().copyString();
}

Slice Node::getArray() const {
  if (type() == NODE) {
    throw StoreException("Must not convert NODE type to array");
  }
  if (!_isArray) {
    throw StoreException("Not an array type");
  }
  rebuildVecBuf();
  return Slice(_vecBuf.data());
}

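Node::getArray() above pairs with the mutable `_vecBuf`/`_vecBufDirty` members added to Node.h below: a serialized form of the array is kept in a buffer and, by the look of rebuildVecBuf(), regenerated only when stale. A standalone sketch of that caching idea, with std::string standing in for the velocypack Buffer/Slice types:

#include <iostream>
#include <string>
#include <vector>

class ArrayNode {
 public:
  void push(std::string v) { _values.push_back(std::move(v)); _dirty = true; }

  std::string const& serialized() const {
    if (_dirty) {  // rebuild lazily, analogous to rebuildVecBuf()
      _buf.clear();
      for (auto const& v : _values) { _buf += v; _buf += ','; }
      _dirty = false;
    }
    return _buf;
  }

 private:
  std::vector<std::string> _values;
  mutable std::string _buf;   // cached serialization
  mutable bool _dirty = true; // invalidated on mutation
};

int main() {
  ArrayNode n;
  n.push("a"); n.push("b");
  std::cout << n.serialized() << "\n";  // rebuilt once, cached afterwards
}
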
@ -217,6 +217,9 @@ class Node {
  /// @brief Get string value (throws if type NODE or if conversion fails)
  std::string getString() const;

  /// @brief Get array value
  Slice getArray() const;

 protected:
  /// @brief Add time to live entry
  virtual bool addTimeToLive(long millis);

@ -231,8 +234,7 @@ class Node {
  Store* _store;       ///< @brief Store
  Children _children;  ///< @brief child nodes
  TimePoint _ttl;      ///< @brief my expiry
  // Buffer<uint8_t> _value;  ///< @brief my value
  std::vector<Buffer<uint8_t>> _value;  ///< @brief my value
  std::vector<Buffer<uint8_t>> _value;  ///< @brief my value
  mutable Buffer<uint8_t> _vecBuf;
  mutable bool _vecBufDirty;
  bool _isArray;

@ -286,7 +286,7 @@ bool RemoveServer::start() {

bool RemoveServer::scheduleAddFollowers() {

  std::vector<std::string> servers = availableServers();
  std::vector<std::string> servers = availableServers(_snapshot);

  // Minimum 1 DB server must remain
  if (servers.size() == 1) {

@ -355,7 +355,7 @@ bool RemoveServer::scheduleAddFollowers() {

          AddFollower(_snapshot, _agent, _jobId + "-" + std::to_string(sub++),
                      _jobId, _agencyPrefix, database.first, collptr.first,
                      shard.first, newServer);
                      shard.first, {newServer});
        }
      }
    }

@ -406,6 +406,7 @@ inline RestStatus RestAgencyHandler::handleRead() {
    return RestStatus::DONE;

  } else {
    TRI_ASSERT(ret.redirect != _agent->id());
    redirectRequest(ret.redirect);
  }
  return RestStatus::DONE;

@ -462,7 +462,7 @@ void Supervision::run() {
        }
      }
    }
    _cv.wait(1000000 * _frequency);
    _cv.wait(static_cast<uint64_t>(1000000 * _frequency));
  }
}
if (shutdown) {

@ -548,8 +548,10 @@ bool Supervision::handleJobs() {
  }

  // Do supervision

  shrinkCluster();
  workJobs();
  enforceReplication();

  return true;
}

@ -610,6 +612,64 @@ void Supervision::workJobs() {
  }
}

void Supervision::enforceReplication() {

  auto const& plannedDBs = _snapshot(planColPrefix).children();
  auto available = Job::availableServers(_snapshot);

  for (const auto& db_ : plannedDBs) {  // Planned databases
    auto const& db = *(db_.second);
    for (const auto& col_ : db.children()) {  // Planned collections
      auto const& col = *(col_.second);
      auto replicationFactor = col("replicationFactor").slice().getUInt();

      // mop: satellites => distribute to every server
      if (replicationFactor == 0) {
        replicationFactor = available.size();
      }

      bool clone = false;
      try {
        clone = !col("distributeShardsLike").slice().copyString().empty();
      } catch (...) {}

      if (!clone) {
        for (auto const& shard_ : col("shards").children()) {  // Pl shards
          auto const& shard = *(shard_.second);

          // Enough DBServer to
          if (replicationFactor > shard.slice().length() &&
              available.size() > shard.slice().length()) {
            for (auto const& i : VPackArrayIterator(shard.slice())) {
              available.erase(
                  std::remove(
                      available.begin(), available.end(), i.copyString()),
                  available.end());
            }

            size_t optimal = replicationFactor - shard.slice().length();
            std::vector<std::string> newFollowers;
            for (size_t i = 0; i < optimal; ++i) {
              auto randIt = available.begin();
              std::advance(randIt, std::rand() % available.size());
              newFollowers.push_back(*randIt);
              available.erase(randIt);
              if (available.empty()) {
                break;
              }
            }

            AddFollower(
                _snapshot, _agent, std::to_string(_jobId++), "supervision",
                _agencyPrefix, db_.first, col_.first, shard_.first, newFollowers);
          }
        }
      }
    }
  }

}

// Shrink cluster if applicable, guarded by caller
void Supervision::shrinkCluster() {
  // Get servers from plan

@ -705,7 +765,7 @@ void Supervision::shrinkCluster() {
  **/
  // Find greatest replication factor among all collections
  uint64_t maxReplFact = 1;
  Node::Children const& databases = _snapshot("/Plan/Collections").children();
  Node::Children const& databases = _snapshot(planColPrefix).children();
  for (auto const& database : databases) {
    for (auto const& collptr : database.second->children()) {
      uint64_t replFact{0};

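The core of enforceReplication() above is a deficit calculation: given a shard's current servers and a target replication factor, exclude the current holders from the candidate pool and draw the missing followers at random. A standalone sketch with illustrative server ids:

#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> available = {"dbs1", "dbs2", "dbs3", "dbs4"};
  std::vector<std::string> holders = {"dbs1"};
  size_t const replicationFactor = 3;

  // Current holders are not candidates for new followers.
  for (auto const& h : holders) {
    available.erase(std::remove(available.begin(), available.end(), h),
                    available.end());
  }

  // Pick the missing followers at random, without replacement.
  size_t missing = replicationFactor - holders.size();
  std::vector<std::string> newFollowers;
  for (size_t i = 0; i < missing && !available.empty(); ++i) {
    auto randIt = available.begin();
    std::advance(randIt, std::rand() % available.size());
    newFollowers.push_back(*randIt);
    available.erase(randIt);
  }

  for (auto const& f : newFollowers) std::cout << f << "\n";
}
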
@ -117,6 +117,9 @@ class Supervision : public arangodb::Thread {

 private:

  /// @brief Check for inconsistencies in replication factor vs dbs entries
  void enforceReplication();

  /// @brief Update agency prefix from agency itself
  bool updateAgencyPrefix(size_t nTries = 10, int intervalSec = 1);

@ -1634,6 +1634,7 @@ void Ast::validateAndOptimize() {
  struct TraversalContext {
    std::unordered_set<std::string> writeCollectionsSeen;
    std::unordered_map<std::string, int64_t> collectionsFirstSeen;
    std::unordered_map<Variable const*, AstNode const*> variableDefinitions;
    int64_t stopOptimizationRequests = 0;
    int64_t nestingLevel = 0;
    bool isInFilter = false;

@ -1769,7 +1770,7 @@ void Ast::validateAndOptimize() {

    // attribute access
    if (node->type == NODE_TYPE_ATTRIBUTE_ACCESS) {
      return this->optimizeAttributeAccess(node);
      return this->optimizeAttributeAccess(node, static_cast<TraversalContext*>(data)->variableDefinitions);
    }

    // passthru node

@ -1813,6 +1814,22 @@ void Ast::validateAndOptimize() {

    // LET
    if (node->type == NODE_TYPE_LET) {
      // remember variable assignments
      TRI_ASSERT(node->numMembers() == 2);
      auto context = static_cast<TraversalContext*>(data);
      Variable const* variable = static_cast<Variable const*>(node->getMember(0)->getData());
      AstNode const* definition = node->getMember(1);
      // recursively process assignments so we can track LET a = b LET c = b

      while (definition->type == NODE_TYPE_REFERENCE) {
        auto it = context->variableDefinitions.find(static_cast<Variable const*>(definition->getData()));
        if (it == context->variableDefinitions.end()) {
          break;
        }
        definition = (*it).second;
      }

      context->variableDefinitions.emplace(variable, definition);
      return this->optimizeLet(node);
    }

@ -2669,12 +2686,21 @@ AstNode* Ast::optimizeTernaryOperator(AstNode* node) {
}

/// @brief optimizes an attribute access
AstNode* Ast::optimizeAttributeAccess(AstNode* node) {
AstNode* Ast::optimizeAttributeAccess(AstNode* node, std::unordered_map<Variable const*, AstNode const*> const& variableDefinitions) {
  TRI_ASSERT(node != nullptr);
  TRI_ASSERT(node->type == NODE_TYPE_ATTRIBUTE_ACCESS);
  TRI_ASSERT(node->numMembers() == 1);

  AstNode* what = node->getMember(0);
  AstNode const* what = node->getMember(0);

  if (what->type == NODE_TYPE_REFERENCE) {
    // check if the access value is a variable and if it is an alias
    auto it = variableDefinitions.find(static_cast<Variable const*>(what->getData()));

    if (it != variableDefinitions.end()) {
      what = (*it).second;
    }
  }

  if (!what->isConstant()) {
    return node;

@ -2689,6 +2715,7 @@ AstNode* Ast::optimizeAttributeAccess(AstNode* node) {

  for (size_t i = 0; i < n; ++i) {
    AstNode const* member = what->getMember(0);

    if (member->type == NODE_TYPE_OBJECT_ELEMENT &&
        member->getStringLength() == length &&
        memcmp(name, member->getStringValue(), length) == 0) {

@ -2942,6 +2969,8 @@ AstNode* Ast::nodeFromVPack(VPackSlice const& slice, bool copyStringValues) {
    for (auto const& it : VPackArrayIterator(slice)) {
      node->addMember(nodeFromVPack(it, copyStringValues));
    }

    node->setFlag(DETERMINED_CONSTANT, VALUE_CONSTANT);

    return node;
  }

@ -2963,6 +2992,8 @@ AstNode* Ast::nodeFromVPack(VPackSlice const& slice, bool copyStringValues) {
      node->addMember(createNodeObjectElement(
          attributeName, static_cast<size_t>(nameLength), nodeFromVPack(it.value, copyStringValues)));
    }

    node->setFlag(DETERMINED_CONSTANT, VALUE_CONSTANT);

    return node;
  }

@ -2970,6 +3001,61 @@ AstNode* Ast::nodeFromVPack(VPackSlice const& slice, bool copyStringValues) {
  return createNodeValueNull();
}

/// @brief resolve an attribute access
AstNode const* Ast::resolveConstAttributeAccess(AstNode const* node) {
  TRI_ASSERT(node != nullptr);
  TRI_ASSERT(node->type == NODE_TYPE_ATTRIBUTE_ACCESS);

  std::vector<std::string> attributeNames;

  while (node->type == NODE_TYPE_ATTRIBUTE_ACCESS) {
    attributeNames.emplace_back(node->getString());
    node = node->getMember(0);
  }

  size_t which = attributeNames.size();
  TRI_ASSERT(which > 0);

  while (which > 0) {
    TRI_ASSERT(node->type == NODE_TYPE_VALUE || node->type == NODE_TYPE_ARRAY ||
               node->type == NODE_TYPE_OBJECT);

    bool found = false;

    if (node->type == NODE_TYPE_OBJECT) {
      TRI_ASSERT(which > 0);
      std::string const& attributeName = attributeNames[which - 1];
      --which;

      size_t const n = node->numMembers();
      for (size_t i = 0; i < n; ++i) {
        auto member = node->getMember(i);

        if (member->type == NODE_TYPE_OBJECT_ELEMENT &&
            member->getString() == attributeName) {
          // found the attribute
          node = member->getMember(0);
          if (which == 0) {
            // we found what we looked for
            return node;
          }
          // we found the correct attribute but there is now an attribute
          // access on the result
          found = true;
          break;
        }
      }
    }

    if (!found) {
      break;
    }
  }

  // attribute not found or non-array
  return createNodeValueNull();
}

/// @brief traverse the AST, using pre- and post-order visitors
AstNode* Ast::traverseAndModify(
    AstNode* node, std::function<bool(AstNode const*, void*)> preVisitor,

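The optimizer change above records LET assignments in a map, chases reference chains (LET a = b LET c = b) until a non-reference definition is found, and only then attempts constant folding of the attribute access. A standalone sketch of that alias resolution, where a small tagged struct replaces AstNode:

#include <iostream>
#include <string>
#include <unordered_map>

struct Node {
  bool isReference = false;
  std::string refersTo;  // variable name, if a reference
  std::string constant;  // folded value, if not a reference
};

int main() {
  std::unordered_map<std::string, Node> defs;
  defs["a"] = {false, "", "42"};  // LET a = 42
  defs["b"] = {true, "a", ""};    // LET b = a

  Node def = defs["b"];
  while (def.isReference) {  // chase the alias chain through the map
    auto it = defs.find(def.refersTo);
    if (it == defs.end()) {
      break;  // unknown definition: stop, no folding possible
    }
    def = it->second;
  }
  std::cout << def.constant << "\n";  // 42: b folds to a's constant
}
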
@ -412,6 +412,9 @@ class Ast {

  /// @brief create an AST node from vpack
  AstNode* nodeFromVPack(arangodb::velocypack::Slice const&, bool);

  /// @brief resolve an attribute access
  static AstNode const* resolveConstAttributeAccess(AstNode const*);

  /// @brief traverse the AST using a depth-first visitor
  static AstNode* traverseAndModify(AstNode*,

@ -457,7 +460,7 @@ class Ast {
  AstNode* optimizeTernaryOperator(AstNode*);

  /// @brief optimizes an attribute access
  AstNode* optimizeAttributeAccess(AstNode*);
  AstNode* optimizeAttributeAccess(AstNode*, std::unordered_map<Variable const*, AstNode const*> const&);

  /// @brief optimizes a call to a built-in function
  AstNode* optimizeFunctionCall(AstNode*);

@ -187,61 +187,6 @@ std::unordered_map<int, std::string const> const AstNode::ValueTypeNames{

namespace {

/// @brief resolve an attribute access
static AstNode const* ResolveAttribute(AstNode const* node) {
  TRI_ASSERT(node != nullptr);
  TRI_ASSERT(node->type == NODE_TYPE_ATTRIBUTE_ACCESS);

  std::vector<std::string> attributeNames;

  while (node->type == NODE_TYPE_ATTRIBUTE_ACCESS) {
    attributeNames.emplace_back(node->getString());
    node = node->getMember(0);
  }

  size_t which = attributeNames.size();
  TRI_ASSERT(which > 0);

  while (which > 0) {
    TRI_ASSERT(node->type == NODE_TYPE_VALUE || node->type == NODE_TYPE_ARRAY ||
               node->type == NODE_TYPE_OBJECT);

    bool found = false;

    if (node->type == NODE_TYPE_OBJECT) {
      TRI_ASSERT(which > 0);
      std::string const& attributeName = attributeNames[which - 1];
      --which;

      size_t const n = node->numMembers();
      for (size_t i = 0; i < n; ++i) {
        auto member = node->getMember(i);

        if (member->type == NODE_TYPE_OBJECT_ELEMENT &&
            member->getString() == attributeName) {
          // found the attribute
          node = member->getMember(0);
          if (which == 0) {
            // we found what we looked for
            return node;
          }
          // we found the correct attribute but there is now an attribute
          // access on the result
          found = true;
          break;
        }
      }
    }

    if (!found) {
      break;
    }
  }

  // attribute not found or non-array
  return Ast::createNodeValueNull();
}

/// @brief get the node type for inter-node comparisons
static VPackValueType GetNodeCompareType(AstNode const* node) {
  TRI_ASSERT(node != nullptr);

@ -276,10 +221,10 @@ int arangodb::aql::CompareAstNodes(AstNode const* lhs, AstNode const* rhs,
  TRI_ASSERT(rhs != nullptr);

  if (lhs->type == NODE_TYPE_ATTRIBUTE_ACCESS) {
    lhs = ResolveAttribute(lhs);
    lhs = Ast::resolveConstAttributeAccess(lhs);
  }
  if (rhs->type == NODE_TYPE_ATTRIBUTE_ACCESS) {
    rhs = ResolveAttribute(rhs);
    rhs = Ast::resolveConstAttributeAccess(rhs);
  }

  auto lType = GetNodeCompareType(lhs);

@ -419,6 +364,7 @@ AstNode::AstNode(bool v, AstNodeValueType valueType)
  value.value._bool = v;
  TRI_ASSERT(flags == 0);
  TRI_ASSERT(computedValue == nullptr);
  setFlag(DETERMINED_CONSTANT, VALUE_CONSTANT);
}

/// @brief create an int node, with defining a value

@ -428,6 +374,7 @@ AstNode::AstNode(int64_t v, AstNodeValueType valueType)
  value.value._int = v;
  TRI_ASSERT(flags == 0);
  TRI_ASSERT(computedValue == nullptr);
  setFlag(DETERMINED_CONSTANT, VALUE_CONSTANT);
}

/// @brief create a string node, with defining a value

@ -437,6 +384,7 @@ AstNode::AstNode(char const* v, size_t length, AstNodeValueType valueType)
  setStringValue(v, length);
  TRI_ASSERT(flags == 0);
  TRI_ASSERT(computedValue == nullptr);
  setFlag(DETERMINED_CONSTANT, VALUE_CONSTANT);
}

/// @brief create the node from VPack

@ -183,6 +183,9 @@ class ScatterNode : public ExecutionNode {
  /// @brief return the collection
  Collection const* collection() const { return _collection; }

  /// @brief set collection
  void setCollection(Collection const* collection) { _collection = collection; }

 private:
  /// @brief the underlying database
  TRI_vocbase_t* _vocbase;

@ -302,7 +305,8 @@ class GatherNode : public ExecutionNode {
 public:
  GatherNode(ExecutionPlan* plan, size_t id, TRI_vocbase_t* vocbase,
             Collection const* collection)
      : ExecutionNode(plan, id), _vocbase(vocbase), _collection(collection) {}
      : ExecutionNode(plan, id), _vocbase(vocbase), _collection(collection),
        _auxiliaryCollections() {}

  GatherNode(ExecutionPlan*, arangodb::velocypack::Slice const& base,
             SortElementVector const& elements);

@ -357,6 +361,18 @@ class GatherNode : public ExecutionNode {
  /// @brief return the collection
  Collection const* collection() const { return _collection; }

  void setCollection(Collection const* collection) { _collection = collection; }

  std::unordered_set<Collection const*> auxiliaryCollections() const {
    return _auxiliaryCollections;
  }

  void addAuxiliaryCollection(Collection const* auxiliaryCollection) {
    _auxiliaryCollections.emplace(auxiliaryCollection);
  }

  bool hasAuxiliaryCollections() const { return !_auxiliaryCollections.empty(); }

 private:
  /// @brief pairs, consisting of variable and sort direction
  /// (true = ascending | false = descending)

@ -367,6 +383,9 @@ class GatherNode : public ExecutionNode {

  /// @brief the underlying collection
  Collection const* _collection;

  /// @brief (optional) auxiliary collections (satellites)
  std::unordered_set<Collection const*> _auxiliaryCollections;
};

}  // namespace arangodb::aql

@ -86,12 +86,17 @@ struct Collection {

  /// @brief either use the set collection or get one from ClusterInfo:
  std::shared_ptr<arangodb::LogicalCollection> getCollection() const;


  /// @brief check smartness of the underlying collection
  bool isSmart() const {
    return getCollection()->isSmart();
  }

  /// @brief check if collection is a satellite collection
  bool isSatellite() const {
    return getCollection()->isSatellite();
  }

 private:

  arangodb::LogicalCollection* collection;

@ -722,14 +722,20 @@ void Condition::optimize(ExecutionPlan* plan) {
      auto operand = andNode->getMemberUnchecked(j);

      if (operand->isComparisonOperator()) {
        auto lhs = operand->getMember(0);
        auto rhs = operand->getMember(1);
        AstNode const* lhs = operand->getMember(0);
        AstNode const* rhs = operand->getMember(1);

        if (lhs->type == NODE_TYPE_ATTRIBUTE_ACCESS) {
          if (lhs->isConstant()) {
            lhs = Ast::resolveConstAttributeAccess(lhs);
          }
          storeAttributeAccess(varAccess, variableUsage, lhs, j, ATTRIBUTE_LEFT);
        }
        if (rhs->type == NODE_TYPE_ATTRIBUTE_ACCESS ||
            rhs->type == NODE_TYPE_EXPANSION) {
          if (rhs->type == NODE_TYPE_ATTRIBUTE_ACCESS && rhs->isConstant()) {
            rhs = Ast::resolveConstAttributeAccess(rhs);
          }
          storeAttributeAccess(varAccess, variableUsage, rhs, j, ATTRIBUTE_RIGHT);
        }
      }

@ -22,11 +22,13 @@
////////////////////////////////////////////////////////////////////////////////

#include "EnumerateCollectionBlock.h"

#include "Aql/AqlItemBlock.h"
#include "Aql/Collection.h"
#include "Aql/CollectionScanner.h"
#include "Aql/ExecutionEngine.h"
#include "Basics/Exceptions.h"
#include "Cluster/FollowerInfo.h"
#include "VocBase/ManagedDocumentResult.h"
#include "VocBase/vocbase.h"

@ -115,6 +117,39 @@ int EnumerateCollectionBlock::initialize() {
  auto ep = static_cast<EnumerateCollectionNode const*>(_exeNode);
  _mustStoreResult = ep->isVarUsedLater(ep->_outVariable);

  if (_collection->isSatellite()) {
    auto logicalCollection = _collection->getCollection();
    auto cid = logicalCollection->planId();
    auto dbName = logicalCollection->dbName();
    auto collectionInfoCurrent = ClusterInfo::instance()->getCollectionCurrent(dbName, std::to_string(cid));

    double maxWait = _engine->getQuery()->getNumericOption("satelliteSyncWait", 60.0);
    bool inSync = false;
    unsigned long waitInterval = 10000;
    double startTime = TRI_microtime();
    double now = startTime;
    double endTime = startTime + maxWait;

    while (!inSync) {
      auto followers = collectionInfoCurrent->servers(_collection->getName());
      inSync = std::find(followers.begin(), followers.end(), ServerState::instance()->getId()) != followers.end();
      if (!inSync) {
        if (endTime - now < waitInterval) {
          waitInterval = static_cast<unsigned long>(endTime - now);
        }
        usleep(waitInterval);
      }
      now = TRI_microtime();
      if (now > endTime) {
        break;
      }
    }

    if (!inSync) {
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CLUSTER_AQL_COLLECTION_OUT_OF_SYNC, "collection " + _collection->name);
    }
  }

  return ExecutionBlock::initialize();

  // cppcheck-suppress style

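The satellite sync-wait above is a deadline-bounded polling loop: check a predicate, sleep, and shrink the final sleep so the deadline is not overshot. A standalone sketch of the pattern, where the dummy predicate stands in for "this server appears in the shard's follower list":

#include <algorithm>
#include <chrono>
#include <iostream>
#include <thread>

template <typename Pred>
bool waitUntil(Pred pred, std::chrono::milliseconds maxWait) {
  using namespace std::chrono;
  auto const end = steady_clock::now() + maxWait;
  milliseconds interval(10);
  while (!pred()) {
    auto now = steady_clock::now();
    if (now >= end) {
      return false;  // timed out; the real code throws OUT_OF_SYNC here
    }
    auto left = duration_cast<milliseconds>(end - now);
    std::this_thread::sleep_for(std::min(interval, left));  // don't overshoot
  }
  return true;
}

int main() {
  int polls = 0;
  bool inSync = waitUntil([&] { return ++polls > 5; },
                          std::chrono::milliseconds(500));
  std::cout << (inSync ? "in sync" : "out of sync") << "\n";
}
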
@ -22,6 +22,7 @@
////////////////////////////////////////////////////////////////////////////////

#include "ExecutionEngine.h"

#include "Aql/BasicBlocks.h"
#include "Aql/CalculationBlock.h"
#include "Aql/ClusterBlocks.h"

@ -343,38 +344,78 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
        id(id),
        nodes(),
        part(p),
        idOfRemoteNode(idOfRemoteNode) {}

  Collection* getCollection() const {
    Collection* collection = nullptr;
        idOfRemoteNode(idOfRemoteNode),
        collection(nullptr),
        auxiliaryCollections(),
        populated(false) {
  }

  void populate() {
    // mop: compiler should inline that I suppose :S
    auto collectionFn = [&](Collection* col) -> void {
      if (col->isSatellite()) {
        auxiliaryCollections.emplace(col);
      } else {
        collection = col;
      }
    };
    Collection* localCollection = nullptr;
    for (auto en = nodes.rbegin(); en != nodes.rend(); ++en) {
      // find the collection to be used
      if ((*en)->getType() == ExecutionNode::ENUMERATE_COLLECTION) {
        collection = const_cast<Collection*>(
        localCollection = const_cast<Collection*>(
            static_cast<EnumerateCollectionNode*>((*en))->collection());
        collectionFn(localCollection);
      } else if ((*en)->getType() == ExecutionNode::INDEX) {
        collection = const_cast<Collection*>(
        localCollection = const_cast<Collection*>(
            static_cast<IndexNode*>((*en))->collection());
        collectionFn(localCollection);
      } else if ((*en)->getType() == ExecutionNode::INSERT ||
                 (*en)->getType() == ExecutionNode::UPDATE ||
                 (*en)->getType() == ExecutionNode::REPLACE ||
                 (*en)->getType() == ExecutionNode::REMOVE ||
                 (*en)->getType() == ExecutionNode::UPSERT) {
        collection = const_cast<Collection*>(
        localCollection = const_cast<Collection*>(
            static_cast<ModificationNode*>((*en))->collection());
        collectionFn(localCollection);
      }
    }
    // mop: no non satellite collection found
    if (collection == nullptr) {
      // mop: just take the last satellite then
      collection = localCollection;
    }
    // mop: ok we are actually only working with a satellite...
    // so remove its shardId from the auxiliaryShards again
    if (collection != nullptr && collection->isSatellite()) {
      auxiliaryCollections.erase(collection);
    }
    populated = true;
  }

  Collection* getCollection() {
    if (!populated) {
      populate();
    }
    TRI_ASSERT(collection != nullptr);
    return collection;
  }

  std::unordered_set<Collection*> getAuxiliaryCollections() {
    if (!populated) {
      populate();
    }
    return auxiliaryCollections;
  }

  EngineLocation const location;
  size_t const id;
  std::vector<ExecutionNode*> nodes;
  arangodb::aql::QueryPart part;  // only relevant for DBserver parts
  size_t idOfRemoteNode;          // id of the remote node
  Collection* collection;
  std::unordered_set<Collection*> auxiliaryCollections;
  bool populated;
  // in the original plan that needs this engine
};

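The EngineInfo rework above replaces an on-the-fly scan with a populate-once pattern: the main and auxiliary (satellite) collections are derived on first access and cached behind a flag, so both getters share a single pass over the nodes. A standalone sketch with simplified stand-in types:

#include <iostream>
#include <set>
#include <string>
#include <utility>
#include <vector>

struct EngineInfoSketch {
  std::vector<std::pair<std::string, bool>> nodes;  // (collection, isSatellite)

  std::string const& mainCollection() { ensurePopulated(); return _main; }
  std::set<std::string> const& auxiliary() { ensurePopulated(); return _aux; }

 private:
  void ensurePopulated() {
    if (_populated) return;  // compute once, reuse afterwards
    for (auto const& n : nodes) {
      if (n.second) _aux.insert(n.first); else _main = n.first;
    }
    if (_main.empty() && !_aux.empty()) {  // only satellites: promote one
      _main = *_aux.begin();
      _aux.erase(_aux.begin());
    }
    _populated = true;
  }

  std::string _main;
  std::set<std::string> _aux;
  bool _populated = false;
};

int main() {
  EngineInfoSketch info;
  info.nodes = {{"sat1", true}, {"users", false}};
  std::cout << info.mainCollection() << "\n";  // users
}
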
@ -392,6 +433,8 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
|
|||
// query or a dependent one.
|
||||
|
||||
std::unordered_map<std::string, std::string> queryIds;
|
||||
|
||||
std::unordered_set<Collection*> auxiliaryCollections;
|
||||
// this map allows to find the queries which are the parts of the big
|
||||
// query. There are two cases, the first is for the remote queries on
|
||||
// the DBservers, for these, the key is:
|
||||
|
@ -435,7 +478,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
|
|||
|
||||
/// @brief generatePlanForOneShard
|
||||
void generatePlanForOneShard(VPackBuilder& builder, size_t nr,
|
||||
EngineInfo const& info, QueryId& connectedId,
|
||||
EngineInfo* info, QueryId& connectedId,
|
||||
std::string const& shardId, bool verbose) {
|
||||
// copy the relevant fragment of the plan for each shard
|
||||
// Note that in these parts of the query there are no SubqueryNodes,
|
||||
|
@ -443,7 +486,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
|
|||
ExecutionPlan plan(query->ast());
|
||||
|
||||
ExecutionNode* previous = nullptr;
|
||||
for (ExecutionNode const* current : info.nodes) {
|
||||
for (ExecutionNode const* current : info->nodes) {
|
||||
auto clone = current->clone(&plan, false, false);
|
||||
// UNNECESSARY, because clone does it: plan.registerNode(clone);
|
||||
|
||||
|
@@ -473,34 +516,43 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {

  /// @brief distributePlanToShard, send a single plan to one shard
  void distributePlanToShard(arangodb::CoordTransactionID& coordTransactionID,
                             EngineInfo const& info, Collection* collection,
                             EngineInfo* info,
                             QueryId& connectedId, std::string const& shardId,
                             VPackSlice const& planSlice) {
    // inject the current shard id into the collection
    collection->setCurrentShard(shardId);

    Collection* collection = info->getCollection();
    // create a JSON representation of the plan
    VPackBuilder result;
    result.openObject();

    result.add("plan", VPackValue(VPackValueType::Object));

    VPackBuilder tmp;
    query->ast()->variables()->toVelocyPack(tmp);
    result.add("variables", tmp.slice());

    result.add("collections", VPackValue(VPackValueType::Array));
    // add the collection
    result.openObject();
    result.add("name", VPackValue(collection->getName()));
    result.add("name", VPackValue(shardId));
    result.add("type", VPackValue(TRI_TransactionTypeGetStr(collection->accessType)));
    result.close();

    // mop: this is currently only working for satellites and hardcoded to their structure
    for (auto auxiliaryCollection : info->getAuxiliaryCollections()) {
      TRI_ASSERT(auxiliaryCollection->isSatellite());

      // add the collection
      result.openObject();
      auto auxiliaryShards = auxiliaryCollection->shardIds();
      result.add("name", VPackValue((*auxiliaryShards)[0]));
      result.add("type", VPackValue(TRI_TransactionTypeGetStr(collection->accessType)));
      result.close();
    }
    result.close();  // collections

    result.add(VPackObjectIterator(planSlice));
    result.close();  // plan

    if (info.part == arangodb::aql::PART_MAIN) {
    if (info->part == arangodb::aql::PART_MAIN) {
      result.add("part", VPackValue("main"));
    } else {
      result.add("part", VPackValue("dependent"));

@@ -514,6 +566,8 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
    result.close();  // options.optimizer
    double tracing = query->getNumericOption("tracing", 0);
    result.add("tracing", VPackValue(tracing));
    double satelliteSyncWait = query->getNumericOption("satelliteSyncWait", 60.0);
    result.add("satelliteSyncWait", VPackValue(satelliteSyncWait));
    result.close();  // options

    result.close();
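// The body assembled above has roughly this shape (a sketch; field values
// abbreviated, ordering as produced by the builder calls):
//
//   {
//     "plan": {
//       "variables": [ ... ],
//       "collections": [ { "name": "<shardId>", "type": "read|write" }, ... ],
//       ... remaining plan attributes merged in from planSlice ...
//     },
//     "part": "main" | "dependent",
//     "options": { "optimizer": { ... }, "tracing": 0,
//                  "satelliteSyncWait": 60.0 }
//   }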
@@ -522,13 +576,13 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {

    auto body = std::make_shared<std::string const>(result.slice().toJson());

    // std::cout << "GENERATED A PLAN FOR THE REMOTE SERVERS: " << *(body.get())
    //LOG(ERR) << "GENERATED A PLAN FOR THE REMOTE SERVERS: " << *(body.get());
    // << "\n";

    auto cc = arangodb::ClusterComm::instance();

    std::string const url("/_db/" + arangodb::basics::StringUtils::urlEncode(
                              collection->vocbase->name()) +
    std::string const url("/_db/"
        + arangodb::basics::StringUtils::urlEncode(collection->vocbase->name()) +
                          "/_api/aql/instantiate");

    auto headers = std::make_unique<std::unordered_map<std::string, std::string>>();

@@ -539,7 +593,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
  }

  /// @brief aggregateQueryIds, get answers for all shards in a Scatter/Gather
  void aggregateQueryIds(EngineInfo const& info, arangodb::ClusterComm*& cc,
  void aggregateQueryIds(EngineInfo* info, arangodb::ClusterComm*& cc,
                         arangodb::CoordTransactionID& coordTransactionID,
                         Collection* collection) {
    // pick up the remote query ids

@@ -569,13 +623,13 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
        // res.answer->body() << ", REMOTENODEID: " << info.idOfRemoteNode <<
        // " SHARDID:" << res.shardID << ", QUERYID: " << queryId << "\n";
        std::string theID =
            arangodb::basics::StringUtils::itoa(info.idOfRemoteNode) + ":" +
            arangodb::basics::StringUtils::itoa(info->idOfRemoteNode) + ":" +
            res.shardID;
        if (info.part == arangodb::aql::PART_MAIN) {
          queryIds.emplace(theID, queryId + "*");
        } else {
          queryIds.emplace(theID, queryId);

        if (info->part == arangodb::aql::PART_MAIN) {
          queryId += "*";
        }
        queryIds.emplace(theID, queryId);
      } else {
        error += "DB SERVER ANSWERED WITH ERROR: ";
        error += res.answer->payload().toJson();

@@ -589,7 +643,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
      }
    }

    // std::cout << "GOT ALL RESPONSES FROM DB SERVERS: " << nrok << "\n";
    //LOG(ERR) << "GOT ALL RESPONSES FROM DB SERVERS: " << nrok << "\n";

    if (nrok != (int)shardIds->size()) {
      if (errorCode == TRI_ERROR_NO_ERROR) {
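// Keys in `queryIds` therefore have the form "<idOfRemoteNode>:<shardId>"
// (e.g. "42:s100001"), and the stored query id carries a trailing "*" when
// it belongs to the main query part, so the coordinator can later tell main
// and dependent snippets apart when wiring up the remote blocks.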
@@ -600,9 +654,16 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
  }

  /// @brief distributePlansToShards, for a single Scatter/Gather block
  void distributePlansToShards(EngineInfo const& info, QueryId connectedId) {
    // std::cout << "distributePlansToShards: " << info.id << std::endl;
    Collection* collection = info.getCollection();
  void distributePlansToShards(EngineInfo* info, QueryId connectedId) {
    //LOG(ERR) << "distributePlansToShards: " << info.id;
    Collection* collection = info->getCollection();

    auto auxiliaryCollections = info->getAuxiliaryCollections();
    for (auto const& auxiliaryCollection : auxiliaryCollections) {
      TRI_ASSERT(auxiliaryCollection->shardIds()->size() == 1);
      auxiliaryCollection->setCurrentShard((*auxiliaryCollection->shardIds())[0]);
    }

    // now send the plan to the remote servers
    arangodb::CoordTransactionID coordTransactionID = TRI_NewTickServer();
    auto cc = arangodb::ClusterComm::instance();

@@ -613,23 +674,27 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
    auto shardIds = collection->shardIds();
    for (auto const& shardId : *shardIds) {
      // inject the current shard id into the collection
      collection->setCurrentShard(shardId);
      VPackBuilder b;
      collection->setCurrentShard(shardId);
      generatePlanForOneShard(b, nr++, info, connectedId, shardId, true);

      distributePlanToShard(coordTransactionID, info, collection, connectedId,
                            shardId, b.slice());
      distributePlanToShard(coordTransactionID, info,
                            connectedId, shardId,
                            b.slice());
    }
    collection->resetCurrentShard();
    for (auto const& auxiliaryCollection : auxiliaryCollections) {
      TRI_ASSERT(auxiliaryCollection->shardIds()->size() == 1);
      auxiliaryCollection->resetCurrentShard();
    }

    // fix collection
    collection->resetCurrentShard();
    aggregateQueryIds(info, cc, coordTransactionID, collection);
  }

  /// @brief buildEngineCoordinator, for a single piece
  ExecutionEngine* buildEngineCoordinator(EngineInfo& info) {
  ExecutionEngine* buildEngineCoordinator(EngineInfo* info) {
    Query* localQuery = query;
    bool needToClone = info.id > 0;  // use the original for the main part
    bool needToClone = info->id > 0;  // use the original for the main part
    if (needToClone) {
      // need a new query instance on the coordinator
      localQuery = query->clone(PART_DEPENDENT, false);
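// Shard pinning follows a strict bracket pattern here: each satellite is
// pinned to its single shard up front, the main collection is re-pinned per
// shard inside the loop, and every setCurrentShard() is matched by a
// resetCurrentShard() once the plans have been shipped, so the shared
// Collection objects never leak a shard id into later engine builds.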
@@ -646,7 +711,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
    std::unordered_map<ExecutionNode*, ExecutionBlock*> cache;
    RemoteNode* remoteNode = nullptr;

    for (auto en = info.nodes.begin(); en != info.nodes.end(); ++en) {
    for (auto en = info->nodes.begin(); en != info->nodes.end(); ++en) {
      auto const nodeType = (*en)->getType();

      if (nodeType == ExecutionNode::REMOTE) {

@@ -688,15 +753,15 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {

        // now we'll create a remote node for each shard and add it to the
        // gather node
        Collection const* collection =
            static_cast<GatherNode const*>((*en))->collection();
        auto gatherNode = static_cast<GatherNode const*>(*en);
        Collection const* collection = gatherNode->collection();

        auto shardIds = collection->shardIds();

        for (auto const& shardId : *shardIds) {
          std::string theId =
              arangodb::basics::StringUtils::itoa(remoteNode->id()) + ":" +
              shardId;

          auto it = queryIds.find(theId);
          if (it == queryIds.end()) {
            THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,

@@ -962,11 +1027,12 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
    QueryId id = 0;

    for (auto it = engines.rbegin(); it != engines.rend(); ++it) {
      // std::cout << "Doing engine: " << it->id << " location:"
      // << it->location << std::endl;
      if ((*it).location == COORDINATOR) {
      EngineInfo* info = &(*it);
      //LOG(ERR) << "Doing engine: " << it->id << " location:"
      // << it->location;
      if (info->location == COORDINATOR) {
        // create a coordinator-based engine
        engine = buildEngineCoordinator(*it);
        engine = buildEngineCoordinator(info);
        TRI_ASSERT(engine != nullptr);

        if ((*it).id > 0) {

@@ -997,12 +1063,11 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
      } else {
        // create an engine on a remote DB server
        // hand in the previous engine's id
        distributePlansToShards((*it), id);
        distributePlansToShards(info, id);
      }
    }

    TRI_ASSERT(engine != nullptr);

    // return the last created coordinator-based engine
    // this is the local engine that we'll use to run the query
    return engine;
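// Engines are walked with rbegin()/rend(), i.e. in reverse creation order:
// the innermost (dependent) engines are instantiated on the DB servers
// first, each handing its query id on to the engine that consumes it, and
// the last engine built is the coordinator engine for the main query part,
// which is the one returned to the caller.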
@@ -1104,6 +1169,7 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
      for (auto& q : inst.get()->queryIds) {
        std::string theId = q.first;
        std::string queryId = q.second;

        // std::cout << "queryIds: " << theId << " : " << queryId <<
        // std::endl;
        auto pos = theId.find(':');

@@ -1156,6 +1222,7 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
        std::string const& shardId = p.first;
        std::string const& queryId = p.second.first;
        bool isTraverserEngine = p.second.second;

        // Lock shard on DBserver:
        arangodb::CoordTransactionID coordTransactionID = TRI_NewTickServer();
        auto cc = arangodb::ClusterComm::instance();

@@ -1153,7 +1153,11 @@ EnumerateCollectionNode::EnumerateCollectionNode(
      _collection(plan->getAst()->query()->collections()->get(
          base.get("collection").copyString())),
      _outVariable(varFromVPack(plan->getAst(), base, "outVariable")),
      _random(base.get("random").getBoolean()) {}
      _random(base.get("random").getBoolean()) {
  TRI_ASSERT(_vocbase != nullptr);
  TRI_ASSERT(_collection != nullptr);
  TRI_ASSERT(_outVariable != nullptr);
}

/// @brief toVelocyPack, for EnumerateCollectionNode
void EnumerateCollectionNode::toVelocyPackHelper(VPackBuilder& nodes,

@@ -53,6 +53,30 @@ IndexBlock::IndexBlock(ExecutionEngine* engine, IndexNode const* en)
      _hasV8Expression(false) {

  _mmdr.reset(new ManagedDocumentResult(transaction()));

  if (_condition != nullptr) {
    // fix const attribute accesses, e.g. { "a": 1 }.a
    for (size_t i = 0; i < _condition->numMembers(); ++i) {
      auto andCond = _condition->getMemberUnchecked(i);
      for (size_t j = 0; j < andCond->numMembers(); ++j) {
        auto leaf = andCond->getMemberUnchecked(j);

        // We only support binary conditions
        TRI_ASSERT(leaf->numMembers() == 2);
        AstNode* lhs = leaf->getMember(0);
        AstNode* rhs = leaf->getMember(1);

        if (lhs->type == NODE_TYPE_ATTRIBUTE_ACCESS && lhs->isConstant()) {
          lhs = const_cast<AstNode*>(Ast::resolveConstAttributeAccess(lhs));
          leaf->changeMember(0, lhs);
        }
        if (rhs->type == NODE_TYPE_ATTRIBUTE_ACCESS && rhs->isConstant()) {
          rhs = const_cast<AstNode*>(Ast::resolveConstAttributeAccess(rhs));
          leaf->changeMember(1, rhs);
        }
      }
    }
  }
}

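// Illustration (sketch): a condition such as
//   doc.value == { "a": 1 }.a
// reaches this block with its right-hand side as an attribute access over a
// constant object; Ast::resolveConstAttributeAccess folds that access into
// the literal 1, so the condition handling below only ever sees plain
// constants on the non-indexed side.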
IndexBlock::~IndexBlock() {

@@ -126,7 +150,7 @@ int IndexBlock::initialize() {

  auto en = static_cast<IndexNode const*>(getPlanNode());
  auto ast = en->_plan->getAst();

  // instantiate expressions:
  auto instantiateExpression =
      [&](size_t i, size_t j, size_t k, AstNode* a) -> void {

@@ -176,8 +200,8 @@ int IndexBlock::initialize() {

      // We only support binary conditions
      TRI_ASSERT(leaf->numMembers() == 2);
      auto lhs = leaf->getMember(0);
      auto rhs = leaf->getMember(1);
      AstNode* lhs = leaf->getMember(0);
      AstNode* rhs = leaf->getMember(1);

      if (lhs->isAttributeAccessForVariable(outVariable, false)) {
        // Index is responsible for the left side, check if right side has to be

@@ -65,6 +65,7 @@ void IndexNode::toVelocyPackHelper(VPackBuilder& nodes, bool verbose) const {
  // Now put info about vocbase and cid in there
  nodes.add("database", VPackValue(_vocbase->name()));
  nodes.add("collection", VPackValue(_collection->getName()));
  nodes.add("satellite", VPackValue(_collection->isSatellite()));
  nodes.add(VPackValue("outVariable"));
  _outVariable->toVelocyPack(nodes);

@@ -512,5 +512,11 @@ void Optimizer::setupRules() {
  registerRule("undistribute-remove-after-enum-coll",
               undistributeRemoveAfterEnumCollRule,
               undistributeRemoveAfterEnumCollRule_pass10, DoesNotCreateAdditionalPlans, true);

#ifdef USE_ENTERPRISE
  registerRule("remove-satellite-joins",
               removeSatelliteJoinsRule,
               removeSatelliteJoinsRule_pass10, DoesNotCreateAdditionalPlans, true);
#endif
}
}

@@ -191,6 +191,11 @@ class Optimizer {
    // only a SingletonNode and possibly some CalculationNodes as dependencies
    removeUnnecessaryRemoteScatterRule_pass10 = 1040,

    // remove any superfluous satellite collection joins...
    // put it after Scatter rule because we would do
    // the work twice otherwise
    removeSatelliteJoinsRule_pass10 = 1045,

    // recognize that a RemoveNode can be moved to the shards
    undistributeRemoveAfterEnumCollRule_pass10 = 1050,

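// Rule levels double as execution order: the optimizer runs rules in
// ascending level, so 1045 slots the satellite-join removal after
// remove-unnecessary-remote-scatter (1040) and before
// undistribute-remove-after-enum-coll (1050), matching the comment above
// about not redoing the scatter work.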
@@ -125,6 +125,9 @@ void distributeInClusterRule(Optimizer*, ExecutionPlan*,
#ifdef USE_ENTERPRISE
void distributeInClusterRuleSmartEdgeCollection(Optimizer*, ExecutionPlan*,
                                                Optimizer::Rule const*);

/// @brief remove scatter/gather and remote nodes for satellite collections
void removeSatelliteJoinsRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
#endif

void distributeFilternCalcToClusterRule(Optimizer*, ExecutionPlan*,

@@ -564,7 +564,6 @@ RestStatus RestAqlHandler::execute() {
  // GeneralRequest::translateMethod(_request->requestType()) << ",
  // " << arangodb::ServerState::instance()->getId() << ": " <<
  // _request->fullUrl() << ": " << _request->body() << "\n\n";

  std::vector<std::string> const& suffixes = _request->suffixes();

  // extract the sub-request type

@@ -182,6 +182,7 @@ SET(ARANGOD_SOURCES
  Cluster/ClusterInfo.cpp
  Cluster/ClusterMethods.cpp
  Cluster/ClusterTraverser.cpp
  Cluster/FollowerInfo.cpp
  Cluster/DBServerAgencySync.cpp
  Cluster/HeartbeatThread.cpp
  Cluster/RestAgencyCallbacksHandler.cpp

@@ -308,7 +308,8 @@ void ClusterFeature::prepare() {
      LOG(INFO) << "Waiting for DBservers to show up...";
      ci->loadCurrentDBServers();
      std::vector<ServerID> DBServers = ci->getCurrentDBServers();
      if (DBServers.size() > 1 || TRI_microtime() - start > 30.0) {
      if (DBServers.size() >= 1 &&
          (DBServers.size() > 1 || TRI_microtime() - start > 15.0)) {
        LOG(INFO) << "Found " << DBServers.size() << " DBservers.";
        break;
      }

@@ -1029,6 +1029,7 @@ int ClusterInfo::dropDatabaseCoordinator(std::string const& name,
int ClusterInfo::createCollectionCoordinator(std::string const& databaseName,
                                             std::string const& collectionID,
                                             uint64_t numberOfShards,
                                             uint64_t replicationFactor,
                                             VPackSlice const& json,
                                             std::string& errorMsg,
                                             double timeout) {

@@ -1074,6 +1075,7 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName,
  std::shared_ptr<int> dbServerResult = std::make_shared<int>(-1);
  std::shared_ptr<std::string> errMsg = std::make_shared<std::string>();

  auto dbServers = getCurrentDBServers();
  std::function<bool(VPackSlice const& result)> dbServerChanged =
      [=](VPackSlice const& result) {
        if (result.isObject() && result.length() == (size_t)numberOfShards) {

@@ -1081,6 +1083,13 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName,
          bool tmpHaveError = false;

          for (auto const& p : VPackObjectIterator(result)) {
            if (replicationFactor == 0) {
              VPackSlice servers = p.value.get("servers");
              if (!servers.isArray() || servers.length() < dbServers.size()) {
                return true;
              }
            }

            if (arangodb::basics::VelocyPackHelper::getBooleanValue(
                    p.value, "error", false)) {
              tmpHaveError = true;

@@ -1149,7 +1158,6 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName,

  // Update our cache:
  loadPlan();

  if (numberOfShards == 0) {
    loadCurrent();
    events::CreateCollection(name, TRI_ERROR_NO_ERROR);

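// A replicationFactor of 0 is the internal marker for satellite collections,
// which are replicated to every DB server: the agency callback above bails
// out (and thereby keeps waiting) until each shard's "servers" list covers
// all current DB servers, and only then lets creation be reported complete.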
@@ -2553,226 +2561,3 @@ std::shared_ptr<VPackBuilder> ClusterInfo::getCurrent() {
  READ_LOCKER(readLocker, _currentProt.lock);
  return _current;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief get information about current followers of a shard.
////////////////////////////////////////////////////////////////////////////////

std::shared_ptr<std::vector<ServerID> const> FollowerInfo::get() {
  MUTEX_LOCKER(locker, _mutex);
  return _followers;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief change JSON under
/// Current/Collection/<DB-name>/<Collection-ID>/<shard-ID>
/// to add or remove a serverID, if add flag is true, the entry is added
/// (if it is not yet there), otherwise the entry is removed (if it was
/// there).
////////////////////////////////////////////////////////////////////////////////

static VPackBuilder newShardEntry(VPackSlice oldValue, ServerID const& sid,
                                  bool add) {
  VPackBuilder newValue;
  VPackSlice servers;
  {
    VPackObjectBuilder b(&newValue);
    // Now need to find the `servers` attribute, which is a list:
    for (auto const& it : VPackObjectIterator(oldValue)) {
      if (it.key.isEqualString("servers")) {
        servers = it.value;
      } else {
        newValue.add(it.key);
        newValue.add(it.value);
      }
    }
    newValue.add(VPackValue("servers"));
    if (servers.isArray() && servers.length() > 0) {
      VPackArrayBuilder bb(&newValue);
      newValue.add(servers[0]);
      VPackArrayIterator it(servers);
      bool done = false;
      for (++it; it.valid(); ++it) {
        if ((*it).isEqualString(sid)) {
          if (add) {
            newValue.add(*it);
            done = true;
          }
        } else {
          newValue.add(*it);
        }
      }
      if (add && !done) {
        newValue.add(VPackValue(sid));
      }
    } else {
      VPackArrayBuilder bb(&newValue);
      newValue.add(VPackValue(ServerState::instance()->getId()));
      if (add) {
        newValue.add(VPackValue(sid));
      }
    }
  }
  return newValue;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief add a follower to a shard, this is only done by the server side
/// of the "get-in-sync" capabilities. This reports to the agency under
/// `/Current` but in asynchronous "fire-and-forget" way.
////////////////////////////////////////////////////////////////////////////////

void FollowerInfo::add(ServerID const& sid) {
  MUTEX_LOCKER(locker, _mutex);

  // Fully copy the vector:
  auto v = std::make_shared<std::vector<ServerID>>(*_followers);
  v->push_back(sid);  // add a single entry
  _followers = v;     // will cast to std::vector<ServerID> const
  // Now tell the agency, path is
  // Current/Collections/<dbName>/<collectionID>/<shardID>
  std::string path = "Current/Collections/";
  path += _docColl->vocbase()->name();
  path += "/";
  path += std::to_string(_docColl->planId());
  path += "/";
  path += _docColl->name();
  AgencyComm ac;
  double startTime = TRI_microtime();
  bool success = false;
  do {
    AgencyCommResult res = ac.getValues(path);

    if (res.successful()) {
      velocypack::Slice currentEntry =
          res.slice()[0].get(std::vector<std::string>(
              {AgencyCommManager::path(), "Current", "Collections",
               _docColl->vocbase()->name(), std::to_string(_docColl->planId()),
               _docColl->name()}));

      if (!currentEntry.isObject()) {
        LOG_TOPIC(ERR, Logger::CLUSTER)
            << "FollowerInfo::add, did not find object in " << path;
        if (!currentEntry.isNone()) {
          LOG_TOPIC(ERR, Logger::CLUSTER) << "Found: " << currentEntry.toJson();
        }
      } else {
        auto newValue = newShardEntry(currentEntry, sid, true);
        std::string key = "Current/Collections/" + _docColl->vocbase()->name() +
                          "/" + std::to_string(_docColl->planId()) + "/" +
                          _docColl->name();
        AgencyWriteTransaction trx;
        trx.preconditions.push_back(AgencyPrecondition(
            key, AgencyPrecondition::Type::VALUE, currentEntry));
        trx.operations.push_back(AgencyOperation(
            key, AgencyValueOperationType::SET, newValue.slice()));
        trx.operations.push_back(AgencyOperation(
            "Current/Version", AgencySimpleOperationType::INCREMENT_OP));
        AgencyCommResult res2 = ac.sendTransactionWithFailover(trx);
        if (res2.successful()) {
          success = true;
          break;
        } else {
          LOG_TOPIC(WARN, Logger::CLUSTER)
              << "FollowerInfo::add, could not cas key " << path;
        }
      }
    } else {
      LOG_TOPIC(ERR, Logger::CLUSTER) << "FollowerInfo::add, could not read "
                                      << path << " in agency.";
    }
    usleep(500000);
  } while (TRI_microtime() < startTime + 30);
  if (!success) {
    LOG_TOPIC(ERR, Logger::CLUSTER)
        << "FollowerInfo::add, timeout in agency operation for key " << path;
  }
}

////////////////////////////////////////////////////////////////////////////////
/// @brief remove a follower from a shard, this is only done by the
/// server if a synchronous replication request fails. This reports to
/// the agency under `/Current` but in asynchronous "fire-and-forget"
/// way. The method fails silently, if the follower information has
/// since been dropped (see `dropFollowerInfo` below).
////////////////////////////////////////////////////////////////////////////////

void FollowerInfo::remove(ServerID const& sid) {
  MUTEX_LOCKER(locker, _mutex);

  auto v = std::make_shared<std::vector<ServerID>>();
  v->reserve(_followers->size() - 1);
  for (auto const& i : *_followers) {
    if (i != sid) {
      v->push_back(i);
    }
  }
  _followers = v;  // will cast to std::vector<ServerID> const
  // Now tell the agency, path is
  // Current/Collections/<dbName>/<collectionID>/<shardID>
  std::string path = "Current/Collections/";
  path += _docColl->vocbase()->name();
  path += "/";
  path += std::to_string(_docColl->planId());
  path += "/";
  path += _docColl->name();
  AgencyComm ac;
  double startTime = TRI_microtime();
  bool success = false;
  do {
    AgencyCommResult res = ac.getValues(path);
    if (res.successful()) {
      velocypack::Slice currentEntry =
          res.slice()[0].get(std::vector<std::string>(
              {AgencyCommManager::path(), "Current", "Collections",
               _docColl->vocbase()->name(), std::to_string(_docColl->planId()),
               _docColl->name()}));

      if (!currentEntry.isObject()) {
        LOG_TOPIC(ERR, Logger::CLUSTER)
            << "FollowerInfo::remove, did not find object in " << path;
        if (!currentEntry.isNone()) {
          LOG_TOPIC(ERR, Logger::CLUSTER) << "Found: " << currentEntry.toJson();
        }
      } else {
        auto newValue = newShardEntry(currentEntry, sid, false);
        std::string key = "Current/Collections/" + _docColl->vocbase()->name() +
                          "/" + std::to_string(_docColl->planId()) + "/" +
                          _docColl->name();
        AgencyWriteTransaction trx;
        trx.preconditions.push_back(AgencyPrecondition(
            key, AgencyPrecondition::Type::VALUE, currentEntry));
        trx.operations.push_back(AgencyOperation(
            key, AgencyValueOperationType::SET, newValue.slice()));
        trx.operations.push_back(AgencyOperation(
            "Current/Version", AgencySimpleOperationType::INCREMENT_OP));
        AgencyCommResult res2 = ac.sendTransactionWithFailover(trx);
        if (res2.successful()) {
          success = true;
          break;
        } else {
          LOG_TOPIC(WARN, Logger::CLUSTER)
              << "FollowerInfo::remove, could not cas key " << path;
        }
      }
    } else {
      LOG_TOPIC(ERR, Logger::CLUSTER) << "FollowerInfo::remove, could not read "
                                      << path << " in agency.";
    }
    usleep(500000);
  } while (TRI_microtime() < startTime + 30);
  if (!success) {
    LOG_TOPIC(ERR, Logger::CLUSTER)
        << "FollowerInfo::remove, timeout in agency operation for key " << path;
  }
}

//////////////////////////////////////////////////////////////////////////////
/// @brief clear follower list, no changes in agency necessary
//////////////////////////////////////////////////////////////////////////////

void FollowerInfo::clear() {
  MUTEX_LOCKER(locker, _mutex);
  auto v = std::make_shared<std::vector<ServerID>>();
  _followers = v;  // will cast to std::vector<ServerID> const
}

@@ -348,6 +348,7 @@ class ClusterInfo {
  int createCollectionCoordinator(std::string const& databaseName,
                                  std::string const& collectionID,
                                  uint64_t numberOfShards,
                                  uint64_t replicationFactor,
                                  arangodb::velocypack::Slice const& json,
                                  std::string& errorMsg, double timeout);

@@ -648,53 +649,6 @@ class ClusterInfo {
  std::vector<std::string> _failedServers;
};

////////////////////////////////////////////////////////////////////////////////
/// @brief a class to track followers that are in sync for a shard
////////////////////////////////////////////////////////////////////////////////

class FollowerInfo {
  std::shared_ptr<std::vector<ServerID> const> _followers;
  Mutex _mutex;
  arangodb::LogicalCollection* _docColl;

 public:
  explicit FollowerInfo(arangodb::LogicalCollection* d)
      : _followers(new std::vector<ServerID>()), _docColl(d) { }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief get information about current followers of a shard.
  //////////////////////////////////////////////////////////////////////////////

  std::shared_ptr<std::vector<ServerID> const> get();

  //////////////////////////////////////////////////////////////////////////////
  /// @brief add a follower to a shard, this is only done by the server side
  /// of the "get-in-sync" capabilities. This reports to the agency under
  /// `/Current` but in asynchronous "fire-and-forget" way. The method
  /// fails silently, if the follower information has since been dropped
  /// (see `dropFollowerInfo` below).
  //////////////////////////////////////////////////////////////////////////////

  void add(ServerID const& s);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief remove a follower from a shard, this is only done by the
  /// server if a synchronous replication request fails. This reports to
  /// the agency under `/Current` but in an asynchronous "fire-and-forget"
  /// way.
  //////////////////////////////////////////////////////////////////////////////

  void remove(ServerID const& s);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief clear follower list, no changes in agency necessary
  //////////////////////////////////////////////////////////////////////////////

  void clear();
};

}  // end namespace arangodb

#endif

@@ -2059,35 +2059,42 @@ std::unordered_map<std::string, std::vector<std::string>> distributeShards(
    random_shuffle(dbServers.begin(), dbServers.end());
  }

  // mop: distribute satellite collections on all servers
  if (replicationFactor == 0) {
    replicationFactor = dbServers.size();
  }

  // fetch a unique id for each shard to create
  uint64_t const id = ci->uniqid(numberOfShards);

  // now create the shards
  size_t count = 0;

  size_t leaderIndex = 0;
  size_t followerIndex = 0;
  for (uint64_t i = 0; i < numberOfShards; ++i) {
    // determine responsible server(s)
    std::vector<std::string> serverIds;
    for (uint64_t j = 0; j < replicationFactor; ++j) {
      std::string candidate;
      size_t count2 = 0;
      bool found = true;
      do {
        candidate = dbServers[count++];
        if (count >= dbServers.size()) {
          count = 0;
        }
        if (++count2 == dbServers.size() + 1) {
          LOG_TOPIC(WARN, Logger::CLUSTER)
              << "createCollectionCoordinator: replicationFactor is "
              << "too large for the number of DBservers";
          found = false;
          break;
        }
      } while (std::find(serverIds.begin(), serverIds.end(), candidate) !=
               serverIds.end());
      if (found) {
        serverIds.push_back(candidate);
      if (j >= dbServers.size()) {
        LOG_TOPIC(WARN, Logger::CLUSTER)
            << "createCollectionCoordinator: replicationFactor is "
            << "too large for the number of DBservers";
        break;
      }
      std::string candidate;
      // mop: leader
      if (serverIds.size() == 0) {
        candidate = dbServers[leaderIndex++];
        if (leaderIndex >= dbServers.size()) {
          leaderIndex = 0;
        }
      } else {
        do {
          candidate = dbServers[followerIndex++];
          if (followerIndex >= dbServers.size()) {
            followerIndex = 0;
          }
        } while (candidate == serverIds[0]);  // mop: ignore leader
      }
      serverIds.push_back(candidate);
    }

    // determine shard id

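// Sketch of the new assignment (assuming DB servers A, B, C, two shards,
// replicationFactor 2): leaders advance round-robin via leaderIndex, while
// followers advance independently via followerIndex, skipping whichever
// server already leads the shard. That yields
//   shard 1: leader A, follower B
//   shard 2: leader B, follower C
// so leaders stay evenly spread instead of clustering on the first servers.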
@@ -0,0 +1,253 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Max Neunhoeffer
/// @author Andreas Streichardt
////////////////////////////////////////////////////////////////////////////////

#include "FollowerInfo.h"

#include "Cluster/ServerState.h"
#include "VocBase/LogicalCollection.h"

using namespace arangodb;

////////////////////////////////////////////////////////////////////////////////
/// @brief get information about current followers of a shard.
////////////////////////////////////////////////////////////////////////////////

std::shared_ptr<std::vector<ServerID> const> FollowerInfo::get() {
  MUTEX_LOCKER(locker, _mutex);
  return _followers;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief change JSON under
/// Current/Collection/<DB-name>/<Collection-ID>/<shard-ID>
/// to add or remove a serverID, if add flag is true, the entry is added
/// (if it is not yet there), otherwise the entry is removed (if it was
/// there).
////////////////////////////////////////////////////////////////////////////////

static VPackBuilder newShardEntry(VPackSlice oldValue, ServerID const& sid,
                                  bool add) {
  VPackBuilder newValue;
  VPackSlice servers;
  {
    VPackObjectBuilder b(&newValue);
    // Now need to find the `servers` attribute, which is a list:
    for (auto const& it : VPackObjectIterator(oldValue)) {
      if (it.key.isEqualString("servers")) {
        servers = it.value;
      } else {
        newValue.add(it.key);
        newValue.add(it.value);
      }
    }
    newValue.add(VPackValue("servers"));
    if (servers.isArray() && servers.length() > 0) {
      VPackArrayBuilder bb(&newValue);
      newValue.add(servers[0]);
      VPackArrayIterator it(servers);
      bool done = false;
      for (++it; it.valid(); ++it) {
        if ((*it).isEqualString(sid)) {
          if (add) {
            newValue.add(*it);
            done = true;
          }
        } else {
          newValue.add(*it);
        }
      }
      if (add && !done) {
        newValue.add(VPackValue(sid));
      }
    } else {
      VPackArrayBuilder bb(&newValue);
      newValue.add(VPackValue(ServerState::instance()->getId()));
      if (add) {
        newValue.add(VPackValue(sid));
      }
    }
  }
  return newValue;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief add a follower to a shard, this is only done by the server side
/// of the "get-in-sync" capabilities. This reports to the agency under
/// `/Current` but in asynchronous "fire-and-forget" way.
////////////////////////////////////////////////////////////////////////////////

void FollowerInfo::add(ServerID const& sid) {
  MUTEX_LOCKER(locker, _mutex);

  // Fully copy the vector:
  auto v = std::make_shared<std::vector<ServerID>>(*_followers);
  v->push_back(sid);  // add a single entry
  _followers = v;     // will cast to std::vector<ServerID> const
  // Now tell the agency, path is
  // Current/Collections/<dbName>/<collectionID>/<shardID>
  std::string path = "Current/Collections/";
  path += _docColl->vocbase()->name();
  path += "/";
  path += std::to_string(_docColl->planId());
  path += "/";
  path += _docColl->name();
  AgencyComm ac;
  double startTime = TRI_microtime();
  bool success = false;
  do {
    AgencyCommResult res = ac.getValues(path);

    if (res.successful()) {
      velocypack::Slice currentEntry =
          res.slice()[0].get(std::vector<std::string>(
              {AgencyCommManager::path(), "Current", "Collections",
               _docColl->vocbase()->name(), std::to_string(_docColl->planId()),
               _docColl->name()}));

      if (!currentEntry.isObject()) {
        LOG_TOPIC(ERR, Logger::CLUSTER)
            << "FollowerInfo::add, did not find object in " << path;
        if (!currentEntry.isNone()) {
          LOG_TOPIC(ERR, Logger::CLUSTER) << "Found: " << currentEntry.toJson();
        }
      } else {
        auto newValue = newShardEntry(currentEntry, sid, true);
        std::string key = "Current/Collections/" + _docColl->vocbase()->name() +
                          "/" + std::to_string(_docColl->planId()) + "/" +
                          _docColl->name();
        AgencyWriteTransaction trx;
        trx.preconditions.push_back(AgencyPrecondition(
            key, AgencyPrecondition::Type::VALUE, currentEntry));
        trx.operations.push_back(AgencyOperation(
            key, AgencyValueOperationType::SET, newValue.slice()));
        trx.operations.push_back(AgencyOperation(
            "Current/Version", AgencySimpleOperationType::INCREMENT_OP));
        AgencyCommResult res2 = ac.sendTransactionWithFailover(trx);
        if (res2.successful()) {
          success = true;
          break;
        } else {
          LOG_TOPIC(WARN, Logger::CLUSTER)
              << "FollowerInfo::add, could not cas key " << path;
        }
      }
    } else {
      LOG_TOPIC(ERR, Logger::CLUSTER) << "FollowerInfo::add, could not read "
                                      << path << " in agency.";
    }
    usleep(500000);
  } while (TRI_microtime() < startTime + 30);
  if (!success) {
    LOG_TOPIC(ERR, Logger::CLUSTER)
        << "FollowerInfo::add, timeout in agency operation for key " << path;
  }
}

////////////////////////////////////////////////////////////////////////////////
/// @brief remove a follower from a shard, this is only done by the
/// server if a synchronous replication request fails. This reports to
/// the agency under `/Current` but in asynchronous "fire-and-forget"
/// way. The method fails silently, if the follower information has
/// since been dropped (see `dropFollowerInfo` below).
////////////////////////////////////////////////////////////////////////////////

void FollowerInfo::remove(ServerID const& sid) {
  MUTEX_LOCKER(locker, _mutex);

  auto v = std::make_shared<std::vector<ServerID>>();
  v->reserve(_followers->size() - 1);
  for (auto const& i : *_followers) {
    if (i != sid) {
      v->push_back(i);
    }
  }
  _followers = v;  // will cast to std::vector<ServerID> const
  // Now tell the agency, path is
  // Current/Collections/<dbName>/<collectionID>/<shardID>
  std::string path = "Current/Collections/";
  path += _docColl->vocbase()->name();
  path += "/";
  path += std::to_string(_docColl->planId());
  path += "/";
  path += _docColl->name();
  AgencyComm ac;
  double startTime = TRI_microtime();
  bool success = false;
  do {
    AgencyCommResult res = ac.getValues(path);
    if (res.successful()) {
      velocypack::Slice currentEntry =
          res.slice()[0].get(std::vector<std::string>(
              {AgencyCommManager::path(), "Current", "Collections",
               _docColl->vocbase()->name(), std::to_string(_docColl->planId()),
               _docColl->name()}));

      if (!currentEntry.isObject()) {
        LOG_TOPIC(ERR, Logger::CLUSTER)
            << "FollowerInfo::remove, did not find object in " << path;
        if (!currentEntry.isNone()) {
          LOG_TOPIC(ERR, Logger::CLUSTER) << "Found: " << currentEntry.toJson();
        }
      } else {
        auto newValue = newShardEntry(currentEntry, sid, false);
        std::string key = "Current/Collections/" + _docColl->vocbase()->name() +
                          "/" + std::to_string(_docColl->planId()) + "/" +
                          _docColl->name();
        AgencyWriteTransaction trx;
        trx.preconditions.push_back(AgencyPrecondition(
            key, AgencyPrecondition::Type::VALUE, currentEntry));
        trx.operations.push_back(AgencyOperation(
            key, AgencyValueOperationType::SET, newValue.slice()));
        trx.operations.push_back(AgencyOperation(
            "Current/Version", AgencySimpleOperationType::INCREMENT_OP));
        AgencyCommResult res2 = ac.sendTransactionWithFailover(trx);
        if (res2.successful()) {
          success = true;
          break;
        } else {
          LOG_TOPIC(WARN, Logger::CLUSTER)
              << "FollowerInfo::remove, could not cas key " << path;
        }
      }
    } else {
      LOG_TOPIC(ERR, Logger::CLUSTER) << "FollowerInfo::remove, could not read "
                                      << path << " in agency.";
    }
    usleep(500000);
  } while (TRI_microtime() < startTime + 30);
  if (!success) {
    LOG_TOPIC(ERR, Logger::CLUSTER)
        << "FollowerInfo::remove, timeout in agency operation for key " << path;
  }
}

//////////////////////////////////////////////////////////////////////////////
/// @brief clear follower list, no changes in agency necessary
//////////////////////////////////////////////////////////////////////////////

void FollowerInfo::clear() {
  MUTEX_LOCKER(locker, _mutex);
  auto v = std::make_shared<std::vector<ServerID>>();
  _followers = v;  // will cast to std::vector<ServerID> const
}

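// The add()/remove() loops above implement a compare-and-swap against the
// agency: the AgencyPrecondition pins the previously read value of the key,
// the write transaction replaces it and bumps Current/Version, and on a
// precondition failure the loop re-reads and retries every 500ms for up to
// 30 seconds before logging a timeout.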
@@ -0,0 +1,81 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Max Neunhoeffer
/// @author Andreas Streichardt
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGOD_CLUSTER_FOLLOWER_INFO_H
#define ARANGOD_CLUSTER_FOLLOWER_INFO_H 1

#include "ClusterInfo.h"

namespace arangodb {

////////////////////////////////////////////////////////////////////////////////
/// @brief a class to track followers that are in sync for a shard
////////////////////////////////////////////////////////////////////////////////

class FollowerInfo {
  std::shared_ptr<std::vector<ServerID> const> _followers;
  Mutex _mutex;
  arangodb::LogicalCollection* _docColl;

 public:
  explicit FollowerInfo(arangodb::LogicalCollection* d)
      : _followers(new std::vector<ServerID>()), _docColl(d) { }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief get information about current followers of a shard.
  //////////////////////////////////////////////////////////////////////////////

  std::shared_ptr<std::vector<ServerID> const> get();

  //////////////////////////////////////////////////////////////////////////////
  /// @brief add a follower to a shard, this is only done by the server side
  /// of the "get-in-sync" capabilities. This reports to the agency under
  /// `/Current` but in asynchronous "fire-and-forget" way. The method
  /// fails silently, if the follower information has since been dropped
  /// (see `dropFollowerInfo` below).
  //////////////////////////////////////////////////////////////////////////////

  void add(ServerID const& s);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief remove a follower from a shard, this is only done by the
  /// server if a synchronous replication request fails. This reports to
  /// the agency under `/Current` but in an asynchronous "fire-and-forget"
  /// way.
  //////////////////////////////////////////////////////////////////////////////

  void remove(ServerID const& s);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief clear follower list, no changes in agency necessary
  //////////////////////////////////////////////////////////////////////////////

  void clear();
};
}  // end namespace arangodb

#endif

@@ -64,7 +64,7 @@ static uint64_t HashElementEdge(void*, SimpleIndexElement const& element, bool b
  }

  TRI_voc_rid_t revisionId = element.revisionId();
  return fasthash64(&revisionId, sizeof(revisionId), 0x56781234);
  return fasthash64_uint64(revisionId, 0x56781234);
}

/// @brief checks if key and element match

@@ -237,7 +237,7 @@ class HashIndex final : public PathBasedIndex {
  }

  TRI_voc_rid_t revisionId = element->revisionId();
  return fasthash64(&revisionId, sizeof(revisionId), hash);
  return fasthash64_uint64(revisionId, hash);
}
};

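// fasthash64_uint64 takes the 64-bit value itself instead of a pointer and a
// byte count, so the call sites no longer need to take the address of a
// local just to hash eight bytes; presumably the dedicated overload also
// lets a single word be hashed without the generic variable-length loop.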
@@ -447,7 +447,7 @@ void RestCursorHandler::modifyCursor() {
  auto cursorId = static_cast<arangodb::CursorId>(
      arangodb::basics::StringUtils::uint64(id));
  bool busy;
  auto cursor = cursors->find(cursorId, busy);
  auto cursor = cursors->find(cursorId, Cursor::CURSOR_VPACK, busy);

  if (cursor == nullptr) {
    if (busy) {

@@ -499,7 +499,7 @@ void RestCursorHandler::deleteCursor() {

  auto cursorId = static_cast<arangodb::CursorId>(
      arangodb::basics::StringUtils::uint64(id));
  bool found = cursors->remove(cursorId);
  bool found = cursors->remove(cursorId, Cursor::CURSOR_VPACK);

  if (!found) {
    generateError(rest::ResponseCode::NOT_FOUND, TRI_ERROR_CURSOR_NOT_FOUND);

@@ -306,7 +306,7 @@ void RestExportHandler::modifyCursor() {
  auto cursorId = static_cast<arangodb::CursorId>(
      arangodb::basics::StringUtils::uint64(id));
  bool busy;
  auto cursor = cursors->find(cursorId, busy);
  auto cursor = cursors->find(cursorId, Cursor::CURSOR_EXPORT, busy);

  if (cursor == nullptr) {
    if (busy) {

@@ -356,7 +356,7 @@ void RestExportHandler::deleteCursor() {

  auto cursorId = static_cast<arangodb::CursorId>(
      arangodb::basics::StringUtils::uint64(id));
  bool found = cursors->remove(cursorId);
  bool found = cursors->remove(cursorId, Cursor::CURSOR_EXPORT);

  if (!found) {
    generateError(rest::ResponseCode::NOT_FOUND, TRI_ERROR_CURSOR_NOT_FOUND);

@@ -29,6 +29,7 @@
#include "Basics/conversions.h"
#include "Basics/files.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/FollowerInfo.h"
#include "Cluster/ClusterMethods.h"
#include "GeneralServer/GeneralServer.h"
#include "Indexes/EdgeIndex.h"

@@ -1761,7 +1762,7 @@ int RestReplicationHandler::processRestoreCollectionCoordinator(
  VPackCollection::merge(parameters, sliceToMerge, false);
  VPackSlice const merged = mergedBuilder.slice();

  int res = ci->createCollectionCoordinator(dbName, newId, numberOfShards,
  int res = ci->createCollectionCoordinator(dbName, newId, numberOfShards, replicationFactor,
                                            merged, errorMsg, 0.0);
  if (res != TRI_ERROR_NO_ERROR) {
    errorMsg =

@@ -23,6 +23,7 @@

#include "AqlTransaction.h"
#include "CollectionNameResolver.h"
#include "Logger/Logger.h"
#include "VocBase/LogicalCollection.h"

using namespace arangodb;

@@ -55,10 +56,22 @@ int AqlTransaction::processCollectionCoordinator(aql::Collection* collection) {
/// @brief add a regular collection to the transaction

int AqlTransaction::processCollectionNormal(aql::Collection* collection) {
  arangodb::LogicalCollection const* col =
      this->resolver()->getCollectionStruct(collection->getName());
  TRI_voc_cid_t cid = 0;

  arangodb::LogicalCollection const* col =
      this->resolver()->getCollectionStruct(collection->getName());
  /*if (col == nullptr) {
    auto startTime = TRI_microtime();
    auto endTime = startTime + 60.0;
    do {
      usleep(10000);
      if (TRI_microtime() > endTime) {
        break;
      }
      col = this->resolver()->getCollectionStruct(collection->getName());
    } while (col == nullptr);
  }
  */
  if (col != nullptr) {
    cid = col->cid();
  }

@@ -89,7 +102,6 @@ LogicalCollection* AqlTransaction::documentCollection(TRI_voc_cid_t cid) {

int AqlTransaction::lockCollections() {
  auto trx = getInternals();

  for (auto& trxCollection : trx->_collections) {
    int res = TRI_LockCollectionTransaction(trxCollection,
                                            trxCollection->_accessType, 0);

@@ -44,6 +44,11 @@ typedef TRI_voc_tick_t CursorId;

class Cursor {
 public:
  enum CursorType {
    CURSOR_VPACK,
    CURSOR_EXPORT
  };

  Cursor(Cursor const&) = delete;
  Cursor& operator=(Cursor const&) = delete;

@@ -90,6 +95,8 @@ class Cursor {
    _isUsed = false;
  }

  virtual CursorType type() const = 0;

  virtual bool hasNext() = 0;

  virtual arangodb::velocypack::Slice next() = 0;

@@ -110,7 +117,7 @@ class Cursor {
  bool _isUsed;
};

class VelocyPackCursor : public Cursor {
class VelocyPackCursor final : public Cursor {
 public:
  VelocyPackCursor(TRI_vocbase_t*, CursorId, aql::QueryResult&&, size_t,
                   std::shared_ptr<arangodb::velocypack::Builder>, double,

@@ -120,6 +127,8 @@ class VelocyPackCursor : public Cursor {

 public:
  aql::QueryResult const* result() const { return &_result; }

  CursorType type() const override final { return CURSOR_VPACK; }

  bool hasNext() override final;

@@ -136,7 +145,7 @@ class VelocyPackCursor : public Cursor {
  bool _cached;
};

class ExportCursor : public Cursor {
class ExportCursor final : public Cursor {
 public:
  ExportCursor(TRI_vocbase_t*, CursorId, arangodb::CollectionExport*, size_t,
               double, bool);

@@ -144,6 +153,8 @@ class ExportCursor : public Cursor {
  ~ExportCursor();

 public:
  CursorType type() const override final { return CURSOR_EXPORT; }

  bool hasNext() override final;

  arangodb::velocypack::Slice next() override final;

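// With the type tag in place, repository lookups follow this pattern
// (sketch): an id that exists but belongs to the other cursor kind is
// treated the same as a missing one.
//
//   bool busy;
//   auto cursor = cursors->find(cursorId, Cursor::CURSOR_VPACK, busy);
//   if (cursor == nullptr) {
//     // not found, currently busy, or registered as an export cursor
//   }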
@@ -140,7 +140,7 @@ ExportCursor* CursorRepository::createFromExport(arangodb::CollectionExport* ex,
/// @brief remove a cursor by id
////////////////////////////////////////////////////////////////////////////////

bool CursorRepository::remove(CursorId id) {
bool CursorRepository::remove(CursorId id, Cursor::CursorType type) {
  arangodb::Cursor* cursor = nullptr;

  {

@@ -159,6 +159,11 @@ bool CursorRepository::remove(CursorId id) {
      return false;
    }

    if (cursor->type() != type) {
      // wrong type
      return false;
    }

    if (cursor->isUsed()) {
      // cursor is in use by someone else. now mark as deleted
      cursor->deleted();

@@ -181,7 +186,7 @@ bool CursorRepository::remove(CursorId id) {
/// it must be returned later using release()
////////////////////////////////////////////////////////////////////////////////

Cursor* CursorRepository::find(CursorId id, bool& busy) {
Cursor* CursorRepository::find(CursorId id, Cursor::CursorType type, bool& busy) {
  arangodb::Cursor* cursor = nullptr;
  busy = false;

@@ -201,6 +206,11 @@ Cursor* CursorRepository::find(CursorId id, bool& busy) {
      return nullptr;
    }

    if (cursor->type() != type) {
      // wrong cursor type
      return nullptr;
    }

    if (cursor->isUsed()) {
      busy = true;
      return nullptr;

@@ -79,7 +79,7 @@ class CursorRepository {
  /// @brief remove a cursor by id
  //////////////////////////////////////////////////////////////////////////////

  bool remove(CursorId);
  bool remove(CursorId, Cursor::CursorType);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief find an existing cursor by id

@@ -87,7 +87,7 @@ class CursorRepository {
  /// it must be returned later using release()
  //////////////////////////////////////////////////////////////////////////////

  Cursor* find(CursorId, bool&);
  Cursor* find(CursorId, Cursor::CursorType, bool&);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief return a cursor

@@ -34,6 +34,7 @@
#include "Basics/VelocyPackHelper.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterMethods.h"
#include "Cluster/FollowerInfo.h"
#include "Cluster/ServerState.h"
#include "Indexes/EdgeIndex.h"
#include "Indexes/HashIndex.h"

@@ -34,6 +34,7 @@
#include "Basics/VelocyPackHelper.h"
#include "Basics/WriteLocker.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/FollowerInfo.h"
#include "Cluster/ClusterMethods.h"
#include "Indexes/PrimaryIndex.h"
#include "RestServer/DatabaseFeature.h"

@@ -1178,7 +1179,7 @@ static void JS_PropertiesVocbaseCol(
    TRI_V8_THROW_EXCEPTION_PARAMETER(
        "indexBuckets must be a two-power between 1 and 1024");
  }

  int res = info->update(slice, false);

  if (res != TRI_ERROR_NO_ERROR) {

@@ -1219,9 +1220,15 @@ static void JS_PropertiesVocbaseCol(
    TRI_GET_GLOBAL_STRING(KeyOptionsKey);
    result->Set(KeyOptionsKey, TRI_VPackToV8(isolate, keyOpts)->ToObject());
  }
  result->Set(
      TRI_V8_ASCII_STRING("replicationFactor"),
      v8::Number::New(isolate, static_cast<double>(c->replicationFactor())));
  if (c->isSatellite()) {
    result->Set(
        TRI_V8_ASCII_STRING("replicationFactor"),
        TRI_V8_STD_STRING(std::string("satellite")));
  } else {
    result->Set(
        TRI_V8_ASCII_STRING("replicationFactor"),
        v8::Number::New(isolate, static_cast<double>(c->replicationFactor())));
  }
  std::string shardsLike = c->distributeShardsLike();
  if (!shardsLike.empty()) {
    CollectionNameResolver resolver(c->vocbase());

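// For callers this means collection.properties().replicationFactor is no
// longer always a number: a satellite collection now reports the string
// "satellite", while regular collections keep reporting their numeric
// replication factor.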
@@ -394,7 +394,7 @@ static void JS_ChecksumCollection(
  if (withData) {
    // with data
    uint64_t const n = slice.length() ^ 0xf00ba44ba5;
    uint64_t seed = fasthash64(&n, sizeof(n), 0xdeadf054);
    uint64_t seed = fasthash64_uint64(n, 0xdeadf054);

    for (auto const& it : VPackObjectIterator(slice, false)) {
      // loop over all attributes, but exclude _rev, _id and _key

@@ -2733,7 +2733,7 @@ static void JS_DecodeRev(v8::FunctionCallbackInfo<v8::Value> const& args) {
#endif
  char buffer[32];
  strftime(buffer, 32, "%Y-%m-%dT%H:%M:%S.000Z", &date);
  buffer[20] = (millis / 100) + '0';
  buffer[20] = static_cast<char>(millis / 100) + '0';
  buffer[21] = ((millis / 10) % 10) + '0';
  buffer[22] = (millis % 10) + '0';
  buffer[24] = 0;

@@ -2742,7 +2742,7 @@ static void JS_DecodeRev(v8::FunctionCallbackInfo<v8::Value> const& args) {
  result->Set(TRI_V8_ASCII_STRING("date"),
              TRI_V8_ASCII_STRING(buffer));
  result->Set(TRI_V8_ASCII_STRING("count"),
              v8::Number::New(isolate, count));
              v8::Number::New(isolate, static_cast<double>(count)));

  TRI_V8_RETURN(result);
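
The JS_DecodeRev hunks patch millisecond digits into the timestamp strftime() produced: positions 20-22 of "YYYY-MM-DDTHH:MM:SS.000Z" hold the three digits, position 23 keeps the 'Z', and the added static_cast silences the int-to-char narrowing the old line relied on. A standalone sketch of the same technique; the millis value is an assumed input (in the original it comes from the decoded revision):

#include <cstdio>
#include <ctime>

int main() {
  std::time_t now = std::time(nullptr);
  std::tm date;
  gmtime_r(&now, &date);   // POSIX; the original uses its own UTC conversion
  int millis = 123;        // assumption: extracted from the revision id

  char buffer[32];
  strftime(buffer, sizeof(buffer), "%Y-%m-%dT%H:%M:%S.000Z", &date);
  // overwrite the ".000" placeholder digit by digit
  buffer[20] = static_cast<char>(millis / 100) + '0';
  buffer[21] = static_cast<char>((millis / 10) % 10) + '0';
  buffer[22] = static_cast<char>(millis % 10) + '0';
  // buffer[23] stays 'Z'; strftime already NUL-terminated at index 24

  std::puts(buffer);
  return 0;
}
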
@@ -137,7 +137,7 @@ static void JS_JsonCursor(v8::FunctionCallbackInfo<v8::Value> const& args) {
  TRI_ASSERT(cursors != nullptr);

  bool busy;
  auto cursor = cursors->find(cursorId, busy);
  auto cursor = cursors->find(cursorId, Cursor::CURSOR_VPACK, busy);

  if (cursor == nullptr) {
    if (busy) {
@@ -705,7 +705,7 @@ std::unique_ptr<LogicalCollection> CreateCollectionCoordinator(LogicalCollection
  std::string errorMsg;
  int myerrno = ci->createCollectionCoordinator(
      parameters->dbName(), parameters->cid_as_string(),
      parameters->numberOfShards(), velocy.slice(), errorMsg, 240.0);
      parameters->numberOfShards(), parameters->replicationFactor(), velocy.slice(), errorMsg, 240.0);

  if (myerrno != TRI_ERROR_NO_ERROR) {
    if (errorMsg.empty()) {
@@ -34,6 +34,7 @@
#include "Aql/QueryCache.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ClusterMethods.h"
#include "Cluster/FollowerInfo.h"
#include "Cluster/ServerState.h"
#include "Indexes/EdgeIndex.h"
#include "Indexes/FulltextIndex.h"
@@ -381,7 +382,7 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,
      _version(ReadNumericValue<uint32_t>(info, "version", currentVersion())),
      _indexBuckets(ReadNumericValue<uint32_t>(
          info, "indexBuckets", DatabaseFeature::defaultIndexBuckets())),
      _replicationFactor(ReadNumericValue<size_t>(info, "replicationFactor", 1)),
      _replicationFactor(1),
      _numberOfShards(ReadNumericValue<size_t>(info, "numberOfShards", 1)),
      _allowUserKeys(ReadBooleanValue(info, "allowUserKeys", true)),
      _shardIds(new ShardMap()),

@@ -412,7 +413,8 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,
          "with the --database.auto-upgrade option.");

      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_FAILED, errorMsg);
    }
  }

  if (_isVolatile && _waitForSync) {
    // Illegal collection configuration
@@ -427,10 +429,59 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,
  }

  VPackSlice shardKeysSlice = info.get("shardKeys");

  bool const isCluster = ServerState::instance()->isRunningInCluster();
  // Cluster only tests
  if (ServerState::instance()->isCoordinator()) {
    if ((_numberOfShards == 0 && !_isSmart) || _numberOfShards > 1000) {
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER,
                                     "invalid number of shards");
    }

    if (shardKeysSlice.isNone()) {
      VPackSlice keyGenSlice = info.get("keyOptions");
      if (keyGenSlice.isObject()) {
        keyGenSlice = keyGenSlice.get("type");
        if (keyGenSlice.isString()) {
          StringRef tmp(keyGenSlice);
          if (!tmp.empty() && tmp != "traditional") {
            THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CLUSTER_UNSUPPORTED,
                                           "non-traditional key generators are "
                                           "not supported for sharded "
                                           "collections");
          }
        }
      }
    }

    auto replicationFactorSlice = info.get("replicationFactor");
    if (!replicationFactorSlice.isNone()) {
      bool isError = true;
      if (replicationFactorSlice.isNumber()) {
        _replicationFactor = replicationFactorSlice.getNumber<size_t>();
        // mop: only allow satellite collections to be created explicitly
        if (_replicationFactor > 0 && _replicationFactor <= 10) {
          isError = false;
#ifdef USE_ENTERPRISE
        } else if (_replicationFactor == 0) {
          isError = false;
#endif
        }
      }
#ifdef USE_ENTERPRISE
      else if (replicationFactorSlice.isString() && replicationFactorSlice.copyString() == "satellite") {
        _replicationFactor = 0;
        _numberOfShards = 1;
        _distributeShardsLike = "";
        isError = false;
      }
#endif
      if (isError) {
        THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER,
                                       "invalid replicationFactor");
      }
    }

    if (shardKeysSlice.isNone() || isSatellite()) {
      // Use default.
      _shardKeys.emplace_back(StaticStrings::KeyString);
    } else {
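
The new block above accepts either a number or, in enterprise builds, the string "satellite", which is stored as a replication factor of 0 (the value isSatellite() tests in a later hunk). It also flows into the coordinator call earlier in the diff, which now forwards parameters->replicationFactor(). A compact sketch of that decision table, using plain C++ types in place of the VPack slice; ENTERPRISE stands in for the USE_ENTERPRISE compile-time flag:

#include <cstddef>
#include <optional>
#include <stdexcept>
#include <string>

constexpr bool ENTERPRISE = true;  // placeholder for USE_ENTERPRISE

// Toy parse of "replicationFactor": numeric 1..10 is accepted everywhere;
// 0 or "satellite" only when enterprise features are compiled in.
size_t parseReplicationFactor(std::optional<double> number,
                              std::optional<std::string> text) {
  if (number) {
    auto rf = static_cast<size_t>(*number);
    if (rf > 0 && rf <= 10) return rf;
    if (ENTERPRISE && rf == 0) return 0;  // satellite given numerically
  } else if (ENTERPRISE && text && *text == "satellite") {
    return 0;  // satellite: one shard, no distributeShardsLike
  }
  throw std::invalid_argument("invalid replicationFactor");
}
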
@@ -470,32 +521,6 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,
  }

  // Cluster only tests
  if (ServerState::instance()->isCoordinator()) {
    if ((_numberOfShards == 0 && !_isSmart) || _numberOfShards > 1000) {
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER,
                                     "invalid number of shards");
    }

    VPackSlice keyGenSlice = info.get("keyOptions");
    if (keyGenSlice.isObject()) {
      keyGenSlice = keyGenSlice.get("type");
      if (keyGenSlice.isString()) {
        StringRef tmp(keyGenSlice);
        if (!tmp.empty() && tmp != "traditional") {
          THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CLUSTER_UNSUPPORTED,
                                         "non-traditional key generators are "
                                         "not supported for sharded "
                                         "collections");
        }
      }
    }

    if (_replicationFactor == 0 || _replicationFactor > 10) {
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER,
                                     "invalid replicationFactor");
    }
  }

  _keyGenerator.reset(KeyGenerator::factory(info.get("keyOptions")));
@@ -3583,3 +3608,8 @@ bool LogicalCollection::skipForAqlWrite(arangodb::velocypack::Slice document,
  return false;
}
#endif

bool LogicalCollection::isSatellite() const {
  return _replicationFactor == 0;
}
@@ -241,6 +241,7 @@ class LogicalCollection {

  // SECTION: Replication
  int replicationFactor() const;
  bool isSatellite() const;

  // SECTION: Sharding

@@ -541,7 +542,7 @@ class LogicalCollection {
  std::vector<std::shared_ptr<arangodb::Index>> _indexes;

  // SECTION: Replication
  size_t const _replicationFactor;
  size_t _replicationFactor;

  // SECTION: Sharding
  size_t _numberOfShards;
@@ -144,104 +144,6 @@ static bool CheckCrcMarker(TRI_df_marker_t const* marker, char const* end) {
}

/// @brief creates a new datafile
///
/// returns the file descriptor or -1 if the file cannot be created
static int CreateDatafile(std::string const& filename, TRI_voc_size_t maximalSize) {
  TRI_ERRORBUF;

  // open the file
  int fd = TRI_CREATE(filename.c_str(), O_CREAT | O_EXCL | O_RDWR | TRI_O_CLOEXEC | TRI_NOATIME,
                      S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);

  TRI_IF_FAILURE("CreateDatafile1") {
    // intentionally fail
    TRI_CLOSE(fd);
    fd = -1;
    errno = ENOSPC;
  }

  if (fd < 0) {
    if (errno == ENOSPC) {
      TRI_set_errno(TRI_ERROR_ARANGO_FILESYSTEM_FULL);
      LOG(ERR) << "cannot create datafile '" << filename << "': " << TRI_last_error();
    } else {
      TRI_SYSTEM_ERROR();

      TRI_set_errno(TRI_ERROR_SYS_ERROR);
      LOG(ERR) << "cannot create datafile '" << filename << "': " << TRI_GET_ERRORBUF;
    }
    return -1;
  }

  // no fallocate present, or at least pretend it's not there...
  int res = TRI_ERROR_NOT_IMPLEMENTED;

#ifdef __linux__
#ifdef FALLOC_FL_ZERO_RANGE
  // try fallocate
  res = fallocate(fd, FALLOC_FL_ZERO_RANGE, 0, maximalSize);
#endif
#endif

  if (res != TRI_ERROR_NO_ERROR) {
    // either fallocate failed or it is not there...

    // fill file with zeros from FileNullBuffer
    size_t writeSize = TRI_GetNullBufferSizeFiles();
    size_t written = 0;
    while (written < maximalSize) {
      if (writeSize + written > maximalSize) {
        writeSize = maximalSize - written;
      }

      ssize_t writeResult =
          TRI_WRITE(fd, TRI_GetNullBufferFiles(), static_cast<TRI_write_t>(writeSize));

      TRI_IF_FAILURE("CreateDatafile2") {
        // intentionally fail
        writeResult = -1;
        errno = ENOSPC;
      }

      if (writeResult < 0) {
        if (errno == ENOSPC) {
          TRI_set_errno(TRI_ERROR_ARANGO_FILESYSTEM_FULL);
          LOG(ERR) << "cannot create datafile '" << filename << "': " << TRI_last_error();
        } else {
          TRI_SYSTEM_ERROR();
          TRI_set_errno(TRI_ERROR_SYS_ERROR);
          LOG(ERR) << "cannot create datafile '" << filename << "': " << TRI_GET_ERRORBUF;
        }

        TRI_CLOSE(fd);
        TRI_UnlinkFile(filename.c_str());

        return -1;
      }

      written += static_cast<size_t>(writeResult);
    }
  }

  // go back to offset 0
  TRI_lseek_t offset = TRI_LSEEK(fd, (TRI_lseek_t)0, SEEK_SET);

  if (offset == (TRI_lseek_t)-1) {
    TRI_SYSTEM_ERROR();
    TRI_set_errno(TRI_ERROR_SYS_ERROR);
    TRI_CLOSE(fd);

    // remove empty file
    TRI_UnlinkFile(filename.c_str());

    LOG(ERR) << "cannot seek in datafile '" << filename << "': '" << TRI_GET_ERRORBUF << "'";
    return -1;
  }

  return fd;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief extract the numeric part from a filename
/// the filename must look like this: /.*type-abc\.ending$/, where abc is
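
The removed CreateDatafile() above (it resurfaces as TRI_CreateDatafile() in the CreatePhysicalDatafile hunk below) captures a common preallocation pattern: try fallocate() on Linux, fall back to writing zero blocks until the requested size is reached, then rewind to offset 0. A stripped-down sketch of that strategy for POSIX systems, using posix_fallocate() as the fast path and omitting the failure-injection hooks and ArangoDB error codes of the original:

#include <fcntl.h>
#include <unistd.h>
#include <algorithm>
#include <cstring>
#include <string>

// Preallocate a zero-filled file of `size` bytes; returns the fd or -1.
int createPreallocated(std::string const& filename, size_t size) {
  int fd = ::open(filename.c_str(),
                  O_CREAT | O_EXCL | O_RDWR | O_CLOEXEC, 0640);
  if (fd < 0) {
    return -1;
  }

  // fast path: ask the filesystem to reserve the space in one call
  int rc = ::posix_fallocate(fd, 0, static_cast<off_t>(size));

  if (rc != 0) {
    // fallback: write zero blocks, shrinking the last write to fit
    char zeros[4096];
    std::memset(zeros, 0, sizeof(zeros));
    size_t written = 0;
    while (written < size) {
      size_t chunk = std::min(sizeof(zeros), size - written);
      ssize_t n = ::write(fd, zeros, chunk);
      if (n < 0) {
        ::close(fd);
        ::unlink(filename.c_str());  // do not leave a partial file behind
        return -1;
      }
      written += static_cast<size_t>(n);
    }
  }

  // rewind so the caller starts writing at offset 0, as the original does
  if (::lseek(fd, 0, SEEK_SET) == static_cast<off_t>(-1)) {
    ::close(fd);
    ::unlink(filename.c_str());
    return -1;
  }
  return fd;
}
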
@@ -292,8 +194,8 @@ static TRI_datafile_t* CreateAnonymousDatafile(TRI_voc_fid_t fid,
  // memory map the data
  void* data;
  void* mmHandle;
  ssize_t res = TRI_MMFile(nullptr, maximalSize, PROT_WRITE | PROT_READ, flags,
                           fd, &mmHandle, 0, &data);
  int res = TRI_MMFile(nullptr, maximalSize, PROT_WRITE | PROT_READ, flags,
                       fd, &mmHandle, 0, &data);

#ifdef MAP_ANONYMOUS
  // nothing to do
@@ -327,7 +229,7 @@ static TRI_datafile_t* CreatePhysicalDatafile(std::string const& filename,
                                              TRI_voc_size_t maximalSize) {
  TRI_ASSERT(!filename.empty());

  int fd = CreateDatafile(filename, maximalSize);
  int fd = TRI_CreateDatafile(filename, maximalSize);

  if (fd < 0) {
    // an error occurred
@@ -342,8 +244,8 @@ static TRI_datafile_t* CreatePhysicalDatafile(std::string const& filename,
  // try populating the mapping already
  flags |= MAP_POPULATE;
#endif
  ssize_t res = TRI_MMFile(0, maximalSize, PROT_WRITE | PROT_READ, flags, fd,
                           &mmHandle, 0, &data);
  int res = TRI_MMFile(0, maximalSize, PROT_WRITE | PROT_READ, flags, fd,
                       &mmHandle, 0, &data);

  if (res != TRI_ERROR_NO_ERROR) {
    TRI_set_errno(res);
@@ -398,7 +398,6 @@ static int SliceifyMarker(TRI_replication_dump_t* dump,
    builder.add("rev", slice.get(StaticStrings::RevString));
  }
  // convert 2300 markers to 2301 markers for edges
  Append(dump, ",\"type\":");
  if (type == TRI_DF_MARKER_VPACK_DOCUMENT && isEdgeCollection) {
    builder.add("type", VPackValue(2301));
  } else {
@@ -538,7 +537,8 @@ static int DumpCollection(TRI_replication_dump_t* dump,
  bool bufferFull = false;

  auto callback = [&dump, &lastFoundTick, &databaseId, &collectionId,
                   &withTicks, &isEdgeCollection, &bufferFull, &useVpp](
                   &withTicks, &isEdgeCollection, &bufferFull, &useVpp,
                   &collection](
      TRI_voc_tick_t foundTick, TRI_df_marker_t const* marker) {
    // note the last tick we processed
    lastFoundTick = foundTick;

@@ -553,6 +553,7 @@ static int DumpCollection(TRI_replication_dump_t* dump,
  }

  if (res != TRI_ERROR_NO_ERROR) {
    LOG(ERR) << "got error during dump of collection '" << collection->name() << "': " << TRI_errno_string(res);
    THROW_ARANGO_EXCEPTION(res);
  }
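
The capture-list change above makes collection available by reference inside the dump callback, and the following hunk adds an error log that reports the collection name when the dump fails. The rule it illustrates is simple: every outer variable a lambda body names must appear in its capture list. In miniature, with toy types rather than the ArangoDB ones:

#include <iostream>
#include <string>

struct Collection {
  std::string name() const { return "demo"; }
};

int main() {
  Collection c;
  Collection* collection = &c;
  int res = 1;  // pretend a dump step failed

  // &collection must be captured for the body to use it, which is
  // exactly what the diff adds alongside the existing captures.
  auto callback = [&res, &collection]() {
    if (res != 0) {
      std::cerr << "got error during dump of collection '"
                << collection->name() << "'\n";
    }
  };
  callback();
  return 0;
}
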
@@ -171,3 +171,31 @@ configure_file (
  "${CMAKE_CURRENT_BINARY_DIR}/lib/Basics/directories.h"
  NEWLINE_STYLE UNIX
)

if (MSVC)
  # so we don't need to ship DLLs twice, make it one directory:
  include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/InstallMacros.cmake)
  set(CMAKE_INSTALL_FULL_SBINDIR "${CMAKE_INSTALL_FULL_BINDIR}")

  # other platforms link the file into the binary
  install(FILES ${ICU_DT}
    DESTINATION "${INSTALL_ICU_DT_DEST}"
    RENAME ${ICU_DT_DEST})

  install_readme(README.windows README.windows.txt)

  # install the visual studio runtime:
  set(CMAKE_INSTALL_UCRT_LIBRARIES 1)
  include(InstallRequiredSystemLibraries)
  INSTALL(FILES ${CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS} DESTINATION ${CMAKE_INSTALL_SBINDIR} COMPONENT Libraries)
  INSTALL(FILES ${CMAKE_INSTALL_SYSTEM_RUNTIME_COMPONENT} DESTINATION ${CMAKE_INSTALL_SBINDIR} COMPONENT Libraries)

  # install openssl
  if (NOT LIB_EAY_RELEASE_DLL OR NOT SSL_EAY_RELEASE_DLL)
    message(FATAL_ERROR "BUNDLE_OPENSSL set but couldn't locate SSL DLLs. Please set LIB_EAY_RELEASE_DLL and SSL_EAY_RELEASE_DLL")
  endif()

  install(FILES "${LIB_EAY_RELEASE_DLL}" DESTINATION "${CMAKE_INSTALL_BINDIR}/" COMPONENT Libraries)
  install(FILES "${SSL_EAY_RELEASE_DLL}" DESTINATION "${CMAKE_INSTALL_BINDIR}/" COMPONENT Libraries)
endif()
@@ -10,6 +10,8 @@ cmake_minimum_required(VERSION 2.8)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "@PROJECT_BINARY_DIR@/bin/")
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_X ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
set(CMAKE_INSTALL_DO_STRIP 1)
set(CMAKE_STRIP @CMAKE_STRIP@)
set(CROSS_COMPILING @CROSS_COMPILING@)

set(CMAKE_INSTALL_BINDIR @CMAKE_INSTALL_BINDIR@)
set(CMAKE_INSTALL_FULL_BINDIR @CMAKE_INSTALL_FULL_BINDIR@)
@@ -10,13 +10,7 @@ set(PACKAGING_HANDLE_CONFIG_FILES true)
FILE(READ "${PROJECT_SOURCE_DIR}/Installation/debian/packagedesc.txt" CPACK_DEBIAN_PACKAGE_DESCRIPTION)
set(CPACK_DEBIAN_PACKAGE_SECTION "database")
set(CPACK_DEBIAN_PACKAGE_CONFLICTS "arangodb, ${CPACKG_PACKAGE_CONFLICTS}, ${CPACKG_PACKAGE_CONFLICTS}-client, ${CPACK_PACKAGE_NAME}-client")
# building the dependencies does not (yet) work when cross compiling
if (CROSS_COMPILING)
  set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS OFF)
  set(CPACK_DEBIAN_PACKAGE_DEPENDS "libc6 (>= 2.14), libgcc1 (>= 1:3.4), libssl1.0.0 (>= 1.0.1), libstdc++6 (>= 5.2)")
else()
  set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON)
endif()
set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON)
if(NOT DISABLE_XZ_DEB)
  set(CPACK_DEBIAN_COMPRESSION_TYPE "xz")
endif()
@@ -1,6 +1,3 @@
# so we don't need to ship DLLs twice, make it one directory:
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/InstallMacros.cmake)
set(CMAKE_INSTALL_FULL_SBINDIR "${CMAKE_INSTALL_FULL_BINDIR}")
set(W_INSTALL_FILES "${PROJECT_SOURCE_DIR}/Installation/Windows/")
if (${USE_ENTERPRISE})
  set(CPACK_PACKAGE_NAME "ArangoDB3e")

@@ -28,21 +25,6 @@ else ()
  SET(BITS 32)
endif ()

install_readme(README.windows README.windows.txt)

# install the visual studio runtime:
set(CMAKE_INSTALL_UCRT_LIBRARIES 1)
include(InstallRequiredSystemLibraries)
INSTALL(FILES ${CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS} DESTINATION ${CMAKE_INSTALL_SBINDIR} COMPONENT Libraries)
INSTALL(FILES ${CMAKE_INSTALL_SYSTEM_RUNTIME_COMPONENT} DESTINATION ${CMAKE_INSTALL_SBINDIR} COMPONENT Libraries)

# install openssl
if (NOT LIB_EAY_RELEASE_DLL OR NOT SSL_EAY_RELEASE_DLL)
  message(FATAL_ERROR "BUNDLE_OPENSSL set but couldn't locate SSL DLLs. Please set LIB_EAY_RELEASE_DLL and SSL_EAY_RELEASE_DLL")
endif()

install(FILES "${LIB_EAY_RELEASE_DLL}" DESTINATION "${CMAKE_INSTALL_BINDIR}/" COMPONENT Libraries)
install(FILES "${SSL_EAY_RELEASE_DLL}" DESTINATION "${CMAKE_INSTALL_BINDIR}/" COMPONENT Libraries)

# icon paths
set(ICON_PATH "${W_INSTALL_FILES}/Icons/")
@@ -82,10 +64,6 @@ set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${AR
################################################################################
# hook to build the server package
################################################################################
# other platforms link the file into the binary
install(FILES ${ICU_DT}
  DESTINATION "${INSTALL_ICU_DT_DEST}"
  RENAME ${ICU_DT_DEST})

add_custom_target(package-arongodb-server-nsis
  COMMAND ${CMAKE_COMMAND} .
@@ -37,6 +37,16 @@ elseif (MSVC)
  include(packages/nsis)
endif ()

################################################################################
## generic tarball
################################################################################
set(CPACK_PACKAGE_TGZ "${CMAKE_BINARY_DIR}/${CPACK_PACKAGE_FILE_NAME}.tar.gz")
add_custom_target(TGZ_package
  COMMENT "create TGZ-package"
  COMMAND ${CMAKE_CPACK_COMMAND} -G TGZ -C ${CMAKE_BUILD_TYPE}
)

################################################################################
## SNAPCRAFT PACKAGE
################################################################################
@@ -7,7 +7,6 @@ endif()
if(SNAPCRAFT_FOUND)
  set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}")
  set(SNAPCRAFT_TEMPLATE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/Installation/Ubuntu")
  set(CPACK_PACKAGE_TGZ "${CMAKE_BINARY_DIR}/${CPACK_PACKAGE_FILE_NAME}.tar.gz")
  set(SNAPCRAFT_SOURCE_DIR "${CMAKE_BINARY_DIR}/_CPack_Packages/SNAP")

  message(STATUS "Create snap package")

@@ -35,16 +34,11 @@ if(SNAPCRAFT_FOUND)
    DESTINATION "${SNAPCRAFT_SOURCE_DIR}/"
  )

  add_custom_target(snap_TGZ
    COMMENT "create TGZ-package"
    COMMAND ${CMAKE_CPACK_COMMAND} -G TGZ
  )

  add_custom_target(snap
    COMMENT "create snap-package"
    COMMAND ${SNAP_EXE} snap
    COMMAND cp *.snap ${PROJECT_BINARY_DIR}
    DEPENDS snap_TGZ
    DEPENDS TGZ_package
    WORKING_DIRECTORY ${SNAPCRAFT_SOURCE_DIR}
  )
@@ -1,3 +1,4 @@

/* jshint strict: false, sub: true */
/* global print, arango */
'use strict';

@@ -1466,6 +1467,8 @@ function startInstanceAgency (instanceInfo, protocol, options, addArgs, rootDir)
  instanceArgs['server.endpoint'] = protocol + '://127.0.0.1:' + port;
  instanceArgs['agency.my-address'] = protocol + '://127.0.0.1:' + port;
  instanceArgs['agency.supervision-grace-period'] = '5';
  //instanceArgs['agency.election-timeout-min'] = '0.5';
  //instanceArgs['agency.election-timeout-max'] = '4.0';

  if (i === N - 1) {
@@ -153,6 +153,7 @@
"ERROR_CLUSTER_BACKEND_UNAVAILABLE" : { "code" : 1478, "message" : "A cluster backend which was required for the operation could not be reached" },
"ERROR_CLUSTER_UNKNOWN_CALLBACK_ENDPOINT" : { "code" : 1479, "message" : "An endpoint couldn't be found" },
"ERROR_CLUSTER_AGENCY_STRUCTURE_INVALID" : { "code" : 1480, "message" : "Invalid agency structure" },
"ERROR_CLUSTER_AQL_COLLECTION_OUT_OF_SYNC" : { "code" : 1481, "message" : "collection is out of sync" },
"ERROR_QUERY_KILLED" : { "code" : 1500, "message" : "query killed" },
"ERROR_QUERY_PARSE" : { "code" : 1501, "message" : "%s" },
"ERROR_QUERY_EMPTY" : { "code" : 1502, "message" : "query is empty" },