mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of https://github.com/arangodb/arangodb into devel

commit 3e2947a8ee
@@ -1,3 +1,5 @@
+ALLBOOKS=HTTP AQL Users
+
 all: build-books
 
 .PHONY:
@@ -53,10 +55,13 @@ ppbook-precheck-bad-code-sections:
 
 ppbook-check-html-link:
 	@echo "##### checking for invalid HTML links in $(NAME)"
-	@if test "`egrep -r '\[.*\]\(.*\)' ppbooks/$(NAME) |grep '\.md:' |grep html |grep -v http://|grep -v https:// |grep -v header.css |wc -l`" -gt 0; then \
+	@echo "$(ALLBOOKS)" |sed -e 's; ;\n;g' |sed -e 's;^;/;' -e 's;$$;/;' > /tmp/books.regex
+
+	@egrep -r '\[.*\]\(.*\)' ppbooks/$(NAME)|grep '\.md:'| grep 'html'| grep -v 'http://' | grep -v 'https://' | grep -v 'header.css' | grep -v -f /tmp/books.regex > /tmp/relative_html_links.txt ||true
+	@if test "`cat /tmp/relative_html_links.txt |wc -l`" -gt 0; then \
 		echo "Found links to .html files inside of the document! use <foo>.md instead!"; \
 		echo; \
-		egrep -r '\[.*\]\(.*\)' ppbooks/$(NAME) | grep '\.md:' | grep html |grep -v http://|grep -v https:// |grep -v header.css ; \
+		cat /tmp/relative_html_links.txt; \
 		exit 1; \
 	fi
 
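Note on the new filter above: /tmp/books.regex turns the book list into patterns like /HTTP/, /AQL/, /Users/, so cross-book links (which legitimately point at another book's rendered .html) are whitelisted, and only intra-book .html links fail the check. A rough JavaScript sketch of the same filtering logic, with hypothetical sample lines (the real check runs egrep over the generated ppbooks tree):

    // Sketch: replicate the grep pipeline's logic on sample link lines.
    var books = ['HTTP', 'AQL', 'Users'];
    var bookPatterns = books.map(function (b) { return '/' + b + '/'; });

    var lines = [
      'Users/Foo.md:[bad](Bar.html)',                  // intra-book .html link -> flagged
      'Users/Foo.md:[ok](../HTTP/Api.html)',           // cross-book link -> allowed
      'Users/Foo.md:[ok](https://example.com/x.html)'  // external link -> allowed
    ];

    var offenders = lines.filter(function (line) {
      if (!/\.md:/.test(line) || line.indexOf('html') === -1) { return false; }
      if (/https?:\/\//.test(line) || line.indexOf('header.css') !== -1) { return false; }
      // grep -v -f /tmp/books.regex: drop lines matching any /<book>/ pattern
      return !bookPatterns.some(function (p) { return line.indexOf(p) !== -1; });
    });

    console.log(offenders); // -> [ 'Users/Foo.md:[bad](Bar.html)' ]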
@@ -99,18 +104,18 @@ book-check-dangling-anchors:
 	FN=`echo $$i | cut '-d,' -f 2`; \
 	SFN=`echo $$i | cut '-d,' -f 1`; \
 	if test -z "$$FN"; then \
 		FN=$$SFN; \
 	else \
 		SFNP=`echo $$SFN | sed 's;/[a-zA-Z0-9]*.html;;'`; \
 		FN="$${SFNP}/$${FN}"; \
 	fi; \
 	if test -n "$$ANCHOR"; then \
 		if grep -q "id=\"$$ANCHOR\">" $$FN; then \
 			/bin/true; \
 		else \
 			echo "$$i"; \
 			NO=$$((NO + 1)); \
 		fi; \
 	fi; \
 	done; \
 	if test "$${NO}" -gt 0; then \
@@ -133,8 +138,8 @@ build-book:
 	mkdir -p ppbooks/$(NAME); \
 	WD=`pwd`; \
 	for dir in `find $(NAME) -type d `; do \
 		cd $${WD}/ppbooks; \
 		test -d $${dir} || mkdir -p $${dir}; \
 	done; \
 	fi
 	cd ppbooks/$(NAME); if ! test -L SUMMARY.md; then ln -s ../../$(NAME)/SUMMARY.md . ; fi
@@ -182,12 +187,12 @@ clean-book-intermediate:
 
 
 #************************************************************
 # Check docublocks - checks whether docublocks are
 # - files in intermediate output directories and temporary
 #   files are excluded (with # in their names)
 # - unique in the source
 # - all docublocks are used somewhere in the documentation
 #
 check-docublocks:
 	grep -R '@startDocuBlock' --include "*.h" --include "*.cpp" --include "*.js" --include "*.mdpp" . |\
 	grep -v '@startDocuBlockInline' |\
|
@ -206,7 +211,7 @@ check-docublocks:
|
||||||
>> /tmp/rawindoc.txt
|
>> /tmp/rawindoc.txt
|
||||||
cat /tmp/rawindoc.txt | sed -e "s;.*ck ;;" -e "s;.*ne ;;" |sort -u > /tmp/indoc.txt
|
cat /tmp/rawindoc.txt | sed -e "s;.*ck ;;" -e "s;.*ne ;;" |sort -u > /tmp/indoc.txt
|
||||||
grep -R '^@startDocuBlock' ../DocuBlocks --include "*.md" --include "*.mdpp" |grep -v aardvark > /tmp/rawinprog.txt
|
grep -R '^@startDocuBlock' ../DocuBlocks --include "*.md" --include "*.mdpp" |grep -v aardvark > /tmp/rawinprog.txt
|
||||||
# searching the Inline docublocks needs some more blacklisting:
|
# searching the Inline docublocks needs some more blacklisting:
|
||||||
grep -R '@startDocuBlockInline' --include "*.h" --include "*.cpp" --include "*.js" --include "*.mdpp" . |\
|
grep -R '@startDocuBlockInline' --include "*.h" --include "*.cpp" --include "*.js" --include "*.mdpp" . |\
|
||||||
grep -v ppbook |\
|
grep -v ppbook |\
|
||||||
grep -v allComments.txt |\
|
grep -v allComments.txt |\
|
||||||
|
@@ -257,16 +262,17 @@ build-books-keep-md:
 	make build-book NAME=$(NAME)
 
 build-books:
-	make clean-intermediate NAME=Users
-	make clean-intermediate NAME=AQL
-	make clean-intermediate NAME=HTTP
-	make build-books-keep-md NAME=Users
-	make build-books-keep-md NAME=AQL
-	make build-books-keep-md NAME=HTTP
-
-	#make ppbook-check-html-link NAME=Users
-	#make ppbook-check-html-link NAME=AQL
-	#make ppbook-check-html-link NAME=HTTP
+	for book in $(ALLBOOKS); do \
+		make clean-intermediate NAME=$${book}; \
+	done
+
+	for book in $(ALLBOOKS); do \
+		make build-books-keep-md NAME=$${book}; \
+	done
+
+	for book in $(ALLBOOKS); do \
+		make ppbook-check-html-link NAME=$${book}; \
+	done
+
 	make check-docublocks
+	echo '<head><meta http-equiv="refresh" content="0; url=Users/"></head><body></body>' > books/index.html
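The refactor above replaces three hard-coded copies of each build step with one loop per phase over $(ALLBOOKS), so adding a book becomes a one-word change, and the previously commented-out link check is now actually run. A rough JavaScript sketch of the resulting control flow (book names taken from the Makefile; the make invocations are only printed here, not executed):

    // Sketch: phase-major iteration over the book list, as in build-books.
    var books = ['HTTP', 'AQL', 'Users']; // mirrors ALLBOOKS
    var phases = ['clean-intermediate', 'build-books-keep-md', 'ppbook-check-html-link'];

    phases.forEach(function (phase) {
      books.forEach(function (book) {
        // each iteration corresponds to: make <phase> NAME=<book>
        console.log('make ' + phase + ' NAME=' + book);
      });
    });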
@@ -84,6 +84,7 @@ echo "EXAMPLES"
 echo "GRUNT"
 (
     cd js/apps/system/_admin/aardvark/APP
+    rm -rf node_modules
    npm install --only=dev
    grunt deploy
 )
@@ -1164,7 +1164,7 @@ size_t ClusterComm::performRequests(std::vector<ClusterCommRequest>& requests,
   // We only get here if the global timeout was triggered, not all
   // requests are marked by done!
 
-  LOG_TOPIC(ERR, logTopic) << "ClusterComm::performRequests: "
+  LOG_TOPIC(DEBUG, logTopic) << "ClusterComm::performRequests: "
       << "got timeout, this will be reported...";
 
   // Forget about
@@ -79,7 +79,6 @@ router.get('/config.js', function(req, res) {
       && isTrustedProxy(req.remoteAddress)) {
     basePath = req.headers['x-script-name'];
   }
-  console.log(Object.keys(global));
   res.set('content-type', 'text/javascript');
   res.send("var frontendConfig = " + JSON.stringify({
     "basePath": basePath,
@@ -54,7 +54,7 @@
 <div class="pure-u-1-1 pure-u-md-12-24 pure-u-lg-6-24">
   <div class="graphWrapper">
     <div id='clusterAverage'></div>
-    <div class="graphLabel">TRANSFER SIZE</div>
+    <div class="graphLabel">AVG Request Time</div>
   </div>
 </div>
 
@@ -112,6 +112,32 @@
         </div>
       </th>
     </tr>
+    <tr>
+      <th class="collectionTh">Unique:</th>
+      <th>
+        <input id="newPersistentUnique" type="checkbox" name="newPersistentUnique" value="true">
+      </th>
+      <th class="tooltipInfoTh">
+        <div>
+          <a class="index-tooltip" data-toggle="tooltip" data-placement="left" title="If true, then create a unique index.">
+            <span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
+          </a>
+        </div>
+      </th>
+    </tr>
+    <tr>
+      <th class="collectionTh">Sparse:</th>
+      <th>
+        <input id="newPersistentSparse" type="checkbox" name="newPersistentSparse" value="true">
+      </th>
+      <th class="tooltipInfoTh">
+        <div>
+          <a class="index-tooltip" data-toggle="tooltip" data-placement="left" title="If true, then create a sparse index.">
+            <span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
+          </a>
+        </div>
+      </th>
+    </tr>
   </table>
 </div>
 <div id="newIndexTypeHash" class="newIndexClass" style="display:none">
@@ -284,7 +284,7 @@
         self.chartsOptions[1].options[0].values.push({x:time, y: self.calcTotalHttp(data.http, key)});
 
         //AVERAGE
-        self.chartsOptions[2].options[0].values.push({x:time, y: data.avgRequestTime[key]});
+        self.chartsOptions[2].options[0].values.push({x:time, y: data.avgRequestTime[key] / self.coordinators.length});
       });
       self.historyInit = true;
     }
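Why the division: the dashboard accumulates per-coordinator figures under each key, so summing every coordinator's average request time and then dividing by the coordinator count yields the cluster-wide mean (this assumes each coordinator reported a value for the key). A small numeric sketch:

    // Sketch: mean of per-coordinator average request times.
    // Assumes avgRequestTime[key] already holds the sum over all coordinators.
    var coordinators = ['Coordinator0001', 'Coordinator0002']; // hypothetical
    var summedAvg = 0.004 + 0.006; // e.g. 4 ms and 6 ms averages, summed to 0.010

    var clusterAvg = summedAvg / coordinators.length;
    console.log(clusterAvg); // 0.005 -> plotted as 5 ms instead of a misleading 10 ms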
@@ -308,16 +308,16 @@
       //AVERAGE
       self.chartsOptions[2].options[0].values.push({
         x: data.times[data.times.length - 1],
-        y: data.avgRequestTime[data.bytesSentPerSecond.length - 1]
+        y: data.avgRequestTime[data.bytesSentPerSecond.length - 1] / self.coordinators.length
       });
 
 
     }
   },
 
   chartsOptions: [
     {
       id: "#clusterData",
+      type: 'bytes',
       count: 2,
       options: [
         {
@@ -340,6 +340,7 @@
     },
     {
       id: "#clusterHttp",
+      type: 'bytes',
       options: [{
         area: true,
         values: [],
@@ -351,10 +352,11 @@
     {
       id: "#clusterAverage",
       data: [],
+      type: 'seconds',
       options: [{
         area: true,
         values: [],
-        key: "Bytes",
+        key: "Seconds",
         color: "rgb(243, 156, 18)",
         fillOpacity: 0.1
       }]
@@ -392,11 +394,22 @@
       self.charts[c.id].yAxis
       .axisLabel('')
       .tickFormat(function(d) {
-        if (d === null) {
-          return 'N/A';
+        var formatted;
+
+        if (c.type === 'bytes') {
+          if (d === null) {
+            return 'N/A';
+          }
+          formatted = parseFloat(d3.format(".2f")(d));
+          return prettyBytes(formatted);
+        }
+        else if (c.type === 'seconds') {
+          if (d === null) {
+            return 'N/A';
+          }
+          formatted = parseFloat(d3.format(".2f")(d));
+          return formatted;
         }
-        var formatted = parseFloat(d3.format(".2f")(d));
-        return prettyBytes(formatted);
       });
 
       var data, lines = self.returnGraphOptions(c.id);
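With the new per-chart type field, the y-axis formatter dispatches on c.type instead of always pretty-printing bytes, so the average chart renders plain seconds. A condensed sketch of the dispatch (prettyBytes stands in for the frontend's pretty-bytes helper; sample values are hypothetical):

    // Sketch: type-aware tick formatting as introduced above.
    function makeTickFormat(type) {
      return function (d) {
        if (d === null) { return 'N/A'; }
        var formatted = parseFloat(d.toFixed(2)); // stands in for d3.format(".2f")
        return type === 'bytes' ? prettyBytes(formatted) : formatted;
      };
    }

    function prettyBytes(n) { // toy stand-in for the real helper
      return n >= 1e6 ? (n / 1e6) + ' MB' : n + ' B';
    }

    console.log(makeTickFormat('bytes')(2500000));  // "2.5 MB"
    console.log(makeTickFormat('seconds')(0.0051)); // 0.01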
@@ -76,9 +76,13 @@
         break;
       case 'Persistent':
         fields = $('#newPersistentFields').val();
+        unique = self.checkboxToValue('#newPersistentUnique');
+        sparse = self.checkboxToValue('#newPersistentSparse');
        postParameter = {
-          type: 'persistent',
-          fields: self.stringToArray(fields)
+          type: 'rocksdb',
+          fields: self.stringToArray(fields),
+          unique: unique,
+          sparse: sparse
        };
        break;
      case 'Hash':
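For context, the two new checkbox values flow into the index-creation request, and the type string changes to 'rocksdb', presumably the server-side name for the persistent (RocksDB-backed) index at the time. A sketch of the payload this dialog now produces (collection name and field values are illustrative only):

    // Sketch: index-creation payload built from the dialog inputs.
    // Sent as e.g. POST /_api/index?collection=myCollection (name hypothetical).
    var postParameter = {
      type: 'rocksdb',         // persistent index type, as sent in this change
      fields: ['name', 'age'], // from the comma-separated fields input
      unique: true,            // from #newPersistentUnique
      sparse: false            // from #newPersistentSparse
    };
    console.log(JSON.stringify(postParameter));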
@@ -49,7 +49,7 @@
     }.bind(this);
 
     var cb = function() {
-      console.log("node complete");
+      console.log("");
     };
 
     if (!this.initCoordDone) {
@@ -856,6 +856,166 @@ function cleanupCurrentCollections (plannedCollections, currentCollections,
   }
 }
 
+////////////////////////////////////////////////////////////////////////////////
+/// @brief synchronize one shard, this is run as a V8 task
+////////////////////////////////////////////////////////////////////////////////
+
+function synchronizeOneShard(database, shard, planId, leader) {
+  // synchronize this shard from the leader
+  // this function will throw if anything goes wrong
+
+  var ok = false;
+  const rep = require("@arangodb/replication");
+
+  console.info("synchronizeOneShard: trying to synchronize local shard '%s/%s' for central '%s/%s'",
+               database, shard, database, planId);
+  try {
+    var ep = ArangoClusterInfo.getServerEndpoint(leader);
+    // First once without a read transaction:
+    var sy;
+    var count = 60;
+    while (true) {
+      try {
+        sy = rep.syncCollection(shard,
+          { endpoint: ep, incremental: true,
+            keepBarrier: true });
+        break;
+      }
+      catch (err) {
+        console.debug("synchronizeOneShard: syncCollection did not work,",
+                      "trying again later for shard", shard);
+      }
+      if (--count <= 0) {
+        console.error("synchronizeOneShard: syncCollection did not work",
+                      "after many tries, giving up on shard", shard);
+        throw "syncCollection did not work";
+      }
+      wait(5);
+    }
+
+    if (sy.error) {
+      console.error("synchronizeOneShard: could not initially synchronize",
+                    "shard ", shard, sy);
+      throw "Initial sync for shard " + shard + " failed";
+    } else {
+      if (sy.collections.length === 0 ||
+          sy.collections[0].name !== shard) {
+        cancelBarrier(ep, database, sy.barrierId);
+        throw "Shard " + shard + " seems to be gone from leader!";
+      } else {
+        // Now start a read transaction to stop writes:
+        var lockJobId = false;
+        try {
+          lockJobId = startReadLockOnLeader(ep, database,
+                                            shard, 300);
+          console.debug("lockJobId:", lockJobId);
+        }
+        catch (err1) {
+          console.error("synchronizeOneShard: exception in startReadLockOnLeader:", err1);
+        }
+        finally {
+          cancelBarrier(ep, database, sy.barrierId);
+        }
+        if (lockJobId !== false) {
+          try {
+            var sy2 = rep.syncCollectionFinalize(
+              database, shard, sy.collections[0].id,
+              sy.lastLogTick, { endpoint: ep });
+            if (sy2.error) {
+              console.error("synchronizeOneShard: Could not synchronize shard",
+                            shard, sy2);
+              ok = false;
+            } else {
+              ok = addShardFollower(ep, database, shard);
+            }
+          }
+          catch (err3) {
+            console.error("synchronizeOneshard: exception in",
+                          "syncCollectionFinalize:", err3);
+          }
+          finally {
+            if (!cancelReadLockOnLeader(ep, database,
+                                        lockJobId)) {
+              console.error("synchronizeOneShard: read lock has timed out",
+                            "for shard", shard);
+              ok = false;
+            }
+          }
+        } else {
+          console.error("synchronizeOneShard: lockJobId was false for shard",
+                        shard);
+        }
+        if (ok) {
+          console.info("synchronizeOneShard: synchronization worked for shard",
+                       shard);
+        } else {
+          throw "Did not work for shard " + shard + ".";
+          // just to log below in catch
+        }
+      }
+    }
+  }
+  catch (err2) {
+    console.error("synchronization of local shard '%s/%s' for central '%s/%s' failed: %s",
+                  database, shard, database, planId, JSON.stringify(err2));
+  }
+  // Tell others that we are done:
+  try {
+    var jobInfo = global.KEY_GET("shardSynchronization", shard);
+    jobInfo.completed = ok;
+    global.KEY_SET("shardSynchronization", shard, jobInfo);
+  }
+  catch (e) {
+  }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief schedule a shard synchronization
+////////////////////////////////////////////////////////////////////////////////
+
+function scheduleOneShardSynchronization(database, shard, planId, leader) {
+  const registerTask = require("internal").registerTask;
+  console.debug("scheduleOneShardSynchronization:", database, shard, planId,
+                leader);
+  var scheduledJobs;
+  try {
+    scheduledJobs = global.KEYSPACE_GET("shardSynchronization");
+  }
+  catch (e) {
+    global.KEYSPACE_CREATE("shardSynchronization");
+    scheduledJobs = {};
+  }
+
+  var jobInfo;
+  if (scheduledJobs.hasOwnProperty(shard)) {
+    jobInfo = scheduledJobs[shard];
+    if (jobInfo.completed === undefined) {
+      console.debug("old task still running, ignoring scheduling request");
+      return;
+    }
+    global.KEY_REMOVE("shardSynchronization", shard);
+    if (jobInfo.completed) { // success!
+      console.debug("old task just finished successfully,",
+                    "ignoring scheduling request");
+      return;
+    }
+    console.debug("old task finished unsuccessfully, scheduling a new one");
+  }
+
+  // If we reach this, we actually have to schedule a new task:
+  jobInfo = { database, shard, planId, leader };
+  var job = registerTask({
+    database: database,
+    params: {database, shard, planId, leader},
+    command: function(params) {
+      require("@arangodb/cluster").synchronizeOneShard(
+        params.database, params.shard, params.planId, params.leader);
+    }});
+  console.debug("scheduleOneShardSynchronization: job:", job);
+  global.KEY_SET("shardSynchronization", shard, jobInfo);
+  console.debug("scheduleOneShardSynchronization: have scheduled job", jobInfo);
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief synchronize collections for which we are followers (synchronously
 /// replicated shards)
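The two functions above split shard repair into a scheduler and a detached V8 task, coordinating through the server-global "shardSynchronization" keyspace: one entry per shard, whose completed flag is undefined while a task runs, true on success, and false on failure. A minimal sketch of that handshake (KEY_GET/KEY_SET/KEY_REMOVE here are toy stand-ins that only mimic the behavior of ArangoDB's server globals):

    // Sketch: per-shard job dedup via a shared keyspace, as used above.
    var store = {}; // stand-in for the server-global keyspace

    function KEY_GET(space, key) { return store[key]; }
    function KEY_SET(space, key, val) { store[key] = val; }
    function KEY_REMOVE(space, key) { delete store[key]; }

    function schedule(shard) {
      var job = KEY_GET('shardSynchronization', shard);
      if (job && job.completed === undefined) { return 'still running, skip'; }
      if (job && job.completed) {
        KEY_REMOVE('shardSynchronization', shard);
        return 'already done, skip';
      }
      // completed stays undefined until the task reports back
      KEY_SET('shardSynchronization', shard, { shard: shard });
      return 'scheduled';
    }

    console.log(schedule('s1001')); // "scheduled"
    console.log(schedule('s1001')); // "still running, skip"
    KEY_SET('shardSynchronization', 's1001', { shard: 's1001', completed: true });
    console.log(schedule('s1001')); // "already done, skip"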
@@ -870,8 +1030,6 @@ function synchronizeLocalFollowerCollections (plannedCollections,
   var localDatabases = getLocalDatabases();
   var database;
 
-  var rep = require("@arangodb/replication");
-
   // iterate over all matching databases
   for (database in plannedCollections) {
     if (plannedCollections.hasOwnProperty(database)) {
@@ -911,84 +1069,9 @@ function synchronizeLocalFollowerCollections (plannedCollections,
                        "come back later to this shard...");
         } else {
           if (inCurrent.servers.indexOf(ourselves) === -1) {
-            // we not in there - must synchronize this shard from
-            // the leader
-            console.info("trying to synchronize local shard '%s/%s' for central '%s/%s'",
-                         database,
-                         shard,
-                         database,
-                         collInfo.planId);
-            try {
-              var ep = ArangoClusterInfo.getServerEndpoint(
-                inCurrent.servers[0]);
-              // First once without a read transaction:
-              var sy = rep.syncCollection(shard,
-                  { endpoint: ep, incremental: true,
-                    keepBarrier: true });
-              if (sy.error) {
-                console.error("Could not initially synchronize shard ", shard, sy);
-              } else {
-                if (sy.collections.length == 0 ||
-                    sy.collections[0].name != shard) {
-                  cancelBarrier(ep, database, sy.barrierId);
-                  throw "Shard seems to be gone from leader!";
-                } else {
-                  var ok = false;
-                  // Now start a read transaction to stop writes:
-                  var lockJobId = false;
-                  try {
-                    lockJobId = startReadLockOnLeader(ep, database,
-                                                      shard, 300);
-                    console.debug("lockJobId:", lockJobId);
-                  }
-                  catch (err1) {
-                    console.error("Exception in startReadLockOnLeader:", err1);
-                  }
-                  finally {
-                    cancelBarrier(ep, database, sy.barrierId);
-                  }
-                  if (lockJobId !== false) {
-                    try {
-                      var sy2 = rep.syncCollectionFinalize(
-                        database, shard, sy.collections[0].id,
-                        sy.lastLogTick, { endpoint: ep });
-                      if (sy2.error) {
-                        console.error("Could not synchronize shard", shard,
-                                      sy2);
-                        ok = false;
-                      } else {
-                        ok = addShardFollower(ep, database, shard);
-                      }
-                    }
-                    catch (err3) {
-                      console.error("Exception in syncCollectionFinalize:", err3);
-                    }
-                    finally {
-                      if (!cancelReadLockOnLeader(ep, database,
-                                                  lockJobId)) {
-                        console.error("Read lock has timed out for shard", shard);
-                        ok = false;
-                      }
-                    }
-                  } else {
-                    console.error("lockJobId was false");
-                  }
-                  if (ok) {
-                    console.info("Synchronization worked for shard", shard);
-                  } else {
-                    throw "Did not work."; // just to log below in catch
-                  }
-                }
-              }
-            }
-            catch (err2) {
-              console.error("synchronization of local shard '%s/%s' for central '%s/%s' failed: %s",
-                            database,
-                            shard,
-                            database,
-                            collInfo.planId,
-                            JSON.stringify(err2));
-            }
+            scheduleOneShardSynchronization(
+                database, shard, collInfo.planId,
+                inCurrent.servers[0]);
           }
         }
       }
@@ -1470,4 +1553,4 @@ exports.shardList = shardList;
 exports.status = status;
 exports.wait = waitForDistributedResponse;
 exports.endpointToURL = endpointToURL;
+exports.synchronizeOneShard = synchronizeOneShard;
@@ -114,6 +114,34 @@ function SynchronousReplicationSuite () {
     assertTrue(continueExternal(global.instanceInfo.arangods[pos].pid));
   }
 
+  ////////////////////////////////////////////////////////////////////////////////
+  /// @brief fail the leader
+  ////////////////////////////////////////////////////////////////////////////////
+
+  function failLeader() {
+    var leader = cinfo.shards[shards[0]][0];
+    var endpoint = global.ArangoClusterInfo.getServerEndpoint(leader);
+    // Now look for instanceInfo:
+    var pos = _.findIndex(global.instanceInfo.arangods,
+                          x => x.endpoint === endpoint);
+    assertTrue(pos >= 0);
+    assertTrue(suspendExternal(global.instanceInfo.arangods[pos].pid));
+  }
+
+  ////////////////////////////////////////////////////////////////////////////////
+  /// @brief heal the leader
+  ////////////////////////////////////////////////////////////////////////////////
+
+  function healLeader() {
+    var leader = cinfo.shards[shards[0]][0];
+    var endpoint = global.ArangoClusterInfo.getServerEndpoint(leader);
+    // Now look for instanceInfo:
+    var pos = _.findIndex(global.instanceInfo.arangods,
+                          x => x.endpoint === endpoint);
+    assertTrue(pos >= 0);
+    assertTrue(continueExternal(global.instanceInfo.arangods[pos].pid));
+  }
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief produce failure
 ////////////////////////////////////////////////////////////////////////////////
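failLeader and healLeader follow the suite's existing failFollower pattern: resolve the shard's current leader to a local dbserver process via its endpoint, then suspend or resume that process with suspendExternal/continueExternal so replication must cope with a frozen leader. A compact sketch of the lookup step (instance list hypothetical; lodash assumed available as in the suite):

    // Sketch: map a server endpoint to a local process, as the new helpers do.
    var _ = require('lodash');

    var arangods = [ // stand-in for global.instanceInfo.arangods
      { endpoint: 'tcp://127.0.0.1:8629', pid: 1111 },
      { endpoint: 'tcp://127.0.0.1:8630', pid: 2222 }
    ];

    function findPidByEndpoint(endpoint) {
      var pos = _.findIndex(arangods, function (x) { return x.endpoint === endpoint; });
      if (pos < 0) { throw new Error('no instance for ' + endpoint); }
      return arangods[pos].pid; // then suspendExternal(pid) / continueExternal(pid)
    }

    console.log(findPidByEndpoint('tcp://127.0.0.1:8630')); // 2222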
@@ -121,9 +149,8 @@ function SynchronousReplicationSuite () {
   function makeFailure(failure) {
     if (failure.follower) {
       failFollower();
-/* } else {
-      failLeader(); // TODO: function does not exist
-*/
+    } else {
+      failLeader();
     }
   }
 
@@ -134,9 +161,8 @@ function SynchronousReplicationSuite () {
   function healFailure(failure) {
     if (failure.follower) {
       healFollower();
-/* } else {
-      healLeader(); // TODO: function does not exist
-*/
+    } else {
+      healLeader();
     }
   }
 
@@ -509,6 +535,18 @@ function SynchronousReplicationSuite () {
       assertTrue(waitForSynchronousReplication("_system"));
     },
 
+    ////////////////////////////////////////////////////////////////////////////////
+    /// @brief run a standard check with failures:
+    ////////////////////////////////////////////////////////////////////////////////
+
+    testBasicOperationsFailureLeader : function () {
+      assertTrue(waitForSynchronousReplication("_system"));
+      failLeader();
+      runBasicOperations({}, {});
+      healLeader();
+      assertTrue(waitForSynchronousReplication("_system"));
+    },
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief just to allow a trailing comma at the end of the last test
 ////////////////////////////////////////////////////////////////////////////////