1
0
Fork 0

attempt to make test more reliable

This commit is contained in:
jsteemann 2018-10-17 20:32:35 +02:00
parent 285c1adb1d
commit b4a4ed3bc8
1 changed file with 41 additions and 31 deletions

View File

@ -31,7 +31,6 @@
var jsunity = require("jsunity"); var jsunity = require("jsunity");
var internal = require("internal"); var internal = require("internal");
var arangodb = require("@arangodb"); var arangodb = require("@arangodb");
var ERRORS = arangodb.errors;
var db = arangodb.db; var db = arangodb.db;
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -40,6 +39,9 @@ var db = arangodb.db;
function ClusterCollectionSuite () { function ClusterCollectionSuite () {
'use strict'; 'use strict';
const cn = "UnitTestsClusterCrudRepl";
return { return {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -47,6 +49,11 @@ function ClusterCollectionSuite () {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
setUp : function () { setUp : function () {
db._drop(cn);
// number of shards is one so the estimates behave like in the single server
// if the shard number is higher we could just ensure that the estimate
// should be between 0 and 1
db._create(cn, { numberOfShards: 1, replicationFactor: 1});
}, },
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -54,44 +61,47 @@ function ClusterCollectionSuite () {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
tearDown : function () { tearDown : function () {
try { db._drop(cn);
db._drop("UnitTestsClusterCrud");
}
catch (err) {
}
}, },
testIndexEstimates : function () { testIndexEstimates : function () {
// index estimate only availalbe with rocksdb for skiplist let c = db._collection(cn);
if (db._engine().name === 'rocksdb') {
var cn = "UnitTestsClusterCrudRepl"; c.ensureIndex({type:"skiplist", fields:["foo"]});
// numer of shards is one so the estimages behave like in the single server
// if the shard number is higher we could just ensure theat the estimate
// should be between 0 and 1
var c = db._create(cn, { numberOfShards: 1, replicationFactor: 1});
c.ensureIndex({type:"skiplist", fields:["foo"]}); for (let i = 0; i < 10; ++i) {
c.save({foo: i});
var i; }
var indexes; // waitForEstimatorSync does nothing in the cluster but waiting...
internal.waitForEstimatorSync(); // make sure estimates are consistent
for(i=0; i < 10; ++i){ let indexes;
c.save({foo: i}); let tries = 0;
} while (++tries < 60) {
internal.waitForEstimatorSync(); // make sure estimates are consistent
indexes = c.getIndexes(true); indexes = c.getIndexes(true);
// if this fails, increase wait-time in ClusterEngine::waitForEstimatorSync // if this fails, increase wait-time in ClusterEngine::waitForEstimatorSync
assertEqual(indexes[1].selectivityEstimate, 1); if (indexes[1].selectivityEstimate >= 0.999) {
break;
for(i=0; i < 10; ++i){
c.save({foo: i});
} }
internal.waitForEstimatorSync(); // make sure estimates are consistent internal.wait(0.5, false);
indexes = c.getIndexes(true);
assertEqual(indexes[1].selectivityEstimate, 0.5);
db._drop(cn);
} }
assertEqual(indexes[1].selectivityEstimate, 1);
for (let i = 0; i < 10; ++i) {
c.save({foo: i});
}
internal.waitForEstimatorSync(); // make sure estimates are consistent
tries = 0;
while (++tries < 60) {
indexes = c.getIndexes(true);
// if this fails, increase wait-time in ClusterEngine::waitForEstimatorSync
if (indexes[1].selectivityEstimate <= 0.501) {
break;
}
internal.wait(0.5, false);
}
assertEqual(indexes[1].selectivityEstimate, 0.5);
} }
}; };