mirror of https://gitee.com/bigwinds/arangodb
Bug fix 3.3/fixes 1805 (#5392)
This commit is contained in:
parent 8b8476c77e
commit 0740cb91e2
@@ -852,24 +852,23 @@ void Index::warmup(arangodb::transaction::Methods*,
   // it has to explicitly implement it.
 }

-std::pair<bool,double> Index::updateClusterEstimate(double defaultValue) {
-  // try to receive an selectivity estimate for the index
+std::pair<bool, double> Index::updateClusterEstimate(double defaultValue) {
+  // try to receive a selectivity estimate for the index
   // from indexEstimates stored in the logical collection.
   // the caller has to guarantee that the _collection is valid.
   // on the coordinator _collection is not always vaild!
-
-  std::pair<bool,double> rv(false,defaultValue);
-
-  auto estimates = _collection->clusterIndexEstimates();
+  std::pair<bool, double> rv(false, defaultValue);
+
+  auto estimates = _collection->clusterIndexEstimates(true);
   auto found = estimates.find(std::to_string(_iid));

-  if( found != estimates.end()){
+  if ( found != estimates.end()) {
     rv.first = true;
     rv.second = found->second;
     _clusterSelectivity = rv.second;
   }
   return rv;
-};
+}

 /// @brief append the index description to an output stream
 std::ostream& operator<<(std::ostream& stream, arangodb::Index const* index) {
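
The substantive change in this hunk is the `clusterIndexEstimates(true)` call: Index::updateClusterEstimate() now reads the estimates already cached on the LogicalCollection (the doNotUpdate flag declared in LogicalCollection.h further down) instead of possibly triggering a refresh from inside the index. A minimal sketch of that read-only lookup, with illustrative names rather than the actual ArangoDB types:

#include <string>
#include <unordered_map>
#include <utility>

// Hypothetical stand-ins for the cached estimate map and the per-index lookup
// performed by Index::updateClusterEstimate().
std::pair<bool, double> lookupCachedEstimate(
    std::unordered_map<std::string, double> const& cachedEstimates,
    std::string const& indexId, double defaultValue) {
  std::pair<bool, double> rv(false, defaultValue);  // "not found" + fallback value
  auto found = cachedEstimates.find(indexId);
  if (found != cachedEstimates.end()) {
    rv.first = true;            // an estimate was available
    rv.second = found->second;  // the cached selectivity for this index
  }
  return rv;
}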
@@ -388,7 +388,6 @@ void RocksDBCollection::prepareIndexes(
     }
   }

-#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
   if (_indexes[0]->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX ||
       (_logicalCollection->type() == TRI_COL_TYPE_EDGE &&
        (_indexes[1]->type() != Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX ||
@@ -396,11 +395,13 @@ void RocksDBCollection::prepareIndexes(
     LOG_TOPIC(ERR, arangodb::Logger::FIXME)
         << "got invalid indexes for collection '" << _logicalCollection->name()
         << "'";
+#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
     for (auto it : _indexes) {
-      LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "- " << it.get();
+      LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "- " << it->context();
     }
-  }
 #endif
+    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER, std::string("got invalid indexes for collection '") + _logicalCollection->name() + "'");
+  }
 }

 static std::shared_ptr<Index> findIndex(
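
The behavioural change in prepareIndexes() is that an invalid index layout no longer only logs (and the check no longer exists solely in maintainer mode): it now throws TRI_ERROR_BAD_PARAMETER, so collection creation fails and the error reaches the caller, which the new shell test at the end of this commit checks for. A rough sketch of that validate-then-throw shape, using generic standard-library pieces rather than ArangoDB's macros and types:

#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Illustrative only: indexTypes stands in for the prepared _indexes vector.
void checkIndexLayout(std::vector<std::string> const& indexTypes,
                      std::string const& collectionName) {
  if (indexTypes.empty() || indexTypes[0] != "primary") {
    // keep the diagnostic output for debugging ...
    for (auto const& t : indexTypes) {
      std::cerr << "- " << t << '\n';
    }
    // ... but fail the operation instead of carrying on with a broken layout
    throw std::invalid_argument("got invalid indexes for collection '" +
                                collectionName + "'");
  }
}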
@@ -2932,9 +2932,9 @@ transaction::Methods::indexesForCollectionCoordinator(
   std::shared_ptr<LogicalCollection> collection = clusterInfo->getCollection(databaseName(), name);
   std::vector<std::shared_ptr<Index>> indexes = collection->getIndexes();

-  collection->clusterIndexEstimates(); // update estiamtes in logical collection
+  collection->clusterIndexEstimates(); // update estimates in logical collection
   // push updated values into indexes
-  for(auto i : indexes){
+  for (auto i : indexes) {
     i->updateClusterEstimate();
   }

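
On the coordinator the flow is: refresh the collection-level estimate map once, then push the per-index values into the individual Index objects via updateClusterEstimate(). A small sketch of that refresh-once / fan-out step (the types here are made up for illustration, not ArangoDB classes):

#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

struct IndexStub {
  std::string id;
  double selectivity = 0.1;
  void updateFrom(std::unordered_map<std::string, double> const& estimates) {
    auto it = estimates.find(id);
    if (it != estimates.end()) {
      selectivity = it->second;  // adopt the freshly fetched estimate
    }
  }
};

// Fetch the map once, then let every index copy out its own entry.
void refreshIndexEstimates(std::unordered_map<std::string, double> const& estimates,
                           std::vector<std::shared_ptr<IndexStub>>& indexes) {
  for (auto& i : indexes) {
    i->updateFrom(estimates);  // push updated values into each index
  }
}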
@@ -624,16 +624,16 @@ std::unique_ptr<FollowerInfo> const& LogicalCollection::followers() const {
 void LogicalCollection::setDeleted(bool newValue) { _isDeleted = newValue; }

 // SECTION: Indexes
-std::unordered_map<std::string, double> LogicalCollection::clusterIndexEstimates(bool doNotUpdate){
+std::unordered_map<std::string, double> LogicalCollection::clusterIndexEstimates(bool doNotUpdate) {
   READ_LOCKER(readlock, _clusterEstimatesLock);
   if (doNotUpdate) {
     return _clusterEstimates;
   }

-  double ctime = TRI_microtime(); // in seconds
-  auto needEstimateUpdate = [this,ctime](){
-    if(_clusterEstimates.empty()) {
-      LOG_TOPIC(TRACE, Logger::CLUSTER) << "update because estimate is not availabe";
+  double const ctime = TRI_microtime(); // in seconds
+  auto needEstimateUpdate = [this,ctime]() {
+    if (_clusterEstimates.empty()) {
+      LOG_TOPIC(TRACE, Logger::CLUSTER) << "update because estimate is not available";
       return true;
     } else if (ctime - _clusterEstimateTTL > 60.0) {
       LOG_TOPIC(TRACE, Logger::CLUSTER) << "update because estimate is too old: " << ctime - _clusterEstimateTTL;
@@ -642,10 +642,10 @@ std::unordered_map<std::string, double> LogicalCollection::clusterIndexEstimates
     return false;
   };

-  if (needEstimateUpdate()){
+  if (needEstimateUpdate()) {
     readlock.unlock();
     WRITE_LOCKER(writelock, _clusterEstimatesLock);
-    if(needEstimateUpdate()){
+    if (needEstimateUpdate()) {
       selectivityEstimatesOnCoordinator(_vocbase->name(), name(), _clusterEstimates);
       _clusterEstimateTTL = TRI_microtime();
     }
@@ -654,7 +654,7 @@ std::unordered_map<std::string, double> LogicalCollection::clusterIndexEstimates
   return _clusterEstimates;
 }

-void LogicalCollection::clusterIndexEstimates(std::unordered_map<std::string, double>&& estimates){
+void LogicalCollection::clusterIndexEstimates(std::unordered_map<std::string, double>&& estimates) {
   WRITE_LOCKER(lock, _clusterEstimatesLock);
   _clusterEstimates = std::move(estimates);
 }
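
clusterIndexEstimates() itself is a TTL-guarded cache with a double-checked update: callers take the read lock and return the cached map when doNotUpdate is set or the cache is still fresh (empty map or older than 60 seconds counts as stale); only then does the function drop the read lock, take the write lock, re-evaluate needEstimateUpdate() (another thread may have refreshed during the lock switch), fetch new values via selectivityEstimatesOnCoordinator(), and record the refresh time in _clusterEstimateTTL. A self-contained sketch of that pattern with assumed names, using std::shared_mutex in place of ArangoDB's READ_LOCKER/WRITE_LOCKER:

#include <chrono>
#include <mutex>
#include <shared_mutex>
#include <string>
#include <unordered_map>

class EstimateCache {
 public:
  std::unordered_map<std::string, double> get(bool doNotUpdate = false) {
    std::shared_lock<std::shared_mutex> readLock(_lock);
    if (doNotUpdate || !needsUpdate()) {
      return _estimates;  // serve the cached values under the read lock
    }
    readLock.unlock();

    std::unique_lock<std::shared_mutex> writeLock(_lock);
    if (needsUpdate()) {  // re-check: another thread may have refreshed already
      _estimates = fetchFromCluster();  // stand-in for selectivityEstimatesOnCoordinator
      _lastUpdate = std::chrono::steady_clock::now();
    }
    return _estimates;
  }

 private:
  bool needsUpdate() const {
    // refresh when nothing is cached yet or the cache is older than 60 seconds
    return _estimates.empty() ||
           std::chrono::steady_clock::now() - _lastUpdate > std::chrono::seconds(60);
  }
  static std::unordered_map<std::string, double> fetchFromCluster() {
    return {{"0", 1.0}};  // placeholder payload
  }

  std::shared_mutex _lock;
  std::unordered_map<std::string, double> _estimates;
  std::chrono::steady_clock::time_point _lastUpdate{};
};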
@@ -196,14 +196,14 @@ class LogicalCollection {
   //// SECTION: Indexes

   // Estimates
-  std::unordered_map<std::string, double> clusterIndexEstimates(bool doNotUpdate=false);
+  std::unordered_map<std::string, double> clusterIndexEstimates(bool doNotUpdate = false);
   void clusterIndexEstimates(std::unordered_map<std::string, double>&& estimates);

-  double clusterIndexEstimatesTTL(){
+  double clusterIndexEstimatesTTL() const {
     return _clusterEstimateTTL;
   }

-  void clusterIndexEstimatesTTL(double ttl){
+  void clusterIndexEstimatesTTL(double ttl) {
     _clusterEstimateTTL = ttl;
   }
   // End - Estimates
@@ -0,0 +1,81 @@
+/*jshint globalstrict:false, strict:false */
+/*global assertEqual, assertNull, fail */
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief test the collection interface
+///
+/// @file
+///
+/// DISCLAIMER
+///
+/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
+///
+/// Licensed under the Apache License, Version 2.0 (the "License");
+/// you may not use this file except in compliance with the License.
+/// You may obtain a copy of the License at
+///
+/// http://www.apache.org/licenses/LICENSE-2.0
+///
+/// Unless required by applicable law or agreed to in writing, software
+/// distributed under the License is distributed on an "AS IS" BASIS,
+/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+/// See the License for the specific language governing permissions and
+/// limitations under the License.
+///
+/// Copyright holder is triAGENS GmbH, Cologne, Germany
+///
+/// @author Dr. Frank Celler
+/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
+////////////////////////////////////////////////////////////////////////////////
+
+var jsunity = require("jsunity");
+
+var arangodb = require("@arangodb");
+var internal = require("internal");
+
+var ArangoCollection = arangodb.ArangoCollection;
+var db = arangodb.db;
+var ERRORS = arangodb.errors;
+
+function CollectionSuite() {
+  let cn = "example";
+  return {
+    setUp: function() {
+      db._drop(cn);
+    },
+
+    tearDown: function() {
+      db._drop(cn);
+    },
+
+    testCreateWithInvalidIndexes1 : function () {
+      try {
+        db._create(cn, { indexes: [{ id: "1", type: "edge", fields: ["_from"] }] });
+        fail();
+      } catch (err) {
+        assertEqual(ERRORS.ERROR_BAD_PARAMETER.code, err.errorNum);
+      }
+
+      assertNull(db._collection(cn));
+    },
+
+    testCreateWithInvalidIndexes2 : function () {
+      let cn = "example";
+
+      db._drop(cn);
+      try {
+        db._create(cn, { indexes: [{ id: "1234", type: "hash", fields: ["a"] }] });
+        fail();
+      } catch (err) {
+        assertEqual(ERRORS.ERROR_BAD_PARAMETER.code, err.errorNum);
+      }
+
+      assertNull(db._collection(cn));
+    }
+
+  };
+}
+
+jsunity.run(CollectionSuite);
+
+return jsunity.done();