
don't return any in-progress indexes (#10431)

* don't return any in-progress indexes

* fix handling of in-progress indexes

* add test

* address review comment
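
In short: the change introduces a new serialization context, Serialization::PersistenceWithInProgress, that is used only on engine-internal persistence paths (collection markers, WAL/view records), while every user-facing path keeps filtering out indexes whose background build has not finished. A minimal, self-contained sketch of the resulting visibility rule, distilled from the LogicalCollection::appendVelocyPack hunk further down (simplified stand-ins, not the actual ArangoDB code):

#include <cassert>

// Abbreviated stand-in for LogicalDataSource::Serialization (see the header hunk below).
enum class Serialization { Properties, Persistence, PersistenceWithInProgress, Inventory };

// Sketch of the rule: hidden indexes are serialized only on persistence paths,
// and in-progress indexes only when the caller explicitly opts in via
// PersistenceWithInProgress.
bool includeIndexInOutput(bool isHidden, bool inProgress, Serialization context) {
  bool forPersistence = context == Serialization::Persistence ||
                        context == Serialization::PersistenceWithInProgress;
  bool showInProgress = context == Serialization::PersistenceWithInProgress;
  return (forPersistence || !isHidden) && (showInProgress || !inProgress);
}

int main() {
  // an index whose background build is still running stays invisible to users...
  assert(!includeIndexInOutput(false, true, Serialization::Properties));
  // ...but is still written out when the engine persists the collection definition
  assert(includeIndexInOutput(false, true, Serialization::PersistenceWithInProgress));
  return 0;
}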
Authored by Jan on 2019-11-14 12:08:38 +01:00, committed by KVS85
parent a17ec21767
commit e7db6d3097
12 changed files with 179 additions and 29 deletions

View File

@@ -363,11 +363,12 @@ arangodb::Result IResearchView::appendVelocyPackImpl( // append JSON
static const std::function<bool(irs::string_ref const& key)> persistenceAcceptor =
[](irs::string_ref const&) -> bool { return true; };
auto& acceptor = context == Serialization::Persistence || context == Serialization::Inventory
auto& acceptor =
(context == Serialization::Persistence || context == Serialization::PersistenceWithInProgress || context == Serialization::Inventory)
? persistenceAcceptor
: propertiesAcceptor;
if (context == Serialization::Persistence) {
if (context == Serialization::Persistence || context == Serialization::PersistenceWithInProgress) {
if (arangodb::ServerState::instance()->isSingleServer()) {
auto res = arangodb::LogicalViewHelperStorageEngine::properties(builder, *this);
@@ -404,7 +405,7 @@ arangodb::Result IResearchView::appendVelocyPackImpl( // append JSON
return {};
}
if (context == Serialization::Persistence) {
if (context == Serialization::Persistence || context == Serialization::PersistenceWithInProgress) {
IResearchViewMetaState metaState;
for (auto& entry : _links) {

View File

@@ -190,7 +190,9 @@ arangodb::Result IResearchViewCoordinator::appendVelocyPackImpl(
auto* acceptor = &propertiesAcceptor;
if (context == Serialization::Persistence || context == Serialization::Inventory) {
if (context == Serialization::Persistence ||
context == Serialization::PersistenceWithInProgress ||
context == Serialization::Inventory) {
auto res = arangodb::LogicalViewHelperClusterInfo::properties(builder, *this);
if (!res.ok()) {

View File

@@ -296,7 +296,7 @@ arangodb::Result MMFilesCollection::persistProperties() {
try {
auto infoBuilder = _logicalCollection.toVelocyPackIgnore(
{"path", "statusString"},
LogicalDataSource::Serialization::Persistence);
LogicalDataSource::Serialization::PersistenceWithInProgress);
MMFilesCollectionMarker marker(TRI_DF_MARKER_VPACK_CHANGE_COLLECTION,
_logicalCollection.vocbase().id(),
_logicalCollection.id(), infoBuilder.slice());
@@ -2284,7 +2284,7 @@ std::shared_ptr<Index> MMFilesCollection::createIndex(transaction::Methods& trx,
if (!engine->inRecovery()) {
auto builder = _logicalCollection.toVelocyPackIgnore(
{"path", "statusString"},
LogicalDataSource::Serialization::Persistence);
LogicalDataSource::Serialization::PersistenceWithInProgress);
_logicalCollection.properties(builder.slice(),
false); // always a full-update
}
@@ -2422,7 +2422,7 @@ bool MMFilesCollection::dropIndex(TRI_idx_iid_t iid) {
{
auto builder = _logicalCollection.toVelocyPackIgnore(
{"path", "statusString"},
LogicalDataSource::Serialization::Persistence);
LogicalDataSource::Serialization::PersistenceWithInProgress);
_logicalCollection.properties(builder.slice(),
false); // always a full-update

View File

@@ -215,6 +215,9 @@ RestStatus RestIndexHandler::getSelectivityEstimates() {
builder.add(StaticStrings::Code, VPackValue(static_cast<int>(rest::ResponseCode::OK)));
builder.add("indexes", VPackValue(VPackValueType::Object));
for (std::shared_ptr<Index> idx : idxs) {
if (idx->inProgress() || idx->isHidden()) {
continue;
}
std::string name = coll->name();
name.push_back(TRI_INDEX_HANDLE_SEPARATOR_CHR);
name.append(std::to_string(idx->id()));
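
With the added guard, the selectivity-estimates response simply skips any index that is hidden or whose background build has not finished. A self-contained sketch of the loop's effect (IndexStub and selectivityEstimates are made up for illustration; the real handle separator is TRI_INDEX_HANDLE_SEPARATOR_CHR, i.e. "collection/indexId"):

#include <cstdint>
#include <map>
#include <string>
#include <vector>

// Simplified stand-in for arangodb::Index, just enough for the sketch.
struct IndexStub {
  std::uint64_t id;
  bool hidden;      // Index::isHidden()
  bool inProgress;  // Index::inProgress()
  double estimate;  // selectivity estimate
};

// Mirrors the loop above: hidden and in-progress indexes are skipped, the rest
// are keyed by their handle "<collection>/<index id>".
std::map<std::string, double> selectivityEstimates(std::string const& collection,
                                                   std::vector<IndexStub> const& idxs) {
  std::map<std::string, double> out;
  for (auto const& idx : idxs) {
    if (idx.inProgress || idx.hidden) {
      continue;  // never expose an index that is hidden or still being built
    }
    out[collection + "/" + std::to_string(idx.id)] = idx.estimate;
  }
  return out;
}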

View File

@@ -466,7 +466,7 @@ std::shared_ptr<Index> RocksDBCollection::createIndex(VPackSlice const& info,
if (!engine->inRecovery()) { // write new collection marker
auto builder = _logicalCollection.toVelocyPackIgnore(
{"path", "statusString"},
LogicalDataSource::Serialization::Persistence);
LogicalDataSource::Serialization::PersistenceWithInProgress);
VPackBuilder indexInfo;
idx->toVelocyPack(indexInfo, Index::makeFlags(Index::Serialize::Internals));
res = engine->writeCreateCollectionMarker(_logicalCollection.vocbase().id(),
@@ -548,7 +548,7 @@ bool RocksDBCollection::dropIndex(TRI_idx_iid_t iid) {
auto builder = // RocksDB path
_logicalCollection.toVelocyPackIgnore(
{"path", "statusString"},
LogicalDataSource::Serialization::Persistence);
LogicalDataSource::Serialization::PersistenceWithInProgress);
// log this event in the WAL and in the collection meta-data
res = engine->writeCreateCollectionMarker( // write marker

View File

@@ -1241,7 +1241,7 @@ std::string RocksDBEngine::createCollection(TRI_vocbase_t& vocbase,
auto builder = collection.toVelocyPackIgnore(
{"path", "statusString"},
LogicalDataSource::Serialization::Persistence);
LogicalDataSource::Serialization::PersistenceWithInProgress);
TRI_UpdateTickServer(static_cast<TRI_voc_tick_t>(cid));
int res =
@@ -1399,7 +1399,7 @@ void RocksDBEngine::changeCollection(TRI_vocbase_t& vocbase,
LogicalCollection const& collection, bool doSync) {
auto builder = collection.toVelocyPackIgnore(
{"path", "statusString"},
LogicalDataSource::Serialization::Persistence);
LogicalDataSource::Serialization::PersistenceWithInProgress);
int res =
writeCreateCollectionMarker(vocbase.id(), collection.id(), builder.slice(),
RocksDBLogValue::CollectionChange(vocbase.id(),
@@ -1415,7 +1415,7 @@ arangodb::Result RocksDBEngine::renameCollection(TRI_vocbase_t& vocbase,
std::string const& oldName) {
auto builder = collection.toVelocyPackIgnore(
{"path", "statusString"},
LogicalDataSource::Serialization::Persistence);
LogicalDataSource::Serialization::PersistenceWithInProgress);
int res = writeCreateCollectionMarker(
vocbase.id(), collection.id(), builder.slice(),
RocksDBLogValue::CollectionRename(vocbase.id(), collection.id(), arangodb::velocypack::StringRef(oldName)));
@@ -1442,7 +1442,7 @@ Result RocksDBEngine::createView(TRI_vocbase_t& vocbase, TRI_voc_cid_t id,
VPackBuilder props;
props.openObject();
view.properties(props, LogicalDataSource::Serialization::Persistence);
view.properties(props, LogicalDataSource::Serialization::PersistenceWithInProgress);
props.close();
RocksDBValue const value = RocksDBValue::View(props.slice());
@@ -1467,7 +1467,7 @@ arangodb::Result RocksDBEngine::dropView(TRI_vocbase_t const& vocbase,
VPackBuilder builder;
builder.openObject();
view.properties(builder, LogicalDataSource::Serialization::Persistence);
view.properties(builder, LogicalDataSource::Serialization::PersistenceWithInProgress);
builder.close();
auto logValue =
@@ -1512,7 +1512,7 @@ Result RocksDBEngine::changeView(TRI_vocbase_t& vocbase,
VPackBuilder infoBuilder;
infoBuilder.openObject();
view.properties(infoBuilder, LogicalDataSource::Serialization::Persistence);
view.properties(infoBuilder, LogicalDataSource::Serialization::PersistenceWithInProgress);
infoBuilder.close();
RocksDBLogValue log = RocksDBLogValue::ViewChange(vocbase.id(), view.id());

View File

@@ -565,6 +565,8 @@ std::pair<bool, bool> transaction::Methods::findIndexHandleForAndNode(
auto considerIndex = [&bestIndex, &bestCost, &bestSupportsFilter, &bestSupportsSort,
&indexes, node, reference, itemsInCollection,
&sortCondition](std::shared_ptr<Index> const& idx) -> void {
TRI_ASSERT(!idx->inProgress());
double filterCost = 0.0;
double sortCost = 0.0;
size_t itemsInIndex = itemsInCollection;
@@ -2941,6 +2943,8 @@ bool transaction::Methods::getIndexForSortCondition(
auto considerIndex = [reference, sortCondition, itemsInIndex, &bestCost, &bestIndex,
&coveredAttributes](std::shared_ptr<Index> const& idx) -> void {
TRI_ASSERT(!idx->inProgress());
Index::SortCosts costs =
idx->supportsSortCondition(sortCondition, reference, itemsInIndex);
if (costs.supportsCondition &&
@@ -3016,6 +3020,7 @@ std::unique_ptr<IndexIterator> transaction::Methods::indexScanForCondition(
}
// Now create the Iterator
TRI_ASSERT(!idx->inProgress());
return idx->iteratorForCondition(this, condition, var, opts);
}
@@ -3218,7 +3223,7 @@ Result transaction::Methods::unlockRecursive(TRI_voc_cid_t cid, AccessMode::Type
/// @brief get list of indexes for a collection
std::vector<std::shared_ptr<Index>> transaction::Methods::indexesForCollection(
std::string const& collectionName, bool withHidden) {
std::string const& collectionName) {
if (_state->isCoordinator()) {
return indexesForCollectionCoordinator(collectionName);
}
@@ -3227,13 +3232,12 @@ std::vector<std::shared_ptr<Index>> transaction::Methods::indexesForCollection(
TRI_voc_cid_t cid = addCollectionAtRuntime(collectionName, AccessMode::Type::READ);
LogicalCollection* document = documentCollection(trxCollection(cid));
std::vector<std::shared_ptr<Index>> indexes = document->getIndexes();
if (!withHidden) {
indexes.erase(std::remove_if(indexes.begin(), indexes.end(),
[](std::shared_ptr<Index> x) {
return x->isHidden();
}),
indexes.end());
}
indexes.erase(std::remove_if(indexes.begin(), indexes.end(),
[](std::shared_ptr<Index> const& x) {
return x->isHidden();
}),
indexes.end());
return indexes;
}
@@ -3264,7 +3268,14 @@ std::vector<std::shared_ptr<Index>> transaction::Methods::indexesForCollectionCo
collection->clusterIndexEstimates(true);
}
return collection->getIndexes();
std::vector<std::shared_ptr<Index>> indexes = collection->getIndexes();
indexes.erase(std::remove_if(indexes.begin(), indexes.end(),
[](std::shared_ptr<Index> const& x) {
return x->isHidden();
}),
indexes.end());
return indexes;
}
/// @brief get the index by its identifier. Will either throw or
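
Both hunks above now strip hidden indexes with the standard erase-remove idiom before returning the vector to the caller. A standalone illustration of that idiom (IndexStub is a made-up stand-in for arangodb::Index):

#include <algorithm>
#include <memory>
#include <vector>

struct IndexStub {
  bool hidden;
  bool isHidden() const { return hidden; }  // stand-in for Index::isHidden()
};

// std::remove_if shifts the elements to keep to the front and returns the new
// logical end; the subsequent erase() drops the leftover tail in one pass.
void stripHiddenIndexes(std::vector<std::shared_ptr<IndexStub>>& indexes) {
  indexes.erase(std::remove_if(indexes.begin(), indexes.end(),
                               [](std::shared_ptr<IndexStub> const& x) {
                                 return x->isHidden();
                               }),
                indexes.end());
}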

View File

@@ -392,7 +392,7 @@ class Methods {
/// @brief get all indexes for a collection name
ENTERPRISE_VIRT std::vector<std::shared_ptr<arangodb::Index>> indexesForCollection(
std::string const&, bool withHidden = false);
std::string const& collectionName);
/// @brief Lock all collections. Only works for selected sub-classes
virtual int lockCollections();

View File

@@ -630,7 +630,8 @@ void LogicalCollection::toVelocyPackForClusterInventory(VPackBuilder& result,
arangodb::Result LogicalCollection::appendVelocyPack(arangodb::velocypack::Builder& result,
Serialization context) const {
bool const forPersistence = (context == Serialization::Persistence);
bool const forPersistence = (context == Serialization::Persistence || context == Serialization::PersistenceWithInProgress);
bool const showInProgress = (context == Serialization::PersistenceWithInProgress);
// We write into an open object
TRI_ASSERT(result.isOpenObject());
@@ -673,8 +674,8 @@ arangodb::Result LogicalCollection::appendVelocyPack(arangodb::velocypack::Build
if (forPersistence) {
indexFlags = Index::makeFlags(Index::Serialize::Internals);
}
auto filter = [indexFlags, forPersistence](arangodb::Index const* idx, decltype(Index::makeFlags())& flags) {
if (forPersistence || (!idx->inProgress() && !idx->isHidden())) {
auto filter = [indexFlags, forPersistence, showInProgress](arangodb::Index const* idx, decltype(Index::makeFlags())& flags) {
if ((forPersistence || !idx->isHidden()) && (showInProgress || !idx->inProgress())) {
flags = indexFlags;
return true;
}

View File

@@ -210,7 +210,7 @@ Result LogicalDataSource::properties(velocypack::Builder& builder,
// note: includeSystem and forPersistence are not 100% synonymous,
// however, for our purposes this is an okay mapping; we only set
// includeSystem if we are persisting the properties
if (context == Serialization::Persistence) {
if (context == Serialization::Persistence || context == Serialization::PersistenceWithInProgress) {
builder.add(StaticStrings::DataSourceDeleted, velocypack::Value(deleted()));
builder.add(StaticStrings::DataSourceSystem, velocypack::Value(system()));

View File

@@ -137,6 +137,8 @@ class LogicalDataSource {
Properties,
// object will be saved in storage engine
Persistence,
// object will be saved in storage engine
PersistenceWithInProgress,
// object will be replicated or dumped/restored
Inventory
};

View File

@@ -0,0 +1,130 @@
/*jshint globalstrict:false, strict:false */
/*global assertEqual, assertNotEqual, assertTrue */
////////////////////////////////////////////////////////////////////////////////
/// @brief test index usage
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Copyright 2018, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
let jsunity = require("jsunity");
let arangodb = require("@arangodb");
let db = arangodb.db;
let tasks = require("@arangodb/tasks");
function IndexUsageSuite () {
const cnData = "UnitTestsCollection"; // used for test data
const cnComm = "UnitTestsCommunication"; // used for communication
return {
setUp : function () {
db._drop(cnData);
db._drop(cnComm);
db._create(cnData);
db._create(cnComm);
let docs = [];
for (let i = 0; i < 5000; ++i) {
docs.push({ value: "test" + i });
}
db[cnData].insert(docs);
},
tearDown : function () {
db._drop(cnData);
db._drop(cnComm);
},
testIndexUsage : function () {
let task = tasks.register({
command: function(params) {
require('jsunity').jsUnity.attachAssertions();
let db = require("internal").db;
let comm = db[params.cnComm];
let errors = require("@arangodb").errors;
comm.insert({ _key: "runner1", value: 0 });
while (!comm.exists("runner2")) {
require("internal").sleep(0.02);
}
let success = 0;
let time = require("internal").time;
let start = time();
do {
try {
db._query("FOR doc IN " + params.cnData + " FILTER doc.value > 10 LIMIT 10 RETURN doc");
comm.update("runner1", { value: ++success });
} catch (err) {
// if the index that was picked for the query is dropped in the meantime,
// we will get the following error back
assertEqual(err.errorNum, errors.ERROR_QUERY_BAD_JSON_PLAN.code);
}
} while (time() - start < 10.0);
},
params: { cnComm, cnData }
});
let comm = db[cnComm];
comm.insert({ _key: "runner2" });
while (!comm.exists("runner1")) {
require("internal").sleep(0.02);
}
let time = require("internal").time;
let start = time();
let success = 0;
do {
let indexes = db[cnData].indexes();
if (indexes.length > 1) {
db[cnData].dropIndex(indexes[1]);
}
db[cnData].ensureIndex({ type: "hash", fields: ["value"], inBackground: true });
++success;
} while (time() - start < 10.0);
while (true) {
try {
tasks.get(task);
require("internal").wait(0.25, false);
} catch (err) {
// "task not found" means the task is finished
break;
}
}
assertEqual(2, comm.count());
let doc = comm.document("runner1");
assertTrue(doc.value > 0, doc);
assertTrue(success > 0, success);
},
};
}
jsunity.run(IndexUsageSuite);
return jsunity.done();