1
0
Fork 0

issue 511.9.1: track DataSource instance instead of only its name in QueryCache (#7943)

* issue 511.9.1: track DataSource instance instead of only its name in QueryCache

* address use of invalid function

* fix expected test result

* address merge issue
This commit is contained in:
Vasiliy 2019-01-14 16:25:18 +03:00 committed by Andrey Abramov
parent 5670698d7b
commit fac48ddbc1
20 changed files with 307 additions and 209 deletions

View File

@ -86,7 +86,7 @@ Result AqlTransaction::processCollection(aql::Collection* collection) {
LogicalCollection* AqlTransaction::documentCollection(TRI_voc_cid_t cid) {
TransactionCollection* trxColl = this->trxCollection(cid);
TRI_ASSERT(trxColl != nullptr);
return trxColl->collection();
return trxColl->collection().get();
}
/// @brief lockCollections, this is needed in a corner case in AQL: we need

View File

@ -75,6 +75,7 @@ LogicalDataSource::Category const* injectDataSourceInQuery(
// NOTE The name may be modified if a numeric collection ID is given instead
// of a collection Name. Afterwards it will contain the name.
auto const dataSource = resolver.getDataSource(name);
if (dataSource == nullptr) {
// datasource not found...
if (failIfDoesNotExist) {
@ -90,6 +91,7 @@ LogicalDataSource::Category const* injectDataSourceInQuery(
return LogicalCollection::category();
}
// query actual name from datasource... this may be different to the
// name passed into this function, because the user may have accessed
// the collection by its numeric id
@ -114,7 +116,7 @@ LogicalDataSource::Category const* injectDataSourceInQuery(
}
} else if (dataSource->category() == LogicalView::category()) {
// it's a view!
query.addView(dataSourceName);
query.addDataSource(dataSource);
// Make sure to add all collections now:
resolver.visitCollections(
@ -127,6 +129,7 @@ LogicalDataSource::Category const* injectDataSourceInQuery(
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"unexpected datasource type");
}
return dataSource->category();
}

View File

@ -44,6 +44,7 @@
#include "Logger/Logger.h"
#include "RestServer/AqlFeature.h"
#include "RestServer/QueryRegistryFeature.h"
#include "StorageEngine/TransactionCollection.h"
#include "StorageEngine/TransactionState.h"
#include "Transaction/Methods.h"
#include "Transaction/StandaloneContext.h"
@ -53,6 +54,7 @@
#include "V8/v8-conv.h"
#include "V8/v8-vpack.h"
#include "V8Server/V8DealerFeature.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/vocbase.h"
#include <velocypack/Iterator.h>
@ -215,6 +217,12 @@ Query::~Query() {
AqlFeature::unlease();
}
void Query::addDataSource( // track DataSource
std::shared_ptr<arangodb::LogicalDataSource> const& ds // DataSource to track
) {
_queryDataSources.emplace(ds);
}
/// @brief clone a query
/// note: as a side-effect, this will also create and start a transaction for
/// the query
@ -564,11 +572,17 @@ ExecutionState Query::execute(QueryRegistry* registry, QueryResult& queryResult)
if (cacheEntry != nullptr) {
bool hasPermissions = true;
ExecContext const* exe = ExecContext::CURRENT;
// got a result from the query cache
if (exe != nullptr) {
for (std::string const& dataSourceName : cacheEntry->_dataSources) {
for (auto& dataSource: cacheEntry->_dataSources) {
if (!dataSource) {
continue;
}
auto& dataSourceName = dataSource->name();
if (!exe->canUseCollection(dataSourceName, auth::Level::RO)) {
// cannot use query cache result because of permissions
hasPermissions = false;
@ -670,16 +684,27 @@ ExecutionState Query::execute(QueryRegistry* registry, QueryResult& queryResult)
_resultBuilder->close();
if (useQueryCache && _warnings.empty()) {
auto dataSources = _queryDataSources;
_trx->state()->allCollections( // collect transaction DataSources
[&dataSources](TransactionCollection& trxCollection)->bool {
dataSources.emplace(trxCollection.collection()); // add collection from transaction
return true; // continue traversal
}
);
// create a query cache entry for later storage
_cacheEntry = std::make_unique<QueryCacheResultEntry>(
hash(), _queryString, _resultBuilder, bindParameters(),
_trx->state()->collectionNames(_views));
std::move(dataSources) // query DataSources
);
}
queryResult.result = std::move(_resultBuilder);
queryResult.context = _trx->transactionContext();
_executionPhase = ExecutionPhase::FINALIZE;
}
// intentionally falls through
case ExecutionPhase::FINALIZE: {
// will set warnings, stats, profile and cleanup plan and engine
@ -768,7 +793,13 @@ ExecutionState Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry,
// got a result from the query cache
if (exe != nullptr) {
for (std::string const& dataSourceName : cacheEntry->_dataSources) {
for (auto& dataSource: cacheEntry->_dataSources) {
if (!dataSource) {
continue;
}
auto& dataSourceName = dataSource->name();
if (!exe->canUseCollection(dataSourceName, auth::Level::RO)) {
// cannot use query cache result because of permissions
hasPermissions = false;
@ -879,12 +910,23 @@ ExecutionState Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry,
queryResult.context = _trx->transactionContext();
if (useQueryCache && _warnings.empty()) {
auto dataSources = _queryDataSources;
_trx->state()->allCollections( // collect transaction DataSources
[&dataSources](TransactionCollection& trxCollection)->bool {
dataSources.emplace(trxCollection.collection()); // add collection from transaction
return true; // continue traversal
}
);
// create a cache entry for later usage
_cacheEntry =
std::make_unique<QueryCacheResultEntry>(hash(), _queryString, builder,
bindParameters(),
_trx->state()->collectionNames(_views));
std::move(dataSources) // query DataSources
);
}
// will set warnings, stats, profile and cleanup plan and engine
ExecutionState state = finalize(queryResult);
while (state == ExecutionState::WAITING) {

View File

@ -49,6 +49,7 @@ struct TRI_vocbase_t;
namespace arangodb {
class CollectionNameResolver;
class LogicalDataSource; // forward declaration
namespace transaction {
class Context;
@ -82,7 +83,6 @@ class Query {
private:
enum ExecutionPhase { INITIALIZE, EXECUTE, FINALIZE };
private:
Query(Query const&) = delete;
Query& operator=(Query const&) = delete;
@ -99,12 +99,14 @@ class Query {
virtual ~Query();
/// @brief note that the query uses the DataSource
void addDataSource(std::shared_ptr<arangodb::LogicalDataSource> const& ds);
/// @brief clone a query
/// note: as a side-effect, this will also create and start a transaction for
/// the query
TEST_VIRTUAL Query* clone(QueryPart, bool);
public:
constexpr static uint64_t DontCache = 0;
/// @brief whether or not the query is killed
@ -290,16 +292,6 @@ class Query {
/// @brief get a description of the query's current state
std::string getStateString() const;
/// @brief note that the query uses the view
void addView(std::string const& name) {
// Either collection or view
_views.emplace(name);
}
std::unordered_set<std::string> const& views() const noexcept {
return _views;
}
/// @brief look up a graph in the _graphs collection
graph::Graph const* lookupGraphByName(std::string const& name);
@ -379,6 +371,10 @@ class Query {
/// @brief graphs used in query, identified by name
std::unordered_map<std::string, std::unique_ptr<graph::Graph>> _graphs;
/// @brief set of DataSources used in the query
/// needed for the query cache (e.g. to check LogicalDataSource::system())
std::unordered_set<std::shared_ptr<arangodb::LogicalDataSource>> _queryDataSources;
/// @brief the actual query string
QueryString _queryString;
@ -460,9 +456,6 @@ class Query {
/// @brief shared state
std::shared_ptr<SharedQueryState> _sharedState;
/// @brief names of views used by the query. needed for the query cache
std::unordered_set<std::string> _views;
/// @brief query cache entry built by the query
/// only populated when the query has generated its result(s) and before
/// storing the cache entry in the query cache
@ -478,4 +471,4 @@ class Query {
} // namespace aql
} // namespace arangodb
#endif
#endif

View File

@ -29,6 +29,7 @@
#include "Basics/WriteLocker.h"
#include "Basics/conversions.h"
#include "Basics/fasthash.h"
#include "VocBase/LogicalDataSource.h"
#include "VocBase/vocbase.h"
#include <velocypack/Builder.h>
@ -65,7 +66,8 @@ static bool showBindVars = true; // will be set once on startup. cannot be chan
QueryCacheResultEntry::QueryCacheResultEntry(uint64_t hash, QueryString const& queryString,
std::shared_ptr<VPackBuilder> const& queryResult,
std::shared_ptr<VPackBuilder> const& bindVars,
std::vector<std::string>&& dataSources)
std::unordered_set<std::shared_ptr<arangodb::LogicalDataSource>>&& dataSources // query DataSources
)
: _hash(hash),
_queryString(queryString.data(), queryString.size()),
_queryResult(queryResult),
@ -134,6 +136,7 @@ void QueryCacheResultEntry::toVelocyPack(VPackBuilder& builder) const {
builder.add("hits", VPackValue(_hits.load()));
double executionTime = this->executionTime();
if (executionTime < 0.0) {
builder.add("runTime", VPackValue(VPackValueType::Null));
} else {
@ -141,12 +144,17 @@ void QueryCacheResultEntry::toVelocyPack(VPackBuilder& builder) const {
}
auto timeString = TRI_StringTimeStamp(_stamp, false);
builder.add("started", VPackValue(timeString));
builder.add("dataSources", VPackValue(VPackValueType::Array));
for (auto const& ds : _dataSources) {
builder.add(VPackValue(ds));
if (ds) {
builder.add(arangodb::velocypack::Value(ds->name()));
}
}
builder.close();
builder.close();
@ -156,13 +164,13 @@ void QueryCacheResultEntry::toVelocyPack(VPackBuilder& builder) const {
QueryCacheDatabaseEntry::QueryCacheDatabaseEntry()
: _entriesByHash(), _head(nullptr), _tail(nullptr), _numResults(0), _sizeResults(0) {
_entriesByHash.reserve(128);
_entriesByDataSource.reserve(16);
_entriesByDataSourceGuid.reserve(16);
}
/// @brief destroy a database-specific cache
QueryCacheDatabaseEntry::~QueryCacheDatabaseEntry() {
_entriesByHash.clear();
_entriesByDataSource.clear();
_entriesByDataSourceGuid.clear();
}
/// @brief return the query cache contents
@ -252,17 +260,25 @@ void QueryCacheDatabaseEntry::store(std::shared_ptr<QueryCacheResultEntry>&& ent
try {
for (auto const& it : e->_dataSources) {
_entriesByDataSource[it].emplace(hash);
if (!it) {
continue; // skip null datasources
}
_entriesByDataSourceGuid[it->guid()].second.emplace(hash);
}
} catch (...) {
// rollback
// remove from data sources
for (auto const& it : e->_dataSources) {
auto it2 = _entriesByDataSource.find(it);
if (!it) {
continue; // skip null datasources
}
if (it2 != _entriesByDataSource.end()) {
(*it2).second.erase(hash);
auto itr2 = _entriesByDataSourceGuid.find(it->guid());
if (itr2 != _entriesByDataSourceGuid.end()) {
itr2->second.second.erase(hash);
}
}
@ -283,22 +299,22 @@ void QueryCacheDatabaseEntry::store(std::shared_ptr<QueryCacheResultEntry>&& ent
/// @brief invalidate all entries for the given data sources in the
/// database-specific cache
void QueryCacheDatabaseEntry::invalidate(std::vector<std::string> const& dataSources) {
for (auto const& it : dataSources) {
void QueryCacheDatabaseEntry::invalidate(std::vector<std::string> const& dataSourceGuids) {
for (auto const& it: dataSourceGuids) {
invalidate(it);
}
}
/// @brief invalidate all entries for a data source in the database-specific
/// cache
void QueryCacheDatabaseEntry::invalidate(std::string const& dataSource) {
auto it = _entriesByDataSource.find(dataSource);
void QueryCacheDatabaseEntry::invalidate(std::string const& dataSourceGuid) {
auto itr = _entriesByDataSourceGuid.find(dataSourceGuid);
if (it == _entriesByDataSource.end()) {
if (itr == _entriesByDataSourceGuid.end()) {
return;
}
for (auto& it2 : (*it).second) {
for (auto& it2 : itr->second.second) {
auto it3 = _entriesByHash.find(it2);
if (it3 != _entriesByHash.end()) {
@ -311,7 +327,7 @@ void QueryCacheDatabaseEntry::invalidate(std::string const& dataSource) {
}
}
_entriesByDataSource.erase(it);
_entriesByDataSourceGuid.erase(itr);
}
/// @brief enforce maximum number of results
@ -351,13 +367,14 @@ void QueryCacheDatabaseEntry::enforceMaxEntrySize(size_t value) {
/// @brief exclude all data from system collections
/// must be called under the shard's lock
void QueryCacheDatabaseEntry::excludeSystem() {
for (auto it = _entriesByDataSource.begin(); it != _entriesByDataSource.end();
for (auto itr = _entriesByDataSourceGuid.begin(); // setup
itr != _entriesByDataSourceGuid.end(); // condition
/* no hoisting */) {
if ((*it).first.empty() || (*it).first[0] != '_') {
if (!itr->second.first || !itr->second.first->system()) {
// not a system collection
++it;
++itr;
} else {
for (auto const& hash : (*it).second) {
for (auto const& hash: itr->second.second) {
auto it2 = _entriesByHash.find(hash);
if (it2 != _entriesByHash.end()) {
@ -367,17 +384,21 @@ void QueryCacheDatabaseEntry::excludeSystem() {
}
}
it = _entriesByDataSource.erase(it);
itr = _entriesByDataSourceGuid.erase(itr);
}
}
}
void QueryCacheDatabaseEntry::removeDatasources(QueryCacheResultEntry const* e) {
for (auto const& ds : e->_dataSources) {
auto it = _entriesByDataSource.find(ds);
if (!ds) {
continue; // skip null datasources
}
if (it != _entriesByDataSource.end()) {
(*it).second.erase(e->_hash);
auto itr = _entriesByDataSourceGuid.find(ds->guid());
if (itr != _entriesByDataSourceGuid.end()) {
itr->second.second.erase(e->_hash);
}
}
}
@ -603,7 +624,7 @@ void QueryCache::store(TRI_vocbase_t* vocbase, std::shared_ptr<QueryCacheResultE
// check if we need to exclude the entry because it refers to system
// collections
for (auto const& ds : e->_dataSources) {
if (!ds.empty() && ds[0] == '_') {
if (ds && ds->system()) {
// refers to a system collection...
return;
}
@ -643,7 +664,7 @@ void QueryCache::store(TRI_vocbase_t* vocbase, std::shared_ptr<QueryCacheResultE
}
/// @brief invalidate all queries for the given data sources
void QueryCache::invalidate(TRI_vocbase_t* vocbase, std::vector<std::string> const& dataSources) {
void QueryCache::invalidate(TRI_vocbase_t* vocbase, std::vector<std::string> const& dataSourceGuids) {
auto const part = getPart(vocbase);
WRITE_LOCKER(writeLocker, _entriesLock[part]);
@ -654,11 +675,11 @@ void QueryCache::invalidate(TRI_vocbase_t* vocbase, std::vector<std::string> con
}
// invalidate while holding the lock
(*it).second->invalidate(dataSources);
it->second->invalidate(dataSourceGuids);
}
/// @brief invalidate all queries for a particular data source
void QueryCache::invalidate(TRI_vocbase_t* vocbase, std::string const& dataSource) {
void QueryCache::invalidate(TRI_vocbase_t* vocbase, std::string const& dataSourceGuid) {
auto const part = getPart(vocbase);
WRITE_LOCKER(writeLocker, _entriesLock[part]);
@ -669,7 +690,7 @@ void QueryCache::invalidate(TRI_vocbase_t* vocbase, std::string const& dataSourc
}
// invalidate while holding the lock
(*it).second->invalidate(dataSource);
it->second->invalidate(dataSourceGuid);
}
/// @brief invalidate all queries for a particular database

View File

@ -31,6 +31,12 @@
struct TRI_vocbase_t;
namespace arangodb {
class LogicalDataSource; // forward declaration
}
namespace arangodb {
namespace velocypack {
class Builder;
@ -56,7 +62,8 @@ struct QueryCacheResultEntry {
QueryCacheResultEntry(uint64_t hash, QueryString const& queryString,
std::shared_ptr<arangodb::velocypack::Builder> const& queryResult,
std::shared_ptr<arangodb::velocypack::Builder> const& bindVars,
std::vector<std::string>&& dataSources);
std::unordered_set<std::shared_ptr<arangodb::LogicalDataSource>>&& dataSources // query DataSources
);
~QueryCacheResultEntry() = default;
@ -64,8 +71,8 @@ struct QueryCacheResultEntry {
std::string const _queryString;
std::shared_ptr<arangodb::velocypack::Builder> const _queryResult;
std::shared_ptr<arangodb::velocypack::Builder> const _bindVars;
std::unordered_set<std::shared_ptr<arangodb::LogicalDataSource>> const _dataSources; // query DataSources
std::shared_ptr<arangodb::velocypack::Builder> _stats;
std::vector<std::string> const _dataSources;
size_t _size;
size_t _rows;
std::atomic<uint64_t> _hits;
@ -100,11 +107,11 @@ struct QueryCacheDatabaseEntry {
/// @brief invalidate all entries for the given data sources in the
/// database-specific cache
void invalidate(std::vector<std::string> const& dataSources);
void invalidate(std::vector<std::string> const& dataSourceGuids);
/// @brief invalidate all entries for a data source in the
/// database-specific cache
void invalidate(std::string const& dataSource);
void invalidate(std::string const& dataSourceGuid);
void queriesToVelocyPack(arangodb::velocypack::Builder& builder) const;
@ -133,9 +140,10 @@ struct QueryCacheDatabaseEntry {
std::unordered_map<uint64_t, std::shared_ptr<QueryCacheResultEntry>> _entriesByHash;
/// @brief hash table that contains all data source-specific query results
/// maps from data sources names to a set of query results as defined in
/// maps from data sources GUIDs to a set of query results as defined in
/// _entriesByHash
std::unordered_map<std::string, std::unordered_set<uint64_t>> _entriesByDataSource;
typedef std::pair<std::shared_ptr<arangodb::LogicalDataSource>, std::unordered_set<uint64_t>> GuidEntry;
std::unordered_map<std::string, GuidEntry> _entriesByDataSourceGuid; // non-nullptr LogicalDataSource ensured by store(...)
/// @brief beginning of linked list of result entries
QueryCacheResultEntry* _head;
@ -197,10 +205,10 @@ class QueryCache {
void store(TRI_vocbase_t* vocbase, std::shared_ptr<QueryCacheResultEntry> entry);
/// @brief invalidate all queries for the given data sources
void invalidate(TRI_vocbase_t* vocbase, std::vector<std::string> const& dataSources);
void invalidate(TRI_vocbase_t* vocbase, std::vector<std::string> const& dataSourceGuids);
/// @brief invalidate all queries for a particular data source
void invalidate(TRI_vocbase_t* vocbase, std::string const& dataSource);
void invalidate(TRI_vocbase_t* vocbase, std::string const& dataSourceGuid);
/// @brief invalidate all queries for a particular database
void invalidate(TRI_vocbase_t* vocbase);

View File

@ -449,15 +449,10 @@ arangodb::Result IResearchLink::commit() {
return arangodb::Result(); // reader not modified
}
_dataStore._reader = reader; // update reader
auto viewImpl = view();
// invalidate query cache if there were some data changes
if (viewImpl) {
arangodb::aql::QueryCache::instance()->invalidate(&(_collection.vocbase()),
viewImpl->name());
}
_dataStore._reader = reader; // update reader
arangodb::aql::QueryCache::instance()->invalidate(
&(_collection.vocbase()), _viewGuid
);
} catch (arangodb::basics::Exception const& e) {
return arangodb::Result(
e.code(),
@ -1331,5 +1326,5 @@ std::shared_ptr<IResearchView> IResearchLink::view() const {
} // namespace arangodb
// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------

View File

@ -36,6 +36,7 @@
#include "RestServer/ViewTypesFeature.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include "StorageEngine/TransactionCollection.h"
#include "StorageEngine/TransactionState.h"
#include "Transaction/Methods.h"
#include "Transaction/StandaloneContext.h"
@ -247,21 +248,21 @@ struct IResearchView::ViewFactory : public arangodb::ViewFactory {
arangodb::application_features::ApplicationServer::lookupFeature<arangodb::DatabaseFeature>(
"Database");
std::string error;
bool inUpgrade = databaseFeature ? databaseFeature->upgrade() : false; // check if DB is currently being upgraded (skip validation checks)
bool inUpgrade = databaseFeature ? databaseFeature->upgrade() : false; // check if DB is currently being upgraded (skip validation checks)
IResearchViewMeta meta;
IResearchViewMetaState metaState;
if (!meta.init(definition, error) || (meta._version == 0 && !inUpgrade) // version 0 must be upgraded to split data-store on a per-link basis
|| meta._version > LATEST_VERSION ||
(ServerState::instance()->isSingleServer() // init metaState for SingleServer
&& !metaState.init(definition, error))) {
return arangodb::Result(TRI_ERROR_BAD_PARAMETER, error.empty() ? (std::string("failed to initialize arangosearch View from definition: ") +
definition
.toString())
: (std::string("failed to initialize arangosearch View from definition, error in attribute '") +
error + "': " +
definition
.toString()));
if (!meta.init(definition, error) // parse definition
|| (meta._version == 0 && !inUpgrade) // version 0 must be upgraded to split data-store on a per-link basis
|| meta._version > LATEST_VERSION // ensure version is valid
|| (ServerState::instance()->isSingleServer() // init metaState for SingleServer
&& !metaState.init(definition, error))) {
return arangodb::Result(
TRI_ERROR_BAD_PARAMETER,
error.empty()
? (std::string("failed to initialize arangosearch View from definition: ") + definition.toString())
: (std::string("failed to initialize arangosearch View from definition, error in attribute '") + error + "': " + definition.toString())
);
}
auto impl = std::shared_ptr<IResearchView>(
@ -526,77 +527,77 @@ arangodb::Result IResearchView::appendVelocyPackImpl(arangodb::velocypack::Build
auto res = trx.begin();
if (!res.ok()) {
return res; // nothing more to output
return res; // nothing more to output
}
auto* state = trx.state();
if (!state) {
return arangodb::Result(
TRI_ERROR_INTERNAL,
std::string("failed to get transaction state while generating json "
"for arangosearch view '") +
name() + "'");
TRI_ERROR_INTERNAL,
std::string("failed to get transaction state while generating json for arangosearch view '") + name() + "'"
);
}
arangodb::velocypack::ObjectBuilder linksBuilderWrapper(&linksBuilder);
auto visitor = [this, &linksBuilder, &res]( // visit collections
arangodb::TransactionCollection& trxCollection // transaction collection
)->bool {
auto collection = trxCollection.collection();
for (auto& collectionName : state->collectionNames()) {
for (auto& index : trx.indexesForCollection(collectionName, /*withHidden*/ true)) {
if (index && arangodb::Index::IndexType::TRI_IDX_TYPE_IRESEARCH_LINK ==
index->type()) {
// TODO FIXME find a better way to retrieve an IResearch Link
// cannot use static_cast/reinterpret_cast since Index is not related
// to IResearchLink
auto* ptr = dynamic_cast<IResearchLink*>(index.get());
if (!ptr || *ptr != *this) {
continue; // the index is not a link for the current view
}
arangodb::velocypack::Builder linkBuilder;
linkBuilder.openObject();
if (!ptr->json(linkBuilder)) {
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
<< "failed to generate json for arangosearch link '" << ptr->id()
<< "' while generating json for arangosearch view '" << id() << "'";
continue; // skip invalid link definitions
}
linkBuilder.close();
// need to mask out some fields
static const std::function<bool(irs::string_ref const& key)> acceptor =
[](irs::string_ref const& key) -> bool {
return key != arangodb::StaticStrings::IndexId &&
key != arangodb::StaticStrings::IndexType &&
key != StaticStrings::ViewIdField; // ignored fields
};
arangodb::velocypack::Builder sanitizedBuilder;
sanitizedBuilder.openObject();
if (!mergeSliceSkipKeys(sanitizedBuilder, linkBuilder.slice(), acceptor)) {
Result result(
TRI_ERROR_INTERNAL,
std::string("failed to generate externally visible link ")
.append("definition while emplacing link definition into ")
.append("arangosearch view '")
.append(name())
.append("'"));
LOG_TOPIC(WARN, iresearch::TOPIC) << result.errorMessage();
return result;
}
sanitizedBuilder.close();
linksBuilderWrapper->add(collectionName, sanitizedBuilder.slice());
}
if (!collection) {
return true; // skip missing collections
}
auto link = IResearchLinkHelper::find(*collection, *this);
if (!link) {
return true; // no links for the current view
}
arangodb::velocypack::Builder linkBuilder;
linkBuilder.openObject();
if (!link->json(linkBuilder)) {
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
<< "failed to generate json for arangosearch link '" << link->id() << "' while generating json for arangosearch view '" << name() << "'";
return true; // skip invalid link definitions
}
linkBuilder.close();
static const auto acceptor = [](irs::string_ref const& key)->bool {
return key != arangodb::StaticStrings::IndexId
&& key != arangodb::StaticStrings::IndexType
&& key != StaticStrings::ViewIdField; // ignored fields
};
linksBuilder.add(
collection->name(),
arangodb::velocypack::Value(arangodb::velocypack::ValueType::Object)
);
if (!mergeSliceSkipKeys(linksBuilder, linkBuilder.slice(), acceptor)) {
res = arangodb::Result(
TRI_ERROR_INTERNAL,
std::string("failed to generate arangosearch link '") + std::to_string(link->id()) + "' definition while generating json for arangosearch view '" + name() + "'"
);
return false; // terminate generation
}
linksBuilder.close();
return true; // done with this collection
};
linksBuilder.openObject();
state->allCollections(visitor);
linksBuilder.close();
if (!res.ok()) {
return res;
}
trx.commit();
@ -1332,4 +1333,4 @@ void IResearchView::verifyKnownCollections() {
// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------

View File

@ -80,8 +80,10 @@ inline irs::columnstore_reader::values_reader_f pkColumn(irs::sub_reader const&
return reader ? reader->values() : irs::columnstore_reader::values_reader_f{};
}
inline arangodb::LogicalCollection* lookupCollection(arangodb::transaction::Methods& trx,
TRI_voc_cid_t cid) {
inline std::shared_ptr<arangodb::LogicalCollection> lookupCollection( // find collection
arangodb::transaction::Methods& trx, // transaction
TRI_voc_cid_t cid // collection identifier
) {
TRI_ASSERT(trx.state());
// this is necessary for MMFiles
@ -445,8 +447,7 @@ bool IResearchViewBlock::next(ReadContext& ctx, size_t limit) {
}
auto const cid = _reader.cid(_readerOffset); // CID is constant until resetIterator()
auto* collection = lookupCollection(*_trx, cid);
auto collection = lookupCollection(*_trx, cid);
if (!collection) {
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
@ -565,8 +566,7 @@ bool IResearchViewUnorderedBlock::next(ReadContext& ctx, size_t limit) {
}
auto const cid = _reader.cid(_readerOffset); // CID is constant until resetIterator()
auto* collection = lookupCollection(*_trx, cid);
auto collection = lookupCollection(*_trx, cid);
if (!collection) {
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
@ -641,4 +641,4 @@ size_t IResearchViewUnorderedBlock::skip(size_t limit) {
// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------

View File

@ -151,12 +151,6 @@ void handleViewsRule(arangodb::aql::Optimizer* opt,
auto addPlan = irs::make_finally([opt, &plan, rule, &modified]() {
opt->addPlan(std::move(plan), rule, modified);
});
if (query.views().empty()) {
// nothing to do in absence of views
return;
}
SmallVector<ExecutionNode*>::allocator_type::arena_type a;
SmallVector<ExecutionNode*> nodes{a};

View File

@ -346,8 +346,9 @@ int MMFilesTransactionState::addOperation(LocalDocumentId const& documentId,
}
auto queryCache = arangodb::aql::QueryCache::instance();
if (queryCache->mayBeActive()) {
queryCache->invalidate(&_vocbase, collection->name());
queryCache->invalidate(&_vocbase, collection->guid());
}
physical->setRevision(revisionId, false);
@ -494,8 +495,8 @@ int MMFilesTransactionState::writeCommitMarker() {
// also sync RocksDB WAL if required
bool hasPersistentIndex = false;
allCollections([&hasPersistentIndex](TransactionCollection* collection) {
auto c = static_cast<MMFilesTransactionCollection*>(collection);
allCollections([&hasPersistentIndex](TransactionCollection& collection)->bool {
auto* c = static_cast<MMFilesTransactionCollection*>(&collection);
if (c->canAccess(AccessMode::Type::WRITE) &&
c->collection()->getPhysical()->hasIndexOfType(
@ -507,6 +508,7 @@ int MMFilesTransactionState::writeCommitMarker() {
return true;
});
if (hasPersistentIndex) {
MMFilesPersistentIndexFeature::syncWal();
}

View File

@ -538,8 +538,9 @@ Result RocksDBTransactionState::addOperation(TRI_voc_cid_t cid, TRI_voc_rid_t re
// clear the query cache for this collection
auto queryCache = arangodb::aql::QueryCache::instance();
if (queryCache->mayBeActive()) {
queryCache->invalidate(&_vocbase, tcoll->collectionName());
if (queryCache->mayBeActive() && tcoll->collection()) {
queryCache->invalidate(&_vocbase, tcoll->collection()->guid());
}
switch (operationType) {

View File

@ -53,8 +53,8 @@ class TransactionCollection {
inline TRI_voc_cid_t id() const { return _cid; }
LogicalCollection* collection() const {
return _collection.get(); // vocbase collection pointer
std::shared_ptr<LogicalCollection> const& collection() const {
return _collection; // vocbase collection pointer
}
std::string collectionName() const;
@ -109,4 +109,4 @@ class TransactionCollection {
} // namespace arangodb
#endif
#endif

View File

@ -31,6 +31,7 @@
#include "Transaction/Methods.h"
#include "Transaction/Options.h"
#include "Utils/ExecContext.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/ticks.h"
using namespace arangodb;
@ -61,22 +62,6 @@ TransactionState::~TransactionState() {
}
}
std::vector<std::string> TransactionState::collectionNames(
std::unordered_set<std::string> const& initial) const {
std::vector<std::string> result;
result.reserve(_collections.size() + initial.size());
for (auto const& it : initial) {
result.emplace_back(it);
}
for (auto const& trxCollection : _collections) {
if (trxCollection->collection() != nullptr) {
result.emplace_back(trxCollection->collectionName());
}
}
return result;
}
/// @brief return the collection from a transaction
TransactionCollection* TransactionState::collection(TRI_voc_cid_t cid,
AccessMode::Type accessType) {
@ -193,9 +178,12 @@ Result TransactionState::ensureCollections(int nestingLevel) {
}
/// @brief run a callback on all collections
void TransactionState::allCollections(std::function<bool(TransactionCollection*)> const& cb) {
void TransactionState::allCollections( // iterate
std::function<bool(TransactionCollection&)> const& cb // callback to invoke
) {
for (auto& trxCollection : _collections) {
if (!cb(trxCollection)) {
TRI_ASSERT(trxCollection); // ensured by addCollection(...)
if (!cb(*trxCollection)) {
// abort early
return;
}
@ -387,9 +375,12 @@ void TransactionState::clearQueryCache() {
std::vector<std::string> collections;
for (auto& trxCollection : _collections) {
if (trxCollection->hasOperations()) {
if (trxCollection // valid instance
&& trxCollection->collection() // has a valid collection
&& trxCollection->hasOperations() // may have been modified
) {
// we're only interested in collections that may have been modified
collections.emplace_back(trxCollection->collectionName());
collections.emplace_back(trxCollection->collection()->guid());
}
}

View File

@ -130,9 +130,6 @@ class TransactionState {
_options.allowImplicitCollections = value;
}
std::vector<std::string> collectionNames(std::unordered_set<std::string> const& initial =
std::unordered_set<std::string>()) const;
/// @brief return the collection from a transaction
TransactionCollection* collection(TRI_voc_cid_t cid, AccessMode::Type accessType);
@ -147,7 +144,7 @@ class TransactionState {
Result useCollections(int nestingLevel);
/// @brief run a callback on all collections of the transaction
void allCollections(std::function<bool(TransactionCollection*)> const& cb);
void allCollections(std::function<bool(TransactionCollection&)> const& cb);
/// @brief return the number of collections in the transaction
size_t numCollections() const { return _collections.size(); }
@ -261,4 +258,4 @@ class TransactionState {
} // namespace arangodb
#endif
#endif

View File

@ -839,7 +839,7 @@ void transaction::Methods::pinData(TRI_voc_cid_t cid) {
}
TRI_ASSERT(trxColl->collection() != nullptr);
_transactionContextPtr->pinData(trxColl->collection());
_transactionContextPtr->pinData(trxColl->collection().get());
}
/// @brief whether or not a ditch has been created for the collection
@ -2979,7 +2979,7 @@ arangodb::LogicalCollection* transaction::Methods::documentCollection(
TRI_ASSERT(_state->status() == transaction::Status::RUNNING);
TRI_ASSERT(trxCollection->collection() != nullptr);
return trxCollection->collection();
return trxCollection->collection().get();
}
/// @brief return the collection
@ -2995,7 +2995,7 @@ arangodb::LogicalCollection* transaction::Methods::documentCollection(TRI_voc_ci
TRI_ASSERT(trxColl != nullptr);
TRI_ASSERT(trxColl->collection() != nullptr);
return trxColl->collection();
return trxColl->collection().get();
}
/// @brief add a collection by id, with the name supplied

View File

@ -68,7 +68,7 @@ TransactionCollection* SingleCollectionTransaction::resolveTrxCollection() {
_trxCollection = _state->collection(_cid, _accessType);
if (_trxCollection != nullptr) {
_documentCollection = _trxCollection->collection();
_documentCollection = _trxCollection->collection().get();
}
}

View File

@ -816,7 +816,8 @@ bool LogicalCollection::dropIndex(TRI_idx_iid_t iid) {
#if USE_PLAN_CACHE
arangodb::aql::PlanCache::instance()->invalidate(_vocbase);
#endif
arangodb::aql::QueryCache::instance()->invalidate(&vocbase(), name());
arangodb::aql::QueryCache::instance()->invalidate(&vocbase(), guid());
bool result = _physical->dropIndex(iid);
@ -826,6 +827,7 @@ bool LogicalCollection::dropIndex(TRI_idx_iid_t iid) {
DatabaseFeature::DATABASE->versionTracker()->track("drop index");
}
}
return result;
}

View File

@ -2982,7 +2982,11 @@ SECTION("test_transaction_registration") {
CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
CHECK((nullptr != trx.state()->findCollection(logicalCollection1->id())));
std::unordered_set<std::string> expectedNames = { "testCollection0", "testCollection1" };
auto actualNames = trx.state()->collectionNames();
std::unordered_set<std::string> actualNames;
trx.state()->allCollections([&actualNames](arangodb::TransactionCollection& col)->bool {
actualNames.emplace(col.collection()->name());
return true;
});
for(auto& entry: actualNames) {
CHECK((1 == expectedNames.erase(entry)));
@ -3004,7 +3008,11 @@ SECTION("test_transaction_registration") {
CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
CHECK((nullptr != trx.state()->findCollection(logicalCollection1->id())));
std::unordered_set<std::string> expectedNames = { "testCollection0", "testCollection1" };
auto actualNames = trx.state()->collectionNames();
std::unordered_set<std::string> actualNames;
trx.state()->allCollections([&actualNames](arangodb::TransactionCollection& col)->bool {
actualNames.emplace(col.collection()->name());
return true;
});
for(auto& entry: actualNames) {
CHECK((1 == expectedNames.erase(entry)));
@ -3026,7 +3034,11 @@ SECTION("test_transaction_registration") {
CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
CHECK((nullptr != trx.state()->findCollection(logicalCollection1->id())));
std::unordered_set<std::string> expectedNames = { "testCollection0", "testCollection1" };
auto actualNames = trx.state()->collectionNames();
std::unordered_set<std::string> actualNames;
trx.state()->allCollections([&actualNames](arangodb::TransactionCollection& col)->bool {
actualNames.emplace(col.collection()->name());
return true;
});
for(auto& entry: actualNames) {
CHECK((1 == expectedNames.erase(entry)));
@ -3048,7 +3060,11 @@ SECTION("test_transaction_registration") {
CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
CHECK((nullptr != trx.state()->findCollection(logicalCollection1->id())));
std::unordered_set<std::string> expectedNames = { "testCollection0", "testCollection1" };
auto actualNames = trx.state()->collectionNames();
std::unordered_set<std::string> actualNames;
trx.state()->allCollections([&actualNames](arangodb::TransactionCollection& col)->bool {
actualNames.emplace(col.collection()->name());
return true;
});
for(auto& entry: actualNames) {
CHECK((1 == expectedNames.erase(entry)));
@ -3070,7 +3086,11 @@ SECTION("test_transaction_registration") {
CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
CHECK((nullptr != trx.state()->findCollection(logicalCollection1->id())));
std::unordered_set<std::string> expectedNames = { "testCollection0", "testCollection1" };
auto actualNames = trx.state()->collectionNames();
std::unordered_set<std::string> actualNames;
trx.state()->allCollections([&actualNames](arangodb::TransactionCollection& col)->bool {
actualNames.emplace(col.collection()->name());
return true;
});
for(auto& entry: actualNames) {
CHECK((1 == expectedNames.erase(entry)));
@ -3092,7 +3112,11 @@ SECTION("test_transaction_registration") {
CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
CHECK((nullptr != trx.state()->findCollection(logicalCollection1->id())));
std::unordered_set<std::string> expectedNames = { "testCollection0", "testCollection1" };
auto actualNames = trx.state()->collectionNames();
std::unordered_set<std::string> actualNames;
trx.state()->allCollections([&actualNames](arangodb::TransactionCollection& col)->bool {
actualNames.emplace(col.collection()->name());
return true;
});
for(auto& entry: actualNames) {
CHECK((1 == expectedNames.erase(entry)));
@ -3116,7 +3140,11 @@ SECTION("test_transaction_registration") {
CHECK((1 == trx.state()->numCollections()));
CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
std::unordered_set<std::string> expectedNames = { "testCollection0" };
auto actualNames = trx.state()->collectionNames();
std::unordered_set<std::string> actualNames;
trx.state()->allCollections([&actualNames](arangodb::TransactionCollection& col)->bool {
actualNames.emplace(col.collection()->name());
return true;
});
for(auto& entry: actualNames) {
CHECK((1 == expectedNames.erase(entry)));
@ -3137,7 +3165,11 @@ SECTION("test_transaction_registration") {
CHECK((1 == trx.state()->numCollections()));
CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
std::unordered_set<std::string> expectedNames = { "testCollection0" };
auto actualNames = trx.state()->collectionNames();
std::unordered_set<std::string> actualNames;
trx.state()->allCollections([&actualNames](arangodb::TransactionCollection& col)->bool {
actualNames.emplace(col.collection()->name());
return true;
});
for(auto& entry: actualNames) {
CHECK((1 == expectedNames.erase(entry)));
@ -3158,7 +3190,11 @@ SECTION("test_transaction_registration") {
CHECK((1 == trx.state()->numCollections()));
CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
std::unordered_set<std::string> expectedNames = { "testCollection0" };
auto actualNames = trx.state()->collectionNames();
std::unordered_set<std::string> actualNames;
trx.state()->allCollections([&actualNames](arangodb::TransactionCollection& col)->bool {
actualNames.emplace(col.collection()->name());
return true;
});
for(auto& entry: actualNames) {
CHECK((1 == expectedNames.erase(entry)));
@ -3179,7 +3215,11 @@ SECTION("test_transaction_registration") {
CHECK((1 == trx.state()->numCollections()));
CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
std::unordered_set<std::string> expectedNames = { "testCollection0" };
auto actualNames = trx.state()->collectionNames();
std::unordered_set<std::string> actualNames;
trx.state()->allCollections([&actualNames](arangodb::TransactionCollection& col)->bool {
actualNames.emplace(col.collection()->name());
return true;
});
for(auto& entry: actualNames) {
CHECK((1 == expectedNames.erase(entry)));
@ -3200,7 +3240,11 @@ SECTION("test_transaction_registration") {
CHECK((1 == trx.state()->numCollections()));
CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
std::unordered_set<std::string> expectedNames = { "testCollection0" };
auto actualNames = trx.state()->collectionNames();
std::unordered_set<std::string> actualNames;
trx.state()->allCollections([&actualNames](arangodb::TransactionCollection& col)->bool {
actualNames.emplace(col.collection()->name());
return true;
});
for(auto& entry: actualNames) {
CHECK((1 == expectedNames.erase(entry)));
@ -3221,7 +3265,11 @@ SECTION("test_transaction_registration") {
CHECK((1 == trx.state()->numCollections()));
CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
std::unordered_set<std::string> expectedNames = { "testCollection0" };
auto actualNames = trx.state()->collectionNames();
std::unordered_set<std::string> actualNames;
trx.state()->allCollections([&actualNames](arangodb::TransactionCollection& col)->bool {
actualNames.emplace(col.collection()->name());
return true;
});
for(auto& entry: actualNames) {
CHECK((1 == expectedNames.erase(entry)));
@ -6341,4 +6389,4 @@ SECTION("test_update_partial") {
// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------

View File

@ -142,8 +142,8 @@ function ahuacatlQueryCacheTestSuite () {
try {
AQL_EXECUTE(query, { "@collection": "UnitTestsAhuacatlQueryCache1" });
fail();
} catch (err) {
fail();
assertEqual(internal.errors.ERROR_ARANGO_DATA_SOURCE_NOT_FOUND.code, err.errorNum);
}