mirror of https://gitee.com/bigwinds/arangodb
Feature/test iresearch (#5610)
* start implementing arangosearch cluster tests
* backport: ensure view lookup is done via collectionNameResolver, ensure updateProperties returns current view properties
* first attempt to fix failing tests
* refactor cluster wide view creation logic
* if view is not found in the new plan then check the old plan too
* ensure the cluster-wide view is looked up in vocbase as well on startup/recovery
* do not store cluster-wide IResearchView in vocbase
* move stale view cleanup to the shared pointer deleter, address test failures
* do not print warning
* enable arangosearch tests by default
* fix catch tests
* address incorrect return value for cluster-wide links
* address some issues with test failures due to cluster-view allocated within TRI_vocbase_t
* simplify per-cid view name, address 'catch' test failures
* ensure IResearchViewNode volatility is properly calculated in cluster
* invoke callbacks directly in AgencyMock instead of waiting for timeout
* ensure view updates via JavaScript always use the latest view definition
* pass a list of shards to `IResearchViewDBServer::snapshot`
* extend cluster aql tests
* fixes after merge
* fix class/struct inconsistencies
* comment failing tests
* remove debug logging
* add debug function
* tests cleanup
* simplify upcoming merge: pass resolver from a side
* backport: move all transaction status callback logic to Methods
* add changes missed from previous commit
* fix js and ruby tests
* more tests for IResearchViewNode
* pass transaction to IResearchViewDBServer::snapshot, address IResearchViewDBServer tests segfault
* pass transaction to IResearchView::snapshot instead of transaction state
* temporarily add trace log output to tests to try to find the cause of the core dump on Jenkins
* add more temporary debug output to trace down the segfault on Jenkins
* add even more temporary debug output to trace down the segfault on Jenkins
* ensure View related maps are cleared during shutdown
* reset ClusterInfo::instance() before DatabaseFeature::unprepare()
* remove extraneous debug output
* missed line from previous commit
* uncomment required line
* add nullptr checks to RocksDBIndexFactory::prepareIndexes(...) similar to the ones in MMFilesIndexFactory::prepareIndexes(...)
* attempt to fix deadlock in tests
* add comment as per reviewer request
* fix aql test suite name
* add some debug logging
* address deadlock between ClusterInfo::loadPlan() and CollectionNameResolver::localNameLookup(...)
* explicitly state which index definition failed in the log message
* use vocbase from shard-view instead, just in case
* explicitly state which index definition failed in the log message
* do not create shard-view instances from cluster-link instances (only register existing ones)
* add some tests
This commit is contained in:
parent
164ec9ed03
commit
5eef6cd618
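A recurring change in the diff below is that `IResearchView::snapshot(...)` (and its DBServer counterpart) now receives the transaction itself (`transaction::Methods&`) instead of a `TransactionState&`, deriving the state internally. The following standalone sketch only illustrates that calling pattern; every type here is a simplified stand-in, not the actual ArangoDB declaration.

```cpp
// Standalone sketch of the new snapshot() calling pattern (stand-in types,
// not the real ArangoDB classes): the view derives the TransactionState from
// transaction::Methods itself and bails out if the transaction has no state.
#include <cstdint>
#include <iostream>
#include <memory>

struct TransactionState {          // stand-in for arangodb::TransactionState
  bool waitForSync = false;
  std::uint64_t id = 42;
};

struct Methods {                   // stand-in for arangodb::transaction::Methods
  TransactionState* state() const { return _state.get(); }
  std::shared_ptr<TransactionState> _state = std::make_shared<TransactionState>();
};

struct PrimaryKeyIndexReader {};   // stand-in for the reader snapshot() returns

struct View {
  // before this commit: snapshot(TransactionState& state, bool force)
  // after this commit:  snapshot(transaction::Methods& trx, bool force)
  PrimaryKeyIndexReader* snapshot(Methods& trx, bool force = false) const {
    auto* state = trx.state();
    if (!state) {                  // the null check now lives inside snapshot()
      std::cerr << "failed to get transaction state while creating a snapshot\n";
      return nullptr;
    }
    // a real implementation would find or create the per-transaction cookie
    // keyed by '*state' and return the reader stored inside it
    static PrimaryKeyIndexReader reader;
    return force ? &reader : nullptr;
  }
};

int main() {
  Methods trx;
  View view;
  std::cout << (view.snapshot(trx, true) ? "snapshot created" : "no snapshot") << '\n';
}
```

Moving the `state()` null check into `snapshot()` is what allows the status-change callbacks registered through `transaction::Methods` (see the `_trxReadCallback`/`_trxWriteCallback` hunks below) to call it directly.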
@ -291,7 +291,7 @@ describe ArangoDB do
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(true)
doc.parsed_response['code'].should eq(404)
doc.parsed_response['errorNum'].should eq(1211)
doc.parsed_response['errorNum'].should eq(1203)
end

end

@ -306,7 +306,7 @@ describe ArangoDB do
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(true)
doc.parsed_response['code'].should eq(404)
doc.parsed_response['errorNum'].should eq(1211)
doc.parsed_response['errorNum'].should eq(1203)
end

it "getting properties of a non-existent view" do

@ -317,7 +317,7 @@ describe ArangoDB do
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(true)
doc.parsed_response['code'].should eq(404)
doc.parsed_response['errorNum'].should eq(1211)
doc.parsed_response['errorNum'].should eq(1203)
end

end

@ -349,7 +349,7 @@ describe ArangoDB do
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(true)
doc.parsed_response['code'].should eq(404)
doc.parsed_response['errorNum'].should eq(1211)
doc.parsed_response['errorNum'].should eq(1203)
end

it "modifying a view with unacceptable properties" do

@ -373,13 +373,19 @@ describe ArangoDB do
doc2 = ArangoDB.log_put("#{prefix}-modify-unacceptable", cmd2, :body => body2)
doc2.code.should eq(200)
doc2.headers['content-type'].should eq("application/json; charset=utf-8")
doc2.parsed_response['threadsMaxTotal'].should eq(17)
doc2.parsed_response['name'].should eq("lemon")
doc2.parsed_response['type'].should eq("arangosearch")
doc2.parsed_response['id'].should eq(doc1.parsed_response['id'])

cmd3 = api + '/lemon'
doc3 = ArangoDB.log_delete("#{prefix}-modify-unacceptable", cmd3)
cmd3 = api + '/lemon/properties'
doc4 = ArangoDB.log_get("#{prefix}-modify-unacceptable", cmd3)
doc4.parsed_response['threadsMaxTotal'].should eq(17)

doc3.code.should eq(200)
doc3.headers['content-type'].should eq("application/json; charset=utf-8")
cmd4 = api + '/lemon'
doc4 = ArangoDB.log_delete("#{prefix}-modify-unacceptable", cmd4)

doc4.code.should eq(200)
doc4.headers['content-type'].should eq("application/json; charset=utf-8")
end

end

@ -507,7 +513,9 @@ describe ArangoDB do
doc1 = ArangoDB.log_put("#{prefix}-change-properties", cmd1, :body => body1)
doc1.code.should eq(200)
doc1.headers['content-type'].should eq("application/json; charset=utf-8")
doc1.parsed_response['threadsMaxTotal'].should eq(7)
doc1.parsed_response['name'].should eq('abc')
doc1.parsed_response['type'].should eq('arangosearch')
doc1.parsed_response['properties'].should eq(nil)

cmd2 = api + '/abc/properties'
doc2 = ArangoDB.log_get("#{prefix}-change-properties", cmd2)

@ -525,7 +533,9 @@ describe ArangoDB do
doc1 = ArangoDB.log_put("#{prefix}-ignore-extra-properties", cmd1, :body => body1)
doc1.code.should eq(200)
doc1.headers['content-type'].should eq("application/json; charset=utf-8")
doc1.parsed_response['threadsMaxTotal'].should eq(10)
doc1.parsed_response['name'].should eq('abc')
doc1.parsed_response['type'].should eq('arangosearch')
doc1.parsed_response['properties'].should eq(nil)
doc1.parsed_response['extra'].should eq(nil)

cmd2 = api + '/abc/properties'

@ -544,7 +554,9 @@ describe ArangoDB do
doc1 = ArangoDB.log_patch("#{prefix}-accept-patch", cmd1, :body => body1)
doc1.code.should eq(200)
doc1.headers['content-type'].should eq("application/json; charset=utf-8")
doc1.parsed_response['threadsMaxTotal'].should eq(3)
doc1.parsed_response['name'].should eq('abc')
doc1.parsed_response['type'].should eq('arangosearch')
doc1.parsed_response['properties'].should eq(nil)

cmd2 = api + '/abc/properties'
doc2 = ArangoDB.log_get("#{prefix}-accept-patch", cmd2)

@ -125,9 +125,9 @@ bool OurLessThan::operator()(
cmp = AqlValue::Compare(_trx, aa, bb, true);
}

if (cmp == -1) {
if (cmp < 0) {
return reg.asc;
} else if (cmp == 1) {
} else if (cmp > 0) {
return !reg.asc;
}
}

@ -157,6 +157,13 @@ void EngineInfoContainerDBServer::EngineInfo::addNode(ExecutionNode* node) {
case ExecutionNode::ENUMERATE_IRESEARCH_VIEW:{
TRI_ASSERT(_type == ExecutionNode::MAX_NODE_TYPE_VALUE);
auto& viewNode = *ExecutionNode::castTo<iresearch::IResearchViewNode*>(node);

// FIXME should we have a separate optimizer rule for that?
//
// evaluate node volatility before the distribution
// can't do it on DB servers since only parts of the plan will be sent
viewNode.volatility(true);

_type = ExecutionNode::ENUMERATE_IRESEARCH_VIEW;
_view = viewNode.view().get();
break;

@ -869,11 +869,22 @@ ExecutionNode* ExecutionPlan::fromNodeFor(ExecutionNode* previous,
// second operand is a view
std::string const viewName = expression->getString();
auto& vocbase = _ast->query()->vocbase();
auto view = vocbase.lookupView(viewName);

std::shared_ptr<LogicalView> view;

if (ServerState::instance()->isSingleServer()) {
view = vocbase.lookupView(viewName);
} else {
// need cluster wide view
TRI_ASSERT(ClusterInfo::instance());
view = ClusterInfo::instance()->getView(vocbase.name(), viewName);
}

if (!view) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"no view for EnumerateView");
THROW_ARANGO_EXCEPTION_MESSAGE(
TRI_ERROR_INTERNAL,
"no view for EnumerateView"
);
}

en = registerNode(new iresearch::IResearchViewNode(

@ -139,8 +139,12 @@ ClusterInfo* ClusterInfo::instance() { return _instance.get(); }
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
ClusterInfo::ClusterInfo(AgencyCallbackRegistry* agencyCallbackRegistry)
|
||||
: _agency(), _agencyCallbackRegistry(agencyCallbackRegistry),
|
||||
_planVersion(0), _currentVersion(0), _uniqid() {
|
||||
: _agency(),
|
||||
_agencyCallbackRegistry(agencyCallbackRegistry),
|
||||
_planVersion(0),
|
||||
_currentVersion(0),
|
||||
_planLoader(std::thread::id()),
|
||||
_uniqid() {
|
||||
_uniqid._currentValue = 1ULL;
|
||||
_uniqid._upperValue = 0ULL;
|
||||
|
||||
|
@ -163,6 +167,8 @@ void ClusterInfo::cleanup() {
|
|||
return;
|
||||
}
|
||||
|
||||
TRI_ASSERT(theInstance->_newPlannedViews.empty()); // only non-empty during loadPlan()
|
||||
theInstance->_plannedViews.clear();
|
||||
theInstance->_plannedCollections.clear();
|
||||
theInstance->_shards.clear();
|
||||
theInstance->_shardKeys.clear();
|
||||
|
@ -369,6 +375,26 @@ void ClusterInfo::loadPlan() {
|
|||
++_planProt.wantedVersion; // Indicate that after *NOW* somebody has to
|
||||
// reread from the agency!
|
||||
MUTEX_LOCKER(mutexLocker, _planProt.mutex); // only one may work at a time
|
||||
|
||||
// For ArangoSearch views we need to get access to immediately created views
|
||||
// in order to allow links to be created correctly.
|
||||
// For the scenario above, we track such views in '_newPlannedViews' member
|
||||
// which is supposed to be empty before and after 'ClusterInfo::loadPlan()' execution.
|
||||
// In addition, we do the following "trick" to provide access to '_newPlannedViews'
|
||||
// from outside 'ClusterInfo': in case if 'ClusterInfo::getView' has been called
|
||||
// from within 'ClusterInfo::loadPlan', we redirect caller to search view in
|
||||
// '_newPlannedViews' member instead of '_plannedViews'
|
||||
|
||||
// set plan loader
|
||||
TRI_ASSERT(_newPlannedViews.empty());
|
||||
_planLoader = std::this_thread::get_id();
|
||||
|
||||
// ensure we'll eventually reset plan loader
|
||||
auto resetLoader = scopeGuard([this](){
|
||||
_planLoader = std::thread::id();
|
||||
_newPlannedViews.clear();
|
||||
});
|
||||
|
||||
uint64_t storedVersion = _planProt.wantedVersion; // this is the version
|
||||
// we will set in the end
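The comment block above explains why views created during `loadPlan()` must already be visible to `getView()` on the same thread (so links can find them). Below is a minimal, self-contained sketch of that `_planLoader` thread-id redirection, using simplified stand-in types; only the roles of the members mirror the diff.

```cpp
// Rough illustration (stand-in types, not the ArangoDB sources) of the
// "_planLoader" mechanism: while loadPlan() is running, getView() calls made
// from the same thread are served from the views created during this very
// plan load (_newPlannedViews) instead of the previously published plan.
#include <atomic>
#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <thread>

struct LogicalView {};
using Views = std::map<std::string, std::shared_ptr<LogicalView>>;

struct ClusterInfoSketch {
  Views plannedViews;                        // published by the previous loadPlan()
  Views newPlannedViews;                     // filled while loadPlan() is running
  std::atomic<std::thread::id> planLoader{std::thread::id()};  // loadPlan() thread

  std::shared_ptr<LogicalView> getView(std::string const& id) {
    auto const& source =
        std::this_thread::get_id() == planLoader ? newPlannedViews : plannedViews;
    auto it = source.find(id);
    return it == source.end() ? nullptr : it->second;
  }

  void loadPlan() {
    planLoader = std::this_thread::get_id();
    newPlannedViews["v1"] = std::make_shared<LogicalView>();
    assert(getView("v1") != nullptr);        // visible to link creation on this thread
    plannedViews.swap(newPlannedViews);      // publish the new plan
    newPlannedViews.clear();
    planLoader = std::thread::id();          // reset the loader marker
  }
};

int main() {
  ClusterInfoSketch ci;
  ci.loadPlan();
  return ci.getView("v1") ? 0 : 1;           // after loadPlan(), normal lookup works
}
```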
|
||||
|
||||
|
@ -415,7 +441,6 @@ void ClusterInfo::loadPlan() {
|
|||
decltype(_shards) newShards;
|
||||
decltype(_shardServers) newShardServers;
|
||||
decltype(_shardKeys) newShardKeys;
|
||||
decltype(_plannedViews) newViews;
|
||||
|
||||
bool swapDatabases = false;
|
||||
bool swapCollections = false;
|
||||
|
@ -432,6 +457,118 @@ void ClusterInfo::loadPlan() {
|
|||
swapDatabases = true;
|
||||
}
|
||||
|
||||
// Ensure views are being created BEFORE collections to allow
|
||||
// links to find them
|
||||
// Immediate children of "Views" are database names, then ids
|
||||
// of views, then one JSON object with the description:
|
||||
|
||||
// "Plan":{"Views": {
|
||||
// "_system": {
|
||||
// "654321": {
|
||||
// "id": "654321",
|
||||
// "name": "v",
|
||||
// "collections": [
|
||||
// <list of cluster-wide collection IDs of linked collections>
|
||||
// ]
|
||||
// },...
|
||||
// },...
|
||||
// }}
|
||||
|
||||
// Now the same for views:
|
||||
databasesSlice = planSlice.get("Views"); // format above
|
||||
if (databasesSlice.isObject()) {
|
||||
bool isCoordinator = ServerState::instance()->isCoordinator();
|
||||
for (auto const& databasePairSlice :
|
||||
VPackObjectIterator(databasesSlice)) {
|
||||
VPackSlice const& viewsSlice = databasePairSlice.value;
|
||||
if (!viewsSlice.isObject()) {
|
||||
continue;
|
||||
}
|
||||
std::string const databaseName = databasePairSlice.key.copyString();
|
||||
TRI_vocbase_t* vocbase = nullptr;
|
||||
if (isCoordinator) {
|
||||
vocbase = databaseFeature->lookupDatabaseCoordinator(databaseName);
|
||||
} else {
|
||||
vocbase = databaseFeature->lookupDatabase(databaseName);
|
||||
}
|
||||
|
||||
if (vocbase == nullptr) {
|
||||
// No database with this name found.
|
||||
// We have an invalid state here.
|
||||
continue;
|
||||
}
|
||||
|
||||
for (auto const& viewPairSlice :
|
||||
VPackObjectIterator(viewsSlice)) {
|
||||
VPackSlice const& viewSlice = viewPairSlice.value;
|
||||
if (!viewSlice.isObject()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
std::string const viewId =
|
||||
viewPairSlice.key.copyString();
|
||||
|
||||
try {
|
||||
auto preCommit = [this, viewId, databaseName](std::shared_ptr<LogicalView> const& view)->bool {
|
||||
auto& views = _newPlannedViews[databaseName];
|
||||
// register with name as well as with id:
|
||||
views.reserve(views.size() + 2);
|
||||
views[viewId] = view;
|
||||
views[view->name()] = view;
|
||||
return true;
|
||||
};
|
||||
|
||||
const auto newView = LogicalView::create(
|
||||
*vocbase,
|
||||
viewPairSlice.value,
|
||||
false, // false == coming from Agency
|
||||
newPlanVersion,
|
||||
preCommit
|
||||
);
|
||||
|
||||
if (!newView) {
|
||||
LOG_TOPIC(ERR, Logger::AGENCY)
|
||||
<< "Failed to create view '" << viewId
|
||||
<< "'. The view will be ignored for now and the invalid information "
|
||||
"will be repaired. VelocyPack: "
|
||||
<< viewSlice.toJson();
|
||||
continue;
|
||||
}
|
||||
} catch (std::exception const& ex) {
|
||||
// The Plan contains invalid view information.
|
||||
// This should not happen in healthy situations.
|
||||
// If it happens in unhealthy situations the
|
||||
// cluster should not fail.
|
||||
LOG_TOPIC(ERR, Logger::AGENCY)
|
||||
<< "Failed to load information for view '" << viewId
|
||||
<< "': " << ex.what() << ". invalid information in Plan. The "
|
||||
"view will be ignored for now and the invalid information "
|
||||
"will be repaired. VelocyPack: "
|
||||
<< viewSlice.toJson();
|
||||
|
||||
TRI_ASSERT(false);
|
||||
continue;
|
||||
} catch (...) {
|
||||
// The Plan contains invalid view information.
|
||||
// This should not happen in healthy situations.
|
||||
// If it happens in unhealthy situations the
|
||||
// cluster should not fail.
|
||||
LOG_TOPIC(ERR, Logger::AGENCY)
|
||||
<< "Failed to load information for view '" << viewId
|
||||
<< ". invalid information in Plan. The view will "
|
||||
"be ignored for now and the invalid information will "
|
||||
"be repaired. VelocyPack: "
|
||||
<< viewSlice.toJson();
|
||||
|
||||
TRI_ASSERT(false);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
swapViews = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Immediate children of "Collections" are database names, then ids
|
||||
// of collections, then one JSON object with the description:
|
||||
|
||||
|
@ -628,112 +765,6 @@ void ClusterInfo::loadPlan() {
|
|||
}
|
||||
}
|
||||
|
||||
// Immediate children of "Views" are database names, then ids
|
||||
// of views, then one JSON object with the description:
|
||||
|
||||
// "Plan":{"Views": {
|
||||
// "_system": {
|
||||
// "654321": {
|
||||
// "id": "654321",
|
||||
// "name": "v",
|
||||
// "collections": [
|
||||
// <list of cluster-wide collection IDs of linked collections>
|
||||
// ]
|
||||
// },...
|
||||
// },...
|
||||
// }}
|
||||
|
||||
// Now the same for views:
|
||||
databasesSlice = planSlice.get("Views"); // format above
|
||||
if (databasesSlice.isObject()) {
|
||||
bool isCoordinator = ServerState::instance()->isCoordinator();
|
||||
for (auto const& databasePairSlice :
|
||||
VPackObjectIterator(databasesSlice)) {
|
||||
VPackSlice const& viewsSlice = databasePairSlice.value;
|
||||
if (!viewsSlice.isObject()) {
|
||||
continue;
|
||||
}
|
||||
DatabaseViews databaseViews;
|
||||
std::string const databaseName = databasePairSlice.key.copyString();
|
||||
TRI_vocbase_t* vocbase = nullptr;
|
||||
if (isCoordinator) {
|
||||
vocbase = databaseFeature->lookupDatabaseCoordinator(databaseName);
|
||||
} else {
|
||||
vocbase = databaseFeature->lookupDatabase(databaseName);
|
||||
}
|
||||
|
||||
if (vocbase == nullptr) {
|
||||
// No database with this name found.
|
||||
// We have an invalid state here.
|
||||
continue;
|
||||
}
|
||||
|
||||
for (auto const& viewPairSlice :
|
||||
VPackObjectIterator(viewsSlice)) {
|
||||
VPackSlice const& viewSlice = viewPairSlice.value;
|
||||
if (!viewSlice.isObject()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
std::string const viewId =
|
||||
viewPairSlice.key.copyString();
|
||||
|
||||
try {
|
||||
const auto newView = LogicalView::create(
|
||||
*vocbase, viewPairSlice.value, false, newPlanVersion // false == coming from Agency
|
||||
);
|
||||
|
||||
if (!newView) {
|
||||
LOG_TOPIC(ERR, Logger::AGENCY)
|
||||
<< "Failed to create view '" << viewId
|
||||
<< "'. The view will be ignored for now and the invalid information "
|
||||
"will be repaired. VelocyPack: "
|
||||
<< viewSlice.toJson();
|
||||
continue;
|
||||
}
|
||||
|
||||
std::string const viewName = newView->name();
|
||||
// register with name as well as with id:
|
||||
databaseViews.emplace(std::make_pair(viewName, newView));
|
||||
databaseViews.emplace(std::make_pair(viewId, newView));
|
||||
|
||||
} catch (std::exception const& ex) {
|
||||
// The Plan contains invalid view information.
|
||||
// This should not happen in healthy situations.
|
||||
// If it happens in unhealthy situations the
|
||||
// cluster should not fail.
|
||||
LOG_TOPIC(ERR, Logger::AGENCY)
|
||||
<< "Failed to load information for view '" << viewId
|
||||
<< "': " << ex.what() << ". invalid information in Plan. The "
|
||||
"view will be ignored for now and the invalid information "
|
||||
"will be repaired. VelocyPack: "
|
||||
<< viewSlice.toJson();
|
||||
|
||||
TRI_ASSERT(false);
|
||||
continue;
|
||||
} catch (...) {
|
||||
// The Plan contains invalid view information.
|
||||
// This should not happen in healthy situations.
|
||||
// If it happens in unhealthy situations the
|
||||
// cluster should not fail.
|
||||
LOG_TOPIC(ERR, Logger::AGENCY)
|
||||
<< "Failed to load information for view '" << viewId
|
||||
<< ". invalid information in Plan. The view will "
|
||||
"be ignored for now and the invalid information will "
|
||||
"be repaired. VelocyPack: "
|
||||
<< viewSlice.toJson();
|
||||
|
||||
TRI_ASSERT(false);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
newViews.emplace(
|
||||
std::make_pair(databaseName, databaseViews));
|
||||
swapViews = true;
|
||||
}
|
||||
}
|
||||
|
||||
WRITE_LOCKER(writeLocker, _planProt.lock);
|
||||
_plan = planBuilder;
|
||||
_planVersion = newPlanVersion;
|
||||
|
@ -747,7 +778,7 @@ void ClusterInfo::loadPlan() {
|
|||
_shardServers.swap(newShardServers);
|
||||
}
|
||||
if (swapViews) {
|
||||
_plannedViews.swap(newViews);
|
||||
_plannedViews.swap(_newPlannedViews);
|
||||
}
|
||||
_planProt.doneVersion = storedVersion;
|
||||
_planProt.isValid = true; // will never be reset to false
|
||||
|
@ -1033,7 +1064,35 @@ std::shared_ptr<CollectionInfoCurrent> ClusterInfo::getCollectionCurrent(
|
|||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
std::shared_ptr<LogicalView> ClusterInfo::getView(
|
||||
DatabaseID const& databaseID, ViewID const& viewID) {
|
||||
DatabaseID const& databaseID,
|
||||
ViewID const& viewID
|
||||
) {
|
||||
auto lookupView = [](
|
||||
AllViews const& dbs,
|
||||
DatabaseID const& databaseID,
|
||||
ViewID const& viewID
|
||||
) noexcept -> std::shared_ptr<LogicalView> {
|
||||
// look up database by id
|
||||
auto const db = dbs.find(databaseID);
|
||||
|
||||
if (db != dbs.end()) {
|
||||
// look up view by id (or by name)
|
||||
auto& views = db->second;
|
||||
auto const view = views.find(viewID);
|
||||
|
||||
if (view != views.end()) {
|
||||
return view->second;
|
||||
}
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
};
|
||||
|
||||
if (std::this_thread::get_id() == _planLoader) {
|
||||
// we're loading plan, lookup inside immediately created planned views
|
||||
// already protected by _planProt.mutex, don't need to lock there
|
||||
return lookupView(_newPlannedViews, databaseID, viewID);
|
||||
}
|
||||
|
||||
int tries = 0;
|
||||
|
||||
|
@ -1045,16 +1104,10 @@ std::shared_ptr<LogicalView> ClusterInfo::getView(
|
|||
while (true) { // left by break
|
||||
{
|
||||
READ_LOCKER(readLocker, _planProt.lock);
|
||||
// look up database by id
|
||||
auto it = _plannedViews.find(databaseID);
|
||||
auto const view = lookupView(_plannedViews, databaseID, viewID);
|
||||
|
||||
if (it != _plannedViews.end()) {
|
||||
// look up view by id (or by name)
|
||||
auto it2 = (*it).second.find(viewID);
|
||||
|
||||
if (it2 != (*it).second.end()) {
|
||||
return (*it2).second;
|
||||
}
|
||||
if (view) {
|
||||
return view;
|
||||
}
|
||||
}
|
||||
if (++tries >= 2) {
|
||||
|
@ -3374,4 +3427,4 @@ std::unordered_map<ServerID, std::string> ClusterInfo::getServerAliases() {
|
|||
|
||||
// -----------------------------------------------------------------------------
|
||||
// --SECTION-- END-OF-FILE
|
||||
// -----------------------------------------------------------------------------
|
||||
// -----------------------------------------------------------------------------
|
||||
|
|
|
@ -333,7 +333,9 @@ class ClusterInfo {
|
|||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
std::shared_ptr<LogicalView> getView(
|
||||
DatabaseID const& vocbase, ViewID const& viewID);
|
||||
DatabaseID const& vocbase,
|
||||
ViewID const& viewID
|
||||
);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief ask about all views of a database
|
||||
|
@ -688,6 +690,8 @@ class ClusterInfo {
|
|||
std::unordered_map<ShardID, std::vector<ServerID>> _shardServers;
|
||||
|
||||
AllViews _plannedViews; // from Plan/Views/
|
||||
AllViews _newPlannedViews; // views that have been created during `loadPlan` execution
|
||||
std::atomic<std::thread::id> _planLoader; // thread id that is loading plan
|
||||
|
||||
// The Current state:
|
||||
AllCollectionsCurrent _currentCollections; // from Current/Collections/
|
||||
|
@ -739,4 +743,4 @@ class ClusterInfo {
|
|||
|
||||
} // end namespace arangodb
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -240,12 +240,14 @@ void ClusterIndexFactory::prepareIndexes(
|
|||
}
|
||||
|
||||
auto idx = prepareIndexFromSlice(v, false, col, true);
|
||||
|
||||
if (!idx) {
|
||||
LOG_TOPIC(ERR, arangodb::Logger::ENGINES)
|
||||
<< "error creating index from definition '" << indexesSlice.toString()
|
||||
<< "'";
|
||||
<< "error creating index from definition '" << v.toString() << "'";
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
indexes.emplace_back(std::move(idx));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -104,6 +104,23 @@ arangodb::aql::AqlValue noop(
|
|||
);
|
||||
}
|
||||
|
||||
void registerFunctions(arangodb::aql::AqlFunctionFeature& functions) {
|
||||
arangodb::iresearch::addFunction(functions, {
|
||||
"__ARANGOSEARCH_SCORE_DEBUG", // name
|
||||
".", // value to convert
|
||||
true, // deterministic
|
||||
false, // can't throw
|
||||
true, // can be run on server
|
||||
[](arangodb::aql::Query*,
|
||||
arangodb::transaction::Methods*,
|
||||
arangodb::SmallVector<arangodb::aql::AqlValue> const& args) noexcept {
|
||||
auto arg = arangodb::aql::Functions::ExtractFunctionParameterValue(args, 0);
|
||||
auto const floatValue = *reinterpret_cast<float_t const*>(arg.slice().begin());
|
||||
return arangodb::aql::AqlValue(arangodb::aql::AqlValueHintDouble(double_t(floatValue)));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void registerFilters(arangodb::aql::AqlFunctionFeature& functions) {
|
||||
arangodb::iresearch::addFunction(functions, {
|
||||
"EXISTS", // name
|
||||
|
@ -246,13 +263,12 @@ void registerViewFactory() {
|
|||
}
|
||||
}
|
||||
|
||||
template<typename Impl>
|
||||
arangodb::Result transactionDataSourceRegistrationCallback(
|
||||
arangodb::LogicalDataSource& dataSource,
|
||||
arangodb::transaction::Methods& trx
|
||||
) {
|
||||
if (arangodb::iresearch::DATA_SOURCE_TYPE != dataSource.type()) {
|
||||
return arangodb::Result(); // not an IResearchView (noop)
|
||||
return {}; // not an IResearchView (noop)
|
||||
}
|
||||
|
||||
// TODO FIXME find a better way to look up a LogicalView
|
||||
|
@ -266,38 +282,21 @@ arangodb::Result transactionDataSourceRegistrationCallback(
|
|||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failure to get LogicalView while processing a TransactionState by IResearchFeature for name '" << dataSource.name() << "'";
|
||||
|
||||
return arangodb::Result(TRI_ERROR_INTERNAL);
|
||||
return {TRI_ERROR_INTERNAL};
|
||||
}
|
||||
|
||||
// TODO FIXME find a better way to look up an IResearch View
|
||||
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
|
||||
auto* impl = dynamic_cast<Impl*>(view);
|
||||
#else
|
||||
auto* impl = static_cast<Impl*>(view);
|
||||
#endif
|
||||
|
||||
if (!impl) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failure to get IResearchView while processing a TransactionState by IResearchFeature for cid '" << dataSource.name() << "'";
|
||||
|
||||
return arangodb::Result(TRI_ERROR_INTERNAL);
|
||||
}
|
||||
auto& impl = arangodb::LogicalView::cast<arangodb::iresearch::IResearchView>(*view);
|
||||
|
||||
return arangodb::Result(
|
||||
impl->apply(trx) ? TRI_ERROR_NO_ERROR : TRI_ERROR_INTERNAL
|
||||
impl.apply(trx) ? TRI_ERROR_NO_ERROR : TRI_ERROR_INTERNAL
|
||||
);
|
||||
}
|
||||
|
||||
void registerTransactionDataSourceRegistrationCallback() {
|
||||
if (arangodb::ServerState::instance()->isCoordinator()) {
|
||||
// NOOP
|
||||
} else if(arangodb::ServerState::instance()->isDBServer()) {
|
||||
if (arangodb::ServerState::instance()->isSingleServer()) {
|
||||
arangodb::transaction::Methods::addDataSourceRegistrationCallback(
|
||||
transactionDataSourceRegistrationCallback<arangodb::iresearch::IResearchViewDBServer>
|
||||
);
|
||||
} else {
|
||||
arangodb::transaction::Methods::addDataSourceRegistrationCallback(
|
||||
transactionDataSourceRegistrationCallback<arangodb::iresearch::IResearchView>
|
||||
&transactionDataSourceRegistrationCallback
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@ -379,6 +378,7 @@ void IResearchFeature::start() {
|
|||
if (functions) {
|
||||
registerFilters(*functions);
|
||||
registerScorers(*functions);
|
||||
registerFunctions(*functions);
|
||||
} else {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failure to find feature 'AQLFunctions' while registering iresearch filters";
|
||||
|
@ -411,4 +411,4 @@ NS_END // arangodb
|
|||
|
||||
// -----------------------------------------------------------------------------
|
||||
// --SECTION-- END-OF-FILE
|
||||
// -----------------------------------------------------------------------------
|
||||
// -----------------------------------------------------------------------------
|
||||
|
|
|
@ -64,6 +64,7 @@ IResearchLink::IResearchLink(
|
|||
_dropCollectionInDestructor(false),
|
||||
_id(iid),
|
||||
_view(nullptr) {
|
||||
// IResearchLink is not intended to be used on a coordinator
|
||||
TRI_ASSERT(!ServerState::instance()->isCoordinator());
|
||||
}
|
||||
|
||||
|
@ -158,16 +159,6 @@ int IResearchLink::drop() {
|
|||
|
||||
_dropCollectionInDestructor = false; // will do drop now
|
||||
|
||||
if (arangodb::ServerState::instance()->isDBServer()) {
|
||||
// TODO FIXME find a better way to look up an iResearch View
|
||||
auto* view = LogicalView::cast<IResearchViewDBServer>(_wiew.get());
|
||||
|
||||
return view
|
||||
? view->drop(_collection->id()).errorNumber()
|
||||
: TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND
|
||||
;
|
||||
}
|
||||
|
||||
return _view->drop(_collection->id());
|
||||
}
|
||||
|
||||
|
@ -213,76 +204,105 @@ bool IResearchLink::init(arangodb::velocypack::Slice const& definition) {
|
|||
auto identifier = definition.get(StaticStrings::ViewIdField);
|
||||
auto viewId = identifier.getNumber<uint64_t>();
|
||||
auto& vocbase = _collection->vocbase();
|
||||
auto logicalView = vocbase.lookupView(viewId);
|
||||
auto logicalView = vocbase.lookupView(viewId); // will only contain IResearchView (even for a DBServer)
|
||||
|
||||
if (!logicalView
|
||||
|| arangodb::iresearch::DATA_SOURCE_TYPE != logicalView->type()) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "error looking up view '" << viewId << "': no such view";
|
||||
return false; // no such view
|
||||
}
|
||||
// creation of link on a DBServer
|
||||
if (!logicalView && arangodb::ServerState::instance()->isDBServer()) {
|
||||
auto* ci = ClusterInfo::instance();
|
||||
|
||||
std::shared_ptr<arangodb::LogicalView> wiew;
|
||||
if (!ci) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failure to find 'ClusterInfo' instance for lookup of link '" << _id << "'";
|
||||
TRI_set_errno(TRI_ERROR_INTERNAL);
|
||||
|
||||
// create the IResearchView for the specific collection (on DBServer)
|
||||
if (arangodb::ServerState::instance()->isDBServer()) {
|
||||
// TODO FIXME find a better way to look up an iResearch View
|
||||
auto* view = LogicalView::cast<IResearchViewDBServer>(logicalView.get());
|
||||
return false;
|
||||
}
|
||||
|
||||
if (view) {
|
||||
wiew = logicalView; // remember the DBServer view instance
|
||||
logicalView = view->ensure(_collection->id()); // repoint LogicalView at the per-cid instance
|
||||
} else {
|
||||
logicalView = nullptr;
|
||||
auto logicalWiew = ci->getView(vocbase.name(), std::to_string(viewId));
|
||||
auto* wiew = LogicalView::cast<IResearchViewDBServer>(logicalWiew.get());
|
||||
|
||||
if (wiew) {
|
||||
auto collection = vocbase.lookupCollection(_collection->id());
|
||||
|
||||
// this is a cluster-wide collection/index/link (per-cid view links have their corresponding collections in vocbase)
|
||||
if (!collection) {
|
||||
auto clusterCol = ci->getCollectionCurrent(
|
||||
vocbase.name(), std::to_string(_collection->id())
|
||||
);
|
||||
|
||||
if (clusterCol) {
|
||||
for (auto& entry: clusterCol->errorNum()) {
|
||||
collection = vocbase.lookupCollection(entry.first); // find shard collection
|
||||
|
||||
if (collection) {
|
||||
// ensure the shard collection is registered with the cluster-wide view
|
||||
// required from creating snapshots for per-cid views loaded from WAL
|
||||
// only register existing per-cid view instances, do not create new per-cid view
|
||||
// instances since they will be created/registered by their per-cid links just below
|
||||
wiew->ensure(collection->id(), false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true; // leave '_view' uninitialized to mark the index as unloaded/unusable
|
||||
}
|
||||
|
||||
// TODO FIXME find a better way to look up an iResearch View
|
||||
auto* view = LogicalView::cast<IResearchView>(logicalView.get());
|
||||
logicalView = wiew->ensure(_collection->id()); // repoint LogicalView at the per-cid instance
|
||||
}
|
||||
}
|
||||
|
||||
if (!view) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "error finding view: '" << viewId << "' for link '" << _id << "'";
|
||||
if (!logicalView
|
||||
|| arangodb::iresearch::DATA_SOURCE_TYPE != logicalView->type()) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "error finding view: '" << viewId << "' for link '" << _id << "' : no such view";
|
||||
|
||||
return false;
|
||||
}
|
||||
return false; // no such view
|
||||
}
|
||||
|
||||
auto viewSelf = view->self();
|
||||
auto* view = LogicalView::cast<IResearchView>(logicalView.get());
|
||||
|
||||
if (!viewSelf) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "error read-locking view: '" << viewId
|
||||
<< "' for link '" << _id << "'";
|
||||
if (!view) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "error finding view: '" << viewId << "' for link '" << _id << "'";
|
||||
|
||||
return false;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
_viewLock = std::unique_lock<ReadMutex>(viewSelf->mutex()); // acquire read-lock before checking view
|
||||
auto viewSelf = view->self();
|
||||
|
||||
if (!viewSelf->get()) {
|
||||
_viewLock.unlock();
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "error getting view: '" << viewId << "' for link '" << _id << "'";
|
||||
if (!viewSelf) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "error read-locking view: '" << viewId
|
||||
<< "' for link '" << _id << "'";
|
||||
|
||||
return false;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
_dropCollectionInDestructor = view->emplace(_collection->id()); // track if this is the instance that called emplace
|
||||
_meta = std::move(meta);
|
||||
_view = std::move(view);
|
||||
_wiew = std::move(wiew);
|
||||
_viewLock = std::unique_lock<ReadMutex>(viewSelf->mutex()); // acquire read-lock before checking view
|
||||
|
||||
// FIXME TODO remove once View::updateProperties(...) will be fixed to write
|
||||
// the update delta into the WAL marker instead of the full persisted state
|
||||
{
|
||||
auto* engine = arangodb::EngineSelectorFeature::ENGINE;
|
||||
if (!viewSelf->get()) {
|
||||
_viewLock.unlock();
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "error getting view: '" << viewId << "' for link '" << _id << "'";
|
||||
|
||||
if (engine && engine->inRecovery()) {
|
||||
_defaultId = _wiew ? _wiew->id() : _view->id();
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
_dropCollectionInDestructor = view->emplace(_collection->id()); // track if this is the instance that called emplace
|
||||
_meta = std::move(meta);
|
||||
_view = std::move(view);
|
||||
|
||||
// FIXME TODO remove once View::updateProperties(...) will be fixed to write
|
||||
// the update delta into the WAL marker instead of the full persisted state
|
||||
{
|
||||
auto* engine = arangodb::EngineSelectorFeature::ENGINE;
|
||||
|
||||
if (engine && engine->inRecovery()) {
|
||||
_defaultId = _view->id();
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
Result IResearchLink::insert(
|
||||
|
@ -343,11 +363,7 @@ bool IResearchLink::json(
|
|||
ReadMutex mutex(_mutex); // '_view' can be asynchronously modified
|
||||
SCOPED_LOCK(mutex);
|
||||
|
||||
if (_wiew) {
|
||||
builder.add(
|
||||
StaticStrings::ViewIdField, arangodb::velocypack::Value(_wiew->id())
|
||||
);
|
||||
} else if (_view) {
|
||||
if (_view) {
|
||||
builder.add(
|
||||
StaticStrings::ViewIdField, arangodb::velocypack::Value(_view->id())
|
||||
);
|
||||
|
@ -375,11 +391,10 @@ bool IResearchLink::matchesDefinition(VPackSlice const& slice) const {
|
|||
}
|
||||
|
||||
auto identifier = slice.get(StaticStrings::ViewIdField);
|
||||
auto viewId = _wiew ? _wiew->id() : _view->id();
|
||||
|
||||
if (!identifier.isNumber()
|
||||
|| uint64_t(identifier.getInt()) != identifier.getUInt()
|
||||
|| identifier.getUInt() != viewId) {
|
||||
|| identifier.getUInt() != _view->id()) {
|
||||
return false; // iResearch View names of current object and slice do not match
|
||||
}
|
||||
} else if (_view) {
|
||||
|
@ -483,7 +498,7 @@ int IResearchLink::unload() {
|
|||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
_defaultId = _wiew ? _wiew->id() : _view->id(); // remember view ID just in case (e.g. call to toVelocyPack(...) after unload())
|
||||
_defaultId = _view->id(); // remember view ID just in case (e.g. call to toVelocyPack(...) after unload())
|
||||
|
||||
if (!_collection) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
|
|
|
@ -196,7 +196,6 @@ class IResearchLink {
|
|||
IResearchLinkMeta _meta; // how this collection should be indexed
|
||||
mutable irs::async_utils::read_write_mutex _mutex; // for use with _view to allow asynchronous disassociation
|
||||
IResearchView* _view; // effectively the IResearch datastore itself (nullptr == not associated)
|
||||
std::shared_ptr<arangodb::LogicalView> _wiew; // the DBServer view instance (valid only on DBServer)
|
||||
std::unique_lock<irs::async_utils::read_write_mutex::read_mutex> _viewLock; // prevent view deallocation (lock @ AsyncSelf)
|
||||
}; // IResearchLink
|
||||
|
||||
|
|
|
@ -126,31 +126,35 @@ bool IResearchLinkCoordinator::init(VPackSlice definition) {
|
|||
auto identifier = definition.get(StaticStrings::ViewIdField);
|
||||
auto viewId = identifier.getNumber<uint64_t>();
|
||||
auto& vocbase = _collection->vocbase();
|
||||
auto logicalView = vocbase.lookupView(viewId);
|
||||
|
||||
if (!logicalView
|
||||
|| arangodb::iresearch::DATA_SOURCE_TYPE != logicalView->type()) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
TRI_ASSERT(ClusterInfo::instance());
|
||||
auto logicalView = ClusterInfo::instance()->getView(
|
||||
vocbase.name(), basics::StringUtils::itoa(viewId)
|
||||
);
|
||||
|
||||
if (!logicalView
|
||||
|| arangodb::iresearch::DATA_SOURCE_TYPE != logicalView->type()) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "error looking up view '" << viewId << "': no such view";
|
||||
return false; // no such view
|
||||
}
|
||||
return false; // no such view
|
||||
}
|
||||
|
||||
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
|
||||
auto view = std::dynamic_pointer_cast<IResearchViewCoordinator>(logicalView);
|
||||
#else
|
||||
auto view = std::static_pointer_cast<IResearchViewCoordinator>(logicalView);
|
||||
#endif
|
||||
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
|
||||
auto view = std::dynamic_pointer_cast<IResearchViewCoordinator>(logicalView);
|
||||
#else
|
||||
auto view = std::static_pointer_cast<IResearchViewCoordinator>(logicalView);
|
||||
#endif
|
||||
|
||||
if (!view) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
if (!view) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "error finding view: '" << viewId << "' for link '" << id() << "'";
|
||||
|
||||
return false;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
_view = view;
|
||||
_view = view;
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
/*static*/ IResearchLinkCoordinator::ptr IResearchLinkCoordinator::make(
|
||||
|
@ -233,4 +237,4 @@ char const* IResearchLinkCoordinator::typeName() const {
|
|||
|
||||
// -----------------------------------------------------------------------------
|
||||
// --SECTION-- END-OF-FILE
|
||||
// -----------------------------------------------------------------------------
|
||||
// -----------------------------------------------------------------------------
|
||||
|
|
|
@ -258,12 +258,17 @@ size_t directoryMemory(irs::directory const& directory, TRI_voc_cid_t viewId) no
|
|||
/// similar to the data path calculation for collections
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
irs::utf8_path getPersistedPath(
|
||||
arangodb::DatabasePathFeature const& dbPathFeature, TRI_voc_cid_t id
|
||||
arangodb::DatabasePathFeature const& dbPathFeature,
|
||||
TRI_vocbase_t const& vocbase,
|
||||
TRI_voc_cid_t id
|
||||
) {
|
||||
irs::utf8_path dataPath(dbPathFeature.directory());
|
||||
static const std::string subPath("databases");
|
||||
static const std::string dbPath("database-");
|
||||
|
||||
dataPath /= subPath;
|
||||
dataPath /= dbPath;
|
||||
dataPath += std::to_string(vocbase.id());
|
||||
dataPath /= arangodb::iresearch::DATA_SOURCE_TYPE.name();
|
||||
dataPath += "-";
|
||||
dataPath += std::to_string(id);
|
||||
|
@ -683,7 +688,7 @@ IResearchView::IResearchView(
|
|||
_asyncTerminate(false),
|
||||
_memoryNode(&_memoryNodes[0]), // set current memory node (arbitrarily 0)
|
||||
_toFlush(&_memoryNodes[1]), // set flush-pending memory node (not same as _memoryNode)
|
||||
_storePersisted(getPersistedPath(dbPathFeature, id())),
|
||||
_storePersisted(getPersistedPath(dbPathFeature, vocbase, id())),
|
||||
_threadPool(0, 0), // 0 == create pool with no threads, i.e. not running anything
|
||||
_inRecovery(false) {
|
||||
// set up in-recovery insertion hooks
|
||||
|
@ -748,21 +753,30 @@ IResearchView::IResearchView(
|
|||
auto* viewPtr = this;
|
||||
|
||||
// initialize transaction read callback
|
||||
_trxReadCallback = [viewPtr](arangodb::TransactionState& state)->void {
|
||||
if (arangodb::transaction::Status::RUNNING != state.status()) {
|
||||
_trxReadCallback = [viewPtr](
|
||||
arangodb::transaction::Methods& trx,
|
||||
arangodb::transaction::Status status
|
||||
)->void {
|
||||
if (arangodb::transaction::Status::RUNNING != status) {
|
||||
return; // NOOP
|
||||
}
|
||||
|
||||
viewPtr->snapshot(state, true);
|
||||
viewPtr->snapshot(trx, true);
|
||||
};
|
||||
|
||||
// initialize transaction write callback
|
||||
_trxWriteCallback = [viewPtr](arangodb::TransactionState& state)->void {
|
||||
if (arangodb::transaction::Status::COMMITTED != state.status()) {
|
||||
_trxWriteCallback = [viewPtr](
|
||||
arangodb::transaction::Methods& trx,
|
||||
arangodb::transaction::Status status
|
||||
)->void {
|
||||
auto* state = trx.state();
|
||||
|
||||
// check state of the top-most transaction only
|
||||
if (!state || arangodb::transaction::Status::COMMITTED != state->status()) {
|
||||
return; // NOOP
|
||||
}
|
||||
|
||||
auto* cookie = ViewStateHelper::write(state, *viewPtr);
|
||||
auto* cookie = ViewStateHelper::write(*state, *viewPtr);
|
||||
TRI_ASSERT(cookie); // must have been added together with this callback
|
||||
ReadMutex mutex(viewPtr->_mutex); // '_memoryStore'/'_storePersisted' can be asynchronously modified
|
||||
|
||||
|
@ -797,20 +811,20 @@ IResearchView::IResearchView(
|
|||
viewPtr->_asyncCondition.notify_all(); // trigger recheck of sync
|
||||
}
|
||||
|
||||
if (state.waitForSync() && !viewPtr->sync()) {
|
||||
if (state->waitForSync() && !viewPtr->sync()) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failed to sync while committing transaction for IResearch view '" << viewPtr->name()
|
||||
<< "', tid '" << state.id() << "'";
|
||||
<< "', tid '" << state->id() << "'";
|
||||
}
|
||||
} catch (std::exception const& e) {
|
||||
LOG_TOPIC(ERR, arangodb::iresearch::TOPIC)
|
||||
<< "caught exception while committing transaction for IResearch view '" << viewPtr->name()
|
||||
<< "', tid '" << state.id() << "': " << e.what();
|
||||
<< "', tid '" << state->id() << "': " << e.what();
|
||||
IR_LOG_EXCEPTION();
|
||||
} catch (...) {
|
||||
LOG_TOPIC(ERR, arangodb::iresearch::TOPIC)
|
||||
<< "caught exception while committing transaction for iResearch view '" << viewPtr->name()
|
||||
<< "', tid '" << state.id() << "'";
|
||||
<< "', tid '" << state->id() << "'";
|
||||
IR_LOG_EXCEPTION();
|
||||
}
|
||||
};
|
||||
|
@ -970,15 +984,8 @@ IResearchView::MemoryStore& IResearchView::activeMemoryStore() const {
|
|||
}
|
||||
|
||||
bool IResearchView::apply(arangodb::transaction::Methods& trx) {
|
||||
auto* state = trx.state();
|
||||
|
||||
if (!state) {
|
||||
return false;
|
||||
}
|
||||
|
||||
state->addStatusChangeCallback(_trxReadCallback);
|
||||
|
||||
return true;
|
||||
// called from IResearchView when this view is added to a transaction
|
||||
return trx.addStatusChangeCallback(&_trxReadCallback); // add snapshot
|
||||
}
|
||||
|
||||
int IResearchView::drop(TRI_voc_cid_t cid) {
|
||||
|
@ -1355,15 +1362,14 @@ int IResearchView::insert(
|
|||
|
||||
store = ptr.get();
|
||||
|
||||
if (!ViewStateHelper::write(state, *this, std::move(ptr))) {
|
||||
if (!ViewStateHelper::write(state, *this, std::move(ptr))
|
||||
|| !trx.addStatusChangeCallback(&_trxWriteCallback)) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failed to store state into a TransactionState for insert into IResearch view '" << name() << "'"
|
||||
<< "', tid '" << state.id() << "', collection '" << cid << "', revision '" << documentId.id() << "'";
|
||||
|
||||
return TRI_ERROR_INTERNAL;
|
||||
}
|
||||
|
||||
state.addStatusChangeCallback(_trxWriteCallback);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1434,15 +1440,14 @@ int IResearchView::insert(
|
|||
|
||||
store = ptr.get();
|
||||
|
||||
if (!ViewStateHelper::write(state, *this, std::move(ptr))) {
|
||||
if (!ViewStateHelper::write(state, *this, std::move(ptr))
|
||||
|| !trx.addStatusChangeCallback(&_trxWriteCallback)) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failed to store state into a TransactionState for insert into IResearch view '" << name() << "'"
|
||||
<< "', tid '" << state.id() << "', collection '" << cid << "'";
|
||||
|
||||
return TRI_ERROR_INTERNAL;
|
||||
}
|
||||
|
||||
state.addStatusChangeCallback(_trxWriteCallback);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1681,15 +1686,14 @@ int IResearchView::remove(
|
|||
|
||||
store = ptr.get();
|
||||
|
||||
if (!ViewStateHelper::write(state, *this, std::move(ptr))) {
|
||||
if (!ViewStateHelper::write(state, *this, std::move(ptr))
|
||||
|| !trx.addStatusChangeCallback(&_trxWriteCallback)) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failed to store state into a TransactionState for insert into IResearch view '" << name() << "'"
|
||||
<< "', tid '" << state.id() << "', collection '" << cid << "', revision '" << documentId.id() << "'";
|
||||
|
||||
return TRI_ERROR_INTERNAL;
|
||||
}
|
||||
|
||||
state.addStatusChangeCallback(_trxWriteCallback);
|
||||
}
|
||||
|
||||
TRI_ASSERT(store && false == !*store);
|
||||
|
@ -1719,10 +1723,19 @@ int IResearchView::remove(
|
|||
}
|
||||
|
||||
PrimaryKeyIndexReader* IResearchView::snapshot(
|
||||
TransactionState& state,
|
||||
transaction::Methods& trx,
|
||||
bool force /*= false*/
|
||||
) const {
|
||||
auto* cookie = ViewStateHelper::read(state, *this);
|
||||
auto* state = trx.state();
|
||||
|
||||
if (!state) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failed to get transaction state while creating IResearchView snapshot";
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
auto* cookie = ViewStateHelper::read(*state, *this);
|
||||
|
||||
if (cookie) {
|
||||
return &(cookie->_snapshot);
|
||||
|
@ -1732,7 +1745,7 @@ PrimaryKeyIndexReader* IResearchView::snapshot(
|
|||
return nullptr;
|
||||
}
|
||||
|
||||
if (state.waitForSync() && !const_cast<IResearchView*>(this)->sync()) {
|
||||
if (state->waitForSync() && !const_cast<IResearchView*>(this)->sync()) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failed to sync while creating snapshot for IResearch view '" << name() << "', previous snapshot will be used instead";
|
||||
}
|
||||
|
@ -1758,23 +1771,23 @@ PrimaryKeyIndexReader* IResearchView::snapshot(
|
|||
} catch (std::exception const& e) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "caught exception while collecting readers for snapshot of IResearch view '" << name()
|
||||
<< "', tid '" << state.id() << "': " << e.what();
|
||||
<< "', tid '" << state->id() << "': " << e.what();
|
||||
IR_LOG_EXCEPTION();
|
||||
|
||||
return nullptr;
|
||||
} catch (...) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "caught exception while collecting readers for snapshot of IResearch view '" << name()
|
||||
<< "', tid '" << state.id() << "'";
|
||||
<< "', tid '" << state->id() << "'";
|
||||
IR_LOG_EXCEPTION();
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (!ViewStateHelper::read(state, *this, std::move(cookiePtr))) {
|
||||
if (!ViewStateHelper::read(*state, *this, std::move(cookiePtr))) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failed to store state into a TransactionState for snapshot of IResearch view '" << name()
|
||||
<< "', tid '" << state.id() << "'";
|
||||
<< "', tid '" << state->id() << "'";
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
@ -1991,25 +2004,17 @@ void IResearchView::verifyKnownCollections() {
|
|||
auto cids = _meta._collections;
|
||||
|
||||
{
|
||||
static const arangodb::transaction::Options defaults;
|
||||
struct State final: public arangodb::TransactionState {
|
||||
State(TRI_vocbase_t& vocbase)
|
||||
: arangodb::TransactionState(vocbase, 0, defaults) {}
|
||||
virtual arangodb::Result abortTransaction(
|
||||
arangodb::transaction::Methods*
|
||||
) override { return TRI_ERROR_NOT_IMPLEMENTED; }
|
||||
virtual arangodb::Result beginTransaction(
|
||||
arangodb::transaction::Hints
|
||||
) override { return TRI_ERROR_NOT_IMPLEMENTED; }
|
||||
virtual arangodb::Result commitTransaction(
|
||||
arangodb::transaction::Methods*
|
||||
) override { return TRI_ERROR_NOT_IMPLEMENTED; }
|
||||
virtual bool hasFailedOperations() const override { return false; }
|
||||
struct DummyTransaction : transaction::Methods {
|
||||
explicit DummyTransaction(std::shared_ptr<transaction::Context> const& ctx)
|
||||
: transaction::Methods(ctx) {
|
||||
}
|
||||
};
|
||||
|
||||
State state(vocbase());
|
||||
transaction::StandaloneContext context(vocbase());
|
||||
std::shared_ptr<transaction::Context> dummy; // intentionally empty
|
||||
DummyTransaction trx(std::shared_ptr<transaction::Context>(dummy, &context)); // use aliasing constructor
|
||||
|
||||
if (!appendKnownCollections(cids, *snapshot(state, true))) {
|
||||
if (!appendKnownCollections(cids, *snapshot(trx, true))) {
|
||||
LOG_TOPIC(ERR, arangodb::iresearch::TOPIC)
|
||||
<< "failed to collect collection IDs for IResearch view '" << id() << "'";
|
||||
|
||||
|
@ -2046,4 +2051,4 @@ NS_END // arangodb
|
|||
|
||||
// -----------------------------------------------------------------------------
|
||||
// --SECTION-- END-OF-FILE
|
||||
// -----------------------------------------------------------------------------
|
||||
// -----------------------------------------------------------------------------
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
|
||||
#include "Containers.h"
|
||||
#include "IResearchViewMeta.h"
|
||||
#include "Transaction/Status.h"
|
||||
#include "VocBase/LogicalDataSource.h"
|
||||
#include "VocBase/LocalDocumentId.h"
|
||||
#include "VocBase/LogicalView.h"
|
||||
|
@ -232,7 +233,7 @@ class IResearchView final: public arangodb::DBServerLogicalView,
|
|||
/// if force == true && no snapshot -> associate current snapshot
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
PrimaryKeyIndexReader* snapshot(
|
||||
TransactionState& state,
|
||||
transaction::Methods& trx,
|
||||
bool force = false
|
||||
) const;
|
||||
|
||||
|
@ -303,8 +304,8 @@ class IResearchView final: public arangodb::DBServerLogicalView,
|
|||
};
|
||||
|
||||
class ViewStateHelper; // forward declaration
|
||||
class ViewStateRead; // forward declaration
|
||||
class ViewStateWrite; // forward declaration
|
||||
struct ViewStateRead; // forward declaration
|
||||
struct ViewStateWrite; // forward declaration
|
||||
|
||||
struct FlushCallbackUnregisterer {
|
||||
void operator()(IResearchView* view) const noexcept;
|
||||
|
@ -354,12 +355,12 @@ class IResearchView final: public arangodb::DBServerLogicalView,
|
|||
PersistedStore _storePersisted;
|
||||
FlushCallback _flushCallback; // responsible for flush callback unregistration
|
||||
irs::async_utils::thread_pool _threadPool;
|
||||
std::function<void(arangodb::TransactionState& state)> _trxReadCallback; // for snapshot(...)
|
||||
std::function<void(arangodb::TransactionState& state)> _trxWriteCallback; // for insert(...)/remove(...)
|
||||
std::function<void(arangodb::transaction::Methods& trx, arangodb::transaction::Status status)> _trxReadCallback; // for snapshot(...)
|
||||
std::function<void(arangodb::transaction::Methods& trx, arangodb::transaction::Status status)> _trxWriteCallback; // for insert(...)/remove(...)
|
||||
std::atomic<bool> _inRecovery;
|
||||
};
|
||||
|
||||
NS_END // iresearch
|
||||
NS_END // arangodb
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -52,18 +52,6 @@
|
|||
|
||||
NS_LOCAL
|
||||
|
||||
inline arangodb::iresearch::IResearchViewNode const& getViewNode(
|
||||
arangodb::iresearch::IResearchViewBlockBase const& block
|
||||
) noexcept {
|
||||
TRI_ASSERT(block.getPlanNode());
|
||||
|
||||
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
|
||||
return dynamic_cast<arangodb::iresearch::IResearchViewNode const&>(*block.getPlanNode());
|
||||
#else
|
||||
return static_cast<arangodb::iresearch::IResearchViewNode const&>(*block.getPlanNode());
|
||||
#endif
|
||||
}
|
||||
|
||||
inline arangodb::aql::RegisterId getRegister(
|
||||
arangodb::aql::Variable const& var,
|
||||
arangodb::aql::ExecutionNode const& node
|
||||
|
@ -128,7 +116,7 @@ IResearchViewBlockBase::IResearchViewBlockBase(
|
|||
IResearchViewNode const& en)
|
||||
: ExecutionBlock(&engine, &en),
|
||||
_filterCtx(1), // arangodb::iresearch::ExpressionExecutionContext
|
||||
_ctx(getViewNode(*this)),
|
||||
_ctx(en),
|
||||
_reader(reader),
|
||||
_filter(irs::filter::prepared::empty()),
|
||||
_execCtx(*_trx, _ctx),
|
||||
|
@ -164,7 +152,7 @@ void IResearchViewBlockBase::reset() {
|
|||
_ctx._data = _buffer.front();
|
||||
_ctx._pos = _pos;
|
||||
|
||||
auto& viewNode = getViewNode(*this);
|
||||
auto& viewNode = *ExecutionNode::castTo<IResearchViewNode const*>(getPlanNode());
|
||||
auto* plan = const_cast<ExecutionPlan*>(viewNode.plan());
|
||||
|
||||
arangodb::iresearch::QueryContext const queryCtx = {
|
||||
|
@ -199,12 +187,14 @@ void IResearchViewBlockBase::reset() {
|
|||
|
||||
// compile order
|
||||
_order = order.prepare();
|
||||
_volatileSort = viewNode.volatile_sort();
|
||||
}
|
||||
|
||||
// compile filter
|
||||
_filter = root.prepare(_reader, _order, irs::boost::no_boost(), _filterCtx);
|
||||
_volatileFilter = _volatileSort || viewNode.volatile_filter();
|
||||
|
||||
auto const& volatility = viewNode.volatility();
|
||||
_volatileSort = volatility.second;
|
||||
_volatileFilter = _volatileSort || volatility.first;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -257,7 +247,9 @@ AqlItemBlock* IResearchViewBlockBase::getSome(size_t atMost) {
|
|||
size_t send = 0;
|
||||
std::unique_ptr<AqlItemBlock> res;
|
||||
|
||||
auto const& planNode = getViewNode(*this);
|
||||
auto const& viewNode = *ExecutionNode::castTo<IResearchViewNode const*>(
|
||||
getPlanNode()
|
||||
);
|
||||
|
||||
do {
|
||||
do {
|
||||
|
@ -295,7 +287,7 @@ AqlItemBlock* IResearchViewBlockBase::getSome(size_t atMost) {
|
|||
TRI_ASSERT(cur);
|
||||
|
||||
auto const curRegs = cur->getNrRegs();
|
||||
auto const nrRegs = planNode.getRegisterPlan()->nrRegs[planNode.getDepth()];
|
||||
auto const nrRegs = viewNode.getRegisterPlan()->nrRegs[viewNode.getDepth()];
|
||||
|
||||
res.reset(requestBlock(atMost, nrRegs));
|
||||
// automatically freed if we throw
|
||||
|
@ -419,7 +411,8 @@ bool IResearchViewBlock::next(
|
|||
size_t& pos,
|
||||
size_t limit) {
|
||||
TRI_ASSERT(_filter);
|
||||
auto const numSorts = getViewNode(*this).sortCondition().size();
|
||||
auto const& viewNode = *ExecutionNode::castTo<IResearchViewNode const*>(getPlanNode());
|
||||
auto const numSorts = viewNode.sortCondition().size();
|
||||
|
||||
for (size_t count = _reader.size(); _readerOffset < count; ) {
|
||||
bool done = false;
|
||||
|
@ -443,7 +436,7 @@ bool IResearchViewBlock::next(
|
|||
}
|
||||
|
||||
// evaluate scores
|
||||
TRI_ASSERT(!getViewNode(*this).sortCondition().empty());
|
||||
TRI_ASSERT(!viewNode.sortCondition().empty());
|
||||
_scr->evaluate();
|
||||
|
||||
// copy scores, registerId's are sequential
|
||||
|
@ -755,4 +748,4 @@ NS_END // arangodb
|
|||
|
||||
// -----------------------------------------------------------------------------
|
||||
// --SECTION-- END-OF-FILE
|
||||
// -----------------------------------------------------------------------------
|
||||
// -----------------------------------------------------------------------------
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include "IResearchViewDBServer.h"
|
||||
#include "VelocyPackHelper.h"
|
||||
#include "Basics/StaticStrings.h"
|
||||
#include "Cluster/ClusterInfo.h"
|
||||
#include "Logger/Logger.h"
|
||||
#include "Logger/LogMacros.h"
|
||||
#include "RestServer/DatabasePathFeature.h"
|
||||
|
@ -36,7 +37,7 @@
|
|||
#include "Transaction/Methods.h"
|
||||
#include "VocBase/vocbase.h"
|
||||
|
||||
NS_LOCAL
|
||||
namespace {
|
||||
|
||||
typedef irs::async_utils::read_write_mutex::read_mutex ReadMutex;
|
||||
typedef irs::async_utils::read_write_mutex::write_mutex WriteMutex;
|
||||
|
@ -46,13 +47,6 @@ typedef irs::async_utils::read_write_mutex::write_mutex WriteMutex;
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
std::string const VIEW_NAME_PREFIX("_iresearch_");
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief a key in the jSON definition that differentiates a view-cid container
|
||||
/// from individual per-cid view implementation
|
||||
/// (view types are identical)
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
std::string const VIEW_CONTAINER_MARKER("master");
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief index reader implementation over multiple PrimaryKeyIndexReaders
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -180,15 +174,10 @@ struct ViewState: public arangodb::TransactionState::Cookie {
|
|||
/// @brief generate the name used for the per-cid views
|
||||
/// must be unique to avoid view collisions in vocbase
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
std::string generateName(
|
||||
std::string const& viewName,
|
||||
TRI_voc_cid_t viewId,
|
||||
TRI_voc_cid_t collectionId
|
||||
) {
|
||||
std::string generateName(TRI_voc_cid_t viewId, TRI_voc_cid_t collectionId) {
|
||||
return VIEW_NAME_PREFIX
|
||||
+ std::to_string(collectionId)
|
||||
+ "_" + std::to_string(viewId)
|
||||
+ "_" + viewName
|
||||
;
|
||||
}
|
||||
|
||||
|
@ -213,10 +202,10 @@ irs::utf8_path getPersistedPath(
|
|||
return dataPath;
|
||||
}
|
||||
|
||||
NS_END
|
||||
}
|
||||
|
||||
NS_BEGIN(arangodb)
|
||||
NS_BEGIN(iresearch)
|
||||
namespace arangodb {
|
||||
namespace iresearch {
|
||||
|
||||
IResearchViewDBServer::IResearchViewDBServer(
|
||||
TRI_vocbase_t& vocbase,
|
||||
|
@ -229,19 +218,6 @@ IResearchViewDBServer::IResearchViewDBServer(
|
|||
? info.get(StaticStrings::PropertiesField) : emptyObjectSlice()
|
||||
),
|
||||
_persistedPath(getPersistedPath(dbPathFeature, id())) {
|
||||
|
||||
auto* viewPtr = this;
|
||||
|
||||
// initialize transaction read callback
|
||||
_trxReadCallback = [viewPtr](arangodb::TransactionState& state)->void {
|
||||
switch(state.status()) {
|
||||
case arangodb::transaction::Status::RUNNING:
|
||||
viewPtr->snapshot(state, true);
|
||||
return;
|
||||
default:
|
||||
{} // NOOP
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
IResearchViewDBServer::~IResearchViewDBServer() {
|
||||
|
@ -307,17 +283,6 @@ arangodb::Result IResearchViewDBServer::appendVelocyPack(
|
|||
return arangodb::Result();
|
||||
}
|
||||
|
||||
bool IResearchViewDBServer::apply(arangodb::transaction::Methods& trx) {
|
||||
auto* state = trx.state();
|
||||
|
||||
if (!state) {
|
||||
return false;
|
||||
}
|
||||
|
||||
state->addStatusChangeCallback(_trxReadCallback);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
arangodb::Result IResearchViewDBServer::drop() {
|
||||
WriteMutex mutex(_mutex);
|
||||
|
@ -336,26 +301,39 @@ arangodb::Result IResearchViewDBServer::drop() {
|
|||
return arangodb::Result();
|
||||
}
|
||||
|
||||
arangodb::Result IResearchViewDBServer::drop(TRI_voc_cid_t cid) {
|
||||
WriteMutex mutex(_mutex);
|
||||
SCOPED_LOCK(mutex); // 'collections_' can be asynchronously read
|
||||
auto itr = _collections.find(cid);
|
||||
arangodb::Result IResearchViewDBServer::drop(TRI_voc_cid_t cid) noexcept {
|
||||
try {
|
||||
WriteMutex mutex(_mutex);
|
||||
SCOPED_LOCK(mutex); // 'collections_' can be asynchronously read
|
||||
auto itr = _collections.find(cid);
|
||||
|
||||
if (itr == _collections.end()) {
|
||||
return arangodb::Result(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND);
|
||||
if (itr == _collections.end()) {
|
||||
return arangodb::Result(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND);
|
||||
}
|
||||
|
||||
auto res = vocbase().dropView(itr->second->id(), true); // per-cid collections always system
|
||||
|
||||
if (res.ok()) {
|
||||
_collections.erase(itr);
|
||||
}
|
||||
|
||||
return res;
|
||||
} catch (std::exception const& e) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "caught exception while dropping collection '" << cid << "' from IResearchView '" << name() << "': " << e.what();
|
||||
IR_LOG_EXCEPTION();
|
||||
} catch (...) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "caught exception while dropping collection '" << cid << "' from IResearchView '" << name() << "'";
|
||||
IR_LOG_EXCEPTION();
|
||||
}
|
||||
|
||||
auto res = vocbase().dropView(itr->second->id(), true); // per-cid collections always system
|
||||
|
||||
if (res.ok()) {
|
||||
_collections.erase(itr);
|
||||
}
|
||||
|
||||
return res;
|
||||
return arangodb::Result(TRI_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
std::shared_ptr<arangodb::LogicalView> IResearchViewDBServer::ensure(
|
||||
TRI_voc_cid_t cid
|
||||
TRI_voc_cid_t cid,
|
||||
bool create /*= true*/
|
||||
) {
|
||||
WriteMutex mutex(_mutex);
|
||||
SCOPED_LOCK(mutex); // 'collections_' can be asynchronously read
|
||||
|
@ -365,6 +343,19 @@ std::shared_ptr<arangodb::LogicalView> IResearchViewDBServer::ensure(
|
|||
return itr->second;
|
||||
}
|
||||
|
||||
auto viewName = generateName(id(), cid);
|
||||
auto view = vocbase().lookupView(viewName); // on startup a IResearchView might only be in vocbase but not in a brand new IResearchViewDBServer
|
||||
|
||||
if (view) {
|
||||
_collections.emplace(cid, view); // track the IResearchView instance from vocbase
|
||||
|
||||
return view; // do not wrap in deleter since view already present in vocbase (as if already present in '_collections')
|
||||
}
|
||||
|
||||
if (!create) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
static const std::function<bool(irs::string_ref const& key)> acceptor = [](
|
||||
irs::string_ref const& key
|
||||
)->bool {
|
||||
|
@ -380,7 +371,7 @@ std::shared_ptr<arangodb::LogicalView> IResearchViewDBServer::ensure(
|
|||
); // required for use of VIEW_NAME_PREFIX
|
||||
builder.add(
|
||||
arangodb::StaticStrings::DataSourceName,
|
||||
toValuePair(generateName(name(), id(), cid))
|
||||
toValuePair(viewName)
|
||||
); // mark the view definition as an internal per-cid instance
|
||||
builder.add(
|
||||
arangodb::StaticStrings::DataSourceType,
|
||||
|
@ -404,8 +395,7 @@ std::shared_ptr<arangodb::LogicalView> IResearchViewDBServer::ensure(
|
|||
}
|
||||
|
||||
builder.close();
|
||||
|
||||
auto view = vocbase().createView(builder.slice());
|
||||
view = vocbase().createView(builder.slice());
|
||||
|
||||
if (!view) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
|
@ -414,15 +404,22 @@ std::shared_ptr<arangodb::LogicalView> IResearchViewDBServer::ensure(
|
|||
return nullptr;
|
||||
}
|
||||
|
||||
// FIXME should we register?
|
||||
_collections.emplace(cid, view);
|
||||
|
||||
// hold a reference to the original view in the deleter so that the view is still valid for the duration of the pointer wrapper
|
||||
// this shared_ptr should not be stored in TRI_vocbase_t since the deleter depends on 'this'
|
||||
return std::shared_ptr<arangodb::LogicalView>(
|
||||
view.get(),
|
||||
[this, view, cid](arangodb::LogicalView*)->void {
|
||||
// FIXME destructor has to be noexcept
|
||||
static const auto visitor = [](TRI_voc_cid_t)->bool { return false; };
|
||||
auto& vocbase = view->vocbase();
|
||||
|
||||
// same view in vocbase and with no collections
|
||||
if (view.get() == vocbase().lookupView(view->id()).get() // avoid double dropView(...)
|
||||
if (view.get() == vocbase.lookupView(view->id()).get() // avoid double dropView(...)
|
||||
&& view->visitCollections(visitor)) {
|
||||
// FIXME TODO ensure somehow that 'this' is still valid
|
||||
drop(cid);
|
||||
}
|
||||
}
|
||||
|
@ -456,36 +453,6 @@ std::shared_ptr<arangodb::LogicalView> IResearchViewDBServer::ensure(
|
|||
|
||||
// not a per-cid view instance (get here from ClusterInfo)
|
||||
if (!irs::starts_with(name, VIEW_NAME_PREFIX)) {
|
||||
auto wiew = vocbase.lookupView(name);
|
||||
|
||||
// DBServer view already exists, treat as an update
|
||||
if (wiew) {
|
||||
return wiew->updateProperties(info.get(StaticStrings::PropertiesField), false, true).ok() // 'false' because full view definition
|
||||
? wiew : nullptr;
|
||||
}
|
||||
|
||||
// if creation request not coming from the vocbase
|
||||
if (!info.hasKey(VIEW_CONTAINER_MARKER)) {
|
||||
arangodb::velocypack::Builder builder;
|
||||
|
||||
builder.openObject();
|
||||
|
||||
if (!mergeSlice(builder, info)) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failure to generate definition while constructing IResearch View in database '" << vocbase.id() << "'";
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
builder.add(
|
||||
VIEW_CONTAINER_MARKER,
|
||||
arangodb::velocypack::Value(arangodb::velocypack::ValueType::Null)
|
||||
);
|
||||
builder.close();
|
||||
|
||||
return vocbase.createView(builder.slice());
|
||||
}
|
||||
|
||||
auto* feature = arangodb::application_features::ApplicationServer::lookupFeature<
|
||||
arangodb::DatabasePathFeature
|
||||
>("DatabasePath");
|
||||
|
@ -497,142 +464,74 @@ std::shared_ptr<arangodb::LogicalView> IResearchViewDBServer::ensure(
|
|||
return nullptr;
|
||||
}
|
||||
|
||||
static const std::function<bool(irs::string_ref const& key)> acceptor = [](
|
||||
irs::string_ref const& key
|
||||
)->bool {
|
||||
return key != VIEW_CONTAINER_MARKER; // ignored internally injected field
|
||||
};
|
||||
arangodb::velocypack::Builder builder;
|
||||
auto* ci = ClusterInfo::instance();
|
||||
|
||||
builder.openObject();
|
||||
|
||||
if (!mergeSliceSkipKeys(builder, info, acceptor)) {
|
||||
if (!ci) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failure to generate definition while constructing IResearch View in database '" << vocbase.name() << "'";
|
||||
<< "failure to find ClusterInfo instance while constructing IResearch View in database '" << vocbase.id() << "'";
|
||||
TRI_set_errno(TRI_ERROR_INTERNAL);
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
builder.close();
|
||||
auto wiew = std::shared_ptr<IResearchViewDBServer>(
|
||||
new IResearchViewDBServer(vocbase, info, *feature, planVersion)
|
||||
);
|
||||
auto logicalWiew = ClusterInfo::instance()->getView(vocbase.name(), name);
|
||||
|
||||
PTR_NAMED(IResearchViewDBServer, view, vocbase, builder.slice(), *feature, planVersion);
|
||||
// if not found in the plan then look for the view in vocbase (added there below)
|
||||
if (!logicalWiew) {
|
||||
logicalWiew = vocbase.lookupView(name);
|
||||
}
|
||||
|
||||
if (preCommit && !preCommit(view)) {
|
||||
if (preCommit && !preCommit(wiew)) {
|
||||
LOG_TOPIC(ERR, arangodb::iresearch::TOPIC)
|
||||
<< "failure during pre-commit while constructing IResearch View in database '" << vocbase.id() << "'";
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return view;
|
||||
return wiew;
|
||||
}
|
||||
|
||||
// ...........................................................................
|
||||
// a per-cid view instance (get here only from StorageEngine startup or WAL recovery)
|
||||
// ...........................................................................
|
||||
|
||||
// parse view names created by generateName(...)
|
||||
auto* begin = name.c_str() + VIEW_NAME_PREFIX.size();
|
||||
auto size = name.size() - VIEW_NAME_PREFIX.size();
|
||||
auto* end = (char*)::memchr(begin, '_', size);
|
||||
auto view = vocbase.lookupView(name);
|
||||
|
||||
if (!end) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failed to determine collection ID while constructing IResearch View in database '" << vocbase.name() << "'";
|
||||
|
||||
return nullptr;
|
||||
if (view) {
|
||||
return view;
|
||||
}
|
||||
|
||||
irs::string_ref collectionId(begin, end - begin);
|
||||
|
||||
begin = end + 1; // +1 for '_'
|
||||
size -= collectionId.size() + 1; // +1 for '_'
|
||||
end = (char*)::memchr(begin, '_', size);
|
||||
|
||||
if (!end) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failed to determine view ID while constructing IResearch View in database '" << vocbase.name() << "'";
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
irs::string_ref viewId(begin, end - begin);
|
||||
|
||||
begin = end + 1; // +1 for '_'
|
||||
size -= viewId.size() + 1; // +1 for '_'
|
||||
|
||||
irs::string_ref viewName(begin, size);
|
||||
char* suffix;
|
||||
auto cid = std::strtoull(collectionId.c_str(), &suffix, 10); // 10 for base-10
|
||||
|
||||
if (suffix != collectionId.c_str() + collectionId.size()) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failed to parse collection ID while constructing IResearch View in database '" << vocbase.name() << "'";
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
auto wiew = vocbase.lookupView(viewId); // always look up by view ID since it cannot change
|
||||
|
||||
// create DBServer view
|
||||
if (!wiew) {
|
||||
static const std::function<bool(irs::string_ref const& key)> acceptor = [](
|
||||
irs::string_ref const& key
|
||||
)->bool {
|
||||
return key != arangodb::StaticStrings::DataSourceId
|
||||
&& key != arangodb::StaticStrings::DataSourceSystem
|
||||
&& key != arangodb::StaticStrings::DataSourceName; // ignored fields
|
||||
};
|
||||
arangodb::velocypack::Builder builder;
|
||||
|
||||
builder.openObject();
|
||||
builder.add(
|
||||
arangodb::StaticStrings::DataSourceId,
|
||||
arangodb::velocypack::Value(viewId)
|
||||
);
|
||||
builder.add(
|
||||
arangodb::StaticStrings::DataSourceName,
|
||||
arangodb::velocypack::Value(viewName)
|
||||
);
|
||||
builder.add(// mark the view definition as a DBServer instance
|
||||
VIEW_CONTAINER_MARKER,
|
||||
arangodb::velocypack::Value(arangodb::velocypack::ValueType::Null)
|
||||
);
|
||||
|
||||
if (!mergeSliceSkipKeys(builder, info, acceptor)) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failure to generate definition while constructing IResearch View in database '" << vocbase.name() << "'";
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
builder.close();
|
||||
wiew = vocbase.createView(builder.slice());
|
||||
}
|
||||
|
||||
// TODO FIXME find a better way to look up an iResearch View
|
||||
auto* view = LogicalView::cast<IResearchViewDBServer>(wiew.get());
|
||||
// no view for shard
|
||||
view = IResearchView::make(vocbase, info, isNew, planVersion, preCommit);
|
||||
|
||||
if (!view) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failure while creating an IResearch View '" << std::string(name) << "' in database '" << vocbase.name() << "'";
|
||||
<< "failure while creating an IResearch View '" << name << "' in database '" << vocbase.name() << "'";
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
auto impl = IResearchView::make(vocbase, info, isNew, planVersion, preCommit);
|
||||
// a wrapper to remove the view from vocbase if it no longer has any links
|
||||
// hold a reference to the original view in the deleter so that the view is
|
||||
// still valid for the duration of the pointer wrapper
|
||||
return std::shared_ptr<arangodb::LogicalView>(
|
||||
view.get(),
|
||||
[view](arangodb::LogicalView*)->void {
|
||||
static const auto visitor = [](TRI_voc_cid_t)->bool { return false; };
|
||||
auto& vocbase = view->vocbase();
|
||||
|
||||
if (!impl) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failure while creating an IResearch View for collection '" << cid << "' in database '" << vocbase.name() << "'";
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
WriteMutex mutex(view->_mutex);
|
||||
SCOPED_LOCK(mutex); // 'collections_' can be asynchronously read
|
||||
|
||||
return view->_collections.emplace(cid, impl).first->second;
|
||||
// same view in vocbase and with no collections
|
||||
if (view.get() == vocbase.lookupView(view->id()).get() // avoid double dropView(...)
|
||||
&& view->visitCollections(visitor)
|
||||
&& !vocbase.dropView(view->id(), true).ok()) { // per-cid collections always system
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failure to drop stale IResearchView '" << view->name() << "' while from database '" << vocbase.name() << "'";
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
void IResearchViewDBServer::open() {
|
||||
|
@ -648,33 +547,30 @@ arangodb::Result IResearchViewDBServer::rename(
|
|||
std::string&& newName,
|
||||
bool /*doSync*/
|
||||
) {
|
||||
ReadMutex mutex(_mutex);
|
||||
SCOPED_LOCK(mutex); // 'collections_' can be asynchronously modified
|
||||
|
||||
for (auto& entry: _collections) {
|
||||
auto res = vocbase().renameView(
|
||||
entry.second, generateName(newName, id(), entry.first)
|
||||
);
|
||||
|
||||
if (TRI_ERROR_NO_ERROR != res) {
|
||||
return res; // fail on first failure
|
||||
}
|
||||
}
|
||||
|
||||
name(std::move(newName));
|
||||
|
||||
return arangodb::Result();
|
||||
}
|
||||
|
||||
PrimaryKeyIndexReader* IResearchViewDBServer::snapshot(
|
||||
TransactionState& state,
|
||||
transaction::Methods& trx,
|
||||
std::vector<std::string> const& shards,
|
||||
bool force /*= false*/
|
||||
) const {
|
||||
auto* state = trx.state();
|
||||
|
||||
if (!state) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failed to get transaction state while creating IResearchView snapshot";
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// TODO FIXME find a better way to look up a ViewState
|
||||
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
|
||||
auto* cookie = dynamic_cast<ViewState*>(state.cookie(this));
|
||||
auto* cookie = dynamic_cast<ViewState*>(state->cookie(this));
|
||||
#else
|
||||
auto* cookie = static_cast<ViewState*>(state.cookie(this));
|
||||
auto* cookie = static_cast<ViewState*>(state->cookie(this));
|
||||
#endif
|
||||
|
||||
if (cookie) {
|
||||
|
@ -685,15 +581,39 @@ PrimaryKeyIndexReader* IResearchViewDBServer::snapshot(
|
|||
return nullptr;
|
||||
}
|
||||
|
||||
auto* resolver = trx.resolver();
|
||||
|
||||
if (!resolver) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failed to retrieve CollectionNameResolver from the transaction";
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
auto cookiePtr = irs::memory::make_unique<ViewState>();
|
||||
auto& reader = cookiePtr->_snapshot;
|
||||
ReadMutex mutex(_mutex);
|
||||
SCOPED_LOCK(mutex); // 'collections_' can be asynchronously modified
|
||||
|
||||
try {
|
||||
for (auto& entry: _collections) {
|
||||
auto* rdr =
|
||||
static_cast<IResearchView*>(entry.second.get())->snapshot(state, force);
|
||||
for (auto& shardId : shards) {
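// resolve each shard name to its local collection id, then merge that
// shard's per-cid view reader into the snapshot being built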
|
||||
auto const cid = resolver->getCollectionIdLocal(shardId);
|
||||
|
||||
if (0 == cid) {
|
||||
LOG_TOPIC(ERR, arangodb::iresearch::TOPIC)
|
||||
<< "failed to find shard by id '" << shardId << "', skipping it";
|
||||
continue;
|
||||
}
|
||||
|
||||
auto const shardView = _collections.find(cid);
|
||||
|
||||
if (shardView == _collections.end()) {
|
||||
LOG_TOPIC(ERR, arangodb::iresearch::TOPIC)
|
||||
<< "failed to find shard view for shard id '" << cid << "', skipping it";
|
||||
continue;
|
||||
}
|
||||
|
||||
auto* rdr = LogicalView::cast<IResearchView>(*shardView->second).snapshot(trx, force);
|
||||
|
||||
if (rdr) {
|
||||
reader.add(*rdr);
|
||||
|
@ -714,7 +634,7 @@ PrimaryKeyIndexReader* IResearchViewDBServer::snapshot(
|
|||
return nullptr;
|
||||
}
|
||||
|
||||
state.cookie(this, std::move(cookiePtr));
|
||||
state->cookie(this, std::move(cookiePtr));
|
||||
|
||||
return &reader;
|
||||
}
|
||||
|
@ -857,8 +777,8 @@ bool IResearchViewDBServer::visitCollections(
|
|||
return true;
|
||||
}
|
||||
|
||||
NS_END // iresearch
|
||||
NS_END // arangodb
|
||||
} // iresearch
|
||||
} // arangodb
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// --SECTION-- END-OF-FILE
|
||||
|
|
|
@ -25,15 +25,15 @@
|
|||
#define ARANGOD_IRESEARCH__IRESEARCH_VIEW_DBSERVER_H 1
|
||||
|
||||
#include "utils/async_utils.hpp"
|
||||
#include "utils/memory.hpp"
|
||||
#include "utils/utf8_path.hpp"
|
||||
#include "velocypack/Builder.h"
|
||||
#include "VocBase/LogicalView.h"
|
||||
|
||||
namespace arangodb {
|
||||
|
||||
class DatabasePathFeature; // forward declaration
|
||||
class TransactionState; // forward declaration
|
||||
class DatabasePathFeature;
|
||||
class TransactionState;
|
||||
class CollectionNameResolver;
|
||||
|
||||
namespace transaction {
|
||||
|
||||
|
@ -46,33 +46,32 @@ class Methods; // forward declaration
|
|||
namespace arangodb {
|
||||
namespace iresearch {
|
||||
|
||||
class PrimaryKeyIndexReader; // forward declaration
|
||||
class PrimaryKeyIndexReader;
|
||||
|
||||
class IResearchViewDBServer final: public arangodb::LogicalView {
|
||||
public:
|
||||
virtual ~IResearchViewDBServer();
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief apply any changes to 'state' required by this view
|
||||
/// @return success
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
bool apply(arangodb::transaction::Methods& trx);
|
||||
|
||||
virtual arangodb::Result drop() override;
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief drop the view association for the specified 'cid'
|
||||
/// @return if an association was removed
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
arangodb::Result drop(TRI_voc_cid_t cid);
|
||||
arangodb::Result drop(TRI_voc_cid_t cid) noexcept;
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief ensure there is a view instance for the specified 'cid'
|
||||
/// @param force creation of a new instance if none is available in vocbase
|
||||
/// @return an existing instance or create a new instance if none is registered
|
||||
/// on ptr reset the view will be dropped if it has no collections
|
||||
/// @note view created in vocbase() to match callflow during regular startup
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
std::shared_ptr<arangodb::LogicalView> ensure(TRI_voc_cid_t cid);
|
||||
std::shared_ptr<arangodb::LogicalView> ensure(
|
||||
TRI_voc_cid_t cid,
|
||||
bool create = true
|
||||
);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief view factory
|
||||
|
@ -96,7 +95,8 @@ class IResearchViewDBServer final: public arangodb::LogicalView {
|
|||
/// if force == true && no snapshot -> associate current snapshot
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
PrimaryKeyIndexReader* snapshot(
|
||||
TransactionState& state,
|
||||
transaction::Methods& trx,
|
||||
std::vector<std::string> const& shards,
|
||||
bool force = false
|
||||
) const;
|
||||
|
||||
|
@ -117,13 +117,10 @@ class IResearchViewDBServer final: public arangodb::LogicalView {
|
|||
) const override;
|
||||
|
||||
private:
|
||||
DECLARE_SPTR(LogicalView);
|
||||
|
||||
std::map<TRI_voc_cid_t, std::shared_ptr<arangodb::LogicalView>> _collections;
|
||||
arangodb::velocypack::Builder _meta; // the view definition
|
||||
mutable irs::async_utils::read_write_mutex _mutex; // for use with members
|
||||
irs::utf8_path const _persistedPath;
|
||||
std::function<void(arangodb::TransactionState& state)> _trxReadCallback; // for snapshot(...)
|
||||
|
||||
IResearchViewDBServer(
|
||||
TRI_vocbase_t& vocbase,
|
||||
|
|
|
@ -36,6 +36,7 @@
|
|||
#include "Aql/SortCondition.h"
|
||||
#include "Aql/Query.h"
|
||||
#include "Aql/ExecutionEngine.h"
|
||||
#include "Cluster/ClusterInfo.h"
|
||||
#include "StorageEngine/TransactionState.h"
|
||||
#include "Basics/StringUtils.h"
|
||||
|
||||
|
@ -173,6 +174,43 @@ std::vector<arangodb::iresearch::IResearchSort> fromVelocyPack(
|
|||
return sorts;
|
||||
}
|
||||
|
||||
/// negative value - value is dirty
|
||||
/// _volatilityMask & 1 == volatile filter
|
||||
/// _volatilityMask & 2 == volatile sort
|
||||
int evaluateVolatility(arangodb::iresearch::IResearchViewNode const& node) {
|
||||
auto const inInnerLoop = node.isInInnerLoop();
|
||||
auto const& plan = *node.plan();
|
||||
auto const& outVariable = node.outVariable();
|
||||
|
||||
std::unordered_set<arangodb::aql::Variable const*> vars;
|
||||
int mask = 0;
|
||||
|
||||
// evaluate filter condition volatility
|
||||
auto& filterCondition = node.filterCondition();
|
||||
if (!filterConditionIsEmpty(&filterCondition) && inInnerLoop) {
|
||||
irs::set_bit<0>(::hasDependecies(plan, filterCondition, outVariable, vars), mask);
|
||||
}
|
||||
|
||||
// evaluate sort condition volatility
|
||||
auto& sortCondition = node.sortCondition();
|
||||
if (!sortCondition.empty() && inInnerLoop) {
|
||||
vars.clear();
|
||||
|
||||
for (auto const& sort : sortCondition) {
|
||||
if (::hasDependecies(plan, *sort.node, outVariable, vars)) {
|
||||
irs::set_bit<1>(mask);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return mask;
|
||||
}
|
||||
|
||||
std::function<bool(TRI_voc_cid_t)> const viewIsEmpty = [](TRI_voc_cid_t) {
|
||||
return false;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
namespace arangodb {
|
||||
|
@ -212,12 +250,21 @@ IResearchViewNode::IResearchViewNode(
|
|||
// set it to surrogate 'RETURN ALL' node
|
||||
_filterCondition(&ALL),
|
||||
_sortCondition(fromVelocyPack(plan, base.get("sortCondition"))) {
|
||||
// view
|
||||
auto const viewId = base.get("viewId").copyString();
|
||||
|
||||
if (ServerState::instance()->isSingleServer()) {
|
||||
_view = _vocbase.lookupView(basics::StringUtils::uint64(viewId));
|
||||
} else {
|
||||
// need cluster wide view
|
||||
TRI_ASSERT(ClusterInfo::instance());
|
||||
_view = ClusterInfo::instance()->getView(_vocbase.name(), viewId);
|
||||
}
|
||||
|
||||
// FIXME how to check properly
|
||||
_view = _vocbase.lookupView(
|
||||
basics::StringUtils::uint64(base.get("viewId").copyString())
|
||||
);
|
||||
TRI_ASSERT(_view && iresearch::DATA_SOURCE_TYPE == _view->type());
|
||||
|
||||
// filter condition
|
||||
auto const filterSlice = base.get("condition");
|
||||
|
||||
if (!filterSlice.isEmptyObject()) {
|
||||
|
@ -227,6 +274,7 @@ IResearchViewNode::IResearchViewNode(
|
|||
);
|
||||
}
|
||||
|
||||
// shards
|
||||
auto const shardsSlice = base.get("shards");
|
||||
|
||||
if (!shardsSlice.isArray()) {
|
||||
|
@ -250,6 +298,13 @@ IResearchViewNode::IResearchViewNode(
|
|||
|
||||
_shards.push_back(shard->name());
|
||||
}
|
||||
|
||||
// volatility mask
|
||||
auto const volatilityMaskSlice = base.get("volatility");
|
||||
|
||||
if (volatilityMaskSlice.isNumber()) {
|
||||
_volatilityMask = volatilityMaskSlice.getNumber<int>();
|
||||
}
|
||||
}
|
||||
|
||||
void IResearchViewNode::planNodeRegisters(
|
||||
|
@ -280,27 +335,15 @@ void IResearchViewNode::planNodeRegisters(
|
|||
}
|
||||
}
|
||||
|
||||
bool IResearchViewNode::volatile_filter() const {
|
||||
if (!filterConditionIsEmpty(_filterCondition) && isInInnerLoop()) {
|
||||
std::unordered_set<arangodb::aql::Variable const*> vars;
|
||||
return ::hasDependecies(*plan(), *_filterCondition, outVariable(), vars);
|
||||
std::pair<bool, bool> IResearchViewNode::volatility(bool force /*=false*/) const {
|
||||
if (force || _volatilityMask < 0) {
|
||||
_volatilityMask = evaluateVolatility(*this);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool IResearchViewNode::volatile_sort() const {
|
||||
if (!_sortCondition.empty() && isInInnerLoop()) {
|
||||
std::unordered_set<aql::Variable const*> vars;
|
||||
|
||||
for (auto const& sort : _sortCondition) {
|
||||
if (::hasDependecies(*plan(), *sort.node, outVariable(), vars)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
return std::make_pair(
|
||||
irs::check_bit<0>(_volatilityMask), // filter
|
||||
irs::check_bit<1>(_volatilityMask) // sort
|
||||
);
|
||||
}
|
||||
|
||||
/// @brief toVelocyPack, for EnumerateViewNode
|
||||
|
@ -313,6 +356,7 @@ void IResearchViewNode::toVelocyPackHelper(
|
|||
|
||||
// system info
|
||||
nodes.add("database", VPackValue(_vocbase.name()));
|
||||
// need 'view' field to correctly print view name in JS explanation
|
||||
nodes.add("view", VPackValue(_view->name()));
|
||||
nodes.add("viewId", VPackValue(basics::StringUtils::itoa(_view->id())));
|
||||
|
||||
|
@ -341,6 +385,9 @@ void IResearchViewNode::toVelocyPackHelper(
|
|||
}
|
||||
}
|
||||
|
||||
// volatility mask
|
||||
nodes.add("volatility", VPackValue(_volatilityMask));
|
||||
|
||||
nodes.close();
|
||||
}
|
||||
|
||||
|
@ -394,12 +441,13 @@ aql::ExecutionNode* IResearchViewNode::clone(
|
|||
decltype(_sortCondition)(_sortCondition)
|
||||
);
|
||||
node->_shards = _shards;
|
||||
node->_volatilityMask = _volatilityMask;
|
||||
|
||||
return cloneHelper(std::move(node), withDependencies, withProperties);
|
||||
}
|
||||
|
||||
bool IResearchViewNode::empty() const noexcept {
|
||||
return _view->visitCollections([](TRI_voc_cid_t){ return false; });
|
||||
return _view->visitCollections(viewIsEmpty);
|
||||
}
|
||||
|
||||
/// @brief the cost of an enumerate view node
|
||||
|
@ -439,30 +487,28 @@ std::unique_ptr<aql::ExecutionBlock> IResearchViewNode::createBlock(
|
|||
|
||||
auto* trx = engine.getQuery()->trx();
|
||||
|
||||
if (!trx || !(trx->state())) {
|
||||
if (!trx) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failed to get transaction state while creating IResearchView ExecutionBlock";
|
||||
<< "failed to get transaction while creating IResearchView ExecutionBlock";
|
||||
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(
|
||||
TRI_ERROR_INTERNAL,
|
||||
"failed to get transaction state while creating IResearchView ExecutionBlock"
|
||||
"failed to get transaction while creating IResearchView ExecutionBlock"
|
||||
);
|
||||
}
|
||||
|
||||
auto& state = *(trx->state());
|
||||
auto& view = *this->view();
|
||||
PrimaryKeyIndexReader* reader;
|
||||
|
||||
if (ServerState::instance()->isDBServer()) {
|
||||
// FIXME pass list of the shards involved
|
||||
// FIXME cache snapshot in transaction state when transaction starts
|
||||
reader = LogicalView::cast<IResearchViewDBServer>(*this->view()).snapshot(state, true);
|
||||
reader = LogicalView::cast<IResearchViewDBServer>(view).snapshot(*trx, _shards, true);
|
||||
} else {
|
||||
reader = LogicalView::cast<IResearchView>(*this->view()).snapshot(state);
|
||||
reader = LogicalView::cast<IResearchView>(view).snapshot(*trx);
|
||||
}
|
||||
|
||||
if (!reader) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failed to get snapshot while creating IResearchView ExecutionBlock for IResearchView '" << view()->name() << "' tid '" << state.id() << "'";
|
||||
<< "failed to get snapshot while creating IResearchView ExecutionBlock for IResearchView '" << view.name() << "' tid '";
|
||||
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(
|
||||
TRI_ERROR_INTERNAL,
|
||||
|
|
|
@@ -163,11 +163,13 @@ class IResearchViewNode final : public arangodb::aql::ExecutionNode {
std::unordered_set<aql::Variable const*>& vars
) const override final;

/// @brief node has nondeterministic filter condition or located inside a loop
bool volatile_filter() const;

/// @brief node has nondeterministic sort condition or located inside a loop
bool volatile_sort() const;
/// @brief node volatility, determines how often query has
/// to be rebuilt during the execution
/// @note first - node has nondeterministic/dependent (inside a loop)
/// filter condition
/// second - node has nondeterministic/dependent (inside a loop)
/// sort condition
std::pair<bool, bool> volatility(bool force = false) const;

void planNodeRegisters(
std::vector<aql::RegisterId>& nrRegsHere,

@@ -202,6 +204,9 @@ class IResearchViewNode final : public arangodb::aql::ExecutionNode {

/// @brief list of shards involved, need this for the cluster
std::vector<std::string> _shards;

/// @brief volatility mask
mutable int _volatilityMask{ -1 };
}; // IResearchViewNode

} // iresearch
@@ -599,27 +599,26 @@ void MMFilesIndexFactory::fillSystemIndexes(arangodb::LogicalCollection* col,

void MMFilesIndexFactory::prepareIndexes(LogicalCollection* col, VPackSlice const& indexesSlice,
std::vector<std::shared_ptr<arangodb::Index>>& indexes) const {

for (auto const& v : VPackArrayIterator(indexesSlice)) {
if (basics::VelocyPackHelper::getBooleanValue(v, "error", false)) {
// We have an error here.
// Do not add index.
continue;
}

auto idx = prepareIndexFromSlice(v, false, col, true);

if (!idx) {
LOG_TOPIC(ERR, arangodb::Logger::ENGINES)
<< "error creating index from definition '"
<< indexesSlice.toString() << "'";
<< "error creating index from definition '" << v.toString() << "'";

continue;
}

indexes.emplace_back(std::move(idx));
}
}

// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
@ -605,9 +605,10 @@ void RocksDBIndexFactory::fillSystemIndexes(arangodb::LogicalCollection* col,
|
|||
void RocksDBIndexFactory::prepareIndexes(LogicalCollection* col, VPackSlice const& indexesSlice,
|
||||
std::vector<std::shared_ptr<arangodb::Index>>& indexes) const {
|
||||
TRI_ASSERT(indexesSlice.isArray());
|
||||
|
||||
|
||||
bool splitEdgeIndex = false;
|
||||
TRI_idx_iid_t last = 0;
|
||||
|
||||
for (auto const& v : VPackArrayIterator(indexesSlice)) {
|
||||
if (arangodb::basics::VelocyPackHelper::getBooleanValue(v, "error",
|
||||
false)) {
|
||||
|
@ -616,19 +617,23 @@ void RocksDBIndexFactory::prepareIndexes(LogicalCollection* col, VPackSlice cons
|
|||
// TODO Handle Properly
|
||||
continue;
|
||||
}
|
||||
|
||||
bool alreadyHandled = false;
|
||||
|
||||
// check for combined edge index from MMFiles; must split!
|
||||
auto value = v.get("type");
|
||||
|
||||
if (value.isString()) {
|
||||
std::string tmp = value.copyString();
|
||||
arangodb::Index::IndexType const type =
|
||||
arangodb::Index::type(tmp.c_str());
|
||||
|
||||
if (type == Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX) {
|
||||
VPackSlice fields = v.get("fields");
|
||||
|
||||
if (fields.isArray() && fields.length() == 2) {
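// a combined MMFiles edge index lists both "_from" and "_to" in "fields";
// RocksDB uses one edge index per attribute, so the definition is split into
// a "_from" index and a "_to" index (the second one gets the next index id)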
|
||||
VPackBuilder from;
|
||||
|
||||
from.openObject();
|
||||
|
||||
for (auto const& f : VPackObjectIterator(v)) {
|
||||
if (arangodb::StringRef(f.key) == "fields") {
|
||||
from.add(VPackValue("fields"));
|
||||
|
@ -640,10 +645,13 @@ void RocksDBIndexFactory::prepareIndexes(LogicalCollection* col, VPackSlice cons
|
|||
from.add(f.value);
|
||||
}
|
||||
}
|
||||
|
||||
from.close();
|
||||
|
||||
|
||||
VPackBuilder to;
|
||||
|
||||
to.openObject();
|
||||
|
||||
for (auto const& f : VPackObjectIterator(v)) {
|
||||
if (arangodb::StringRef(f.key) == "fields") {
|
||||
to.add(VPackValue("fields"));
|
||||
|
@ -652,6 +660,7 @@ void RocksDBIndexFactory::prepareIndexes(LogicalCollection* col, VPackSlice cons
|
|||
to.close();
|
||||
} else if (arangodb::StringRef(f.key) == "id") {
|
||||
auto iid = basics::StringUtils::uint64(f.value.copyString()) + 1;
|
||||
|
||||
last = iid;
|
||||
to.add("id", VPackValue(std::to_string(iid)));
|
||||
} else {
|
||||
|
@ -659,20 +668,38 @@ void RocksDBIndexFactory::prepareIndexes(LogicalCollection* col, VPackSlice cons
|
|||
to.add(f.value);
|
||||
}
|
||||
}
|
||||
|
||||
to.close();
|
||||
|
||||
|
||||
auto idxFrom = prepareIndexFromSlice(from.slice(), false, col, true);
|
||||
indexes.emplace_back(std::move(idxFrom));
|
||||
|
||||
|
||||
if (!idxFrom) {
|
||||
LOG_TOPIC(ERR, arangodb::Logger::ENGINES)
|
||||
<< "error creating index from definition '" << from.slice().toString() << "'";
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
auto idxTo = prepareIndexFromSlice(to.slice(), false, col, true);
|
||||
|
||||
if (!idxTo) {
|
||||
LOG_TOPIC(ERR, arangodb::Logger::ENGINES)
|
||||
<< "error creating index from definition '" << to.slice().toString() << "'";
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
indexes.emplace_back(std::move(idxFrom));
|
||||
indexes.emplace_back(std::move(idxTo));
|
||||
|
||||
alreadyHandled = true;
|
||||
splitEdgeIndex = true;
|
||||
|
||||
continue;
|
||||
}
|
||||
} else if (splitEdgeIndex) {
|
||||
VPackBuilder b;
|
||||
|
||||
b.openObject();
|
||||
|
||||
for (auto const& f : VPackObjectIterator(v)) {
|
||||
if (arangodb::StringRef(f.key) == "id") {
|
||||
last++;
|
||||
|
@ -682,23 +709,37 @@ void RocksDBIndexFactory::prepareIndexes(LogicalCollection* col, VPackSlice cons
|
|||
b.add(f.value);
|
||||
}
|
||||
}
|
||||
|
||||
b.close();
|
||||
|
||||
|
||||
auto idx = prepareIndexFromSlice(b.slice(), false, col, true);
|
||||
|
||||
if (!idx) {
|
||||
LOG_TOPIC(ERR, arangodb::Logger::ENGINES)
|
||||
<< "error creating index from definition '" << b.slice().toString() << "'";
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
indexes.emplace_back(std::move(idx));
|
||||
|
||||
alreadyHandled = true;
|
||||
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (!alreadyHandled) {
|
||||
auto idx = prepareIndexFromSlice(v, false, col, true);
|
||||
indexes.emplace_back(std::move(idx));
|
||||
|
||||
auto idx = prepareIndexFromSlice(v, false, col, true);
|
||||
|
||||
if (!idx) {
|
||||
LOG_TOPIC(ERR, arangodb::Logger::ENGINES)
|
||||
<< "error creating index from definition '" << v.toString() << "'";
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
indexes.emplace_back(std::move(idx));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// --SECTION-- END-OF-FILE
|
||||
// -----------------------------------------------------------------------------
|
||||
// -----------------------------------------------------------------------------
|
|
@@ -94,12 +94,6 @@ TransactionCollection* TransactionState::collection(
return trxCollection;
}

void TransactionState::addStatusChangeCallback(
StatusChangeCallback const& callback
) {
_statusChangeCallbacks.emplace_back(&callback);
}

TransactionState::Cookie* TransactionState::cookie(
void const* key
) noexcept {

@@ -424,14 +418,4 @@ void TransactionState::updateStatus(transaction::Status status) {
}

_status = status;

for (auto& callback: _statusChangeCallbacks) {
TRI_ASSERT(callback);

try {
(*callback)(*this);
} catch (...) {
// we must not propagate exceptions from here
}
}
}
@@ -86,9 +86,6 @@ class TransactionState {
);
virtual ~TransactionState();

/// @brief add a callback to be called for state change events
void addStatusChangeCallback(StatusChangeCallback const& callback);

/// @return a cookie associated with the specified key, nullptr if none
Cookie* cookie(void const* key) noexcept;

@@ -188,7 +185,7 @@
TransactionCollection* findCollection(TRI_voc_cid_t cid) const;

void setType(AccessMode::Type type);

/// @brief whether or not a transaction is read-only
bool isReadOnlyTransaction() const {
return (_type == AccessMode::Type::READ);

@@ -239,10 +236,8 @@
private:
/// a collection of stored cookies
std::map<void const*, Cookie::ptr> _cookies;
/// functors to call for status change (pointer to allow for use of std::vector)
std::vector<StatusChangeCallback const*> _statusChangeCallbacks;
};

}

#endif
#endif
@@ -82,12 +82,12 @@ std::vector<arangodb::transaction::Methods::DataSourceRegistrationCallback>& get

/// @return the status change callbacks stored in state
/// or nullptr if none and !create
std::vector<arangodb::transaction::Methods::StatusChangeCallback>* getStatusChangeCallbacks(
std::vector<arangodb::transaction::Methods::StatusChangeCallback const*>* getStatusChangeCallbacks(
arangodb::TransactionState& state,
bool create = false
) {
struct CookieType: public arangodb::TransactionState::Cookie {
std::vector<arangodb::transaction::Methods::StatusChangeCallback> _callbacks;
std::vector<arangodb::transaction::Methods::StatusChangeCallback const*> _callbacks;
};

static const int key = 0; // arbitrary location in memory, common for all

@@ -170,7 +170,7 @@ void applyStatusChangeCallbacks(
TRI_ASSERT(callback); // addStatusChangeCallback(...) ensures valid

try {
callback(trx, status);
(*callback)(trx, status);
} catch (...) {
// we must not propagate exceptions from here
}

@@ -246,9 +246,9 @@ static OperationResult emptyResult(OperationOptions const& options) {
}

bool transaction::Methods::addStatusChangeCallback(
StatusChangeCallback const& callback
StatusChangeCallback const* callback
) {
if (!callback) {
if (!callback || !*callback) {
return true; // nothing to call back
}

@@ -161,7 +161,7 @@ class Methods {
/// will match trx.state()->status() for top-level transactions
/// may not match trx.state()->status() for embedded transactions
/// since their status is not updated from RUNNING
typedef std::function<void(transaction::Methods& trx, transaction::Status& status)> StatusChangeCallback;
typedef std::function<void(transaction::Methods& trx, transaction::Status status)> StatusChangeCallback;

/// @brief add a callback to be called for LogicalDataSource instance
/// association events, e.g. addCollection(...)

@@ -169,8 +169,9 @@ class Methods {
static void addDataSourceRegistrationCallback(DataSourceRegistrationCallback const& callback);

/// @brief add a callback to be called for state change events
/// @param callback nullptr and empty functors are ignored, treated as success
/// @return success
bool addStatusChangeCallback(StatusChangeCallback const& callback);
bool addStatusChangeCallback(StatusChangeCallback const* callback);

/// @brief clear all called for LogicalDataSource instance association events
/// @note not thread-safe on the assumption of static factory registration
@@ -304,45 +304,29 @@ std::string CollectionNameResolver::getCollectionName(
}

std::string CollectionNameResolver::localNameLookup(TRI_voc_cid_t cid) const {
std::string name;

if (ServerState::isDBServer(_serverRole)) {
READ_LOCKER(readLocker, _vocbase._dataSourceLock);
auto it = _vocbase._dataSourceById.find(cid);

if (it != _vocbase._dataSourceById.end()
&& LogicalCollection::category() == it->second->category()) {
if (it->second->planId() == it->second->id()) {
// DBserver local case
name = (*it).second->name();
} else {
// DBserver case of a shard:
name = arangodb::basics::StringUtils::itoa((*it).second->planId());
std::shared_ptr<LogicalCollection> ci;

try {
ci = ClusterInfo::instance()->getCollection(
it->second->vocbase().name(), name
);
}
catch (...) {
}

if (ci == nullptr) {
name = ""; // collection unknown
} else {
name = ci->name(); // can be empty, if collection unknown
}
}
}

return !name.empty() ? name : std::string("_unknown");
}

// exactly as in the non-cluster case
static const std::string UNKNOWN("_unknown");
auto collection = _vocbase.lookupCollection(cid);

return collection ? collection->name() : std::string("_unknown");
// exactly as in the non-cluster case
if (!ServerState::isDBServer(_serverRole)) {
return collection ? collection->name() : UNKNOWN;
}

// DBserver case of a shard:
if (collection && collection->planId() != collection->id()) {
try {
collection = ClusterInfo::instance()->getCollection(
collection->vocbase().name(), std::to_string(collection->planId())
);
}
catch (...) {
return UNKNOWN;
}
}

// can be empty, if collection unknown
return collection && !collection->name().empty()
? collection->name() : UNKNOWN;
}

std::shared_ptr<LogicalDataSource> CollectionNameResolver::getDataSource(

@@ -441,20 +425,6 @@ std::shared_ptr<LogicalView> CollectionNameResolver::getView(
#endif
}

std::string CollectionNameResolver::getViewNameCluster(
TRI_voc_cid_t cid
) const {
if (!ServerState::isClusterRole(_serverRole)) {
// This handles the case of a standalone server
auto view = _vocbase.lookupView(cid);

return view ? view->name() : StaticStrings::Empty;
}

// FIXME not supported
return StaticStrings::Empty;
}

bool CollectionNameResolver::visitCollections(
std::function<bool(LogicalCollection&)> const& visitor,
TRI_voc_cid_t id
@@ -152,11 +152,6 @@ class CollectionNameResolver {
std::string const& nameOrId
) const noexcept;

//////////////////////////////////////////////////////////////////////////////
/// @brief look up a cluster-wide view name for a cluster-wide view id
//////////////////////////////////////////////////////////////////////////////
std::string getViewNameCluster(TRI_voc_cid_t cid) const;

//////////////////////////////////////////////////////////////////////////////
/// @brief the vocbase instance this resolver instance uses
//////////////////////////////////////////////////////////////////////////////

@@ -204,4 +199,4 @@ class CollectionNameResolver {

}

#endif
#endif
@ -397,80 +397,79 @@ static void JS_NameViewVocbase(
|
|||
|
||||
/// @brief returns the properties of a view
|
||||
static void JS_PropertiesViewVocbase(
|
||||
v8::FunctionCallbackInfo<v8::Value> const& args) {
|
||||
v8::FunctionCallbackInfo<v8::Value> const& args
|
||||
) {
|
||||
TRI_V8_TRY_CATCH_BEGIN(isolate);
|
||||
v8::HandleScope scope(isolate);
|
||||
auto* viewPtr = TRI_UnwrapClass<std::shared_ptr<arangodb::LogicalView>>(
|
||||
args.Holder(), WRP_VOCBASE_VIEW_TYPE
|
||||
);
|
||||
|
||||
std::shared_ptr<arangodb::LogicalView>* v =
|
||||
TRI_UnwrapClass<std::shared_ptr<arangodb::LogicalView>>(
|
||||
args.Holder(), WRP_VOCBASE_VIEW_TYPE);
|
||||
|
||||
if (v == nullptr || v->get() == nullptr) {
|
||||
if (!viewPtr || !*viewPtr) {
|
||||
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract view");
|
||||
}
|
||||
|
||||
LogicalView* view = v->get();
|
||||
auto view = *viewPtr;
|
||||
|
||||
bool const isModification = (args.Length() != 0);
|
||||
// In the cluster the view object might contain outdated properties,
|
||||
// which will break tests. We need an extra lookup for each operation.
|
||||
arangodb::CollectionNameResolver resolver(view->vocbase());
|
||||
|
||||
// check if we want to change some parameters
|
||||
if (isModification) {
|
||||
v8::Handle<v8::Value> par = args[0];
|
||||
bool partialUpdate = true; // partial update by default
|
||||
if (args.Length() > 0 && args[0]->IsObject()) {
|
||||
arangodb::velocypack::Builder builder;
|
||||
|
||||
if (par->IsObject()) {
|
||||
VPackBuilder builder;
|
||||
int res = TRI_V8ToVPack(isolate, builder, args[0], false);
|
||||
{
|
||||
auto res = TRI_V8ToVPack(isolate, builder, args[0], false);
|
||||
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
if (TRI_ERROR_NO_ERROR != res) {
|
||||
TRI_V8_THROW_EXCEPTION(res);
|
||||
}
|
||||
}
|
||||
|
||||
if (args.Length() > 1) {
|
||||
if (!args[1]->IsBoolean()) {
|
||||
TRI_V8_THROW_EXCEPTION_PARAMETER("<partialUpdate> must be a boolean");
|
||||
}
|
||||
bool partialUpdate = true; // partial update by default
|
||||
|
||||
partialUpdate = args[1]->ToBoolean()->Value();
|
||||
if (args.Length() > 1) {
|
||||
if (!args[1]->IsBoolean()) {
|
||||
TRI_V8_THROW_EXCEPTION_PARAMETER("<partialUpdate> must be a boolean");
|
||||
}
|
||||
|
||||
VPackSlice const slice = builder.slice();
|
||||
partialUpdate = args[1]->ToBoolean()->Value();
|
||||
}
|
||||
|
||||
// try to write new parameter to file
|
||||
bool doSync =
|
||||
application_features::ApplicationServer::getFeature<DatabaseFeature>(
|
||||
"Database")
|
||||
->forceSyncProperties();
|
||||
auto updateRes = view->updateProperties(slice, partialUpdate, doSync);
|
||||
auto doSync = arangodb::application_features::ApplicationServer::getFeature<
|
||||
DatabaseFeature
|
||||
>("Database")->forceSyncProperties();
|
||||
|
||||
if (!updateRes.ok()) {
|
||||
TRI_V8_THROW_EXCEPTION_MESSAGE(updateRes.errorNumber(),
|
||||
updateRes.errorMessage());
|
||||
}
|
||||
view = resolver.getView(view->id()); // ensure have the latest definition
|
||||
|
||||
if (!view) {
|
||||
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND);
|
||||
}
|
||||
|
||||
auto res = view->updateProperties(builder.slice(), partialUpdate, doSync);
|
||||
|
||||
if (!res.ok()) {
|
||||
TRI_V8_THROW_EXCEPTION_MESSAGE(res.errorNumber(), res.errorMessage());
|
||||
}
|
||||
}
|
||||
|
||||
// in the cluster the view object might contain outdated
|
||||
// properties, which will break tests. We need an extra lookup
|
||||
arangodb::CollectionNameResolver resolver(view->vocbase());
|
||||
auto updatedView = resolver.getView(view->id());
|
||||
view = resolver.getView(view->id());
|
||||
|
||||
if (!updatedView) {
|
||||
if (!view) {
|
||||
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND);
|
||||
}
|
||||
|
||||
view = updatedView.get();
|
||||
arangodb::velocypack::Builder builder;
|
||||
|
||||
VPackBuilder vpackProperties;
|
||||
|
||||
vpackProperties.openObject();
|
||||
view->toVelocyPack(vpackProperties, true, false);
|
||||
vpackProperties.close();
|
||||
builder.openObject();
|
||||
view->toVelocyPack(builder, true, false);
|
||||
builder.close();
|
||||
|
||||
// return the current parameter set
|
||||
v8::Handle<v8::Object> result =
|
||||
TRI_VPackToV8(isolate, vpackProperties.slice().get("properties"))
|
||||
->ToObject();
|
||||
// FIXME TODO this should be the full view representation similar to JS_PropertiesVocbaseCol(...), not just "properties"
|
||||
auto result =
|
||||
TRI_VPackToV8(isolate, builder.slice().get("properties")) ->ToObject();
|
||||
|
||||
TRI_V8_RETURN(result);
|
||||
TRI_V8_TRY_CATCH_END
|
||||
|
|
|
@@ -30,6 +30,8 @@
#include "Basics/ReadWriteLock.h"
#include "Meta/utility.h"
#include "VocBase/voc-types.h"
#include "Logger/Logger.h"
#include "Logger/LogMacros.h"

#include <velocypack/Buffer.h>

@@ -78,7 +80,14 @@ class LogicalView : public LogicalDataSource {
// to explicitly expose our intention to fail in 'noexcept' function
// in case of wrong type
auto impl = dynamic_cast<typename target_type_t::pointer>(&view);
TRI_ASSERT(impl);

if (!impl) {
LOG_TOPIC(ERR, Logger::VIEWS)
<< "invalid conversion attempt from '" << typeid(Source).name() << "'"
<< " to '" << typeid(typename target_type_t::value_type).name() << "'";
TRI_ASSERT(false);
}

return *impl;
#else
return static_cast<typename target_type_t::reference>(view);
@ -27,6 +27,7 @@
|
|||
#include <velocypack/Collection.h>
|
||||
#include <velocypack/Parser.h>
|
||||
#include <velocypack/velocypack-aliases.h>
|
||||
#include <iostream>
|
||||
|
||||
#include "ApplicationFeatures/ApplicationServer.h"
|
||||
#include "Aql/PlanCache.h"
|
||||
|
@ -117,10 +118,7 @@ namespace {
|
|||
// recursive locking of the same instance is not yet supported (create a new instance instead)
|
||||
TRI_ASSERT(_update != owned);
|
||||
|
||||
if (_locker.tryLock()) {
|
||||
_owner.store(std::this_thread::get_id());
|
||||
_update = owned;
|
||||
} else if (std::this_thread::get_id() != _owner.load()) { // not recursive
|
||||
if (std::this_thread::get_id() != _owner.load()) { // not recursive
|
||||
_locker.lock();
|
||||
_owner.store(std::this_thread::get_id());
|
||||
_update = owned;
|
||||
|
@ -128,7 +126,6 @@ namespace {
|
|||
}
|
||||
|
||||
void unlock() {
|
||||
_locker.unlock();
|
||||
_update(*this);
|
||||
}
|
||||
|
||||
|
@ -141,6 +138,7 @@ namespace {
|
|||
static void owned(RecursiveWriteLocker& locker) {
|
||||
static std::thread::id unowned;
|
||||
locker._owner.store(unowned);
|
||||
locker._locker.unlock();
|
||||
locker._update = noop;
|
||||
}
|
||||
};
|
||||
|
@ -2201,4 +2199,4 @@ TRI_voc_rid_t TRI_StringToRid(char const* p, size_t len, bool& isOld,
|
|||
|
||||
// -----------------------------------------------------------------------------
|
||||
// --SECTION-- END-OF-FILE
|
||||
// -----------------------------------------------------------------------------
|
||||
// -----------------------------------------------------------------------------
|
|
@@ -119,7 +119,6 @@ enum TRI_vocbase_col_status_e : int {

/// @brief database
struct TRI_vocbase_t {
friend class arangodb::CollectionNameResolver;
friend class arangodb::StorageEngine;

/// @brief database state

@@ -444,4 +443,4 @@ void TRI_SanitizeObject(arangodb::velocypack::Slice const slice,
void TRI_SanitizeObjectWithEdges(arangodb::velocypack::Slice const slice,
arangodb::velocypack::Builder& builder);

#endif
#endif
@@ -1229,7 +1229,7 @@ ArangoDatabase.prototype._dropView = function (id) {
}
}

var v = this._collection(id);
var v = this._view(id);
if (v) {
return v.drop();
}
@@ -128,7 +128,7 @@ const optionsDefaults = {
'extraArgs': {},
'extremeVerbosity': false,
'force': true,
'arangosearch':false,
'arangosearch':true,
'jsonReply': false,
'loopEternal': false,
'loopSleepSec': 1,
@ -0,0 +1,460 @@
|
|||
/*jshint globalstrict:false, strict:false, maxlen: 500 */
|
||||
/*global assertUndefined, assertEqual, assertTrue, assertFalse, AQL_EXECUTE */
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief tests for iresearch usage
|
||||
///
|
||||
/// @file
|
||||
///
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is triAGENS GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Jan Steemann
|
||||
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
var jsunity = require("jsunity");
|
||||
var db = require("@arangodb").db;
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief test suite
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
function IResearchAqlTestSuite(numberOfShards, replicationFactor) {
|
||||
var c;
|
||||
var v;
|
||||
|
||||
// provided arguments
|
||||
var args = {
|
||||
numberOfShards: numberOfShards,
|
||||
replicationFactor: replicationFactor
|
||||
};
|
||||
console.info("Test suite arguments: " + JSON.stringify(args));
|
||||
|
||||
return {
|
||||
setUp : function () {
|
||||
db._drop("UnitTestsCollection");
|
||||
c = db._create("UnitTestsCollection", args);
|
||||
|
||||
db._drop("AnotherUnitTestsCollection");
|
||||
var ac = db._create("AnotherUnitTestsCollection", args);
|
||||
|
||||
db._dropView("UnitTestsView");
|
||||
v = db._createView("UnitTestsView", "arangosearch", {});
|
||||
var meta = {
|
||||
links: {
|
||||
"UnitTestsCollection": {
|
||||
includeAllFields: true,
|
||||
fields: {
|
||||
text: { analyzers: [ "text_en" ] }
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
v.properties(meta);
|
||||
|
||||
ac.save({ a: "foo", id : 0 });
|
||||
ac.save({ a: "ba", id : 1 });
|
||||
|
||||
for (let i = 0; i < 5; i++) {
|
||||
c.save({ a: "foo", b: "bar", c: i });
|
||||
c.save({ a: "foo", b: "baz", c: i });
|
||||
c.save({ a: "bar", b: "foo", c: i });
|
||||
c.save({ a: "baz", b: "foo", c: i });
|
||||
}
|
||||
|
||||
c.save({ name: "full", text: "the quick brown fox jumps over the lazy dog" });
|
||||
c.save({ name: "half", text: "quick fox over lazy" });
|
||||
c.save({ name: "other half", text: "the brown jumps the dog" });
|
||||
c.save({ name: "quarter", text: "quick over" });
|
||||
|
||||
c.save({ name: "numeric", anotherNumericField: 0 });
|
||||
c.save({ name: "null", anotherNullField: null });
|
||||
c.save({ name: "bool", anotherBoolField: true });
|
||||
c.save({ _key: "foo", xyz: 1 });
|
||||
},
|
||||
|
||||
tearDown : function () {
|
||||
var meta = { links : { "UnitTestsCollection": null } };
|
||||
v.properties(meta);
|
||||
v.drop();
|
||||
db._drop("UnitTestsCollection");
|
||||
db._drop("AnotherUnitTestsCollection");
|
||||
},
|
||||
|
||||
testAttributeEqualityFilter : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER doc.a == 'foo' RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 10);
|
||||
result.forEach(function(res) {
|
||||
assertEqual(res.a, "foo");
|
||||
});
|
||||
},
|
||||
|
||||
testMultipleAttributeEqualityFilter : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER doc.a == 'foo' && doc.b == 'bar' RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 5);
|
||||
result.forEach(function(res) {
|
||||
assertEqual(res.a, "foo");
|
||||
assertEqual(res.b, "bar");
|
||||
});
|
||||
},
|
||||
|
||||
testMultipleAttributeEqualityFilterSortAttribute : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER doc.a == 'foo' && doc.b == 'bar' SORT doc.c RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 5);
|
||||
var last = -1;
|
||||
result.forEach(function(res) {
|
||||
assertEqual(res.a, "foo");
|
||||
assertEqual(res.b, "bar");
|
||||
assertEqual(res.c, last + 1);
|
||||
last = res.c;
|
||||
});
|
||||
},
|
||||
|
||||
testMultipleAttributeEqualityFilterSortAttributeDesc : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER doc.a == 'foo' AND doc.b == 'bar' SORT doc.c DESC RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 5);
|
||||
var last = 5;
|
||||
result.forEach(function(res) {
|
||||
assertEqual(res.a, "foo");
|
||||
assertEqual(res.b, "bar");
|
||||
assertEqual(res.c, last - 1);
|
||||
last = res.c;
|
||||
});
|
||||
},
|
||||
|
||||
testAttributeLessFilter : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER doc.c < 2 RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 8);
|
||||
result.forEach(function(res) {
|
||||
assertTrue(res.c < 2);
|
||||
});
|
||||
},
|
||||
|
||||
testAttributeLeqFilter : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER doc.c <= 2 RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 12);
|
||||
result.forEach(function(res) {
|
||||
assertTrue(res.c <= 2);
|
||||
});
|
||||
},
|
||||
|
||||
testAttributeGeqFilter : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER doc.c >= 2 RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 12);
|
||||
result.forEach(function(res) {
|
||||
assertTrue(res.c >= 2);
|
||||
});
|
||||
},
|
||||
|
||||
testAttributeGreaterFilter : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER doc.c > 2 RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 8);
|
||||
result.forEach(function(res) {
|
||||
assertTrue(res.c > 2);
|
||||
});
|
||||
},
|
||||
|
||||
testAttributeOpenIntervalFilter : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER doc.c > 1 AND doc.c < 3 RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 4);
|
||||
result.forEach(function(res) {
|
||||
assertTrue(res.c > 1 && res.c < 3);
|
||||
});
|
||||
},
|
||||
|
||||
testAttributeClosedIntervalFilter : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER doc.c >= 1 AND doc.c <= 3 RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 12);
|
||||
result.forEach(function(res) {
|
||||
assertTrue(res.c >= 1 && res.c <= 3);
|
||||
});
|
||||
},
|
||||
|
||||
testAttributeIntervalExclusionFilter : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER doc.c < 1 OR doc.c > 3 RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 8);
|
||||
result.forEach(function(res) {
|
||||
assertTrue(res.c < 1 || res.c > 3);
|
||||
});
|
||||
},
|
||||
|
||||
testAttributeNeqFilter : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER doc.a != 'foo' RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 18); // include documents without attribute 'a'
|
||||
result.forEach(function(res) {
|
||||
assertFalse(res.a === 'foo');
|
||||
});
|
||||
},
|
||||
|
||||
testStartsWithFilter : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER STARTS_WITH(doc.a, 'fo') RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 10);
|
||||
result.forEach(function(res) {
|
||||
assertEqual(res.a, 'foo');
|
||||
});
|
||||
},
|
||||
|
||||
testStartsWithFilter2 : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER STARTS_WITH(doc.b, 'ba') RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 10);
|
||||
result.forEach(function(res) {
|
||||
assertTrue(res.b === 'bar' || res.b === 'baz');
|
||||
});
|
||||
},
|
||||
|
||||
testStartsWithFilterSort : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER STARTS_WITH(doc.b, 'ba') && doc.c == 0 SORT doc.b RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 2);
|
||||
assertEqual(result[0].b, 'bar');
|
||||
assertEqual(result[1].b, 'baz');
|
||||
assertEqual(result[0].c, 0);
|
||||
assertEqual(result[1].c, 0);
|
||||
},
|
||||
|
||||
// FIXME uncomment when TOKENS function will be fixed
|
||||
// testInTokensFilterSortTFIDF : function () {
|
||||
// var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER doc.text IN TOKENS('the quick brown', 'text_en') SORT TFIDF(doc) LIMIT 4 RETURN doc", null, { waitForSync: true }).json;
|
||||
//
|
||||
// assertEqual(result.length, 4);
|
||||
// assertEqual(result[0].name, 'full');
|
||||
// assertEqual(result[1].name, 'other half');
|
||||
// assertEqual(result[2].name, 'half');
|
||||
// assertEqual(result[3].name, 'quarter');
|
||||
// },
|
||||
|
||||
testPhraseFilter : function () {
|
||||
var result0 = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER PHRASE(doc.text, 'quick brown fox jumps', 'text_en') RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result0.length, 1);
|
||||
assertEqual(result0[0].name, 'full');
|
||||
|
||||
var result1 = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER PHRASE(doc.text, [ 'quick brown fox jumps' ], 'text_en') RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result1.length, 1);
|
||||
assertEqual(result1[0].name, 'full');
|
||||
},
|
||||
|
||||
testExistsFilter : function () {
|
||||
var expected = new Set();
|
||||
expected.add("full");
|
||||
expected.add("half");
|
||||
expected.add("other half");
|
||||
expected.add("quarter");
|
||||
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER EXISTS(doc.text) RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, expected.size);
|
||||
result.forEach(function(res) {
|
||||
assertTrue(expected.delete(res.name));
|
||||
});
|
||||
assertEqual(expected.size, 0);
|
||||
},
|
||||
|
||||
testExistsFilterByAnalyzer : function () {
|
||||
var expected = new Set();
|
||||
expected.add("full");
|
||||
expected.add("half");
|
||||
expected.add("other half");
|
||||
expected.add("quarter");
|
||||
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER EXISTS(doc.text, 'analyzer') RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, expected.size);
|
||||
result.forEach(function(res) {
|
||||
assertTrue(expected.delete(res.name));
|
||||
});
|
||||
assertEqual(expected.size, 0);
|
||||
},
|
||||
|
||||
testExistsFilterByType : function () {
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER EXISTS(doc.text, 'type') RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, 0);
|
||||
},
|
||||
|
||||
testExistsFilterByTypeNull : function () {
|
||||
var expected = new Set();
|
||||
expected.add("null");
|
||||
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER EXISTS(doc.anotherNullField, 'type', 'null') RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, expected.size);
|
||||
result.forEach(function(res) {
|
||||
assertTrue(expected.delete(res.name));
|
||||
});
|
||||
assertEqual(expected.size, 0);
|
||||
},
|
||||
|
||||
testExistsFilterByTypeBool : function () {
|
||||
var expected = new Set();
|
||||
expected.add("bool");
|
||||
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW UnitTestsView FILTER EXISTS(doc['anotherBoolField'], 'type', 'bool') RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, expected.size);
|
||||
result.forEach(function(res) {
|
||||
assertTrue(expected.delete(res.name));
|
||||
});
|
||||
assertEqual(expected.size, 0);
|
||||
},
|
||||
|
||||
testExistsFilterByTypeNumeric : function () {
|
||||
var expected = new Set();
|
||||
expected.add("numeric");
|
||||
|
||||
var result = AQL_EXECUTE("LET suffix='NumericField' LET fieldName = CONCAT('another', suffix) FOR doc IN VIEW UnitTestsView FILTER EXISTS(doc[fieldName], 'type', 'numeric') RETURN doc", null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, expected.size);
|
||||
result.forEach(function(res) {
|
||||
assertTrue(expected.delete(res.name));
|
||||
});
|
||||
assertEqual(expected.size, 0);
|
||||
},
|
||||
|
||||
testViewInInnerLoop : function() {
|
||||
var expected = new Set(); // FIXME is there a better way to compare objects in js?
|
||||
expected.add(JSON.stringify({ a: "foo", b: "bar", c: 0 }));
|
||||
expected.add(JSON.stringify({ a: "foo", b: "baz", c: 0 }));
|
||||
expected.add(JSON.stringify({ a: "bar", b: "foo", c: 1 }));
|
||||
expected.add(JSON.stringify({ a: "baz", b: "foo", c: 1 }));
|
||||
|
||||
var result = AQL_EXECUTE(
|
||||
"FOR adoc IN AnotherUnitTestsCollection" +
|
||||
" FOR doc IN VIEW UnitTestsView FILTER adoc.id == doc.c && STARTS_WITH(doc['a'], adoc.a) " +
|
||||
"RETURN doc"
|
||||
, null, { waitForSync: true }).json;
|
||||
|
||||
|
||||
assertEqual(result.length, expected.size);
|
||||
result.forEach(function(res) {
|
||||
assertTrue(expected.delete(JSON.stringify({ a: res.a, b: res.b, c: res.c })));
|
||||
});
|
||||
assertEqual(expected.size, 0);
|
||||
},
|
||||
|
||||
testViewInInnerLoopMultipleFilters : function() {
|
||||
var expected = new Set(); // FIXME is there a better way to compare objects in js?
|
||||
expected.add(JSON.stringify({ a: "foo", b: "bar", c: 0 }));
|
||||
expected.add(JSON.stringify({ a: "foo", b: "baz", c: 0 }));
|
||||
|
||||
var result = AQL_EXECUTE(
|
||||
"FOR adoc IN AnotherUnitTestsCollection FILTER adoc.id < 1" +
|
||||
" FOR doc IN VIEW UnitTestsView FILTER adoc.id == doc.c && STARTS_WITH(doc['a'], adoc.a) " +
|
||||
"RETURN doc"
|
||||
, null, { waitForSync: true }).json;
|
||||
|
||||
|
||||
assertEqual(result.length, expected.size);
|
||||
result.forEach(function(res) {
|
||||
assertTrue(expected.delete(JSON.stringify({ a: res.a, b: res.b, c: res.c })));
|
||||
});
|
||||
assertEqual(expected.size, 0);
|
||||
},
|
||||
|
||||
testViewInInnerLoopSortByAttribute : function() {
|
||||
var expected = [];
|
||||
expected.push({ a: "bar", b: "foo", c: 1 });
|
||||
expected.push({ a: "baz", b: "foo", c: 1 });
|
||||
expected.push({ a: "foo", b: "bar", c: 0 });
|
||||
expected.push({ a: "foo", b: "baz", c: 0 });
|
||||
|
||||
var result = AQL_EXECUTE(
|
||||
"FOR adoc IN AnotherUnitTestsCollection" +
|
||||
" FOR doc IN VIEW UnitTestsView FILTER adoc.id == doc.c && STARTS_WITH(doc['a'], adoc.a) " +
|
||||
"SORT doc.c DESC, doc.a, doc.b " +
|
||||
"RETURN doc"
|
||||
, null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, expected.length);
|
||||
var i = 0;
|
||||
result.forEach(function(res) {
|
||||
var doc = expected[i++];
|
||||
assertEqual(doc.a, res.a);
|
||||
assertEqual(doc.b, res.b);
|
||||
assertEqual(doc.c, res.c);
|
||||
});
|
||||
},
|
||||
/*
|
||||
testViewInInnerLoopSortByTFIDF_BM25_Attribute : function() {
|
||||
var expected = [];
|
||||
expected.push({ a: "baz", b: "foo", c: 1 });
|
||||
expected.push({ a: "bar", b: "foo", c: 1 });
|
||||
expected.push({ a: "foo", b: "bar", c: 0 });
|
||||
expected.push({ a: "foo", b: "baz", c: 0 });
|
||||
|
||||
var result = AQL_EXECUTE(
|
||||
"FOR adoc IN AnotherUnitTestsCollection" +
|
||||
" FOR doc IN VIEW UnitTestsView FILTER adoc.id == doc.c && STARTS_WITH(doc['a'], adoc.a) " +
|
||||
"SORT TFIDF(doc) DESC, BM25(doc) DESC, doc.a DESC, doc.b " +
|
||||
"RETURN doc"
|
||||
, null, { waitForSync: true }).json;
|
||||
|
||||
assertEqual(result.length, expected.length);
|
||||
var i = 0;
|
||||
result.forEach(function(res) {
|
||||
var doc = expected[i++];
|
||||
assertEqual(doc.a, res.a);
|
||||
assertEqual(doc.b, res.b);
|
||||
assertEqual(doc.c, res.c);
|
||||
});
|
||||
},
|
||||
*/
|
||||
};
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief executes the test suite
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
jsunity.run(function IResearchAqlTestSuite_s1_r1() {
|
||||
return IResearchAqlTestSuite(1, 1); // positional args match the suite's (numberOfShards, replicationFactor) signature
|
||||
});
|
||||
|
||||
jsunity.run(function IResearchAqlTestSuite_s4_r1() {
|
||||
return IResearchAqlTestSuite(4, 1); // positional args match the suite's (numberOfShards, replicationFactor) signature
|
||||
});
|
||||
|
||||
/*
|
||||
|
||||
jsunity.run(function IResearchAqlTestSuite_s1_r2() {
|
||||
return IResearchAqlTestSuite({ numberOfShards: 1, replicationFactor: 2 })
|
||||
});
|
||||
|
||||
jsunity.run(function IResearchAqlTestSuite_s4_r3() {
|
||||
return IResearchAqlTestSuite({ numberOfShards: 4, replicationFactor: 3 })
|
||||
});
|
||||
|
||||
*/
|
||||
|
||||
return jsunity.done();
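For readers skimming the suite above, a condensed arangosh sketch of the pattern every test follows: create a collection and an arangosearch view, link them, then query with the FOR doc IN VIEW ... FILTER form used throughout this file, forcing a view commit via waitForSync. Collection and view names are illustrative:

var db = require("@arangodb").db;

var col = db._create("demoCollection");
var view = db._createView("demoView", "arangosearch", {});
view.properties({ links: { demoCollection: { includeAllFields: true } } });

col.save({ a: "foo", c: 1 });

// AQL_EXECUTE is the internal test helper used by these suites;
// waitForSync makes the indexed data visible before the query runs
var docs = AQL_EXECUTE(
  "FOR doc IN VIEW demoView FILTER doc.a == 'foo' RETURN doc",
  null, { waitForSync: true }).json;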
|
|
@ -0,0 +1,772 @@
|
|||
/*jshint globalstrict:false, strict:false, maxlen: 500 */
|
||||
/*global assertUndefined, assertEqual, assertNotEqual, assertTrue, assertFalse, AQL_EXECUTE */
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Andrey Abramov
|
||||
/// @author Vasiliy Nabatchikov
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
var jsunity = require("jsunity");
|
||||
var db = require("@arangodb").db;
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief test suite
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
function IResearchFeatureDDLTestSuite () {
|
||||
return {
|
||||
setUpAll : function () {
|
||||
},
|
||||
|
||||
tearDownAll : function () {
|
||||
db._drop("TestCollection");
|
||||
db._drop("TestCollection0");
|
||||
db._drop("TestCollection1");
|
||||
db._drop("TestCollection2");
|
||||
},
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief IResearchFeatureDDLTestSuite tests
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
testStressAddRemoveView : function() {
|
||||
db._dropView("TestView");
|
||||
for (var i = 0; i < 100; ++i) {
|
||||
db._createView("TestView", "arangosearch", {});
|
||||
assertTrue(null != db._view("TestView"));
|
||||
db._dropView("TestView")
|
||||
assertTrue(null == db._view("TestView"));
|
||||
}
|
||||
},
|
||||
|
||||
testStressAddRemoveViewWithLink : function() {
|
||||
db._drop("TestCollection0");
|
||||
db._dropView("TestView");
|
||||
db._create("TestCollection0");
|
||||
|
||||
var addLink = { links: { "TestCollection0": {} } };
|
||||
|
||||
for (var i = 0; i < 100; ++i) {
|
||||
var view = db._createView("TestView", "arangosearch", {});
|
||||
view.properties(addLink, true); // partial update
|
||||
var properties = view.properties();
|
||||
assertTrue(Array === properties.collections.constructor);
|
||||
assertEqual(1, properties.collections.length);
|
||||
var indexes = db.TestCollection0.getIndexes();
|
||||
assertEqual(2, indexes.length);
|
||||
var link = indexes[1];
|
||||
assertEqual("primary", indexes[0].type);
|
||||
assertNotEqual(null, link)
|
||||
assertEqual("arangosearch", link.type)
|
||||
db._dropView("TestView")
|
||||
assertEqual(null, db._view("TestView"))
|
||||
assertEqual(1, db.TestCollection0.getIndexes().length);
|
||||
}
|
||||
},
|
||||
|
||||
testStressAddRemoveLink : function() {
|
||||
db._drop("TestCollection0");
|
||||
db._dropView("TestView");
|
||||
db._create("TestCollection0");
|
||||
var view = db._createView("TestView", "arangosearch", {});
|
||||
|
||||
var addLink = { links: { "TestCollection0": {} } };
|
||||
var removeLink = { links: { "TestCollection0": null } };
|
||||
|
||||
for (var i = 0; i < 100; ++i) {
|
||||
view.properties(addLink, true); // partial update
|
||||
var properties = view.properties();
|
||||
assertTrue(Array === properties.collections.constructor);
|
||||
assertEqual(1, properties.collections.length);
|
||||
var indexes = db.TestCollection0.getIndexes();
|
||||
assertEqual(2, indexes.length);
|
||||
var link = indexes[1];
|
||||
assertEqual("primary", indexes[0].type);
|
||||
assertNotEqual(null, link)
|
||||
assertEqual("arangosearch", link.type)
|
||||
view.properties(removeLink, false);
|
||||
properties = view.properties();
|
||||
assertTrue(Array === properties.collections.constructor);
|
||||
assertEqual(0, properties.collections.length);
|
||||
assertEqual(1, db.TestCollection0.getIndexes().length);
|
||||
}
|
||||
},
|
||||
|
||||
//FIXME
|
||||
// testRemoveLinkViaCollection : function() {
|
||||
// db._drop("TestCollection0");
|
||||
// db._dropView("TestView");
|
||||
//
|
||||
// var view = db._createView("TestView", "arangosearch", {});
|
||||
// db._create("TestCollection0");
|
||||
// var addLink = { links: { "TestCollection0": {} } };
|
||||
// view.properties(addLink, true); // partial update
|
||||
// properties = view.properties();
|
||||
// assertTrue(Array === properties.collections.constructor);
|
||||
// assertEqual(1, properties.collections.length);
|
||||
// db._drop("TestCollection0");
|
||||
// properties = view.properties();
|
||||
// assertTrue(Array === properties.collections.constructor);
|
||||
// assertEqual(0, properties.collections.length);
|
||||
// },
|
||||
|
||||
testViewDDL: function() {
|
||||
// collections
|
||||
db._drop("TestCollection0");
|
||||
db._drop("TestCollection1");
|
||||
db._drop("TestCollection2");
|
||||
db._dropView("TestView");
|
||||
db._create("TestCollection0");
|
||||
db._create("TestCollection1");
|
||||
db._create("TestCollection2");
|
||||
var view = db._createView("TestView", "arangosearch", {});
|
||||
|
||||
var properties = view.properties();
|
||||
assertTrue(Array === properties.collections.constructor);
|
||||
assertEqual(0, properties.collections.length);
|
||||
|
||||
var meta = { links: { "TestCollection0": {} } };
|
||||
view.properties(meta, true); // partial update
|
||||
properties = view.properties();
|
||||
assertTrue(Array === properties.collections.constructor);
|
||||
assertEqual(1, properties.collections.length);
|
||||
|
||||
meta = { links: { "TestCollection1": {} } };
|
||||
view.properties(meta, true); // partial update
|
||||
properties = view.properties();
|
||||
assertTrue(Array === properties.collections.constructor);
|
||||
assertEqual(2, properties.collections.length);
|
||||
|
||||
meta = { links: { "TestCollection2": {} } };
|
||||
view.properties(meta, false); // full update
|
||||
properties = view.properties();
|
||||
assertTrue(Array === properties.collections.constructor);
|
||||
assertEqual(1, properties.collections.length);
|
||||
|
||||
|
||||
// commit
|
||||
db._dropView("TestView");
|
||||
view = db._createView("TestView", "arangosearch", {});
|
||||
|
||||
properties = view.properties();
|
||||
assertTrue(Object === properties.commit.constructor);
|
||||
assertEqual(10, properties.commit.cleanupIntervalStep);
|
||||
assertEqual(60000, properties.commit.commitIntervalMsec);
|
||||
assertEqual(5000, properties.commit.commitTimeoutMsec);
|
||||
assertTrue(Object === properties.commit.consolidate.constructor);
|
||||
assertEqual(4, Object.keys(properties.commit.consolidate).length);
|
||||
assertTrue(Object === properties.commit.consolidate.bytes.constructor);
|
||||
assertEqual(300, properties.commit.consolidate.bytes.segmentThreshold);
|
||||
assertEqual((0.85).toFixed(6), properties.commit.consolidate.bytes.threshold.toFixed(6));
|
||||
assertTrue(Object === properties.commit.consolidate.bytes_accum.constructor);
|
||||
assertEqual(300, properties.commit.consolidate.bytes_accum.segmentThreshold);
|
||||
assertEqual((0.85).toFixed(6), properties.commit.consolidate.bytes_accum.threshold.toFixed(6));
|
||||
assertTrue(Object === properties.commit.consolidate.count.constructor);
|
||||
assertEqual(300, properties.commit.consolidate.count.segmentThreshold);
|
||||
assertEqual((0.85).toFixed(6), properties.commit.consolidate.count.threshold.toFixed(6));
|
||||
assertTrue(Object === properties.commit.consolidate.fill.constructor);
|
||||
assertEqual(300, properties.commit.consolidate.fill.segmentThreshold);
|
||||
assertEqual((0.85).toFixed(6), properties.commit.consolidate.fill.threshold.toFixed(6));
|
||||
|
||||
meta = { commit: {
|
||||
commitIntervalMsec: 10000,
|
||||
consolidate: {
|
||||
bytes: { segmentThreshold: 20, threshold: 0.5 },
|
||||
bytes_accum: {},
|
||||
count: {}
|
||||
}
|
||||
} };
|
||||
view.properties(meta, true); // partial update
|
||||
properties = view.properties();
|
||||
assertTrue(Object === properties.commit.constructor);
|
||||
assertEqual(10, properties.commit.cleanupIntervalStep);
|
||||
assertEqual(10000, properties.commit.commitIntervalMsec);
|
||||
assertEqual(5000, properties.commit.commitTimeoutMsec);
|
||||
assertTrue(Object === properties.commit.consolidate.constructor);
|
||||
assertEqual(3, Object.keys(properties.commit.consolidate).length);
|
||||
assertTrue(Object === properties.commit.consolidate.bytes.constructor);
|
||||
assertEqual(20, properties.commit.consolidate.bytes.segmentThreshold);
|
||||
assertEqual((0.5).toFixed(6), properties.commit.consolidate.bytes.threshold.toFixed(6));
|
||||
assertTrue(Object === properties.commit.consolidate.bytes_accum.constructor);
|
||||
assertEqual(300, properties.commit.consolidate.bytes_accum.segmentThreshold);
|
||||
assertEqual((0.85).toFixed(6), properties.commit.consolidate.bytes_accum.threshold.toFixed(6));
|
||||
assertTrue(Object === properties.commit.consolidate.count.constructor);
|
||||
assertEqual(300, properties.commit.consolidate.count.segmentThreshold);
|
||||
assertEqual((0.85).toFixed(6), properties.commit.consolidate.count.threshold.toFixed(6));
|
||||
|
||||
meta = { commit: {
|
||||
cleanupIntervalStep: 20,
|
||||
consolidate: { count: { segmentThreshold: 30, threshold: 0.75 } }
|
||||
} };
|
||||
view.properties(meta, false); // full update
|
||||
properties = view.properties();
|
||||
assertTrue(Object === properties.commit.constructor);
|
||||
assertEqual(20, properties.commit.cleanupIntervalStep);
|
||||
assertEqual(60000, properties.commit.commitIntervalMsec);
|
||||
assertEqual(5000, properties.commit.commitTimeoutMsec);
|
||||
assertTrue(Object === properties.commit.consolidate.constructor);
|
||||
assertEqual(1, Object.keys(properties.commit.consolidate).length);
|
||||
assertTrue(Object === properties.commit.consolidate.count.constructor);
|
||||
assertEqual(30, properties.commit.consolidate.count.segmentThreshold);
|
||||
assertEqual((0.75).toFixed(6), properties.commit.consolidate.count.threshold.toFixed(6));
|
||||
|
||||
|
||||
// locale
|
||||
db._dropView("TestView");
|
||||
view = db._createView("TestView", "arangosearch", {});
|
||||
|
||||
properties = view.properties();
|
||||
meta = { locale: "de_DE.UTF-16" };
|
||||
view.properties(meta);
|
||||
properties = view.properties();
|
||||
assertTrue(String === properties.locale.constructor);
|
||||
assertTrue(properties.locale.length > 0);
|
||||
assertEqual("de_DE.UTF-8", properties.locale);
|
||||
|
||||
// threads max idle/total
|
||||
db._dropView("TestView");
|
||||
view = db._createView("TestView", "arangosearch", {});
|
||||
|
||||
properties = view.properties();
|
||||
assertTrue(Number === properties.threadsMaxIdle.constructor);
|
||||
assertEqual(5, properties.threadsMaxIdle);
|
||||
assertTrue(Number === properties.threadsMaxTotal.constructor);
|
||||
assertEqual(5, properties.threadsMaxTotal);
|
||||
|
||||
meta = { threadsMaxIdle: 42 };
|
||||
view.properties(meta, true); // partial update
|
||||
properties = view.properties();
|
||||
assertTrue(Number === properties.threadsMaxIdle.constructor);
|
||||
assertEqual(42, properties.threadsMaxIdle);
|
||||
assertTrue(Number === properties.threadsMaxTotal.constructor);
|
||||
assertEqual(5, properties.threadsMaxTotal);
|
||||
|
||||
meta = { threadsMaxTotal: 1 };
|
||||
view.properties(meta, true); // partial update
|
||||
properties = view.properties();
|
||||
assertTrue(Number === properties.threadsMaxIdle.constructor);
|
||||
assertEqual(42, properties.threadsMaxIdle);
|
||||
assertTrue(Number === properties.threadsMaxTotal.constructor);
|
||||
assertEqual(1, properties.threadsMaxTotal);
|
||||
|
||||
meta = { threadsMaxIdle: 0 };
|
||||
view.properties(meta, false); // full update
|
||||
properties = view.properties();
|
||||
assertTrue(Number === properties.threadsMaxIdle.constructor);
|
||||
assertEqual(0, properties.threadsMaxIdle);
|
||||
assertTrue(Number === properties.threadsMaxTotal.constructor);
|
||||
assertEqual(5, properties.threadsMaxTotal);
|
||||
},
|
||||
|
||||
testLinkDDL: function() {
|
||||
db._drop("TestCollection0");
|
||||
db._drop("TestCollection1");
|
||||
db._drop("TestCollection2");
|
||||
db._dropView("TestView");
|
||||
db._create("TestCollection0");
|
||||
db._create("TestCollection1");
|
||||
db._create("TestCollection2");
|
||||
var view = db._createView("TestView", "arangosearch", {});
|
||||
|
||||
var meta = { links: {
|
||||
"TestCollection0": {},
|
||||
"TestCollection1": { analyzers: [ "text_en"], includeAllFields: true, trackListPositions: true },
|
||||
"TestCollection2": { fields: {
|
||||
"b": { fields: { "b1": {} } },
|
||||
"c": { includeAllFields: true },
|
||||
"d": { trackListPositions: true },
|
||||
"e": { analyzers: [ "text_de"] }
|
||||
} }
|
||||
} };
|
||||
view.properties(meta, true); // partial update
|
||||
var properties = view.properties();
|
||||
assertTrue(Object === properties.links.constructor);
|
||||
assertEqual(3, Object.keys(properties.links).length);
|
||||
|
||||
assertTrue(Object === properties.links.TestCollection0.constructor);
|
||||
assertTrue(Object === properties.links.TestCollection0.fields.constructor);
|
||||
assertEqual(0, Object.keys(properties.links.TestCollection0.fields).length);
|
||||
assertTrue(Boolean === properties.links.TestCollection0.includeAllFields.constructor);
|
||||
assertEqual(false, properties.links.TestCollection0.includeAllFields);
|
||||
assertTrue(Boolean === properties.links.TestCollection0.trackListPositions.constructor);
|
||||
assertEqual(false, properties.links.TestCollection0.trackListPositions);
|
||||
assertTrue(Array === properties.links.TestCollection0.analyzers.constructor);
|
||||
assertEqual(1, properties.links.TestCollection0.analyzers.length);
|
||||
assertTrue(String === properties.links.TestCollection0.analyzers[0].constructor);
|
||||
assertEqual("identity", properties.links.TestCollection0.analyzers[0]);
|
||||
|
||||
|
||||
assertTrue(Object === properties.links.TestCollection1.constructor);
|
||||
assertTrue(Object === properties.links.TestCollection1.fields.constructor);
|
||||
assertEqual(0, Object.keys(properties.links.TestCollection1.fields).length);
|
||||
assertTrue(Boolean === properties.links.TestCollection1.includeAllFields.constructor);
|
||||
assertEqual(true, properties.links.TestCollection1.includeAllFields);
|
||||
assertTrue(Boolean === properties.links.TestCollection1.trackListPositions.constructor);
|
||||
assertEqual(true, properties.links.TestCollection1.trackListPositions);
|
||||
assertTrue(Array === properties.links.TestCollection1.analyzers.constructor);
|
||||
assertEqual(1, properties.links.TestCollection1.analyzers.length);
|
||||
assertTrue(String === properties.links.TestCollection1.analyzers[0].constructor);
|
||||
assertEqual("text_en", properties.links.TestCollection1.analyzers[0]);
|
||||
|
||||
|
||||
assertTrue(Object === properties.links.TestCollection2.constructor);
|
||||
|
||||
assertTrue(Object === properties.links.TestCollection2.fields.constructor);
|
||||
assertEqual(4, Object.keys(properties.links.TestCollection2.fields).length);
|
||||
|
||||
assertTrue(Object === properties.links.TestCollection2.fields.b.fields.constructor);
|
||||
assertEqual(1, Object.keys(properties.links.TestCollection2.fields.b.fields).length);
|
||||
|
||||
assertTrue(Boolean === properties.links.TestCollection2.fields.c.includeAllFields.constructor);
|
||||
assertEqual(true, properties.links.TestCollection2.fields.c.includeAllFields);
|
||||
|
||||
assertTrue(Boolean === properties.links.TestCollection2.fields.d.trackListPositions.constructor);
|
||||
assertEqual(true, properties.links.TestCollection2.fields.d.trackListPositions);
|
||||
|
||||
assertTrue(Array === properties.links.TestCollection2.fields.e.analyzers.constructor);
|
||||
assertEqual(1, properties.links.TestCollection2.fields.e.analyzers.length);
|
||||
assertTrue(String === properties.links.TestCollection2.fields.e.analyzers[0].constructor);
|
||||
assertEqual("text_de", properties.links.TestCollection2.fields.e.analyzers[0]);
|
||||
|
||||
assertTrue(Boolean === properties.links.TestCollection2.includeAllFields.constructor);
|
||||
assertEqual(false, properties.links.TestCollection2.includeAllFields);
|
||||
assertTrue(Boolean === properties.links.TestCollection2.trackListPositions.constructor);
|
||||
assertEqual(false, properties.links.TestCollection2.trackListPositions);
|
||||
assertTrue(Array === properties.links.TestCollection2.analyzers.constructor);
|
||||
assertEqual(1, properties.links.TestCollection2.analyzers.length);
|
||||
assertTrue(String === properties.links.TestCollection2.analyzers[0].constructor);
|
||||
assertEqual("identity", properties.links.TestCollection2.analyzers[0]);
|
||||
|
||||
meta = { links: { "TestCollection0": null, "TestCollection2": {} } };
|
||||
view.properties(meta, true); // partial update
|
||||
properties = view.properties();
|
||||
assertTrue(Object === properties.links.constructor);
|
||||
assertEqual(2, Object.keys(properties.links).length);
|
||||
|
||||
assertTrue(Object === properties.links.TestCollection1.constructor);
|
||||
assertTrue(Object === properties.links.TestCollection1.fields.constructor);
|
||||
assertEqual(0, Object.keys(properties.links.TestCollection1.fields).length);
|
||||
assertTrue(Boolean === properties.links.TestCollection1.includeAllFields.constructor);
|
||||
assertEqual(true, properties.links.TestCollection1.includeAllFields);
|
||||
assertTrue(Boolean === properties.links.TestCollection1.trackListPositions.constructor);
|
||||
assertEqual(true, properties.links.TestCollection1.trackListPositions);
|
||||
assertTrue(Array === properties.links.TestCollection1.analyzers.constructor);
|
||||
assertEqual(1, properties.links.TestCollection1.analyzers.length);
|
||||
assertTrue(String === properties.links.TestCollection1.analyzers[0].constructor);
|
||||
assertEqual("text_en", properties.links.TestCollection1.analyzers[0]);
|
||||
|
||||
|
||||
assertTrue(Object === properties.links.TestCollection2.constructor);
|
||||
assertTrue(Object === properties.links.TestCollection2.fields.constructor);
|
||||
assertEqual(0, Object.keys(properties.links.TestCollection2.fields).length);
|
||||
assertTrue(Boolean === properties.links.TestCollection2.includeAllFields.constructor);
|
||||
assertEqual(false, properties.links.TestCollection2.includeAllFields);
|
||||
assertTrue(Boolean === properties.links.TestCollection2.trackListPositions.constructor);
|
||||
assertEqual(false, properties.links.TestCollection2.trackListPositions);
|
||||
assertTrue(Array === properties.links.TestCollection2.analyzers.constructor);
|
||||
assertEqual(1, properties.links.TestCollection2.analyzers.length);
|
||||
assertTrue(String === properties.links.TestCollection2.analyzers[0].constructor);
|
||||
assertEqual("identity", properties.links.TestCollection2.analyzers[0]);
|
||||
|
||||
meta = { links: { "TestCollection0": { includeAllFields: true }, "TestCollection1": {} } };
|
||||
view.properties(meta, false); // full update
|
||||
properties = view.properties();
|
||||
assertTrue(Object === properties.links.constructor);
|
||||
assertEqual(2, Object.keys(properties.links).length);
|
||||
|
||||
assertTrue(Object === properties.links.TestCollection0.constructor);
|
||||
assertTrue(Object === properties.links.TestCollection0.fields.constructor);
|
||||
assertEqual(0, Object.keys(properties.links.TestCollection0.fields).length);
|
||||
assertTrue(Boolean === properties.links.TestCollection0.includeAllFields.constructor);
|
||||
assertEqual(true, properties.links.TestCollection0.includeAllFields);
|
||||
assertTrue(Boolean === properties.links.TestCollection0.trackListPositions.constructor);
|
||||
assertEqual(false, properties.links.TestCollection0.trackListPositions);
|
||||
assertTrue(Array === properties.links.TestCollection0.analyzers.constructor);
|
||||
assertEqual(1, properties.links.TestCollection0.analyzers.length);
|
||||
assertTrue(String === properties.links.TestCollection0.analyzers[0].constructor);
|
||||
assertEqual("identity", properties.links.TestCollection0.analyzers[0]);
|
||||
|
||||
|
||||
assertTrue(Object === properties.links.TestCollection1.constructor);
|
||||
assertTrue(Object === properties.links.TestCollection1.fields.constructor);
|
||||
assertEqual(0, Object.keys(properties.links.TestCollection1.fields).length);
|
||||
assertTrue(Boolean === properties.links.TestCollection1.includeAllFields.constructor);
|
||||
assertEqual(false, properties.links.TestCollection1.includeAllFields);
|
||||
assertTrue(Boolean === properties.links.TestCollection1.trackListPositions.constructor);
|
||||
assertEqual(false, properties.links.TestCollection1.trackListPositions);
|
||||
assertTrue(Array === properties.links.TestCollection1.analyzers.constructor);
|
||||
assertEqual(1, properties.links.TestCollection1.analyzers.length);
|
||||
assertTrue(String === properties.links.TestCollection1.analyzers[0].constructor);
|
||||
assertEqual("identity", properties.links.TestCollection1.analyzers[0]);
|
||||
},
|
||||
|
||||
testViewCreate: function() {
|
||||
// 1 empty collection
|
||||
db._dropView("TestView");
|
||||
db._drop("TestCollection0");
|
||||
var col0 = db._create("TestCollection0");
|
||||
var view = db._createView("TestView", "arangosearch", {});
|
||||
|
||||
var meta = { links: { "TestCollection0": { includeAllFields: true } } };
|
||||
view.properties(meta, true); // partial update
|
||||
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW TestView SORT doc.name RETURN doc", null, { waitForSync: true }).json;
|
||||
assertEqual(0, result.length);
|
||||
|
||||
col0.save({ name: "quarter", text: "quick over" });
|
||||
result = AQL_EXECUTE("FOR doc IN VIEW TestView SORT doc.name RETURN doc", null, { waitForSync: true }).json;
|
||||
assertEqual(1, result.length);
|
||||
assertEqual("quarter", result[0].name);
|
||||
|
||||
// 1 non-empty collection
|
||||
db._dropView("TestView");
|
||||
db._drop("TestCollection0");
|
||||
col0 = db._create("TestCollection0");
|
||||
view = db._createView("TestView", "arangosearch", {});
|
||||
|
||||
col0.save({ name: "full", text: "the quick brown fox jumps over the lazy dog" });
|
||||
col0.save({ name: "half", text: "quick fox over lazy" });
|
||||
col0.save({ name: "other half", text: "the brown jumps the dog" });
|
||||
col0.save({ name: "quarter", text: "quick over" });
|
||||
|
||||
meta = { links: { "TestCollection0": { includeAllFields: true } } };
|
||||
view.properties(meta, true); // partial update
|
||||
|
||||
result = AQL_EXECUTE("FOR doc IN VIEW TestView SORT doc.name RETURN doc", null, { waitForSync: true }).json;
|
||||
assertEqual(4, result.length);
|
||||
assertEqual("full", result[0].name);
|
||||
assertEqual("half", result[1].name);
|
||||
assertEqual("other half", result[2].name);
|
||||
assertEqual("quarter", result[3].name);
|
||||
|
||||
// 2 non-empty collections
|
||||
db._dropView("TestView");
|
||||
db._drop("TestCollection0");
|
||||
db._drop("TestCollection1");
|
||||
col0 = db._create("TestCollection0");
|
||||
var col1 = db._create("TestCollection1");
|
||||
view = db._createView("TestView", "arangosearch", {});
|
||||
|
||||
col0.save({ name: "full", text: "the quick brown fox jumps over the lazy dog" });
|
||||
col0.save({ name: "half", text: "quick fox over lazy" });
|
||||
col1.save({ name: "other half", text: "the brown jumps the dog" });
|
||||
col1.save({ name: "quarter", text: "quick over" });
|
||||
|
||||
meta = { links: {
|
||||
"TestCollection0": { includeAllFields: true },
|
||||
"TestCollection1": { includeAllFields: true }
|
||||
} };
|
||||
view.properties(meta, true); // partial update
|
||||
|
||||
result = AQL_EXECUTE("FOR doc IN VIEW TestView SORT doc.name RETURN doc", null, { waitForSync: true }).json;
|
||||
assertEqual(4, result.length);
|
||||
assertEqual("full", result[0].name);
|
||||
assertEqual("half", result[1].name);
|
||||
assertEqual("other half", result[2].name);
|
||||
assertEqual("quarter", result[3].name);
|
||||
|
||||
// 1 empty collection + 2 non-empty collections
|
||||
db._dropView("TestView");
|
||||
db._drop("TestCollection0");
|
||||
db._drop("TestCollection1");
|
||||
db._drop("TestCollection2");
|
||||
col0 = db._create("TestCollection0");
|
||||
col1 = db._create("TestCollection1");
|
||||
var col2 = db._create("TestCollection2");
|
||||
view = db._createView("TestView", "arangosearch", {});
|
||||
|
||||
col2.save({ name: "full", text: "the quick brown fox jumps over the lazy dog" });
|
||||
col2.save({ name: "half", text: "quick fox over lazy" });
|
||||
col0.save({ name: "other half", text: "the brown jumps the dog" });
|
||||
col0.save({ name: "quarter", text: "quick over" });
|
||||
|
||||
meta = { links: {
|
||||
"TestCollection0": { includeAllFields: true },
|
||||
"TestCollection1": { includeAllFields: true },
|
||||
"TestCollection2": { includeAllFields: true }
|
||||
} };
|
||||
view.properties(meta, true); // partial update
|
||||
|
||||
result = AQL_EXECUTE("FOR doc IN VIEW TestView SORT doc.name RETURN doc", null, { waitForSync: true }).json;
|
||||
assertEqual(4, result.length);
|
||||
assertEqual("full", result[0].name);
|
||||
assertEqual("half", result[1].name);
|
||||
assertEqual("other half", result[2].name);
|
||||
assertEqual("quarter", result[3].name);
|
||||
},
|
||||
|
||||
testViewModify: function() {
|
||||
// 1 empty collection
|
||||
db._dropView("TestView");
|
||||
db._drop("TestCollection0");
|
||||
var col0 = db._create("TestCollection0");
|
||||
var view = db._createView("TestView", "arangosearch", {});
|
||||
|
||||
var meta = { links: { "TestCollection0": { includeAllFields: true } } };
|
||||
view.properties(meta, true); // partial update
|
||||
|
||||
meta = {
|
||||
commit: {
|
||||
commitIntervalMsec: 10000,
|
||||
consolidate: {
|
||||
bytes: { segmentThreshold: 20, threshold: 0.5 },
|
||||
bytes_accum: {},
|
||||
count: {}
|
||||
}
|
||||
},
|
||||
locale: "de_DE.UTF-16",
|
||||
threadsMaxIdle: 42,
|
||||
threadsMaxTotal: 1
|
||||
};
|
||||
view.properties(meta, true); // partial update
|
||||
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW TestView SORT doc.name RETURN doc", null, { waitForSync: true }).json;
|
||||
assertEqual(0, result.length);
|
||||
var properties = view.properties();
|
||||
assertEqual(10, properties.commit.cleanupIntervalStep);
|
||||
assertEqual(10000, properties.commit.commitIntervalMsec);
|
||||
assertEqual(5000, properties.commit.commitTimeoutMsec);
|
||||
assertEqual(3, Object.keys(properties.commit.consolidate).length);
|
||||
assertEqual(20, properties.commit.consolidate.bytes.segmentThreshold);
|
||||
assertEqual((0.5).toFixed(6), properties.commit.consolidate.bytes.threshold.toFixed(6));
|
||||
assertEqual(300, properties.commit.consolidate.bytes_accum.segmentThreshold);
|
||||
assertEqual((0.85).toFixed(6), properties.commit.consolidate.bytes_accum.threshold.toFixed(6));
|
||||
assertEqual(300, properties.commit.consolidate.count.segmentThreshold);
|
||||
assertEqual((0.85).toFixed(6), properties.commit.consolidate.count.threshold.toFixed(6));
|
||||
assertEqual("de_DE.UTF-8", properties.locale);
|
||||
assertEqual(42, properties.threadsMaxIdle);
|
||||
assertEqual(1, properties.threadsMaxTotal);
|
||||
|
||||
col0.save({ name: "quarter", text: "quick over" });
|
||||
result = AQL_EXECUTE("FOR doc IN VIEW TestView SORT doc.name RETURN doc", null, { waitForSync: true }).json;
|
||||
assertEqual(1, result.length);
|
||||
assertEqual("quarter", result[0].name);
|
||||
|
||||
// 1 non-empty collection
|
||||
db._dropView("TestView");
|
||||
db._drop("TestCollection0");
|
||||
col0 = db._create("TestCollection0");
|
||||
view = db._createView("TestView", "arangosearch", {});
|
||||
|
||||
col0.save({ name: "full", text: "the quick brown fox jumps over the lazy dog" });
|
||||
col0.save({ name: "half", text: "quick fox over lazy" });
|
||||
col0.save({ name: "other half", text: "the brown jumps the dog" });
|
||||
col0.save({ name: "quarter", text: "quick over" });
|
||||
|
||||
meta = { links: { "TestCollection0": { includeAllFields: true } } };
|
||||
view.properties(meta, true); // partial update
|
||||
|
||||
meta = {
|
||||
commit: {
|
||||
commitIntervalMsec: 10000,
|
||||
consolidate: {
|
||||
bytes: { segmentThreshold: 20, threshold: 0.5 },
|
||||
bytes_accum: {},
|
||||
count: {}
|
||||
}
|
||||
},
|
||||
locale: "de_DE.UTF-16",
|
||||
threadsMaxIdle: 42,
|
||||
threadsMaxTotal: 1
|
||||
};
|
||||
view.properties(meta, true); // partial update
|
||||
|
||||
result = AQL_EXECUTE("FOR doc IN VIEW TestView SORT doc.name RETURN doc", null, { waitForSync: true }).json;
|
||||
assertEqual(4, result.length);
|
||||
assertEqual("full", result[0].name);
|
||||
assertEqual("half", result[1].name);
|
||||
assertEqual("other half", result[2].name);
|
||||
assertEqual("quarter", result[3].name);
|
||||
properties = view.properties();
|
||||
assertEqual(10, properties.commit.cleanupIntervalStep);
|
||||
assertEqual(10000, properties.commit.commitIntervalMsec);
|
||||
assertEqual(5000, properties.commit.commitTimeoutMsec);
|
||||
assertEqual(3, Object.keys(properties.commit.consolidate).length);
|
||||
assertEqual(20, properties.commit.consolidate.bytes.segmentThreshold);
|
||||
assertEqual((0.5).toFixed(6), properties.commit.consolidate.bytes.threshold.toFixed(6));
|
||||
assertEqual(300, properties.commit.consolidate.bytes_accum.segmentThreshold);
|
||||
assertEqual((0.85).toFixed(6), properties.commit.consolidate.bytes_accum.threshold.toFixed(6));
|
||||
assertEqual(300, properties.commit.consolidate.count.segmentThreshold);
|
||||
assertEqual((0.85).toFixed(6), properties.commit.consolidate.count.threshold.toFixed(6));
|
||||
assertEqual("de_DE.UTF-8", properties.locale);
|
||||
assertEqual(42, properties.threadsMaxIdle);
|
||||
assertEqual(1, properties.threadsMaxTotal);
|
||||
|
||||
// 2 non-empty collections
|
||||
db._dropView("TestView");
|
||||
db._drop("TestCollection0");
|
||||
db._drop("TestCollection1");
|
||||
col0 = db._create("TestCollection0");
|
||||
var col1 = db._create("TestCollection1");
|
||||
view = db._createView("TestView", "arangosearch", {});
|
||||
|
||||
col0.save({ name: "full", text: "the quick brown fox jumps over the lazy dog" });
|
||||
col0.save({ name: "half", text: "quick fox over lazy" });
|
||||
col1.save({ name: "other half", text: "the brown jumps the dog" });
|
||||
col1.save({ name: "quarter", text: "quick over" });
|
||||
|
||||
meta = { links: {
|
||||
"TestCollection0": { includeAllFields: true },
|
||||
"TestCollection1": { includeAllFields: true }
|
||||
} };
|
||||
view.properties(meta, true); // partial update
|
||||
|
||||
meta = {
|
||||
commit: {
|
||||
commitIntervalMsec: 10000,
|
||||
consolidate: {
|
||||
bytes: { segmentThreshold: 20, threshold: 0.5 },
|
||||
bytes_accum: {},
|
||||
count: {}
|
||||
}
|
||||
},
|
||||
locale: "de_DE.UTF-16",
|
||||
threadsMaxIdle: 42,
|
||||
threadsMaxTotal: 1
|
||||
};
|
||||
view.properties(meta, true); // partial update
|
||||
|
||||
result = AQL_EXECUTE("FOR doc IN VIEW TestView SORT doc.name RETURN doc", null, { waitForSync: true }).json;
|
||||
assertEqual(4, result.length);
|
||||
assertEqual("full", result[0].name);
|
||||
assertEqual("half", result[1].name);
|
||||
assertEqual("other half", result[2].name);
|
||||
assertEqual("quarter", result[3].name);
|
||||
properties = view.properties();
|
||||
assertEqual(10, properties.commit.cleanupIntervalStep);
|
||||
assertEqual(10000, properties.commit.commitIntervalMsec);
|
||||
assertEqual(5000, properties.commit.commitTimeoutMsec);
|
||||
assertEqual(3, Object.keys(properties.commit.consolidate).length);
|
||||
assertEqual(20, properties.commit.consolidate.bytes.segmentThreshold);
|
||||
assertEqual((0.5).toFixed(6), properties.commit.consolidate.bytes.threshold.toFixed(6));
|
||||
assertEqual(300, properties.commit.consolidate.bytes_accum.segmentThreshold);
|
||||
assertEqual((0.85).toFixed(6), properties.commit.consolidate.bytes_accum.threshold.toFixed(6));
|
||||
assertEqual(300, properties.commit.consolidate.count.segmentThreshold);
|
||||
assertEqual((0.85).toFixed(6), properties.commit.consolidate.count.threshold.toFixed(6));
|
||||
assertEqual("de_DE.UTF-8", properties.locale);
|
||||
assertEqual(42, properties.threadsMaxIdle);
|
||||
assertEqual(1, properties.threadsMaxTotal);
|
||||
|
||||
// 1 empty collection + 2 non-empty collections
|
||||
db._dropView("TestView");
|
||||
db._drop("TestCollection0");
|
||||
db._drop("TestCollection1");
|
||||
db._drop("TestCollection2");
|
||||
col0 = db._create("TestCollection0");
|
||||
col1 = db._create("TestCollection1");
|
||||
var col2 = db._create("TestCollection2");
|
||||
view = db._createView("TestView", "arangosearch", {});
|
||||
|
||||
col2.save({ name: "full", text: "the quick brown fox jumps over the lazy dog" });
|
||||
col2.save({ name: "half", text: "quick fox over lazy" });
|
||||
col0.save({ name: "other half", text: "the brown jumps the dog" });
|
||||
col0.save({ name: "quarter", text: "quick over" });
|
||||
|
||||
meta = { links: {
|
||||
"TestCollection0": { includeAllFields: true },
|
||||
"TestCollection1": { includeAllFields: true },
|
||||
"TestCollection2": { includeAllFields: true }
|
||||
} };
|
||||
view.properties(meta, true); // partial update
|
||||
|
||||
meta = {
|
||||
commit: {
|
||||
commitIntervalMsec: 10000,
|
||||
consolidate: {
|
||||
bytes: { segmentThreshold: 20, threshold: 0.5 },
|
||||
bytes_accum: {},
|
||||
count: {}
|
||||
}
|
||||
},
|
||||
locale: "de_DE.UTF-16",
|
||||
threadsMaxIdle: 42,
|
||||
threadsMaxTotal: 1
|
||||
};
|
||||
view.properties(meta, true); // partial update
|
||||
|
||||
result = AQL_EXECUTE("FOR doc IN VIEW TestView SORT doc.name RETURN doc", null, { waitForSync: true }).json;
|
||||
assertEqual(4, result.length);
|
||||
assertEqual("full", result[0].name);
|
||||
assertEqual("half", result[1].name);
|
||||
assertEqual("other half", result[2].name);
|
||||
assertEqual("quarter", result[3].name);
|
||||
properties = view.properties();
|
||||
assertEqual(10, properties.commit.cleanupIntervalStep);
|
||||
assertEqual(10000, properties.commit.commitIntervalMsec);
|
||||
assertEqual(5000, properties.commit.commitTimeoutMsec);
|
||||
assertEqual(3, Object.keys(properties.commit.consolidate).length);
|
||||
assertEqual(20, properties.commit.consolidate.bytes.segmentThreshold);
|
||||
assertEqual((0.5).toFixed(6), properties.commit.consolidate.bytes.threshold.toFixed(6));
|
||||
assertEqual(300, properties.commit.consolidate.bytes_accum.segmentThreshold);
|
||||
assertEqual((0.85).toFixed(6), properties.commit.consolidate.bytes_accum.threshold.toFixed(6));
|
||||
assertEqual(300, properties.commit.consolidate.count.segmentThreshold);
|
||||
assertEqual((0.85).toFixed(6), properties.commit.consolidate.count.threshold.toFixed(6));
|
||||
assertEqual("de_DE.UTF-8", properties.locale);
|
||||
assertEqual(42, properties.threadsMaxIdle);
|
||||
assertEqual(1, properties.threadsMaxTotal);
|
||||
},
|
||||
|
||||
testLinkModify: function() {
|
||||
db._dropView("TestView");
|
||||
db._drop("TestCollection0");
|
||||
var col0 = db._create("TestCollection0");
|
||||
var view = db._createView("TestView", "arangosearch", {});
|
||||
|
||||
col0.save({ a: "foo", c: "bar", z: 0 });
|
||||
col0.save({ a: "foz", d: "baz", z: 1 });
|
||||
col0.save({ b: "bar", c: "foo", z: 2 });
|
||||
col0.save({ b: "baz", d: "foz", z: 3 });
|
||||
|
||||
var meta = { links: { "TestCollection0": { fields: { a: {} } } } };
|
||||
view.properties(meta, true); // partial update
|
||||
|
||||
var result = AQL_EXECUTE("FOR doc IN VIEW TestView SORT doc.z RETURN doc", null, { waitForSync: true }).json;
|
||||
assertEqual(2, result.length);
|
||||
assertEqual(0, result[0].z);
|
||||
assertEqual(1, result[1].z);
|
||||
|
||||
meta = { links: { "TestCollection0": { fields: { b: {} } } } };
|
||||
view.properties(meta, true); // partial update
|
||||
|
||||
result = AQL_EXECUTE("FOR doc IN VIEW TestView SORT doc.z RETURN doc", null, { waitForSync: true }).json;
|
||||
assertEqual(2, result.length);
|
||||
assertEqual(2, result[0].z);
|
||||
assertEqual(3, result[1].z);
|
||||
|
||||
meta = { links: { "TestCollection0": { fields: { c: {} } } } };
|
||||
view.properties(meta, false); // full update
|
||||
|
||||
result = AQL_EXECUTE("FOR doc IN VIEW TestView SORT doc.z RETURN doc", null, { waitForSync: true }).json;
|
||||
assertEqual(2, result.length);
|
||||
assertEqual(0, result[0].z);
|
||||
assertEqual(2, result[1].z);
|
||||
},
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief executes the test suite
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
jsunity.run(IResearchFeatureDDLTestSuite);
|
||||
|
||||
return jsunity.done();
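A condensed sketch of the partial- versus full-update semantics that the DDL suite above asserts for view.properties(); collection and view names are illustrative:

var db = require("@arangodb").db;

db._create("demoCollection0");
db._create("demoCollection1");
var view = db._createView("demoView", "arangosearch", {});

view.properties({ links: { demoCollection0: {} } }, true);   // partial update: adds a link
view.properties({ links: { demoCollection1: {} } }, true);   // partial update: both links remain
view.properties({ links: { demoCollection1: {} } }, false);  // full update: only this link survives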
|
|
@ -90,10 +90,6 @@ function iResearchAqlTestSuite () {
|
|||
db._drop("AnotherUnitTestsCollection");
|
||||
},
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief test no fullcount
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
testTransactionRegistration : function () {
|
||||
// read lock
|
||||
var result = db._executeTransaction({
|
||||
|
|
|
@ -22,12 +22,76 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "AgencyMock.h"
|
||||
#include "Basics/ConditionLocker.h"
|
||||
#include "Basics/NumberUtils.h"
|
||||
#include "Cluster/ClusterFeature.h"
|
||||
#include "Cluster/ClusterInfo.h"
|
||||
#include "Agency/Store.h"
|
||||
#include "lib/Rest/HttpResponse.h"
|
||||
|
||||
#include <velocypack/velocypack-aliases.h>
|
||||
|
||||
namespace arangodb {
|
||||
namespace consensus {
|
||||
|
||||
// FIXME TODO for some reason the implementation of this function is missing in the arangodb code
|
||||
void Store::notifyObservers() const {
|
||||
auto* clusterFeature =
|
||||
arangodb::application_features::ApplicationServer::getFeature<
|
||||
arangodb::ClusterFeature
|
||||
>("Cluster");
|
||||
|
||||
if (!clusterFeature) {
|
||||
return;
|
||||
}
|
||||
|
||||
auto* callbackRegistry = clusterFeature->agencyCallbackRegistry();
|
||||
|
||||
if (!callbackRegistry) {
|
||||
return;
|
||||
}
|
||||
|
||||
std::vector<uint32_t> callbackIds;
|
||||
|
||||
{
|
||||
MUTEX_LOCKER(storeLocker, _storeLock);
|
||||
|
||||
for (auto& entry: _observerTable) {
|
||||
auto& key = entry.first;
|
||||
auto pos = key.rfind("/"); // observer id is after the last '/'
|
||||
|
||||
if (std::string::npos == pos) {
|
||||
continue;
|
||||
}
|
||||
|
||||
bool success;
|
||||
auto* idStr = &(key[pos + 1]);
|
||||
auto id = arangodb::NumberUtils::atoi<uint32_t>(
|
||||
idStr, idStr + std::strlen(idStr), success
|
||||
);
|
||||
|
||||
if (success) {
|
||||
callbackIds.emplace_back(id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (auto& id: callbackIds) {
|
||||
try {
|
||||
auto& condition = callbackRegistry->getCallback(id)->_cv;
|
||||
CONDITION_LOCKER(locker, condition);
|
||||
|
||||
callbackRegistry->getCallback(id)->refetchAndUpdate(false, true); // force a check
|
||||
condition.signal();
|
||||
} catch(...) {
|
||||
// ignore
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // consensus
|
||||
} // arangodb
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// --SECTION-- GeneralClientConnectionAgencyMock
|
||||
// -----------------------------------------------------------------------------
|
||||
|
@ -82,6 +146,11 @@ void GeneralClientConnectionAgencyMock::handleWrite(

  resp.writeHeader(&buffer);
  buffer.appendText(body);

  if (_invokeCallbacks) {
    // FIXME TODO should be done in a separate thread since some callbacks acquire non-recursive mutexes
    _store->notifyObservers();
  }
}

void GeneralClientConnectionAgencyMock::response(
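// A minimal sketch of the deferred notification suggested by the FIXME above
// (illustrative only, not part of this change); it assumes the store outlives
// the detached worker thread.
#include <thread>
#include <utility>

template <typename Notify>
void notifyAsync(Notify notify) {
  // run the observer notification on its own thread so callbacks that take
  // non-recursive mutexes already held by the caller cannot deadlock
  std::thread(std::move(notify)).detach();
}

// usage sketch: notifyAsync([store = _store]() { store->notifyObservers(); });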
@ -149,4 +218,4 @@ void GeneralClientConnectionAgencyMock::request(char const* data, size_t length)

// -----------------------------------------------------------------------------
// --SECTION--                                                       END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
@ -40,9 +40,11 @@ class GeneralClientConnectionAgencyMock: public GeneralClientConnectionMock {
 public:
  explicit GeneralClientConnectionAgencyMock(
    arangodb::consensus::Store& store,
    bool invokeCallbacks = false,
    bool trace = false
  ) noexcept
    : _store(&store),
      _invokeCallbacks(invokeCallbacks),
      _trace(trace) {
  }

@ -63,6 +65,7 @@ class GeneralClientConnectionAgencyMock: public GeneralClientConnectionMock {
  std::vector<std::string> _path;
  std::string _url;
  std::string _body;
  bool _invokeCallbacks;
  bool _trace;
}; // GeneralClientConnectionAgencyMock
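// Hedged usage sketch of the extended constructor: the second argument enables
// observer notification after each agency write (see handleWrite above). The
// store/manager wiring mirrors the fixture later in this commit; anything beyond
// the constructor signature is illustrative.
//
//   arangodb::consensus::Store agencyStore{nullptr, "arango"};
//   auto* agencyCommManager = new AgencyCommManagerMock("arango");
//   auto* agency = agencyCommManager->addConnection<GeneralClientConnectionAgencyMock>(
//     agencyStore, true // invokeCallbacks
//   );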
@ -578,8 +578,8 @@ SECTION("test_write") {
  static std::vector<std::string> const EMPTY;
  auto doc0 = arangodb::velocypack::Parser::fromJson("{ \"abc\": \"def\" }");
  auto doc1 = arangodb::velocypack::Parser::fromJson("{ \"ghi\": \"jkl\" }");
  std::string dataPath = (((irs::utf8_path()/=s.testFilesystemPath)/=std::string("databases"))/=std::string("arangosearch-42")).utf8();
  TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
  std::string dataPath = ((((irs::utf8_path()/=s.testFilesystemPath)/=std::string("databases"))/=(std::string("database-") + std::to_string(vocbase.id())))/=std::string("arangosearch-42")).utf8();
  auto linkJson = arangodb::velocypack::Parser::fromJson("{ \"type\": \"arangosearch\", \"view\": 42, \"includeAllFields\": true }");
  auto collectionJson = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testCollection\" }");
  auto viewJson = arangodb::velocypack::Parser::fromJson("{ \

@ -672,4 +672,4 @@ SECTION("test_write") {

// -----------------------------------------------------------------------------
// --SECTION--                                                       END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
@ -373,7 +373,7 @@ SECTION("test_create_drop") {

  // drop view
  CHECK(vocbase->dropView(logicalView->planId(), false).ok());
  CHECK(nullptr == vocbase->lookupView(viewId));
  CHECK(nullptr == ci->getView(vocbase->name(), viewId));

  // old index remains valid
  {
@ -310,7 +310,8 @@ SECTION("test_defaults") {
}

SECTION("test_drop") {
  std::string dataPath = (((irs::utf8_path()/=s.testFilesystemPath)/=std::string("databases"))/=std::string("arangosearch-123")).utf8();
  TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
  std::string dataPath = ((((irs::utf8_path()/=s.testFilesystemPath)/=std::string("databases"))/=(std::string("database-") + std::to_string(vocbase.id())))/=std::string("arangosearch-123")).utf8();
  auto json = arangodb::velocypack::Parser::fromJson("{ \
    \"id\": 123, \
    \"name\": \"testView\", \

@ -319,7 +320,6 @@ SECTION("test_drop") {

  CHECK((false == TRI_IsDirectory(dataPath.c_str())));

  Vocbase vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
  auto collectionJson = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testCollection\" }");
  auto* logicalCollection = vocbase.createCollection(collectionJson->slice());
  CHECK((nullptr != logicalCollection));
@ -339,7 +339,8 @@ SECTION("test_drop") {
}

SECTION("test_drop_with_link") {
  std::string dataPath = (((irs::utf8_path()/=s.testFilesystemPath)/=std::string("databases"))/=std::string("arangosearch-123")).utf8();
  TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
  std::string dataPath = ((((irs::utf8_path()/=s.testFilesystemPath)/=std::string("databases"))/=(std::string("database-") + std::to_string(vocbase.id())))/=std::string("arangosearch-123")).utf8();
  auto json = arangodb::velocypack::Parser::fromJson("{ \
    \"id\": 123, \
    \"name\": \"testView\", \

@ -348,7 +349,6 @@ SECTION("test_drop_with_link") {

  CHECK((false == TRI_IsDirectory(dataPath.c_str())));

  Vocbase vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
  auto collectionJson = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testCollection\" }");
  auto* logicalCollection = vocbase.createCollection(collectionJson->slice());
  CHECK((nullptr != logicalCollection));
@ -408,8 +408,14 @@ SECTION("test_drop_cid") {

  // query
  {
    auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
    auto* snapshot = view->snapshot(*state, true);
    arangodb::transaction::Methods trx(
      arangodb::transaction::StandaloneContext::Create(vocbase),
      EMPTY,
      EMPTY,
      EMPTY,
      arangodb::transaction::Options()
    );
    auto* snapshot = view->snapshot(trx, true);
    CHECK(1 == snapshot->live_docs_count());
  }

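// The same substitution recurs in the hunks below: the snapshot is now obtained
// from a transaction::Methods object instead of a manually created
// TransactionState. Condensed sketch of the new pattern, reusing the fixture
// objects (vocbase, EMPTY, view) from the surrounding tests:
//
//   arangodb::transaction::Methods trx(
//     arangodb::transaction::StandaloneContext::Create(vocbase),
//     EMPTY, EMPTY, EMPTY,
//     arangodb::transaction::Options()
//   );
//   auto* snapshot = view->snapshot(trx, true); // 'true' forces snapshot creation
//   CHECK(nullptr != snapshot);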
@ -427,8 +433,14 @@ SECTION("test_drop_cid") {
|
|||
|
||||
// query
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK(0 == snapshot->live_docs_count());
|
||||
}
|
||||
}
|
||||
|
@ -462,8 +474,14 @@ SECTION("test_drop_cid") {
|
|||
|
||||
// query
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK(1 == snapshot->live_docs_count());
|
||||
}
|
||||
|
||||
|
@ -481,8 +499,14 @@ SECTION("test_drop_cid") {
|
|||
|
||||
// query
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK(0 == snapshot->live_docs_count());
|
||||
}
|
||||
}
|
||||
|
@ -516,8 +540,14 @@ SECTION("test_drop_cid") {
|
|||
|
||||
// query
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK(1 == snapshot->live_docs_count());
|
||||
}
|
||||
|
||||
|
@ -538,8 +568,14 @@ SECTION("test_drop_cid") {
|
|||
|
||||
// query
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK(0 == snapshot->live_docs_count());
|
||||
}
|
||||
|
||||
|
@ -587,8 +623,14 @@ SECTION("test_drop_cid") {
|
|||
|
||||
// query
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK(1 == snapshot->live_docs_count());
|
||||
}
|
||||
|
||||
|
@ -604,8 +646,14 @@ SECTION("test_drop_cid") {
|
|||
|
||||
// query
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK(1 == snapshot->live_docs_count());
|
||||
}
|
||||
|
||||
|
@ -653,8 +701,14 @@ SECTION("test_drop_cid") {
|
|||
|
||||
// query
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK(1 == snapshot->live_docs_count());
|
||||
}
|
||||
|
||||
|
@ -675,8 +729,14 @@ SECTION("test_drop_cid") {
|
|||
|
||||
// query
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK(0 == snapshot->live_docs_count());
|
||||
}
|
||||
|
||||
|
@ -1012,8 +1072,14 @@ SECTION("test_insert") {
|
|||
CHECK((view->sync()));
|
||||
}
|
||||
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK(2 == snapshot->live_docs_count());
|
||||
}
|
||||
|
||||
|
@ -1052,8 +1118,14 @@ SECTION("test_insert") {
|
|||
CHECK((view->sync()));
|
||||
}
|
||||
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK((2 == snapshot->docs_count()));
|
||||
}
|
||||
|
||||
|
@ -1072,8 +1144,14 @@ SECTION("test_insert") {
|
|||
view->visitCollections([&cids](TRI_voc_cid_t cid)->bool { cids.emplace(cid); return true; });
|
||||
CHECK((0 == cids.size()));
|
||||
std::unordered_set<TRI_voc_cid_t> actual;
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
arangodb::iresearch::appendKnownCollections(actual, *snapshot);
|
||||
CHECK((actual.empty()));
|
||||
}
|
||||
|
@ -1099,8 +1177,14 @@ SECTION("test_insert") {
|
|||
CHECK((view->sync()));
|
||||
}
|
||||
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK((4 == snapshot->docs_count()));
|
||||
|
||||
// validate cid count
|
||||
|
@ -1110,8 +1194,14 @@ SECTION("test_insert") {
|
|||
CHECK((0 == cids.size()));
|
||||
std::unordered_set<TRI_voc_cid_t> expected = { 1 };
|
||||
std::unordered_set<TRI_voc_cid_t> actual;
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
arangodb::iresearch::appendKnownCollections(actual, *snapshot);
|
||||
|
||||
for (auto& cid: expected) {
|
||||
|
@ -1154,8 +1244,14 @@ SECTION("test_insert") {
|
|||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK((4 == snapshot->docs_count()));
|
||||
}
|
||||
|
||||
|
@ -1191,8 +1287,14 @@ SECTION("test_insert") {
|
|||
CHECK((view->sync()));
|
||||
}
|
||||
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK((4 == snapshot->docs_count()));
|
||||
}
|
||||
|
||||
|
@ -1229,8 +1331,14 @@ SECTION("test_insert") {
|
|||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK((4 == snapshot->docs_count()));
|
||||
}
|
||||
}
|
||||
|
@ -1239,7 +1347,7 @@ SECTION("test_open") {
|
|||
// default data path
|
||||
{
|
||||
Vocbase vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
std::string dataPath = (((irs::utf8_path()/=s.testFilesystemPath)/=std::string("databases"))/=std::string("arangosearch-123")).utf8();
|
||||
std::string dataPath = ((((irs::utf8_path()/=s.testFilesystemPath)/=std::string("databases"))/=(std::string("database-") + std::to_string(vocbase.id())))/=std::string("arangosearch-123")).utf8();
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"id\": 123, \"name\": \"testView\", \"type\": \"testType\" }");
|
||||
|
||||
CHECK((false == TRI_IsDirectory(dataPath.c_str())));
|
||||
|
@ -1270,8 +1378,14 @@ SECTION("test_query") {
|
|||
auto* view = dynamic_cast<arangodb::iresearch::IResearchView*>(logicalView.get());
|
||||
REQUIRE((false == !view));
|
||||
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK(0 == snapshot->docs_count());
|
||||
}
|
||||
|
||||
|
@ -1305,8 +1419,14 @@ SECTION("test_query") {
|
|||
view->sync();
|
||||
}
|
||||
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK(12 == snapshot->docs_count());
|
||||
}
|
||||
|
||||
|
@ -1351,8 +1471,14 @@ SECTION("test_query") {
|
|||
view->sync();
|
||||
}
|
||||
|
||||
auto state0 = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot0 = view->snapshot(*state0, true);
|
||||
arangodb::transaction::Methods trx0(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot0 = view->snapshot(trx0, true);
|
||||
CHECK(12 == snapshot0->docs_count());
|
||||
|
||||
// add more data
|
||||
|
@ -1381,8 +1507,14 @@ SECTION("test_query") {
|
|||
// old reader sees same data as before
|
||||
CHECK(12 == snapshot0->docs_count());
|
||||
// new reader sees new data
|
||||
auto state1 = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot1 = view->snapshot(*state1, true);
|
||||
arangodb::transaction::Methods trx1(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot1 = view->snapshot(trx1, true);
|
||||
CHECK(24 == snapshot1->docs_count());
|
||||
}
|
||||
|
||||
|
@ -1443,8 +1575,14 @@ SECTION("test_query") {
|
|||
|
||||
// query
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
options
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
CHECK(i == snapshot->docs_count());
|
||||
}
|
||||
}
|
||||
|
@ -1515,6 +1653,8 @@ SECTION("test_register_link") {
|
|||
}
|
||||
}
|
||||
|
||||
std::vector<std::string> EMPTY;
|
||||
|
||||
// new link
|
||||
{
|
||||
s.engine.views.clear();
|
||||
|
@ -1544,8 +1684,14 @@ SECTION("test_register_link") {
|
|||
{
|
||||
std::unordered_set<TRI_voc_cid_t> cids;
|
||||
view->sync();
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
arangodb::iresearch::appendKnownCollections(cids, *snapshot);
|
||||
CHECK((0 == cids.size()));
|
||||
}
|
||||
|
@ -1562,8 +1708,14 @@ SECTION("test_register_link") {
|
|||
CHECK((false == !link));
|
||||
std::unordered_set<TRI_voc_cid_t> cids;
|
||||
view->sync();
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
arangodb::iresearch::appendKnownCollections(cids, *snapshot);
|
||||
CHECK((0 == cids.size())); // link addition does trigger collection load
|
||||
|
||||
|
@ -1594,8 +1746,14 @@ SECTION("test_register_link") {
|
|||
{
|
||||
std::unordered_set<TRI_voc_cid_t> cids;
|
||||
view->sync();
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
arangodb::iresearch::appendKnownCollections(cids, *snapshot);
|
||||
CHECK((0 == cids.size()));
|
||||
}
|
||||
|
@ -1620,8 +1778,14 @@ SECTION("test_register_link") {
|
|||
{
|
||||
std::unordered_set<TRI_voc_cid_t> cids;
|
||||
view->sync();
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
arangodb::iresearch::appendKnownCollections(cids, *snapshot);
|
||||
CHECK((0 == cids.size())); // link addition does trigger collection load
|
||||
}
|
||||
|
@ -1644,8 +1808,14 @@ SECTION("test_register_link") {
|
|||
CHECK((false == !link1)); // duplicate link creation is allowed
|
||||
std::unordered_set<TRI_voc_cid_t> cids;
|
||||
view->sync();
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
arangodb::iresearch::appendKnownCollections(cids, *snapshot);
|
||||
CHECK((0 == cids.size())); // link addition does trigger collection load
|
||||
|
||||
|
@ -1664,6 +1834,7 @@ SECTION("test_register_link") {
|
|||
}
|
||||
|
||||
SECTION("test_unregister_link") {
|
||||
std::vector<std::string> const EMPTY;
|
||||
bool persisted = false;
|
||||
auto before = StorageEngineMock::before;
|
||||
auto restore = irs::make_finally([&before]()->void { StorageEngineMock::before = before; });
|
||||
|
@ -1711,8 +1882,14 @@ SECTION("test_unregister_link") {
|
|||
{
|
||||
std::unordered_set<TRI_voc_cid_t> cids;
|
||||
view->sync();
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
arangodb::iresearch::appendKnownCollections(cids, *snapshot);
|
||||
CHECK((1 == cids.size()));
|
||||
}
|
||||
|
@ -1742,8 +1919,14 @@ SECTION("test_unregister_link") {
|
|||
{
|
||||
std::unordered_set<TRI_voc_cid_t> cids;
|
||||
view->sync();
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
arangodb::iresearch::appendKnownCollections(cids, *snapshot);
|
||||
CHECK((0 == cids.size()));
|
||||
}
|
||||
|
@ -1798,8 +1981,14 @@ SECTION("test_unregister_link") {
|
|||
{
|
||||
std::unordered_set<TRI_voc_cid_t> cids;
|
||||
view->sync();
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
arangodb::iresearch::appendKnownCollections(cids, *snapshot);
|
||||
CHECK((1 == cids.size()));
|
||||
}
|
||||
|
@ -1825,8 +2014,14 @@ SECTION("test_unregister_link") {
|
|||
{
|
||||
std::unordered_set<TRI_voc_cid_t> cids;
|
||||
view->sync();
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = view->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = view->snapshot(trx, true);
|
||||
arangodb::iresearch::appendKnownCollections(cids, *snapshot);
|
||||
CHECK((0 == cids.size()));
|
||||
}
|
||||
|
@ -2447,32 +2642,58 @@ SECTION("test_transaction_snapshot") {
|
|||
|
||||
// no snapshot in TransactionState (force == false, waitForSync = false)
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = viewImpl->snapshot(*state);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = viewImpl->snapshot(trx);
|
||||
CHECK((nullptr == snapshot));
|
||||
}
|
||||
|
||||
// no snapshot in TransactionState (force == true, waitForSync = false)
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = viewImpl->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* snapshot = viewImpl->snapshot(trx, true);
|
||||
CHECK((nullptr != snapshot));
|
||||
CHECK((0 == snapshot->live_docs_count()));
|
||||
}
|
||||
|
||||
// no snapshot in TransactionState (force == false, waitForSync = true)
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
state->waitForSync(true);
|
||||
auto* snapshot = viewImpl->snapshot(*state);
|
||||
arangodb::transaction::Options opts;
|
||||
opts.waitForSync = true;
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
opts
|
||||
);
|
||||
auto* snapshot = viewImpl->snapshot(trx);
|
||||
CHECK((nullptr == snapshot));
|
||||
}
|
||||
|
||||
// no snapshot in TransactionState (force == true, waitForSync = true)
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
state->waitForSync(true);
|
||||
auto* snapshot = viewImpl->snapshot(*state, true);
|
||||
arangodb::transaction::Options opts;
|
||||
opts.waitForSync = true;
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
opts
|
||||
);
|
||||
auto* snapshot = viewImpl->snapshot(trx, true);
|
||||
CHECK((nullptr != snapshot));
|
||||
CHECK((1 == snapshot->live_docs_count()));
|
||||
}
|
||||
|
@ -2503,13 +2724,12 @@ SECTION("test_transaction_snapshot") {
|
|||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* state = trx.state();
|
||||
viewImpl->apply(trx);
|
||||
state->updateStatus(arangodb::transaction::Status::RUNNING);
|
||||
auto* snapshot = viewImpl->snapshot(*state);
|
||||
CHECK((true == viewImpl->apply(trx)));
|
||||
CHECK((true == trx.begin().ok()));
|
||||
auto* snapshot = viewImpl->snapshot(trx);
|
||||
CHECK((nullptr != snapshot));
|
||||
CHECK((1 == snapshot->live_docs_count()));
|
||||
state->updateStatus(arangodb::transaction::Status::ABORTED); // prevent assertion ind destructor
|
||||
CHECK(true == trx.abort().ok()); // prevent assertion in destructor
|
||||
}
|
||||
|
||||
// old snapshot in TransactionState (force == true, waitForSync = false)
|
||||
|
@ -2521,13 +2741,12 @@ SECTION("test_transaction_snapshot") {
|
|||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* state = trx.state();
|
||||
viewImpl->apply(trx);
|
||||
state->updateStatus(arangodb::transaction::Status::RUNNING);
|
||||
auto* snapshot = viewImpl->snapshot(*state, true);
|
||||
CHECK((true == viewImpl->apply(trx)));
|
||||
CHECK((true == trx.begin().ok()));
|
||||
auto* snapshot = viewImpl->snapshot(trx, true);
|
||||
CHECK((nullptr != snapshot));
|
||||
CHECK((1 == snapshot->live_docs_count()));
|
||||
state->updateStatus(arangodb::transaction::Status::ABORTED); // prevent assertion ind destructor
|
||||
CHECK(true == trx.abort().ok()); // prevent assertion in destructor
|
||||
}
|
||||
|
||||
// old snapshot in TransactionState (force == true, waitForSync = false during updateStatus(), true during snapshot())
|
||||
|
@ -2539,34 +2758,38 @@ SECTION("test_transaction_snapshot") {
|
|||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* state = trx.state();
|
||||
viewImpl->apply(trx);
|
||||
state->updateStatus(arangodb::transaction::Status::RUNNING);
|
||||
auto state = trx.state();
|
||||
REQUIRE(state);
|
||||
CHECK((true == viewImpl->apply(trx)));
|
||||
CHECK((true == trx.begin().ok()));
|
||||
state->waitForSync(true);
|
||||
auto* snapshot = viewImpl->snapshot(*state, true);
|
||||
auto* snapshot = viewImpl->snapshot(trx, true);
|
||||
CHECK((nullptr != snapshot));
|
||||
CHECK((1 == snapshot->live_docs_count()));
|
||||
state->updateStatus(arangodb::transaction::Status::ABORTED); // prevent assertion ind destructor
|
||||
CHECK(true == trx.abort().ok()); // prevent assertion in destructor
|
||||
}
|
||||
|
||||
// old snapshot in TransactionState (force == true, waitForSync = true during updateStatus(), false during snapshot())
|
||||
{
|
||||
arangodb::transaction::Options opts;
|
||||
opts.waitForSync = true;
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
opts
|
||||
);
|
||||
auto* state = trx.state();
|
||||
auto state = trx.state();
|
||||
REQUIRE(state);
|
||||
state->waitForSync(true);
|
||||
viewImpl->apply(trx);
|
||||
state->updateStatus(arangodb::transaction::Status::RUNNING);
|
||||
CHECK((true == viewImpl->apply(trx)));
|
||||
CHECK((true == trx.begin().ok()));
|
||||
state->waitForSync(false);
|
||||
auto* snapshot = viewImpl->snapshot(*state, true);
|
||||
auto* snapshot = viewImpl->snapshot(trx, true);
|
||||
CHECK((nullptr != snapshot));
|
||||
CHECK((2 == snapshot->live_docs_count()));
|
||||
state->updateStatus(arangodb::transaction::Status::ABORTED); // prevent assertion ind destructor
|
||||
CHECK(true == trx.abort().ok()); // prevent assertion in destructor
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3525,4 +3748,4 @@ SECTION("test_update_partial") {
|
|||
|
||||
// -----------------------------------------------------------------------------
|
||||
// --SECTION-- END-OF-FILE
|
||||
// -----------------------------------------------------------------------------
|
||||
// -----------------------------------------------------------------------------
|
||||
|
|
|
@ -198,6 +198,7 @@ struct IResearchViewCoordinatorSetup {
|
|||
arangodb::LogTopic::setLogLevel(arangodb::iresearch::TOPIC.name(), arangodb::LogLevel::DEFAULT);
|
||||
arangodb::LogTopic::setLogLevel(arangodb::Logger::CLUSTER.name(), arangodb::LogLevel::DEFAULT);
|
||||
arangodb::LogTopic::setLogLevel(arangodb::Logger::FIXME.name(), arangodb::LogLevel::DEFAULT);
|
||||
arangodb::ClusterInfo::cleanup(); // reset ClusterInfo::instance() before DatabaseFeature::unprepare()
|
||||
arangodb::application_features::ApplicationServer::server = nullptr;
|
||||
|
||||
// destroy application features
|
||||
|
@ -3158,4 +3159,4 @@ SECTION("IResearchViewNode::createBlock") {
|
|||
|
||||
// -----------------------------------------------------------------------------
|
||||
// --SECTION-- END-OF-FILE
|
||||
// -----------------------------------------------------------------------------
|
||||
// -----------------------------------------------------------------------------
|
|
@ -24,12 +24,17 @@
|
|||
#include "utils/misc.hpp"
|
||||
|
||||
#include "catch.hpp"
|
||||
#include "../IResearch/AgencyCommManagerMock.h"
|
||||
#include "../IResearch/StorageEngineMock.h"
|
||||
#include "common.h"
|
||||
#include "AgencyMock.h"
|
||||
#include "StorageEngineMock.h"
|
||||
#include "Agency/AgencyFeature.h"
|
||||
#include "Agency/Store.h"
|
||||
#include "Aql/AstNode.h"
|
||||
#include "Aql/Variable.h"
|
||||
#include "Basics/files.h"
|
||||
#include "Cluster/ClusterComm.h"
|
||||
#include "Cluster/ClusterInfo.h"
|
||||
#include "Cluster/ClusterFeature.h"
|
||||
#include "GeneralServer/AuthenticationFeature.h"
|
||||
#include "IResearch/IResearchCommon.h"
|
||||
#include "IResearch/IResearchFeature.h"
|
||||
|
@ -46,6 +51,7 @@
|
|||
#include "Transaction/StandaloneContext.h"
|
||||
#include "Utils/OperationOptions.h"
|
||||
#include "velocypack/Parser.h"
|
||||
#include "V8Server/V8DealerFeature.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
|
||||
|
@ -54,35 +60,49 @@
|
|||
// -----------------------------------------------------------------------------
|
||||
|
||||
struct IResearchViewDBServerSetup {
|
||||
GeneralClientConnectionMapMock* agency;
|
||||
struct ClusterCommControl : arangodb::ClusterComm {
|
||||
static void reset() {
|
||||
arangodb::ClusterComm::_theInstanceInit.store(0);
|
||||
}
|
||||
};
|
||||
|
||||
arangodb::consensus::Store _agencyStore{nullptr, "arango"};
|
||||
GeneralClientConnectionAgencyMock* agency;
|
||||
StorageEngineMock engine;
|
||||
arangodb::application_features::ApplicationServer server;
|
||||
std::vector<std::pair<arangodb::application_features::ApplicationFeature*, bool>> features;
|
||||
std::string testFilesystemPath;
|
||||
|
||||
IResearchViewDBServerSetup(): server(nullptr, nullptr) {
|
||||
auto* agencyCommManager = new AgencyCommManagerMock();
|
||||
auto* agencyCommManager = new AgencyCommManagerMock("arango");
|
||||
agency = agencyCommManager->addConnection<GeneralClientConnectionAgencyMock>(_agencyStore, true);
|
||||
agency = agencyCommManager->addConnection<GeneralClientConnectionAgencyMock>(_agencyStore, true); // need 2 connections or Agency callbacks will fail
|
||||
arangodb::AgencyCommManager::MANAGER.reset(agencyCommManager);
|
||||
|
||||
agency = agencyCommManager->addConnection<GeneralClientConnectionMapMock>();
|
||||
arangodb::ServerState::instance()->setRole(arangodb::ServerState::RoleEnum::ROLE_PRIMARY);
|
||||
arangodb::EngineSelectorFeature::ENGINE = &engine;
|
||||
arangodb::AgencyCommManager::MANAGER.reset(agencyCommManager);
|
||||
|
||||
// suppress INFO {authentication} Authentication is turned on (system only), authentication for unix sockets is turned on
|
||||
arangodb::LogTopic::setLogLevel(arangodb::Logger::AUTHENTICATION.name(), arangodb::LogLevel::WARN);
|
||||
|
||||
// suppress INFO {cluster} Starting up with role PRIMARY
|
||||
arangodb::LogTopic::setLogLevel(arangodb::Logger::CLUSTER.name(), arangodb::LogLevel::WARN);
|
||||
|
||||
// suppress log messages since tests check error conditions
|
||||
arangodb::LogTopic::setLogLevel(arangodb::iresearch::TOPIC.name(), arangodb::LogLevel::FATAL);
|
||||
irs::logger::output_le(iresearch::logger::IRL_FATAL, stderr);
|
||||
|
||||
// setup required application features
|
||||
features.emplace_back(new arangodb::AuthenticationFeature(&server), false); // required for AgencyComm::send(...)
|
||||
features.emplace_back(new arangodb::DatabaseFeature(&server), false); // required for TRI_vocbase_t::renameView(...)
|
||||
features.emplace_back(arangodb::DatabaseFeature::DATABASE = new arangodb::DatabaseFeature(&server), false); // required for TRI_vocbase_t::renameView(...)
|
||||
features.emplace_back(new arangodb::DatabasePathFeature(&server), false);
|
||||
features.emplace_back(new arangodb::FlushFeature(&server), false); // do not start the thread
|
||||
features.emplace_back(new arangodb::QueryRegistryFeature(&server), false); // required for TRI_vocbase_t instantiation
|
||||
features.emplace_back(new arangodb::ViewTypesFeature(&server), false); // required for TRI_vocbase_t::createView(...)
|
||||
features.emplace_back(new arangodb::iresearch::IResearchFeature(&server), false); // required for instantiating IResearchView*
|
||||
features.emplace_back(new arangodb::AgencyFeature(&server), false);
|
||||
features.emplace_back(new arangodb::ClusterFeature(&server), false);
|
||||
features.emplace_back(new arangodb::V8DealerFeature(&server), false);
|
||||
|
||||
for (auto& f: features) {
|
||||
arangodb::application_features::ApplicationServer::server->addFeature(f.first);
|
||||
|
@ -90,6 +110,10 @@ struct IResearchViewDBServerSetup {
|
|||
|
||||
for (auto& f: features) {
|
||||
f.first->prepare();
|
||||
|
||||
if (f.first->name() == "Authentication") {
|
||||
f.first->forceDisable();
|
||||
}
|
||||
}
|
||||
|
||||
for (auto& f: features) {
|
||||
|
@ -98,8 +122,6 @@ struct IResearchViewDBServerSetup {
|
|||
}
|
||||
}
|
||||
|
||||
arangodb::ClusterInfo::createInstance(nullptr); // required for generating view id
|
||||
|
||||
testFilesystemPath = (
|
||||
(irs::utf8_path()/=
|
||||
TRI_GetTempPath())/=
|
||||
|
@ -111,15 +133,16 @@ struct IResearchViewDBServerSetup {
|
|||
long systemError;
|
||||
std::string systemErrorStr;
|
||||
TRI_CreateDirectory(testFilesystemPath.c_str(), systemError, systemErrorStr);
|
||||
|
||||
agencyCommManager->start(); // initialize agency
|
||||
}
|
||||
|
||||
~IResearchViewDBServerSetup() {
|
||||
TRI_RemoveDirectory(testFilesystemPath.c_str());
|
||||
arangodb::LogTopic::setLogLevel(arangodb::iresearch::TOPIC.name(), arangodb::LogLevel::DEFAULT);
|
||||
arangodb::EngineSelectorFeature::ENGINE = nullptr;
|
||||
arangodb::LogTopic::setLogLevel(arangodb::Logger::CLUSTER.name(), arangodb::LogLevel::DEFAULT);
|
||||
arangodb::ClusterInfo::cleanup(); // reset ClusterInfo::instance() before DatabaseFeature::unprepare()
|
||||
arangodb::application_features::ApplicationServer::server = nullptr;
|
||||
arangodb::AgencyCommManager::MANAGER.reset();
|
||||
arangodb::ServerState::instance()->setRole(arangodb::ServerState::RoleEnum::ROLE_SINGLE);
|
||||
|
||||
// destroy application features
|
||||
for (auto& f: features) {
|
||||
|
@ -132,7 +155,11 @@ struct IResearchViewDBServerSetup {
|
|||
f.first->unprepare();
|
||||
}
|
||||
|
||||
ClusterCommControl::reset();
|
||||
arangodb::ServerState::instance()->setRole(arangodb::ServerState::RoleEnum::ROLE_SINGLE);
|
||||
arangodb::LogTopic::setLogLevel(arangodb::Logger::AUTHENTICATION.name(), arangodb::LogLevel::DEFAULT);
|
||||
arangodb::EngineSelectorFeature::ENGINE = nullptr;
|
||||
arangodb::AgencyCommManager::MANAGER.reset();
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -149,11 +176,11 @@ TEST_CASE("IResearchViewDBServerTest", "[cluster][iresearch][iresearch-view]") {
|
|||
(void)(s);
|
||||
|
||||
SECTION("test_drop") {
|
||||
auto* ci = arangodb::ClusterInfo::instance();
|
||||
REQUIRE(nullptr != ci);
|
||||
|
||||
// drop empty
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\" }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
|
@ -166,9 +193,7 @@ SECTION("test_drop") {
|
|||
|
||||
// drop non-empty
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto const wiewId = std::to_string(ci->uniqid() + 1); // +1 because LogicalView creation will generate a new ID
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\" }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
|
@ -176,9 +201,19 @@ SECTION("test_drop") {
|
|||
auto* impl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(wiew.get());
|
||||
CHECK((nullptr != impl));
|
||||
|
||||
// ensure we have shard view in vocbase
|
||||
auto const shardViewName = "_iresearch_123_" + wiewId;
|
||||
auto jsonShard = arangodb::velocypack::Parser::fromJson(
|
||||
"{ \"id\": 100, \"name\": \"" + shardViewName + "\", \"type\": \"arangosearch\", \"isSystem\": true }"
|
||||
);
|
||||
CHECK((true == !vocbase.lookupView(shardViewName)));
|
||||
auto shardView = vocbase.createView(jsonShard->slice());
|
||||
CHECK(shardView);
|
||||
|
||||
auto view = impl->ensure(123);
|
||||
auto const viewId = view->id();
|
||||
CHECK((false == !view));
|
||||
CHECK(view == shardView);
|
||||
static auto visitor = [](TRI_voc_cid_t)->bool { return false; };
|
||||
CHECK((false == impl->visitCollections(visitor)));
|
||||
CHECK((false == !vocbase.lookupView(view->id())));
|
||||
|
@ -189,9 +224,7 @@ SECTION("test_drop") {
|
|||
|
||||
// drop non-empty (drop failure)
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto const wiewId = std::to_string(ci->uniqid() + 1); // +1 because LogicalView creation will generate a new ID
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\" }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
|
@ -199,8 +232,18 @@ SECTION("test_drop") {
|
|||
auto* impl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(wiew.get());
|
||||
CHECK((nullptr != impl));
|
||||
|
||||
// ensure we have shard view in vocbase
|
||||
auto const shardViewName = "_iresearch_123_" + wiewId;
|
||||
auto jsonShard = arangodb::velocypack::Parser::fromJson(
|
||||
"{ \"id\": 100, \"name\": \"" + shardViewName + "\", \"type\": \"arangosearch\", \"isSystem\": true }"
|
||||
);
|
||||
CHECK((true == !vocbase.lookupView(shardViewName)));
|
||||
auto shardView = vocbase.createView(jsonShard->slice());
|
||||
CHECK(shardView);
|
||||
|
||||
auto view = impl->ensure(123);
|
||||
CHECK((false == !view));
|
||||
CHECK(view == shardView);
|
||||
static auto visitor = [](TRI_voc_cid_t)->bool { return false; };
|
||||
CHECK((false == impl->visitCollections(visitor)));
|
||||
CHECK((false == !vocbase.lookupView(view->id())));
|
||||
|
@ -216,9 +259,6 @@ SECTION("test_drop") {
|
|||
}
|
||||
|
||||
SECTION("test_drop_cid") {
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\" }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
|
@ -226,9 +266,18 @@ SECTION("test_drop_cid") {
|
|||
auto* impl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(wiew.get());
|
||||
CHECK((nullptr != impl));
|
||||
|
||||
// ensure we have shard view in vocbase
|
||||
auto jsonShard = arangodb::velocypack::Parser::fromJson(
|
||||
"{ \"id\": 100, \"name\": \"_iresearch_123_1\", \"type\": \"arangosearch\", \"isSystem\": true }"
|
||||
);
|
||||
CHECK((true == !vocbase.lookupView("_iresearch_123_1")));
|
||||
auto shardView = vocbase.createView(jsonShard->slice());
|
||||
CHECK(shardView);
|
||||
|
||||
auto view = impl->ensure(123);
|
||||
auto const viewId = view->id();
|
||||
CHECK((false == !view));
|
||||
CHECK(shardView == view);
|
||||
static auto visitor = [](TRI_voc_cid_t)->bool { return false; };
|
||||
CHECK((false == impl->visitCollections(visitor)));
|
||||
CHECK((false == !vocbase.lookupView(view->id())));
|
||||
|
@ -239,9 +288,6 @@ SECTION("test_drop_cid") {
|
|||
}
|
||||
|
||||
SECTION("test_ensure") {
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\", \"collections\": [ 3, 4, 5 ] }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
|
@ -249,10 +295,9 @@ SECTION("test_ensure") {
|
|||
auto* impl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(wiew.get());
|
||||
CHECK((nullptr != impl));
|
||||
|
||||
CHECK((true == !vocbase.lookupView("_iresearch_123_1_testView")));
|
||||
auto view = impl->ensure(123);
|
||||
CHECK((false == !view));
|
||||
CHECK((std::string("_iresearch_123_1_testView") == view->name()));
|
||||
CHECK((std::string("_iresearch_123_1") == view->name()));
|
||||
CHECK((false == view->deleted()));
|
||||
CHECK((wiew->id() != view->id())); // must have unique ID
|
||||
CHECK((view->id() == view->planId())); // same as view ID
|
||||
|
@ -261,17 +306,18 @@ SECTION("test_ensure") {
|
|||
CHECK((&vocbase == &(view->vocbase())));
|
||||
static auto visitor = [](TRI_voc_cid_t)->bool { return false; };
|
||||
CHECK((true == view->visitCollections(visitor))); // no collections in view
|
||||
CHECK((false == !vocbase.lookupView("_iresearch_123_1_testView")));
|
||||
CHECK((false == !vocbase.lookupView("_iresearch_123_1")));
|
||||
}
|
||||
|
||||
SECTION("test_make") {
|
||||
auto* ci = arangodb::ClusterInfo::instance();
|
||||
REQUIRE(nullptr != ci);
|
||||
|
||||
// make DBServer view
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto const wiewId = ci->uniqid() + 1; // +1 because LogicalView creation will generate a new ID
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\" }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
CHECK((false == !wiew));
|
||||
auto* impl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(wiew.get());
|
||||
|
@ -279,19 +325,16 @@ SECTION("test_make") {
|
|||
|
||||
CHECK((std::string("testView") == wiew->name()));
|
||||
CHECK((false == wiew->deleted()));
|
||||
CHECK((1 == wiew->id()));
|
||||
CHECK((wiewId == wiew->id()));
|
||||
CHECK((impl->id() == wiew->planId())); // same as view ID
|
||||
CHECK((0 == wiew->planVersion())); // when creating via vocbase planVersion is always 0
|
||||
CHECK((42 == wiew->planVersion())); // when creating via vocbase planVersion is always 0
|
||||
CHECK((arangodb::iresearch::DATA_SOURCE_TYPE == wiew->type()));
|
||||
CHECK((&vocbase == &(wiew->vocbase())));
|
||||
}
|
||||
|
||||
// make IResearchView (DBServer view also created)
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"id\": 100, \"name\": \"_iresearch_123_456_testView\", \"type\": \"arangosearch\", \"isSystem\": true }");
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"id\": 100, \"name\": \"_iresearch_123_456\", \"type\": \"arangosearch\", \"isSystem\": true }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
CHECK((true == !vocbase.lookupView("testView")));
|
||||
auto view = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
|
@ -299,36 +342,22 @@ SECTION("test_make") {
|
|||
auto* impl = dynamic_cast<arangodb::iresearch::IResearchView*>(view.get());
|
||||
CHECK((nullptr != impl));
|
||||
|
||||
CHECK((std::string("_iresearch_123_456_testView") == view->name()));
|
||||
CHECK((std::string("_iresearch_123_456") == view->name()));
|
||||
CHECK((false == view->deleted()));
|
||||
CHECK((100 == view->id()));
|
||||
CHECK((view->id() == view->planId())); // same as view ID
|
||||
CHECK((42 == view->planVersion()));
|
||||
CHECK((arangodb::iresearch::DATA_SOURCE_TYPE == view->type()));
|
||||
CHECK((&vocbase == &(view->vocbase())));
|
||||
|
||||
auto wiew = vocbase.lookupView("testView");
|
||||
CHECK((false == !wiew));
|
||||
auto* wmpl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(wiew.get());
|
||||
CHECK((nullptr != wmpl));
|
||||
|
||||
CHECK((std::string("testView") == wiew->name()));
|
||||
CHECK((false == wiew->deleted()));
|
||||
CHECK((view->id() != wiew->id()));
|
||||
CHECK((456 == wiew->id()));
|
||||
CHECK((wiew->id() == wiew->planId())); // same as view ID
|
||||
CHECK((0 == wiew->planVersion())); // when creating via vocbase planVersion is always 0
|
||||
CHECK((arangodb::iresearch::DATA_SOURCE_TYPE == wiew->type()));
|
||||
CHECK((&vocbase == &(wiew->vocbase())));
|
||||
}
|
||||
}
|
||||
|
||||
SECTION("test_open") {
|
||||
auto* ci = arangodb::ClusterInfo::instance();
|
||||
REQUIRE(nullptr != ci);
|
||||
|
||||
// open empty
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\" }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
|
@ -343,9 +372,7 @@ SECTION("test_open") {
|
|||
|
||||
// open non-empty
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto const wiewId = std::to_string(ci->uniqid() + 1); // +1 because LogicalView creation will generate a new ID
|
||||
std::string dataPath = (((irs::utf8_path()/=s.testFilesystemPath)/=std::string("databases"))/=std::string("arangosearch-123")).utf8();
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\" }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
|
@ -354,20 +381,34 @@ SECTION("test_open") {
|
|||
auto* impl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(wiew.get());
|
||||
CHECK((nullptr != impl));
|
||||
|
||||
// ensure we have shard view in vocbase
|
||||
auto const shardViewName = "_iresearch_123_" + wiewId;
|
||||
auto jsonShard = arangodb::velocypack::Parser::fromJson(
|
||||
"{ \"id\": 100, \"name\": \"" + shardViewName + "\", \"type\": \"arangosearch\", \"isSystem\": true }"
|
||||
);
|
||||
CHECK((true == !vocbase.lookupView(shardViewName)));
|
||||
auto shardView = vocbase.createView(jsonShard->slice());
|
||||
CHECK(shardView);
|
||||
|
||||
static auto visitor = [](TRI_voc_cid_t)->bool { return false; };
|
||||
CHECK((true == impl->visitCollections(visitor)));
|
||||
auto view = impl->ensure(123);
|
||||
CHECK((false == !view));
|
||||
CHECK(view == shardView);
|
||||
CHECK((false == impl->visitCollections(visitor)));
|
||||
wiew->open();
|
||||
}
|
||||
}
|
||||
|
||||
SECTION("test_query") {
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto* ci = arangodb::ClusterInfo::instance();
|
||||
REQUIRE((nullptr != ci));
|
||||
auto* databaseFeature = arangodb::application_features::ApplicationServer::getFeature<arangodb::DatabaseFeature>("Database");
|
||||
REQUIRE((nullptr != databaseFeature));
|
||||
std::string error;
|
||||
|
||||
auto createJson = arangodb::velocypack::Parser::fromJson("{ \
|
||||
\"id\": \"42\", \
|
||||
\"name\": \"testView\", \
|
||||
\"type\": \"arangosearch\" \
|
||||
}");
|
||||
|
@ -380,28 +421,42 @@ SECTION("test_query") {
|
|||
// no filter/order provided, means "RETURN *"
|
||||
{
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto collectionJson = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testCollection\" }");
|
||||
auto logicalCollection = vocbase.createCollection(collectionJson->slice());
|
||||
REQUIRE(nullptr != logicalCollection);
|
||||
auto logicalWiew = vocbase.createView(createJson->slice());
|
||||
REQUIRE((false == !logicalWiew));
|
||||
auto* wiewImpl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(logicalWiew.get());
|
||||
REQUIRE((false == !wiewImpl));
|
||||
auto logicalView = wiewImpl->ensure(42);
|
||||
auto logicalView = wiewImpl->ensure(logicalCollection->id());
|
||||
REQUIRE((false == !logicalView));
|
||||
auto* viewImpl = dynamic_cast<arangodb::iresearch::IResearchView*>(logicalView.get());
|
||||
REQUIRE((false == !viewImpl));
|
||||
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = wiewImpl->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
auto* snapshot = wiewImpl->snapshot(trx, { logicalCollection->name() }, true);
|
||||
CHECK(0 == snapshot->docs_count());
|
||||
CHECK((trx.commit().ok()));
|
||||
}
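This block shows the reworked calling convention introduced by the commit: the snapshot is taken from a started transaction::Methods plus an explicit shard list, bracketed by trx.begin() and trx.commit(), instead of from a raw TransactionState. A conceptual stand-in for that ordering (mock types, not ArangoDB code):

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

struct MockSnapshot { std::size_t docs = 0; std::size_t docs_count() const { return docs; } };

struct MockTrx {
  bool running = false;
  bool begin()  { running = true;  return true; }
  bool commit() { running = false; return true; }
};

struct MockView {
  MockSnapshot store;
  // hands out a reader only while the transaction is running
  MockSnapshot const* snapshot(MockTrx const& trx,
                               std::vector<std::string> const& /*shards*/) const {
    return trx.running ? &store : nullptr;
  }
};

int main() {
  MockView view;
  MockTrx trx;
  assert(view.snapshot(trx, {"testCollection"}) == nullptr); // not yet begun
  assert(trx.begin());
  auto const* snapshot = view.snapshot(trx, {"testCollection"});
  assert(snapshot != nullptr && snapshot->docs_count() == 0);
  assert(trx.commit());
  return 0;
}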
|
||||
|
||||
// ordered iterator
|
||||
{
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto collectionJson = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testCollection\" }");
|
||||
auto logicalCollection = vocbase.createCollection(collectionJson->slice());
|
||||
REQUIRE(nullptr != logicalCollection);
|
||||
auto logicalWiew = vocbase.createView(createJson->slice());
|
||||
REQUIRE((false == !logicalWiew));
|
||||
auto* wiewImpl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(logicalWiew.get());
|
||||
REQUIRE((false == !wiewImpl));
|
||||
auto logicalView = wiewImpl->ensure(42);
|
||||
auto logicalView = wiewImpl->ensure(logicalCollection->id());
|
||||
REQUIRE((false == !logicalView));
|
||||
auto* viewImpl = dynamic_cast<arangodb::iresearch::IResearchView*>(logicalView.get());
|
||||
|
||||
|
@ -427,9 +482,17 @@ SECTION("test_query") {
|
|||
viewImpl->sync();
|
||||
}
|
||||
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = wiewImpl->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
auto* snapshot = wiewImpl->snapshot(trx, { logicalCollection->name() }, true);
|
||||
CHECK(12 == snapshot->docs_count());
|
||||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
|
||||
// snapshot isolation
|
||||
|
@ -437,12 +500,16 @@ SECTION("test_query") {
|
|||
auto links = arangodb::velocypack::Parser::fromJson("{ \
|
||||
\"links\": { \"testCollection\": { \"includeAllFields\" : true } } \
|
||||
}");
|
||||
auto collectionJson = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testCollection\" }");
|
||||
auto collectionJson = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testCollection\", \"id\":442 }");
|
||||
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto* logicalCollection = vocbase.createCollection(collectionJson->slice());
|
||||
TRI_vocbase_t* vocbase; // will be owned by DatabaseFeature
|
||||
REQUIRE((TRI_ERROR_NO_ERROR == databaseFeature->createDatabase(0, "testDatabase" TOSTRING(__LINE__), vocbase)));
|
||||
REQUIRE((nullptr != vocbase));
|
||||
REQUIRE((TRI_ERROR_NO_ERROR == ci->createDatabaseCoordinator(vocbase->name(), arangodb::velocypack::Slice::emptyObjectSlice(), error, 0.)));
|
||||
auto* logicalCollection = vocbase->createCollection(collectionJson->slice());
|
||||
std::vector<std::string> collections{ logicalCollection->name() };
|
||||
auto logicalWiew = vocbase.createView(createJson->slice());
|
||||
CHECK((TRI_ERROR_NO_ERROR == ci->createViewCoordinator(vocbase->name(), "42", createJson->slice(), error)));
|
||||
auto logicalWiew = ci->getView(vocbase->name(), "42"); // link creation requires cluster-view to be in ClusterInfo instead of TRI_vocbase_t
|
||||
CHECK((false == !logicalWiew));
|
||||
auto* wiewImpl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(logicalWiew.get());
|
||||
CHECK((false == !wiewImpl));
|
||||
|
@ -453,7 +520,7 @@ SECTION("test_query") {
|
|||
// fill with test data
|
||||
{
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
arangodb::transaction::StandaloneContext::Create(*vocbase),
|
||||
EMPTY,
|
||||
collections,
|
||||
EMPTY,
|
||||
|
@ -474,14 +541,23 @@ SECTION("test_query") {
|
|||
|
||||
arangodb::transaction::Options trxOptions;
|
||||
trxOptions.waitForSync = true;
|
||||
auto state0 = s.engine.createTransactionState(vocbase, trxOptions);
|
||||
auto* snapshot0 = wiewImpl->snapshot(*state0, true);
|
||||
|
||||
arangodb::transaction::Methods trx0(
|
||||
arangodb::transaction::StandaloneContext::Create(*vocbase),
|
||||
collections,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
trxOptions
|
||||
);
|
||||
CHECK((trx0.begin().ok()));
|
||||
auto* snapshot0 = wiewImpl->snapshot(trx0, { logicalCollection->name() }, true);
|
||||
CHECK(12 == snapshot0->docs_count());
|
||||
CHECK((trx0.commit().ok()));
|
||||
|
||||
// add more data
|
||||
{
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
arangodb::transaction::StandaloneContext::Create(*vocbase),
|
||||
EMPTY,
|
||||
collections,
|
||||
EMPTY,
|
||||
|
@ -502,10 +578,19 @@ SECTION("test_query") {
|
|||
|
||||
// old reader sees same data as before
|
||||
CHECK(12 == snapshot0->docs_count());
|
||||
|
||||
// new reader sees new data
|
||||
auto state1 = s.engine.createTransactionState(vocbase, trxOptions);
|
||||
auto* snapshot1 = wiewImpl->snapshot(*state1, true);
|
||||
arangodb::transaction::Methods trx1(
|
||||
arangodb::transaction::StandaloneContext::Create(*vocbase),
|
||||
collections,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
trxOptions
|
||||
);
|
||||
CHECK((trx1.begin().ok()));
|
||||
auto* snapshot1 = wiewImpl->snapshot(trx1, { logicalCollection->name() }, true);
|
||||
CHECK(24 == snapshot1->docs_count());
|
||||
CHECK((trx1.commit().ok()));
|
||||
}
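The 12-vs-24 checks above are the isolation property under test: a reader keeps seeing the document set it was opened on, while a reader opened after the second batch sees everything. A self-contained copy-on-write illustration of that behaviour (not how the iresearch data path is actually implemented):

#include <cassert>
#include <memory>
#include <vector>

using Docs = std::vector<int>;

struct Store {
  std::shared_ptr<Docs const> current = std::make_shared<Docs>();

  std::shared_ptr<Docs const> openSnapshot() const { return current; }

  void insert(int doc) {
    auto next = std::make_shared<Docs>(*current); // publish a new immutable version
    next->push_back(doc);
    current = std::move(next);
  }
};

int main() {
  Store store;
  for (int i = 0; i < 12; ++i) store.insert(i);

  auto snapshot0 = store.openSnapshot();
  assert(12 == snapshot0->size());

  for (int i = 12; i < 24; ++i) store.insert(i); // add more data

  assert(12 == snapshot0->size());               // old reader sees same data as before
  auto snapshot1 = store.openSnapshot();
  assert(24 == snapshot1->size());               // new reader sees new data
  return 0;
}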
|
||||
|
||||
// query while running FlushThread
|
||||
|
@ -517,9 +602,13 @@ SECTION("test_query") {
|
|||
arangodb::FlushFeature
|
||||
>("Flush");
|
||||
REQUIRE(feature);
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto* logicalCollection = vocbase.createCollection(collectionJson->slice());
|
||||
auto logicalWiew = vocbase.createView(viewCreateJson->slice());
|
||||
TRI_vocbase_t* vocbase; // will be owned by DatabaseFeature
|
||||
REQUIRE((TRI_ERROR_NO_ERROR == databaseFeature->createDatabase(0, "testDatabase" TOSTRING(__LINE__), vocbase)));
|
||||
REQUIRE((nullptr != vocbase));
|
||||
REQUIRE((TRI_ERROR_NO_ERROR == ci->createDatabaseCoordinator(vocbase->name(), arangodb::velocypack::Slice::emptyObjectSlice(), error, 0.)));
|
||||
auto* logicalCollection = vocbase->createCollection(collectionJson->slice());
|
||||
CHECK((TRI_ERROR_NO_ERROR == ci->createViewCoordinator(vocbase->name(), "42", createJson->slice(), error)));
|
||||
auto logicalWiew = ci->getView(vocbase->name(), "42"); // link creation requires cluster-view to be in ClusterInfo instead of TRI_vocbase_t
|
||||
REQUIRE((false == !logicalWiew));
|
||||
auto* wiewImpl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(logicalWiew.get());
|
||||
REQUIRE((false == !wiewImpl));
|
||||
|
@ -551,7 +640,7 @@ SECTION("test_query") {
|
|||
{
|
||||
auto doc = arangodb::velocypack::Parser::fromJson(std::string("{ \"seq\": ") + std::to_string(i) + " }");
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
arangodb::transaction::StandaloneContext::Create(*vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
|
@ -565,20 +654,28 @@ SECTION("test_query") {
|
|||
|
||||
// query
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = wiewImpl->snapshot(*state, true);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(*vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options{}
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
auto* snapshot = wiewImpl->snapshot(trx, { logicalCollection->name() }, true);
|
||||
CHECK(i == snapshot->docs_count());
|
||||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
SECTION("test_rename") {
|
||||
auto* ci = arangodb::ClusterInfo::instance();
|
||||
REQUIRE(nullptr != ci);
|
||||
|
||||
// rename empty
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\" }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
|
@ -612,14 +709,12 @@ SECTION("test_rename") {
|
|||
|
||||
auto view = impl->ensure(123);
|
||||
CHECK((false == !view));
|
||||
CHECK((std::string("_iresearch_123_1_newName") == view->name()));
|
||||
CHECK((std::string("_iresearch_123_1") == view->name()));
|
||||
}
|
||||
|
||||
// rename non-empty
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto const wiewId = std::to_string(ci->uniqid() + 1); // +1 because LogicalView creation will generate a new ID
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\" }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
|
@ -627,10 +722,19 @@ SECTION("test_rename") {
|
|||
auto* impl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(wiew.get());
|
||||
CHECK((nullptr != impl));
|
||||
|
||||
// ensure we have shard view in vocbase
|
||||
auto const shardViewName = "_iresearch_123_" + wiewId;
|
||||
auto jsonShard = arangodb::velocypack::Parser::fromJson(
|
||||
"{ \"id\": 100, \"name\": \"" + shardViewName + "\", \"type\": \"arangosearch\", \"isSystem\": true }"
|
||||
);
|
||||
CHECK((true == !vocbase.lookupView(shardViewName)));
|
||||
auto shardView = vocbase.createView(jsonShard->slice());
|
||||
CHECK(shardView);
|
||||
|
||||
CHECK((std::string("testView") == wiew->name()));
|
||||
auto view = impl->ensure(123);
|
||||
CHECK((false == !view));
|
||||
CHECK((std::string("_iresearch_123_3_testView") == view->name()));
|
||||
CHECK((shardViewName == view->name()));
|
||||
|
||||
{
|
||||
arangodb::velocypack::Builder builder;
|
||||
|
@ -654,7 +758,7 @@ SECTION("test_rename") {
|
|||
CHECK((std::string("newName") == builder.slice().get("name").copyString()));
|
||||
}
|
||||
|
||||
CHECK((std::string("_iresearch_123_3_newName") == view->name()));
|
||||
CHECK((("_iresearch_123_" + wiewId) == view->name()));
|
||||
wiew->rename("testView", true); // rename back or vocbase will be out of sync
|
||||
}
|
||||
}
|
||||
|
@ -662,9 +766,6 @@ SECTION("test_rename") {
|
|||
SECTION("test_toVelocyPack") {
|
||||
// base
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\", \"unusedKey\": \"unusedValue\" }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
|
@ -686,9 +787,6 @@ SECTION("test_toVelocyPack") {
|
|||
|
||||
// includeProperties
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\", \"unusedKey\": \"unusedValue\" }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
|
@ -715,9 +813,6 @@ SECTION("test_toVelocyPack") {
|
|||
|
||||
// includeSystem
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\", \"unusedKey\": \"unusedValue\" }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
|
@ -744,18 +839,19 @@ SECTION("test_toVelocyPack") {
|
|||
|
||||
SECTION("test_transaction_snapshot") {
|
||||
static std::vector<std::string> const EMPTY;
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto viewJson = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\", \"commit\": { \"commitIntervalMsec\": 0 } }");
|
||||
auto collectionJson = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testCollection\" }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto logicalCollection = vocbase.createCollection(collectionJson->slice());
|
||||
REQUIRE(nullptr != logicalCollection);
|
||||
auto logicalWiew = vocbase.createView(viewJson->slice());
|
||||
REQUIRE((false == !logicalWiew));
|
||||
auto* wiewImpl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(logicalWiew.get());
|
||||
REQUIRE((nullptr != wiewImpl));
|
||||
auto logicalView = wiewImpl->ensure(42);
|
||||
auto logicalView = wiewImpl->ensure(logicalCollection->id());
|
||||
REQUIRE((false == !logicalView));
|
||||
auto* viewImpl = dynamic_cast<arangodb::iresearch::IResearchView*>(logicalView.get());
|
||||
REQUIRE((nullptr != viewImpl));
|
||||
|
||||
// add a single document to view (do not sync)
|
||||
{
|
||||
|
@ -776,41 +872,6 @@ SECTION("test_transaction_snapshot") {
|
|||
|
||||
// no snapshot in TransactionState (force == false, waitForSync = false)
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = wiewImpl->snapshot(*state);
|
||||
CHECK((nullptr == snapshot));
|
||||
}
|
||||
|
||||
// no snapshot in TransactionState (force == true, waitForSync = false)
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
auto* snapshot = wiewImpl->snapshot(*state, true);
|
||||
CHECK((nullptr != snapshot));
|
||||
CHECK((0 == snapshot->live_docs_count()));
|
||||
}
|
||||
|
||||
// no snapshot in TransactionState (force == false, waitForSync = true)
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
state->waitForSync(true);
|
||||
auto* snapshot = wiewImpl->snapshot(*state);
|
||||
CHECK((nullptr == snapshot));
|
||||
}
|
||||
|
||||
// no snapshot in TransactionState (force == true, waitForSync = true)
|
||||
{
|
||||
auto state = s.engine.createTransactionState(vocbase, arangodb::transaction::Options());
|
||||
state->waitForSync(true);
|
||||
auto* snapshot = wiewImpl->snapshot(*state, true);
|
||||
CHECK((nullptr != snapshot));
|
||||
CHECK((1 == snapshot->live_docs_count()));
|
||||
}
|
||||
|
||||
// add another single document to view (do not sync)
|
||||
{
|
||||
auto doc = arangodb::velocypack::Parser::fromJson("{ \"key\": 2 }");
|
||||
arangodb::iresearch::IResearchLinkMeta meta;
|
||||
meta._includeAllFields = true;
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
|
@ -819,11 +880,12 @@ SECTION("test_transaction_snapshot") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
viewImpl->insert(trx, 42, arangodb::LocalDocumentId(1), doc->slice(), meta);
|
||||
auto* snapshot = wiewImpl->snapshot(trx, { logicalCollection->name() });
|
||||
CHECK((nullptr == snapshot));
|
||||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
|
||||
// old snapshot in TransactionState (force == false, waitForSync = false)
|
||||
// no snapshot in TransactionState (force == true, waitForSync = false)
|
||||
{
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
|
@ -832,85 +894,68 @@ SECTION("test_transaction_snapshot") {
|
|||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* state = trx.state();
|
||||
wiewImpl->apply(trx);
|
||||
state->updateStatus(arangodb::transaction::Status::RUNNING);
|
||||
auto* snapshot = wiewImpl->snapshot(*state);
|
||||
CHECK((trx.begin().ok()));
|
||||
auto* snapshot = wiewImpl->snapshot(trx, { logicalCollection->name() }, true);
|
||||
CHECK((nullptr != snapshot));
|
||||
CHECK((1 == snapshot->live_docs_count()));
|
||||
state->updateStatus(arangodb::transaction::Status::ABORTED); // prevent assertion in destructor
|
||||
CHECK((0 == snapshot->live_docs_count()));
|
||||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
|
||||
// old snapshot in TransactionState (force == true, waitForSync = false)
|
||||
// no snapshot in TransactionState (force == false, waitForSync = true)
|
||||
{
|
||||
arangodb::transaction::Options opts;
|
||||
opts.waitForSync = true;
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
opts
|
||||
);
|
||||
auto* state = trx.state();
|
||||
wiewImpl->apply(trx);
|
||||
state->updateStatus(arangodb::transaction::Status::RUNNING);
|
||||
auto* snapshot = wiewImpl->snapshot(*state, true);
|
||||
CHECK((nullptr != snapshot));
|
||||
CHECK((1 == snapshot->live_docs_count()));
|
||||
state->updateStatus(arangodb::transaction::Status::ABORTED); // prevent assertion in destructor
|
||||
CHECK((trx.begin().ok()));
|
||||
auto* snapshot = wiewImpl->snapshot(trx, { logicalCollection->name() });
|
||||
CHECK((nullptr == snapshot));
|
||||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
|
||||
// old snapshot in TransactionState (force == true, waitForSync = false during updateStatus(), true during snapshot())
|
||||
// no snapshot in TransactionState (force == true, waitForSync = true)
|
||||
{
|
||||
arangodb::transaction::Options opts;
|
||||
opts.waitForSync = true;
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
opts
|
||||
);
|
||||
auto* state = trx.state();
|
||||
wiewImpl->apply(trx);
|
||||
state->updateStatus(arangodb::transaction::Status::RUNNING);
|
||||
state->waitForSync(true);
|
||||
auto* snapshot = wiewImpl->snapshot(*state, true);
|
||||
CHECK((trx.begin().ok()));
|
||||
auto* snapshot = wiewImpl->snapshot(trx, { logicalCollection->name() }, true);
|
||||
CHECK((nullptr != snapshot));
|
||||
CHECK((1 == snapshot->live_docs_count()));
|
||||
state->updateStatus(arangodb::transaction::Status::ABORTED); // prevent assertion in destructor
|
||||
}
|
||||
|
||||
// old snapshot in TransactionState (force == true, waitForSync = true during updateStatus(), false during snapshot())
|
||||
{
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
auto* state = trx.state();
|
||||
state->waitForSync(true);
|
||||
wiewImpl->apply(trx);
|
||||
state->updateStatus(arangodb::transaction::Status::RUNNING);
|
||||
state->waitForSync(false);
|
||||
auto* snapshot = wiewImpl->snapshot(*state, true);
|
||||
CHECK((nullptr != snapshot));
|
||||
CHECK((2 == snapshot->live_docs_count()));
|
||||
state->updateStatus(arangodb::transaction::Status::ABORTED); // prevent assertion in destructor
|
||||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
}
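Read together, the cases in this section boil down to: force decides whether a reader is handed out at all, and waitForSync decides whether documents written to the not-yet-synced view become visible in it. A compact illustrative encoding of that table (stand-in types, not the actual snapshot logic):

#include <cassert>
#include <cstddef>

struct Reader {
  bool created;
  std::size_t live_docs_count;
};

// force == false -> no reader (nullptr in the tests)
// force == true  -> reader; pending docs visible only when waitForSync is requested
Reader openSnapshot(bool force, bool waitForSync, std::size_t pendingDocs) {
  if (!force) {
    return {false, 0};
  }
  return {true, waitForSync ? pendingDocs : 0};
}

int main() {
  assert(!openSnapshot(false, false, 1).created);
  assert(openSnapshot(true, false, 1).live_docs_count == 0);
  assert(!openSnapshot(false, true, 1).created);
  assert(openSnapshot(true, true, 1).live_docs_count == 1);
  return 0;
}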
|
||||
|
||||
SECTION("test_updateProperties") {
|
||||
auto* ci = arangodb::ClusterInfo::instance();
|
||||
REQUIRE((nullptr != ci));
|
||||
auto* databaseFeature = arangodb::application_features::ApplicationServer::getFeature<arangodb::DatabaseFeature>("Database");
|
||||
REQUIRE((nullptr != databaseFeature));
|
||||
std::string error;
|
||||
|
||||
// update empty (partial)
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto collectionJson = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testCollection\" }");
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\", \"properties\": { \"collections\": [ 3, 4, 5 ], \"threadsMaxIdle\": 24, \"threadsMaxTotal\": 42 } }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto* logicalCollection = vocbase.createCollection(collectionJson->slice());
|
||||
auto viewJson = arangodb::velocypack::Parser::fromJson("{ \"id\": \"42\", \"name\": \"testView\", \"type\": \"arangosearch\", \"properties\": { \"collections\": [ 3, 4, 5 ], \"threadsMaxIdle\": 24, \"threadsMaxTotal\": 42 } }");
|
||||
TRI_vocbase_t* vocbase; // will be owned by DatabaseFeature
|
||||
REQUIRE((TRI_ERROR_NO_ERROR == databaseFeature->createDatabase(0, "testDatabase" TOSTRING(__LINE__), vocbase)));
|
||||
REQUIRE((nullptr != vocbase));
|
||||
REQUIRE((TRI_ERROR_NO_ERROR == ci->createDatabaseCoordinator(vocbase->name(), arangodb::velocypack::Slice::emptyObjectSlice(), error, 0.)));
|
||||
auto* logicalCollection = vocbase->createCollection(collectionJson->slice());
|
||||
CHECK((nullptr != logicalCollection));
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
CHECK((TRI_ERROR_NO_ERROR == ci->createViewCoordinator(vocbase->name(), "42", viewJson->slice(), error)));
|
||||
auto wiew = ci->getView(vocbase->name(), "42"); // link creation requires cluster-view to be in ClusterInfo instead of TRI_vocbase_t
|
||||
CHECK((false == !wiew));
|
||||
auto* impl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(wiew.get());
|
||||
CHECK((nullptr != impl));
|
||||
|
@ -964,15 +1009,16 @@ SECTION("test_updateProperties") {
|
|||
|
||||
// update empty (full)
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto collectionJson = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testCollection\" }");
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\", \"properties\": { \"collections\": [ 3, 4, 5 ], \"threadsMaxIdle\": 24, \"threadsMaxTotal\": 42 } }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto* logicalCollection = vocbase.createCollection(collectionJson->slice());
|
||||
auto viewJson = arangodb::velocypack::Parser::fromJson("{ \"id\": \"42\", \"name\": \"testView\", \"type\": \"arangosearch\", \"properties\": { \"collections\": [ 3, 4, 5 ], \"threadsMaxIdle\": 24, \"threadsMaxTotal\": 42 } }");
|
||||
TRI_vocbase_t* vocbase; // will be owned by DatabaseFeature
|
||||
REQUIRE((TRI_ERROR_NO_ERROR == databaseFeature->createDatabase(0, "testDatabase" TOSTRING(__LINE__), vocbase)));
|
||||
REQUIRE((nullptr != vocbase));
|
||||
REQUIRE((TRI_ERROR_NO_ERROR == ci->createDatabaseCoordinator(vocbase->name(), arangodb::velocypack::Slice::emptyObjectSlice(), error, 0.)));
|
||||
auto* logicalCollection = vocbase->createCollection(collectionJson->slice());
|
||||
CHECK((nullptr != logicalCollection));
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
CHECK((TRI_ERROR_NO_ERROR == ci->createViewCoordinator(vocbase->name(), "42", viewJson->slice(), error)));
|
||||
auto wiew = ci->getView(vocbase->name(), "42"); // link creation requires cluster-view to be in ClusterInfo instead of TRI_vocbase_t
|
||||
CHECK((false == !wiew));
|
||||
auto* impl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(wiew.get());
|
||||
CHECK((nullptr != impl));
|
||||
|
@ -1026,15 +1072,16 @@ SECTION("test_updateProperties") {
|
|||
|
||||
// update non-empty (partial)
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto collectionJson = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testCollection\" }");
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\", \"properties\": { \"collections\": [ 3, 4, 5 ], \"threadsMaxIdle\": 24, \"threadsMaxTotal\": 42 } }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto* logicalCollection = vocbase.createCollection(collectionJson->slice());
|
||||
auto viewJson = arangodb::velocypack::Parser::fromJson("{ \"id\": \"42\", \"name\": \"testView\", \"type\": \"arangosearch\", \"properties\": { \"collections\": [ 3, 4, 5 ], \"threadsMaxIdle\": 24, \"threadsMaxTotal\": 42 } }");
|
||||
TRI_vocbase_t* vocbase; // will be owned by DatabaseFeature
|
||||
REQUIRE((TRI_ERROR_NO_ERROR == databaseFeature->createDatabase(0, "testDatabase" TOSTRING(__LINE__), vocbase)));
|
||||
REQUIRE((nullptr != vocbase));
|
||||
REQUIRE((TRI_ERROR_NO_ERROR == ci->createDatabaseCoordinator(vocbase->name(), arangodb::velocypack::Slice::emptyObjectSlice(), error, 0.)));
|
||||
auto* logicalCollection = vocbase->createCollection(collectionJson->slice());
|
||||
CHECK((nullptr != logicalCollection));
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
CHECK((TRI_ERROR_NO_ERROR == ci->createViewCoordinator(vocbase->name(), "42", viewJson->slice(), error)));
|
||||
auto wiew = ci->getView(vocbase->name(), "42"); // link creation requires cluster-view to be in ClusterInfo instead of TRI_vocbase_t
|
||||
CHECK((false == !wiew));
|
||||
auto* impl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(wiew.get());
|
||||
CHECK((nullptr != impl));
|
||||
|
@ -1090,18 +1137,19 @@ SECTION("test_updateProperties") {
|
|||
|
||||
// update non-empty (full)
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto collection0Json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testCollection\" }");
|
||||
auto collection1Json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testCollection1\", \"id\": \"123\" }");
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\", \"properties\": { \"collections\": [ 3, 4, 5 ], \"threadsMaxIdle\": 24, \"threadsMaxTotal\": 42 } }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto* logicalCollection0 = vocbase.createCollection(collection0Json->slice());
|
||||
auto viewJson = arangodb::velocypack::Parser::fromJson("{ \"id\": \"42\", \"name\": \"testView\", \"type\": \"arangosearch\", \"properties\": { \"collections\": [ 3, 4, 5 ], \"threadsMaxIdle\": 24, \"threadsMaxTotal\": 42 } }");
|
||||
TRI_vocbase_t* vocbase; // will be owned by DatabaseFeature
|
||||
REQUIRE((TRI_ERROR_NO_ERROR == databaseFeature->createDatabase(0, "testDatabase" TOSTRING(__LINE__), vocbase)));
|
||||
REQUIRE((nullptr != vocbase));
|
||||
REQUIRE((TRI_ERROR_NO_ERROR == ci->createDatabaseCoordinator(vocbase->name(), arangodb::velocypack::Slice::emptyObjectSlice(), error, 0.)));
|
||||
auto* logicalCollection0 = vocbase->createCollection(collection0Json->slice());
|
||||
CHECK((nullptr != logicalCollection0));
|
||||
auto* logicalCollection1 = vocbase.createCollection(collection1Json->slice());
|
||||
auto* logicalCollection1 = vocbase->createCollection(collection1Json->slice());
|
||||
CHECK((nullptr != logicalCollection1));
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
CHECK((TRI_ERROR_NO_ERROR == ci->createViewCoordinator(vocbase->name(), "42", viewJson->slice(), error)));
|
||||
auto wiew = ci->getView(vocbase->name(), "42"); // link creation requires cluster-view to be in ClusterInfo instead of TRI_vocbase_t
|
||||
CHECK((false == !wiew));
|
||||
auto* impl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(wiew.get());
|
||||
CHECK((nullptr != impl));
|
||||
|
@ -1157,11 +1205,11 @@ SECTION("test_updateProperties") {
|
|||
}
|
||||
|
||||
SECTION("test_visitCollections") {
|
||||
auto* ci = arangodb::ClusterInfo::instance();
|
||||
REQUIRE(nullptr != ci);
|
||||
|
||||
// visit empty
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\" }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
|
@ -1175,9 +1223,7 @@ SECTION("test_visitCollections") {
|
|||
|
||||
// visit non-empty
|
||||
{
|
||||
s.agency->responses.clear();
|
||||
s.agency->responses["POST /_api/agency/read HTTP/1.1\r\n\r\n[[\"/Sync/LatestID\"]]"] = "http/1.0 200\n\n[ { \"\": { \"Sync\": { \"LatestID\" : 1 } } } ]";
|
||||
s.agency->responses["POST /_api/agency/write HTTP/1.1"] = "http/1.0 200\n\n{\"results\": []}";
|
||||
auto const wiewId = std::to_string(ci->uniqid() + 1); // +1 because LogicalView creation will generate a new ID
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\" }");
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto wiew = arangodb::iresearch::IResearchViewDBServer::make(vocbase, json->slice(), true, 42);
|
||||
|
@ -1185,8 +1231,18 @@ SECTION("test_visitCollections") {
|
|||
auto* impl = dynamic_cast<arangodb::iresearch::IResearchViewDBServer*>(wiew.get());
|
||||
CHECK((nullptr != impl));
|
||||
|
||||
// ensure we have shard view in vocbase
|
||||
auto const shardViewName = "_iresearch_123_" + wiewId;
|
||||
auto jsonShard = arangodb::velocypack::Parser::fromJson(
|
||||
"{ \"id\": 100, \"name\": \"" + shardViewName + "\", \"type\": \"arangosearch\", \"isSystem\": true }"
|
||||
);
|
||||
CHECK((true == !vocbase.lookupView(shardViewName)));
|
||||
auto shardView = vocbase.createView(jsonShard->slice());
|
||||
CHECK(shardView);
|
||||
|
||||
auto view = impl->ensure(123);
|
||||
CHECK((false == !view));
|
||||
CHECK(shardView == view);
|
||||
std::set<TRI_voc_cid_t> cids = { 123 };
|
||||
static auto visitor = [&cids](TRI_voc_cid_t cid)->bool { return 1 == cids.erase(cid); };
|
||||
CHECK((true == wiew->visitCollections(visitor))); // all collections expected
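The visitor contract exercised here: visitCollections stops and reports false as soon as the callback returns false, so an always-false visitor only succeeds on a view with no registered collections, while the erase-based visitor verifies exact coverage. A standalone sketch of that contract (the free function and cid_t alias are illustrative):

#include <cassert>
#include <cstdint>
#include <functional>
#include <set>
#include <vector>

using cid_t = std::uint64_t;

// visitation aborts (and reports false) as soon as the visitor returns false
bool visitCollections(std::vector<cid_t> const& registered,
                      std::function<bool(cid_t)> const& visitor) {
  for (auto cid : registered) {
    if (!visitor(cid)) {
      return false;
    }
  }
  return true;
}

int main() {
  std::vector<cid_t> const registered{123};

  // an always-false visitor only succeeds when nothing is registered
  assert(visitCollections({}, [](cid_t) { return false; }));
  assert(!visitCollections(registered, [](cid_t) { return false; }));

  // the erase-based visitor from the test: every visited cid must be expected exactly once
  std::set<cid_t> cids{123};
  assert(visitCollections(registered, [&cids](cid_t cid) { return 1 == cids.erase(cid); }));
  assert(cids.empty()); // all collections expected
  return 0;
}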
|
||||
|
|
|
@ -37,6 +37,8 @@
|
|||
#include "Utils/OperationOptions.h"
|
||||
#include "Utils/SingleCollectionTransaction.h"
|
||||
#include "Aql/AqlFunctionFeature.h"
|
||||
#include "Aql/BasicBlocks.h"
|
||||
#include "Aql/ExecutionEngine.h"
|
||||
#include "Aql/OptimizerRulesFeature.h"
|
||||
#include "GeneralServer/AuthenticationFeature.h"
|
||||
#include "IResearch/ApplicationServerHelper.h"
|
||||
|
@ -45,6 +47,7 @@
|
|||
#include "IResearch/IResearchCommon.h"
|
||||
#include "IResearch/IResearchView.h"
|
||||
#include "IResearch/IResearchViewNode.h"
|
||||
#include "IResearch/IResearchViewBlock.h"
|
||||
#include "IResearch/IResearchAnalyzerFeature.h"
|
||||
#include "IResearch/SystemDatabaseFeature.h"
|
||||
#include "Logger/Logger.h"
|
||||
|
@ -190,12 +193,15 @@ SECTION("construct") {
|
|||
CHECK(42 == node.id());
|
||||
CHECK(logicalView == node.view());
|
||||
CHECK(node.sortCondition().empty());
|
||||
CHECK(!node.volatile_filter());
|
||||
CHECK(!node.volatile_sort());
|
||||
CHECK(!node.volatility().first); // filter volatility
|
||||
CHECK(!node.volatility().second); // sort volatility
|
||||
CHECK(node.getVariablesUsedHere().empty());
|
||||
auto const setHere = node.getVariablesSetHere();
|
||||
CHECK(1 == setHere.size());
|
||||
CHECK(&outVariable == setHere[0]);
|
||||
|
||||
size_t nrItems{};
|
||||
CHECK(0. == node.estimateCost(nrItems));
|
||||
CHECK(0. == node.estimateCost(nrItems)); // no dependencies
|
||||
CHECK(0 == nrItems);
|
||||
}
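The construct checks reflect the API change in this commit: filter and sort volatility are now reported together as a single pair instead of two separate accessors. A trivial illustration of reading such a pair:

#include <cassert>
#include <utility>

int main() {
  // {filter volatility, sort volatility}, both false for the plain node constructed above
  std::pair<bool, bool> volatility{false, false};
  assert(!volatility.first);   // filter not volatile
  assert(!volatility.second);  // sort not volatile
  return 0;
}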
|
||||
|
||||
|
@ -246,8 +252,7 @@ SECTION("clone") {
|
|||
CHECK(node.view() == cloned.view());
|
||||
CHECK(&node.filterCondition() == &cloned.filterCondition());
|
||||
CHECK(node.sortCondition() == cloned.sortCondition());
|
||||
CHECK(node.volatile_filter() == cloned.volatile_filter());
|
||||
CHECK(node.volatile_sort() == cloned.volatile_sort());
|
||||
CHECK(node.volatility() == cloned.volatility());
|
||||
|
||||
size_t lhsNrItems{}, rhsNrItems{};
|
||||
CHECK(node.estimateCost(lhsNrItems) == cloned.estimateCost(rhsNrItems));
|
||||
|
@ -277,8 +282,7 @@ SECTION("clone") {
|
|||
CHECK(node.view() == cloned.view());
|
||||
CHECK(&node.filterCondition() == &cloned.filterCondition());
|
||||
CHECK(node.sortCondition() == cloned.sortCondition());
|
||||
CHECK(node.volatile_filter() == cloned.volatile_filter());
|
||||
CHECK(node.volatile_sort() == cloned.volatile_sort());
|
||||
CHECK(node.volatility() == cloned.volatility());
|
||||
|
||||
size_t lhsNrItems{}, rhsNrItems{};
|
||||
CHECK(node.estimateCost(lhsNrItems) == cloned.estimateCost(rhsNrItems));
|
||||
|
@ -307,8 +311,7 @@ SECTION("clone") {
|
|||
CHECK(node.view() == cloned.view());
|
||||
CHECK(&node.filterCondition() == &cloned.filterCondition());
|
||||
CHECK(node.sortCondition() == cloned.sortCondition());
|
||||
CHECK(node.volatile_filter() == cloned.volatile_filter());
|
||||
CHECK(node.volatile_sort() == cloned.volatile_sort());
|
||||
CHECK(node.volatility() == cloned.volatility());
|
||||
|
||||
size_t lhsNrItems{}, rhsNrItems{};
|
||||
CHECK(node.estimateCost(lhsNrItems) == cloned.estimateCost(rhsNrItems));
|
||||
|
@ -352,8 +355,7 @@ SECTION("clone") {
|
|||
CHECK(node.view() == cloned.view());
|
||||
CHECK(&node.filterCondition() == &cloned.filterCondition());
|
||||
CHECK(node.sortCondition() == cloned.sortCondition());
|
||||
CHECK(node.volatile_filter() == cloned.volatile_filter());
|
||||
CHECK(node.volatile_sort() == cloned.volatile_sort());
|
||||
CHECK(node.volatility() == cloned.volatility());
|
||||
|
||||
size_t lhsNrItems{}, rhsNrItems{};
|
||||
CHECK(node.estimateCost(lhsNrItems) == cloned.estimateCost(rhsNrItems));
|
||||
|
@ -386,8 +388,7 @@ SECTION("clone") {
|
|||
CHECK(node.view() == cloned.view());
|
||||
CHECK(&node.filterCondition() == &cloned.filterCondition());
|
||||
CHECK(node.sortCondition() == cloned.sortCondition());
|
||||
CHECK(node.volatile_filter() == cloned.volatile_filter());
|
||||
CHECK(node.volatile_sort() == cloned.volatile_sort());
|
||||
CHECK(node.volatility() == cloned.volatility());
|
||||
|
||||
size_t lhsNrItems{}, rhsNrItems{};
|
||||
CHECK(node.estimateCost(lhsNrItems) == cloned.estimateCost(rhsNrItems));
|
||||
|
@ -419,8 +420,7 @@ SECTION("clone") {
|
|||
CHECK(node.view() == cloned.view());
|
||||
CHECK(&node.filterCondition() == &cloned.filterCondition());
|
||||
CHECK(node.sortCondition() == cloned.sortCondition());
|
||||
CHECK(node.volatile_filter() == cloned.volatile_filter());
|
||||
CHECK(node.volatile_sort() == cloned.volatile_sort());
|
||||
CHECK(node.volatility() == cloned.volatility());
|
||||
|
||||
size_t lhsNrItems{}, rhsNrItems{};
|
||||
CHECK(node.estimateCost(lhsNrItems) == cloned.estimateCost(rhsNrItems));
|
||||
|
@ -491,8 +491,7 @@ SECTION("serialize") {
|
|||
CHECK(node.view() == deserialized.view());
|
||||
CHECK(&node.filterCondition() == &deserialized.filterCondition());
|
||||
CHECK(node.sortCondition() == deserialized.sortCondition());
|
||||
CHECK(node.volatile_filter() == deserialized.volatile_filter());
|
||||
CHECK(node.volatile_sort() == deserialized.volatile_sort());
|
||||
CHECK(node.volatility() == deserialized.volatility());
|
||||
|
||||
size_t lhsNrItems{}, rhsNrItems{};
|
||||
CHECK(node.estimateCost(lhsNrItems) == deserialized.estimateCost(rhsNrItems));
|
||||
|
@ -517,8 +516,7 @@ SECTION("serialize") {
|
|||
CHECK(node.view() == deserialized.view());
|
||||
CHECK(&node.filterCondition() == &deserialized.filterCondition());
|
||||
CHECK(node.sortCondition() == deserialized.sortCondition());
|
||||
CHECK(node.volatile_filter() == deserialized.volatile_filter());
|
||||
CHECK(node.volatile_sort() == deserialized.volatile_sort());
|
||||
CHECK(node.volatility() == deserialized.volatility());
|
||||
|
||||
size_t lhsNrItems{}, rhsNrItems{};
|
||||
CHECK(node.estimateCost(lhsNrItems) == deserialized.estimateCost(rhsNrItems));
|
||||
|
@ -614,4 +612,105 @@ SECTION("collections") {
|
|||
CHECK(expectedCollections.empty());
|
||||
}
|
||||
|
||||
SECTION("createBlockSingleServer") {
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto createJson = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\" }");
|
||||
auto logicalView = vocbase.createView(createJson->slice());
|
||||
REQUIRE((false == !logicalView));
|
||||
|
||||
// dummy query
|
||||
arangodb::aql::Query query(
|
||||
false, vocbase, arangodb::aql::QueryString("RETURN 1"),
|
||||
nullptr, arangodb::velocypack::Parser::fromJson("{}"),
|
||||
arangodb::aql::PART_MAIN
|
||||
);
|
||||
query.prepare(arangodb::QueryRegistryFeature::QUERY_REGISTRY, 42);
|
||||
|
||||
// dummy engine
|
||||
arangodb::aql::ExecutionEngine engine(&query);
|
||||
|
||||
arangodb::aql::Variable const outVariable("variable", 0);
|
||||
|
||||
// prepare view snapshot
|
||||
|
||||
// no filter condition, no sort condition
|
||||
{
|
||||
arangodb::iresearch::IResearchViewNode node(
|
||||
*query.plan(),
|
||||
42, // id
|
||||
vocbase, // database
|
||||
logicalView, // view
|
||||
outVariable,
|
||||
nullptr, // no filter condition
|
||||
{} // no sort condition
|
||||
);
|
||||
|
||||
std::unordered_map<arangodb::aql::ExecutionNode*, arangodb::aql::ExecutionBlock*> EMPTY;
|
||||
|
||||
// before transaction has started (no snapshot)
|
||||
try {
|
||||
auto block = node.createBlock(engine, EMPTY);
|
||||
CHECK(false);
|
||||
} catch (arangodb::basics::Exception const& e) {
|
||||
CHECK(TRI_ERROR_INTERNAL == e.code());
|
||||
}
|
||||
|
||||
// start transaction (put snapshot into)
|
||||
REQUIRE(query.trx()->state());
|
||||
arangodb::LogicalView::cast<arangodb::iresearch::IResearchView>(*logicalView).snapshot(
|
||||
*query.trx(), true
|
||||
);
|
||||
|
||||
// after transaction has started
|
||||
{
|
||||
auto block = node.createBlock(engine, EMPTY);
|
||||
CHECK(nullptr != block);
|
||||
CHECK(nullptr != dynamic_cast<arangodb::iresearch::IResearchViewUnorderedBlock*>(block.get()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FIXME TODO
|
||||
//SECTION("createBlockDBServer") {
|
||||
//}
|
||||
|
||||
SECTION("createBlockCoordinator") {
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto createJson = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testView\", \"type\": \"arangosearch\" }");
|
||||
auto logicalView = vocbase.createView(createJson->slice());
|
||||
REQUIRE((false == !logicalView));
|
||||
|
||||
// dummy query
|
||||
arangodb::aql::Query query(
|
||||
false, vocbase, arangodb::aql::QueryString("RETURN 1"),
|
||||
nullptr, arangodb::velocypack::Parser::fromJson("{}"),
|
||||
arangodb::aql::PART_MAIN
|
||||
);
|
||||
query.prepare(arangodb::QueryRegistryFeature::QUERY_REGISTRY, 42);
|
||||
|
||||
// dummy engine
|
||||
arangodb::aql::ExecutionEngine engine(&query);
|
||||
|
||||
arangodb::aql::Variable const outVariable("variable", 0);
|
||||
|
||||
// no filter condition, no sort condition
|
||||
arangodb::iresearch::IResearchViewNode node(
|
||||
*query.plan(),
|
||||
42, // id
|
||||
vocbase, // database
|
||||
logicalView, // view
|
||||
outVariable,
|
||||
nullptr, // no filter condition
|
||||
{} // no sort condition
|
||||
);
|
||||
|
||||
std::unordered_map<arangodb::aql::ExecutionNode*, arangodb::aql::ExecutionBlock*> EMPTY;
|
||||
|
||||
arangodb::ServerState::instance()->setRole(arangodb::ServerState::ROLE_COORDINATOR);
|
||||
auto emptyBlock = node.createBlock(engine, EMPTY);
|
||||
arangodb::ServerState::instance()->setRole(arangodb::ServerState::ROLE_SINGLE);
|
||||
CHECK(nullptr != emptyBlock);
|
||||
CHECK(nullptr != dynamic_cast<arangodb::aql::NoResultsBlock*>(emptyBlock.get()));
|
||||
}
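The role juggling above is what the assertion depends on: which execution block the view node creates is decided by the server role at creation time, so the test switches to ROLE_COORDINATOR, builds the block, and switches back. An illustrative mapping (the DB-server case is a hypothetical placeholder; only the coordinator and single-server expectations come from these tests):

#include <cassert>
#include <string>

enum class Role { Single, Coordinator, DBServer };

// Illustrative factory: which kind of execution block a view node would create per role.
std::string createBlockKind(Role role) {
  switch (role) {
    case Role::Coordinator: return "NoResultsBlock";      // coordinator produces no local results
    case Role::Single:      return "UnorderedViewBlock";  // single server reads the local snapshot
    default:                return "RemoteBlock";         // hypothetical placeholder for DB-servers
  }
}

int main() {
  assert(createBlockKind(Role::Coordinator) == "NoResultsBlock");
  assert(createBlockKind(Role::Single) == "UnorderedViewBlock");
  return 0;
}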
|
||||
|
||||
}
|
||||
|
|
|
@ -1037,12 +1037,17 @@ std::string StorageEngineMock::createCollection(
|
|||
}
|
||||
|
||||
std::unique_ptr<TRI_vocbase_t> StorageEngineMock::createDatabase(
|
||||
TRI_voc_tick_t id,
|
||||
arangodb::velocypack::Slice const& args,
|
||||
TRI_voc_tick_t id,
|
||||
arangodb::velocypack::Slice const& args,
|
||||
int& status
|
||||
) {
|
||||
TRI_ASSERT(false);
|
||||
return nullptr;
|
||||
if (!args.get("name").isString()) {
|
||||
status = TRI_ERROR_BAD_PARAMETER;
|
||||
}
|
||||
|
||||
status = TRI_ERROR_NO_ERROR;
|
||||
|
||||
return std::make_unique<TRI_vocbase_t>(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, id, args.get("name").copyString());
|
||||
}
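The mock now follows the usual create-database shape: report the outcome through the status out-parameter and return the constructed vocbase on success. A generic sketch of that pattern with plain types (names are illustrative, integer codes are stand-ins for TRI_ERROR_* values):

#include <cassert>
#include <memory>
#include <optional>
#include <string>

struct Database { std::string name; };

// status out-parameter + returned object, mirroring the shape of createDatabase above
std::unique_ptr<Database> createDatabase(std::optional<std::string> const& name, int& status) {
  if (!name) {
    status = 1; // stand-in for TRI_ERROR_BAD_PARAMETER
    return nullptr;
  }
  status = 0;   // stand-in for TRI_ERROR_NO_ERROR
  return std::make_unique<Database>(Database{*name});
}

int main() {
  int status = -1;
  assert(createDatabase(std::nullopt, status) == nullptr);
  assert(status == 1);

  auto db = createDatabase(std::string("testVocbase"), status);
  assert(db != nullptr && status == 0 && db->name == "testVocbase");
  return 0;
}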
|
||||
|
||||
void StorageEngineMock::createIndex(
|
||||
|
@ -1440,8 +1445,7 @@ void StorageEngineMock::waitUntilDeletion(TRI_voc_tick_t id, bool force, int& st
|
|||
}
|
||||
|
||||
int StorageEngineMock::writeCreateDatabaseMarker(TRI_voc_tick_t id, VPackSlice const& slice) {
|
||||
TRI_ASSERT(false);
|
||||
return TRI_ERROR_INTERNAL;
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
TransactionCollectionMock::TransactionCollectionMock(arangodb::TransactionState* state, TRI_voc_cid_t cid)