mirror of https://gitee.com/bigwinds/arangodb
issue 344.7: remove more redundant functions (#4863)
* issue 344.7: remove more redundant functions
* backport: fix missed functions under USE_ENTERPRISE
parent 793101528f
commit 06eb8ade01
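The change applies one mechanical pattern throughout: redundant forwarding accessors on LogicalCollection — cid(), dbName(), and (except for a deprecated test-only declaration) cid_as_string() — are removed, and call sites use the underlying accessors directly: id(), vocbase()->name() (guarded by a TRI_ASSERT on vocbase()), and std::to_string(id()). A minimal sketch of that pattern, using simplified stand-in types rather than the real ArangoDB declarations:

// Sketch only: Vocbase and Collection are simplified stand-ins, not the actual
// ArangoDB classes; they illustrate the wrapper removal performed by this commit.
#include <cassert>
#include <cstdint>
#include <string>

struct Vocbase {
  std::string const& name() const { return _name; }
  std::string _name;
};

struct Collection {
  uint64_t id() const { return _id; }            // primary identifier, kept
  Vocbase* vocbase() const { return _vocbase; }  // owning database, kept

  // Wrappers of the kind removed by the commit:
  //   uint64_t    cid() const           { return id(); }
  //   std::string cid_as_string() const { return std::to_string(id()); }
  //   std::string dbName() const        { return vocbase()->name(); }

  uint64_t _id = 0;
  Vocbase* _vocbase = nullptr;
};

// Call sites change from the wrappers to the primitives:
//   col->cid()            ->  col->id()
//   col->cid_as_string()  ->  std::to_string(col->id())
//   col->dbName()         ->  col->vocbase()->name()
std::string qualifiedName(Collection const* col) {
  assert(col->vocbase() != nullptr);  // mirrors the TRI_ASSERT(...->vocbase()) guards added in the diff
  return col->vocbase()->name() + "/" + std::to_string(col->id());
}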
@@ -57,8 +57,8 @@ int EnumerateCollectionBlock::initialize() {
if (_collection->isSatellite()) {
auto logicalCollection = _collection->getCollection();
auto cid = logicalCollection->planId();
auto dbName = logicalCollection->dbName();
TRI_ASSERT(logicalCollection->vocbase());
auto dbName = logicalCollection->vocbase()->name();
double maxWait = _engine->getQuery()->queryOptions().satelliteSyncWait;
bool inSync = false;
unsigned long waitInterval = 10000;
@@ -483,7 +483,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
aql::QueryOptions opts = query->queryOptions();
TRI_ASSERT(opts.transactionOptions.skipInaccessibleCollections);
opts.inaccessibleCollections.insert(shardId);
opts.inaccessibleCollections.insert(collection->getCollection()->cid_as_string());
opts.inaccessibleCollections.insert(std::to_string(collection->getCollection()->id()));
opts.toVelocyPack(result, true);
} else {
// the toVelocyPack will open & close the "options" object

@@ -832,7 +832,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
TRI_ASSERT(ServerState::instance()->isSingleServerOrCoordinator());
TRI_ASSERT(trxOps.skipInaccessibleCollections);
pair->second.inaccessibleShards.insert(shard);
pair->second.inaccessibleShards.insert(collection.second->getCollection()->cid_as_string());
pair->second.inaccessibleShards.insert(std::to_string(collection.second->getCollection()->id()));
}
#endif
}

@@ -860,7 +860,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
if (trx->isInaccessibleCollectionId(it->getPlanId())) {
TRI_ASSERT(trxOps.skipInaccessibleCollections);
pair->second.inaccessibleShards.insert(shard);
pair->second.inaccessibleShards.insert(it->getCollection()->cid_as_string());
pair->second.inaccessibleShards.insert(std::to_string(it->getCollection()->id()));
}
#endif
}
@@ -552,8 +552,9 @@ CloneShardDistribution(ClusterInfo* ci, LogicalCollection* col,
auto result = std::make_shared<std::unordered_map<std::string, std::vector<std::string>>>();
TRI_ASSERT(cid != 0);
std::string cidString = arangodb::basics::StringUtils::itoa(cid);
std::shared_ptr<LogicalCollection> other =
ci->getCollection(col->dbName(), cidString);
TRI_ASSERT(col->vocbase());
auto other = ci->getCollection(col->vocbase()->name(), cidString);
// The function guarantees that no nullptr is returned
TRI_ASSERT(other != nullptr);

@@ -2685,9 +2686,11 @@ std::shared_ptr<LogicalCollection> ClusterMethods::persistCollectionInAgency(
col->setStatus(TRI_VOC_COL_STATUS_LOADED);
VPackBuilder velocy = col->toVelocyPackIgnore(ignoreKeys, false, false);
TRI_ASSERT(col->vocbase());
auto& dbName = col->vocbase()->name();
std::string errorMsg;
int myerrno = ci->createCollectionCoordinator(
col->dbName(),
dbName,
std::to_string(col->id()),
col->numberOfShards(), col->replicationFactor(),
waitForSyncReplication, velocy.slice(), errorMsg, 240.0);

@@ -2698,9 +2701,10 @@ std::shared_ptr<LogicalCollection> ClusterMethods::persistCollectionInAgency(
}
THROW_ARANGO_EXCEPTION_MESSAGE(myerrno, errorMsg);
}
ci->loadPlan();
auto c = ci->getCollection(col->dbName(), std::to_string(col->id()));
auto c = ci->getCollection(dbName, std::to_string(col->id()));
// We never get a nullptr here because an exception is thrown if the
// collection does not exist. Also, the create collection should have
// failed before.
@@ -290,10 +290,11 @@ void ShardDistributionReporter::helperDistributionForDatabase(
std::vector<ServerID> serversToAsk;
while (!todoSyncStateCheck.empty()) {
counters.clear();
auto const col = todoSyncStateCheck.front();
auto const col = todoSyncStateCheck.front();
auto allShards = col->shardIds();
auto cic = _ci->getCollectionCurrent(dbName, col->cid_as_string());
auto cic = _ci->getCollectionCurrent(dbName, std::to_string(col->id()));
// Send requests
for (auto const& s : *(allShards.get())) {
double timeleft = endtime - TRI_microtime();

@@ -480,12 +481,15 @@ bool ShardDistributionReporter::testAllShardsInSync(
TRI_ASSERT(col != nullptr);
TRI_ASSERT(shardIds != nullptr);
auto cic = _ci->getCollectionCurrent(dbName, col->cid_as_string());
auto cic = _ci->getCollectionCurrent(dbName, std::to_string(col->id()));
for (auto const& s : *shardIds) {
auto curServers = cic->servers(s.first);
if (!TestIsShardInSync(s.second, curServers)) {
return false;
}
}
return true;
}
@@ -378,19 +378,23 @@ bool Index::Compare(VPackSlice const& lhs, VPackSlice const& rhs) {
/// @brief return a contextual string for logging
std::string Index::context() const {
TRI_ASSERT(_collection->vocbase());
std::ostringstream result;
result << "index { id: " << id() << ", type: " << oldtypeName()
<< ", collection: " << _collection->dbName() << "/"
<< ", collection: " << _collection->vocbase()->name() << "/"
<< _collection->name() << ", unique: " << (_unique ? "true" : "false")
<< ", fields: ";
result << "[";
for (size_t i = 0; i < _fields.size(); ++i) {
if (i > 0) {
result << ", ";
}
result << _fields[i];
}
result << "] }";
return result.str();
@@ -3332,11 +3332,17 @@ Result MMFilesCollection::update(
*builder.get(), options.isRestore, revisionId);
if (_isDBServer) {
TRI_ASSERT(_logicalCollection->vocbase());
// Need to check that no sharding keys have changed:
if (arangodb::shardKeysChanged(_logicalCollection->dbName(),
trx->resolver()->getCollectionNameCluster(
_logicalCollection->planId()),
oldDoc, builder->slice(), false)) {
if (arangodb::shardKeysChanged(
_logicalCollection->vocbase()->name(),
trx->resolver()->getCollectionNameCluster(
_logicalCollection->planId()
),
oldDoc,
builder->slice(),
false
)) {
return Result(TRI_ERROR_CLUSTER_MUST_NOT_CHANGE_SHARDING_ATTRIBUTES);
}
}

@@ -3465,11 +3471,17 @@ Result MMFilesCollection::replace(
options.isRestore, revisionId);
if (_isDBServer) {
TRI_ASSERT(_logicalCollection->vocbase());
// Need to check that no sharding keys have changed:
if (arangodb::shardKeysChanged(_logicalCollection->dbName(),
trx->resolver()->getCollectionNameCluster(
_logicalCollection->planId()),
oldDoc, builder->slice(), false)) {
if (arangodb::shardKeysChanged(
_logicalCollection->vocbase()->name(),
trx->resolver()->getCollectionNameCluster(
_logicalCollection->planId()
),
oldDoc,
builder->slice(),
false
)) {
return Result(TRI_ERROR_CLUSTER_MUST_NOT_CHANGE_SHARDING_ATTRIBUTES);
}
}
@@ -700,13 +700,14 @@ bool MMFilesWalRecoverState::ReplayMarker(MMFilesMarker const* marker,
auto other = vocbase->lookupCollection(name);
if (other != nullptr) {
if (other->cid() == collection->cid()) {
if (other->id() == collection->id()) {
LOG_TOPIC(TRACE, arangodb::Logger::ENGINES)
<< "collection " << collectionId << " in database "
<< databaseId << " already renamed; moving on";
break;
} else {
TRI_voc_cid_t otherCid = other->cid();
auto otherCid = other->id();
state->releaseCollection(otherCid);
vocbase->dropCollection(other.get(), true, -1.0);
}

@@ -1073,7 +1074,7 @@ bool MMFilesWalRecoverState::ReplayMarker(MMFilesMarker const* marker,
collection = vocbase->lookupCollection(name).get();
if (collection != nullptr) {
TRI_voc_cid_t otherCid = collection->cid();
auto otherCid = collection->id();
state->releaseCollection(otherCid);
vocbase->dropCollection(collection, true, -1.0);

@@ -1609,9 +1610,9 @@ int MMFilesWalRecoverState::fillIndexes() {
physical->useSecondaryIndexes(true);
auto ctx = transaction::StandaloneContext::Create(collection->vocbase());
arangodb::SingleCollectionTransaction trx(ctx, collection->cid(),
AccessMode::Type::WRITE);
arangodb::SingleCollectionTransaction trx(
ctx, collection->id(), AccessMode::Type::WRITE
);
int res = physical->fillAllIndexes(&trx);
if (res != TRI_ERROR_NO_ERROR) {
@@ -365,8 +365,9 @@ static int DumpCollection(MMFilesReplicationDumpContext* dump,
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
TRI_voc_tick_t dataMin, TRI_voc_tick_t dataMax,
bool withTicks, bool useVst = false) {
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "dumping collection " << collection->cid() << ", tick range "
<< dataMin << " - " << dataMax;
LOG_TOPIC(TRACE, arangodb::Logger::FIXME)
<< "dumping collection " << collection->id()
<< ", tick range " << dataMin << " - " << dataMax;
bool const isEdgeCollection = (collection->type() == TRI_COL_TYPE_EDGE);

@@ -460,8 +461,15 @@ int MMFilesDumpCollectionReplication(MMFilesReplicationDumpContext* dump,
MMFilesCompactionPreventer compactionPreventer(mmfiles);
try {
res = DumpCollection(dump, collection, collection->vocbase()->id(),
collection->cid(), dataMin, dataMax, withTicks);
res = DumpCollection(
dump,
collection,
collection->vocbase()->id(),
collection->id(),
dataMin,
dataMax,
withTicks
);
} catch (...) {
res = TRI_ERROR_INTERNAL;
}
@@ -798,7 +798,7 @@ Result DatabaseInitialSyncer::handleCollection(VPackSlice const& parameters,
// -------------------------------------------------------------------------------------
if (phase == PHASE_DROP_CREATE) {
arangodb::LogicalCollection* col = resolveCollection(vocbase(), parameters);
auto* col = resolveCollection(vocbase(), parameters).get();
if (col == nullptr) {
// not found...

@@ -918,7 +918,7 @@ Result DatabaseInitialSyncer::handleCollection(VPackSlice const& parameters,
std::string const progress = "dumping data for " + collectionMsg;
setProgress(progress);
arangodb::LogicalCollection* col = resolveCollection(vocbase(), parameters);
auto* col = resolveCollection(vocbase(), parameters).get();
if (col == nullptr) {
return Result(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND, std::string("cannot dump: ") +

@@ -926,8 +926,8 @@ Result DatabaseInitialSyncer::handleCollection(VPackSlice const& parameters,
}
Result res;
std::string const& masterColl = !masterUuid.empty() ? masterUuid : StringUtils::itoa(masterCid);
if (incremental && getSize(col) > 0) {
res = handleCollectionSync(col, masterColl, _masterInfo._lastLogTick);
} else {
@@ -308,15 +308,15 @@ std::string Syncer::getCName(VPackSlice const& slice) const {
}
/// @brief extract the collection by either id or name, may return nullptr!
LogicalCollection* Syncer::getCollectionByIdOrName(TRI_vocbase_t* vocbase,
TRI_voc_cid_t cid,
std::string const& name) {
auto* idCol = vocbase->lookupCollection(cid).get();
arangodb::LogicalCollection* nameCol = nullptr;
std::shared_ptr<LogicalCollection> Syncer::getCollectionByIdOrName(
TRI_vocbase_t* vocbase, TRI_voc_cid_t cid, std::string const& name
) {
auto idCol = vocbase->lookupCollection(cid);
std::shared_ptr<LogicalCollection> nameCol;
if (!name.empty()) {
// try looking up the collection by name then
nameCol = vocbase->lookupCollection(name).get();
nameCol = vocbase->lookupCollection(name);
}
if (idCol != nullptr && nameCol != nullptr) {

@@ -377,18 +377,20 @@ TRI_vocbase_t* Syncer::resolveVocbase(VPackSlice const& slice) {
}
}
arangodb::LogicalCollection* Syncer::resolveCollection(TRI_vocbase_t* vocbase,
VPackSlice const& slice) {
std::shared_ptr<LogicalCollection> Syncer::resolveCollection(
TRI_vocbase_t* vocbase, arangodb::velocypack::Slice const& slice
) {
TRI_ASSERT(vocbase != nullptr);
// extract "cid"
TRI_voc_cid_t cid = getCid(slice);
if (!simulate32Client() || cid == 0) {
VPackSlice uuid;
if ((uuid = slice.get("cuid")).isString()) {
return vocbase->lookupCollectionByUuid(uuid.copyString()).get();
return vocbase->lookupCollectionByUuid(uuid.copyString());
} else if ((uuid = slice.get("globallyUniqueId")).isString()) {
return vocbase->lookupCollectionByUuid(uuid.copyString()).get();
return vocbase->lookupCollectionByUuid(uuid.copyString());
}
}

@@ -397,11 +399,14 @@ arangodb::LogicalCollection* Syncer::resolveCollection(TRI_vocbase_t* vocbase,
TRI_errno_string(TRI_ERROR_REPLICATION_INVALID_RESPONSE);
return nullptr;
}
// extract optional "cname"
std::string cname = getCName(slice);
if (cname.empty()) {
cname = arangodb::basics::VelocyPackHelper::getStringValue(slice, "name", "");
}
return getCollectionByIdOrName(vocbase, cid, cname);
}

@@ -551,7 +556,7 @@ Result Syncer::createCollection(TRI_vocbase_t* vocbase,
slice, "type", TRI_COL_TYPE_DOCUMENT));
// resolve collection by uuid, name, cid (in that order of preference)
arangodb::LogicalCollection* col = resolveCollection(vocbase, slice);
auto* col = resolveCollection(vocbase, slice).get();
if (col != nullptr &&
col->type() == type &&

@@ -573,18 +578,24 @@ Result Syncer::createCollection(TRI_vocbase_t* vocbase,
if (col != nullptr) {
if (col->isSystem()) {
TRI_ASSERT(!simulate32Client() || col->globallyUniqueId() == col->name());
SingleCollectionTransaction trx(transaction::StandaloneContext::Create(vocbase),
col->cid(), AccessMode::Type::WRITE);
SingleCollectionTransaction trx(
transaction::StandaloneContext::Create(vocbase),
col->id(),
AccessMode::Type::WRITE
);
Result res = trx.begin();
if (!res.ok()) {
return res;
}
OperationOptions opts;
OperationResult opRes = trx.truncate(col->name(), opts);
if (opRes.fail()) {
return opRes.result;
}
return trx.finish(opRes.result);
} else {
vocbase->dropCollection(col, false, -1.0);

@@ -631,10 +642,13 @@ Result Syncer::createCollection(TRI_vocbase_t* vocbase,
/// @brief drops a collection, based on the VelocyPack provided
Result Syncer::dropCollection(VPackSlice const& slice, bool reportError) {
TRI_vocbase_t* vocbase = resolveVocbase(slice);
if (vocbase == nullptr) {
return Result(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
}
arangodb::LogicalCollection* col = resolveCollection(vocbase, slice);
auto* col = resolveCollection(vocbase, slice).get();
if (col == nullptr) {
if (reportError) {
return Result(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND);

@@ -642,7 +656,7 @@ Result Syncer::dropCollection(VPackSlice const& slice, bool reportError) {
return Result();
}
return Result(vocbase->dropCollection(col, true, -1.0));
}

@@ -652,21 +666,24 @@ Result Syncer::createIndex(VPackSlice const& slice) {
if (!indexSlice.isObject()) {
indexSlice = slice.get("data");
}
if (!indexSlice.isObject()) {
return Result(TRI_ERROR_REPLICATION_INVALID_RESPONSE, "index slice is not an object");
}
TRI_vocbase_t* vocbase = resolveVocbase(slice);
if (vocbase == nullptr) {
return Result(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
}
arangodb::LogicalCollection* col = resolveCollection(vocbase, slice);
auto col = resolveCollection(vocbase, slice);
if (col == nullptr) {
return Result(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND,
"did not find collection for index");
}
VPackBuilder s;
s.openObject();
s.add("objectId", VPackSlice::nullSlice());

@@ -710,7 +727,7 @@ Result Syncer::dropIndex(arangodb::velocypack::Slice const& slice) {
} else {
id = VelocyPackHelper::getStringValue(slice, "id", "");
}
if (id.empty()) {
return Result(TRI_ERROR_REPLICATION_INVALID_RESPONSE, "id not found in index drop slice");
}

@@ -721,7 +738,9 @@ Result Syncer::dropIndex(arangodb::velocypack::Slice const& slice) {
if (vocbase == nullptr) {
return Result(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
}
arangodb::LogicalCollection* col = resolveCollection(vocbase, slice);
auto* col = resolveCollection(vocbase, slice).get();
if (col == nullptr) {
return Result(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND);
}
@@ -138,30 +138,33 @@ class Syncer {
/// @brief handle the state response of the master
Result handleStateResponse(arangodb::velocypack::Slice const&);
virtual TRI_vocbase_t* resolveVocbase(velocypack::Slice const&);
LogicalCollection* resolveCollection(TRI_vocbase_t*, arangodb::velocypack::Slice const& slice);
std::shared_ptr<LogicalCollection> resolveCollection(
TRI_vocbase_t* vocbase, arangodb::velocypack::Slice const& slice
);
std::unordered_map<std::string, DatabaseGuard> const& vocbases() const {
return _vocbases;
}
/// @brief whether or not the HTTP result is valid or not
bool hasFailed(arangodb::httpclient::SimpleHttpResult* response) const;
/// @brief create an error result from a failed HTTP request/response
Result buildHttpError(arangodb::httpclient::SimpleHttpResult* response, std::string const& url) const;
/// we need to act like a 3.2 client
bool simulate32Client() const;
private:
/// @brief extract the collection by either id or name, may return nullptr!
LogicalCollection* getCollectionByIdOrName(TRI_vocbase_t*, TRI_voc_cid_t,
std::string const&);
std::shared_ptr<LogicalCollection> getCollectionByIdOrName(
TRI_vocbase_t* vocbase, TRI_voc_cid_t cid, std::string const& name
);
/// @brief apply a single marker from the collection dump
Result applyCollectionDumpMarkerInternal(transaction::Methods&,
LogicalCollection* coll,
@@ -290,11 +290,13 @@ Result TailingSyncer::processDocument(TRI_replication_operation_e type,
if (vocbase == nullptr) {
return Result(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
}
arangodb::LogicalCollection* coll = resolveCollection(vocbase, slice);
auto* coll = resolveCollection(vocbase, slice).get();
if (coll == nullptr) {
return Result(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND);
}
bool isSystem = coll->isSystem();
// extract "data"
VPackSlice const doc = slice.get("data");

@@ -554,9 +556,12 @@ Result TailingSyncer::renameCollection(VPackSlice const& slice) {
if (vocbase == nullptr) {
return Result(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
}
arangodb::LogicalCollection* col = nullptr;
if (slice.hasKey("cuid")) {
col = resolveCollection(vocbase, slice);
col = resolveCollection(vocbase, slice).get();
if (col == nullptr) {
return Result(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND, "unknown cuid");
}

@@ -605,13 +610,16 @@ Result TailingSyncer::changeCollection(VPackSlice const& slice) {
}
return Result(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
}
arangodb::LogicalCollection* col = resolveCollection(vocbase, slice);
auto* col = resolveCollection(vocbase, slice).get();
if (col == nullptr) {
if (isDeleted) {
// not a problem if a collection that is going to be deleted anyway
// does not exist on slave
return Result();
}
return Result(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND);
}
@@ -93,7 +93,7 @@ bool RestWalAccessHandler::parseFilter(WalAccess::Filter& filter) {
return false;
}
filter.collection = c->cid();
filter.collection = c->id();
}
}
@@ -115,8 +115,9 @@ RocksDBCollection::RocksDBCollection(LogicalCollection* collection,
}
rocksutils::globalRocksEngine()->addCollectionMapping(
_objectId, _logicalCollection->vocbase()->id(),
_logicalCollection->cid());
_objectId, _logicalCollection->vocbase()->id(), _logicalCollection->id()
);
if (_cacheEnabled) {
createCache();
}

@@ -135,8 +136,9 @@ RocksDBCollection::RocksDBCollection(LogicalCollection* collection,
_cacheEnabled(
static_cast<RocksDBCollection const*>(physical)->_cacheEnabled) {
rocksutils::globalRocksEngine()->addCollectionMapping(
_objectId, _logicalCollection->vocbase()->id(),
_logicalCollection->cid());
_objectId, _logicalCollection->vocbase()->id(), _logicalCollection->id()
);
if (_cacheEnabled) {
createCache();
}

@@ -245,7 +247,9 @@ TRI_voc_rid_t RocksDBCollection::revision() const { return _revisionId; }
TRI_voc_rid_t RocksDBCollection::revision(transaction::Methods* trx) const {
auto state = RocksDBTransactionState::toState(trx);
auto trxCollection = static_cast<RocksDBTransactionCollection*>(
state->findCollection(_logicalCollection->cid()));
state->findCollection(_logicalCollection->id())
);
TRI_ASSERT(trxCollection != nullptr);
return trxCollection->revision();

@@ -257,8 +261,11 @@ uint64_t RocksDBCollection::numberDocuments(transaction::Methods* trx) const {
TRI_ASSERT(!ServerState::instance()->isCoordinator());
auto state = RocksDBTransactionState::toState(trx);
auto trxCollection = static_cast<RocksDBTransactionCollection*>(
state->findCollection(_logicalCollection->cid()));
state->findCollection(_logicalCollection->id())
);
TRI_ASSERT(trxCollection != nullptr);
return trxCollection->numberDocuments();
}

@@ -522,10 +529,16 @@ std::shared_ptr<Index> RocksDBCollection::createIndex(
VPackBuilder indexInfo;
idx->toVelocyPack(indexInfo, false, true);
res = static_cast<RocksDBEngine*>(engine)->writeCreateCollectionMarker(
_logicalCollection->vocbase()->id(), _logicalCollection->cid(),
builder.slice(), RocksDBLogValue::IndexCreate(
_logicalCollection->vocbase()->id(),
_logicalCollection->cid(), indexInfo.slice()));
_logicalCollection->vocbase()->id(),
_logicalCollection->id(),
builder.slice(),
RocksDBLogValue::IndexCreate(
_logicalCollection->vocbase()->id(),
_logicalCollection->id(),
indexInfo.slice()
)
);
if (res != TRI_ERROR_NO_ERROR) {
// We could not persist the index creation. Better abort
// Remove the Index in the local list again.

@@ -603,10 +616,16 @@ int RocksDBCollection::restoreIndex(transaction::Methods* trx,
static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
TRI_ASSERT(engine != nullptr);
int res = engine->writeCreateCollectionMarker(
_logicalCollection->vocbase()->id(), _logicalCollection->cid(),
builder.slice(), RocksDBLogValue::IndexCreate(
_logicalCollection->vocbase()->id(),
_logicalCollection->cid(), indexInfo.slice()));
_logicalCollection->vocbase()->id(),
_logicalCollection->id(),
builder.slice(),
RocksDBLogValue::IndexCreate(
_logicalCollection->vocbase()->id(),
_logicalCollection->id(),
indexInfo.slice()
)
);
if (res != TRI_ERROR_NO_ERROR) {
// We could not persist the index creation. Better abort
// Remove the Index in the local list again.

@@ -667,11 +686,14 @@ bool RocksDBCollection::dropIndex(TRI_idx_iid_t iid) {
// log this event in the WAL and in the collection meta-data
int res = engine->writeCreateCollectionMarker(
_logicalCollection->vocbase()->id(), _logicalCollection->cid(),
builder.slice(),
RocksDBLogValue::IndexDrop(_logicalCollection->vocbase()->id(),
_logicalCollection->cid(), iid));
_logicalCollection->vocbase()->id(),
_logicalCollection->id(),
builder.slice(),
RocksDBLogValue::IndexDrop(
_logicalCollection->vocbase()->id(), _logicalCollection->id(), iid
)
);
if (isGeoIndex) {
// decrease total number of geo indexes by one
TRI_ASSERT(_numberOfGeoIndexes > 0);

@@ -762,29 +784,35 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
TRI_voc_rid_t rid = transaction::helpers::extractRevFromDocument(doc);
TRI_ASSERT(rid != 0);
state->prepareOperation(_logicalCollection->cid(),
rid, // actual revision ID!!
TRI_VOC_DOCUMENT_OPERATION_REMOVE);
state->prepareOperation(
_logicalCollection->id(),
rid, // actual revision ID!!
TRI_VOC_DOCUMENT_OPERATION_REMOVE
);
LocalDocumentId const docId =
RocksDBKey::documentId(RocksDBEntryType::Document, iter->key());
auto res = removeDocument(trx, docId, doc, options);
if (res.fail()) {
// Failed to remove document in truncate. Throw
THROW_ARANGO_EXCEPTION(res);
}
res = state->addOperation(_logicalCollection->cid(), docId.id(),
TRI_VOC_DOCUMENT_OPERATION_REMOVE);
res = state->addOperation(
_logicalCollection->id(), docId.id(), TRI_VOC_DOCUMENT_OPERATION_REMOVE
);
// transaction size limit reached
if (res.fail()) {
// This should never happen...
THROW_ARANGO_EXCEPTION_MESSAGE(res.errorNumber(), res.errorMessage());
}
trackWaitForSync(trx, options);
iter->Next();
}
// reset to previous value after truncate is finished
state->options().intermediateCommitCount = prvICC;

@@ -907,8 +935,9 @@ Result RocksDBCollection::insert(arangodb::transaction::Methods* trx,
auto mthds = RocksDBTransactionState::toMethods(trx);
RocksDBSavePoint guard(mthds, trx->isSingleOperationTransaction());
state->prepareOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_INSERT);
state->prepareOperation(
_logicalCollection->id(), revisionId, TRI_VOC_DOCUMENT_OPERATION_INSERT
);
// disable indexing in this transaction if we are allowed to
IndexingDisabler disabler(mthds, !hasGeoIndex() && trx->isSingleOperationTransaction());

@@ -924,8 +953,9 @@ Result RocksDBCollection::insert(arangodb::transaction::Methods* trx,
TRI_ASSERT(!mdr.empty());
}
Result result = state->addOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_INSERT);
auto result = state->addOperation(
_logicalCollection->id(), revisionId, TRI_VOC_DOCUMENT_OPERATION_INSERT
);
// transaction size limit reached -- fail
if (result.fail()) {

@@ -995,11 +1025,17 @@ Result RocksDBCollection::update(arangodb::transaction::Methods* trx,
options.mergeObjects, options.keepNull, *builder.get(),
options.isRestore, revisionId);
if (_isDBServer) {
TRI_ASSERT(_logicalCollection->vocbase());
// Need to check that no sharding keys have changed:
if (arangodb::shardKeysChanged(_logicalCollection->dbName(),
trx->resolver()->getCollectionNameCluster(
_logicalCollection->planId()),
oldDoc, builder->slice(), false)) {
if (arangodb::shardKeysChanged(
_logicalCollection->vocbase()->name(),
trx->resolver()->getCollectionNameCluster(
_logicalCollection->planId()
),
oldDoc,
builder->slice(),
false
)) {
return Result(TRI_ERROR_CLUSTER_MUST_NOT_CHANGE_SHARDING_ATTRIBUTES);
}
}

@@ -1011,8 +1047,9 @@ Result RocksDBCollection::update(arangodb::transaction::Methods* trx,
trx->isSingleOperationTransaction());
// add possible log statement under guard
state->prepareOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_UPDATE);
state->prepareOperation(
_logicalCollection->id(), revisionId, TRI_VOC_DOCUMENT_OPERATION_UPDATE
);
res = updateDocument(trx, oldDocumentId, oldDoc, documentId, newDoc, options);
if (res.ok()) {

@@ -1025,8 +1062,9 @@ Result RocksDBCollection::update(arangodb::transaction::Methods* trx,
TRI_ASSERT(!mdr.empty());
}
Result result = state->addOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_UPDATE);
auto result = state->addOperation(
_logicalCollection->id(), revisionId, TRI_VOC_DOCUMENT_OPERATION_UPDATE
);
// transaction size limit reached -- fail hard
if (result.fail()) {

@@ -1096,11 +1134,17 @@ Result RocksDBCollection::replace(transaction::Methods* trx,
revisionId);
if (_isDBServer) {
TRI_ASSERT(_logicalCollection->vocbase());
// Need to check that no sharding keys have changed:
if (arangodb::shardKeysChanged(_logicalCollection->dbName(),
trx->resolver()->getCollectionNameCluster(
_logicalCollection->planId()),
oldDoc, builder->slice(), false)) {
if (arangodb::shardKeysChanged(
_logicalCollection->vocbase()->name(),
trx->resolver()->getCollectionNameCluster(
_logicalCollection->planId()
),
oldDoc,
builder->slice(),
false
)) {
return Result(TRI_ERROR_CLUSTER_MUST_NOT_CHANGE_SHARDING_ATTRIBUTES);
}
}

@@ -1112,8 +1156,9 @@ Result RocksDBCollection::replace(transaction::Methods* trx,
trx->isSingleOperationTransaction());
// add possible log statement under guard
state->prepareOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_REPLACE);
state->prepareOperation(
_logicalCollection->id(), revisionId, TRI_VOC_DOCUMENT_OPERATION_REPLACE
);
Result opResult = updateDocument(trx, oldDocumentId, oldDoc, documentId, newDoc, options);

@@ -1127,8 +1172,9 @@ Result RocksDBCollection::replace(transaction::Methods* trx,
TRI_ASSERT(!mdr.empty());
}
Result result = state->addOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_REPLACE);
auto result = state->addOperation(
_logicalCollection->id(), revisionId, TRI_VOC_DOCUMENT_OPERATION_REPLACE
);
// transaction size limit reached -- fail
if (result.fail()) {

@@ -1191,17 +1237,19 @@ Result RocksDBCollection::remove(arangodb::transaction::Methods* trx,
trx->isSingleOperationTransaction());
// add possible log statement under guard
state->prepareOperation(_logicalCollection->cid(), oldRevisionId,
TRI_VOC_DOCUMENT_OPERATION_REMOVE);
state->prepareOperation(
_logicalCollection->id(), oldRevisionId, TRI_VOC_DOCUMENT_OPERATION_REMOVE
);
res = removeDocument(trx, oldDocumentId, oldDoc, options);
if (res.ok()) {
trackWaitForSync(trx, options);
// report key size
res = state->addOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_REMOVE);
res = state->addOperation(
_logicalCollection->id(), revisionId, TRI_VOC_DOCUMENT_OPERATION_REMOVE
);
// transaction size limit reached -- fail
if (res.fail()) {
THROW_ARANGO_EXCEPTION(res);

@@ -1311,7 +1359,7 @@ int RocksDBCollection::saveIndex(transaction::Methods* trx,
std::shared_ptr<VPackBuilder> builder = idx->toVelocyPack(false, true);
auto vocbase = _logicalCollection->vocbase();
auto collectionId = _logicalCollection->cid();
auto collectionId = _logicalCollection->id();
VPackSlice data = builder->slice();
StorageEngine* engine = EngineSelectorFeature::ENGINE;

@@ -1325,8 +1373,9 @@ int RocksDBCollection::saveIndex(transaction::Methods* trx,
arangodb::Result RocksDBCollection::fillIndexes(
transaction::Methods* trx, std::shared_ptr<arangodb::Index> added) {
// FIXME: assert for an exclusive lock on this collection
TRI_ASSERT(trx->state()->collection(_logicalCollection->cid(),
AccessMode::Type::EXCLUSIVE) != nullptr);
TRI_ASSERT(trx->state()->collection(
_logicalCollection->id(), AccessMode::Type::EXCLUSIVE
));
RocksDBIndex* ridx = static_cast<RocksDBIndex*>(added.get());
auto state = RocksDBTransactionState::toState(trx);

@@ -1804,9 +1853,11 @@ int RocksDBCollection::unlockRead() {
uint64_t RocksDBCollection::recalculateCounts() {
// start transaction to get a collection lock
auto ctx = transaction::StandaloneContext::Create(_logicalCollection->vocbase());
SingleCollectionTransaction trx(ctx, _logicalCollection->cid(),
AccessMode::Type::EXCLUSIVE);
SingleCollectionTransaction trx(
ctx, _logicalCollection->id(), AccessMode::Type::EXCLUSIVE
);
auto res = trx.begin();
if (res.fail()) {
THROW_ARANGO_EXCEPTION(res);
}

@@ -1936,9 +1987,11 @@ void RocksDBCollection::recalculateIndexEstimates(
// start transaction to get a collection lock
auto ctx = transaction::StandaloneContext::Create(_logicalCollection->vocbase());
arangodb::SingleCollectionTransaction trx(ctx, _logicalCollection->cid(),
AccessMode::Type::EXCLUSIVE);
arangodb::SingleCollectionTransaction trx(
ctx, _logicalCollection->id(), AccessMode::Type::EXCLUSIVE
);
auto res = trx.begin();
if (res.fail()) {
THROW_ARANGO_EXCEPTION(res);
}
@@ -469,7 +469,10 @@ Result RocksDBEdgeIndex::insertInternal(transaction::Methods* trx,
if (r.ok()) {
std::hash<StringRef> hasher;
uint64_t hash = static_cast<uint64_t>(hasher(fromToRef));
RocksDBTransactionState::toState(trx)->trackIndexInsert(_collection->cid(), id(), hash);
RocksDBTransactionState::toState(trx)->trackIndexInsert(
_collection->id(), id(), hash
);
return IndexResult();
} else {
return IndexResult(r.errorNumber(), this);

@@ -500,7 +503,10 @@ Result RocksDBEdgeIndex::removeInternal(transaction::Methods* trx,
if (res.ok()) {
std::hash<StringRef> hasher;
uint64_t hash = static_cast<uint64_t>(hasher(fromToRef));
RocksDBTransactionState::toState(trx)->trackIndexRemove(_collection->cid(), id(), hash);
RocksDBTransactionState::toState(trx)->trackIndexRemove(
_collection->id(), id(), hash
);
return IndexResult();
} else {
return IndexResult(res.errorNumber(), this);
@@ -1091,12 +1091,12 @@ arangodb::Result RocksDBEngine::dropCollection(
// Prepare collection remove batch
RocksDBLogValue logValue = RocksDBLogValue::CollectionDrop(
vocbase->id(), collection->cid(),
StringRef(collection->globallyUniqueId()));
vocbase->id(), collection->id(), StringRef(collection->globallyUniqueId())
);
rocksdb::WriteBatch batch;
batch.PutLogData(logValue.slice());
RocksDBKey key;
key.constructCollection(vocbase->id(), collection->cid());
key.constructCollection(vocbase->id(), collection->id());
batch.Delete(RocksDBColumnFamily::definitions(), key.string());
rocksdb::Status res = _db->Write(wo, &batch);

@@ -1115,7 +1115,7 @@ arangodb::Result RocksDBEngine::dropCollection(
// remove from map
{
WRITE_LOCKER(guard, _mapLock);
_collectionMap.erase(collection->cid());
_collectionMap.erase(collection->id());
}
// delete documents

@@ -1200,9 +1200,14 @@ arangodb::Result RocksDBEngine::renameCollection(
VPackBuilder builder =
collection->toVelocyPackIgnore({"path", "statusString"}, true, true);
int res = writeCreateCollectionMarker(
vocbase->id(), collection->cid(), builder.slice(),
RocksDBLogValue::CollectionRename(vocbase->id(), collection->cid(),
StringRef(oldName)));
vocbase->id(),
collection->id(),
builder.slice(),
RocksDBLogValue::CollectionRename(
vocbase->id(), collection->id(), StringRef(oldName)
)
);
return arangodb::Result(res);
}

@@ -2012,3 +2017,7 @@ void RocksDBEngine::releaseTick(TRI_voc_tick_t tick) {
}
} // namespace arangodb
// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
@@ -420,8 +420,10 @@ Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
if (numChunks > 0) {
// first chunk
SingleCollectionTransaction trx(
transaction::StandaloneContext::Create(syncer.vocbase()), col->cid(),
AccessMode::Type::EXCLUSIVE);
transaction::StandaloneContext::Create(syncer.vocbase()),
col->id(),
AccessMode::Type::EXCLUSIVE
);
trx.addHint(
transaction::Hints::Hint::RECOVERY); // to turn off waitForSync!

@@ -480,8 +482,10 @@ Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
}
SingleCollectionTransaction trx(
transaction::StandaloneContext::Create(syncer.vocbase()), col->cid(),
AccessMode::Type::EXCLUSIVE);
transaction::StandaloneContext::Create(syncer.vocbase()),
col->id(),
AccessMode::Type::EXCLUSIVE
);
trx.addHint(
transaction::Hints::Hint::RECOVERY); // to turn off waitForSync!
@@ -63,8 +63,12 @@ RocksDBIndex::RocksDBIndex(
if (_cacheEnabled) {
createCache();
}
RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
engine->addIndexMapping(_objectId, collection->vocbase()->id(), collection->cid(), _iid);
engine->addIndexMapping(
_objectId, collection->vocbase()->id(), collection->id(), _iid
);
}
RocksDBIndex::RocksDBIndex(TRI_idx_iid_t id, LogicalCollection* collection,

@@ -84,8 +88,12 @@ RocksDBIndex::RocksDBIndex(TRI_idx_iid_t id, LogicalCollection* collection,
if (_cacheEnabled) {
createCache();
}
RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
engine->addIndexMapping(_objectId, collection->vocbase()->id(), collection->cid(), _iid);
engine->addIndexMapping(
_objectId, collection->vocbase()->id(), collection->id(), _iid
);
}
RocksDBIndex::~RocksDBIndex() {
@@ -420,7 +420,7 @@ void RocksDBPrimaryIndex::handleValNode(transaction::Methods* trx,
TRI_ASSERT(cid != 0);
TRI_ASSERT(key != nullptr);
if (!_isRunningInCluster && cid != _collection->cid()) {
if (!_isRunningInCluster && cid != _collection->id()) {
// only continue lookup if the id value is syntactically correct and
// refers to "our" collection, using local collection id
return;
@@ -127,7 +127,7 @@ int RocksDBReplicationContext::bindCollection(
if ((_collection == nullptr) ||
((_collection->name() != collectionIdentifier) &&
std::to_string(_collection->cid()) != collectionIdentifier &&
std::to_string(_collection->id()) != collectionIdentifier &&
_collection->globallyUniqueId() != collectionIdentifier)) {
_collection = _trx->vocbase()->lookupCollection(collectionIdentifier).get();
@@ -266,7 +266,7 @@ void RocksDBRestReplicationHandler::handleCommandLoggerFollow() {
return;
}
cid = c->cid();
cid = c->id();
}
auto trxContext = transaction::StandaloneContext::Create(_vocbase);
@@ -614,7 +614,7 @@ Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx,
for (auto& it : hashes) {
// The estimator is only useful if we are in a non-unique indexes
TRI_ASSERT(!_unique);
state->trackIndexInsert(_collection->cid(), id(), it);
state->trackIndexInsert(_collection->id(), id(), it);
}
}

@@ -739,7 +739,7 @@ Result RocksDBVPackIndex::removeInternal(transaction::Methods* trx,
for (auto& it : hashes) {
// The estimator is only useful if we are in a non-unique indexes
TRI_ASSERT(!_unique);
state->trackIndexRemove(_collection->cid(), id(), it);
state->trackIndexRemove(_collection->id(), id(), it);
}
}
@@ -42,7 +42,7 @@ std::shared_ptr<LogicalCollection> CollectionNameResolver::getCollection(
#else
auto dataSource = getDataSource(id);
return dataSource->category() == LogicalCollection::category()
return dataSource && dataSource->category() == LogicalCollection::category()
? std::static_pointer_cast<LogicalCollection>(dataSource) : nullptr;
#endif
}

@@ -55,7 +55,7 @@ std::shared_ptr<LogicalCollection> CollectionNameResolver::getCollection(
#else
auto dataSource = getDataSource(nameOrId);
return dataSource->category() == LogicalCollection::category()
return dataSource && dataSource->category() == LogicalCollection::category()
? std::static_pointer_cast<LogicalCollection>(dataSource) : nullptr;
#endif
}

@@ -270,18 +270,16 @@ std::string CollectionNameResolver::localNameLookup(TRI_voc_cid_t cid) const {
// DBserver case of a shard:
name = arangodb::basics::StringUtils::itoa((*it).second->planId());
std::shared_ptr<LogicalCollection> ci;
try {
TRI_ASSERT(it->second->vocbase());
ci = ClusterInfo::instance()->getCollection(
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
dynamic_cast<LogicalCollection*>(it->second.get())->dbName(),
#else
static_cast<LogicalCollection*>(it->second.get())->dbName(),
#endif
name
it->second->vocbase()->name(), name
);
}
catch (...) {
}
if (ci == nullptr) {
name = ""; // collection unknown
} else {

@@ -378,7 +376,7 @@ std::shared_ptr<LogicalView> CollectionNameResolver::getView(
#else
auto dataSource = getDataSource(id);
return dataSource->category() == LogicalView::category()
return dataSource && dataSource->category() == LogicalView::category()
? std::static_pointer_cast<LogicalView>(dataSource) : nullptr;
#endif
}

@@ -391,7 +389,7 @@ std::shared_ptr<LogicalView> CollectionNameResolver::getView(
#else
auto dataSource = getDataSource(nameOrId);
return dataSource->category() == LogicalView::category()
return dataSource && dataSource->category() == LogicalView::category()
? std::static_pointer_cast<LogicalView>(dataSource) : nullptr;
#endif
}
@@ -2386,7 +2386,8 @@ static void JS_StatusVocbaseCol(
}
if (ServerState::instance()->isCoordinator()) {
std::string const databaseName(collection->dbName());
TRI_ASSERT(collection->vocbase());
auto& databaseName = collection->vocbase()->name();
try {
std::shared_ptr<LogicalCollection> const ci =

@@ -2399,7 +2400,7 @@ static void JS_StatusVocbaseCol(
}
}
// intentionally falls through
TRI_vocbase_col_status_e status = collection->status();
TRI_V8_RETURN(v8::Number::New(isolate, (int)status));

@@ -2467,7 +2468,8 @@ static void JS_TypeVocbaseCol(v8::FunctionCallbackInfo<v8::Value> const& args) {
}
if (ServerState::instance()->isCoordinator()) {
std::string const databaseName = collection->dbName();
TRI_ASSERT(collection->vocbase());
auto& databaseName = collection->vocbase()->name();
try {
std::shared_ptr<LogicalCollection> const ci =
@@ -460,10 +460,6 @@ uint64_t LogicalCollection::numberDocuments(transaction::Methods* trx) const {
uint32_t LogicalCollection::internalVersion() const { return _internalVersion; }
std::string LogicalCollection::cid_as_string() const {
return std::to_string(id());
}
TRI_col_type_e LogicalCollection::type() const { return _type; }
std::string LogicalCollection::globallyUniqueId() const {

@@ -490,11 +486,6 @@ void LogicalCollection::avoidServers(std::vector<std::string> const& a) {
_avoidServers = a;
}
std::string LogicalCollection::dbName() const {
TRI_ASSERT(vocbase());
return vocbase()->name();
}
TRI_vocbase_col_status_e LogicalCollection::status() const { return _status; }
TRI_vocbase_col_status_e LogicalCollection::getStatusLocked() {

@@ -1021,7 +1012,10 @@ std::shared_ptr<arangodb::velocypack::Builder> LogicalCollection::figures() cons
auto builder = std::make_shared<VPackBuilder>();
builder->openObject();
builder->close();
int res = figuresOnCoordinator(dbName(), std::to_string(id()), builder);
TRI_ASSERT(vocbase());
int res =
figuresOnCoordinator(vocbase()->name(), std::to_string(id()), builder);
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(res);
@@ -113,15 +113,8 @@ class LogicalCollection: public LogicalDataSource {
uint32_t internalVersion() const;
inline TRI_voc_cid_t cid() const { return id(); }
/// @deprecated only required/used for ShardDistributionReporterTest (do not use)
virtual std::string cid_as_string() const;
TRI_col_type_e type() const;
std::string dbName() const;
std::string globallyUniqueId() const;
// Does always return the cid

@@ -163,7 +156,6 @@ class LogicalCollection: public LogicalDataSource {
// SECTION: Properties
TRI_voc_rid_t revision(transaction::Methods*) const;
bool isLocal() const;
using LogicalDataSource::deleted; // required by TRI_vocbase_t
bool isSystem() const;
bool waitForSync() const;
bool isSmart() const;
@@ -115,7 +115,7 @@ class LogicalDataSource {
inline Category const& category() const noexcept { return _category; }
inline bool deleted() const noexcept { return _deleted; }
virtual void drop() = 0;
inline TRI_voc_cid_t id() const noexcept { return _id; }
inline TRI_voc_cid_t const& id() const noexcept { return _id; } // reference required for ShardDistributionReporterTest
inline std::string const& name() const noexcept { return _name; }
inline TRI_voc_cid_t planId() const noexcept { return _planId; }
virtual Result rename(std::string&& newName, bool doSync) = 0;
@@ -165,15 +165,6 @@ void LogicalView::drop() {
_physical->drop();
}
VPackBuilder LogicalView::toVelocyPack(bool includeProperties,
bool includeSystem) const {
VPackBuilder builder;
builder.openObject();
toVelocyPack(builder, includeProperties, includeSystem);
builder.close();
return builder;
}
void LogicalView::toVelocyPack(VPackBuilder& result, bool includeProperties,
bool includeSystem) const {
// We write into an open object
@@ -91,9 +91,6 @@ class LogicalView final: public LogicalDataSource {
virtual Result rename(std::string&& newName, bool doSync) override;
// SECTION: Serialization
velocypack::Builder toVelocyPack(bool includeProperties = false,
bool includeSystem = false) const;
void toVelocyPack(velocypack::Builder&, bool includeProperties = false,
bool includeSystem = false) const;
@@ -236,13 +236,21 @@ Result Collections::load(TRI_vocbase_t* vocbase, LogicalCollection* coll) {
TRI_ASSERT(coll != nullptr);
if (ServerState::instance()->isCoordinator()) {
TRI_ASSERT(coll->vocbase());
#ifdef USE_ENTERPRISE
return ULColCoordinatorEnterprise(coll->dbName(), coll->cid_as_string(),
TRI_VOC_COL_STATUS_LOADED);
return ULColCoordinatorEnterprise(
coll->vocbase()->name(),
std::to_string(coll->id()),
TRI_VOC_COL_STATUS_LOADED
);
#else
auto ci = ClusterInfo::instance();
return ci->setCollectionStatusCoordinator(
coll->dbName(), std::to_string(coll->id()), TRI_VOC_COL_STATUS_LOADED
coll->vocbase()->name(),
std::to_string(coll->id()),
TRI_VOC_COL_STATUS_LOADED
);
#endif
}

@@ -260,15 +268,18 @@ Result Collections::load(TRI_vocbase_t* vocbase, LogicalCollection* coll) {
Result Collections::unload(TRI_vocbase_t* vocbase, LogicalCollection* coll) {
if (ServerState::instance()->isCoordinator()) {
#ifdef USE_ENTERPRISE
return ULColCoordinatorEnterprise(vocbase->name(), coll->cid_as_string(),
TRI_VOC_COL_STATUS_UNLOADED);
return ULColCoordinatorEnterprise(
vocbase->name(), std::to_string(coll->id()), TRI_VOC_COL_STATUS_UNLOADED
);
#else
auto ci = ClusterInfo::instance();
return ci->setCollectionStatusCoordinator(
vocbase->name(), std::to_string(coll->id()), TRI_VOC_COL_STATUS_UNLOADED
);
#endif
}
return vocbase->unloadCollection(coll, false);
}

@@ -335,7 +346,12 @@ Result Collections::updateProperties(LogicalCollection* coll,
if (ServerState::instance()->isCoordinator()) {
ClusterInfo* ci = ClusterInfo::instance();
auto info = ci->getCollection(coll->dbName(), std::to_string(coll->id()));
TRI_ASSERT(coll->vocbase());
auto info =
ci->getCollection(coll->vocbase()->name(), std::to_string(coll->id()));
return info->updateProperties(props, false);
} else {
auto ctx = transaction::V8Context::CreateWhenRequired(coll->vocbase(), false);

@@ -434,7 +450,8 @@ static Result DropVocbaseColCoordinator(arangodb::LogicalCollection* collection,
return TRI_ERROR_FORBIDDEN;
}
std::string const databaseName(collection->dbName());
TRI_ASSERT(collection->vocbase());
auto& databaseName = collection->vocbase()->name();
auto cid = std::to_string(collection->id());
ClusterInfo* ci = ClusterInfo::instance();
std::string errorMsg;

@@ -466,7 +483,8 @@ Result Collections::drop(TRI_vocbase_t* vocbase, LogicalCollection* coll,
}
}
std::string const dbname = coll->dbName();
TRI_ASSERT(coll->vocbase());
auto& dbname = coll->vocbase()->name();
std::string const collName = coll->name();
Result res;

@@ -534,9 +552,9 @@ Result Collections::warmup(TRI_vocbase_t* vocbase, LogicalCollection* coll) {
Result Collections::revisionId(TRI_vocbase_t* vocbase,
LogicalCollection* coll,
TRI_voc_rid_t& rid) {
TRI_ASSERT(coll != nullptr);
std::string const databaseName(coll->dbName());
TRI_ASSERT(coll->vocbase());
auto& databaseName = coll->vocbase()->name();
auto cid = std::to_string(coll->id());
if (ServerState::instance()->isCoordinator()) {
@@ -100,8 +100,8 @@ arangodb::Result Indexes::getAll(LogicalCollection const* collection,
VPackBuilder tmp;
if (ServerState::instance()->isCoordinator()) {
std::string const databaseName(collection->dbName());
//std::string const cid = collection->cid_as_string();
TRI_ASSERT(collection->vocbase());
auto& databaseName = collection->vocbase()->name();
std::string const& cid = collection->name();
// add code for estimates here

@@ -305,7 +305,8 @@ Result Indexes::ensureIndexCoordinator(
arangodb::LogicalCollection const* collection, VPackSlice const& indexDef,
bool create, VPackBuilder& resultBuilder) {
TRI_ASSERT(collection != nullptr);
std::string const dbName = collection->dbName();
TRI_ASSERT(collection->vocbase());
auto& dbName = collection->vocbase()->name();
auto cid = std::to_string(collection->id());
std::string errorMsg;
int res = ClusterInfo::instance()->ensureIndexCoordinator(

@@ -343,9 +344,11 @@ Result Indexes::ensureIndex(LogicalCollection* collection,
return Result(res);
}
std::string const dbname(collection->dbName());
TRI_ASSERT(collection->vocbase());
auto& dbname = collection->vocbase()->name();
std::string const collname(collection->name());
VPackSlice indexDef = defBuilder.slice();
if (ServerState::instance()->isCoordinator()) {
TRI_ASSERT(indexDef.isObject());

@@ -549,7 +552,8 @@ arangodb::Result Indexes::drop(LogicalCollection const* collection,
#ifdef USE_ENTERPRISE
return Indexes::dropCoordinatorEE(collection, iid);
#else
std::string const databaseName(collection->dbName());
TRI_ASSERT(collection->vocbase());
auto& databaseName = collection->vocbase()->name();
auto cid = std::to_string(collection->id());
std::string errorMsg;
int r = ClusterInfo::instance()->dropIndexCoordinator(databaseName, cid,
@@ -1022,11 +1022,10 @@ std::shared_ptr<arangodb::LogicalCollection> TRI_vocbase_t::lookupCollection(
 lookupDataSource(id)
 );
 #else
-auto dataSource = lookupDataSource(id);
-
-return dataSource && dataSource->category() == LogicalCollection::category()
-? std::static_pointer_cast<LogicalCollection>(dataSource)
-: nullptr;
+auto dataSource = lookupDataSource(id);
+
+return dataSource && dataSource->category() == LogicalCollection::category()
+? std::static_pointer_cast<LogicalCollection>(dataSource) : nullptr;
 #endif
 }

@@ -1039,11 +1038,10 @@ std::shared_ptr<arangodb::LogicalCollection> TRI_vocbase_t::lookupCollection(
 lookupDataSource(nameOrId)
 );
 #else
-auto dataSource = lookupDataSource(nameOrId);
-
-return dataSource && dataSource->category() == LogicalCollection::category()
-? std::static_pointer_cast<LogicalCollection>(dataSource)
-: nullptr;
+auto dataSource = lookupDataSource(nameOrId);
+
+return dataSource && dataSource->category() == LogicalCollection::category()
+? std::static_pointer_cast<LogicalCollection>(dataSource) : nullptr;
 #endif
 }

@@ -1058,12 +1056,14 @@ std::shared_ptr<arangodb::LogicalCollection> TRI_vocbase_t::lookupCollectionByUu
 #ifdef ARANGODB_ENABLE_MAINTAINER_MODE
 return itr == _dataSourceByUuid.end()
 ? nullptr
-: std::dynamic_pointer_cast<arangodb::LogicalCollection>(itr->second);
+: std::dynamic_pointer_cast<arangodb::LogicalCollection>(itr->second)
+;
 #else
 return itr == _dataSourceByUuid.end()
 || itr->second->category() != LogicalCollection::category()
 ? nullptr
-: std::static_pointer_cast<LogicalCollection>(itr->second);
+: std::static_pointer_cast<LogicalCollection>(itr->second)
+;
 #endif
 }

@@ -1119,11 +1119,10 @@ std::shared_ptr<arangodb::LogicalView> TRI_vocbase_t::lookupView(
 lookupDataSource(id)
 );
 #else
-auto dataSource = lookupDataSource(id);
-
-return dataSource && dataSource->category() == LogicalView::category()
-? std::static_pointer_cast<LogicalView>(dataSource)
-: nullptr;
+auto dataSource = lookupDataSource(id);
+
+return dataSource && dataSource->category() == LogicalView::category()
+? std::static_pointer_cast<LogicalView>(dataSource) : nullptr;
 #endif
 }

@@ -1136,11 +1135,10 @@ std::shared_ptr<arangodb::LogicalView> TRI_vocbase_t::lookupView(
 lookupDataSource(nameOrId)
 );
 #else
-auto dataSource = lookupDataSource(nameOrId);
-
-return dataSource && dataSource->category() == LogicalView::category()
-? std::static_pointer_cast<LogicalView>(dataSource)
-: nullptr;
+auto dataSource = lookupDataSource(nameOrId);
+
+return dataSource && dataSource->category() == LogicalView::category()
+? std::static_pointer_cast<LogicalView>(dataSource) : nullptr;
 #endif
 }

@@ -2104,4 +2102,4 @@ TRI_voc_rid_t TRI_StringToRid(char const* p, size_t len, bool& isOld,
 // -----------------------------------------------------------------------------
 // --SECTION-- END-OF-FILE
 // -----------------------------------------------------------------------------
 // -----------------------------------------------------------------------------
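
Editor's note: the vocbase.cpp hunks above only reflow the lookup functions, but the pattern they use — fetch the data source, check its category tag, then std::static_pointer_cast to the concrete type or return nullptr — is easier to see in isolation. A self-contained sketch under invented types (DataSource, Collection, View, registry); this is not the ArangoDB implementation, only the shape of the check-and-downcast:

// --- editor's sketch (hypothetical types, not ArangoDB source) ---
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

struct DataSource {
  enum class Category { Collection, View };
  virtual ~DataSource() = default;
  virtual Category category() const = 0;
};

struct Collection : DataSource {
  Category category() const override { return Category::Collection; }
};

struct View : DataSource {
  Category category() const override { return Category::View; }
};

std::unordered_map<std::string, std::shared_ptr<DataSource>> registry;

std::shared_ptr<Collection> lookupCollection(std::string const& name) {
  auto it = registry.find(name);
  auto dataSource = (it == registry.end()) ? nullptr : it->second;

  // same shape as the diff: verify the category, then static_pointer_cast
  return dataSource && dataSource->category() == DataSource::Category::Collection
    ? std::static_pointer_cast<Collection>(dataSource) : nullptr;
}

int main() {
  registry["docs"] = std::make_shared<Collection>();
  registry["v"] = std::make_shared<View>();
  std::cout << (lookupCollection("docs") != nullptr) << "\n";  // 1
  std::cout << (lookupCollection("v") != nullptr) << "\n";     // 0: wrong category
}
// --- end sketch ---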
@@ -111,6 +111,7 @@ SCENARIO("The shard distribution can be reported", "[cluster][shards]") {
 std::string dbname = "UnitTestDB";
 std::string colName = "UnitTestCollection";
+TRI_voc_cid_t cid = 1337;
 std::string cidString = "1337";
 std::string s1 = "s1234";

@@ -170,7 +171,7 @@ SCENARIO("The shard distribution can be reported", "[cluster][shards]") {
 fakeit::When(
 ConstOverloadedMethod(colMock, shardIds, std::shared_ptr<ShardMap>()))
 .AlwaysReturn(shards);
-fakeit::When(Method(colMock, cid_as_string)).AlwaysReturn(cidString);
+const_cast<TRI_voc_cid_t&>(col.id()) = cid;
 ShardDistributionReporter testee(
 std::shared_ptr<ClusterComm>(&cc, [](ClusterComm*) {}), &ci);
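
Editor's note: with cid_as_string() gone, the shard-distribution test above no longer stubs that method through fakeit; instead it writes the desired id straight through the const reference returned by id(). A sketch of that trick on an invented MockCollection (the real test works on a mocked LogicalCollection):

// --- editor's sketch (hypothetical mock type, not ArangoDB source) ---
#include <cstdint>
#include <iostream>

struct MockCollection {
  std::uint64_t _id = 0;
  std::uint64_t const& id() const { return _id; }
};

int main() {
  MockCollection col;
  std::uint64_t cid = 1337;

  // the accessor only hands out a const reference, so the test writes through it;
  // this is well-defined here because col itself is not const
  const_cast<std::uint64_t&>(col.id()) = cid;
  std::cout << col.id() << "\n";  // prints 1337
}
// --- end sketch ---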
@@ -383,7 +383,7 @@ SECTION("test_async_index") {
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(collection0->vocbase()),
-collection0->cid(),
+collection0->id(),
 arangodb::AccessMode::Type::WRITE
 );
 resThread0 = trx.begin().ok();

@@ -414,7 +414,7 @@ SECTION("test_async_index") {
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(collection1->vocbase()),
-collection1->cid(),
+collection1->id(),
 arangodb::AccessMode::Type::WRITE
 );
 resThread1 = trx.begin().ok();
@@ -198,7 +198,7 @@ TEST_CASE("IResearchQueryTestAggregate", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));

@@ -230,7 +230,7 @@ TEST_CASE("IResearchQueryTestAggregate", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));
@@ -198,7 +198,7 @@ TEST_CASE("IResearchQueryTestAnd", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));

@@ -230,7 +230,7 @@ TEST_CASE("IResearchQueryTestAnd", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));
@@ -202,7 +202,7 @@ TEST_CASE("IResearchQueryTestBooleanTerm", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));

@@ -236,7 +236,7 @@ TEST_CASE("IResearchQueryTestBooleanTerm", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));
@@ -198,7 +198,7 @@ TEST_CASE("IResearchQueryTestComplexBoolean", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));

@@ -230,7 +230,7 @@ TEST_CASE("IResearchQueryTestComplexBoolean", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));
@@ -198,7 +198,7 @@ TEST_CASE("IResearchQueryTestExists", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));

@@ -230,7 +230,7 @@ TEST_CASE("IResearchQueryTestExists", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));
@@ -197,7 +197,7 @@ TEST_CASE("IResearchQueryTestIn", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));

@@ -229,7 +229,7 @@ TEST_CASE("IResearchQueryTestIn", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));
@@ -201,7 +201,7 @@ TEST_CASE("IResearchQueryTestNullTerm", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));

@@ -235,7 +235,7 @@ TEST_CASE("IResearchQueryTestNullTerm", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));
@@ -198,7 +198,7 @@ TEST_CASE("IResearchQueryTestPhrase", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));

@@ -230,7 +230,7 @@ TEST_CASE("IResearchQueryTestPhrase", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));
@@ -205,7 +205,7 @@ TEST_CASE("IResearchQueryTestTokens", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));

@@ -237,7 +237,7 @@ TEST_CASE("IResearchQueryTestTokens", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));
@@ -199,7 +199,7 @@ TEST_CASE("IResearchQueryTestTraversal", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));

@@ -231,7 +231,7 @@ TEST_CASE("IResearchQueryTestTraversal", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));

@@ -253,7 +253,7 @@ TEST_CASE("IResearchQueryTestTraversal", "[iresearch][iresearch-query]") {
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));
@@ -198,7 +198,7 @@ TEST_CASE("IResearchQueryTestValue", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));

@@ -230,7 +230,7 @@ TEST_CASE("IResearchQueryTestValue", "[iresearch][iresearch-query]") {
 options.returnNew = true;
 arangodb::SingleCollectionTransaction trx(
 arangodb::transaction::StandaloneContext::Create(&vocbase),
-collection->cid(),
+collection->id(),
 arangodb::AccessMode::Type::WRITE
 );
 CHECK((trx.begin().ok()));
@@ -841,7 +841,7 @@ SECTION("test_link") {
 }
 {
-CHECK((true == viewImpl->link(logicalCollection->cid(), arangodb::velocypack::Slice::nullSlice()).ok()));
+CHECK((true == viewImpl->link(logicalCollection->id(), arangodb::velocypack::Slice::nullSlice()).ok()));
 std::set<TRI_voc_cid_t> cids;
 viewImpl->visitCollections([&cids](TRI_voc_cid_t cid)->bool { cids.emplace(cid); return true; });
 CHECK((0 == cids.size()));

@@ -873,7 +873,7 @@ SECTION("test_link") {
 }
 {
-CHECK((true == viewImpl->link(logicalCollection->cid(), arangodb::velocypack::Slice::nullSlice()).ok()));
+CHECK((true == viewImpl->link(logicalCollection->id(), arangodb::velocypack::Slice::nullSlice()).ok()));
 std::set<TRI_voc_cid_t> cids;
 viewImpl->visitCollections([&cids](TRI_voc_cid_t cid)->bool { cids.emplace(cid); return true; });
 CHECK((0 == cids.size()));

@@ -925,7 +925,7 @@ SECTION("test_link") {
 }
 {
-CHECK((true == viewImpl->link(logicalCollection->cid(), arangodb::iresearch::emptyObjectSlice()).ok()));
+CHECK((true == viewImpl->link(logicalCollection->id(), arangodb::iresearch::emptyObjectSlice()).ok()));
 std::set<TRI_voc_cid_t> cids;
 viewImpl->visitCollections([&cids](TRI_voc_cid_t cid)->bool { cids.emplace(cid); return true; });
 std::unordered_set<TRI_voc_cid_t> expected = { 100 };

@@ -968,7 +968,7 @@ SECTION("test_link") {
 }
 {
-CHECK((true == viewImpl->link(logicalCollection->cid(), arangodb::iresearch::emptyObjectSlice()).ok()));
+CHECK((true == viewImpl->link(logicalCollection->id(), arangodb::iresearch::emptyObjectSlice()).ok()));
 std::set<TRI_voc_cid_t> cids;
 viewImpl->visitCollections([&cids](TRI_voc_cid_t cid)->bool { cids.emplace(cid); return true; });
 std::unordered_set<TRI_voc_cid_t> expected = { 100 };

@@ -1020,7 +1020,7 @@ SECTION("test_link") {
 builder.add("includeAllFields", arangodb::velocypack::Value("abc"));
 builder.close();
 auto slice = builder.slice();
-CHECK((false == viewImpl->link(logicalCollection->cid(), slice).ok()));
+CHECK((false == viewImpl->link(logicalCollection->id(), slice).ok()));
 std::set<TRI_voc_cid_t> cids;
 viewImpl->visitCollections([&cids](TRI_voc_cid_t cid)->bool { cids.emplace(cid); return true; });
 std::unordered_set<TRI_voc_cid_t> expected = { 100 };
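
Editor's note: the test_link hunks above keep asserting which collections a view knows about by collecting ids through visitCollections. A self-contained sketch of that visitor pattern with an invented MockView; only the lambda-into-std::set collection mirrors the test code:

// --- editor's sketch (hypothetical mock type, not ArangoDB source) ---
#include <cstdint>
#include <functional>
#include <iostream>
#include <set>
#include <vector>

struct MockView {
  std::vector<std::uint64_t> linked;  // ids of linked collections

  // calls the visitor for each linked id; stops early if the visitor returns false
  bool visitCollections(std::function<bool(std::uint64_t)> const& visitor) const {
    for (auto id : linked) {
      if (!visitor(id)) {
        return false;
      }
    }
    return true;
  }
};

int main() {
  MockView view;
  view.linked = {100, 101};

  std::set<std::uint64_t> cids;
  view.visitCollections([&cids](std::uint64_t cid) -> bool {
    cids.emplace(cid);
    return true;  // keep visiting
  });
  std::cout << cids.size() << "\n";  // prints 2
}
// --- end sketch ---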
@@ -1432,7 +1432,7 @@ SECTION("test_unregister_link") {
 EMPTY, EMPTY, EMPTY, arangodb::transaction::Options()
 );
 CHECK((trx.begin().ok()));
-view->insert(trx, logicalCollection->cid(), arangodb::LocalDocumentId(0), doc->slice(), meta);
+view->insert(trx, logicalCollection->id(), arangodb::LocalDocumentId(0), doc->slice(), meta);
 CHECK((trx.commit().ok()));
 }

@@ -1515,7 +1515,7 @@ SECTION("test_unregister_link") {
 EMPTY, EMPTY, EMPTY, arangodb::transaction::Options()
 );
 CHECK((trx.begin().ok()));
-view->insert(trx, logicalCollection->cid(), arangodb::LocalDocumentId(0), doc->slice(), meta);
+view->insert(trx, logicalCollection->id(), arangodb::LocalDocumentId(0), doc->slice(), meta);
 CHECK((trx.commit().ok()));
 }
@@ -1888,8 +1888,8 @@ SECTION("test_transaction_registration") {
 );
 CHECK((trx.begin().ok()));
 CHECK((2 == trx.state()->numCollections()));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection0->cid())));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection1->cid())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection1->id())));
 std::unordered_set<std::string> expectedNames = { "testCollection0", "testCollection1" };
 auto actualNames = trx.state()->collectionNames();

@@ -1910,8 +1910,8 @@ SECTION("test_transaction_registration") {
 );
 CHECK((trx.begin().ok()));
 CHECK((2 == trx.state()->numCollections()));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection0->cid())));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection1->cid())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection1->id())));
 std::unordered_set<std::string> expectedNames = { "testCollection0", "testCollection1" };
 auto actualNames = trx.state()->collectionNames();

@@ -1932,8 +1932,8 @@ SECTION("test_transaction_registration") {
 );
 CHECK((trx.begin().ok()));
 CHECK((2 == trx.state()->numCollections()));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection0->cid())));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection1->cid())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection1->id())));
 std::unordered_set<std::string> expectedNames = { "testCollection0", "testCollection1" };
 auto actualNames = trx.state()->collectionNames();

@@ -1954,8 +1954,8 @@ SECTION("test_transaction_registration") {
 );
 CHECK((trx.begin().ok()));
 CHECK((2 == trx.state()->numCollections()));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection0->cid())));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection1->cid())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection1->id())));
 std::unordered_set<std::string> expectedNames = { "testCollection0", "testCollection1" };
 auto actualNames = trx.state()->collectionNames();

@@ -1976,8 +1976,8 @@ SECTION("test_transaction_registration") {
 );
 CHECK((trx.begin().ok()));
 CHECK((2 == trx.state()->numCollections()));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection0->cid())));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection1->cid())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection1->id())));
 std::unordered_set<std::string> expectedNames = { "testCollection0", "testCollection1" };
 auto actualNames = trx.state()->collectionNames();

@@ -1998,8 +1998,8 @@ SECTION("test_transaction_registration") {
 );
 CHECK((trx.begin().ok()));
 CHECK((2 == trx.state()->numCollections()));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection0->cid())));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection1->cid())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection1->id())));
 std::unordered_set<std::string> expectedNames = { "testCollection0", "testCollection1" };
 auto actualNames = trx.state()->collectionNames();
@@ -2023,7 +2023,7 @@ SECTION("test_transaction_registration") {
 );
 CHECK((trx.begin().ok()));
 CHECK((1 == trx.state()->numCollections()));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection0->cid())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
 std::unordered_set<std::string> expectedNames = { "testCollection0" };
 auto actualNames = trx.state()->collectionNames();

@@ -2044,7 +2044,7 @@ SECTION("test_transaction_registration") {
 );
 CHECK((trx.begin().ok()));
 CHECK((1 == trx.state()->numCollections()));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection0->cid())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
 std::unordered_set<std::string> expectedNames = { "testCollection0" };
 auto actualNames = trx.state()->collectionNames();

@@ -2065,7 +2065,7 @@ SECTION("test_transaction_registration") {
 );
 CHECK((trx.begin().ok()));
 CHECK((1 == trx.state()->numCollections()));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection0->cid())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
 std::unordered_set<std::string> expectedNames = { "testCollection0" };
 auto actualNames = trx.state()->collectionNames();

@@ -2086,7 +2086,7 @@ SECTION("test_transaction_registration") {
 );
 CHECK((trx.begin().ok()));
 CHECK((1 == trx.state()->numCollections()));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection0->cid())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
 std::unordered_set<std::string> expectedNames = { "testCollection0" };
 auto actualNames = trx.state()->collectionNames();

@@ -2107,7 +2107,7 @@ SECTION("test_transaction_registration") {
 );
 CHECK((trx.begin().ok()));
 CHECK((1 == trx.state()->numCollections()));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection0->cid())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
 std::unordered_set<std::string> expectedNames = { "testCollection0" };
 auto actualNames = trx.state()->collectionNames();

@@ -2128,7 +2128,7 @@ SECTION("test_transaction_registration") {
 );
 CHECK((trx.begin().ok()));
 CHECK((1 == trx.state()->numCollections()));
-CHECK((nullptr != trx.state()->findCollection(logicalCollection0->cid())));
+CHECK((nullptr != trx.state()->findCollection(logicalCollection0->id())));
 std::unordered_set<std::string> expectedNames = { "testCollection0" };
 auto actualNames = trx.state()->collectionNames();
@@ -2370,7 +2370,7 @@ SECTION("test_update_overwrite") {
 arangodb::iresearch::IResearchViewMeta expectedMeta;
 std::unordered_map<std::string, arangodb::iresearch::IResearchLinkMeta> expectedLinkMeta;
-expectedMeta._collections.insert(logicalCollection0->cid());
+expectedMeta._collections.insert(logicalCollection0->id());
 expectedLinkMeta["testCollection0"]; // use defaults
 CHECK((view->updateProperties(updateJson->slice(), true, false).ok()));

@@ -2417,7 +2417,7 @@ SECTION("test_update_overwrite") {
 arangodb::iresearch::IResearchViewMeta expectedMeta;
 std::unordered_map<std::string, arangodb::iresearch::IResearchLinkMeta> expectedLinkMeta;
-expectedMeta._collections.insert(logicalCollection1->cid());
+expectedMeta._collections.insert(logicalCollection1->id());
 expectedLinkMeta["testCollection1"]; // use defaults
 CHECK((view->updateProperties(updateJson->slice(), false, false).ok()));

@@ -2653,7 +2653,7 @@ SECTION("test_update_partial") {
 \"testCollection\": {} \
 }}");
-expectedMeta._collections.insert(logicalCollection->cid());
+expectedMeta._collections.insert(logicalCollection->id());
 expectedLinkMeta["testCollection"]; // use defaults
 persisted = false;
 CHECK((view->updateProperties(updateJson->slice(), true, false).ok()));

@@ -2725,7 +2725,7 @@ SECTION("test_update_partial") {
 \"testCollection\": {} \
 }}");
-expectedMeta._collections.insert(logicalCollection->cid());
+expectedMeta._collections.insert(logicalCollection->id());
 expectedLinkMeta["testCollection"]; // use defaults
 persisted = false;
 CHECK((view->updateProperties(updateJson->slice(), true, false).ok()));

@@ -2872,7 +2872,7 @@ SECTION("test_update_partial") {
 arangodb::iresearch::IResearchViewMeta expectedMeta;
-expectedMeta._collections.insert(logicalCollection->cid());
+expectedMeta._collections.insert(logicalCollection->id());
 {
 auto updateJson = arangodb::velocypack::Parser::fromJson("{ \
@@ -523,7 +523,7 @@ bool mergeSlice(
 void ContextDataMock::pinData(arangodb::LogicalCollection* collection) {
 if (collection) {
-pinned.emplace(collection->cid());
+pinned.emplace(collection->id());
 }
 }