Use uuid in initial sync to ensure proper collection mapping (#3965)

* Use uuid in initial sync to ensure proper collection mapping
* fix compile error after merge

parent e1d9135421
commit 19f68df395
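Editor's note: the core of this change is visible in DatabaseInitialSyncer::handleCollection() further down. When the leader reports a globallyUniqueId for a collection, dump and keys requests now identify the collection by that uuid instead of by numeric cid and name, which keeps the mapping stable across renames and id reuse. A minimal standalone sketch of that selection rule follows; urlEncode here is a hypothetical stand-in for ArangoDB's StringUtils::urlEncode, and the uuid value is made up.

    #include <cctype>
    #include <cstdint>
    #include <iostream>
    #include <string>

    // Stand-in for StringUtils::urlEncode; percent-encodes everything outside
    // the unreserved set, including the '/' that generated uuids contain.
    std::string urlEncode(std::string const& in) {
      static char const hex[] = "0123456789ABCDEF";
      std::string out;
      for (unsigned char c : in) {
        if (std::isalnum(c) || c == '-' || c == '_' || c == '.' || c == '~') {
          out.push_back(c);
        } else {
          out.push_back('%');
          out.push_back(hex[c >> 4]);
          out.push_back(hex[c & 0x0F]);
        }
      }
      return out;
    }

    // Mapping rule from the diff below: prefer the leader's uuid, fall back
    // to the stringified numeric cid for leaders that do not send one.
    std::string leaderCollectionId(std::string const& masterUuid, uint64_t masterCid) {
      return !masterUuid.empty() ? masterUuid : std::to_string(masterCid);
    }

    int main() {
      // 3.3+ leader: uuid such as "hfee1dead/12345" (made up); it contains '/',
      // so it must be url-encoded before being put into a query string
      std::string coll = leaderCollectionId("hfee1dead/12345", 12345);
      std::cout << "/_api/replication/dump?collection=" << urlEncode(coll) << "\n";

      // pre-3.3 leader: no uuid reported, fall back to the numeric cid
      coll = leaderCollectionId("", 12345);
      std::cout << "/_api/replication/dump?collection=" << urlEncode(coll) << "\n";
    }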
@@ -3418,11 +3418,8 @@ int MMFilesEngine::saveReplicationApplierConfiguration(std::string const& filena
 Result MMFilesEngine::handleSyncKeys(arangodb::DatabaseInitialSyncer& syncer,
                                      arangodb::LogicalCollection* col,
-                                     std::string const& keysId,
-                                     std::string const& cid,
-                                     std::string const& collectionName,
-                                     TRI_voc_tick_t maxTick) {
-  return handleSyncKeysMMFiles(syncer, col, keysId, cid, collectionName, maxTick);
+                                     std::string const& keysId) {
+  return handleSyncKeysMMFiles(syncer, col, keysId);
 }
 
 Result MMFilesEngine::createLoggerState(TRI_vocbase_t* vocbase, VPackBuilder& builder){

@@ -102,8 +102,7 @@ class MMFilesEngine final : public StorageEngine {
                             bool doSync) override;
   Result handleSyncKeys(arangodb::DatabaseInitialSyncer& syncer,
                         arangodb::LogicalCollection* col,
-                        std::string const& keysId, std::string const& cid,
-                        std::string const& collectionName, TRI_voc_tick_t maxTick) override;
+                        std::string const& keysId) override;
 
   Result createLoggerState(TRI_vocbase_t* vocbase, VPackBuilder& builder) override;
   Result createTickRanges(VPackBuilder& builder) override;
@@ -110,14 +110,11 @@ static bool FindRange(std::vector<uint8_t const*> const& markers,
 }
 
 Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
-                             arangodb::LogicalCollection* col,
-                             std::string const& keysId,
-                             std::string const& cid,
-                             std::string const& collectionName,
-                             TRI_voc_tick_t maxTick) {
+                             arangodb::LogicalCollection* coll,
+                             std::string const& keysId) {
   std::string progress =
-      "collecting local keys for collection '" + collectionName + "'";
+      "collecting local keys for collection '" + coll->name() + "'";
   syncer.setProgress(progress);
 
   // fetch all local keys from primary index
@@ -129,7 +126,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
   // note: the ditch also protects against unloading the collection
   {
     SingleCollectionTransaction trx(
-        transaction::StandaloneContext::Create(syncer.vocbase()), col->cid(),
+        transaction::StandaloneContext::Create(syncer.vocbase()), coll->cid(),
         AccessMode::Type::READ);
 
     Result res = trx.begin();
@@ -138,7 +135,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
       return Result(res.errorNumber(), std::string("unable to start transaction (") + std::string(__FILE__) + std::string(":") + std::to_string(__LINE__) + std::string("): ") + res.errorMessage());
     }
 
-    ditch = arangodb::MMFilesCollection::toMMFilesCollection(col)
+    ditch = arangodb::MMFilesCollection::toMMFilesCollection(coll)
                 ->ditches()
                 ->createMMFilesDocumentDitch(false, __FILE__, __LINE__);
 
@@ -149,13 +146,13 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
 
   TRI_ASSERT(ditch != nullptr);
 
-  TRI_DEFER(arangodb::MMFilesCollection::toMMFilesCollection(col)
+  TRI_DEFER(arangodb::MMFilesCollection::toMMFilesCollection(coll)
                 ->ditches()
                 ->freeDitch(ditch));
 
   {
     SingleCollectionTransaction trx(
-        transaction::StandaloneContext::Create(syncer.vocbase()), col->cid(),
+        transaction::StandaloneContext::Create(syncer.vocbase()), coll->cid(),
         AccessMode::Type::READ);
 
     Result res = trx.begin();
@@ -195,7 +192,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
   syncer.sendExtendBarrier();
 
   std::string progress = "sorting " + std::to_string(markers.size()) +
-                         " local key(s) for collection '" + collectionName +
+                         " local key(s) for collection '" + coll->name() +
                          "'";
   syncer.setProgress(progress);
 
@@ -241,7 +238,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
 
   std::string url =
       baseUrl + "/" + keysId + "?chunkSize=" + std::to_string(chunkSize);
-  progress = "fetching remote keys chunks for collection '" + collectionName +
+  progress = "fetching remote keys chunks for collection '" + coll->name() +
              "' from " + url;
   syncer.setProgress(progress);
 
@@ -249,7 +246,8 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
       syncer._client->retryRequest(rest::RequestType::GET, url, nullptr, 0));
 
   if (response == nullptr || !response->isComplete()) {
-    return Result(TRI_ERROR_REPLICATION_NO_RESPONSE, std::string("could not connect to master at ") + syncer._masterInfo._endpoint + ": " + syncer._client->getErrorMessage());
+    return Result(TRI_ERROR_REPLICATION_NO_RESPONSE, std::string("could not connect to master at ") +
+                  syncer._masterInfo._endpoint + ": " + syncer._client->getErrorMessage());
   }
 
   TRI_ASSERT(response != nullptr);
@@ -287,7 +285,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
   if (n > 0) {
     // first chunk
     SingleCollectionTransaction trx(
-        transaction::StandaloneContext::Create(syncer.vocbase()), col->cid(),
+        transaction::StandaloneContext::Create(syncer.vocbase()), coll->cid(),
         AccessMode::Type::WRITE);
 
     Result res = trx.begin();
@@ -318,7 +316,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
       keyBuilder.add(StaticStrings::KeyString, VPackValue(key));
       keyBuilder.close();
 
-      trx.remove(collectionName, keyBuilder.slice(), options);
+      trx.remove(coll->name(), keyBuilder.slice(), options);
     }
 
     // last high
@@ -344,7 +342,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
       keyBuilder.add(StaticStrings::KeyString, VPackValue(key));
       keyBuilder.close();
 
-      trx.remove(collectionName, keyBuilder.slice(), options);
+      trx.remove(coll->name(), keyBuilder.slice(), options);
     }
 
     trx.commit();
@@ -359,7 +357,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
   }
 
   SingleCollectionTransaction trx(
-      transaction::StandaloneContext::Create(syncer.vocbase()), col->cid(),
+      transaction::StandaloneContext::Create(syncer.vocbase()), coll->cid(),
       AccessMode::Type::WRITE);
 
   Result res = trx.begin();
@@ -368,7 +366,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
     return Result(res.errorNumber(), std::string("unable to start transaction : ") + res.errorMessage());
  }
 
-  trx.pinData(col->cid());  // will throw when it fails
+  trx.pinData(coll->cid());  // will throw when it fails
 
   // We do not take responsibility for the index.
   // The LogicalCollection is protected by trx.
@@ -382,7 +380,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
 
     size_t const currentChunkId = i;
     progress = "processing keys chunk " + std::to_string(currentChunkId) +
-               " for collection '" + collectionName + "'";
+               " for collection '" + coll->name() + "'";
     syncer.setProgress(progress);
 
     syncer.sendExtendBatch();
@@ -437,7 +435,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
                       "?type=keys&chunk=" + std::to_string(i) +
                       "&chunkSize=" + std::to_string(chunkSize);
     progress = "fetching keys chunk " + std::to_string(currentChunkId) +
-               " for collection '" + collectionName + "' from " + url;
+               " for collection '" + coll->name() + "' from " + url;
     syncer.setProgress(progress);
 
     std::unique_ptr<httpclient::SimpleHttpResult> response(
@@ -478,7 +476,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
         keyBuilder.add(StaticStrings::KeyString, VPackValue(localKey));
         keyBuilder.close();
 
-        trx.remove(collectionName, keyBuilder.slice(), options);
+        trx.remove(coll->name(), keyBuilder.slice(), options);
         ++nextStart;
       } else {
         break;
@@ -530,7 +528,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
         keyBuilder.add(StaticStrings::KeyString, VPackValue(localKey));
         keyBuilder.close();
 
-        trx.remove(collectionName, keyBuilder.slice(), options);
+        trx.remove(coll->name(), keyBuilder.slice(), options);
         ++nextStart;
       } else if (res == 0) {
         // key match
@@ -606,7 +604,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
                       "&chunkSize=" + std::to_string(chunkSize) + "&offset=" + std::to_string(offsetInChunk);
     progress = "fetching documents chunk " +
                std::to_string(currentChunkId) + " for collection '" +
-               collectionName + "' from " + url;
+               coll->name() + "' from " + url;
 
     syncer.setProgress(progress);
 
@@ -670,7 +668,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
           bool success = physical->readDocument(&trx, conflictId, mmdr);
           if (success) {
             VPackSlice conflictingKey(mmdr.vpack());
-            return trx.remove(collectionName, conflictingKey, options);
+            return trx.remove(coll->name(), conflictingKey, options);
           }
         }
         return OperationResult(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND);
@@ -678,7 +676,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
 
       if (!element) {
         // INSERT
-        OperationResult opRes = trx.insert(collectionName, it, options);
+        OperationResult opRes = trx.insert(coll->name(), it, options);
         if (opRes.fail()) {
           if (opRes.is(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) && opRes.errorMessage() > keySlice.copyString()) {
             // remove conflict and retry
@@ -686,7 +684,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
             if (inner.fail()) {
              return opRes.result;
             }
-            opRes = trx.insert(collectionName, it, options);
+            opRes = trx.insert(coll->name(), it, options);
             if (opRes.fail()) {
              return opRes.result;
            }
@@ -696,7 +694,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
        }
      } else {
        // REPLACE
-        OperationResult opRes = trx.replace(collectionName, it, options);
+        OperationResult opRes = trx.replace(coll->name(), it, options);
        if (opRes.fail()) {
          if (opRes.is(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) && opRes.errorMessage() > keySlice.copyString()) {
            // remove conflict and retry
@@ -704,7 +702,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
            if (inner.fail()) {
              return opRes.result;
            }
-            opRes = trx.replace(collectionName, it, options);
+            opRes = trx.replace(coll->name(), it, options);
            if (opRes.fail()) {
              return opRes.result;
            }
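Editor's note: the function patched above reconciles a collection by key ranges: local keys are read in sorted order, the leader's keys arrive in sorted chunks, and a key present on only one side is either removed locally or fetched from the leader. A toy model of that reconciliation over plain sorted vectors, with transactions, chunking, and HTTP left out (an illustration of the structure, not the real API):

    #include <iostream>
    #include <string>
    #include <vector>

    // Toy reconciliation mirroring handleSyncKeysMMFiles: walk both sorted
    // key lists in order, delete local keys the leader lacks, and collect
    // leader keys that still have to be fetched.
    void reconcile(std::vector<std::string> const& local,
                   std::vector<std::string> const& remote,
                   std::vector<std::string>& toRemove,
                   std::vector<std::string>& toFetch) {
      size_t i = 0, j = 0;
      while (i < local.size() && j < remote.size()) {
        int cmp = local[i].compare(remote[j]);
        if (cmp < 0) {
          toRemove.push_back(local[i++]);  // exists only locally
        } else if (cmp > 0) {
          toFetch.push_back(remote[j++]);  // exists only on the leader
        } else {
          ++i; ++j;  // key match (the real code also compares revisions)
        }
      }
      while (i < local.size()) toRemove.push_back(local[i++]);
      while (j < remote.size()) toFetch.push_back(remote[j++]);
    }

    int main() {
      std::vector<std::string> toRemove, toFetch;
      reconcile({"a", "b", "d"}, {"a", "c", "d"}, toRemove, toFetch);
      for (auto const& k : toRemove) std::cout << "remove " << k << "\n";  // b
      for (auto const& k : toFetch)  std::cout << "fetch  " << k << "\n";  // c
    }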
@@ -213,11 +213,11 @@ Result DatabaseInitialSyncer::sendFlush() {
 
 /// @brief apply the data from a collection dump
 Result DatabaseInitialSyncer::applyCollectionDump(transaction::Methods& trx,
-                                                  std::string const& collectionName,
+                                                  LogicalCollection* coll,
                                                   SimpleHttpResult* response,
                                                   uint64_t& markersProcessed) {
   std::string const invalidMsg =
-      "received invalid dump data for collection '" + collectionName + "'";
+      "received invalid dump data for collection '" + coll->name() + "'";
 
   StringBuffer& data = response->getBody();
   char const* p = data.begin();
@@ -309,7 +309,7 @@ Result DatabaseInitialSyncer::applyCollectionDump(transaction::Methods& trx,
 
     VPackSlice const old = oldBuilder->slice();
 
-    Result r = applyCollectionDumpMarker(trx, collectionName, type, old, doc);
+    Result r = applyCollectionDumpMarker(trx, coll, type, old, doc);
 
     if (r.fail()) {
       return r;
@@ -321,9 +321,8 @@ Result DatabaseInitialSyncer::applyCollectionDump(transaction::Methods& trx,
 }
 
 /// @brief incrementally fetch data from a collection
-Result DatabaseInitialSyncer::handleCollectionDump(arangodb::LogicalCollection* col,
-                                                   std::string const& cid,
-                                                   std::string const& collectionName,
+Result DatabaseInitialSyncer::handleCollectionDump(arangodb::LogicalCollection* coll,
+                                                   std::string const& leaderColl,
                                                    TRI_voc_tick_t maxTick) {
   std::string appendix;
 
@@ -338,7 +337,7 @@ Result DatabaseInitialSyncer::handleCollectionDump(arangodb::LogicalCollection*
   uint64_t chunkSize = _configuration._chunkSize;
 
   TRI_ASSERT(_batchId);  // should not be equal to 0
-  std::string const baseUrl = ReplicationUrl + "/dump?collection=" + cid +
+  std::string const baseUrl = ReplicationUrl + "/dump?collection=" + StringUtils::urlEncode(leaderColl) +
                               "&batchId=" + std::to_string(_batchId) + appendix;
 
   TRI_voc_tick_t fromTick = 0;
@@ -365,13 +364,15 @@ Result DatabaseInitialSyncer::handleCollectionDump(arangodb::LogicalCollection*
     url += "&includeSystem=" + std::string(_configuration._includeSystem ? "true" : "false");
 
     std::string const typeString =
-        (col->type() == TRI_COL_TYPE_EDGE ? "edge" : "document");
+        (coll->type() == TRI_COL_TYPE_EDGE ? "edge" : "document");
 
     // send request
     std::string const progress =
-        "fetching master collection dump for collection '" + collectionName +
-        "', type: " + typeString + ", id " + cid + ", batch " +
-        StringUtils::itoa(batch);
+        "fetching master collection dump for collection '" + coll->name() +
+        "', type: " + typeString + ", id " + leaderColl + ", batch " +
+        StringUtils::itoa(batch) +
+        ", markers processed: " + StringUtils::itoa(markersProcessed) +
+        ", bytes received: " + StringUtils::itoa(bytesReceived);
 
     setProgress(progress);
 
@@ -484,7 +485,7 @@ Result DatabaseInitialSyncer::handleCollectionDump(arangodb::LogicalCollection*
     }
 
     SingleCollectionTransaction trx(
-        transaction::StandaloneContext::Create(vocbase()), col->cid(),
+        transaction::StandaloneContext::Create(vocbase()), coll->cid(),
         AccessMode::Type::EXCLUSIVE);
 
     Result res = trx.begin();
@@ -493,9 +494,9 @@ Result DatabaseInitialSyncer::handleCollectionDump(arangodb::LogicalCollection*
       return Result(res.errorNumber(), std::string("unable to start transaction: ") + res.errorMessage());
     }
 
-    trx.pinData(col->cid());  // will throw when it fails
+    trx.pinData(coll->cid());  // will throw when it fails
 
-    res = applyCollectionDump(trx, collectionName, response.get(), markersProcessed);
+    res = applyCollectionDump(trx, coll, response.get(), markersProcessed);
     if (res.fail()) {
       return res;
     }
@@ -503,8 +504,8 @@ Result DatabaseInitialSyncer::handleCollectionDump(arangodb::LogicalCollection*
     res = trx.commit();
 
     std::string const progress2 =
-        "fetched master collection dump for collection '" + collectionName +
-        "', type: " + typeString + ", id " + cid + ", batch " +
+        "fetched master collection dump for collection '" + coll->name() +
+        "', type: " + typeString + ", id " + leaderColl + ", batch " +
         StringUtils::itoa(batch) +
         ", markers processed: " + StringUtils::itoa(markersProcessed) +
         ", bytes received: " + StringUtils::itoa(bytesReceived);
@@ -536,21 +537,21 @@ Result DatabaseInitialSyncer::handleCollectionDump(arangodb::LogicalCollection*
 }
 
 /// @brief incrementally fetch data from a collection
-Result DatabaseInitialSyncer::handleCollectionSync(arangodb::LogicalCollection* col,
-                                                   std::string const& cid,
-                                                   std::string const& collectionName,
+Result DatabaseInitialSyncer::handleCollectionSync(arangodb::LogicalCollection* coll,
+                                                   std::string const& leaderColl,
                                                    TRI_voc_tick_t maxTick) {
   sendExtendBatch();
   sendExtendBarrier();
 
   std::string const baseUrl = ReplicationUrl + "/keys";
-  std::string url = baseUrl + "/keys" + "?collection=" + cid +
+  std::string url = baseUrl + "/keys" +
+                    "?collection=" + StringUtils::urlEncode(leaderColl) +
                     "&to=" + std::to_string(maxTick) +
                     "&serverId=" + _localServerIdString +
                     "&batchId=" + std::to_string(_batchId);
 
   std::string progress = "fetching collection keys for collection '" +
-                         collectionName + "' from " + url;
+                         coll->name() + "' from " + url;
   setProgress(progress);
 
   // send an initial async request to collect the collection keys on the other
@@ -638,18 +639,18 @@ Result DatabaseInitialSyncer::handleCollectionSync(arangodb::LogicalCollection*
                   _masterInfo._endpoint + url + ": response is no object");
   }
 
-  VPackSlice const id = slice.get("id");
+  VPackSlice const keysId = slice.get("id");
 
-  if (!id.isString()) {
+  if (!keysId.isString()) {
     return Result(TRI_ERROR_REPLICATION_INVALID_RESPONSE, std::string("got invalid response from master at ") +
                   _masterInfo._endpoint + url + ": response does not contain valid 'id' attribute");
   }
 
   auto shutdown = [&]() -> void {
-    url = baseUrl + "/" + id.copyString();
+    url = baseUrl + "/" + keysId.copyString();
     std::string progress =
         "deleting remote collection keys object for collection '" +
-        collectionName + "' from " + url;
+        coll->name() + "' from " + url;
     setProgress(progress);
 
     // now delete the keys we ordered
@@ -669,7 +670,7 @@ Result DatabaseInitialSyncer::handleCollectionSync(arangodb::LogicalCollection*
   if (count.getNumber<size_t>() <= 0) {
     // remote collection has no documents. now truncate our local collection
     SingleCollectionTransaction trx(
-        transaction::StandaloneContext::Create(vocbase()), col->cid(),
+        transaction::StandaloneContext::Create(vocbase()), coll->cid(),
         AccessMode::Type::EXCLUSIVE);
 
     Result res = trx.begin();
@@ -682,10 +683,11 @@ Result DatabaseInitialSyncer::handleCollectionSync(arangodb::LogicalCollection*
     if (!_leaderId.empty()) {
       options.isSynchronousReplicationFrom = _leaderId;
     }
-    OperationResult opRes = trx.truncate(collectionName, options);
+    OperationResult opRes = trx.truncate(coll->name(), options);
 
     if (opRes.fail()) {
-      return Result(opRes.errorNumber(), std::string("unable to truncate collection '") + collectionName + "': " + TRI_errno_string(opRes.errorNumber()));
+      return Result(opRes.errorNumber(), std::string("unable to truncate collection '") + coll->name()
+                    + "': " + TRI_errno_string(opRes.errorNumber()));
     }
 
     return trx.finish(opRes.result);
@@ -693,8 +695,7 @@ Result DatabaseInitialSyncer::handleCollectionSync(arangodb::LogicalCollection*
 
   // now we can fetch the complete chunk information from the master
   try {
-    return EngineSelectorFeature::ENGINE->handleSyncKeys(
-        *this, col, id.copyString(), cid, collectionName, maxTick);
+    return EngineSelectorFeature::ENGINE->handleSyncKeys(*this, coll, keysId.copyString());
   } catch (arangodb::basics::Exception const& ex) {
     return Result(ex.code(), ex.what());
   } catch (std::exception const& ex) {
@@ -800,7 +801,9 @@ Result DatabaseInitialSyncer::handleCollection(VPackSlice const& parameters,
     // not found...
     col = vocbase()->lookupCollection(masterName);
 
-    if (col != nullptr && (col->name() != masterName || (!masterUuid.empty() && col->globallyUniqueId() != masterUuid))) {
+    if (col != nullptr &&
+        (col->name() != masterName ||
+         (!masterUuid.empty() && col->globallyUniqueId() != masterUuid))) {
      // found another collection with the same name locally.
      // in this case we must drop it because we will run into duplicate
      // name conflicts otherwise
@@ -857,7 +860,7 @@ Result DatabaseInitialSyncer::handleCollection(VPackSlice const& parameters,
      return Result(res.errorNumber(), std::string("unable to truncate ") + collectionMsg + ": " + res.errorMessage());
    }
  } else {
-    // regular collection
+    // drop a regular collection
    if (_configuration._skipCreateDrop) {
      setProgress("dropping " + collectionMsg + " skipped because of configuration");
      return Result();
@@ -921,10 +924,11 @@ Result DatabaseInitialSyncer::handleCollection(VPackSlice const& parameters,
 
   Result res;
 
+  std::string const& masterColl = !masterUuid.empty() ? masterUuid : StringUtils::itoa(masterCid);
   if (incremental && getSize(col) > 0) {
-    res = handleCollectionSync(col, StringUtils::itoa(masterCid), masterName, _masterInfo._lastLogTick);
+    res = handleCollectionSync(col, masterColl, _masterInfo._lastLogTick);
   } else {
-    res = handleCollectionDump(col, StringUtils::itoa(masterCid), masterName, _masterInfo._lastLogTick);
+    res = handleCollectionDump(col, masterColl, _masterInfo._lastLogTick);
   }
 
   if (!res.ok()) {
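Editor's note, a small C++ aside on the new masterColl binding introduced above: one branch of the conditional is an lvalue (masterUuid) and the other a temporary returned by StringUtils::itoa, so the conditional expression as a whole is a prvalue, and binding it to std::string const& extends the temporary's lifetime to the end of the block. The reference therefore cannot dangle, at the cost of one string copy. A sketch with a hypothetical itoa stand-in:

    #include <cstdint>
    #include <iostream>
    #include <string>

    // hypothetical stand-in for StringUtils::itoa (returns a temporary)
    std::string itoa(uint64_t v) { return std::to_string(v); }

    int main() {
      std::string masterUuid = "hfee1dead/42";  // made-up uuid
      uint64_t masterCid = 42;

      // Mixed lvalue/prvalue operands make the conditional a prvalue; the
      // const reference extends that temporary's lifetime to the end of the
      // enclosing scope, so this pattern is safe.
      std::string const& masterColl =
          !masterUuid.empty() ? masterUuid : itoa(masterCid);
      std::cout << masterColl << "\n";  // prints the uuid
    }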
@@ -38,14 +38,14 @@ class ReplicationApplierConfiguration;
 /*
 arangodb::Result handleSyncKeysMMFiles(DatabaseInitialSyncer& syncer,
                                        arangodb::LogicalCollection* col,
-                                       std::string const& keysId, std::string const& cid,
-                                       std::string const& collectionName,
+                                       std::string const& keysId,
+                                       std::string const& leaderColl,
                                        TRI_voc_tick_t maxTick);
 
 arangodb::Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
                                        arangodb::LogicalCollection* col,
-                                       std::string const& keysId, std::string const& cid,
-                                       std::string const& collectionName,
+                                       std::string const& keysId,
+                                       std::string const& leaderColl,
                                        TRI_voc_tick_t maxTick);
 
 arangodb::Result syncChunkRocksDB(DatabaseInitialSyncer& syncer, SingleCollectionTransaction* trx,
@@ -56,12 +56,10 @@ arangodb::Result syncChunkRocksDB(DatabaseInitialSyncer& syncer, SingleCollectio
 */
 class DatabaseInitialSyncer : public InitialSyncer {
   friend ::arangodb::Result handleSyncKeysMMFiles(DatabaseInitialSyncer& syncer, arangodb::LogicalCollection* col,
-                                                  std::string const& keysId, std::string const& cid,
-                                                  std::string const& collectionName, TRI_voc_tick_t maxTick);
+                                                  std::string const& keysId);
 
   friend ::arangodb::Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer, arangodb::LogicalCollection* col,
-                                                  std::string const& keysId, std::string const& cid,
-                                                  std::string const& collectionName, TRI_voc_tick_t maxTick);
+                                                  std::string const& keysId);
 
   friend ::arangodb::Result syncChunkRocksDB(DatabaseInitialSyncer& syncer, SingleCollectionTransaction* trx,
                                              std::string const& keysId, uint64_t chunkId,
@@ -153,19 +151,19 @@ class DatabaseInitialSyncer : public InitialSyncer {
   Result sendFlush();
 
   /// @brief apply the data from a collection dump
-  Result applyCollectionDump(transaction::Methods&, std::string const&,
+  Result applyCollectionDump(transaction::Methods&, LogicalCollection* col,
                              httpclient::SimpleHttpResult*, uint64_t&);
 
   /// @brief determine the number of documents in a collection
   int64_t getSize(arangodb::LogicalCollection*);
 
   /// @brief incrementally fetch data from a collection
-  Result handleCollectionDump(arangodb::LogicalCollection*, std::string const&,
-                              std::string const&, TRI_voc_tick_t);
+  Result handleCollectionDump(arangodb::LogicalCollection*,
+                              std::string const& leaderColl, TRI_voc_tick_t);
 
   /// @brief incrementally fetch data from a collection
-  Result handleCollectionSync(arangodb::LogicalCollection*, std::string const&,
-                              std::string const&, TRI_voc_tick_t);
+  Result handleCollectionSync(arangodb::LogicalCollection*,
+                              std::string const& leaderColl, TRI_voc_tick_t);
 
   /// @brief changes the properties of a collection, based on the VelocyPack
   /// provided
@@ -393,7 +393,7 @@ arangodb::LogicalCollection* Syncer::resolveCollection(TRI_vocbase_t* vocbase,
 }
 
 Result Syncer::applyCollectionDumpMarker(
-    transaction::Methods& trx, std::string const& collectionName,
+    transaction::Methods& trx, LogicalCollection* coll,
     TRI_replication_operation_e type, VPackSlice const& old,
     VPackSlice const& slice) {
 
@@ -401,7 +401,7 @@ Result Syncer::applyCollectionDumpMarker(
     decltype(_configuration._lockTimeoutRetries) tries = 0;
 
     while (true) {
-      Result res = applyCollectionDumpMarkerInternal(trx, collectionName, type, old, slice);
+      Result res = applyCollectionDumpMarkerInternal(trx, coll, type, old, slice);
 
       if (res.errorNumber() != TRI_ERROR_LOCK_TIMEOUT) {
         return res;
@@ -417,13 +417,13 @@ Result Syncer::applyCollectionDumpMarker(
       // retry
     }
   } else {
-    return applyCollectionDumpMarkerInternal(trx, collectionName, type, old, slice);
+    return applyCollectionDumpMarkerInternal(trx, coll, type, old, slice);
   }
 }
 
 /// @brief apply the data from a collection dump or the continuous log
 Result Syncer::applyCollectionDumpMarkerInternal(
-    transaction::Methods& trx, std::string const& collectionName,
+    transaction::Methods& trx, LogicalCollection* coll,
     TRI_replication_operation_e type, VPackSlice const& old,
     VPackSlice const& slice) {
 
@@ -440,11 +440,11 @@ Result Syncer::applyCollectionDumpMarkerInternal(
 
     try {
       // try insert first
-      OperationResult opRes = trx.insert(collectionName, slice, options);
+      OperationResult opRes = trx.insert(coll->name(), slice, options);
 
       if (opRes.is(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED)) {
         // perform an update
-        opRes = trx.replace(collectionName, slice, options);
+        opRes = trx.replace(coll->name(), slice, options);
       }
 
       return Result(opRes.result);
@@ -467,7 +467,7 @@ Result Syncer::applyCollectionDumpMarkerInternal(
     if (!_leaderId.empty()) {
       options.isSynchronousReplicationFrom = _leaderId;
     }
-    OperationResult opRes = trx.remove(collectionName, old, options);
+    OperationResult opRes = trx.remove(coll->name(), old, options);
 
     if (opRes.ok() ||
         opRes.is(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) {
@@ -112,7 +112,7 @@ class Syncer {
 
   /// @brief apply a single marker from the collection dump
   Result applyCollectionDumpMarker(transaction::Methods&,
-                                   std::string const&,
+                                   LogicalCollection* coll,
                                    TRI_replication_operation_e,
                                    arangodb::velocypack::Slice const&,
                                    arangodb::velocypack::Slice const&);
@@ -162,7 +162,7 @@ class Syncer {
 
   /// @brief apply a single marker from the collection dump
   Result applyCollectionDumpMarkerInternal(transaction::Methods&,
-                                           std::string const&,
+                                           LogicalCollection* coll,
                                            TRI_replication_operation_e,
                                            arangodb::velocypack::Slice const&,
                                            arangodb::velocypack::Slice const&);
@@ -291,14 +291,12 @@ Result TailingSyncer::processDocument(TRI_replication_operation_e type,
   if (vocbase == nullptr) {
     return Result(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
   }
-  arangodb::LogicalCollection* col = resolveCollection(vocbase, slice);
-  if (col == nullptr) {
+  arangodb::LogicalCollection* coll = resolveCollection(vocbase, slice);
+  if (coll == nullptr) {
     return Result(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND);
   }
 
-  TRI_voc_cid_t cid = col->cid();
-  bool isSystem = col->isSystem();
-
+  bool isSystem = coll->isSystem();
   // extract "data"
   VPackSlice const doc = slice.get("data");
 
@@ -339,7 +337,7 @@ Result TailingSyncer::processDocument(TRI_replication_operation_e type,
         StringUtils::uint64(transactionId.c_str(), transactionId.size()));
   }
 
-  if (tid > 0) {
+  if (tid > 0) {  // part of a transaction
     auto it = _ongoingTransactions.find(tid);
 
     if (it == _ongoingTransactions.end()) {
@@ -352,16 +350,14 @@ Result TailingSyncer::processDocument(TRI_replication_operation_e type,
      return Result(TRI_ERROR_REPLICATION_UNEXPECTED_TRANSACTION, std::string("unexpected transaction ") + StringUtils::itoa(tid));
    }
 
-    trx->addCollectionAtRuntime(cid, "", AccessMode::Type::EXCLUSIVE);
-
-    std::string collectionName = trx->name(cid);
-    Result r = applyCollectionDumpMarker(*trx, collectionName, type, old, doc);
+    trx->addCollectionAtRuntime(coll->cid(), coll->name(), AccessMode::Type::EXCLUSIVE);
+    Result r = applyCollectionDumpMarker(*trx, coll, type, old, doc);
 
    if (r.errorNumber() == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED && isSystem) {
      // ignore unique constraint violations for system collections
      r.reset();
    }
-    if (r.ok() && collectionName == TRI_COL_NAME_USERS) {
+    if (r.ok() && coll->name() == TRI_COL_NAME_USERS) {
      _usersModified = true;
    }
 
@@ -371,7 +367,7 @@ Result TailingSyncer::processDocument(TRI_replication_operation_e type,
     // standalone operation
     // update the apply tick for all standalone operations
     SingleCollectionTransaction trx(
-        transaction::StandaloneContext::Create(vocbase), cid,
+        transaction::StandaloneContext::Create(vocbase), coll->cid(),
         AccessMode::Type::EXCLUSIVE);
 
     if (_supportsSingleOperations) {
@@ -387,7 +383,7 @@ Result TailingSyncer::processDocument(TRI_replication_operation_e type,
 
     std::string collectionName = trx.name();
 
-    res = applyCollectionDumpMarker(trx, collectionName, type, old, doc);
+    res = applyCollectionDumpMarker(trx, coll, type, old, doc);
     if (res.errorNumber() == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED && isSystem) {
       // ignore unique constraint violations for system collections
       res.reset();
@@ -1718,12 +1718,8 @@ void RocksDBEngine::getStatistics(VPackBuilder& builder) const {
 
 Result RocksDBEngine::handleSyncKeys(arangodb::DatabaseInitialSyncer& syncer,
                                      arangodb::LogicalCollection* col,
-                                     std::string const& keysId,
-                                     std::string const& cid,
-                                     std::string const& collectionName,
-                                     TRI_voc_tick_t maxTick) {
-  return handleSyncKeysRocksDB(syncer, col, keysId, cid, collectionName,
-                               maxTick);
+                                     std::string const& keysId) {
+  return handleSyncKeysRocksDB(syncer, col, keysId);
 }
 
 Result RocksDBEngine::createLoggerState(TRI_vocbase_t* vocbase,
@@ -143,8 +143,7 @@ class RocksDBEngine final : public StorageEngine {
                                   bool doSync) override;
   Result handleSyncKeys(arangodb::DatabaseInitialSyncer& syncer,
                         arangodb::LogicalCollection* col,
-                        std::string const& keysId, std::string const& cid,
-                        std::string const& collectionName, TRI_voc_tick_t maxTick) override;
+                        std::string const& keysId) override;
   Result createLoggerState(TRI_vocbase_t* vocbase,
                            velocypack::Builder& builder) override;
   Result createTickRanges(velocypack::Builder& builder) override;
@@ -350,10 +350,9 @@ Result syncChunkRocksDB(DatabaseInitialSyncer& syncer,
 
 Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
                              arangodb::LogicalCollection* col,
-                             std::string const& keysId, std::string const& cid,
-                             std::string const& collectionName, TRI_voc_tick_t maxTick) {
+                             std::string const& keysId) {
   std::string progress =
-      "collecting local keys for collection '" + collectionName + "'";
+      "collecting local keys for collection '" + col->name() + "'";
   syncer.setProgress(progress);
 
   if (syncer.checkAborted()) {
@@ -368,7 +367,7 @@ Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
 
   std::string url =
       baseUrl + "/" + keysId + "?chunkSize=" + std::to_string(chunkSize);
-  progress = "fetching remote keys chunks for collection '" + collectionName +
+  progress = "fetching remote keys chunks for collection '" + col->name() +
              "' from " + url;
   syncer.setProgress(progress);
   auto const headers = syncer.createHeaders();
@@ -451,9 +450,9 @@ Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
           VPackSlice doc(mmdr.vpack());
           VPackSlice key = doc.get(StaticStrings::KeyString);
           if (key.compareString(lowKey.data(), lowKey.length()) < 0) {
-            trx.remove(collectionName, doc, options);
+            trx.remove(col->name(), doc, options);
           } else if (key.compareString(highKey.data(), highKey.length()) > 0) {
-            trx.remove(collectionName, doc, options);
+            trx.remove(col->name(), doc, options);
           }
         },
         UINT64_MAX);
@@ -499,7 +498,7 @@ Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
     syncer.sendExtendBarrier();
 
     progress = "processing keys chunk " + std::to_string(currentChunkId) +
-               " for collection '" + collectionName + "'";
+               " for collection '" + col->name() + "'";
     syncer.setProgress(progress);
 
     // read remote chunk
@@ -538,7 +537,7 @@ Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
 
           if (cmp1 < 0) {
             // smaller values than lowKey mean they don't exist remotely
-            trx.remove(collectionName, key, options);
+            trx.remove(col->name(), key, options);
             return;
          } else if (cmp1 >= 0 && cmp2 <= 0) {
            // we only need to hash if we are in the range
@@ -38,9 +38,7 @@ Result syncChunkRocksDB(
 
 Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
                              arangodb::LogicalCollection* col,
-                             std::string const& keysId, std::string const& cid,
-                             std::string const& collectionName,
-                             TRI_voc_tick_t maxTick);
+                             std::string const& keysId);
 }
 
 #endif
@@ -365,10 +365,7 @@ class StorageEngine : public application_features::ApplicationFeature {
 
   virtual Result handleSyncKeys(arangodb::DatabaseInitialSyncer& syncer,
                                 arangodb::LogicalCollection* col,
-                                std::string const& keysId,
-                                std::string const& cid,
-                                std::string const& collectionName,
-                                TRI_voc_tick_t maxTick) = 0;
+                                std::string const& keysId) = 0;
   virtual Result createLoggerState(TRI_vocbase_t* vocbase,
                                    velocypack::Builder& builder) = 0;
   virtual Result createTickRanges(velocypack::Builder& builder) = 0;
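Editor's note: callers reach this pure virtual through the engine selector (EngineSelectorFeature::ENGINE->handleSyncKeys(...) in the syncer code above), so both MMFiles and RocksDB pick up the slimmer signature in one place. A schematic of that dispatch shape, with all ArangoDB types reduced to stand-ins:

    #include <iostream>
    #include <memory>
    #include <string>

    // Stand-ins for the real types; only the dispatch shape is of interest.
    struct Result { int code = 0; };
    struct DatabaseInitialSyncer {};
    struct LogicalCollection {};

    struct StorageEngine {
      virtual ~StorageEngine() = default;
      // after this commit the engine only needs the keys-exchange id;
      // collection identity travels inside LogicalCollection itself
      virtual Result handleSyncKeys(DatabaseInitialSyncer& syncer,
                                    LogicalCollection* col,
                                    std::string const& keysId) = 0;
    };

    struct MockEngine final : StorageEngine {
      Result handleSyncKeys(DatabaseInitialSyncer&, LogicalCollection*,
                            std::string const& keysId) override {
        std::cout << "sync keys, id=" << keysId << "\n";
        return {};
      }
    };

    int main() {
      std::unique_ptr<StorageEngine> engine = std::make_unique<MockEngine>();
      DatabaseInitialSyncer syncer;
      LogicalCollection coll;
      engine->handleSyncKeys(syncer, &coll, "12345");  // virtual dispatch
    }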
@@ -26,6 +26,7 @@
 
 #include "Aql/PlanCache.h"
 #include "Aql/QueryCache.h"
+#include "Basics/conversions.h"
 #include "Basics/fasthash.h"
 #include "Basics/LocalTaskQueue.h"
 #include "Basics/PerformanceLogScope.h"
@@ -43,6 +44,7 @@
 #include "Indexes/Index.h"
 #include "Indexes/IndexIterator.h"
 #include "RestServer/DatabaseFeature.h"
+#include "RestServer/ServerIdFeature.h"
 #include "Scheduler/Scheduler.h"
 #include "Scheduler/SchedulerFeature.h"
 #include "StorageEngine/EngineSelectorFeature.h"
@@ -1377,11 +1379,13 @@ Result LogicalCollection::compareChecksums(VPackSlice checksumSlice, std::string
 }
 
 std::string LogicalCollection::generateGloballyUniqueId() const {
-  ServerState::RoleEnum role = ServerState::instance()->getRole();
-
+  if (_version < VERSION_33) {
+    return _name;  // predictable UUID for legacy collections
+  }
+  ServerState::RoleEnum role = ServerState::instance()->getRole();
   std::string result;
   result.reserve(64);
 
   if (ServerState::isCoordinator(role)) {
     TRI_ASSERT(_planId != 0);
     result.append(std::to_string(_planId));
@@ -1392,15 +1396,17 @@ std::string LogicalCollection::generateGloballyUniqueId() const {
     result.append(std::to_string(_planId));
     result.push_back('/');
     result.append(_name);
-  } else {
+  } else {  // single server
     if (isSystem()) {  // system collection can't be renamed
       result.append(_name);
     } else {
-      std::string id = ServerState::instance()->getId();
-      if (!id.empty()) {
-        result.append(id);
-        result.push_back('/');
-      }
+      TRI_ASSERT(_cid != 0);
+      result.append("h");
+      char buff[sizeof(TRI_server_id_t) * 2 + 1];
+      size_t len = TRI_StringUInt64HexInPlace(ServerIdFeature::getId(), buff);
+      result.append(buff, len);
+      TRI_ASSERT(result.size() > 3);
+      result.push_back('/');
       result.append(std::to_string(_cid));
     }
   }
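Editor's note: reading the branches of generateGloballyUniqueId() together, the uuid formats are roughly: '<planId>' on a coordinator, '<planId>/<name>' on a DB server, and on a single server the collection name for system collections, else 'h<serverIdHex>/<cid>'; collections from before VERSION_33 keep their name as a predictable uuid. A standalone approximation of those formats (the server id and all values are made up; the real code uses ServerIdFeature::getId() and TRI_StringUInt64HexInPlace):

    #include <cstdint>
    #include <cstdio>
    #include <iostream>
    #include <string>

    enum class Role { Coordinator, DbServer, SingleServer };

    // Schematic re-derivation of the uuid formats from the diff above.
    std::string uniqueId(Role role, uint64_t planId, uint64_t cid,
                         std::string const& name, bool isSystem, uint32_t version) {
      if (version < 7 /* VERSION_33 */) {
        return name;  // legacy collections keep a predictable uuid
      }
      switch (role) {
        case Role::Coordinator:
          return std::to_string(planId);
        case Role::DbServer:
          return std::to_string(planId) + "/" + name;
        case Role::SingleServer: {
          if (isSystem) {
            return name;  // system collections cannot be renamed
          }
          char buff[17];
          std::snprintf(buff, sizeof(buff), "%llx",
                        (unsigned long long)0xFEE1DEADULL);  // made-up server id
          return std::string("h") + buff + "/" + std::to_string(cid);
        }
      }
      return name;  // unreachable
    }

    int main() {
      std::cout << uniqueId(Role::SingleServer, 0, 12345, "docs", false, 7) << "\n";  // hfee1dead/12345
      std::cout << uniqueId(Role::DbServer, 77, 0, "docs", false, 7) << "\n";         // 77/docs
      std::cout << uniqueId(Role::Coordinator, 77, 0, "docs", false, 6) << "\n";      // docs (legacy)
    }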
@@ -81,7 +81,7 @@ class LogicalCollection {
 
   virtual ~LogicalCollection();
 
-  enum CollectionVersions { VERSION_30 = 5, VERSION_31 = 6 };
+  enum CollectionVersions { VERSION_30 = 5, VERSION_31 = 6, VERSION_33 = 7 };
 
  protected:  // If you need a copy outside the class, use clone below.
   explicit LogicalCollection(LogicalCollection const&);
@@ -99,7 +99,7 @@ class LogicalCollection {
   /// @brief hard-coded minimum version number for collections
   static constexpr uint32_t minimumVersion() { return VERSION_30; }
   /// @brief current version for collections
-  static constexpr uint32_t currentVersion() { return VERSION_31; }
+  static constexpr uint32_t currentVersion() { return VERSION_33; }
 
   /// @brief determine whether a collection name is a system collection name
   static inline bool IsSystemName(std::string const& name) {