mirror of https://gitee.com/bigwinds/arangodb

Bug fix/simplify things (#6516)

parent 78fc6aa758
commit c38051519e
@@ -839,7 +839,7 @@ size_t ClusterComm::performRequests(std::vector<ClusterCommRequest>& requests,
                                     ClusterCommTimeout timeout, size_t& nrDone,
                                     arangodb::LogTopic const& logTopic,
                                     bool retryOnCollNotFound) {
-  if (requests.size() == 0) {
+  if (requests.empty()) {
     nrDone = 0;
     return 0;
   }
@@ -43,6 +43,7 @@
 #include "Utils/OperationOptions.h"
 #include "VocBase/LocalDocumentId.h"
 #include "VocBase/LogicalCollection.h"
 #include "VocBase/ManagedDocumentResult.h"
 #include "VocBase/ticks.h"
 #include "VocBase/voc-types.h"
@@ -30,7 +30,6 @@
 #include "ClusterEngine/Common.h"
 #include "StorageEngine/PhysicalCollection.h"
 #include "VocBase/LogicalCollection.h"
 #include "VocBase/ManagedDocumentResult.h"

 namespace rocksdb {
 class Transaction;
@@ -65,11 +65,55 @@
 #include "VocBase/KeyGenerator.h"
 #include "VocBase/LocalDocumentId.h"
 #include "VocBase/LogicalCollection.h"
 #include "VocBase/ManagedDocumentResult.h"
 #include "VocBase/ticks.h"

 using namespace arangodb;
 using Helper = arangodb::basics::VelocyPackHelper;

 /// @brief state during opening of a collection
 namespace arangodb {
 struct OpenIteratorState {
   LogicalCollection* _collection;
   arangodb::MMFilesPrimaryIndex* _primaryIndex;
   TRI_voc_tid_t _tid;
   TRI_voc_fid_t _fid;
   std::unordered_map<TRI_voc_fid_t, MMFilesDatafileStatisticsContainer*>
       _stats;
   MMFilesDatafileStatisticsContainer* _dfi;
   transaction::Methods* _trx;
   ManagedDocumentResult _mdr;
   IndexLookupContext _context;
   uint64_t _deletions;
   uint64_t _documents;
   int64_t _initialCount;

   OpenIteratorState(LogicalCollection* collection, transaction::Methods* trx)
       : _collection(collection),
         _primaryIndex(
             static_cast<MMFilesCollection*>(collection->getPhysical())
                 ->primaryIndex()),
         _tid(0),
         _fid(0),
         _stats(),
         _dfi(nullptr),
         _trx(trx),
         _context(trx, collection, &_mdr, 1),
         _deletions(0),
         _documents(0),
         _initialCount(-1) {
     TRI_ASSERT(collection != nullptr);
     TRI_ASSERT(trx != nullptr);
   }

   ~OpenIteratorState() {
     for (auto& it : _stats) {
       delete it.second;
     }
   }
 };
 }

 namespace {

 /// @brief helper class for filling indexes
@@ -100,7 +144,7 @@ class MMFilesIndexFillerTask : public basics::LocalTask {

 /// @brief find a statistics container for a given file id
 static MMFilesDatafileStatisticsContainer* FindDatafileStats(
-    MMFilesCollection::OpenIteratorState* state, TRI_voc_fid_t fid) {
+    OpenIteratorState* state, TRI_voc_fid_t fid) {
   auto it = state->_stats.find(fid);

   if (it != state->_stats.end()) {
@@ -216,7 +260,7 @@ PhysicalCollection* MMFilesCollection::clone(LogicalCollection& logical) const {
 /// @brief process a document (or edge) marker when opening a collection
 int MMFilesCollection::OpenIteratorHandleDocumentMarker(
     MMFilesMarker const* marker, MMFilesDatafile* datafile,
-    MMFilesCollection::OpenIteratorState* state) {
+    OpenIteratorState* state) {
   LogicalCollection* collection = state->_collection;
   TRI_ASSERT(collection != nullptr);
   auto physical = static_cast<MMFilesCollection*>(collection->getPhysical());
@@ -265,7 +309,7 @@ int MMFilesCollection::OpenIteratorHandleDocumentMarker(
   // no primary index lock required here because we are the only ones reading
   // from the index ATM
   MMFilesSimpleIndexElement* found =
-      state->_primaryIndex->lookupKeyRef(trx, keySlice, state->_mmdr);
+      state->_primaryIndex->lookupKeyRef(trx, keySlice, state->_mdr);

   // it is a new entry
   if (found == nullptr || !found->isSet()) {
@@ -274,7 +318,7 @@ int MMFilesCollection::OpenIteratorHandleDocumentMarker(
     // insert into primary index
     Result res = state->_primaryIndex->insertKey(trx, localDocumentId,
                                                  VPackSlice(vpack),
-                                                 state->_mmdr,
+                                                 state->_mdr,
                                                  Index::OperationMode::normal);

     if (res.fail()) {
@@ -336,7 +380,7 @@ int MMFilesCollection::OpenIteratorHandleDocumentMarker(
 /// @brief process a deletion marker when opening a collection
 int MMFilesCollection::OpenIteratorHandleDeletionMarker(
     MMFilesMarker const* marker, MMFilesDatafile* datafile,
-    MMFilesCollection::OpenIteratorState* state) {
+    OpenIteratorState* state) {
   LogicalCollection* collection = state->_collection;
   TRI_ASSERT(collection != nullptr);
   auto physical = static_cast<MMFilesCollection*>(collection->getPhysical());
@@ -371,7 +415,7 @@ int MMFilesCollection::OpenIteratorHandleDeletionMarker(
   // no primary index lock required here because we are the only ones reading
   // from the index ATM
   MMFilesSimpleIndexElement found =
-      state->_primaryIndex->lookupKey(trx, keySlice, state->_mmdr);
+      state->_primaryIndex->lookupKey(trx, keySlice, state->_mdr);

   // it is a new entry, so we missed the create
   if (!found) {
@@ -407,7 +451,7 @@ int MMFilesCollection::OpenIteratorHandleDeletionMarker(
     state->_dfi->numberDeletions++;

     state->_primaryIndex->removeKey(trx, oldLocalDocumentId, VPackSlice(vpack),
-                                    state->_mmdr, Index::OperationMode::normal);
+                                    state->_mdr, Index::OperationMode::normal);

     physical->removeLocalDocumentId(oldLocalDocumentId, true);
   }
@@ -417,7 +461,7 @@ int MMFilesCollection::OpenIteratorHandleDeletionMarker(

 /// @brief iterator for open
 bool MMFilesCollection::OpenIterator(MMFilesMarker const* marker,
-                                     MMFilesCollection::OpenIteratorState* data,
+                                     OpenIteratorState* data,
                                      MMFilesDatafile* datafile) {
   TRI_voc_tick_t const tick = marker->getTick();
   MMFilesMarkerType const type = marker->getType();
@@ -434,15 +478,8 @@ bool MMFilesCollection::OpenIterator(MMFilesMarker const* marker,
     if (tick > datafile->_dataMax) {
       datafile->_dataMax = tick;
     }
-
-    if (++data->_operations % 1024 == 0) {
-      data->_mmdr.reset();
-    }
   } else if (type == TRI_DF_MARKER_VPACK_REMOVE) {
     res = OpenIteratorHandleDeletionMarker(marker, datafile, data);
-    if (++data->_operations % 1024 == 0) {
-      data->_mmdr.reset();
-    }
   } else {
     if (type == TRI_DF_MARKER_HEADER) {
       // ensure there is a datafile info entry for each datafile of the
@@ -3290,7 +3327,7 @@ Result MMFilesCollection::update(

   if (newSlice.length() <= 1) {
     // no need to do anything
-    result = std::move(previous);
+    result = previous;

     if (_logicalCollection.waitForSync()) {
       options.waitForSync = true;
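One plausible reading of the one-line change above (this is an interpretation, not stated in the commit): `previous` is still consulted after this early-exit branch, so moving out of it would leave it empty, while a plain copy keeps it valid. A tiny self-contained sketch of the difference, with std::string standing in for the result type:

#include <cassert>
#include <string>

int main() {
  std::string previous = "old revision";
  std::string result = std::move(previous);  // 'previous' is now in an unspecified/empty state
  // any later read of 'previous' would no longer see the old document
  assert(!result.empty());

  std::string previous2 = "old revision";
  std::string result2 = previous2;           // a copy keeps 'previous2' intact for later use
  assert(previous2 == "old revision" && result2 == "old revision");
  return 0;
}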
@@ -38,19 +38,17 @@
 #include "VocBase/KeyGenerator.h"
 #include "VocBase/LocalDocumentId.h"
 #include "VocBase/LogicalCollection.h"
 #include "VocBase/ManagedDocumentResult.h"

 struct MMFilesDatafile;
 struct MMFilesMarker;

 namespace arangodb {

 class LogicalCollection;
 class ManagedDocumentResult;
 struct MMFilesDocumentOperation;
 class MMFilesPrimaryIndex;
 class MMFilesWalMarker;
 struct OpenIteratorState;
 class Result;
 class TransactionState;
@@ -72,51 +70,7 @@ class MMFilesCollection final : public PhysicalCollection {
     TRI_ASSERT(phys != nullptr);
     return toMMFilesCollection(phys);
   }

-  /// @brief state during opening of a collection
-  struct OpenIteratorState {
-    LogicalCollection* _collection;
-    arangodb::MMFilesPrimaryIndex* _primaryIndex;
-    TRI_voc_tid_t _tid;
-    TRI_voc_fid_t _fid;
-    std::unordered_map<TRI_voc_fid_t, MMFilesDatafileStatisticsContainer*>
-        _stats;
-    MMFilesDatafileStatisticsContainer* _dfi;
-    transaction::Methods* _trx;
-    ManagedDocumentResult _mmdr;
-    IndexLookupContext _context;
-    uint64_t _deletions;
-    uint64_t _documents;
-    uint64_t _operations;
-    int64_t _initialCount;
-
-    OpenIteratorState(LogicalCollection* collection, transaction::Methods* trx)
-        : _collection(collection),
-          _primaryIndex(
-              static_cast<MMFilesCollection*>(collection->getPhysical())
-                  ->primaryIndex()),
-          _tid(0),
-          _fid(0),
-          _stats(),
-          _dfi(nullptr),
-          _trx(trx),
-          _mmdr(),
-          _context(trx, collection, &_mmdr, 1),
-          _deletions(0),
-          _documents(0),
-          _operations(0),
-          _initialCount(-1) {
-      TRI_ASSERT(collection != nullptr);
-      TRI_ASSERT(trx != nullptr);
-    }
-
-    ~OpenIteratorState() {
-      for (auto& it : _stats) {
-        delete it.second;
-      }
-    }
-  };
-
   struct DatafileDescription {
     MMFilesDatafile const* _data;
     TRI_voc_tick_t _dataMin;
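Taken together, the hunks above move OpenIteratorState out of MMFilesCollection.h and into MMFilesCollection.cpp, leaving only a forward declaration in the header. A minimal, self-contained sketch of that pattern (illustrative names, not ArangoDB code):

#include <cstdint>

namespace example {

// header side: only a forward declaration is exposed
struct OpenState;

class Collection {
 public:
  bool openIterator(OpenState* state);  // callable without the full definition
};

// source side: the full definition lives next to its only user
struct OpenState {
  std::uint64_t documents = 0;
  std::uint64_t deletions = 0;
};

bool Collection::openIterator(OpenState* state) {
  ++state->documents;  // the .cpp sees the complete type
  return true;
}

}  // namespace example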
@@ -35,6 +35,7 @@
 #include "Utils/SingleCollectionTransaction.h"
 #include "Transaction/StandaloneContext.h"
 #include "VocBase/LogicalCollection.h"
 #include "VocBase/ManagedDocumentResult.h"
 #include "VocBase/vocbase.h"

 #include <velocypack/Builder.h>
@@ -110,13 +111,13 @@ void MMFilesCollectionKeys::create(TRI_voc_tick_t maxTick) {
     THROW_ARANGO_EXCEPTION(res);
   }

-  ManagedDocumentResult mmdr;
-  MMFilesCollection *mmColl = MMFilesCollection::toMMFilesCollection(_collection);
+  ManagedDocumentResult mdr;
+  MMFilesCollection* mmColl = MMFilesCollection::toMMFilesCollection(_collection);

   trx.invokeOnAllElements(
-      _collection->name(), [this, &trx, &maxTick, &mmdr, &mmColl](LocalDocumentId const& token) {
-        if (mmColl->readDocumentConditional(&trx, token, maxTick, mmdr)) {
-          _vpack.emplace_back(mmdr.vpack());
+      _collection->name(), [this, &trx, &maxTick, &mdr, &mmColl](LocalDocumentId const& token) {
+        if (mmColl->readDocumentConditional(&trx, token, maxTick, mdr)) {
+          _vpack.emplace_back(mdr.vpack());
         }
         return true;
       });
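In the hunk above a single ManagedDocumentResult is created once and captured by reference in the invokeOnAllElements callback, so every document read reuses the same buffer instead of allocating a fresh result per element. A rough, self-contained sketch of that shape (generic stand-in types, not the transaction API):

#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct DocumentResult { std::string buffer; };  // stand-in for ManagedDocumentResult

static void invokeOnAllElements(std::vector<std::string> const& docs,
                                std::function<bool(std::string const&)> const& cb) {
  for (auto const& d : docs) {
    if (!cb(d)) { break; }
  }
}

int main() {
  std::vector<std::string> docs{"a", "bb", "ccc"};
  DocumentResult mdr;                       // one result object, reused per document
  std::size_t collected = 0;
  invokeOnAllElements(docs, [&mdr, &collected](std::string const& d) {
    mdr.buffer = d;                         // "read" the document into the shared buffer
    ++collected;
    return true;
  });
  std::cout << "collected " << collected << " documents\n";
  return 0;
}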
@ -40,6 +40,7 @@
|
|||
#include "Transaction/Methods.h"
|
||||
#include "Utils/CollectionNameResolver.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
|
||||
#include <velocypack/Iterator.h>
|
||||
#include <velocypack/velocypack-aliases.h>
|
||||
|
@ -55,12 +56,12 @@ static std::vector<std::vector<arangodb::basics::AttributeName>> const
|
|||
|
||||
MMFilesEdgeIndexIterator::MMFilesEdgeIndexIterator(
|
||||
LogicalCollection* collection, transaction::Methods* trx,
|
||||
ManagedDocumentResult* mmdr, arangodb::MMFilesEdgeIndex const* index,
|
||||
ManagedDocumentResult* mdr, arangodb::MMFilesEdgeIndex const* index,
|
||||
TRI_MMFilesEdgeIndexHash_t const* indexImpl,
|
||||
std::unique_ptr<VPackBuilder> keys)
|
||||
: IndexIterator(collection, trx),
|
||||
_index(indexImpl),
|
||||
_context(trx, collection, mmdr, index->fields().size()),
|
||||
_context(trx, collection, mdr, index->fields().size()),
|
||||
_keys(std::move(keys)),
|
||||
_iterator(_keys->slice()),
|
||||
_posInBuffer(0),
|
||||
|
@ -419,7 +420,7 @@ bool MMFilesEdgeIndex::supportsFilterCondition(
|
|||
|
||||
/// @brief creates an IndexIterator for the given Condition
|
||||
IndexIterator* MMFilesEdgeIndex::iteratorForCondition(
|
||||
transaction::Methods* trx, ManagedDocumentResult* mmdr,
|
||||
transaction::Methods* trx, ManagedDocumentResult* mdr,
|
||||
arangodb::aql::AstNode const* node,
|
||||
arangodb::aql::Variable const* reference,
|
||||
IndexIteratorOptions const& opts) {
|
||||
|
@ -442,7 +443,7 @@ IndexIterator* MMFilesEdgeIndex::iteratorForCondition(
|
|||
|
||||
if (comp->type == aql::NODE_TYPE_OPERATOR_BINARY_EQ) {
|
||||
// a.b == value
|
||||
return createEqIterator(trx, mmdr, attrNode, valNode);
|
||||
return createEqIterator(trx, mdr, attrNode, valNode);
|
||||
}
|
||||
|
||||
if (comp->type == aql::NODE_TYPE_OPERATOR_BINARY_IN) {
|
||||
|
@ -452,7 +453,7 @@ IndexIterator* MMFilesEdgeIndex::iteratorForCondition(
|
|||
return new EmptyIndexIterator(&_collection, trx);
|
||||
}
|
||||
|
||||
return createInIterator(trx, mmdr, attrNode, valNode);
|
||||
return createInIterator(trx, mdr, attrNode, valNode);
|
||||
}
|
||||
|
||||
// operator type unsupported
|
||||
|
@ -469,7 +470,7 @@ arangodb::aql::AstNode* MMFilesEdgeIndex::specializeCondition(
|
|||
|
||||
/// @brief create the iterator
|
||||
IndexIterator* MMFilesEdgeIndex::createEqIterator(
|
||||
transaction::Methods* trx, ManagedDocumentResult* mmdr,
|
||||
transaction::Methods* trx, ManagedDocumentResult* mdr,
|
||||
arangodb::aql::AstNode const* attrNode,
|
||||
arangodb::aql::AstNode const* valNode) const {
|
||||
// lease builder, but immediately pass it to the unique_ptr so we don't leak
|
||||
|
@ -489,7 +490,7 @@ IndexIterator* MMFilesEdgeIndex::createEqIterator(
|
|||
return new MMFilesEdgeIndexIterator(
|
||||
&_collection,
|
||||
trx,
|
||||
mmdr,
|
||||
mdr,
|
||||
this,
|
||||
isFrom ? _edgesFrom.get() : _edgesTo.get(),
|
||||
std::move(keys)
|
||||
|
@ -498,7 +499,7 @@ IndexIterator* MMFilesEdgeIndex::createEqIterator(
|
|||
|
||||
/// @brief create the iterator
|
||||
IndexIterator* MMFilesEdgeIndex::createInIterator(
|
||||
transaction::Methods* trx, ManagedDocumentResult* mmdr,
|
||||
transaction::Methods* trx, ManagedDocumentResult* mdr,
|
||||
arangodb::aql::AstNode const* attrNode,
|
||||
arangodb::aql::AstNode const* valNode) const {
|
||||
// lease builder, but immediately pass it to the unique_ptr so we don't leak
|
||||
|
@ -525,7 +526,7 @@ IndexIterator* MMFilesEdgeIndex::createInIterator(
|
|||
return new MMFilesEdgeIndexIterator(
|
||||
&_collection,
|
||||
trx,
|
||||
mmdr,
|
||||
mdr,
|
||||
this,
|
||||
isFrom ? _edgesFrom.get() : _edgesTo.get(),
|
||||
std::move(keys)
|
||||
|
|
|
@ -38,6 +38,7 @@
|
|||
#include "Transaction/Context.h"
|
||||
#include "Transaction/Helpers.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
|
||||
#include <velocypack/Iterator.h>
|
||||
#include <velocypack/velocypack-aliases.h>
|
||||
|
|
|
@ -40,6 +40,7 @@
|
|||
#include "Utils/OperationOptions.h"
|
||||
#include "VocBase/LocalDocumentId.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
|
||||
#include <velocypack/Builder.h>
|
||||
#include <velocypack/Iterator.h>
|
||||
|
@@ -179,12 +180,12 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
   markers.reserve(trx.documentCollection()->numberDocuments(&trx, transaction::CountType::Normal));

   uint64_t iterations = 0;
-  ManagedDocumentResult mmdr;
+  ManagedDocumentResult mdr;
   trx.invokeOnAllElements(
-      trx.name(), [&syncer, &trx, &mmdr, &markers,
+      trx.name(), [&syncer, &trx, &mdr, &markers,
                    &iterations](LocalDocumentId const& token) {
-        if (trx.documentCollection()->readDocument(&trx, token, mmdr)) {
-          markers.emplace_back(mmdr.vpack());
+        if (trx.documentCollection()->readDocument(&trx, token, mdr)) {
+          markers.emplace_back(mdr.vpack());

           if (++iterations % 10000 == 0) {
             if (syncer.isAborted()) {
@ -413,7 +414,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
|
|||
// The LogicalCollection is protected by trx.
|
||||
// Neither it nor it's indexes can be invalidated
|
||||
|
||||
ManagedDocumentResult mmdr;
|
||||
ManagedDocumentResult mdr;
|
||||
|
||||
auto physical = static_cast<MMFilesCollection*>(
|
||||
trx.documentCollection()->getPhysical());
|
||||
|
@ -620,9 +621,9 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
|
|||
toFetch.emplace_back(i);
|
||||
} else {
|
||||
TRI_voc_rid_t currentRevisionId = 0;
|
||||
if (physical->readDocument(&trx, element.localDocumentId(), mmdr)) {
|
||||
if (physical->readDocument(&trx, element.localDocumentId(), mdr)) {
|
||||
currentRevisionId = transaction::helpers::extractRevFromDocument(
|
||||
VPackSlice(mmdr.vpack()));
|
||||
VPackSlice(mdr.vpack()));
|
||||
}
|
||||
|
||||
if (TRI_RidToString(currentRevisionId) != pair.at(1).copyString()) {
|
||||
|
@ -765,10 +766,10 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
|
|||
LocalDocumentId conflictId =
|
||||
physical->lookupKey(&trx, conflict.slice());
|
||||
if (conflictId.isSet()) {
|
||||
ManagedDocumentResult mmdr;
|
||||
bool success = physical->readDocument(&trx, conflictId, mmdr);
|
||||
ManagedDocumentResult mdr;
|
||||
bool success = physical->readDocument(&trx, conflictId, mdr);
|
||||
if (success) {
|
||||
VPackSlice conflictingKey(mmdr.vpack());
|
||||
VPackSlice conflictingKey(mdr.vpack());
|
||||
return trx.remove(coll->name(), conflictingKey, options);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -40,6 +40,7 @@
|
|||
#include "Transaction/Helpers.h"
|
||||
#include "Transaction/Methods.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
|
||||
#include <rocksdb/utilities/optimistic_transaction_db.h>
|
||||
#include <rocksdb/utilities/transaction.h>
|
||||
|
|
|
@ -37,6 +37,7 @@
|
|||
#include "Transaction/Helpers.h"
|
||||
#include "Transaction/Methods.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
|
||||
#ifdef USE_ENTERPRISE
|
||||
#include "Enterprise/VocBase/VirtualCollection.h"
|
||||
|
@ -80,7 +81,7 @@ bool MMFilesPrimaryIndexIterator::next(LocalDocumentIdCallback const& cb, size_t
|
|||
return false;
|
||||
}
|
||||
while (_iterator.valid() && limit > 0) {
|
||||
// TODO: use version that hands in an existing mmdr
|
||||
// TODO: use version that hands in an existing mdr
|
||||
MMFilesSimpleIndexElement result =
|
||||
_index->lookupKey(_trx, _iterator.value());
|
||||
_iterator.next();
|
||||
|
@ -273,8 +274,8 @@ void MMFilesPrimaryIndex::unload() {
|
|||
/// @brief looks up an element given a key
|
||||
MMFilesSimpleIndexElement MMFilesPrimaryIndex::lookupKey(
|
||||
transaction::Methods* trx, VPackSlice const& key) const {
|
||||
ManagedDocumentResult mmdr;
|
||||
IndexLookupContext context(trx, &_collection, &mmdr, 1);
|
||||
ManagedDocumentResult mdr;
|
||||
IndexLookupContext context(trx, &_collection, &mdr, 1);
|
||||
TRI_ASSERT(key.isString());
|
||||
|
||||
return _primaryIndex->findByKey(&context, key.begin());
|
||||
|
@ -283,8 +284,8 @@ MMFilesSimpleIndexElement MMFilesPrimaryIndex::lookupKey(
|
|||
/// @brief looks up an element given a key
|
||||
MMFilesSimpleIndexElement MMFilesPrimaryIndex::lookupKey(
|
||||
transaction::Methods* trx, VPackSlice const& key,
|
||||
ManagedDocumentResult& mmdr) const {
|
||||
IndexLookupContext context(trx, &_collection, &mmdr, 1);
|
||||
ManagedDocumentResult& mdr) const {
|
||||
IndexLookupContext context(trx, &_collection, &mdr, 1);
|
||||
TRI_ASSERT(key.isString());
|
||||
|
||||
return _primaryIndex->findByKey(&context, key.begin());
|
||||
|
@ -310,8 +311,8 @@ MMFilesSimpleIndexElement* MMFilesPrimaryIndex::lookupKeyRef(
|
|||
/// @brief looks up an element given a key
|
||||
MMFilesSimpleIndexElement* MMFilesPrimaryIndex::lookupKeyRef(
|
||||
transaction::Methods* trx, VPackSlice const& key,
|
||||
ManagedDocumentResult& mmdr) const {
|
||||
IndexLookupContext context(trx, &_collection, &mmdr, 1);
|
||||
ManagedDocumentResult& mdr) const {
|
||||
IndexLookupContext context(trx, &_collection, &mdr, 1);
|
||||
TRI_ASSERT(key.isString());
|
||||
MMFilesSimpleIndexElement* element =
|
||||
_primaryIndex->findByKeyRef(&context, key.begin());
|
||||
|
@ -373,16 +374,16 @@ Result MMFilesPrimaryIndex::insertKey(transaction::Methods* trx,
|
|||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
ManagedDocumentResult mmdr;
|
||||
return insertKey(trx, documentId, doc, mmdr, mode);
|
||||
ManagedDocumentResult mdr;
|
||||
return insertKey(trx, documentId, doc, mdr, mode);
|
||||
}
|
||||
|
||||
Result MMFilesPrimaryIndex::insertKey(transaction::Methods* trx,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
ManagedDocumentResult& mmdr,
|
||||
ManagedDocumentResult& mdr,
|
||||
OperationMode mode) {
|
||||
IndexLookupContext context(trx, &_collection, &mmdr, 1);
|
||||
IndexLookupContext context(trx, &_collection, &mdr, 1);
|
||||
MMFilesSimpleIndexElement element(buildKeyElement(documentId, doc));
|
||||
|
||||
// TODO: we can pass in a special IndexLookupContext which has some more on the information
|
||||
|
@ -407,16 +408,16 @@ Result MMFilesPrimaryIndex::removeKey(transaction::Methods* trx,
|
|||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
ManagedDocumentResult mmdr;
|
||||
return removeKey(trx, documentId, doc, mmdr, mode);
|
||||
ManagedDocumentResult mdr;
|
||||
return removeKey(trx, documentId, doc, mdr, mode);
|
||||
}
|
||||
|
||||
Result MMFilesPrimaryIndex::removeKey(transaction::Methods* trx,
|
||||
LocalDocumentId const&,
|
||||
VPackSlice const& doc,
|
||||
ManagedDocumentResult& mmdr,
|
||||
ManagedDocumentResult& mdr,
|
||||
OperationMode mode) {
|
||||
IndexLookupContext context(trx, &_collection, &mmdr, 1);
|
||||
IndexLookupContext context(trx, &_collection, &mdr, 1);
|
||||
VPackSlice keySlice(transaction::helpers::extractKeyFromDocument(doc));
|
||||
MMFilesSimpleIndexElement found =
|
||||
_primaryIndex->removeByKey(&context, keySlice.begin());
|
||||
|
|
|
@ -502,7 +502,7 @@ void MMFilesSkiplistInLookupBuilder::buildSearchValues() {
|
|||
|
||||
MMFilesSkiplistIterator::MMFilesSkiplistIterator(
|
||||
LogicalCollection* collection, transaction::Methods* trx,
|
||||
ManagedDocumentResult* mmdr, arangodb::MMFilesSkiplistIndex const* index,
|
||||
ManagedDocumentResult* mdr, arangodb::MMFilesSkiplistIndex const* index,
|
||||
TRI_Skiplist const* skiplist, size_t numPaths,
|
||||
std::function<int(void*, MMFilesSkiplistIndexElement const*,
|
||||
MMFilesSkiplistIndexElement const*,
|
||||
|
@ -510,7 +510,7 @@ MMFilesSkiplistIterator::MMFilesSkiplistIterator(
|
|||
bool reverse, MMFilesBaseSkiplistLookupBuilder* builder)
|
||||
: IndexIterator(collection, trx),
|
||||
_skiplistIndex(skiplist),
|
||||
_context(trx, collection, mmdr, index->fields().size()),
|
||||
_context(trx, collection, mdr, index->fields().size()),
|
||||
_numPaths(numPaths),
|
||||
_reverse(reverse),
|
||||
_cursor(nullptr),
|
||||
|
@ -1176,7 +1176,7 @@ bool MMFilesSkiplistIndex::findMatchingConditions(
|
|||
}
|
||||
|
||||
IndexIterator* MMFilesSkiplistIndex::iteratorForCondition(
|
||||
transaction::Methods* trx, ManagedDocumentResult* mmdr,
|
||||
transaction::Methods* trx, ManagedDocumentResult* mdr,
|
||||
arangodb::aql::AstNode const* node,
|
||||
arangodb::aql::Variable const* reference,
|
||||
IndexIteratorOptions const& opts) {
|
||||
|
@ -1207,7 +1207,7 @@ IndexIterator* MMFilesSkiplistIndex::iteratorForCondition(
|
|||
return new MMFilesSkiplistIterator(
|
||||
&_collection,
|
||||
trx,
|
||||
mmdr,
|
||||
mdr,
|
||||
this,
|
||||
_skiplistIndex,
|
||||
numPaths(),
|
||||
|
@ -1223,7 +1223,7 @@ IndexIterator* MMFilesSkiplistIndex::iteratorForCondition(
|
|||
return new MMFilesSkiplistIterator(
|
||||
&_collection,
|
||||
trx,
|
||||
mmdr,
|
||||
mdr,
|
||||
this,
|
||||
_skiplistIndex,
|
||||
numPaths(),
|
||||
|
|
|
@ -61,6 +61,7 @@
|
|||
#include "VocBase/KeyGenerator.h"
|
||||
#include "VocBase/LocalDocumentId.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
#include "VocBase/ticks.h"
|
||||
#include "VocBase/voc-types.h"
|
||||
|
||||
|
@@ -883,7 +884,7 @@ Result RocksDBCollection::insert(arangodb::transaction::Methods* trx,
   if (res.ok()) {
     trackWaitForSync(trx, options);
     if (options.silent) {
-      mdr.reset();
+      mdr.clear();
     } else {
       mdr.setManaged(newSlice.begin(), documentId);
       TRI_ASSERT(!mdr.empty());
@@ -950,8 +951,7 @@ Result RocksDBCollection::update(arangodb::transaction::Methods* trx,

   if (newSlice.length() <= 1) {
     // shortcut. no need to do anything
-    previous.clone(mdr);
-
+    mdr = previous;
     TRI_ASSERT(!mdr.empty());

     trackWaitForSync(trx, options);
@ -997,7 +997,7 @@ Result RocksDBCollection::update(arangodb::transaction::Methods* trx,
|
|||
trackWaitForSync(trx, options);
|
||||
|
||||
if (options.silent) {
|
||||
mdr.reset();
|
||||
mdr.clear();
|
||||
} else {
|
||||
mdr.setManaged(newDoc.begin(), documentId);
|
||||
TRI_ASSERT(!mdr.empty());
|
||||
|
@ -1107,7 +1107,7 @@ Result RocksDBCollection::replace(transaction::Methods* trx,
|
|||
trackWaitForSync(trx, options);
|
||||
|
||||
if (options.silent) {
|
||||
mdr.reset();
|
||||
mdr.clear();
|
||||
} else {
|
||||
mdr.setManaged(newDoc.begin(), documentId);
|
||||
TRI_ASSERT(!mdr.empty());
|
||||
|
@ -1532,7 +1532,7 @@ arangodb::Result RocksDBCollection::lookupDocumentVPack(
|
|||
auto f = _cache->find(key->string().data(),
|
||||
static_cast<uint32_t>(key->string().size()));
|
||||
if (f.found()) {
|
||||
std::string* value = mdr.prepareStringUsage();
|
||||
std::string* value = mdr.string();
|
||||
value->append(reinterpret_cast<char const*>(f.value()->value()),
|
||||
f.value()->valueSize());
|
||||
mdr.setManagedAfterStringUsage(documentId);
|
||||
|
@ -1545,7 +1545,7 @@ arangodb::Result RocksDBCollection::lookupDocumentVPack(
|
|||
}
|
||||
|
||||
RocksDBMethods* mthd = RocksDBTransactionState::toMethods(trx);
|
||||
std::string* value = mdr.prepareStringUsage();
|
||||
std::string* value = mdr.string();
|
||||
Result res = mthd->Get(RocksDBColumnFamily::documents(), key.ref(), value);
|
||||
|
||||
if (res.ok()) {
|
||||
|
@ -1577,7 +1577,7 @@ arangodb::Result RocksDBCollection::lookupDocumentVPack(
|
|||
<< "NOT FOUND rev: " << documentId.id() << " trx: " << trx->state()->id()
|
||||
<< " seq: " << mthd->sequenceNumber()
|
||||
<< " objectID " << _objectId << " name: " << _logicalCollection.name();
|
||||
mdr.reset();
|
||||
mdr.clear();
|
||||
}
|
||||
|
||||
return res;
|
||||
|
|
|
@ -30,7 +30,6 @@
|
|||
#include "RocksDBEngine/RocksDBCommon.h"
|
||||
#include "StorageEngine/PhysicalCollection.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
|
||||
namespace rocksdb {
|
||||
class Transaction;
|
||||
|
|
|
@ -38,6 +38,7 @@
|
|||
#include "Utils/OperationOptions.h"
|
||||
#include "VocBase/LocalDocumentId.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
|
||||
#include <velocypack/Builder.h>
|
||||
#include <velocypack/Iterator.h>
|
||||
|
|
|
@ -1724,111 +1724,18 @@ OperationResult transaction::Methods::insertLocal(
|
|||
}
|
||||
|
||||
if (res.ok() && replicationType == ReplicationType::LEADER) {
|
||||
TRI_ASSERT(followers != nullptr);
|
||||
|
||||
// Now replicate the same operation on all followers:
|
||||
Result r = replicateOperations(
|
||||
collection,
|
||||
value,
|
||||
resultBuilder,
|
||||
followers,
|
||||
arangodb::rest::RequestType::POST,
|
||||
"&" + StaticStrings::OverWrite + "=" + (options.overwrite ? "true" : "false")
|
||||
);
|
||||
|
||||
// In the multi babies case res is always TRI_ERROR_NO_ERROR if we
|
||||
// get here, in the single document case, we do not try to replicate
|
||||
// in case of an error.
|
||||
|
||||
// Now replicate the good operations on all followers:
|
||||
std::string path =
|
||||
"/_db/" + arangodb::basics::StringUtils::urlEncode(vocbase().name()) +
|
||||
"/_api/document/" + arangodb::basics::StringUtils::urlEncode(collection->name()) +
|
||||
"?isRestore=true&isSynchronousReplication=" + ServerState::instance()->getId() +
|
||||
"&" + StaticStrings::SilentString + "=true" +
|
||||
"&" + StaticStrings::OverWrite + "=" + (options.overwrite ? "true" : "false");
|
||||
|
||||
transaction::BuilderLeaser payload(this);
|
||||
|
||||
auto doOneDoc = [&](VPackSlice const& doc, VPackSlice result) {
|
||||
VPackObjectBuilder guard(payload.get());
|
||||
VPackSlice s = result.get(StaticStrings::KeyString);
|
||||
payload->add(StaticStrings::KeyString, s);
|
||||
s = result.get(StaticStrings::RevString);
|
||||
payload->add(StaticStrings::RevString, s);
|
||||
TRI_SanitizeObject(doc, *payload.get());
|
||||
};
|
||||
|
||||
VPackSlice ourResult = resultBuilder.slice();
|
||||
size_t count = 0;
|
||||
if (value.isArray()) {
|
||||
VPackArrayBuilder guard(payload.get());
|
||||
VPackArrayIterator itValue(value);
|
||||
VPackArrayIterator itResult(ourResult);
|
||||
while (itValue.valid() && itResult.valid()) {
|
||||
TRI_ASSERT((*itResult).isObject());
|
||||
if (!(*itResult).hasKey(StaticStrings::Error)) {
|
||||
doOneDoc(itValue.value(), itResult.value());
|
||||
count++;
|
||||
}
|
||||
itValue.next();
|
||||
itResult.next();
|
||||
}
|
||||
} else {
|
||||
doOneDoc(value, ourResult);
|
||||
count++;
|
||||
}
|
||||
if (count > 0) {
|
||||
auto body = std::make_shared<std::string>();
|
||||
*body = payload->slice().toJson();
|
||||
|
||||
// Now prepare the requests:
|
||||
std::vector<ClusterCommRequest> requests;
|
||||
requests.reserve(followers->size());
|
||||
|
||||
for (auto const& f : *followers) {
|
||||
requests.emplace_back("server:" + f, arangodb::rest::RequestType::POST,
|
||||
path, body);
|
||||
}
|
||||
auto cc = arangodb::ClusterComm::instance();
|
||||
if (cc != nullptr) {
|
||||
// nullptr only happens on controlled shutdown
|
||||
size_t nrDone = 0;
|
||||
size_t nrGood = cc->performRequests(requests,
|
||||
chooseTimeout(count, body->size()*followers->size()),
|
||||
nrDone, Logger::REPLICATION, false);
|
||||
if (nrGood < followers->size()) {
|
||||
// If any would-be-follower refused to follow there must be a
|
||||
// new leader in the meantime, in this case we must not allow
|
||||
// this operation to succeed, we simply return with a refusal
|
||||
// error (note that we use the follower version, since we have
|
||||
// lost leadership):
|
||||
if (findRefusal(requests)) {
|
||||
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED, options);
|
||||
}
|
||||
|
||||
// Otherwise we drop all followers that were not successful:
|
||||
for (size_t i = 0; i < followers->size(); ++i) {
|
||||
bool replicationWorked =
|
||||
requests[i].done &&
|
||||
requests[i].result.status == CL_COMM_RECEIVED &&
|
||||
(requests[i].result.answer_code ==
|
||||
rest::ResponseCode::ACCEPTED ||
|
||||
requests[i].result.answer_code == rest::ResponseCode::CREATED);
|
||||
if (replicationWorked) {
|
||||
bool found;
|
||||
requests[i].result.answer->header(StaticStrings::ErrorCodes,
|
||||
found);
|
||||
replicationWorked = !found;
|
||||
}
|
||||
if (!replicationWorked) {
|
||||
auto const& followerInfo = collection->followers();
|
||||
if (followerInfo->remove((*followers)[i])) {
|
||||
LOG_TOPIC(WARN, Logger::REPLICATION)
|
||||
<< "insertLocal: dropping follower " << (*followers)[i]
|
||||
<< " for shard " << collectionName;
|
||||
} else {
|
||||
LOG_TOPIC(ERR, Logger::REPLICATION)
|
||||
<< "insertLocal: could not drop follower "
|
||||
<< (*followers)[i] << " for shard " << collectionName;
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_CLUSTER_COULD_NOT_DROP_FOLLOWER);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (r.fail()) {
|
||||
return OperationResult(r);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2095,118 +2002,19 @@ OperationResult transaction::Methods::modifyLocal(
|
|||
res = workForOneDocument(newValue, false);
|
||||
}
|
||||
|
||||
// Now see whether or not we have to do synchronous replication:
|
||||
if (res.ok() && replicationType == ReplicationType::LEADER) {
|
||||
TRI_ASSERT(followers != nullptr);
|
||||
|
||||
// Now replicate the same operation on all followers:
|
||||
|
||||
// In the multi babies case res is always TRI_ERROR_NO_ERROR if we
|
||||
// get here, in the single document case, we do not try to replicate
|
||||
// in case of an error.
|
||||
Result r = replicateOperations(
|
||||
collection,
|
||||
newValue,
|
||||
resultBuilder,
|
||||
followers,
|
||||
(operation == TRI_VOC_DOCUMENT_OPERATION_REPLACE ? arangodb::rest::RequestType::PUT : arangodb::rest::RequestType::PATCH),
|
||||
""
|
||||
);
|
||||
|
||||
// Now replicate the good operations on all followers:
|
||||
auto cc = arangodb::ClusterComm::instance();
|
||||
if (cc != nullptr) {
|
||||
// nullptr only happens on controlled shutdown
|
||||
std::string path =
|
||||
"/_db/" +
|
||||
arangodb::basics::StringUtils::urlEncode(vocbase().name()) +
|
||||
"/_api/document/" +
|
||||
arangodb::basics::StringUtils::urlEncode(collection->name()) +
|
||||
"?isRestore=true&isSynchronousReplication=" +
|
||||
ServerState::instance()->getId() + "&" + StaticStrings::SilentString + "=true";
|
||||
|
||||
transaction::BuilderLeaser payload(this);
|
||||
|
||||
auto doOneDoc = [&](VPackSlice const& doc, VPackSlice result) {
|
||||
VPackObjectBuilder guard(payload.get());
|
||||
VPackSlice s = result.get(StaticStrings::KeyString);
|
||||
payload->add(StaticStrings::KeyString, s);
|
||||
s = result.get(StaticStrings::RevString);
|
||||
payload->add(StaticStrings::RevString, s);
|
||||
TRI_SanitizeObject(doc, *payload.get());
|
||||
};
|
||||
|
||||
VPackSlice ourResult = resultBuilder.slice();
|
||||
size_t count = 0;
|
||||
if (multiCase) {
|
||||
VPackArrayBuilder guard(payload.get());
|
||||
VPackArrayIterator itValue(newValue);
|
||||
VPackArrayIterator itResult(ourResult);
|
||||
while (itValue.valid() && itResult.valid()) {
|
||||
TRI_ASSERT((*itResult).isObject());
|
||||
if (!(*itResult).hasKey(StaticStrings::Error)) {
|
||||
doOneDoc(itValue.value(), itResult.value());
|
||||
count++;
|
||||
}
|
||||
itValue.next();
|
||||
itResult.next();
|
||||
}
|
||||
} else {
|
||||
VPackArrayBuilder guard(payload.get());
|
||||
doOneDoc(newValue, ourResult);
|
||||
count++;
|
||||
}
|
||||
if (count > 0) {
|
||||
auto body = std::make_shared<std::string>();
|
||||
*body = payload->slice().toJson();
|
||||
|
||||
// Now prepare the requests:
|
||||
std::vector<ClusterCommRequest> requests;
|
||||
requests.reserve(followers->size());
|
||||
|
||||
for (auto const& f : *followers) {
|
||||
requests.emplace_back("server:" + f,
|
||||
operation == TRI_VOC_DOCUMENT_OPERATION_REPLACE
|
||||
? arangodb::rest::RequestType::PUT
|
||||
: arangodb::rest::RequestType::PATCH,
|
||||
path, body);
|
||||
}
|
||||
size_t nrDone = 0;
|
||||
size_t nrGood = cc->performRequests(requests,
|
||||
chooseTimeout(count, body->size()*followers->size()),
|
||||
nrDone, Logger::REPLICATION, false);
|
||||
if (nrGood < followers->size()) {
|
||||
// If any would-be-follower refused to follow there must be a
|
||||
// new leader in the meantime, in this case we must not allow
|
||||
// this operation to succeed, we simply return with a refusal
|
||||
// error (note that we use the follower version, since we have
|
||||
// lost leadership):
|
||||
if (findRefusal(requests)) {
|
||||
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED);
|
||||
}
|
||||
|
||||
// Otherwise we drop all followers that were not successful:
|
||||
for (size_t i = 0; i < followers->size(); ++i) {
|
||||
bool replicationWorked =
|
||||
requests[i].done &&
|
||||
requests[i].result.status == CL_COMM_RECEIVED &&
|
||||
(requests[i].result.answer_code ==
|
||||
rest::ResponseCode::ACCEPTED ||
|
||||
requests[i].result.answer_code == rest::ResponseCode::OK);
|
||||
if (replicationWorked) {
|
||||
bool found;
|
||||
requests[i].result.answer->header(StaticStrings::ErrorCodes,
|
||||
found);
|
||||
replicationWorked = !found;
|
||||
}
|
||||
if (!replicationWorked) {
|
||||
auto const& followerInfo = collection->followers();
|
||||
if (followerInfo->remove((*followers)[i])) {
|
||||
LOG_TOPIC(WARN, Logger::REPLICATION)
|
||||
<< "modifyLocal: dropping follower " << (*followers)[i]
|
||||
<< " for shard " << collectionName;
|
||||
} else {
|
||||
LOG_TOPIC(ERR, Logger::REPLICATION)
|
||||
<< "modifyLocal: could not drop follower "
|
||||
<< (*followers)[i] << " for shard " << collectionName;
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_CLUSTER_COULD_NOT_DROP_FOLLOWER);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (r.fail()) {
|
||||
return OperationResult(r);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2392,116 +2200,19 @@ OperationResult transaction::Methods::removeLocal(
|
|||
res = workForOneDocument(value, false);
|
||||
}
|
||||
|
||||
// Now see whether or not we have to do synchronous replication:
|
||||
if (res.ok() && replicationType == ReplicationType::LEADER) {
|
||||
TRI_ASSERT(followers != nullptr);
|
||||
|
||||
// Now replicate the same operation on all followers:
|
||||
Result r = replicateOperations(
|
||||
collection,
|
||||
value,
|
||||
resultBuilder,
|
||||
followers,
|
||||
arangodb::rest::RequestType::DELETE_REQ,
|
||||
""
|
||||
);
|
||||
|
||||
// In the multi babies case res is always TRI_ERROR_NO_ERROR if we
|
||||
// get here, in the single document case, we do not try to replicate
|
||||
// in case of an error.
|
||||
|
||||
// Now replicate the good operations on all followers:
|
||||
auto cc = arangodb::ClusterComm::instance();
|
||||
if (cc != nullptr) {
|
||||
// nullptr only happens on controled shutdown
|
||||
|
||||
std::string path =
|
||||
"/_db/" +
|
||||
arangodb::basics::StringUtils::urlEncode(vocbase().name()) +
|
||||
"/_api/document/" +
|
||||
arangodb::basics::StringUtils::urlEncode(collection->name()) +
|
||||
"?isRestore=true&isSynchronousReplication=" +
|
||||
ServerState::instance()->getId() + "&" + StaticStrings::SilentString + "=true";
|
||||
|
||||
transaction::BuilderLeaser payload(this);
|
||||
|
||||
auto doOneDoc = [&](VPackSlice const& doc, VPackSlice result) {
|
||||
VPackObjectBuilder guard(payload.get());
|
||||
VPackSlice s = result.get(StaticStrings::KeyString);
|
||||
payload->add(StaticStrings::KeyString, s);
|
||||
s = result.get(StaticStrings::RevString);
|
||||
payload->add(StaticStrings::RevString, s);
|
||||
};
|
||||
|
||||
VPackSlice ourResult = resultBuilder.slice();
|
||||
size_t count = 0;
|
||||
if (value.isArray()) {
|
||||
VPackArrayBuilder guard(payload.get());
|
||||
VPackArrayIterator itValue(value);
|
||||
VPackArrayIterator itResult(ourResult);
|
||||
while (itValue.valid() && itResult.valid()) {
|
||||
TRI_ASSERT((*itResult).isObject());
|
||||
if (!(*itResult).hasKey(StaticStrings::Error)) {
|
||||
doOneDoc(itValue.value(), itResult.value());
|
||||
count++;
|
||||
}
|
||||
itValue.next();
|
||||
itResult.next();
|
||||
}
|
||||
} else {
|
||||
VPackArrayBuilder guard(payload.get());
|
||||
doOneDoc(value, ourResult);
|
||||
count++;
|
||||
}
|
||||
if (count > 0) {
|
||||
auto body = std::make_shared<std::string>();
|
||||
*body = payload->slice().toJson();
|
||||
|
||||
// Now prepare the requests:
|
||||
std::vector<ClusterCommRequest> requests;
|
||||
requests.reserve(followers->size());
|
||||
|
||||
for (auto const& f : *followers) {
|
||||
requests.emplace_back("server:" + f,
|
||||
arangodb::rest::RequestType::DELETE_REQ, path,
|
||||
body);
|
||||
}
|
||||
size_t nrDone = 0;
|
||||
size_t nrGood = cc->performRequests(requests,
|
||||
chooseTimeout(count, body->size()*followers->size()),
|
||||
nrDone, Logger::REPLICATION, false);
|
||||
if (nrGood < followers->size()) {
|
||||
// If any would-be-follower refused to follow there must be a
|
||||
// new leader in the meantime, in this case we must not allow
|
||||
// this operation to succeed, we simply return with a refusal
|
||||
// error (note that we use the follower version, since we have
|
||||
// lost leadership):
|
||||
if (findRefusal(requests)) {
|
||||
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED);
|
||||
}
|
||||
|
||||
// we drop all followers that were not successful:
|
||||
for (size_t i = 0; i < followers->size(); ++i) {
|
||||
bool replicationWorked =
|
||||
requests[i].done &&
|
||||
requests[i].result.status == CL_COMM_RECEIVED &&
|
||||
(requests[i].result.answer_code ==
|
||||
rest::ResponseCode::ACCEPTED ||
|
||||
requests[i].result.answer_code == rest::ResponseCode::OK);
|
||||
if (replicationWorked) {
|
||||
bool found;
|
||||
requests[i].result.answer->header(StaticStrings::ErrorCodes,
|
||||
found);
|
||||
replicationWorked = !found;
|
||||
}
|
||||
if (!replicationWorked) {
|
||||
auto const& followerInfo = collection->followers();
|
||||
if (followerInfo->remove((*followers)[i])) {
|
||||
LOG_TOPIC(WARN, Logger::REPLICATION)
|
||||
<< "removeLocal: dropping follower " << (*followers)[i]
|
||||
<< " for shard " << collectionName;
|
||||
} else {
|
||||
LOG_TOPIC(ERR, Logger::REPLICATION)
|
||||
<< "removeLocal: could not drop follower "
|
||||
<< (*followers)[i] << " for shard " << collectionName;
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_CLUSTER_COULD_NOT_DROP_FOLLOWER);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (r.fail()) {
|
||||
return OperationResult(r);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -3473,3 +3184,133 @@ Result transaction::Methods::resolveId(char const* handle, size_t length,

   return TRI_ERROR_NO_ERROR;
 }

 Result Methods::replicateOperations(LogicalCollection* collection,
                                     VPackSlice const& inputValue,
                                     VPackBuilder const& resultBuilder,
                                     std::shared_ptr<std::vector<std::string> const>& followers,
                                     arangodb::rest::RequestType requestType,
                                     std::string const& pathAppendix) {
   TRI_ASSERT(collection != nullptr);
   TRI_ASSERT(followers != nullptr);

   // Now replicate the good operations on all followers:
   auto cc = arangodb::ClusterComm::instance();

   if (cc == nullptr) {
     // nullptr happens only on controlled shutdown
     return Result();
   }

   transaction::BuilderLeaser payload(this);

   auto doOneDoc = [&](VPackSlice const& doc, VPackSlice result) {
     VPackObjectBuilder guard(payload.get());
     VPackSlice s = result.get(StaticStrings::KeyString);
     payload->add(StaticStrings::KeyString, s);
     s = result.get(StaticStrings::RevString);
     payload->add(StaticStrings::RevString, s);

     if (requestType != arangodb::rest::RequestType::DELETE_REQ) {
       // no need to add document data for remove operations
       TRI_SanitizeObject(doc, *payload.get());
     }
   };

   VPackSlice ourResult = resultBuilder.slice();
   size_t count = 0;
   if (inputValue.isArray()) {
     VPackArrayBuilder guard(payload.get());
     VPackArrayIterator itValue(inputValue);
     VPackArrayIterator itResult(ourResult);
     while (itValue.valid() && itResult.valid()) {
       TRI_ASSERT((*itResult).isObject());
       if (!(*itResult).hasKey(StaticStrings::Error)) {
         doOneDoc(itValue.value(), itResult.value());
         count++;
       }
       itValue.next();
       itResult.next();
     }
   } else {
     if (requestType == arangodb::rest::RequestType::POST) {
       doOneDoc(inputValue, ourResult);
     } else {
       VPackArrayBuilder guard(payload.get());
       doOneDoc(inputValue, ourResult);
     }
     count++;
   }

   if (count == 0) {
     // nothing to do
     return Result();
   }

   std::string path =
       "/_db/" + arangodb::basics::StringUtils::urlEncode(collection->vocbase().name()) +
       "/_api/document/" + arangodb::basics::StringUtils::urlEncode(collection->name()) +
       "?isRestore=true&isSynchronousReplication=" + ServerState::instance()->getId() +
       "&" + StaticStrings::SilentString + "=true" + pathAppendix;

   auto body = std::make_shared<std::string>();
   *body = payload->slice().toJson();

   // Now prepare the requests:
   std::vector<ClusterCommRequest> requests;
   requests.reserve(followers->size());

   for (auto const& f : *followers) {
     requests.emplace_back("server:" + f, requestType, path, body);
   }

   double const timeout = chooseTimeout(count, body->size() * followers->size());

   size_t nrDone = 0;
   size_t nrGood = cc->performRequests(requests, timeout, nrDone, Logger::REPLICATION, false);

   if (nrGood == followers->size()) {
     return Result();
   }

   // If any would-be-follower refused to follow there must be a
   // new leader in the meantime, in this case we must not allow
   // this operation to succeed, we simply return with a refusal
   // error (note that we use the follower version, since we have
   // lost leadership):
   if (findRefusal(requests)) {
     return Result(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED);
   }

   // Otherwise we drop all followers that were not successful:
   for (size_t i = 0; i < followers->size(); ++i) {
     bool replicationWorked =
         requests[i].done &&
         requests[i].result.status == CL_COMM_RECEIVED &&
         (requests[i].result.answer_code == rest::ResponseCode::ACCEPTED ||
          requests[i].result.answer_code == rest::ResponseCode::CREATED ||
          requests[i].result.answer_code == rest::ResponseCode::OK);
     if (replicationWorked) {
       bool found;
       requests[i].result.answer->header(StaticStrings::ErrorCodes, found);
       replicationWorked = !found;
     }
     if (!replicationWorked) {
       auto const& followerInfo = collection->followers();
       if (followerInfo->remove((*followers)[i])) {
         LOG_TOPIC(WARN, Logger::REPLICATION)
             << "synchronous replication: dropping follower " << (*followers)[i]
             << " for shard " << collection->name();
       } else {
         LOG_TOPIC(ERR, Logger::REPLICATION)
             << "synchronous replication: could not drop follower "
             << (*followers)[i] << " for shard " << collection->name();
         THROW_ARANGO_EXCEPTION(TRI_ERROR_CLUSTER_COULD_NOT_DROP_FOLLOWER);
       }
     }
   }

   // we return "ok" here still.
   return Result();
 }
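replicateOperations() above centralizes the follower fan-out that insertLocal, modifyLocal and removeLocal previously each spelled out inline: build one payload, send it to every follower, count the successes, and drop any follower whose reply is not a success. A condensed, self-contained sketch of that control flow (simplified stand-in types, not the real ClusterComm interface):

#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Reply { bool received; int status; };  // stand-in for ClusterCommResult

static bool replicate(std::vector<std::string>& followers, std::string const& body,
                      std::function<Reply(std::string const&, std::string const&)> const& send) {
  std::vector<Reply> replies;
  replies.reserve(followers.size());
  for (auto const& f : followers) {
    replies.push_back(send(f, body));        // one request per follower, same body
  }
  // keep only the followers whose reply counts as success (200/201/202 here)
  std::vector<std::string> stillGood;
  for (std::size_t i = 0; i < followers.size(); ++i) {
    bool ok = replies[i].received &&
              (replies[i].status == 200 || replies[i].status == 201 || replies[i].status == 202);
    if (ok) {
      stillGood.push_back(followers[i]);
    } else {
      std::cout << "dropping follower " << followers[i] << "\n";
    }
  }
  bool allGood = (stillGood.size() == followers.size());
  followers.swap(stillGood);
  return allGood;
}

int main() {
  std::vector<std::string> followers{"s1", "s2", "s3"};
  auto send = [](std::string const& f, std::string const&) {
    return Reply{true, f == "s2" ? 500 : 201};  // simulate one failing follower
  };
  bool all = replicate(followers, "{\"_key\":\"k\"}", send);
  std::cout << "all good: " << std::boolalpha << all
            << ", remaining followers: " << followers.size() << "\n";
  return 0;
}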
@@ -28,11 +28,12 @@
 #include "Basics/Exceptions.h"
 #include "Basics/StringRef.h"
 #include "Basics/Result.h"
 #include "Utils/OperationResult.h"
 #include "Rest/CommonDefines.h"
 #include "Transaction/CountCache.h"
 #include "Transaction/Hints.h"
 #include "Transaction/Options.h"
 #include "Transaction/Status.h"
 #include "Utils/OperationResult.h"
 #include "VocBase/AccessMode.h"
 #include "VocBase/vocbase.h"
 #include "VocBase/voc-types.h"
@@ -551,8 +552,15 @@ class Methods {
   ENTERPRISE_VIRT Result unlockRecursive(TRI_voc_cid_t, AccessMode::Type);

  private:
   /// @brief replicates operations from leader to follower(s)
   Result replicateOperations(LogicalCollection* collection,
                              arangodb::velocypack::Slice const& inputValue,
                              arangodb::velocypack::Builder const& resultBuilder,
                              std::shared_ptr<std::vector<std::string> const>& followers,
                              arangodb::rest::RequestType requestType,
                              std::string const& pathAppendix);

   /// @brief Helper create a Cluster Communication document
   /// @brief Helper create a Cluster Communication document
   OperationResult clusterResultDocument(
       rest::ResponseCode const& responseCode,
       std::shared_ptr<arangodb::velocypack::Builder> const& resultBody,
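For orientation, the insertLocal hunk earlier in this commit now calls the declared helper like this (fragment copied from the diff above; modifyLocal and removeLocal pass PUT/PATCH respectively DELETE_REQ with an empty pathAppendix):

Result r = replicateOperations(
    collection,
    value,
    resultBuilder,
    followers,
    arangodb::rest::RequestType::POST,
    "&" + StaticStrings::OverWrite + "=" + (options.overwrite ? "true" : "false"));

if (r.fail()) {
  return OperationResult(r);
}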
@@ -73,16 +73,12 @@ class LocalDocumentId {
   void clear() { _id = 0; }

   /// @brief create a not-set document id
-  // clang does not like:
-  // static constexpr LocalDocumentId none() { return LocalDocumentId(0); }
   static LocalDocumentId none() { return LocalDocumentId(0); }

   /// @brief create a new document id
   static LocalDocumentId create() { return LocalDocumentId(TRI_HybridLogicalClock()); }

   /// @brief create a document id from an existing id
-  // clang does not like:
-  // static constexpr LocalDocumentId create(BaseType id) { return LocalDocumentId(id); }
   static LocalDocumentId create(BaseType id) { return LocalDocumentId(id); }

  private:
@ -29,83 +29,36 @@
|
|||
#include <velocypack/velocypack-aliases.h>
|
||||
|
||||
using namespace arangodb;
|
||||
using namespace arangodb::aql;
|
||||
|
||||
void ManagedDocumentResult::clone(ManagedDocumentResult& cloned) const {
|
||||
cloned.reset();
|
||||
if (_useString) {
|
||||
cloned._useString = true;
|
||||
cloned._string = _string;
|
||||
cloned._localDocumentId = _localDocumentId;
|
||||
cloned._vpack = reinterpret_cast<uint8_t*>(const_cast<char*>(cloned._string.data()));
|
||||
} else if (_managed) {
|
||||
cloned.setManaged(_vpack, _localDocumentId);
|
||||
} else {
|
||||
cloned.setUnmanaged(_vpack, _localDocumentId);
|
||||
}
|
||||
}
|
||||
|
||||
//add unmanaged vpack
|
||||
void ManagedDocumentResult::setUnmanaged(uint8_t const* vpack, LocalDocumentId const& documentId) {
|
||||
if(_managed || _useString) {
|
||||
reset();
|
||||
}
|
||||
TRI_ASSERT(_length == 0);
|
||||
_string.clear();
|
||||
_vpack = const_cast<uint8_t*>(vpack);
|
||||
_localDocumentId = documentId;
|
||||
_managed = false;
|
||||
}
|
||||
|
||||
void ManagedDocumentResult::setManaged(uint8_t const* vpack, LocalDocumentId const& documentId) {
|
||||
VPackSlice slice(vpack);
|
||||
auto newLen = slice.byteSize();
|
||||
if (_length >= newLen && _managed){
|
||||
std::memcpy(_vpack, vpack, newLen);
|
||||
} else {
|
||||
reset();
|
||||
_vpack = new uint8_t[newLen];
|
||||
std::memcpy(_vpack, vpack, newLen);
|
||||
_length=newLen;
|
||||
}
|
||||
_string.assign(reinterpret_cast<char const*>(vpack), VPackSlice(vpack).byteSize());
|
||||
_vpack = nullptr;
|
||||
_localDocumentId = documentId;
|
||||
_managed = true;
|
||||
}
|
||||
|
||||
void ManagedDocumentResult::setManagedAfterStringUsage(LocalDocumentId const& documentId) {
|
||||
TRI_ASSERT(!_string.empty());
|
||||
TRI_ASSERT(_useString);
|
||||
|
||||
_vpack = reinterpret_cast<uint8_t*>(const_cast<char*>(_string.data()));
|
||||
_localDocumentId = documentId;
|
||||
_useString = true;
|
||||
}
|
||||
|
||||
void ManagedDocumentResult::setManaged(std::string&& str, LocalDocumentId const& documentId) {
|
||||
reset();
|
||||
_string = std::move(str);
|
||||
_vpack = reinterpret_cast<uint8_t*>(const_cast<char*>(_string.data()));
|
||||
_localDocumentId = documentId;
|
||||
_useString = true;
|
||||
}
|
||||
|
||||
void ManagedDocumentResult::reset() noexcept {
|
||||
if (_managed) {
|
||||
delete[] _vpack;
|
||||
}
|
||||
_managed = false;
|
||||
_length = 0;
|
||||
|
||||
if (_useString) {
|
||||
_string.clear();
|
||||
_useString = false;
|
||||
}
|
||||
|
||||
_localDocumentId.clear();
|
||||
_vpack = nullptr;
|
||||
_localDocumentId = documentId;
|
||||
_managed = true;
|
||||
}
|
||||
|
||||
void ManagedDocumentResult::addToBuilder(velocypack::Builder& builder, bool allowExternals) const {
|
||||
TRI_ASSERT(!empty());
|
||||
auto slice = velocypack::Slice(_vpack);
|
||||
uint8_t const* vpack;
|
||||
if (_managed) {
|
||||
vpack = reinterpret_cast<uint8_t const*>(_string.data());
|
||||
} else {
|
||||
vpack = _vpack;
|
||||
}
|
||||
TRI_ASSERT(vpack != nullptr);
|
||||
auto slice = velocypack::Slice(vpack);
|
||||
TRI_ASSERT(!slice.isExternal());
|
||||
if (allowExternals && canUseInExternal()) {
|
||||
builder.addExternal(slice.begin());
|
||||
|
|
|
@ -33,83 +33,75 @@ namespace velocypack {
|
|||
class Builder;
|
||||
}
|
||||
|
||||
namespace aql {
|
||||
struct AqlValue;
|
||||
}
|
||||
|
||||
class ManagedDocumentResult {
|
||||
public:
|
||||
ManagedDocumentResult() :
|
||||
_length(0),
|
||||
_localDocumentId(),
|
||||
_vpack(nullptr),
|
||||
_managed(false),
|
||||
_useString(false) {}
|
||||
~ManagedDocumentResult() { reset(); }
|
||||
ManagedDocumentResult(ManagedDocumentResult const& other) = delete;
|
||||
ManagedDocumentResult& operator=(ManagedDocumentResult const& other) = delete;
|
||||
_managed(false) {}
|
||||
|
||||
ManagedDocumentResult(ManagedDocumentResult const& other) = default;
|
||||
ManagedDocumentResult& operator=(ManagedDocumentResult const& other) = default;
|
||||
|
||||
ManagedDocumentResult& operator=(ManagedDocumentResult&& other) {
|
||||
if (other._useString) {
|
||||
setManaged(std::move(other._string), other._localDocumentId);
|
||||
other._managed = false;
|
||||
other.reset();
|
||||
} else if (other._managed) {
|
||||
reset();
|
||||
_vpack = other._vpack;
|
||||
_length = other._length;
|
||||
_localDocumentId = other._localDocumentId;
|
||||
_managed = true;
|
||||
other._managed = false;
|
||||
other.reset();
|
||||
} else {
|
||||
setUnmanaged(other._vpack, other._localDocumentId);
|
||||
}
|
||||
ManagedDocumentResult& operator=(ManagedDocumentResult&& other) noexcept {
|
||||
_string = std::move(other._string);
|
||||
_vpack = other._vpack;
|
||||
_localDocumentId = other._localDocumentId;
|
||||
_managed = other._managed;
|
||||
|
||||
other.clear();
|
||||
return *this;
|
||||
}
|
||||
|
||||
ManagedDocumentResult(ManagedDocumentResult&& other) = delete;
|
||||
ManagedDocumentResult(ManagedDocumentResult&& other) noexcept
|
||||
: _string(std::move(other._string)),
|
||||
_vpack(other._vpack),
|
||||
_localDocumentId(other._localDocumentId),
|
||||
_managed(other._managed) {
|
||||
other.clear();
|
||||
}
|
||||
|
||||
void clone(ManagedDocumentResult& cloned) const;
|
||||
|
||||
//add unmanaged vpack
|
||||
void setUnmanaged(uint8_t const* vpack, LocalDocumentId const& documentId);
|
||||
|
||||
void setManaged(uint8_t const* vpack, LocalDocumentId const& documentId);
|
||||
void setManaged(std::string&& str, LocalDocumentId const& documentId);
|
||||
|
||||
void setManagedAfterStringUsage(LocalDocumentId const& documentId);
|
||||
|
||||
inline LocalDocumentId localDocumentId() const { return _localDocumentId; }
|
||||
|
||||
void reset() noexcept;
|
||||
|
||||
std::string* prepareStringUsage() {
|
||||
reset();
|
||||
_useString = true;
|
||||
void clear() noexcept {
|
||||
_string.clear();
|
||||
_vpack = nullptr;
|
||||
_localDocumentId.clear();
|
||||
_managed = false;
|
||||
}
|
||||
|
||||
std::string* string() {
|
||||
return &_string;
|
||||
}
|
||||
|
||||
void setManagedAfterStringUsage(LocalDocumentId const& documentId);
|
||||
|
||||
inline uint8_t const* vpack() const {
|
||||
if (_managed) {
|
||||
return reinterpret_cast<uint8_t const*>(_string.data());
|
||||
}
|
||||
TRI_ASSERT(_vpack != nullptr);
|
||||
return _vpack;
|
||||
}
|
||||
|
||||
inline bool empty() const { return _vpack == nullptr; }
|
||||
|
||||
inline bool empty() const {
|
||||
return (!_managed && _vpack == nullptr);
|
||||
}
|
||||
|
||||
inline bool canUseInExternal() const {
|
||||
return (!_managed && !_useString);
|
||||
return !_managed;
|
||||
}
|
||||
|
||||
void addToBuilder(velocypack::Builder& builder, bool allowExternals) const;
|
||||
|
||||
private:
|
||||
uint64_t _length;
|
||||
LocalDocumentId _localDocumentId;
|
||||
uint8_t* _vpack;
|
||||
std::string _string;
|
||||
uint8_t* _vpack;
|
||||
LocalDocumentId _localDocumentId;
|
||||
bool _managed;
|
||||
bool _useString;
|
||||
};
|
||||
|
||||
}
|
||||
|
|
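The reworked ManagedDocumentResult above keeps either an owned copy of the document in _string or a raw pointer to externally owned memory, which is what makes the defaulted copy operations safe and lets the old reset()/prepareStringUsage() pair collapse into clear()/string(). A simplified, self-contained sketch of that ownership model (illustrative class, not the real one):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>

class DocumentResult {
 public:
  void setManaged(char const* data, std::size_t len) {
    _string.assign(data, len);   // own a private copy of the document bytes
    _external = nullptr;
    _managed = true;
  }
  void setUnmanaged(std::uint8_t const* vpack) {
    _string.clear();             // point at memory owned by someone else
    _external = vpack;
    _managed = false;
  }
  std::uint8_t const* vpack() const {
    if (_managed) {
      return reinterpret_cast<std::uint8_t const*>(_string.data());
    }
    return _external;
  }
  bool canUseInExternal() const { return !_managed; }

 private:
  std::string _string;                       // used when _managed == true
  std::uint8_t const* _external = nullptr;   // used when _managed == false
  bool _managed = false;
};

int main() {
  DocumentResult a;
  a.setManaged("\x0a", 1);       // velocypack bytes for an empty object, copied
  DocumentResult b = a;          // default copy is now well defined
  assert(b.vpack() != a.vpack());
  assert(!b.canUseInExternal());
  return 0;
}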