mirror of https://gitee.com/bigwinds/arangodb
Minor refactoring (#7408)
This commit is contained in:
parent
2ca8a4fd36
commit
d5cb94d2d0
|
@ -385,7 +385,7 @@ void Query::prepare(QueryRegistry* registry) {
|
|||
|
||||
int res = trx->addCollections(*_collections.collections());
|
||||
|
||||
if(!trx->transactionContextPtr()->getParentTransaction()){
|
||||
if(!trx->transactionContextPtr()->getParentTransaction()) {
|
||||
trx->addHint(transaction::Hints::Hint::FROM_TOPLEVEL_AQL);
|
||||
}
|
||||
|
||||
|
|
|
@ -81,9 +81,9 @@ void AgencyCallback::refetchAndUpdate(bool needToAcquireMutex, bool forceCheck)
|
|||
|
||||
if (needToAcquireMutex) {
|
||||
CONDITION_LOCKER(locker, _cv);
|
||||
checkValue(newData, forceCheck);
|
||||
checkValue(std::move(newData), forceCheck);
|
||||
} else {
|
||||
checkValue(newData, forceCheck);
|
||||
checkValue(std::move(newData), forceCheck);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1009,6 +1009,36 @@ void Index::warmup(arangodb::transaction::Methods*,
|
|||
// it has to explicitly implement it.
|
||||
}
|
||||
|
||||
/// @brief generate error message
|
||||
/// @param key the conflicting key
|
||||
Result& Index::addErrorMsg(Result& r, std::string const& key) {
|
||||
// now provide more context based on index
|
||||
r.appendErrorMessage(" - in index ");
|
||||
r.appendErrorMessage(std::to_string(_iid));
|
||||
r.appendErrorMessage(" of type ");
|
||||
r.appendErrorMessage(typeName());
|
||||
|
||||
// build fields string
|
||||
r.appendErrorMessage(" over '");
|
||||
|
||||
for (size_t i = 0; i < _fields.size(); i++) {
|
||||
std::string msg;
|
||||
TRI_AttributeNamesToString(_fields[i], msg);
|
||||
r.appendErrorMessage(msg);
|
||||
if (i != _fields.size() - 1) {
|
||||
r.appendErrorMessage(", ");
|
||||
}
|
||||
}
|
||||
r.appendErrorMessage("'");
|
||||
|
||||
// provide conflicting key
|
||||
if (!key.empty()) {
|
||||
r.appendErrorMessage("; conflicting key: ");
|
||||
r.appendErrorMessage(key);
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
/// @brief append the index description to an output stream
|
||||
std::ostream& operator<<(std::ostream& stream, arangodb::Index const* index) {
|
||||
stream << index->context();
|
||||
|
|
|
@ -369,6 +369,23 @@ class Index {
|
|||
|
||||
static size_t sortWeight(arangodb::aql::AstNode const* node);
|
||||
|
||||
protected:
|
||||
|
||||
/// @brief generate error result
|
||||
/// @param code the error key
|
||||
/// @param key the conflicting key
|
||||
arangodb::Result& addErrorMsg(Result& r, int code, std::string const& key = "") {
|
||||
if (code != TRI_ERROR_NO_ERROR) {
|
||||
r.reset(code);
|
||||
return addErrorMsg(r, key);
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
/// @brief generate error result
|
||||
/// @param key the conflicting key
|
||||
arangodb::Result& addErrorMsg(Result& r, std::string const& key = "");
|
||||
|
||||
protected:
|
||||
TRI_idx_iid_t const _iid;
|
||||
LogicalCollection& _collection;
|
||||
|
|
|
@ -1,113 +0,0 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Daniel H. Larkin
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef ARANGODB_INDEXES_INDEX_RESULT_H
|
||||
#define ARANGODB_INDEXES_INDEX_RESULT_H 1
|
||||
|
||||
#include "Basics/Common.h"
|
||||
#include "Basics/Result.h"
|
||||
#include "Indexes/Index.h"
|
||||
|
||||
#include <velocypack/Builder.h>
|
||||
#include <velocypack/Slice.h>
|
||||
|
||||
namespace arangodb {
|
||||
class IndexResult : public Result {
|
||||
public:
|
||||
IndexResult() : Result() {}
|
||||
|
||||
IndexResult(int errorNumber, std::string const& errorMessage)
|
||||
: Result(errorNumber, errorMessage) {}
|
||||
|
||||
IndexResult(int errorNumber, std::string&& errorMessage)
|
||||
: Result(errorNumber, std::move(errorMessage)) {}
|
||||
|
||||
IndexResult(int errorNumber, Index const* index) : Result(errorNumber) {
|
||||
if (errorNumber != TRI_ERROR_NO_ERROR && index != nullptr) {
|
||||
// now provide more context based on index
|
||||
std::string msg = errorMessage();
|
||||
msg.append(" - in index ");
|
||||
msg.append(std::to_string(index->id()));
|
||||
msg.append(" of type ");
|
||||
msg.append(index->typeName());
|
||||
|
||||
// build fields string
|
||||
VPackBuilder builder;
|
||||
index->toVelocyPack(builder, Index::makeFlags());
|
||||
VPackSlice fields = builder.slice().get("fields");
|
||||
if (!fields.isNone()) {
|
||||
msg.append(" over ");
|
||||
msg.append(fields.toJson());
|
||||
}
|
||||
Result::reset(errorNumber, msg);
|
||||
}
|
||||
}
|
||||
|
||||
IndexResult(int errorNumber, Index const* index, std::string const& key)
|
||||
: Result() {
|
||||
IndexResult::reset(errorNumber, index, key);
|
||||
}
|
||||
|
||||
IndexResult& reset(int errorNumber, Index const* index, std::string const& key) {
|
||||
Result::reset(errorNumber);
|
||||
if (errorNumber != TRI_ERROR_NO_ERROR && index != nullptr) {
|
||||
// now provide more context based on index
|
||||
std::string msg = errorMessage();
|
||||
msg.append(" - in index ");
|
||||
msg.append(std::to_string(index->id()));
|
||||
msg.append(" of type ");
|
||||
msg.append(index->typeName());
|
||||
|
||||
// build fields string
|
||||
VPackBuilder builder;
|
||||
index->toVelocyPack(builder, Index::makeFlags());
|
||||
VPackSlice fields = builder.slice().get("fields");
|
||||
if (!fields.isNone()) {
|
||||
msg.append(" over ");
|
||||
msg.append(fields.toJson());
|
||||
}
|
||||
|
||||
// provide conflicting key
|
||||
if (!key.empty()) {
|
||||
msg.append("; conflicting key: ");
|
||||
msg.append(key);
|
||||
}
|
||||
Result::reset(errorNumber, msg);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
IndexResult& reset(Result& res, Index const* index) {
|
||||
Result::reset(res);
|
||||
IndexResult::reset(res.errorNumber(), index, StaticStrings::Empty);
|
||||
return *this;
|
||||
}
|
||||
|
||||
IndexResult& reset(int res, std::string const& msg) {
|
||||
Result::reset(res, msg);
|
||||
return *this;
|
||||
}
|
||||
|
||||
};
|
||||
} // namespace arangodb
|
||||
|
||||
#endif
|
|
@ -31,7 +31,6 @@
|
|||
#include "Basics/fasthash.h"
|
||||
#include "Basics/hashes.h"
|
||||
#include "MMFiles/MMFilesIndexLookupContext.h"
|
||||
#include "Indexes/IndexResult.h"
|
||||
#include "Indexes/SimpleAttributeEqualityMatcher.h"
|
||||
#include "MMFiles/MMFilesCollection.h"
|
||||
#include "StorageEngine/TransactionState.h"
|
||||
|
@ -279,6 +278,8 @@ Result MMFilesEdgeIndex::insert(transaction::Methods* trx,
|
|||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
Result res;
|
||||
|
||||
MMFilesSimpleIndexElement fromElement(buildFromElement(documentId, doc));
|
||||
MMFilesSimpleIndexElement toElement(buildToElement(documentId, doc));
|
||||
ManagedDocumentResult result;
|
||||
|
@ -293,22 +294,24 @@ Result MMFilesEdgeIndex::insert(transaction::Methods* trx,
|
|||
} catch (std::bad_alloc const&) {
|
||||
// roll back partial insert
|
||||
_edgesFrom->remove(&context, fromElement);
|
||||
|
||||
return IndexResult(TRI_ERROR_OUT_OF_MEMORY, this);
|
||||
res.reset(TRI_ERROR_OUT_OF_MEMORY);
|
||||
return addErrorMsg(res);
|
||||
} catch (...) {
|
||||
// roll back partial insert
|
||||
_edgesFrom->remove(&context, fromElement);
|
||||
|
||||
return IndexResult(TRI_ERROR_INTERNAL, this);
|
||||
res.reset(TRI_ERROR_INTERNAL);
|
||||
return addErrorMsg(res);
|
||||
}
|
||||
|
||||
return Result(TRI_ERROR_NO_ERROR);
|
||||
return res;
|
||||
}
|
||||
|
||||
Result MMFilesEdgeIndex::remove(transaction::Methods* trx,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
Result res;
|
||||
|
||||
MMFilesSimpleIndexElement fromElement(buildFromElement(documentId, doc));
|
||||
MMFilesSimpleIndexElement toElement(buildToElement(documentId, doc));
|
||||
ManagedDocumentResult result;
|
||||
|
@ -318,13 +321,13 @@ Result MMFilesEdgeIndex::remove(transaction::Methods* trx,
|
|||
_edgesFrom->remove(&context, fromElement);
|
||||
_edgesTo->remove(&context, toElement);
|
||||
|
||||
return Result(TRI_ERROR_NO_ERROR);
|
||||
return res;
|
||||
} catch (...) {
|
||||
if (mode == OperationMode::rollback) {
|
||||
return Result(TRI_ERROR_NO_ERROR);
|
||||
return res;
|
||||
}
|
||||
|
||||
return IndexResult(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND, this);
|
||||
res.reset(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND);
|
||||
return addErrorMsg(res);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -28,7 +28,6 @@
|
|||
#include "Basics/StringRef.h"
|
||||
#include "Basics/Utf8Helper.h"
|
||||
#include "Basics/VelocyPackHelper.h"
|
||||
#include "Indexes/IndexResult.h"
|
||||
#include "Logger/Logger.h"
|
||||
#include "MMFiles/mmfiles-fulltext-index.h"
|
||||
#include "MMFiles/mmfiles-fulltext-query.h"
|
||||
|
@ -218,27 +217,32 @@ bool MMFilesFulltextIndex::matchesDefinition(VPackSlice const& info) const {
|
|||
Result MMFilesFulltextIndex::insert(transaction::Methods*,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc, OperationMode mode) {
|
||||
int res = TRI_ERROR_NO_ERROR;
|
||||
Result res;
|
||||
int r = TRI_ERROR_NO_ERROR;
|
||||
std::set<std::string> words = wordlist(doc);
|
||||
|
||||
if (!words.empty()) {
|
||||
res =
|
||||
TRI_InsertWordsMMFilesFulltextIndex(_fulltextIndex, documentId, words);
|
||||
r = TRI_InsertWordsMMFilesFulltextIndex(_fulltextIndex, documentId, words);
|
||||
}
|
||||
return IndexResult(res, this);
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
addErrorMsg(res, r);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
Result MMFilesFulltextIndex::remove(transaction::Methods*,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc, OperationMode mode) {
|
||||
int res = TRI_ERROR_NO_ERROR;
|
||||
Result res;
|
||||
int r = TRI_ERROR_NO_ERROR;
|
||||
std::set<std::string> words = wordlist(doc);
|
||||
|
||||
if (!words.empty()) {
|
||||
res =
|
||||
TRI_RemoveWordsMMFilesFulltextIndex(_fulltextIndex, documentId, words);
|
||||
r = TRI_RemoveWordsMMFilesFulltextIndex(_fulltextIndex, documentId, words);
|
||||
}
|
||||
return IndexResult(res, this);
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
addErrorMsg(res, r);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
void MMFilesFulltextIndex::unload() {
|
||||
|
|
|
@ -32,7 +32,6 @@
|
|||
#include "Geo/GeoUtils.h"
|
||||
#include "GeoIndex/Near.h"
|
||||
#include "Indexes/IndexIterator.h"
|
||||
#include "Indexes/IndexResult.h"
|
||||
#include "Logger/Logger.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
|
@ -340,8 +339,10 @@ Result MMFilesGeoIndex::insert(transaction::Methods*,
|
|||
Result res = geo_index::Index::indexCells(doc, cells, centroid);
|
||||
|
||||
if (res.fail()) {
|
||||
// Invalid, no insert. Index is sparse
|
||||
return res.is(TRI_ERROR_BAD_PARAMETER) ? IndexResult() : res;
|
||||
if (res.is(TRI_ERROR_BAD_PARAMETER)) {
|
||||
res.reset(); // Invalid, no insert. Index is sparse
|
||||
}
|
||||
return res;
|
||||
}
|
||||
// LOG_TOPIC(ERR, Logger::ENGINES) << "Inserting #cells " << cells.size() << "
|
||||
// doc: " << doc.toJson() << " center: " << centroid.toString();
|
||||
|
@ -353,7 +354,7 @@ Result MMFilesGeoIndex::insert(transaction::Methods*,
|
|||
_tree.insert(std::make_pair(cell, value));
|
||||
}
|
||||
|
||||
return IndexResult();
|
||||
return res;
|
||||
}
|
||||
|
||||
Result MMFilesGeoIndex::remove(transaction::Methods*,
|
||||
|
@ -366,8 +367,10 @@ Result MMFilesGeoIndex::remove(transaction::Methods*,
|
|||
Result res = geo_index::Index::indexCells(doc, cells, centroid);
|
||||
|
||||
if (res.fail()) { // might occur if insert is rolled back
|
||||
// Invalid, no insert. Index is sparse
|
||||
return res.is(TRI_ERROR_BAD_PARAMETER) ? IndexResult() : res;
|
||||
if (res.is(TRI_ERROR_BAD_PARAMETER)) {
|
||||
res.reset(); // Invalid, no remove. Index is sparse
|
||||
}
|
||||
return res;
|
||||
}
|
||||
// LOG_TOPIC(ERR, Logger::ENGINES) << "Removing #cells " << cells.size() << "
|
||||
// doc: " << doc.toJson();
|
||||
|
@ -383,7 +386,7 @@ Result MMFilesGeoIndex::remove(transaction::Methods*,
|
|||
}
|
||||
}
|
||||
}
|
||||
return IndexResult();
|
||||
return res;
|
||||
}
|
||||
|
||||
/// @brief creates an IndexIterator for the given Condition
|
||||
|
|
|
@ -31,7 +31,6 @@
|
|||
#include "Basics/SmallVector.h"
|
||||
#include "Basics/StaticStrings.h"
|
||||
#include "Basics/VelocyPackHelper.h"
|
||||
#include "Indexes/IndexResult.h"
|
||||
#include "Indexes/SimpleAttributeEqualityMatcher.h"
|
||||
#include "MMFiles/MMFilesCollection.h"
|
||||
#include "MMFiles/MMFilesIndexLookupContext.h"
|
||||
|
@ -483,7 +482,7 @@ Result MMFilesHashIndex::insert(transaction::Methods* trx,
|
|||
return insertUnique(trx, documentId, doc, mode);
|
||||
}
|
||||
|
||||
return IndexResult(insertMulti(trx, documentId, doc, mode), this);
|
||||
return insertMulti(trx, documentId, doc, mode);
|
||||
}
|
||||
|
||||
/// @brief removes an entry from the hash array part of the hash index
|
||||
|
@ -491,14 +490,16 @@ Result MMFilesHashIndex::remove(transaction::Methods* trx,
|
|||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
std::vector<MMFilesHashIndexElement*> elements;
|
||||
int res = fillElement<MMFilesHashIndexElement>(elements, documentId, doc);
|
||||
Result res;
|
||||
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
std::vector<MMFilesHashIndexElement*> elements;
|
||||
int r = fillElement<MMFilesHashIndexElement>(elements, documentId, doc);
|
||||
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
for (auto& hashElement : elements) {
|
||||
_allocator->deallocate(hashElement);
|
||||
}
|
||||
return IndexResult(res, this);
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
|
||||
for (auto& hashElement : elements) {
|
||||
|
@ -512,12 +513,12 @@ Result MMFilesHashIndex::remove(transaction::Methods* trx,
|
|||
// we may be looping through this multiple times, and if an error
|
||||
// occurs, we want to keep it
|
||||
if (result != TRI_ERROR_NO_ERROR) {
|
||||
res = result;
|
||||
addErrorMsg(res, result);
|
||||
}
|
||||
_allocator->deallocate(hashElement);
|
||||
}
|
||||
|
||||
return IndexResult(res, this);
|
||||
return res;
|
||||
}
|
||||
|
||||
void MMFilesHashIndex::batchInsert(
|
||||
|
@ -599,16 +600,17 @@ Result MMFilesHashIndex::insertUnique(transaction::Methods* trx,
|
|||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
std::vector<MMFilesHashIndexElement*> elements;
|
||||
int res = fillElement<MMFilesHashIndexElement>(elements, documentId, doc);
|
||||
Result res;
|
||||
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
std::vector<MMFilesHashIndexElement*> elements;
|
||||
int r = fillElement<MMFilesHashIndexElement>(elements, documentId, doc);
|
||||
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
for (auto& it : elements) {
|
||||
// free all elements to prevent leak
|
||||
_allocator->deallocate(it);
|
||||
}
|
||||
|
||||
return IndexResult(res, this);
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
|
||||
ManagedDocumentResult result;
|
||||
|
@ -624,11 +626,11 @@ Result MMFilesHashIndex::insertUnique(transaction::Methods* trx,
|
|||
|
||||
for (size_t i = 0; i < n; ++i) {
|
||||
auto hashElement = elements[i];
|
||||
res = work(hashElement, mode);
|
||||
r = work(hashElement, mode);
|
||||
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
IndexResult error(res, this);
|
||||
if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
|
||||
if (r == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
|
||||
LocalDocumentId rev(_uniqueArray->_hashArray->find(&context, hashElement)->localDocumentId());
|
||||
std::string existingId;
|
||||
|
||||
|
@ -637,10 +639,12 @@ Result MMFilesHashIndex::insertUnique(transaction::Methods* trx,
|
|||
});
|
||||
|
||||
if (mode == OperationMode::internal) {
|
||||
error = IndexResult(res, std::move(existingId));
|
||||
res.reset(r, std::move(existingId));
|
||||
} else {
|
||||
error = IndexResult(res, this, existingId);
|
||||
addErrorMsg(res, r, existingId);
|
||||
}
|
||||
} else {
|
||||
addErrorMsg(res, r);
|
||||
}
|
||||
|
||||
for (size_t j = i; j < n; ++j) {
|
||||
|
@ -648,12 +652,11 @@ Result MMFilesHashIndex::insertUnique(transaction::Methods* trx,
|
|||
_allocator->deallocate(elements[j]);
|
||||
}
|
||||
|
||||
// Already indexed elements will be removed by the rollback
|
||||
return std::move(error);
|
||||
break; // Already indexed elements will be removed by the rollback
|
||||
}
|
||||
}
|
||||
|
||||
return IndexResult(res, this);
|
||||
return res;
|
||||
}
|
||||
|
||||
void MMFilesHashIndex::batchInsertUnique(
|
||||
|
@ -715,18 +718,20 @@ void MMFilesHashIndex::batchInsertUnique(
|
|||
queue->enqueueCallback(cbTask);
|
||||
}
|
||||
|
||||
int MMFilesHashIndex::insertMulti(transaction::Methods* trx,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc, OperationMode mode) {
|
||||
std::vector<MMFilesHashIndexElement*> elements;
|
||||
int res = fillElement<MMFilesHashIndexElement>(elements, documentId, doc);
|
||||
Result MMFilesHashIndex::insertMulti(transaction::Methods* trx,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc, OperationMode mode) {
|
||||
Result res;
|
||||
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
std::vector<MMFilesHashIndexElement*> elements;
|
||||
int r = fillElement<MMFilesHashIndexElement>(elements, documentId, doc);
|
||||
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
for (auto& hashElement : elements) {
|
||||
_allocator->deallocate(hashElement);
|
||||
}
|
||||
|
||||
return res;
|
||||
return res.reset(r);
|
||||
}
|
||||
|
||||
ManagedDocumentResult result;
|
||||
|
@ -754,14 +759,14 @@ int MMFilesHashIndex::insertMulti(transaction::Methods* trx,
|
|||
try {
|
||||
work(hashElement, mode);
|
||||
} catch (arangodb::basics::Exception const& ex) {
|
||||
res = ex.code();
|
||||
r = ex.code();
|
||||
} catch (std::bad_alloc const&) {
|
||||
res = TRI_ERROR_OUT_OF_MEMORY;
|
||||
r = TRI_ERROR_OUT_OF_MEMORY;
|
||||
} catch (...) {
|
||||
res = TRI_ERROR_INTERNAL;
|
||||
r = TRI_ERROR_INTERNAL;
|
||||
}
|
||||
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
for (size_t j = i; j < n; ++j) {
|
||||
// Free all elements that are not yet in the index
|
||||
_allocator->deallocate(elements[j]);
|
||||
|
@ -772,12 +777,11 @@ int MMFilesHashIndex::insertMulti(transaction::Methods* trx,
|
|||
removeMultiElement(trx, elements[j], mode);
|
||||
}
|
||||
}
|
||||
|
||||
return res;
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
}
|
||||
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
return res;
|
||||
}
|
||||
|
||||
void MMFilesHashIndex::batchInsertMulti(
|
||||
|
|
|
@ -310,8 +310,8 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
|
|||
std::vector<std::pair<LocalDocumentId, arangodb::velocypack::Slice>> const&,
|
||||
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue);
|
||||
|
||||
int insertMulti(transaction::Methods*, LocalDocumentId const& documentId,
|
||||
arangodb::velocypack::Slice const&, OperationMode mode);
|
||||
Result insertMulti(transaction::Methods*, LocalDocumentId const& documentId,
|
||||
arangodb::velocypack::Slice const&, OperationMode mode);
|
||||
|
||||
void batchInsertMulti(
|
||||
transaction::Methods*,
|
||||
|
|
|
@ -29,7 +29,6 @@
|
|||
#include "Basics/StaticStrings.h"
|
||||
#include "Basics/VelocyPackHelper.h"
|
||||
#include "MMFiles/MMFilesIndexLookupContext.h"
|
||||
#include "Indexes/IndexResult.h"
|
||||
#include "Indexes/PersistentIndexAttributeMatcher.h"
|
||||
#include "MMFiles/MMFilesCollection.h"
|
||||
#include "MMFiles/MMFilesIndexElement.h"
|
||||
|
@ -316,16 +315,17 @@ Result MMFilesPersistentIndex::insert(transaction::Methods* trx,
|
|||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
std::vector<MMFilesSkiplistIndexElement*> elements;
|
||||
Result res;
|
||||
|
||||
int res;
|
||||
int r;
|
||||
try {
|
||||
res = fillElement(elements, documentId, doc);
|
||||
r = fillElement(elements, documentId, doc);
|
||||
} catch (basics::Exception const& ex) {
|
||||
res = ex.code();
|
||||
r = ex.code();
|
||||
} catch (std::bad_alloc const&) {
|
||||
res = TRI_ERROR_OUT_OF_MEMORY;
|
||||
r = TRI_ERROR_OUT_OF_MEMORY;
|
||||
} catch (...) {
|
||||
res = TRI_ERROR_INTERNAL;
|
||||
r = TRI_ERROR_INTERNAL;
|
||||
}
|
||||
|
||||
// make sure we clean up before we leave this method
|
||||
|
@ -337,8 +337,8 @@ Result MMFilesPersistentIndex::insert(transaction::Methods* trx,
|
|||
|
||||
TRI_DEFER(cleanup());
|
||||
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
return IndexResult(res, this);
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
|
||||
ManagedDocumentResult result;
|
||||
|
@ -430,11 +430,11 @@ Result MMFilesPersistentIndex::insert(transaction::Methods* trx,
|
|||
iterator->Seek(rocksdb::Slice(bound.first.c_str(), bound.first.size()));
|
||||
|
||||
if (iterator->Valid()) {
|
||||
int res = comparator->Compare(
|
||||
int cmp = comparator->Compare(
|
||||
iterator->key(),
|
||||
rocksdb::Slice(bound.second.c_str(), bound.second.size()));
|
||||
|
||||
if (res <= 0) {
|
||||
if (cmp <= 0) {
|
||||
uniqueConstraintViolated = true;
|
||||
VPackSlice slice(comparator->extractKeySlice(iterator->key()));
|
||||
uint64_t length = slice.length();
|
||||
|
@ -448,47 +448,47 @@ Result MMFilesPersistentIndex::insert(transaction::Methods* trx,
|
|||
|
||||
if (uniqueConstraintViolated) {
|
||||
// duplicate key
|
||||
res = TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED;
|
||||
r = TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED;
|
||||
auto physical =
|
||||
static_cast<MMFilesCollection*>(_collection.getPhysical());
|
||||
TRI_ASSERT(physical != nullptr);
|
||||
|
||||
if (!physical->useSecondaryIndexes()) {
|
||||
// suppress the error during recovery
|
||||
res = TRI_ERROR_NO_ERROR;
|
||||
r = TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (res == TRI_ERROR_NO_ERROR) {
|
||||
if (r == TRI_ERROR_NO_ERROR) {
|
||||
auto status = rocksTransaction->Put(values[i], std::string());
|
||||
|
||||
if (!status.ok()) {
|
||||
res = TRI_ERROR_INTERNAL;
|
||||
r = TRI_ERROR_INTERNAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
for (size_t j = 0; j < i; ++j) {
|
||||
rocksTransaction->Delete(values[i]);
|
||||
}
|
||||
|
||||
if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED && !_unique) {
|
||||
if (r == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED && !_unique) {
|
||||
// We ignore unique_constraint violated if we are not unique
|
||||
res = TRI_ERROR_NO_ERROR;
|
||||
r = TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
|
||||
if (r == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
|
||||
if (mode == OperationMode::internal) {
|
||||
return IndexResult(res, existingId);
|
||||
return res.reset(r, existingId);
|
||||
}
|
||||
return IndexResult(res, this, existingId);
|
||||
return addErrorMsg(res, r, existingId);
|
||||
}
|
||||
|
||||
return IndexResult(res, this);
|
||||
return res;
|
||||
}
|
||||
|
||||
/// @brief removes a document from the index
|
||||
|
@ -497,16 +497,17 @@ Result MMFilesPersistentIndex::remove(transaction::Methods* trx,
|
|||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
std::vector<MMFilesSkiplistIndexElement*> elements;
|
||||
Result res;
|
||||
|
||||
int res;
|
||||
int r;
|
||||
try {
|
||||
res = fillElement(elements, documentId, doc);
|
||||
r = fillElement(elements, documentId, doc);
|
||||
} catch (basics::Exception const& ex) {
|
||||
res = ex.code();
|
||||
r = ex.code();
|
||||
} catch (std::bad_alloc const&) {
|
||||
res = TRI_ERROR_OUT_OF_MEMORY;
|
||||
r = TRI_ERROR_OUT_OF_MEMORY;
|
||||
} catch (...) {
|
||||
res = TRI_ERROR_INTERNAL;
|
||||
r = TRI_ERROR_INTERNAL;
|
||||
}
|
||||
|
||||
// make sure we clean up before we leave this method
|
||||
|
@ -518,8 +519,8 @@ Result MMFilesPersistentIndex::remove(transaction::Methods* trx,
|
|||
|
||||
TRI_DEFER(cleanup());
|
||||
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
return IndexResult(res, this);
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
|
||||
ManagedDocumentResult result;
|
||||
|
@ -562,11 +563,11 @@ Result MMFilesPersistentIndex::remove(transaction::Methods* trx,
|
|||
// we may be looping through this multiple times, and if an error
|
||||
// occurs, we want to keep it
|
||||
if (!status.ok()) {
|
||||
res = TRI_ERROR_INTERNAL;
|
||||
addErrorMsg(res, TRI_ERROR_INTERNAL);
|
||||
}
|
||||
}
|
||||
|
||||
return IndexResult(res, this);
|
||||
return res;
|
||||
}
|
||||
|
||||
/// @brief called when the index is dropped
|
||||
|
|
|
@ -28,7 +28,6 @@
|
|||
#include "Basics/hashes.h"
|
||||
#include "Basics/tri-strings.h"
|
||||
#include "MMFiles/MMFilesIndexLookupContext.h"
|
||||
#include "Indexes/IndexResult.h"
|
||||
#include "Indexes/SimpleAttributeEqualityMatcher.h"
|
||||
#include "MMFiles/MMFilesCollection.h"
|
||||
#include "MMFiles/MMFilesIndexElement.h"
|
||||
|
@ -435,22 +434,23 @@ Result MMFilesPrimaryIndex::insertKey(transaction::Methods* trx,
|
|||
OperationMode mode) {
|
||||
MMFilesIndexLookupContext context(trx, &_collection, &mdr, 1);
|
||||
MMFilesSimpleIndexElement element(buildKeyElement(documentId, doc));
|
||||
Result res;
|
||||
|
||||
// TODO: we can pass in a special MMFilesIndexLookupContext which has some more on the information
|
||||
// about the to-be-inserted document. this way we can spare one lookup in
|
||||
// IsEqualElementElementByKey
|
||||
int res = _primaryIndex->insert(&context, element);
|
||||
int r = _primaryIndex->insert(&context, element);
|
||||
|
||||
if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
|
||||
if (r == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
|
||||
std::string existingId(doc.get(StaticStrings::KeyString).copyString());
|
||||
if (mode == OperationMode::internal) {
|
||||
return IndexResult(res, std::move(existingId));
|
||||
return res.reset(r, std::move(existingId));
|
||||
}
|
||||
|
||||
return IndexResult(res, this, existingId);
|
||||
return addErrorMsg(res, r, existingId);
|
||||
}
|
||||
|
||||
return IndexResult(res, this);
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
|
||||
/// @brief removes a key/element from the index
|
||||
|
@ -472,11 +472,12 @@ Result MMFilesPrimaryIndex::removeKey(transaction::Methods* trx,
|
|||
MMFilesSimpleIndexElement found =
|
||||
_primaryIndex->removeByKey(&context, keySlice.begin());
|
||||
|
||||
Result res;
|
||||
if (!found) {
|
||||
return IndexResult(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND, this);
|
||||
return addErrorMsg(res, TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND);
|
||||
}
|
||||
|
||||
return Result();
|
||||
return res;
|
||||
}
|
||||
|
||||
/// @brief resizes the index
|
||||
|
|
|
@ -30,7 +30,6 @@
|
|||
#include "Basics/StaticStrings.h"
|
||||
#include "Basics/VelocyPackHelper.h"
|
||||
#include "MMFiles/MMFilesIndexLookupContext.h"
|
||||
#include "Indexes/IndexResult.h"
|
||||
#include "Indexes/SkiplistIndexAttributeMatcher.h"
|
||||
#include "MMFiles/MMFilesCollection.h"
|
||||
#include "StorageEngine/PhysicalCollection.h"
|
||||
|
@ -757,24 +756,25 @@ Result MMFilesSkiplistIndex::insert(transaction::Methods* trx,
|
|||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc, OperationMode mode) {
|
||||
std::vector<MMFilesSkiplistIndexElement*> elements;
|
||||
Result res;
|
||||
|
||||
int res;
|
||||
int r;
|
||||
try {
|
||||
res = fillElement<MMFilesSkiplistIndexElement>(elements, documentId, doc);
|
||||
r = fillElement<MMFilesSkiplistIndexElement>(elements, documentId, doc);
|
||||
} catch (basics::Exception const& ex) {
|
||||
res = ex.code();
|
||||
r = ex.code();
|
||||
} catch (std::bad_alloc const&) {
|
||||
res = TRI_ERROR_OUT_OF_MEMORY;
|
||||
r = TRI_ERROR_OUT_OF_MEMORY;
|
||||
} catch (...) {
|
||||
res = TRI_ERROR_INTERNAL;
|
||||
r = TRI_ERROR_INTERNAL;
|
||||
}
|
||||
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
for (auto& element : elements) {
|
||||
// free all elements to prevent leak
|
||||
_allocator->deallocate(element);
|
||||
}
|
||||
return IndexResult(res, this);
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
|
||||
ManagedDocumentResult result;
|
||||
|
@ -786,9 +786,9 @@ Result MMFilesSkiplistIndex::insert(transaction::Methods* trx,
|
|||
|
||||
size_t badIndex = 0;
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
res = _skiplistIndex->insert(&context, elements[i]);
|
||||
r = _skiplistIndex->insert(&context, elements[i]);
|
||||
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
badIndex = i;
|
||||
|
||||
// Note: this element is freed already
|
||||
|
@ -800,16 +800,16 @@ Result MMFilesSkiplistIndex::insert(transaction::Methods* trx,
|
|||
// No need to free elements[j] skiplist has taken over already
|
||||
}
|
||||
|
||||
if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED && !_unique) {
|
||||
if (r == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED && !_unique) {
|
||||
// We ignore unique_constraint violated if we are not unique
|
||||
res = TRI_ERROR_NO_ERROR;
|
||||
r = TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
|
||||
if (r == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
|
||||
elements.clear();
|
||||
|
||||
// need to rebuild elements, find conflicting key to return error,
|
||||
|
@ -833,7 +833,7 @@ Result MMFilesSkiplistIndex::insert(transaction::Methods* trx,
|
|||
});
|
||||
|
||||
if (innerRes != TRI_ERROR_NO_ERROR) {
|
||||
return IndexResult(innerRes, this);
|
||||
return addErrorMsg(res, innerRes);
|
||||
}
|
||||
|
||||
auto found = _skiplistIndex->rightLookup(&context, elements[badIndex]);
|
||||
|
@ -846,13 +846,13 @@ Result MMFilesSkiplistIndex::insert(transaction::Methods* trx,
|
|||
});
|
||||
|
||||
if (mode == OperationMode::internal) {
|
||||
return IndexResult(res, std::move(existingId));
|
||||
return res.reset(r, std::move(existingId));
|
||||
}
|
||||
|
||||
return IndexResult(res, this, existingId);
|
||||
return addErrorMsg(res, r, existingId);
|
||||
}
|
||||
|
||||
return IndexResult(res, this);
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
|
||||
/// @brief removes a document from a skiplist index
|
||||
|
@ -860,24 +860,25 @@ Result MMFilesSkiplistIndex::remove(transaction::Methods* trx,
|
|||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc, OperationMode mode) {
|
||||
std::vector<MMFilesSkiplistIndexElement*> elements;
|
||||
Result res;
|
||||
|
||||
int res;
|
||||
int r;
|
||||
try {
|
||||
res = fillElement<MMFilesSkiplistIndexElement>(elements, documentId, doc);
|
||||
r = fillElement<MMFilesSkiplistIndexElement>(elements, documentId, doc);
|
||||
} catch (basics::Exception const& ex) {
|
||||
res = ex.code();
|
||||
r = ex.code();
|
||||
} catch (std::bad_alloc const&) {
|
||||
res = TRI_ERROR_OUT_OF_MEMORY;
|
||||
r = TRI_ERROR_OUT_OF_MEMORY;
|
||||
} catch (...) {
|
||||
res = TRI_ERROR_INTERNAL;
|
||||
r = TRI_ERROR_INTERNAL;
|
||||
}
|
||||
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
for (auto& element : elements) {
|
||||
// free all elements to prevent leak
|
||||
_allocator->deallocate(element);
|
||||
}
|
||||
return IndexResult(res, this);
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
|
||||
ManagedDocumentResult result;
|
||||
|
@ -893,13 +894,13 @@ Result MMFilesSkiplistIndex::remove(transaction::Methods* trx,
|
|||
// we may be looping through this multiple times, and if an error
|
||||
// occurs, we want to keep it
|
||||
if (result != TRI_ERROR_NO_ERROR) {
|
||||
res = result;
|
||||
r = result;
|
||||
}
|
||||
|
||||
_allocator->deallocate(elements[i]);
|
||||
}
|
||||
|
||||
return IndexResult(res, this);
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
|
||||
void MMFilesSkiplistIndex::unload() {
|
||||
|
|
|
@ -1435,6 +1435,7 @@ Result RocksDBCollection::insertDocument(
|
|||
// Coordinator doesn't know index internals
|
||||
TRI_ASSERT(!ServerState::instance()->isCoordinator());
|
||||
TRI_ASSERT(trx->state()->isRunning());
|
||||
Result res;
|
||||
|
||||
RocksDBKeyLeaser key(trx);
|
||||
key->constructDocument(_objectId, documentId);
|
||||
|
@ -1445,11 +1446,11 @@ Result RocksDBCollection::insertDocument(
|
|||
// disable indexing in this transaction if we are allowed to
|
||||
IndexingDisabler disabler(mthds, trx->isSingleOperationTransaction());
|
||||
|
||||
Result res = mthds->Put(RocksDBColumnFamily::documents(), key.ref(),
|
||||
rocksdb::Slice(reinterpret_cast<char const*>(doc.begin()),
|
||||
static_cast<size_t>(doc.byteSize())));
|
||||
if (res.fail()) {
|
||||
return res;
|
||||
rocksdb::Status s = mthds->Put(RocksDBColumnFamily::documents(), key.ref(),
|
||||
rocksdb::Slice(reinterpret_cast<char const*>(doc.begin()),
|
||||
static_cast<size_t>(doc.byteSize())));
|
||||
if (!s.ok()) {
|
||||
return res.reset(rocksutils::convertStatus(s, rocksutils::document));
|
||||
}
|
||||
|
||||
READ_LOCKER(guard, _indexesLock);
|
||||
|
@ -1479,6 +1480,7 @@ Result RocksDBCollection::removeDocument(
|
|||
TRI_ASSERT(!ServerState::instance()->isCoordinator());
|
||||
TRI_ASSERT(trx->state()->isRunning());
|
||||
TRI_ASSERT(_objectId != 0);
|
||||
Result res;
|
||||
|
||||
RocksDBKeyLeaser key(trx);
|
||||
key->constructDocument(_objectId, documentId);
|
||||
|
@ -1490,9 +1492,9 @@ Result RocksDBCollection::removeDocument(
|
|||
// disable indexing in this transaction if we are allowed to
|
||||
IndexingDisabler disabler(mthd, trx->isSingleOperationTransaction());
|
||||
|
||||
Result res = mthd->SingleDelete(RocksDBColumnFamily::documents(), key.ref());
|
||||
if (res.fail()) {
|
||||
return res;
|
||||
rocksdb::Status s = mthd->SingleDelete(RocksDBColumnFamily::documents(), key.ref());
|
||||
if (!s.ok()) {
|
||||
return res.reset(rocksutils::convertStatus(s, rocksutils::document));
|
||||
}
|
||||
|
||||
/*LOG_TOPIC(ERR, Logger::ENGINES)
|
||||
|
@ -1526,25 +1528,23 @@ Result RocksDBCollection::updateDocument(
|
|||
TRI_ASSERT(!ServerState::instance()->isCoordinator());
|
||||
TRI_ASSERT(trx->state()->isRunning());
|
||||
TRI_ASSERT(_objectId != 0);
|
||||
Result res;
|
||||
|
||||
RocksDBMethods* mthd = RocksDBTransactionState::toMethods(trx);
|
||||
|
||||
// We NEED to do the PUT first, otherwise WAL tailing breaks
|
||||
RocksDBKeyLeaser newKey(trx);
|
||||
newKey->constructDocument(_objectId, newDocumentId);
|
||||
// TODO: given that this should have a unique revision ID, do
|
||||
// we really need to blacklist the new key?
|
||||
blackListKey(newKey->string().data(),
|
||||
static_cast<uint32_t>(newKey->string().size()));
|
||||
// simon: we do not need to blacklist the new documentId
|
||||
|
||||
// disable indexing in this transaction if we are allowed to
|
||||
IndexingDisabler disabler(mthd, trx->isSingleOperationTransaction());
|
||||
|
||||
Result res = mthd->Put(RocksDBColumnFamily::documents(), newKey.ref(),
|
||||
rocksdb::Slice(reinterpret_cast<char const*>(newDoc.begin()),
|
||||
static_cast<size_t>(newDoc.byteSize())));
|
||||
if (res.fail()) {
|
||||
return res;
|
||||
rocksdb::Status s = mthd->Put(RocksDBColumnFamily::documents(), newKey.ref(),
|
||||
rocksdb::Slice(reinterpret_cast<char const*>(newDoc.begin()),
|
||||
static_cast<size_t>(newDoc.byteSize())));
|
||||
if (!s.ok()) {
|
||||
return res.reset(rocksutils::convertStatus(s, rocksutils::document));
|
||||
}
|
||||
|
||||
RocksDBKeyLeaser oldKey(trx);
|
||||
|
@ -1552,9 +1552,9 @@ Result RocksDBCollection::updateDocument(
|
|||
blackListKey(oldKey->string().data(),
|
||||
static_cast<uint32_t>(oldKey->string().size()));
|
||||
|
||||
res = mthd->SingleDelete(RocksDBColumnFamily::documents(), oldKey.ref());
|
||||
if (res.fail()) {
|
||||
return res;
|
||||
s = mthd->SingleDelete(RocksDBColumnFamily::documents(), oldKey.ref());
|
||||
if (!s.ok()) {
|
||||
return res.reset(rocksutils::convertStatus(s, rocksutils::document));
|
||||
}
|
||||
|
||||
READ_LOCKER(guard, _indexesLock);
|
||||
|
@ -1579,6 +1579,7 @@ arangodb::Result RocksDBCollection::lookupDocumentVPack(
|
|||
arangodb::ManagedDocumentResult& mdr, bool withCache) const {
|
||||
TRI_ASSERT(trx->state()->isRunning());
|
||||
TRI_ASSERT(_objectId != 0);
|
||||
Result res;
|
||||
|
||||
RocksDBKeyLeaser key(trx);
|
||||
key->constructDocument(_objectId, documentId);
|
||||
|
@ -1593,7 +1594,7 @@ arangodb::Result RocksDBCollection::lookupDocumentVPack(
|
|||
std::string* value = mdr.setManaged(documentId);
|
||||
value->append(reinterpret_cast<char const*>(f.value()->value()),
|
||||
f.value()->valueSize());
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
return res;
|
||||
}
|
||||
if (f.result().errorNumber() == TRI_ERROR_LOCK_TIMEOUT) {
|
||||
// assuming someone is currently holding a write lock, which
|
||||
|
@ -1604,9 +1605,10 @@ arangodb::Result RocksDBCollection::lookupDocumentVPack(
|
|||
|
||||
RocksDBMethods* mthd = RocksDBTransactionState::toMethods(trx);
|
||||
std::string* value = mdr.setManaged(documentId);
|
||||
Result res = mthd->Get(RocksDBColumnFamily::documents(), key.ref(), value);
|
||||
rocksdb::Status s = mthd->Get(RocksDBColumnFamily::documents(),
|
||||
key->string(), value);
|
||||
|
||||
if (res.ok()) {
|
||||
if (s.ok()) {
|
||||
if (withCache && useCache() && !lockTimeout) {
|
||||
TRI_ASSERT(_cache != nullptr);
|
||||
// write entry back to cache
|
||||
|
@ -1634,6 +1636,7 @@ arangodb::Result RocksDBCollection::lookupDocumentVPack(
|
|||
<< " seq: " << mthd->sequenceNumber()
|
||||
<< " objectID " << _objectId << " name: " << _logicalCollection.name();
|
||||
mdr.clear();
|
||||
res.reset(rocksutils::convertStatus(s, rocksutils::document));
|
||||
}
|
||||
|
||||
return res;
|
||||
|
@ -1644,6 +1647,7 @@ arangodb::Result RocksDBCollection::lookupDocumentVPack(
|
|||
IndexIterator::DocumentCallback const& cb, bool withCache) const {
|
||||
TRI_ASSERT(trx->state()->isRunning());
|
||||
TRI_ASSERT(_objectId != 0);
|
||||
Result res;
|
||||
|
||||
RocksDBKeyLeaser key(trx);
|
||||
key->constructDocument(_objectId, documentId);
|
||||
|
@ -1657,7 +1661,7 @@ arangodb::Result RocksDBCollection::lookupDocumentVPack(
|
|||
if (f.found()) {
|
||||
cb(documentId,
|
||||
VPackSlice(reinterpret_cast<char const*>(f.value()->value())));
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
return res;
|
||||
}
|
||||
if (f.result().errorNumber() == TRI_ERROR_LOCK_TIMEOUT) {
|
||||
// assuming someone is currently holding a write lock, which
|
||||
|
@ -1667,11 +1671,10 @@ arangodb::Result RocksDBCollection::lookupDocumentVPack(
|
|||
}
|
||||
|
||||
rocksdb::PinnableSlice ps;
|
||||
auto state = RocksDBTransactionState::toState(trx);
|
||||
RocksDBMethods* mthd = state->rocksdbMethods();
|
||||
Result res = mthd->Get(RocksDBColumnFamily::documents(), key.ref(), &ps);
|
||||
RocksDBMethods* mthd = RocksDBTransactionState::toMethods(trx);
|
||||
rocksdb::Status s = mthd->Get(RocksDBColumnFamily::documents(), key->string(), &ps);
|
||||
|
||||
if (res.ok()) {
|
||||
if (s.ok()) {
|
||||
if (withCache && useCache() && !lockTimeout) {
|
||||
TRI_ASSERT(_cache != nullptr);
|
||||
// write entry back to cache
|
||||
|
@ -1697,6 +1700,7 @@ arangodb::Result RocksDBCollection::lookupDocumentVPack(
|
|||
<< "NOT FOUND rev: " << documentId.id() << " trx: " << trx->state()->id()
|
||||
<< " seq: " << mthd->sequenceNumber()
|
||||
<< " objectID " << _objectId << " name: " << _logicalCollection.name();
|
||||
res.reset(rocksutils::convertStatus(s, rocksutils::document));
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
|
|
@ -32,7 +32,6 @@
|
|||
#include "Basics/VelocyPackHelper.h"
|
||||
#include "Cache/CachedValue.h"
|
||||
#include "Cache/TransactionalCache.h"
|
||||
#include "Indexes/IndexResult.h"
|
||||
#include "Indexes/SimpleAttributeEqualityMatcher.h"
|
||||
#include "RocksDBEngine/RocksDBCollection.h"
|
||||
#include "RocksDBEngine/RocksDBCommon.h"
|
||||
|
@ -578,6 +577,8 @@ Result RocksDBEdgeIndex::insertInternal(transaction::Methods* trx,
|
|||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
Result res;
|
||||
|
||||
VPackSlice fromTo = doc.get(_directionAttr);
|
||||
TRI_ASSERT(fromTo.isString());
|
||||
auto fromToRef = StringRef(fromTo);
|
||||
|
@ -593,19 +594,18 @@ Result RocksDBEdgeIndex::insertInternal(transaction::Methods* trx,
|
|||
blackListKey(fromToRef);
|
||||
|
||||
// acquire rocksdb transaction
|
||||
Result r = mthd->Put(_cf, key.ref(),
|
||||
value.string(), rocksutils::index);
|
||||
if (r.ok()) {
|
||||
rocksdb::Status s = mthd->Put(_cf, key.ref(), value.string());
|
||||
if (s.ok()) {
|
||||
std::hash<StringRef> hasher;
|
||||
uint64_t hash = static_cast<uint64_t>(hasher(fromToRef));
|
||||
RocksDBTransactionState::toState(trx)->trackIndexInsert(
|
||||
_collection.id(), id(), hash
|
||||
);
|
||||
|
||||
return IndexResult();
|
||||
} else {
|
||||
return IndexResult(r.errorNumber(), this);
|
||||
res.reset(rocksutils::convertStatus(s));
|
||||
addErrorMsg(res);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
Result RocksDBEdgeIndex::removeInternal(transaction::Methods* trx,
|
||||
|
@ -613,6 +613,8 @@ Result RocksDBEdgeIndex::removeInternal(transaction::Methods* trx,
|
|||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
Result res;
|
||||
|
||||
// VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
|
||||
VPackSlice fromTo = doc.get(_directionAttr);
|
||||
auto fromToRef = StringRef(fromTo);
|
||||
|
@ -628,18 +630,19 @@ Result RocksDBEdgeIndex::removeInternal(transaction::Methods* trx,
|
|||
// blacklist key in cache
|
||||
blackListKey(fromToRef);
|
||||
|
||||
Result res = mthd->Delete(_cf, key.ref());
|
||||
if (res.ok()) {
|
||||
rocksdb::Status s = mthd->Delete(_cf, key.ref());
|
||||
if (s.ok()) {
|
||||
std::hash<StringRef> hasher;
|
||||
uint64_t hash = static_cast<uint64_t>(hasher(fromToRef));
|
||||
RocksDBTransactionState::toState(trx)->trackIndexRemove(
|
||||
_collection.id(), id(), hash
|
||||
);
|
||||
|
||||
return IndexResult();
|
||||
} else {
|
||||
return IndexResult(res.errorNumber(), this);
|
||||
res.reset(rocksutils::convertStatus(s));
|
||||
addErrorMsg(res);
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
void RocksDBEdgeIndex::batchInsert(
|
||||
|
@ -655,10 +658,9 @@ void RocksDBEdgeIndex::batchInsert(
|
|||
key->constructEdgeIndexValue(_objectId, fromToRef, doc.first);
|
||||
|
||||
blackListKey(fromToRef);
|
||||
Result r = mthds->Put(_cf, key.ref(),
|
||||
rocksdb::Slice(), rocksutils::index);
|
||||
if (!r.ok()) {
|
||||
queue->setStatus(r.errorNumber());
|
||||
rocksdb::Status s = mthds->Put(_cf, key.ref(), rocksdb::Slice());
|
||||
if (!s.ok()) {
|
||||
queue->setStatus(rocksutils::convertStatus(s).errorNumber());
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -29,7 +29,6 @@
|
|||
#include "Basics/Utf8Helper.h"
|
||||
#include "Basics/VelocyPackHelper.h"
|
||||
#include "Basics/tri-strings.h"
|
||||
#include "Indexes/IndexResult.h"
|
||||
#include "Logger/Logger.h"
|
||||
#include "RocksDBEngine/RocksDBCollection.h"
|
||||
#include "RocksDBEngine/RocksDBCommon.h"
|
||||
|
@ -191,28 +190,29 @@ Result RocksDBFulltextIndex::insertInternal(transaction::Methods* trx,
|
|||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
Result res;
|
||||
std::set<std::string> words = wordlist(doc);
|
||||
if (words.empty()) {
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
return res;
|
||||
}
|
||||
|
||||
// now we are going to construct the value to insert into rocksdb
|
||||
// unique indexes have a different key structure
|
||||
RocksDBValue value = RocksDBValue::VPackIndexValue();
|
||||
|
||||
int res = TRI_ERROR_NO_ERROR;
|
||||
// size_t const count = words.size();
|
||||
for (std::string const& word : words) {
|
||||
RocksDBKeyLeaser key(trx);
|
||||
key->constructFulltextIndexValue(_objectId, StringRef(word), documentId);
|
||||
|
||||
Result r = mthd->Put(_cf, key.ref(), value.string(), rocksutils::index);
|
||||
if (!r.ok()) {
|
||||
res = r.errorNumber();
|
||||
rocksdb::Status s = mthd->Put(_cf, key.ref(), value.string());
|
||||
if (!s.ok()) {
|
||||
res.reset(rocksutils::convertStatus(s, rocksutils::index));
|
||||
addErrorMsg(res);
|
||||
break;
|
||||
}
|
||||
}
|
||||
return IndexResult(res, this);
|
||||
return res;
|
||||
}
|
||||
|
||||
Result RocksDBFulltextIndex::removeInternal(transaction::Methods* trx,
|
||||
|
@ -220,25 +220,27 @@ Result RocksDBFulltextIndex::removeInternal(transaction::Methods* trx,
|
|||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
Result res;
|
||||
|
||||
std::set<std::string> words = wordlist(doc);
|
||||
if (words.empty()) {
|
||||
return IndexResult();
|
||||
return res;
|
||||
}
|
||||
|
||||
// now we are going to construct the value to insert into rocksdb
|
||||
// unique indexes have a different key structure
|
||||
int res = TRI_ERROR_NO_ERROR;
|
||||
for (std::string const& word : words) {
|
||||
RocksDBKeyLeaser key(trx);
|
||||
key->constructFulltextIndexValue(_objectId, StringRef(word), documentId);
|
||||
|
||||
Result r = mthd->Delete(_cf, key.ref());
|
||||
if (!r.ok()) {
|
||||
res = r.errorNumber();
|
||||
rocksdb::Status s = mthd->Delete(_cf, key.ref());
|
||||
if (!s.ok()) {
|
||||
res.reset(rocksutils::convertStatus(s, rocksutils::index));
|
||||
addErrorMsg(res);
|
||||
break;
|
||||
}
|
||||
}
|
||||
return IndexResult(res, this);
|
||||
return res;
|
||||
}
|
||||
|
||||
/// @brief walk over the attribute. Also Extract sub-attributes and elements in
|
||||
|
|
|
@ -425,9 +425,11 @@ Result RocksDBGeoIndex::insertInternal(transaction::Methods* trx,
|
|||
RocksDBKeyLeaser key(trx);
|
||||
for (S2CellId cell : cells) {
|
||||
key->constructGeoIndexValue(_objectId, cell.id(), documentId);
|
||||
res = mthd->Put(RocksDBColumnFamily::geo(), key.ref(), val.string());
|
||||
if (res.fail()) {
|
||||
return res;
|
||||
rocksdb::Status s = mthd->Put(RocksDBColumnFamily::geo(), key.ref(), val.string());
|
||||
if (!s.ok()) {
|
||||
res.reset(rocksutils::convertStatus(s, rocksutils::index));
|
||||
addErrorMsg(res);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -459,9 +461,11 @@ Result RocksDBGeoIndex::removeInternal(transaction::Methods* trx,
|
|||
// the same cells everytime for the same parameters ?
|
||||
for (S2CellId cell : cells) {
|
||||
key->constructGeoIndexValue(_objectId, cell.id(), documentId);
|
||||
res = mthd->Delete(RocksDBColumnFamily::geo(), key.ref());
|
||||
if (res.fail()) {
|
||||
return res;
|
||||
rocksdb::Status s = mthd->Delete(RocksDBColumnFamily::geo(), key.ref());
|
||||
if (!s.ok()) {
|
||||
res.reset(rocksutils::convertStatus(s, rocksutils::index));
|
||||
addErrorMsg(res);
|
||||
break;
|
||||
}
|
||||
}
|
||||
return res;
|
||||
|
|
|
@ -96,18 +96,6 @@ void RocksDBSavePoint::rollback() {
|
|||
|
||||
// =================== RocksDBMethods ===================
|
||||
|
||||
arangodb::Result RocksDBMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key,
|
||||
std::string* val) {
|
||||
return Get(cf, key.string(), val);
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key,
|
||||
rocksdb::PinnableSlice* val) {
|
||||
return Get(cf, key.string(), val);
|
||||
}
|
||||
|
||||
rocksdb::SequenceNumber RocksDBMethods::sequenceNumber() {
|
||||
return _state->sequenceNumber();
|
||||
}
|
||||
|
@ -156,59 +144,37 @@ RocksDBReadOnlyMethods::RocksDBReadOnlyMethods(RocksDBTransactionState* state)
|
|||
_db = rocksutils::globalRocksDB();
|
||||
}
|
||||
|
||||
bool RocksDBReadOnlyMethods::Exists(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
bool valueFound = false;
|
||||
std::string val; // do not care about value
|
||||
bool mayExist = _db->KeyMayExist(_state->_rocksReadOptions, cf, key.string(),
|
||||
&val, &valueFound);
|
||||
if (valueFound) {
|
||||
return true;
|
||||
}
|
||||
if (mayExist) {
|
||||
rocksdb::PinnableSlice ps;
|
||||
rocksdb::Status s =
|
||||
_db->Get(_state->_rocksReadOptions, cf, key.string(), &ps);
|
||||
return !s.IsNotFound();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBReadOnlyMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Status RocksDBReadOnlyMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Slice const& key,
|
||||
std::string* val) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
rocksdb::ReadOptions const& ro = _state->_rocksReadOptions;
|
||||
TRI_ASSERT(ro.snapshot != nullptr);
|
||||
rocksdb::Status s = _db->Get(ro, cf, key, val);
|
||||
return s.ok() ? arangodb::Result() : rocksutils::convertStatus(s, rocksutils::StatusHint::document, "", "Get - in RocksDBReadOnlyMethods");
|
||||
return _db->Get(ro, cf, key, val);
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBReadOnlyMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Status RocksDBReadOnlyMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Slice const& key,
|
||||
rocksdb::PinnableSlice* val) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
rocksdb::ReadOptions const& ro = _state->_rocksReadOptions;
|
||||
TRI_ASSERT(ro.snapshot != nullptr);
|
||||
rocksdb::Status s = _db->Get(ro, cf, key, val);
|
||||
return s.ok() ? arangodb::Result() : rocksutils::convertStatus(s, rocksutils::StatusHint::document, "", "Get - in RocksDBReadOnlyMethods");
|
||||
return _db->Get(ro, cf, key, val);
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBReadOnlyMethods::Put(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const&,
|
||||
rocksdb::Slice const&,
|
||||
rocksutils::StatusHint) {
|
||||
rocksdb::Status RocksDBReadOnlyMethods::Put(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const&,
|
||||
rocksdb::Slice const&) {
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_READ_ONLY);
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBReadOnlyMethods::Delete(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key) {
|
||||
rocksdb::Status RocksDBReadOnlyMethods::Delete(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key) {
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_READ_ONLY);
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBReadOnlyMethods::SingleDelete(rocksdb::ColumnFamilyHandle*,
|
||||
RocksDBKey const&) {
|
||||
rocksdb::Status RocksDBReadOnlyMethods::SingleDelete(rocksdb::ColumnFamilyHandle*,
|
||||
RocksDBKey const&) {
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_READ_ONLY);
|
||||
}
|
||||
|
||||
|
@ -240,65 +206,42 @@ RocksDBTrxMethods::RocksDBTrxMethods(RocksDBTransactionState* state)
|
|||
: RocksDBMethods(state),
|
||||
_indexingDisabled(false) {}
|
||||
|
||||
bool RocksDBTrxMethods::Exists(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
rocksdb::PinnableSlice val; // do not care about value
|
||||
rocksdb::Status s = _state->_rocksTransaction->Get(_state->_rocksReadOptions,
|
||||
cf, key.string(), &val);
|
||||
return !s.IsNotFound();
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBTrxMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Status RocksDBTrxMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Slice const& key,
|
||||
std::string* val) {
|
||||
arangodb::Result rv;
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
rocksdb::ReadOptions const& ro = _state->_rocksReadOptions;
|
||||
TRI_ASSERT(ro.snapshot != nullptr);
|
||||
rocksdb::Status s = _state->_rocksTransaction->Get(ro, cf, key, val);
|
||||
if (!s.ok()) {
|
||||
rv = rocksutils::convertStatus(s, rocksutils::StatusHint::document, "", "Get - in RocksDBTrxMethods");
|
||||
}
|
||||
return rv;
|
||||
return _state->_rocksTransaction->Get(ro, cf, key, val);
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBTrxMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Slice const& key,
|
||||
rocksdb::PinnableSlice* val) {
|
||||
arangodb::Result rv;
|
||||
rocksdb::Status RocksDBTrxMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Slice const& key,
|
||||
rocksdb::PinnableSlice* val) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
rocksdb::ReadOptions const& ro = _state->_rocksReadOptions;
|
||||
TRI_ASSERT(ro.snapshot != nullptr);
|
||||
rocksdb::Status s = _state->_rocksTransaction->Get(ro, cf, key, val);
|
||||
if (!s.ok()) {
|
||||
rv = rocksutils::convertStatus(s, rocksutils::StatusHint::document, "", "Get - in RocksDBTrxMethods");
|
||||
}
|
||||
return rv;
|
||||
return _state->_rocksTransaction->Get(ro, cf, key, val);
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBTrxMethods::Put(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Status RocksDBTrxMethods::Put(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key,
|
||||
rocksdb::Slice const& val,
|
||||
rocksutils::StatusHint hint) {
|
||||
rocksdb::Slice const& val) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
rocksdb::Status s = _state->_rocksTransaction->Put(cf, key.string(), val);
|
||||
return s.ok() ? arangodb::Result() : rocksutils::convertStatus(s, hint);
|
||||
return _state->_rocksTransaction->Put(cf, key.string(), val);
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBTrxMethods::Delete(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Status RocksDBTrxMethods::Delete(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
rocksdb::Status s = _state->_rocksTransaction->Delete(cf, key.string());
|
||||
return s.ok() ? arangodb::Result() : rocksutils::convertStatus(s);
|
||||
return _state->_rocksTransaction->Delete(cf, key.string());
|
||||
}
|
||||
|
||||
|
||||
arangodb::Result RocksDBTrxMethods::SingleDelete(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Status RocksDBTrxMethods::SingleDelete(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
rocksdb::Status s = _state->_rocksTransaction->SingleDelete(cf, key.string());
|
||||
return s.ok() ? arangodb::Result() : rocksutils::convertStatus(s);
|
||||
return _state->_rocksTransaction->SingleDelete(cf, key.string());
|
||||
}
|
||||
|
||||
std::unique_ptr<rocksdb::Iterator> RocksDBTrxMethods::NewIterator(
|
||||
|
@ -312,9 +255,8 @@ void RocksDBTrxMethods::SetSavePoint() {
|
|||
_state->_rocksTransaction->SetSavePoint();
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBTrxMethods::RollbackToSavePoint() {
|
||||
return rocksutils::convertStatus(
|
||||
_state->_rocksTransaction->RollbackToSavePoint());
|
||||
rocksdb::Status RocksDBTrxMethods::RollbackToSavePoint() {
|
||||
return _state->_rocksTransaction->RollbackToSavePoint();
|
||||
}
|
||||
|
||||
void RocksDBTrxMethods::PopSavePoint() {
|
||||
|
@ -331,75 +273,60 @@ void RocksDBTrxMethods::PopSavePoint() {
|
|||
RocksDBTrxUntrackedMethods::RocksDBTrxUntrackedMethods(RocksDBTransactionState* state)
|
||||
: RocksDBTrxMethods(state) {}
|
||||
|
||||
arangodb::Result RocksDBTrxUntrackedMethods::Put(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Status RocksDBTrxUntrackedMethods::Put(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key,
|
||||
rocksdb::Slice const& val,
|
||||
rocksutils::StatusHint hint) {
|
||||
rocksdb::Slice const& val) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
rocksdb::Status s = _state->_rocksTransaction->PutUntracked(cf, key.string(), val);
|
||||
return s.ok() ? arangodb::Result() : rocksutils::convertStatus(s, hint);
|
||||
return _state->_rocksTransaction->PutUntracked(cf, key.string(), val);
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBTrxUntrackedMethods::Delete(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Status RocksDBTrxUntrackedMethods::Delete(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
rocksdb::Status s = _state->_rocksTransaction->DeleteUntracked(cf, key.string());
|
||||
return s.ok() ? arangodb::Result() : rocksutils::convertStatus(s);
|
||||
return _state->_rocksTransaction->DeleteUntracked(cf, key.string());
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBTrxUntrackedMethods::SingleDelete(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Status RocksDBTrxUntrackedMethods::SingleDelete(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
rocksdb::Status s = _state->_rocksTransaction->SingleDeleteUntracked(cf, key.string());
|
||||
return s.ok() ? arangodb::Result() : rocksutils::convertStatus(s);
|
||||
return _state->_rocksTransaction->SingleDeleteUntracked(cf, key.string());
|
||||
}
|
||||
|
||||
// =================== RocksDBBatchedMethods ====================
|
||||
|
||||
RocksDBBatchedMethods::RocksDBBatchedMethods(RocksDBTransactionState* state,
|
||||
rocksdb::WriteBatch* wb)
|
||||
: RocksDBMethods(state), _wb(wb) {
|
||||
_db = rocksutils::globalRocksDB();
|
||||
}
|
||||
: RocksDBMethods(state), _wb(wb) {}
|
||||
|
||||
bool RocksDBBatchedMethods::Exists(rocksdb::ColumnFamilyHandle*,
|
||||
RocksDBKey const&) {
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "BatchedMethods does not provide Exists");
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBBatchedMethods::Get(rocksdb::ColumnFamilyHandle*,
|
||||
rocksdb::Slice const&,
|
||||
std::string*) {
|
||||
rocksdb::Status RocksDBBatchedMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Slice const& key,
|
||||
std::string* val) {
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "BatchedMethods does not provide Get");
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBBatchedMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Slice const& key,
|
||||
rocksdb::PinnableSlice* val) {
|
||||
rocksdb::Status RocksDBBatchedMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Slice const& key,
|
||||
rocksdb::PinnableSlice* val) {
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "BatchedMethods does not provide Get");
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBBatchedMethods::Put(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key,
|
||||
rocksdb::Slice const& val,
|
||||
rocksutils::StatusHint) {
|
||||
rocksdb::Status RocksDBBatchedMethods::Put(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key,
|
||||
rocksdb::Slice const& val) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
_wb->Put(cf, key.string(), val);
|
||||
return arangodb::Result();
|
||||
return _wb->Put(cf, key.string(), val);
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBBatchedMethods::Delete(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key) {
|
||||
rocksdb::Status RocksDBBatchedMethods::Delete(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
_wb->Delete(cf, key.string());
|
||||
return arangodb::Result();
|
||||
return _wb->Delete(cf, key.string());
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBBatchedMethods::SingleDelete(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Status RocksDBBatchedMethods::SingleDelete(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
_wb->SingleDelete(cf, key.string());
|
||||
return arangodb::Result();
|
||||
return _wb->SingleDelete(cf, key.string());
|
||||
}
|
||||
|
||||
std::unique_ptr<rocksdb::Iterator> RocksDBBatchedMethods::NewIterator(
|
||||
|
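RocksDBBatchedMethods only buffers writes; its Exists()/Get() deliberately throw, as shown above. The underlying idea in plain RocksDB terms (illustrative helper, not part of the commit): mutations accumulate in a WriteBatch and are applied atomically with a single Write() call.

#include <rocksdb/db.h>
#include <rocksdb/write_batch.h>

// Buffer two mutations and apply them atomically. The batch itself offers no
// lookups, which is exactly why the batched methods above reject reads.
rocksdb::Status FlushExampleBatch(rocksdb::DB* db,
                                  rocksdb::ColumnFamilyHandle* cf) {
  rocksdb::WriteBatch wb;
  rocksdb::Status s = wb.Put(cf, "key-1", "value-1");
  if (s.ok()) {
    s = wb.Delete(cf, "key-2");
  }
  if (s.ok()) {
    s = db->Write(rocksdb::WriteOptions(), &wb);
  }
  return s;
}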
@ -415,54 +342,39 @@ RocksDBBatchedWithIndexMethods::RocksDBBatchedWithIndexMethods(RocksDBTransactio
|
|||
_db = rocksutils::globalRocksDB();
|
||||
}
|
||||
|
||||
bool RocksDBBatchedWithIndexMethods::Exists(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
rocksdb::ReadOptions ro;
|
||||
rocksdb::PinnableSlice val; // do not care about value
|
||||
rocksdb::Status s = _wb->GetFromBatchAndDB(_db, ro, cf, key.string(), &val);
|
||||
return !s.IsNotFound();
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBBatchedWithIndexMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Status RocksDBBatchedWithIndexMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Slice const& key,
|
||||
std::string* val) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
rocksdb::ReadOptions ro;
|
||||
rocksdb::Status s = _wb->GetFromBatchAndDB(_db, ro, cf, key, val);
|
||||
return s.ok() ? arangodb::Result() : rocksutils::convertStatus(s, rocksutils::StatusHint::document, "", "Get - in RocksDBBatchedWithIndexMethods");
|
||||
return _wb->GetFromBatchAndDB(_db, ro, cf, key, val);
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBBatchedWithIndexMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Status RocksDBBatchedWithIndexMethods::Get(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Slice const& key,
|
||||
rocksdb::PinnableSlice* val) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
rocksdb::ReadOptions ro;
|
||||
rocksdb::Status s = _wb->GetFromBatchAndDB(_db, ro, cf, key, val);
|
||||
return s.ok() ? arangodb::Result() : rocksutils::convertStatus(s, rocksutils::StatusHint::document, "", "Get - in RocksDBBatchedWithIndexMethods");
|
||||
return _wb->GetFromBatchAndDB(_db, ro, cf, key, val);
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBBatchedWithIndexMethods::Put(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Status RocksDBBatchedWithIndexMethods::Put(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key,
|
||||
rocksdb::Slice const& val,
|
||||
rocksutils::StatusHint) {
|
||||
rocksdb::Slice const& val) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
_wb->Put(cf, key.string(), val);
|
||||
return arangodb::Result();
|
||||
return _wb->Put(cf, key.string(), val);
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBBatchedWithIndexMethods::Delete(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Status RocksDBBatchedWithIndexMethods::Delete(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
_wb->Delete(cf, key.string());
|
||||
return arangodb::Result();
|
||||
return _wb->Delete(cf, key.string());
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBBatchedWithIndexMethods::SingleDelete(rocksdb::ColumnFamilyHandle* cf,
|
||||
rocksdb::Status RocksDBBatchedWithIndexMethods::SingleDelete(rocksdb::ColumnFamilyHandle* cf,
|
||||
RocksDBKey const& key) {
|
||||
TRI_ASSERT(cf != nullptr);
|
||||
_wb->SingleDelete(cf, key.string());
|
||||
return arangodb::Result();
|
||||
return _wb->SingleDelete(cf, key.string());
|
||||
}
|
||||
|
||||
std::unique_ptr<rocksdb::Iterator> RocksDBBatchedWithIndexMethods::NewIterator(
|
||||
|
|
|
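RocksDBBatchedWithIndexMethods, by contrast, can read its own uncommitted writes because every lookup goes through GetFromBatchAndDB(). A condensed stand-alone sketch of that mechanism (the function name is illustrative):

#include <rocksdb/db.h>
#include <rocksdb/utilities/write_batch_with_index.h>

#include <string>

// The lookup consults the indexed, still-uncommitted batch first and falls
// back to the database, so a key Put() into the batch is already visible.
rocksdb::Status LookupThroughBatch(rocksdb::DB* db,
                                   rocksdb::ColumnFamilyHandle* cf,
                                   rocksdb::WriteBatchWithIndex& wbwi,
                                   rocksdb::Slice const& key,
                                   std::string* value) {
  return wbwi.GetFromBatchAndDB(db, rocksdb::ReadOptions(), cf, key, value);
}

// Example: a key written into the batch but not yet committed is still found:
//   rocksdb::WriteBatchWithIndex wbwi;
//   wbwi.Put(cf, "k", "v");
//   std::string v;
//   LookupThroughBatch(db, cf, wbwi, "k", &v);  // OK, v == "v"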
@ -85,35 +85,27 @@ class RocksDBMethods {
|
|||
// the default implementation is to do nothing
|
||||
virtual void EnableIndexing() {}
|
||||
|
||||
virtual bool Exists(rocksdb::ColumnFamilyHandle*, RocksDBKey const&) = 0;
|
||||
virtual arangodb::Result Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const&,
|
||||
virtual rocksdb::Status Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const&,
|
||||
std::string*) = 0;
|
||||
virtual arangodb::Result Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const&,
|
||||
virtual rocksdb::Status Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const&,
|
||||
rocksdb::PinnableSlice*) = 0;
|
||||
virtual arangodb::Result Put(
|
||||
rocksdb::ColumnFamilyHandle*, RocksDBKey const&, rocksdb::Slice const&,
|
||||
rocksutils::StatusHint hint = rocksutils::StatusHint::none) = 0;
|
||||
virtual rocksdb::Status Put(
|
||||
rocksdb::ColumnFamilyHandle*, RocksDBKey const&, rocksdb::Slice const&) = 0;
|
||||
|
||||
virtual arangodb::Result Delete(rocksdb::ColumnFamilyHandle*,
|
||||
virtual rocksdb::Status Delete(rocksdb::ColumnFamilyHandle*,
|
||||
RocksDBKey const&) = 0;
|
||||
/// contrary to Delete, a SingleDelete may only be used
|
||||
/// when keys are inserted exactly once (and never overwritten)
|
||||
virtual arangodb::Result SingleDelete(rocksdb::ColumnFamilyHandle*,
|
||||
virtual rocksdb::Status SingleDelete(rocksdb::ColumnFamilyHandle*,
|
||||
RocksDBKey const&) = 0;
|
||||
|
||||
virtual std::unique_ptr<rocksdb::Iterator> NewIterator(
|
||||
rocksdb::ReadOptions const&, rocksdb::ColumnFamilyHandle*) = 0;
|
||||
|
||||
virtual void SetSavePoint() = 0;
|
||||
virtual arangodb::Result RollbackToSavePoint() = 0;
|
||||
virtual rocksdb::Status RollbackToSavePoint() = 0;
|
||||
virtual void PopSavePoint() = 0;
|
||||
|
||||
// convenience and compatibility method
|
||||
arangodb::Result Get(rocksdb::ColumnFamilyHandle*, RocksDBKey const&,
|
||||
std::string*);
|
||||
arangodb::Result Get(rocksdb::ColumnFamilyHandle*, RocksDBKey const&,
|
||||
rocksdb::PinnableSlice*);
|
||||
|
||||
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
|
||||
std::size_t countInBounds(RocksDBKeyBounds const& bounds, bool isElementInRange = false);
|
||||
#endif
|
||||
|
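The interface above distinguishes Delete from SingleDelete: SingleDelete is only legal for keys that are written at most once and never overwritten. A small stand-alone illustration against plain RocksDB (WriteOnceThenRemove is an invented name):

#include <rocksdb/db.h>

// A key that is guaranteed to be written exactly once may be removed with
// SingleDelete, whose tombstone is designed to be cheaper to compact away.
// Keys that can be overwritten must use the regular Delete instead.
rocksdb::Status WriteOnceThenRemove(rocksdb::DB* db,
                                    rocksdb::ColumnFamilyHandle* cf,
                                    rocksdb::Slice const& key) {
  rocksdb::Status s = db->Put(rocksdb::WriteOptions(), cf, key, "payload");
  if (!s.ok()) {
    return s;
  }
  return db->SingleDelete(rocksdb::WriteOptions(), cf, key);
}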
@ -127,25 +119,23 @@ class RocksDBReadOnlyMethods final : public RocksDBMethods {
|
|||
public:
|
||||
explicit RocksDBReadOnlyMethods(RocksDBTransactionState* state);
|
||||
|
||||
bool Exists(rocksdb::ColumnFamilyHandle*, RocksDBKey const&) override;
|
||||
arangodb::Result Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
rocksdb::Status Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
std::string* val) override;
|
||||
arangodb::Result Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
rocksdb::Status Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
rocksdb::PinnableSlice* val) override;
|
||||
arangodb::Result Put(
|
||||
rocksdb::Status Put(
|
||||
rocksdb::ColumnFamilyHandle*, RocksDBKey const& key,
|
||||
rocksdb::Slice const& val,
|
||||
rocksutils::StatusHint hint = rocksutils::StatusHint::none) override;
|
||||
arangodb::Result Delete(rocksdb::ColumnFamilyHandle*,
|
||||
rocksdb::Slice const& val) override;
|
||||
rocksdb::Status Delete(rocksdb::ColumnFamilyHandle*,
|
||||
RocksDBKey const& key) override;
|
||||
arangodb::Result SingleDelete(rocksdb::ColumnFamilyHandle*,
|
||||
rocksdb::Status SingleDelete(rocksdb::ColumnFamilyHandle*,
|
||||
RocksDBKey const&) override;
|
||||
|
||||
std::unique_ptr<rocksdb::Iterator> NewIterator(
|
||||
rocksdb::ReadOptions const&, rocksdb::ColumnFamilyHandle*) override;
|
||||
|
||||
void SetSavePoint() override {}
|
||||
arangodb::Result RollbackToSavePoint() override { return arangodb::Result(); }
|
||||
rocksdb::Status RollbackToSavePoint() override { return rocksdb::Status::OK(); }
|
||||
void PopSavePoint() override {}
|
||||
|
||||
private:
|
||||
|
@ -162,25 +152,23 @@ class RocksDBTrxMethods : public RocksDBMethods {
|
|||
|
||||
void EnableIndexing() override;
|
||||
|
||||
bool Exists(rocksdb::ColumnFamilyHandle*, RocksDBKey const&) override;
|
||||
arangodb::Result Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
rocksdb::Status Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
std::string* val) override;
|
||||
arangodb::Result Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
rocksdb::Status Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
rocksdb::PinnableSlice* val) override;
|
||||
arangodb::Result Put(
|
||||
rocksdb::Status Put(
|
||||
rocksdb::ColumnFamilyHandle*, RocksDBKey const& key,
|
||||
rocksdb::Slice const& val,
|
||||
rocksutils::StatusHint hint = rocksutils::StatusHint::none) override;
|
||||
arangodb::Result Delete(rocksdb::ColumnFamilyHandle*,
|
||||
rocksdb::Slice const& val) override;
|
||||
rocksdb::Status Delete(rocksdb::ColumnFamilyHandle*,
|
||||
RocksDBKey const& key) override;
|
||||
arangodb::Result SingleDelete(rocksdb::ColumnFamilyHandle*,
|
||||
rocksdb::Status SingleDelete(rocksdb::ColumnFamilyHandle*,
|
||||
RocksDBKey const&) override;
|
||||
|
||||
std::unique_ptr<rocksdb::Iterator> NewIterator(
|
||||
rocksdb::ReadOptions const&, rocksdb::ColumnFamilyHandle*) override;
|
||||
|
||||
void SetSavePoint() override;
|
||||
arangodb::Result RollbackToSavePoint() override;
|
||||
rocksdb::Status RollbackToSavePoint() override;
|
||||
void PopSavePoint() override;
|
||||
|
||||
bool _indexingDisabled;
|
||||
|
@ -191,13 +179,12 @@ class RocksDBTrxUntrackedMethods final : public RocksDBTrxMethods {
|
|||
public:
|
||||
explicit RocksDBTrxUntrackedMethods(RocksDBTransactionState* state);
|
||||
|
||||
arangodb::Result Put(
|
||||
rocksdb::Status Put(
|
||||
rocksdb::ColumnFamilyHandle*, RocksDBKey const& key,
|
||||
rocksdb::Slice const& val,
|
||||
rocksutils::StatusHint hint = rocksutils::StatusHint::none) override;
|
||||
arangodb::Result Delete(rocksdb::ColumnFamilyHandle*,
|
||||
rocksdb::Slice const& val) override;
|
||||
rocksdb::Status Delete(rocksdb::ColumnFamilyHandle*,
|
||||
RocksDBKey const& key) override;
|
||||
arangodb::Result SingleDelete(rocksdb::ColumnFamilyHandle*,
|
||||
rocksdb::Status SingleDelete(rocksdb::ColumnFamilyHandle*,
|
||||
RocksDBKey const&) override;
|
||||
};
|
||||
|
||||
|
@ -207,28 +194,25 @@ class RocksDBBatchedMethods final : public RocksDBMethods {
|
|||
RocksDBBatchedMethods(RocksDBTransactionState*,
|
||||
rocksdb::WriteBatch*);
|
||||
|
||||
bool Exists(rocksdb::ColumnFamilyHandle*, RocksDBKey const&) override;
|
||||
arangodb::Result Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
rocksdb::Status Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
std::string* val) override;
|
||||
arangodb::Result Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
rocksdb::Status Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
rocksdb::PinnableSlice* val) override;
|
||||
arangodb::Result Put(
|
||||
rocksdb::Status Put(
|
||||
rocksdb::ColumnFamilyHandle*, RocksDBKey const& key,
|
||||
rocksdb::Slice const& val,
|
||||
rocksutils::StatusHint hint = rocksutils::StatusHint::none) override;
|
||||
arangodb::Result Delete(rocksdb::ColumnFamilyHandle*,
|
||||
rocksdb::Slice const& val) override;
|
||||
rocksdb::Status Delete(rocksdb::ColumnFamilyHandle*,
|
||||
RocksDBKey const& key) override;
|
||||
arangodb::Result SingleDelete(rocksdb::ColumnFamilyHandle*,
|
||||
rocksdb::Status SingleDelete(rocksdb::ColumnFamilyHandle*,
|
||||
RocksDBKey const&) override;
|
||||
std::unique_ptr<rocksdb::Iterator> NewIterator(
|
||||
rocksdb::ReadOptions const&, rocksdb::ColumnFamilyHandle*) override;
|
||||
|
||||
void SetSavePoint() override {}
|
||||
arangodb::Result RollbackToSavePoint() override { return arangodb::Result(); }
|
||||
rocksdb::Status RollbackToSavePoint() override { return rocksdb::Status::OK(); }
|
||||
void PopSavePoint() override {}
|
||||
|
||||
private:
|
||||
rocksdb::TransactionDB* _db;
|
||||
rocksdb::WriteBatch* _wb;
|
||||
};
|
||||
|
||||
|
@ -238,25 +222,23 @@ class RocksDBBatchedWithIndexMethods final : public RocksDBMethods {
|
|||
RocksDBBatchedWithIndexMethods(RocksDBTransactionState*,
|
||||
rocksdb::WriteBatchWithIndex*);
|
||||
|
||||
bool Exists(rocksdb::ColumnFamilyHandle*, RocksDBKey const&) override;
|
||||
arangodb::Result Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
rocksdb::Status Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
std::string* val) override;
|
||||
arangodb::Result Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
rocksdb::Status Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
|
||||
rocksdb::PinnableSlice* val) override;
|
||||
arangodb::Result Put(
|
||||
rocksdb::Status Put(
|
||||
rocksdb::ColumnFamilyHandle*, RocksDBKey const& key,
|
||||
rocksdb::Slice const& val,
|
||||
rocksutils::StatusHint hint = rocksutils::StatusHint::none) override;
|
||||
arangodb::Result Delete(rocksdb::ColumnFamilyHandle*,
|
||||
rocksdb::Slice const& val) override;
|
||||
rocksdb::Status Delete(rocksdb::ColumnFamilyHandle*,
|
||||
RocksDBKey const& key) override;
|
||||
arangodb::Result SingleDelete(rocksdb::ColumnFamilyHandle*,
|
||||
rocksdb::Status SingleDelete(rocksdb::ColumnFamilyHandle*,
|
||||
RocksDBKey const&) override;
|
||||
|
||||
std::unique_ptr<rocksdb::Iterator> NewIterator(
|
||||
rocksdb::ReadOptions const&, rocksdb::ColumnFamilyHandle*) override;
|
||||
|
||||
void SetSavePoint() override {}
|
||||
arangodb::Result RollbackToSavePoint() override { return arangodb::Result(); }
|
||||
rocksdb::Status RollbackToSavePoint() override { return rocksdb::Status::OK(); }
|
||||
void PopSavePoint() override {}
|
||||
|
||||
private:
|
||||
|
|
|
@@ -29,7 +29,6 @@
 #include "Cache/CachedValue.h"
 #include "Cache/TransactionalCache.h"
 #include "Cluster/ServerState.h"
-#include "Indexes/IndexResult.h"
 #include "Indexes/SimpleAttributeEqualityMatcher.h"
 #include "Logger/Logger.h"
 #include "RocksDBEngine/RocksDBCollection.h"
|
@ -253,7 +252,6 @@ LocalDocumentId RocksDBPrimaryIndex::lookupKey(transaction::Methods* trx,
|
|||
arangodb::StringRef keyRef) const {
|
||||
RocksDBKeyLeaser key(trx);
|
||||
key->constructPrimaryIndexValue(_objectId, keyRef);
|
||||
RocksDBValue value = RocksDBValue::Empty(RocksDBEntryType::PrimaryIndexValue);
|
||||
|
||||
bool lockTimeout = false;
|
||||
if (useCache()) {
|
||||
|
@ -272,10 +270,10 @@ LocalDocumentId RocksDBPrimaryIndex::lookupKey(transaction::Methods* trx,
|
|||
}
|
||||
}
|
||||
|
||||
// acquire rocksdb transaction
|
||||
RocksDBMethods* mthds = RocksDBTransactionState::toMethods(trx);
|
||||
arangodb::Result r = mthds->Get(_cf, key.ref(), value.buffer());
|
||||
if (!r.ok()) {
|
||||
rocksdb::PinnableSlice val;
|
||||
rocksdb::Status s = mthds->Get(_cf, key->string(), &val);
|
||||
if (!s.ok()) {
|
||||
return LocalDocumentId();
|
||||
}
|
||||
|
||||
|
@ -285,7 +283,7 @@ LocalDocumentId RocksDBPrimaryIndex::lookupKey(transaction::Methods* trx,
|
|||
// write entry back to cache
|
||||
auto entry = cache::CachedValue::construct(
|
||||
key->string().data(), static_cast<uint32_t>(key->string().size()),
|
||||
value.buffer()->data(), static_cast<uint64_t>(value.buffer()->size()));
|
||||
val.data(), static_cast<uint64_t>(val.size()));
|
||||
if (entry) {
|
||||
Result status = _cache->insert(entry);
|
||||
if (status.errorNumber() == TRI_ERROR_LOCK_TIMEOUT) {
|
||||
|
@ -299,7 +297,7 @@ LocalDocumentId RocksDBPrimaryIndex::lookupKey(transaction::Methods* trx,
|
|||
}
|
||||
}
|
||||
|
||||
return RocksDBValue::documentId(value);
|
||||
return RocksDBValue::documentId(val);
|
||||
}
|
||||
|
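lookupKey() above now reads straight into a rocksdb::PinnableSlice instead of copying the value into a separate buffer. The core of that pattern, reduced to the public RocksDB API (PointLookup is an illustrative name):

#include <rocksdb/db.h>

// The value stays pinned inside RocksDB (block cache or memtable) while the
// PinnableSlice is alive, which can avoid a copy; Reset() releases the pin.
bool PointLookup(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf,
                 rocksdb::Slice const& key) {
  rocksdb::PinnableSlice val;
  rocksdb::Status s = db->Get(rocksdb::ReadOptions(), cf, key, &val);
  if (!s.ok()) {
    return false;  // NotFound or a real error; callers may inspect s
  }
  // val.data() / val.size() are valid here, e.g. to decode a document id
  val.Reset();
  return true;
}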
||||
/// @brief reads a revision id from the primary index
|
||||
|
@ -319,20 +317,20 @@ bool RocksDBPrimaryIndex::lookupRevision(transaction::Methods* trx,
|
|||
|
||||
RocksDBKeyLeaser key(trx);
|
||||
key->constructPrimaryIndexValue(_objectId, keyRef);
|
||||
RocksDBValue value = RocksDBValue::Empty(RocksDBEntryType::PrimaryIndexValue);
|
||||
|
||||
// acquire rocksdb transaction
|
||||
RocksDBMethods* mthds = RocksDBTransactionState::toMethods(trx);
|
||||
arangodb::Result r = mthds->Get(_cf, key.ref(), value.buffer());
|
||||
if (!r.ok()) {
|
||||
rocksdb::PinnableSlice val;
|
||||
rocksdb::Status s = mthds->Get(_cf, key->string(), &val);
|
||||
if (!s.ok()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
documentId = RocksDBValue::documentId(value);
|
||||
documentId = RocksDBValue::documentId(val);
|
||||
|
||||
// this call will populate revisionId if the revision id value is
|
||||
// stored in the primary index
|
||||
revisionId = RocksDBValue::revisionId(value);
|
||||
revisionId = RocksDBValue::revisionId(val);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -341,27 +339,36 @@ Result RocksDBPrimaryIndex::insertInternal(transaction::Methods* trx,
|
|||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& slice,
|
||||
OperationMode mode) {
|
||||
IndexResult res;
|
||||
Result res;
|
||||
|
||||
VPackSlice keySlice = transaction::helpers::extractKeyFromDocument(slice);
|
||||
TRI_ASSERT(keySlice.isString());
|
||||
RocksDBKeyLeaser key(trx);
|
||||
key->constructPrimaryIndexValue(_objectId, StringRef(keySlice));
|
||||
|
||||
if (mthd->Exists(_cf, key.ref())) {
|
||||
std::string existingId(slice.get(StaticStrings::KeyString).copyString());
|
||||
rocksdb::PinnableSlice val;
|
||||
rocksdb::Status s = mthd->Get(_cf, key->string(), &val);
|
||||
if (s.ok()) { // detected conflicting primary key
|
||||
std::string existingId = keySlice.copyString();
|
||||
if (mode == OperationMode::internal) {
|
||||
return res.reset(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED, std::move(existingId));
|
||||
}
|
||||
return res.reset(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED, this, existingId);
|
||||
res.reset(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED);
|
||||
return addErrorMsg(res, existingId);
|
||||
}
|
||||
val.Reset(); // clear used memory
|
||||
|
||||
blackListKey(key->string().data(), static_cast<uint32_t>(key->string().size()));
|
||||
|
||||
TRI_voc_rid_t revision = transaction::helpers::extractRevFromDocument(slice);
|
||||
auto value = RocksDBValue::PrimaryIndexValue(documentId, revision);
|
||||
|
||||
Result status = mthd->Put(_cf, key.ref(), value.string(), rocksutils::index);
|
||||
return res.reset(status, this);
|
||||
s = mthd->Put(_cf, key.ref(), value.string());
|
||||
if (!s.ok()) {
|
||||
res.reset(rocksutils::convertStatus(s, rocksutils::index));
|
||||
addErrorMsg(res);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
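In short, the conflict check in insertInternal() above boils down to "a successful Get on the primary-index key means the key is already taken". Condensed from the hunk (ArangoDB-internal names; the cache blacklisting and the OperationMode::internal branch are omitted, so this is not standalone code):

rocksdb::PinnableSlice val;
rocksdb::Status s = mthd->Get(_cf, key->string(), &val);
if (s.ok()) {  // key already present -> unique constraint violated
  res.reset(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED);
  return addErrorMsg(res, keySlice.copyString());
}
s = mthd->Put(_cf, key.ref(), value.string());
if (!s.ok()) {
  res.reset(rocksutils::convertStatus(s, rocksutils::index));
  addErrorMsg(res);
}
return res;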
Result RocksDBPrimaryIndex::updateInternal(transaction::Methods* trx,
|
||||
|
@ -371,6 +378,8 @@ Result RocksDBPrimaryIndex::updateInternal(transaction::Methods* trx,
|
|||
LocalDocumentId const& newDocumentId,
|
||||
velocypack::Slice const& newDoc,
|
||||
OperationMode mode) {
|
||||
Result res;
|
||||
|
||||
VPackSlice keySlice = transaction::helpers::extractKeyFromDocument(oldDoc);
|
||||
TRI_ASSERT(keySlice == oldDoc.get(StaticStrings::KeyString));
|
||||
RocksDBKeyLeaser key(trx);
|
||||
|
@ -378,12 +387,15 @@ Result RocksDBPrimaryIndex::updateInternal(transaction::Methods* trx,
|
|||
|
||||
TRI_voc_rid_t revision = transaction::helpers::extractRevFromDocument(newDoc);
|
||||
auto value = RocksDBValue::PrimaryIndexValue(newDocumentId, revision);
|
||||
|
||||
TRI_ASSERT(mthd->Exists(_cf, key.ref()));
|
||||
blackListKey(key->string().data(),
|
||||
static_cast<uint32_t>(key->string().size()));
|
||||
Result status = mthd->Put(_cf, key.ref(), value.string(), rocksutils::index);
|
||||
return IndexResult(status.errorNumber(), this);
|
||||
|
||||
rocksdb::Status s = mthd->Put(_cf, key.ref(), value.string());
|
||||
if (!s.ok()) {
|
||||
res.reset(rocksutils::convertStatus(s, rocksutils::index));
|
||||
addErrorMsg(res);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
Result RocksDBPrimaryIndex::removeInternal(transaction::Methods* trx,
|
||||
|
@ -391,17 +403,25 @@ Result RocksDBPrimaryIndex::removeInternal(transaction::Methods* trx,
|
|||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& slice,
|
||||
OperationMode mode) {
|
||||
Result res;
|
||||
|
||||
// TODO: deal with matching revisions?
|
||||
VPackSlice keySlice = transaction::helpers::extractKeyFromDocument(slice);
|
||||
TRI_ASSERT(keySlice.isString());
|
||||
RocksDBKeyLeaser key(trx);
|
||||
key->constructPrimaryIndexValue(
|
||||
_objectId, StringRef(slice.get(StaticStrings::KeyString)));
|
||||
_objectId, StringRef(keySlice));
|
||||
|
||||
blackListKey(key->string().data(), static_cast<uint32_t>(key->string().size()));
|
||||
|
||||
// acquire rocksdb transaction
|
||||
RocksDBMethods* mthds = RocksDBTransactionState::toMethods(trx);
|
||||
Result r = mthds->Delete(_cf, key.ref());
|
||||
return IndexResult(r.errorNumber(), this);
|
||||
rocksdb::Status s = mthds->Delete(_cf, key.ref());
|
||||
if (!s.ok()) {
|
||||
res.reset(rocksutils::convertStatus(s, rocksutils::index));
|
||||
addErrorMsg(res);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
/// @brief checks whether the index supports the condition
|
||||
|
|
|
@@ -28,7 +28,6 @@
 #include "Aql/SortCondition.h"
 #include "Basics/StaticStrings.h"
 #include "Basics/VelocyPackHelper.h"
-#include "Indexes/IndexResult.h"
 #include "Indexes/SimpleAttributeEqualityMatcher.h"
 #include "Indexes/PersistentIndexAttributeMatcher.h"
 #include "RocksDBEngine/RocksDBCollection.h"
|
@ -110,13 +109,12 @@ bool RocksDBVPackUniqueIndexIterator::next(LocalDocumentIdCallback const& cb,
|
|||
|
||||
_done = true;
|
||||
|
||||
auto value = RocksDBValue::Empty(RocksDBEntryType::PrimaryIndexValue);
|
||||
rocksdb::PinnableSlice val;
|
||||
RocksDBMethods* mthds = RocksDBTransactionState::toMethods(_trx);
|
||||
arangodb::Result r =
|
||||
mthds->Get(_index->columnFamily(), _key.ref(), value.buffer());
|
||||
rocksdb::Status s = mthds->Get(_index->columnFamily(), _key->string(), &val);
|
||||
|
||||
if (r.ok()) {
|
||||
cb(RocksDBValue::documentId(*value.buffer()));
|
||||
if (s.ok()) {
|
||||
cb(RocksDBValue::documentId(val));
|
||||
}
|
||||
|
||||
// there is at most one element, so we are done now
|
||||
|
@ -133,12 +131,12 @@ bool RocksDBVPackUniqueIndexIterator::nextCovering(DocumentCallback const& cb, s
|
|||
|
||||
_done = true;
|
||||
|
||||
auto value = RocksDBValue::Empty(RocksDBEntryType::PrimaryIndexValue);
|
||||
rocksdb::PinnableSlice val;
|
||||
RocksDBMethods* mthds = RocksDBTransactionState::toMethods(_trx);
|
||||
arangodb::Result r = mthds->Get(_index->columnFamily(), _key.ref(), value.buffer());
|
||||
rocksdb::Status s = mthds->Get(_index->columnFamily(), _key->string(), &val);
|
||||
|
||||
if (r.ok()) {
|
||||
cb(LocalDocumentId(RocksDBValue::documentId(*value.buffer())), RocksDBKey::indexedVPack(_key.ref()));
|
||||
if (s.ok()) {
|
||||
cb(LocalDocumentId(RocksDBValue::documentId(val)), RocksDBKey::indexedVPack(_key.ref()));
|
||||
}
|
||||
|
||||
// there is at most one element, so we are done now
|
||||
|
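The unique-index iterator above can yield at most one element, so next() and nextCovering() degenerate into a single Get plus one callback invocation. A stand-alone analogue of that shape (LookupOnce is an invented name):

#include <rocksdb/db.h>

#include <functional>

// Invokes the callback at most once and always reports exhaustion afterwards.
bool LookupOnce(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf,
                rocksdb::Slice const& key,
                std::function<void(rocksdb::Slice)> const& cb) {
  rocksdb::PinnableSlice val;
  rocksdb::Status s = db->Get(rocksdb::ReadOptions(), cf, key, &val);
  if (s.ok()) {
    cb(val);
  }
  return false;  // "has more" is always false: there is no second element
}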
@ -640,88 +638,75 @@ Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx,
|
|||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
Result res;
|
||||
rocksdb::Status s;
|
||||
|
||||
SmallVector<RocksDBKey>::allocator_type::arena_type elementsArena;
|
||||
SmallVector<RocksDBKey> elements{elementsArena};
|
||||
SmallVector<uint64_t>::allocator_type::arena_type hashesArena;
|
||||
SmallVector<uint64_t> hashes{hashesArena};
|
||||
int res = TRI_ERROR_NO_ERROR;
|
||||
{
|
||||
// rethrow all types of exceptions from here...
|
||||
transaction::BuilderLeaser leased(trx);
|
||||
res = fillElement(*(leased.get()), documentId, doc, elements, hashes);
|
||||
}
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
return IndexResult(res, this);
|
||||
int r = fillElement(*(leased.get()), documentId, doc, elements, hashes);
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
}
|
||||
|
||||
IndexingDisabler guard(mthds, !_unique && trx->hasHint(transaction::Hints::Hint::FROM_TOPLEVEL_AQL));
|
||||
|
||||
// now we are going to construct the value to insert into rocksdb
|
||||
// unique indexes have a different key structure
|
||||
RocksDBValue value = _unique ? RocksDBValue::UniqueVPackIndexValue(documentId)
|
||||
: RocksDBValue::VPackIndexValue();
|
||||
|
||||
size_t const count = elements.size();
|
||||
RocksDBValue existing = RocksDBValue::Empty(RocksDBEntryType::UniqueVPackIndexValue);
|
||||
IndexingDisabler guard(mthds, !_unique && trx->hasHint(transaction::Hints::Hint::FROM_TOPLEVEL_AQL));
|
||||
|
||||
rocksdb::PinnableSlice existing;
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
RocksDBKey& key = elements[i];
|
||||
if (_unique) {
|
||||
if (mthds->Exists(_cf, key)) {
|
||||
res = TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED;
|
||||
auto found = mthds->Get(_cf, key, existing.buffer());
|
||||
TRI_ASSERT(found.ok());
|
||||
s = mthds->Get(_cf, key.string(), &existing);
|
||||
if (s.ok()) { // detected conflicting index entry
|
||||
res.reset(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (res == TRI_ERROR_NO_ERROR) {
|
||||
arangodb::Result r =
|
||||
mthds->Put(_cf, key, value.string(), rocksutils::index);
|
||||
if (!r.ok()) {
|
||||
res = r.errorNumber();
|
||||
}
|
||||
}
|
||||
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
for (size_t j = 0; j < i; ++j) {
|
||||
mthds->Delete(_cf, elements[j]);
|
||||
}
|
||||
|
||||
if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED && !_unique) {
|
||||
// We ignore unique_constraint violated if we are not unique
|
||||
res = TRI_ERROR_NO_ERROR;
|
||||
// TODO: remove this? seems dangerous...
|
||||
}
|
||||
s = mthds->Put(_cf, key, value.string());
|
||||
if (!s.ok()) {
|
||||
res = rocksutils::convertStatus(s, rocksutils::index);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (res == TRI_ERROR_NO_ERROR && !_unique) {
|
||||
if (res.ok() && !_unique) {
|
||||
auto state = RocksDBTransactionState::toState(trx);
|
||||
|
||||
for (auto& it : hashes) {
|
||||
// The estimator is only useful if we are in a non-unique indexes
|
||||
TRI_ASSERT(!_unique);
|
||||
state->trackIndexInsert(_collection.id(), id(), it);
|
||||
}
|
||||
}
|
||||
|
||||
if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
|
||||
LocalDocumentId documentId = RocksDBValue::documentId(existing);
|
||||
} else if (res.is(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED)) {
|
||||
// find conflicting document
|
||||
LocalDocumentId docId = RocksDBValue::documentId(existing);
|
||||
std::string existingKey;
|
||||
|
||||
bool success = _collection.getPhysical()->readDocumentWithCallback(trx, documentId, [&](LocalDocumentId const&, VPackSlice doc) {
|
||||
existingKey = doc.get(StaticStrings::KeyString).copyString();
|
||||
bool success = _collection.getPhysical()->readDocumentWithCallback(trx, docId,
|
||||
[&](LocalDocumentId const&, VPackSlice doc) {
|
||||
existingKey = transaction::helpers::extractKeyFromDocument(doc).copyString();
|
||||
});
|
||||
TRI_ASSERT(success);
|
||||
|
||||
if (mode == OperationMode::internal) {
|
||||
return IndexResult(res, std::move(existingKey));
|
||||
res.resetErrorMessage(std::move(existingKey));
|
||||
} else {
|
||||
addErrorMsg(res, existingKey);
|
||||
}
|
||||
|
||||
return IndexResult(res, this, existingKey);
|
||||
} else if (res.fail()) {
|
||||
addErrorMsg(res);
|
||||
}
|
||||
|
||||
return IndexResult(res, this);
|
||||
return res;
|
||||
}
|
||||
|
||||
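The noteworthy part of the unique-violation branch above is how the offending document's key is recovered: the existing index value still carries the LocalDocumentId, which is enough to read the conflicting document back. Condensed (ArangoDB-internal names as in the hunk; not standalone code):

if (res.is(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED)) {
  LocalDocumentId docId = RocksDBValue::documentId(existing);
  std::string existingKey;
  _collection.getPhysical()->readDocumentWithCallback(
      trx, docId, [&](LocalDocumentId const&, VPackSlice doc) {
        existingKey = transaction::helpers::extractKeyFromDocument(doc).copyString();
      });
  if (mode == OperationMode::internal) {
    res.resetErrorMessage(std::move(existingKey));  // raw key for internal callers
  } else {
    addErrorMsg(res, existingKey);                  // human-readable message
  }
}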
Result RocksDBVPackIndex::updateInternal(
|
||||
|
@ -736,6 +721,9 @@ Result RocksDBVPackIndex::updateInternal(
|
|||
return RocksDBIndex::updateInternal(trx, mthds, oldDocumentId, oldDoc,
|
||||
newDocumentId, newDoc, mode);
|
||||
} else {
|
||||
Result res;
|
||||
rocksdb::Status s;
|
||||
|
||||
bool equal = true;
|
||||
for (size_t i = 0; i < _paths.size(); ++i) {
|
||||
TRI_ASSERT(!_paths[i].empty());
|
||||
|
@ -747,7 +735,7 @@ Result RocksDBVPackIndex::updateInternal(
|
|||
if (_sparse) {
|
||||
// if sparse we do not have to index, this is indicated by result
|
||||
// being shorter than n
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
return res;
|
||||
}
|
||||
} else if (basics::VelocyPackHelper::compare(oldSlice, newSlice, true)) {
|
||||
equal = false;
|
||||
|
@ -765,33 +753,22 @@ Result RocksDBVPackIndex::updateInternal(
|
|||
SmallVector<RocksDBKey> elements{elementsArena};
|
||||
SmallVector<uint64_t>::allocator_type::arena_type hashesArena;
|
||||
SmallVector<uint64_t> hashes{hashesArena};
|
||||
int res = TRI_ERROR_NO_ERROR;
|
||||
{
|
||||
// rethrow all types of exceptions from here...
|
||||
transaction::BuilderLeaser leased(trx);
|
||||
res =
|
||||
fillElement(*(leased.get()), newDocumentId, newDoc, elements, hashes);
|
||||
}
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
return IndexResult(res, this);
|
||||
int r = fillElement(*(leased.get()), newDocumentId, newDoc, elements, hashes);
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
}
|
||||
|
||||
RocksDBValue value = RocksDBValue::UniqueVPackIndexValue(newDocumentId);
|
||||
size_t const count = elements.size();
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
RocksDBKey& key = elements[i];
|
||||
if (res == TRI_ERROR_NO_ERROR) {
|
||||
arangodb::Result r =
|
||||
mthds->Put(_cf, key, value.string(), rocksutils::index);
|
||||
if (!r.ok()) {
|
||||
res = r.errorNumber();
|
||||
}
|
||||
}
|
||||
// fix the inserts again
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
for (size_t j = 0; j < i; ++j) {
|
||||
mthds->Delete(_cf, elements[j]);
|
||||
}
|
||||
s = mthds->Put(_cf, key, value.string());
|
||||
if (!s.ok()) {
|
||||
res = rocksutils::convertStatus(s, rocksutils::index);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -806,53 +783,56 @@ Result RocksDBVPackIndex::removeInternal(transaction::Methods* trx,
|
|||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
Result res;
|
||||
rocksdb::Status s;
|
||||
|
||||
SmallVector<RocksDBKey>::allocator_type::arena_type elementsArena;
|
||||
SmallVector<RocksDBKey> elements{elementsArena};
|
||||
SmallVector<uint64_t>::allocator_type::arena_type hashesArena;
|
||||
SmallVector<uint64_t> hashes{hashesArena};
|
||||
|
||||
IndexingDisabler guard(mthds, !_unique && trx->hasHint(transaction::Hints::Hint::FROM_TOPLEVEL_AQL));
|
||||
|
||||
int res = TRI_ERROR_NO_ERROR;
|
||||
{
|
||||
// rethrow all types of exceptions from here...
|
||||
transaction::BuilderLeaser leased(trx);
|
||||
res = fillElement(*(leased.get()), documentId, doc, elements, hashes);
|
||||
}
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
return IndexResult(res, this);
|
||||
int r = fillElement(*(leased.get()), documentId, doc, elements, hashes);
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
}
|
||||
|
||||
IndexingDisabler guard(mthds, !_unique && trx->hasHint(transaction::Hints::Hint::FROM_TOPLEVEL_AQL));
|
||||
|
||||
size_t const count = elements.size();
|
||||
if (_unique) {
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
arangodb::Result r = mthds->Delete(_cf, elements[i]);
|
||||
if (!r.ok()) {
|
||||
res = r.errorNumber();
|
||||
s = mthds->Delete(_cf, elements[i]);
|
||||
if (!s.ok()) {
|
||||
res.reset(rocksutils::convertStatus(s, rocksutils::index));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// non-unique index contain the unique objectID
|
||||
// they should be written exactly once
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
arangodb::Result r = mthds->SingleDelete(_cf, elements[i]);
|
||||
if (!r.ok()) {
|
||||
res = r.errorNumber();
|
||||
s = mthds->SingleDelete(_cf, elements[i]);
|
||||
if (!s.ok()) {
|
||||
res.reset(rocksutils::convertStatus(s, rocksutils::index));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (res == TRI_ERROR_NO_ERROR) {
|
||||
if (res.ok() && !_unique) {
|
||||
auto state = RocksDBTransactionState::toState(trx);
|
||||
|
||||
for (auto& it : hashes) {
|
||||
// The estimator is only useful if we are in a non-unique indexes
|
||||
TRI_ASSERT(!_unique);
|
||||
state->trackIndexRemove(_collection.id(), id(), it);
|
||||
}
|
||||
} else if (res.fail()) {
|
||||
addErrorMsg(res);
|
||||
}
|
||||
|
||||
return IndexResult(res, this);
|
||||
return res;
|
||||
}
|
||||
|
||||
/// @brief attempts to locate an entry in the index
|
||||
|
|
|
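The two branches in removeInternal() above differ only in the tombstone type: entries of a unique index can be overwritten (updateInternal() re-puts the same key), so they need a plain Delete, while non-unique entries embed the unique objectID in the key, are written exactly once, and can use SingleDelete. Condensed (ArangoDB-internal names; not standalone code):

for (size_t i = 0; i < count; ++i) {
  rocksdb::Status s = _unique ? mthds->Delete(_cf, elements[i])
                              : mthds->SingleDelete(_cf, elements[i]);
  if (!s.ok()) {
    res.reset(rocksutils::convertStatus(s, rocksutils::index));
  }
}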
@@ -326,7 +326,7 @@ static Result EnsureIndexLocal(arangodb::LogicalCollection* collection,
   VPackBuilder b;
   b.openObject();
   b.add("isNewlyCreated", VPackValue(created));
-  b.add("id",
+  b.add(StaticStrings::IndexId,
         VPackValue(collection->name() + TRI_INDEX_HANDLE_SEPARATOR_CHR + iid));
   b.close();
   output = VPackCollection::merge(tmp.slice(), b.slice(), false);
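A compact sketch of the same response-assembly idiom, using the velocypack aliases from the hunk above and assuming `tmp` already holds the index description; the literal values are placeholders:

VPackBuilder extra;
extra.openObject();
extra.add("isNewlyCreated", VPackValue(true));
extra.add(StaticStrings::IndexId, VPackValue("myCollection/17"));  // "<collection>/<iid>"
extra.close();
// overlay the handler-specific fields onto the index definition in `tmp`
VPackBuilder output = VPackCollection::merge(tmp.slice(), extra.slice(), false);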
@@ -149,13 +149,23 @@ class Result {
    * @brief Get error message
    * @return Our error message
    */
-  virtual std::string errorMessage() const&;
+  std::string errorMessage() const&;

   /**
    * @brief Get error message
    * @return Our error message
    */
-  virtual std::string errorMessage() &&;
+  std::string errorMessage() &&;
+
+  template<typename S>
+  void resetErrorMessage(S&& msg) {
+    _errorMessage.assign(std::forward<S>(msg));
+  }
+
+  template<typename S>
+  void appendErrorMessage(S&& msg) {
+    _errorMessage.append(std::forward<S>(msg));
+  }

  private:
   int _errorNumber;
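The two new Result helpers cover the two common cases: resetErrorMessage() replaces the message wholesale, while appendErrorMessage() tacks extra context onto whatever is already there. A hypothetical caller (annotate is an invented name, shown only to illustrate the API):

arangodb::Result annotate(arangodb::Result res, std::string const& detail) {
  if (res.fail()) {
    res.appendErrorMessage(" - while working on ");
    res.appendErrorMessage(detail);
  }
  return res;
}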