
WIP - start adding optional overwrite to insert operation (RepSert) (#5268)

This commit is contained in:
Jan Christoph Uhde 2018-05-24 19:47:15 +02:00 committed by Jan
parent bc70486761
commit a2dcb6cc5d
34 changed files with 1014 additions and 421 deletions

View File

@ -17,6 +17,13 @@ devel
should change the protocol from SSLv2 to TLSv12 if possible, by adjusting
the value of the `--ssl.protocol` startup option.
* added `overwrite` option to the `document rest-handler` to allow for easier syncing.
This implements much of the frequently requested UPSERT functionality. Strictly
speaking it is a REPSERT (replace/insert), because existing documents can only be
replaced, not modified in place. The option does not work on cluster collections
with custom sharding. A short usage sketch follows after this list.
* added startup option `--log.escape`
This option toggles the escaping of log output.
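
A minimal arangosh sketch of the new behavior (not part of this commit; the collection name and attribute values are only illustrative):

db._create("example");
db.example.insert({ _key: "616", value: 1 });
// a second plain insert with the same _key would fail with a unique constraint violation;
// with overwrite the existing document is replaced instead, and returnOld exposes the previous version
db.example.insert({ _key: "616", value: 2 }, { overwrite: true, returnOld: true });
db._drop("example");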

View File

@ -567,8 +567,15 @@ used to specify the following options:
a default *waitForSync* value of *true*.
- *silent*: If this flag is set to *true*, the method does not return
any output.
- *overwrite*: If set to *true*, the insert becomes a replace-insert.
If a document with the same *_key* already exists, the new document is
not rejected with a unique constraint violation but instead replaces
the old document.
- *returnNew*: If this flag is set to *true*, the complete new document
is returned in the output under the attribute *new*.
- *returnOld*: If this flag is set to *true*, the complete old document
is returned in the output under the attribute *old*. Only available
in combination with the *overwrite* option.
Note: since ArangoDB 2.2, *insert* is an alias for *save*.
@ -610,6 +617,14 @@ multiple documents with one call.
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock documentsCollectionInsertMulti
@startDocuBlockInline documentsCollectionInsertSingleOverwrite
@EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionInsertSingleOverwrite}
~ db._create("example");
db.example.insert({ _key : "666", Hello : "World" });
db.example.insert({ _key : "666", Hello : "Universe" }, {overwrite: true, returnOld: true});
~ db._drop("example");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock documentsCollectionInsertSingleOverwrite
Replace

View File

@ -26,11 +26,20 @@ Wait until document has been synced to disk.
Additionally return the complete new document under the attribute *new*
in the result.
@RESTQUERYPARAM{returnOld,boolean,optional}
Additionally return the complete old document under the attribute *old*
in the result. Only available if the overwrite option is used.
@RESTQUERYPARAM{silent,boolean,optional}
If set to *true*, an empty object will be returned as response. No meta-data
will be returned for the created document. This option can be used to
save some network traffic.
@RESTQUERYPARAM{overwrite,boolean,optional}
If set to *true*, the insert becomes a replace-insert. If a document with the
same *_key* already exists, the new document is not rejected with a unique
constraint violation but instead replaces the old document.
@RESTDESCRIPTION
Creates a new document from the document given in the body, unless there
is already a document with the *_key* given. If no *_key* is given, a new
@ -239,5 +248,28 @@ Use of returnNew:
logJsonResponse(response);
db._drop(cn);
@END_EXAMPLE_ARANGOSH_RUN
@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostOverwrite}
var cn = "products";
db._drop(cn);
db._create(cn, { waitForSync: true });
var url = "/_api/document/" + cn;
var body = '{ "Hello": "World", "_key" : "lock" }';
var response = logCurlRequest('POST', url, body);
// insert
assert(response.code === 201);
logJsonResponse(response);
body = '{ "Hello": "Universe", "_key" : "lock" }';
url = "/_api/document/" + cn + "?overwrite=true";
response = logCurlRequest('POST', url, body);
// insert same key
assert(response.code === 201);
logJsonResponse(response);
db._drop(cn);
@END_EXAMPLE_ARANGOSH_RUN
@endDocuBlock

View File

@ -848,5 +848,210 @@ describe ArangoDB do
end
################################################################################
## known collection identifier, overwrite = true
################################################################################
context "known collection identifier, overwrite = true:" do
before do
@cn = "UnitTestsCollectionUnsynced"
@cid = ArangoDB.create_collection(@cn, false)
end
end
after do
ArangoDB.drop_collection(@cn)
end
it "replace a document by _key" do
cmd = "/_api/document?collection=#{@cn}"
body = "{ \"Hallo\" : \"World\" }"
doc = ArangoDB.log_post("#{prefix}-accept", cmd, :body => body, :headers => {})
doc.code.should eq(202)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
etag = doc.headers['etag']
etag.should be_kind_of(String)
location = doc.headers['location']
location.should be_kind_of(String)
rev = doc.parsed_response['_rev']
rev.should be_kind_of(String)
did = doc.parsed_response['_id']
did.should be_kind_of(String)
key = doc.parsed_response['_key']
key.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
etag.should eq("\"#{rev}\"")
location.should eq("/_db/_system/_api/document/#{did}")
cmd = "/_api/document?collection=#{@cn}&overwrite=true&waitForSync=false&returnOld=true"
body = "{ \"_key\" : \"#{key}\", \"Hallo\" : \"ULF\" }"
newdoc = ArangoDB.log_post("#{prefix}-accept", cmd, :body => body, :headers => {})
newrev = newdoc.parsed_response['_rev']
newrev.should be_kind_of(String)
newrev.should_not eq(rev)
newoldrev = newdoc.parsed_response['_oldRev']
newoldrev.should be_kind_of(String)
newoldrev.should eq(rev)
oldvalue = newdoc.parsed_response['old']['Hallo']
oldvalue.should be_kind_of(String)
oldvalue.should eq("World")
newkey = newdoc.parsed_response['_key']
newkey.should be_kind_of(String)
newkey.should eq(key)
newdoc.code.should eq(202)
ArangoDB.delete(location)
ArangoDB.size_collection(@cn).should eq(0)
end
it "replace a document by _key, return new / old" do
cmd = "/_api/document?collection=#{@cn}"
body = "{ \"Hallo\" : \"World\" }"
doc = ArangoDB.log_post("#{prefix}-accept", cmd, :body => body, :headers => {})
doc.code.should eq(202)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
etag = doc.headers['etag']
etag.should be_kind_of(String)
location = doc.headers['location']
location.should be_kind_of(String)
rev = doc.parsed_response['_rev']
rev.should be_kind_of(String)
did = doc.parsed_response['_id']
did.should be_kind_of(String)
key = doc.parsed_response['_key']
key.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
etag.should eq("\"#{rev}\"")
location.should eq("/_db/_system/_api/document/#{did}")
cmd = "/_api/document?collection=#{@cn}&overwrite=true&returnNew=true&returnOld=true&waitForSync=true"
body = "{ \"_key\" : \"#{key}\", \"Hallo\" : \"ULF\" }"
newdoc = ArangoDB.log_post("#{prefix}-accept", cmd, :body => body, :headers => {})
newrev = newdoc.parsed_response['_rev']
newrev.should be_kind_of(String)
newrev.should_not eq(rev)
newoldrev = newdoc.parsed_response['_oldRev']
newoldrev.should be_kind_of(String)
newoldrev.should eq(rev)
newkey = newdoc.parsed_response['_key']
newkey.should be_kind_of(String)
newkey.should eq(key)
newnew = newdoc.parsed_response['new']
newnew["_key"].should be_kind_of(String)
newnew["_key"].should eq(key)
newnew["_rev"].should eq(newrev)
newnew["Hallo"].should eq("ULF")
newold = newdoc.parsed_response['old']
newold["_key"].should eq(key)
newold["_rev"].should eq(newoldrev)
newold["Hallo"].should be_kind_of(String)
newold["Hallo"].should eq("World")
newdoc.code.should eq(201)
ArangoDB.delete(location)
ArangoDB.size_collection(@cn).should eq(0)
end
it "replace documents by _key" do
cmd = "/_api/document?collection=#{@cn}"
body = "{ \"Hallo\" : \"World\" }"
doc = ArangoDB.log_post("#{prefix}-accept", cmd, :body => body, :headers => {})
doc.code.should eq(202)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
etag = doc.headers['etag']
etag.should be_kind_of(String)
location = doc.headers['location']
location.should be_kind_of(String)
rev = doc.parsed_response['_rev']
rev.should be_kind_of(String)
did = doc.parsed_response['_id']
did.should be_kind_of(String)
key = doc.parsed_response['_key']
key.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
etag.should eq("\"#{rev}\"")
location.should eq("/_db/_system/_api/document/#{did}")
cmd = "/_api/document?collection=#{@cn}&overwrite=true&returnNew=true&returnOld=true&waitForSync=true"
body = "[{ \"_key\" : \"#{key}\", \"Hallo\" : \"ULF\" }, { \"_key\" : \"#{key}\", \"Hallo\" : \"ULFINE\" }]"
newdoc = ArangoDB.log_post("#{prefix}-accept", cmd, :body => body, :headers => {})
newrev = newdoc.parsed_response[0]['_rev']
newrev.should be_kind_of(String)
newrev.should_not eq(rev)
newoldrev = newdoc.parsed_response[0]['_oldRev']
newoldrev.should be_kind_of(String)
newoldrev.should eq(rev)
newkey = newdoc.parsed_response[0]['_key']
newkey.should be_kind_of(String)
newkey.should eq(key)
newnew = newdoc.parsed_response[0]['new']
newnew["_key"].should be_kind_of(String)
newnew["_key"].should eq(key)
newnew["_rev"].should eq(newrev)
newold = newdoc.parsed_response[0]['old']
newold["_key"].should eq(key)
newold["_rev"].should eq(newoldrev)
newrev = newdoc.parsed_response[1]['_rev']
newrev.should be_kind_of(String)
newrev.should_not eq(rev)
newdoc.parsed_response[1]['new']['Hallo'].should eq("ULFINE")
newdoc.parsed_response[1]['old']['Hallo'].should eq("ULF")
newdoc.code.should eq(201)
ArangoDB.delete(location)
ArangoDB.size_collection(@cn).should eq(0)
end
end #overwrite - end
end # context - end
end # describe - end

View File

@ -29,6 +29,7 @@
#include "Aql/Function.h"
#include "Aql/Graphs.h"
#include "Aql/Query.h"
#include "Aql/ExecutionPlan.h"
#include "Basics/Exceptions.h"
#include "Basics/StringRef.h"
#include "Basics/StringUtils.h"
@ -326,18 +327,32 @@ AstNode* Ast::createNodeInsert(AstNode const* expression,
AstNode const* collection,
AstNode const* options) {
AstNode* node = createNode(NODE_TYPE_INSERT);
node->reserve(4);
if (options == nullptr) {
// no options given. now use default options
options = &NopNode;
}
bool overwrite = false;
if (options->type == NODE_TYPE_OBJECT) {
auto ops = ExecutionPlan::parseModificationOptions(options);
overwrite = ops.overwrite;
}
node->reserve(overwrite ? 5 : 4);
node->addMember(options);
node->addMember(collection);
node->addMember(expression);
node->addMember(
createNodeVariable(TRI_CHAR_LENGTH_PAIR(Variable::NAME_NEW), false));
if (overwrite) {
node->addMember(
createNodeVariable(TRI_CHAR_LENGTH_PAIR(Variable::NAME_OLD), false)
);
}
return node;
}

View File

@ -63,6 +63,7 @@ size_t Collection::count(transaction::Methods* trx) const {
if (res.fail()) {
THROW_ARANGO_EXCEPTION(res.result);
}
TRI_ASSERT(res.ok());
numDocuments = res.slice().getInt();
}

View File

@ -997,6 +997,13 @@ void ExecutionNode::RegisterPlan::after(ExecutionNode* en) {
nrRegs.emplace_back(registerId);
auto ep = ExecutionNode::castTo<InsertNode const*>(en);
if (ep->getOutVariableOld() != nullptr) {
nrRegsHere[depth]++;
nrRegs[depth]++;
varInfo.emplace(ep->getOutVariableOld()->id,
VarInfo(depth, totalNrRegs));
totalNrRegs++;
}
if (ep->getOutVariableNew() != nullptr) {
nrRegsHere[depth]++;
nrRegs[depth]++;

View File

@ -642,8 +642,7 @@ CollectNode* ExecutionPlan::createAnonymousCollect(
return en;
}
/// @brief create modification options from an AST node
ModificationOptions ExecutionPlan::parseModificationOptions(
AstNode const* node) {
ModificationOptions options;
@ -669,10 +668,19 @@ ModificationOptions ExecutionPlan::createModificationOptions(
options.nullMeansRemove = value->isFalse();
} else if (name == "mergeObjects") {
options.mergeObjects = value->isTrue();
} else if (name == "overwrite") {
options.overwrite = value->isTrue();
}
}
}
}
return options;
}
/// @brief create modification options from an AST node
ModificationOptions ExecutionPlan::createModificationOptions(
AstNode const* node) {
ModificationOptions options = parseModificationOptions(node);
// this means a data-modification query must first read the entire input data
// before starting with the modifications
@ -1543,7 +1551,8 @@ ExecutionNode* ExecutionPlan::fromNodeRemove(ExecutionNode* previous,
ExecutionNode* ExecutionPlan::fromNodeInsert(ExecutionNode* previous,
AstNode const* node) {
TRI_ASSERT(node != nullptr && node->type == NODE_TYPE_INSERT);
TRI_ASSERT(node->numMembers() > 3);
TRI_ASSERT(node->numMembers() < 6);
auto options = createModificationOptions(node->getMember(0));
std::string const collectionName = node->getMember(1)->getString();
@ -1555,6 +1564,12 @@ ExecutionNode* ExecutionPlan::fromNodeInsert(ExecutionNode* previous,
Variable const* outVariableNew =
static_cast<Variable*>(returnVarNode->getData());
Variable const* outVariableOld = nullptr;
if (node->numMembers() == 5) {
returnVarNode = node->getMember(4);
outVariableOld = static_cast<Variable*>(returnVarNode->getData());
}
ExecutionNode* en = nullptr;
if (expression->type == NODE_TYPE_REFERENCE) {
@ -1569,6 +1584,7 @@ ExecutionNode* ExecutionPlan::fromNodeInsert(ExecutionNode* previous,
collection,
options,
v,
outVariableOld,
outVariableNew
));
} else {
@ -1582,6 +1598,7 @@ ExecutionNode* ExecutionPlan::fromNodeInsert(ExecutionNode* previous,
collection,
options,
getOutVariable(calc),
outVariableOld,
outVariableNew
));
previous = calc;

View File

@ -251,9 +251,15 @@ class ExecutionPlan {
/// @brief creates an anonymous COLLECT node (for a DISTINCT)
CollectNode* createAnonymousCollect(CalculationNode const*);
/// @brief create modification options by parsing an AST node
/// and adding plan specific options.
ModificationOptions createModificationOptions(AstNode const*);
public:
/// @brief parses modification options from an AST node
static ModificationOptions parseModificationOptions(AstNode const*);
private:
/// @brief create COLLECT options from an AST node
CollectOptions createCollectOptions(AstNode const*);

View File

@ -443,16 +443,20 @@ AqlItemBlock* InsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
RegisterId const registerId = it->second.registerId;
std::string errorMessage;
bool const producesNew = (ep->_outVariableNew != nullptr);
bool const producesOld = (ep->_outVariableOld != nullptr);
bool const producesOutput = producesNew || producesOld;
result.reset(requestBlock(count, getPlanNode()->getRegisterPlan()->nrRegs[getPlanNode()->getDepth()]));
OperationOptions options;
// use "silent" mode if we do not access the results later on
options.silent = !producesOutput;
options.returnNew = producesNew;
options.returnOld = producesOld;
options.isRestore = ep->_options.useIsRestore;
options.waitForSync = ep->_options.waitForSync;
options.overwrite = ep->_options.overwrite;
// loop over all blocks
size_t dstRow = 0;
@ -463,7 +467,7 @@ AqlItemBlock* InsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
throwIfKilled(); // check if we were aborted
bool const isMultiple = (n > 1);
if (!isMultiple) { // single - case
// loop over the complete block. Well it is one element only
for (size_t i = 0; i < n; ++i) {
AqlValue const& a = res->getValueReference(i, registerId);
@ -483,11 +487,21 @@ AqlItemBlock* InsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
OperationResult opRes = _trx->insert(_collection->name, a.slice(), options);
errorCode = opRes.errorNumber();
if (errorCode == TRI_ERROR_NO_ERROR) {
if (options.returnNew) {
// return $NEW
result->emplaceValue(dstRow, _outRegNew, opRes.slice().get("new"));
}
if (options.returnOld) {
// return $OLD
auto slice = opRes.slice().get("old");
if (slice.isNone()) {
result->emplaceValue(dstRow, _outRegOld, VPackSlice::nullSlice());
} else {
result->emplaceValue(dstRow, _outRegOld, slice);
}
}
} else {
errorMessage.assign(opRes.errorMessage());
}
} else {
@ -499,7 +513,7 @@ AqlItemBlock* InsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
++dstRow;
}
// done with a block
} else { // many - case
_tempBuilder.clear();
_tempBuilder.openArray();
for (size_t i = 0; i < n; ++i) {
@ -535,9 +549,20 @@ AqlItemBlock* InsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
bool wasError = arangodb::basics::VelocyPackHelper::getBooleanValue(
elm, "error", false);
if (!wasError) {
if (producesNew) {
// store $NEW
result->emplaceValue(dstRow, _outRegNew, elm.get("new"));
}
if (producesOld) {
// store $OLD
auto slice = elm.get("old");
if (slice.isNone()) {
result->emplaceValue(dstRow, _outRegOld, VPackSlice::nullSlice());
} else {
result->emplaceValue(dstRow, _outRegOld, slice);
}
}
}
++iter;
}
++dstRow;
@ -548,7 +573,7 @@ AqlItemBlock* InsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
static_cast<size_t>(toSend.length()),
ep->_options.ignoreErrors);
}
} // single / many - case
// done with block. now unlink it and return it to block manager
(*it) = nullptr;

View File

@ -173,6 +173,7 @@ std::unique_ptr<ExecutionBlock> InsertNode::createBlock(
/// @brief clone ExecutionNode recursively
ExecutionNode* InsertNode::clone(ExecutionPlan* plan, bool withDependencies,
bool withProperties) const {
auto outVariableOld = _outVariableOld;
auto outVariableNew = _outVariableNew;
auto inVariable = _inVariable;
@ -181,11 +182,15 @@ ExecutionNode* InsertNode::clone(ExecutionPlan* plan, bool withDependencies,
outVariableNew =
plan->getAst()->variables()->createVariable(outVariableNew);
}
if (_outVariableOld != nullptr) {
outVariableOld =
plan->getAst()->variables()->createVariable(outVariableOld);
}
inVariable = plan->getAst()->variables()->createVariable(inVariable);
}
auto c = std::make_unique<InsertNode>(plan, _id, _vocbase, _collection, _options,
inVariable, outVariableOld, outVariableNew);
if (!_countStats) {
c->disableStatistics();
}

View File

@ -248,9 +248,8 @@ class InsertNode : public ModificationNode {
public:
InsertNode(ExecutionPlan* plan, size_t id, TRI_vocbase_t* vocbase,
Collection* collection, ModificationOptions const& options,
Variable const* inVariable, Variable const* outVariableOld, Variable const* outVariableNew)
: ModificationNode(plan, id, vocbase, collection, options, outVariableOld, outVariableNew),
_inVariable(inVariable) {
TRI_ASSERT(_inVariable != nullptr);
// _outVariable might be a nullptr

View File

@ -44,6 +44,8 @@ ModificationOptions::ModificationOptions(VPackSlice const& slice) {
basics::VelocyPackHelper::getBooleanValue(obj, "useIsRestore", false);
consultAqlWriteFilter =
basics::VelocyPackHelper::getBooleanValue(obj, "consultAqlWriteFilter", false);
overwrite =
basics::VelocyPackHelper::getBooleanValue(obj, "overwrite", false);
}
void ModificationOptions::toVelocyPack(VPackBuilder& builder) const {
@ -56,4 +58,5 @@ void ModificationOptions::toVelocyPack(VPackBuilder& builder) const {
builder.add("readCompleteInput", VPackValue(readCompleteInput));
builder.add("useIsRestore", VPackValue(useIsRestore));
builder.add("consultAqlWriteFilter", VPackValue(consultAqlWriteFilter));
builder.add("overwrite", VPackValue(overwrite));
}

View File

@ -45,7 +45,9 @@ struct ModificationOptions {
ignoreDocumentNotFound(false),
readCompleteInput(true),
useIsRestore(false),
consultAqlWriteFilter(false) {}
consultAqlWriteFilter(false),
overwrite(false)
{}
void toVelocyPack(arangodb::velocypack::Builder&) const;
@ -57,6 +59,7 @@ struct ModificationOptions {
bool readCompleteInput;
bool useIsRestore;
bool consultAqlWriteFilter;
bool overwrite;
};
} // namespace arangodb::aql

View File

@ -1082,20 +1082,22 @@ int selectivityEstimatesOnCoordinator(
/// for their documents.
////////////////////////////////////////////////////////////////////////////////
Result createDocumentOnCoordinator(
std::string const& dbname, std::string const& collname,
arangodb::OperationOptions const& options, VPackSlice const& slice,
arangodb::rest::ResponseCode& responseCode,
std::unordered_map<int, size_t>& errorCounter,
std::shared_ptr<VPackBuilder>& resultBody) {
// Set a few variables needed for our work:
auto cc = ClusterComm::instance();
if (cc == nullptr) {
// nullptr should only happen during controlled shutdown
return TRI_ERROR_SHUTTING_DOWN;
}
ClusterInfo* ci = ClusterInfo::instance();
TRI_ASSERT(ci != nullptr);
// First determine the collection ID from the name:
std::shared_ptr<LogicalCollection> collinfo;
try {
@ -1104,15 +1106,19 @@ int createDocumentOnCoordinator(
return TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND;
}
TRI_ASSERT(collinfo != nullptr);
auto collid = std::to_string(collinfo->id());
// create vars used in this function
bool const useMultiple = slice.isArray(); // insert more than one document
std::unordered_map< ShardID
, std::vector<std::pair<VPackValueLength, std::string>>
> shardMap;
std::vector<std::pair<ShardID, VPackValueLength>> reverseMapping;
{
// create shard map
int res = TRI_ERROR_NO_ERROR;
if (useMultiple) {
VPackValueLength length = slice.length();
for (VPackValueLength idx = 0; idx < length; ++idx) {
@ -1130,15 +1136,17 @@ int createDocumentOnCoordinator(
return res;
}
}
}
std::string const baseUrl =
"/_db/" + StringUtils::urlEncode(dbname) + "/_api/document?collection=";
std::string const optsUrlPart =
std::string("&waitForSync=") + (options.waitForSync ? "true" : "false") +
"&returnNew=" + (options.returnNew ? "true" : "false") + "&returnOld=" +
(options.returnOld ? "true" : "false") + "&isRestore=" +
(options.isRestore ? "true" : "false");
"&returnNew=" + (options.returnNew ? "true" : "false") +
"&returnOld=" + (options.returnOld ? "true" : "false") +
"&isRestore=" + (options.isRestore ? "true" : "false") +
"&" + StaticStrings::OverWrite + "=" + (options.overwrite ? "true" : "false");
VPackBuilder reqBuilder;
@ -1216,7 +1224,7 @@ int createDocumentOnCoordinator(
// the cluster operation was OK, however,
// the DBserver could have reported an error.
return TRI_ERROR_NO_ERROR;
return Result{};
}
////////////////////////////////////////////////////////////////////////////////
@ -2617,6 +2625,7 @@ std::shared_ptr<LogicalCollection> ClusterMethods::persistCollectionInAgency(
LogicalCollection* col, bool ignoreDistributeShardsLikeErrors,
bool waitForSyncReplication, bool enforceReplicationFactor,
VPackSlice) {
std::string distributeShardsLike = col->distributeShardsLike();
std::vector<std::string> avoid = col->avoidServers();
ClusterInfo* ci = ClusterInfo::instance();

View File

@ -101,7 +101,7 @@ int selectivityEstimatesOnCoordinator(std::string const& dbname, std::string con
/// @brief creates a document in a coordinator
////////////////////////////////////////////////////////////////////////////////
Result createDocumentOnCoordinator(
std::string const& dbname, std::string const& collname,
OperationOptions const& options, arangodb::velocypack::Slice const& slice,
arangodb::rest::ResponseCode& responseCode,

View File

@ -116,6 +116,8 @@ bool RestDocumentHandler::createDocument() {
opOptions.waitForSync = _request->parsedValue(StaticStrings::WaitForSyncString, false);
opOptions.returnNew = _request->parsedValue(StaticStrings::ReturnNewString, false);
opOptions.silent = _request->parsedValue(StaticStrings::SilentString, false);
opOptions.overwrite = _request->parsedValue(StaticStrings::OverWrite, false);
opOptions.returnOld = _request->parsedValue(StaticStrings::ReturnOldString, false) && opOptions.overwrite;
extractStringParameter(StaticStrings::IsSynchronousReplicationString,
opOptions.isSynchronousReplicationFrom);
@ -124,7 +126,7 @@ bool RestDocumentHandler::createDocument() {
SingleCollectionTransaction trx(ctx, collectionName, AccessMode::Type::WRITE);
bool const isMultiple = body.isArray();
if (!isMultiple) {
if (!isMultiple && !opOptions.overwrite) {
trx.addHint(transaction::Hints::Hint::SINGLE_OPERATION);
}

View File

@ -241,7 +241,7 @@ std::string RestVocbaseBaseHandler::assembleDocumentId(
void RestVocbaseBaseHandler::generateSaved(
arangodb::OperationResult const& result, std::string const& collectionName,
TRI_col_type_e type, VPackOptions const* options, bool isMultiple) {
if (result._options.waitForSync) {
resetResponse(rest::ResponseCode::CREATED);
} else {
resetResponse(rest::ResponseCode::ACCEPTED);
@ -268,7 +268,7 @@ void RestVocbaseBaseHandler::generateSaved(
void RestVocbaseBaseHandler::generateDeleted(
arangodb::OperationResult const& result, std::string const& collectionName,
TRI_col_type_e type, VPackOptions const* options) {
if (result._options.waitForSync) {
resetResponse(rest::ResponseCode::OK);
} else {
resetResponse(rest::ResponseCode::ACCEPTED);

View File

@ -385,7 +385,7 @@ OperationResult transaction::helpers::buildCountResult(std::vector<std::pair<std
}
resultBuilder.close();
}
return OperationResult(Result(), resultBuilder.buffer(), nullptr);
}
/// @brief creates an id string from a custom _id value and the _key string

View File

@ -154,11 +154,11 @@ static void createBabiesError(VPackBuilder& builder,
}
}
static OperationResult emptyResult(OperationOptions const& options) {
VPackBuilder resultBuilder;
resultBuilder.openArray();
resultBuilder.close();
return OperationResult(Result(), resultBuilder.steal(), nullptr, options);
}
} // namespace
@ -713,6 +713,7 @@ void transaction::Methods::buildDocumentIdentity(
LogicalCollection* collection, VPackBuilder& builder, TRI_voc_cid_t cid,
StringRef const& key, TRI_voc_rid_t rid, TRI_voc_rid_t oldRid,
ManagedDocumentResult const* oldDoc, ManagedDocumentResult const* newDoc) {
std::string temp; // TODO: pass a string into this function
temp.reserve(64);
@ -910,7 +911,7 @@ OperationResult transaction::Methods::anyLocal(
resultBuilder.close();
return OperationResult(Result(), resultBuilder.steal(), _transactionContextPtr->orderCustomTypeHandler());
}
TRI_voc_cid_t transaction::Methods::addCollectionAtRuntime(
@ -1156,7 +1157,7 @@ OperationResult transaction::Methods::clusterResultDocument(
case rest::ResponseCode::PRECONDITION_FAILED:
return OperationResult(Result(responseCode == rest::ResponseCode::OK
? TRI_ERROR_NO_ERROR
: TRI_ERROR_ARANGO_CONFLICT), resultBody->steal(), nullptr, OperationOptions{}, errorCounter);
case rest::ResponseCode::NOT_FOUND:
return errorCodeFromClusterResult(resultBody, TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND);
default:
@ -1168,11 +1169,16 @@ OperationResult transaction::Methods::clusterResultDocument(
OperationResult transaction::Methods::clusterResultInsert(
rest::ResponseCode const& responseCode,
std::shared_ptr<VPackBuilder> const& resultBody,
OperationOptions const& options,
std::unordered_map<int, size_t> const& errorCounter) const {
switch (responseCode) {
case rest::ResponseCode::ACCEPTED:
case rest::ResponseCode::CREATED: {
OperationOptions copy = options;
copy.waitForSync = (responseCode == rest::ResponseCode::CREATED); // waitForSync is abused here;
// OperationResult should get a proper return code instead.
return OperationResult(Result(), resultBody->steal(), nullptr, copy, errorCounter);
}
case rest::ResponseCode::PRECONDITION_FAILED:
return errorCodeFromClusterResult(resultBody, TRI_ERROR_ARANGO_CONFLICT);
case rest::ResponseCode::BAD:
@ -1202,10 +1208,11 @@ OperationResult transaction::Methods::clusterResultModify(
}
// Fall through
case rest::ResponseCode::ACCEPTED:
case rest::ResponseCode::CREATED: {
OperationOptions options;
options.waitForSync = (responseCode == rest::ResponseCode::CREATED);
return OperationResult(Result(errorCode), resultBody->steal(), nullptr, options, errorCounter);
}
case rest::ResponseCode::BAD:
return errorCodeFromClusterResult(resultBody, TRI_ERROR_INTERNAL);
case rest::ResponseCode::NOT_FOUND:
@ -1223,13 +1230,16 @@ OperationResult transaction::Methods::clusterResultRemove(
switch (responseCode) {
case rest::ResponseCode::OK:
case rest::ResponseCode::ACCEPTED:
case rest::ResponseCode::PRECONDITION_FAILED: {
OperationOptions options;
options.waitForSync = (responseCode != rest::ResponseCode::ACCEPTED);
return OperationResult(
Result(responseCode == rest::ResponseCode::PRECONDITION_FAILED
? TRI_ERROR_ARANGO_CONFLICT
: TRI_ERROR_NO_ERROR),
resultBody->steal(), nullptr,
options, errorCounter);
}
case rest::ResponseCode::BAD:
return errorCodeFromClusterResult(resultBody, TRI_ERROR_INTERNAL);
case rest::ResponseCode::NOT_FOUND:
@ -1370,7 +1380,7 @@ OperationResult transaction::Methods::documentLocal(
return OperationResult(std::move(res), resultBuilder.steal(),
_transactionContextPtr->orderCustomTypeHandler(),
options, countErrorCodes);
}
/// @brief create one or multiple documents in a collection
@ -1386,7 +1396,7 @@ OperationResult transaction::Methods::insert(std::string const& collectionName,
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
if (value.isArray() && value.length() == 0) {
return emptyResult(options);
}
// Validate Edges
@ -1412,21 +1422,15 @@ OperationResult transaction::Methods::insertCoordinator(
rest::ResponseCode responseCode;
std::unordered_map<int, size_t> errorCounter;
auto resultBody = std::make_shared<VPackBuilder>();
Result res = arangodb::createDocumentOnCoordinator(
vocbase().name(), collectionName, options, value, responseCode,
errorCounter, resultBody);
if (res.ok()) {
return clusterResultInsert(responseCode, resultBody, options, errorCounter);
}
return OperationResult(res, options);
}
#endif
@ -1464,18 +1468,18 @@ OperationResult transaction::Methods::insertLocal(
std::string theLeader = collection->followers()->getLeader();
if (theLeader.empty()) {
if (!options.isSynchronousReplicationFrom.empty()) {
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_REFUSES_REPLICATION, options);
}
} else { // we are a follower following theLeader
isFollower = true;
if (options.isSynchronousReplicationFrom.empty()) {
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED, options);
}
if (options.isSynchronousReplicationFrom != theLeader) {
return OperationResult(TRI_ERROR_CLUSTER_SHARD_FOLLOWER_REFUSES_OPERATION, options);
}
}
} // isDBServer - early block
if (options.returnNew) {
pinData(cid); // will throw when it fails
@ -1489,13 +1493,28 @@ OperationResult transaction::Methods::insertLocal(
return Result(TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
ManagedDocumentResult documentResult;
TRI_voc_tick_t resultMarkerTick = 0;
TRI_voc_rid_t revisionId = 0;
auto const needsLock = !isLocked(collection, AccessMode::Type::WRITE);
Result res = collection->insert(this, value, documentResult, options,
resultMarkerTick, needsLock, revisionId);
ManagedDocumentResult previousDocumentResult; // return OLD
TRI_voc_rid_t previousRevisionId = 0;
if (options.overwrite && res.is(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED)) {
// RepSert case: unique constraint violated -> maxTick has not changed -> try replace
resultMarkerTick = 0;
res = collection->replace(this, value, documentResult, options,
resultMarkerTick, needsLock, previousRevisionId,
previousDocumentResult);
if (res.ok()) {
revisionId = TRI_ExtractRevisionId(VPackSlice(documentResult.vpack()));
}
}
if (resultMarkerTick > 0 && resultMarkerTick > maxTick) {
maxTick = resultMarkerTick;
@ -1507,16 +1526,28 @@ OperationResult transaction::Methods::insertLocal(
return res;
}
if (!options.silent || _state->isDBServer()) {
TRI_ASSERT(!documentResult.empty());
StringRef keyString(transaction::helpers::extractKeyFromDocument(
VPackSlice(documentResult.vpack())));
bool showReplaced = false;
if (options.returnOld && previousRevisionId) {
showReplaced = true;
}
if (showReplaced) {
TRI_ASSERT(!previousDocumentResult.empty());
}
buildDocumentIdentity(collection, resultBuilder, cid, keyString,
revisionId, previousRevisionId,
showReplaced ? &previousDocumentResult : nullptr,
options.returnNew ? &documentResult : nullptr);
}
return Result();
};
@ -1557,12 +1588,10 @@ OperationResult transaction::Methods::insertLocal(
// Now replicate the good operations on all followers:
std::string path =
"/_db/" +
arangodb::basics::StringUtils::urlEncode(vocbase().name()) +
"/_api/document/" +
arangodb::basics::StringUtils::urlEncode(collection->name()) +
"?isRestore=true&isSynchronousReplication=" +
ServerState::instance()->getId();
"/_db/" + arangodb::basics::StringUtils::urlEncode(vocbase().name()) +
"/_api/document/" + arangodb::basics::StringUtils::urlEncode(collection->name()) +
"?isRestore=true&isSynchronousReplication=" + ServerState::instance()->getId();
"&" + StaticStrings::OverWrite + "=" + (options.overwrite ? "true" : "false");
VPackBuilder payload;
@ -1618,7 +1647,7 @@ OperationResult transaction::Methods::insertLocal(
// error (note that we use the follower version, since we have
// lost leadership):
if (findRefusal(requests)) {
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED, options);
}
// Otherwise we drop all followers that were not successful:
@ -1660,7 +1689,7 @@ OperationResult transaction::Methods::insertLocal(
resultBuilder.clear();
}
return OperationResult(std::move(res), resultBuilder.steal(), nullptr, options, countErrorCodes);
}
/// @brief update/patch one or multiple documents in a collection
@ -1676,7 +1705,7 @@ OperationResult transaction::Methods::update(std::string const& collectionName,
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
if (newValue.isArray() && newValue.length() == 0) {
return emptyResult(options);
}
OperationOptions optionsCopy = options;
@ -1737,7 +1766,7 @@ OperationResult transaction::Methods::replace(std::string const& collectionName,
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
if (newValue.isArray() && newValue.length() == 0) {
return emptyResult(options);
}
OperationOptions optionsCopy = options;
@ -1857,7 +1886,7 @@ OperationResult transaction::Methods::modifyLocal(
maxTick = resultMarkerTick;
}
if (res.is(TRI_ERROR_ARANGO_CONFLICT)) {
// still return
if (!isBabies) {
StringRef key(newVal.get(StaticStrings::KeyString));
@ -1882,7 +1911,7 @@ OperationResult transaction::Methods::modifyLocal(
}
return res; // must be ok!
}; // workForOneDocument
///////////////////////
bool multiCase = newValue.isArray();
@ -2033,7 +2062,7 @@ OperationResult transaction::Methods::modifyLocal(
resultBuilder.clear();
}
return OperationResult(std::move(res), resultBuilder.steal(), nullptr, options, errorCounter);
}
/// @brief remove one or multiple documents in a collection
@ -2049,7 +2078,7 @@ OperationResult transaction::Methods::remove(std::string const& collectionName,
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
if (value.isArray() && value.length() == 0) {
return emptyResult(options);
}
OperationOptions optionsCopy = options;
@ -2162,7 +2191,7 @@ OperationResult transaction::Methods::removeLocal(
}
if (!res.ok()) {
if (res.is(TRI_ERROR_ARANGO_CONFLICT) && !isBabies) {
buildDocumentIdentity(collection, resultBuilder, cid, key,
actualRevision, 0,
options.returnOld ? &previous : nullptr, nullptr);
@ -2321,7 +2350,7 @@ OperationResult transaction::Methods::removeLocal(
resultBuilder.clear();
}
return OperationResult(std::move(res), resultBuilder.steal(), nullptr, options, countErrorCodes);
}
/// @brief fetches all documents in a collection
@ -2385,7 +2414,7 @@ OperationResult transaction::Methods::allLocal(
resultBuilder.close();
return OperationResult(Result(), resultBuilder.steal(), _transactionContextPtr->orderCustomTypeHandler());
}
/// @brief remove all documents in a collection
@ -2612,7 +2641,6 @@ OperationResult transaction::Methods::countCoordinator(
if (res != TRI_ERROR_NO_ERROR) {
return OperationResult(res);
}
return buildCountResult(count, aggregate);
}
#endif
@ -2644,7 +2672,7 @@ OperationResult transaction::Methods::countLocal(
VPackBuilder resultBuilder;
resultBuilder.add(VPackValue(num));
return OperationResult(Result(), resultBuilder.steal(), nullptr);
}
/// @brief Gets the best fitting index for an AQL condition.

View File

@ -414,6 +414,8 @@ class Methods {
/// @brief build a VPack object with _id, _key and _rev and possibly
/// oldRef (if given), the result is added to the builder in the
/// argument as a single object.
// TODO: should the options be const?
void buildDocumentIdentity(arangodb::LogicalCollection* collection,
VPackBuilder& builder, TRI_voc_cid_t cid,
StringRef const& key, TRI_voc_rid_t rid,
@ -528,6 +530,7 @@ class Methods {
OperationResult clusterResultInsert(
rest::ResponseCode const& responseCode,
std::shared_ptr<arangodb::velocypack::Builder> const& resultBody,
OperationOptions const& options,
std::unordered_map<int, size_t> const& errorCounter) const;
/// @brief Helper create a Cluster Communication modify result

View File

@ -33,7 +33,7 @@ struct OperationOptions {
OperationOptions()
: recoveryData(nullptr), waitForSync(false), keepNull(true),
mergeObjects(true), silent(false), ignoreRevs(true),
returnOld(false), returnNew(false), isRestore(false), overwrite(false),
indexOperationMode(Index::OperationMode::normal) {}
// original marker, set by an engine's recovery procedure only!
@ -64,6 +64,9 @@ struct OperationOptions {
// this option is there to ensure that _key values, once set, can be restored by replication and arangorestore
bool isRestore;
// for insert operations: do not fail if _key exists but replace the document
bool overwrite;
// for synchronous replication operations, we have to mark them such that
// we can deny them if we are a (new) leader, and that we can deny other
// operation if we are merely a follower. Finally, we must deny replications

View File

@ -26,6 +26,7 @@
#include "Basics/Common.h"
#include "Basics/Result.h"
#include "Utils/OperationOptions.h"
#include <velocypack/Buffer.h>
#include <velocypack/Options.h>
@ -39,10 +40,13 @@ struct OperationResult {
// create from integer status code
explicit OperationResult(int code) : result(code) {}
explicit OperationResult(int code, OperationOptions const& options) : result(code), _options(options) {}
// create from Result
explicit OperationResult(Result const& other) : result(other) {}
explicit OperationResult(Result const& other, OperationOptions const& options) : result(other), _options(options){}
explicit OperationResult(Result&& other) : result(std::move(other)) {}
explicit OperationResult(Result&& other, OperationOptions const& options) : result(std::move(other)), _options(options) {}
// copy
OperationResult(OperationResult const& other) = delete;
@ -55,7 +59,7 @@ struct OperationResult {
result = std::move(other.result);
buffer = std::move(other.buffer);
customTypeHandler = std::move(other.customTypeHandler);
_options = other._options;
countErrorCodes = std::move(other.countErrorCodes);
}
return *this;
@ -65,13 +69,18 @@ struct OperationResult {
OperationResult(Result&& result,
std::shared_ptr<VPackBuffer<uint8_t>> const& buffer,
std::shared_ptr<VPackCustomTypeHandler> const& handler,
OperationOptions const& options = {},
std::unordered_map<int, size_t> const& countErrorCodes = std::unordered_map<int, size_t>())
: result(std::move(result)),
buffer(buffer),
customTypeHandler(handler),
_options(options),
countErrorCodes(countErrorCodes) {
if (result.ok()) {
TRI_ASSERT(buffer != nullptr);
TRI_ASSERT(buffer->data() != nullptr);
}
}
~OperationResult() = default;
@ -92,7 +101,7 @@ struct OperationResult {
// TODO: add a slice that points to either buffer or raw data
std::shared_ptr<VPackBuffer<uint8_t>> buffer;
std::shared_ptr<VPackCustomTypeHandler> customTypeHandler;
OperationOptions _options;
// Executive summary for baby operations: reports all errors that did occur
// during these operations. Details are stored in the respective positions of

View File

@ -2199,6 +2199,10 @@ static void InsertVocbaseCol(v8::Isolate* isolate,
options.waitForSync =
TRI_ObjectToBoolean(optionsObject->Get(WaitForSyncKey));
}
TRI_GET_GLOBAL_STRING(OverwriteKey);
if (optionsObject->Has(OverwriteKey)) {
options.overwrite = TRI_ObjectToBoolean(optionsObject->Get(OverwriteKey));
}
TRI_GET_GLOBAL_STRING(SilentKey);
if (optionsObject->Has(SilentKey)) {
options.silent = TRI_ObjectToBoolean(optionsObject->Get(SilentKey));
@ -2207,6 +2211,10 @@ static void InsertVocbaseCol(v8::Isolate* isolate,
if (optionsObject->Has(ReturnNewKey)) {
options.returnNew = TRI_ObjectToBoolean(optionsObject->Get(ReturnNewKey));
}
TRI_GET_GLOBAL_STRING(ReturnOldKey);
if (optionsObject->Has(ReturnOldKey)) {
options.returnOld = TRI_ObjectToBoolean(optionsObject->Get(ReturnOldKey)) && options.overwrite;
}
TRI_GET_GLOBAL_STRING(IsRestoreKey);
if (optionsObject->Has(IsRestoreKey)) {
options.isRestore = TRI_ObjectToBoolean(optionsObject->Get(IsRestoreKey));
@ -2283,7 +2291,7 @@ static void InsertVocbaseCol(v8::Isolate* isolate,
transactionContext, collection->id(), AccessMode::Type::WRITE
);
if (!payloadIsArray && !options.overwrite) {
trx.addHint(transaction::Hints::Hint::SINGLE_OPERATION);
}

View File

@ -299,10 +299,12 @@ class LogicalCollection: public LogicalDataSource {
ManagedDocumentResult& result, OperationOptions&,
TRI_voc_tick_t&, bool, TRI_voc_rid_t& prevRev,
ManagedDocumentResult& previous);
Result replace(transaction::Methods*, velocypack::Slice const,
ManagedDocumentResult& result, OperationOptions&,
TRI_voc_tick_t&, bool /*lock*/, TRI_voc_rid_t& prevRev,
ManagedDocumentResult& previous);
Result remove(transaction::Methods*, velocypack::Slice const,
OperationOptions&, TRI_voc_tick_t&, bool,
TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous);

View File

@ -891,10 +891,19 @@ ArangoCollection.prototype.save =
if (options.returnNew) {
url = this._appendBoolParameter(url, 'returnNew', options.returnNew);
}
if (options.returnOld) {
url = this._appendBoolParameter(url, 'returnOld', options.returnOld);
}
if (options.silent) {
url = this._appendBoolParameter(url, 'silent', options.silent);
}
if (options.overwrite) {
url = this._appendBoolParameter(url, 'overwrite', options.overwrite);
}
if (data === undefined || typeof data !== 'object') {
throw new ArangoError({
errorNum: internal.errors.ERROR_ARANGO_DOCUMENT_TYPE_INVALID.code,

View File

@ -1208,4 +1208,81 @@ describe('babies collection document', function () {
expect(b7[0]._oldRev).to.be.a('string');
});
});
describe('overwrite', function () {
let base_url = '/_api/document/' + cn;
it('overwrite once', function () {
let url1 = base_url;
let req1 = request.post(url1, extend(endpoint, {
body: JSON.stringify([{
'Hallo': 12
}])
}));
let b1 = JSON.parse(req1.rawBody);
let res1 = b1[0];
let url2 = base_url + '?overwrite=true&returnOld=true';
let req2 = request.post(url2, extend(endpoint, {
body: JSON.stringify([{
'_key': res1._key,
'ulf': 42
}])
}));
let b2 = JSON.parse(req2.rawBody);
let res2 = b2[0];
expect(req2.statusCode).to.equal(202);
expect(res2._key).to.equal(res1._key);
expect(res2._oldRev).to.equal(res1._rev);
expect(res2.old.Hallo).to.equal(12);
});
it('overwrite multi', function () {
let url1 = base_url;
let req1 = request.post(url1, extend(endpoint, {
body: JSON.stringify([{
'Hallo': 12
}])
}));
let b1 = JSON.parse(req1.rawBody);
let res1 = b1[0];
let key1 = res1._key;
let url2 = base_url + '?overwrite=true&returnOld=true&returnNew=true';
let req2 = request.post(url2, extend(endpoint, {
body: JSON.stringify([
{
'_key': key1,
'ulf': 42
},{
'_key': key1,
'ulf': 32
},{
'_key': key1,
'ulfine': 23
}
])
}));
expect(req2.statusCode).to.equal(202);
let b2 = JSON.parse(req2.rawBody);
expect(b2).to.be.instanceof(Array);
expect(b2).to.have.lengthOf(3);
expect(b2[0]).to.be.a('object');
expect(b2[1]).to.be.a('object');
expect(b2[2]).to.be.a('object');
expect(b2[0]._key).to.be.a('string');
expect(b2[0]._key).to.equal(key1);
expect(b2[1]._key).to.equal(key1);
expect(b2[2]._key).to.equal(key1);
expect(b2[1]._rev).to.equal(b2[2].old._rev);
expect(b2[2].old._rev).to.equal(b2[1].new._rev);
expect(b2[2].new.ulfine).to.equal(23);
expect(b2[2].new.ulf).to.equal(undefined);
});
}); // overwrite - end
});

View File

@ -2338,6 +2338,52 @@ function DatabaseDocumentSuiteReturnStuff () {
assertTypeOf("string", res2._rev);
},
////////////////////////////////////////////////////////////////////////////////
/// @brief use overwrite option
////////////////////////////////////////////////////////////////////////////////
testInsertOverwrite : function () {
var docHandle = collection.insert({ a : 1});
var key = docHandle._key;
// normal insert with same key must fail!
try {
var res = collection.insert({a : 2, _key : key});
fail();
}
catch (err) {
assertEqual(ERRORS.ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED.code, err.errorNum);
}
// overwrite with same key must work
var rv = collection.insert({c : 3, _key: key},{overwrite:true, returnOld:true, returnNew:true});
var arr = collection.toArray();
assertEqual(arr.length, 1);
assertEqual(rv.new.c, 3);
assertFalse(rv.new.hasOwnProperty('a'));
assertEqual(rv.old.a, 1);
// overwrite with same key must work
collection.insert({b : 2, _key: key},{overwrite:true});
arr = collection.toArray();
assertEqual(arr.length, 1);
assertEqual(arr[0].b, 2);
// overwrite (babies) with same key must work
collection.insert([{a : 3, _key: key}, {a : 4, _key: key}, {a : 5, _key: key}], {overwrite:true});
arr = collection.toArray();
assertEqual(arr.length, 1);
assertEqual(arr[0].a, 5);
rv = collection.insert({x : 3},{overwrite:true, returnNew:true});
assertEqual(rv.new.x, 3);
assertTypeOf("string", rv._id);
assertTypeOf("string", rv._key);
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test new features from 3.0
////////////////////////////////////////////////////////////////////////////////

View File

@ -1008,9 +1008,63 @@ function ahuacatlInsertSuite () {
}
db._drop("UnitTestsAhuacatlEdge");
}
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test insert
////////////////////////////////////////////////////////////////////////////////
testInsertOverwrite : function () {
c1.truncate();
assertEqual(0, c1.count());
var rv1 = db._query(" INSERT { _key: '123', name: 'ulf' } IN @@cn OPTIONS { overwrite: false } RETURN NEW", { "@cn": cn1 });
assertEqual(1, c1.count());
var doc1 = rv1.toArray()[0];
assertEqual(doc1._key, '123');
assertEqual(doc1.name, 'ulf');
var rv2 = db._query(" INSERT { _key: '123', name: 'ulfine' } IN @@cn OPTIONS { overwrite: true } RETURN {old: OLD, new: NEW}", { "@cn": cn1 });
assertEqual(1, c1.count());
var doc2 = rv2.toArray()[0];
assertEqual(doc2.new._key, '123');
assertEqual(doc2.new.name, 'ulfine');
assertEqual(doc2.old._rev, doc1._rev);
assertEqual(doc2.old._key, doc1._key);
assertEqual(doc2.old.name, doc1.name);
var rv3 = db._query(`
LET x = (
FOR a IN 3..5
INSERT { _key: CONCAT('12',a), name: a }
IN @@cn
OPTIONS { overwrite: true }
RETURN {old: OLD, new: NEW}
)
FOR d IN x SORT d.new._key
RETURN d
`, { "@cn": cn1 });
var resultArray3 = rv3.toArray();
assertEqual(3, c1.count());
var doc3a = resultArray3[0];
var doc3b = resultArray3[1];
var doc3c = resultArray3[2];
assertEqual(doc3a.old._rev, doc2.new._rev);
assertEqual(doc3a.old._key, doc2.new._key);
assertEqual(doc3a.old.name, "ulfine");
assertEqual(doc3a.new.name, 3);
assertEqual(doc3b.old, null);
assertEqual(doc3b.new.name, 4);
assertEqual(doc3c.old, null);
assertEqual(doc3c.new.name, 5);
},
}; // end insert tests
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -61,6 +61,7 @@ std::string const StaticStrings::Group("group");
std::string const StaticStrings::Namespace("namespace");
std::string const StaticStrings::Prefix("prefix");
std::string const StaticStrings::ReplaceExisting("replaceExisting");
std::string const StaticStrings::OverWrite("overwrite");
// replication headers
std::string const StaticStrings::ReplicationHeaderCheckMore("x-arango-replication-checkmore");

View File

@ -65,7 +65,8 @@ class StaticStrings {
static std::string const Group;
static std::string const Namespace;
static std::string const ReplaceExisting;
static std::string const Prefix;
static std::string const OverWrite;
// replication headers
static std::string const ReplicationHeaderCheckMore;

View File

@ -79,6 +79,7 @@ TRI_v8_global_t::TRI_v8_global_t(v8::Isolate* isolate)
MergeObjectsKey(),
NameKey(),
OperationIDKey(),
OverwriteKey(),
ParametersKey(),
PathKey(),
PrefixKey(),