1
0
Fork 0

WIP - start adding optional overwrite to insert operation (RepSert) (#5268)

This commit is contained in:
Jan Christoph Uhde 2018-05-24 19:47:15 +02:00 committed by Jan
parent bc70486761
commit a2dcb6cc5d
34 changed files with 1014 additions and 421 deletions

View File

@ -8,7 +8,7 @@ devel
* abort startup when using SSLv2 for a server endpoint, or when connecting with
a client tool via an SSLv2 connection.
SSLv2 has been disabled in the OpenSSL library by default in recent versions
because of security vulnerabilities inherent in this protocol.
@ -17,6 +17,13 @@ devel
should change the protocol from SSLv2 to TLSv12 if possible, by adjusting
the value of the `--ssl.protocol` startup option.
* added `overwrite` option to the `document rest-handler` to allow for easier syncing.
This implements most of the frequently requested UPSERT behavior. In reality it
is a REPSERT (replace/insert) because only replacement, not partial modification,
of documents is possible. The option does not work in cluster collections with
custom sharding.
* added startup option `--log.escape`
This option toggles the escaping of log output.

View File

@ -567,8 +567,15 @@ used to specify the following options:
a default *waitForSync* value of *true*.
- *silent*: If this flag is set to *true*, the method does not return
any output.
- *overwrite*: If set to *true*, the insert becomes a replace-insert.
If a document with the same *_key* already exists, the new document is
not rejected with a unique constraint violation error but will replace
the old document.
- *returnNew*: If this flag is set to *true*, the complete new document
is returned in the output under the attribute *new*.
- *returnOld*: If this flag is set to *true*, the complete old document
is returned in the output under the attribute *old*. Only available
in combination with the *overwrite* option.
Note: since ArangoDB 2.2, *insert* is an alias for *save*.
@ -610,6 +617,14 @@ multiple documents with one call.
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock documentsCollectionInsertMulti
@startDocuBlockInline documentsCollectionInsertSingleOverwrite
@EXAMPLE_ARANGOSH_OUTPUT{documentsCollectionInsertSingleOverwrite}
~ db._create("example");
db.example.insert({ _key : "666", Hello : "World" });
db.example.insert({ _key : "666", Hello : "Universe" }, {overwrite: true, returnOld: true});
~ db._drop("example");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock documentsCollectionInsertSingleOverwrite
Replace

View File

@ -26,11 +26,20 @@ Wait until document has been synced to disk.
Additionally return the complete new document under the attribute *new*
in the result.
@RESTQUERYPARAM{returnOld,boolean,optional}
Additionally return the complete old document under the attribute *old*
in the result. Only available if the overwrite option is used.
@RESTQUERYPARAM{silent,boolean,optional}
If set to *true*, an empty object will be returned as response. No meta-data
will be returned for the created document. This option can be used to
save some network traffic.
@RESTQUERYPARAM{overwrite,boolean,optional}
If set to *true*, the insert becomes a replace-insert. If a document with the
same *_key* already exists, the new document is not rejected with a unique
constraint violation error but will replace the old document.
@RESTDESCRIPTION
Creates a new document from the document given in the body, unless there
is already a document with the *_key* given. If no *_key* is given, a new
@ -239,5 +248,28 @@ Use of returnNew:
logJsonResponse(response);
db._drop(cn);
@END_EXAMPLE_ARANGOSH_RUN
@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostOverwrite}
var cn = "products";
db._drop(cn);
db._create(cn, { waitForSync: true });
var url = "/_api/document/" + cn;
var body = '{ "Hello": "World", "_key" : "lock" }';
var response = logCurlRequest('POST', url, body);
// insert
assert(response.code === 201);
logJsonResponse(response);
body = '{ "Hello": "Universe", "_key" : "lock" }';
url = "/_api/document/" + cn + "?overwrite=true";
response = logCurlRequest('POST', url, body);
// insert same key
assert(response.code === 201);
logJsonResponse(response);
db._drop(cn);
@END_EXAMPLE_ARANGOSH_RUN
@endDocuBlock

View File

@ -70,7 +70,7 @@ describe ArangoDB do
ArangoDB.drop_collection(cn)
end
it "returns an error if an object sub-attribute in the JSON body is corrupted" do
cn = "UnitTestsCollectionBasics"
id = ArangoDB.create_collection(cn)
@ -89,7 +89,7 @@ describe ArangoDB do
ArangoDB.drop_collection(cn)
end
it "returns an error if an array attribute in the JSON body is corrupted" do
cn = "UnitTestsCollectionBasics"
id = ArangoDB.create_collection(cn)
@ -143,7 +143,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -155,7 +155,7 @@ describe ArangoDB do
ArangoDB.size_collection(@cn).should eq(0)
end
it "creating a new document, setting compatibility header" do
cmd = "/_api/document?collection=#{@cn}"
body = "{ \"Hallo\" : \"World\" }"
@ -175,7 +175,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -187,7 +187,7 @@ describe ArangoDB do
ArangoDB.size_collection(@cn).should eq(0)
end
it "creating a new document complex body" do
cmd = "/_api/document?collection=#{@cn}"
body = "{ \"Hallo\" : \"Wo\\\"rld\" }"
@ -207,7 +207,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -226,7 +226,7 @@ describe ArangoDB do
ArangoDB.size_collection(@cn).should eq(0)
end
it "creating a new document complex body, setting compatibility header " do
cmd = "/_api/document?collection=#{@cn}"
body = "{ \"Hallo\" : \"Wo\\\"rld\" }"
@ -246,7 +246,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -285,7 +285,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -309,7 +309,7 @@ describe ArangoDB do
ArangoDB.size_collection(@cn).should eq(0)
end
it "creating a new umlaut document, setting compatibility header" do
cmd = "/_api/document?collection=#{@cn}"
body = "{ \"Hallo\" : \"öäüÖÄÜßあ寿司\" }"
@ -329,7 +329,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -349,7 +349,7 @@ describe ArangoDB do
newBody.should eq("\\u00F6\\u00E4\\u00FC\\u00D6\\u00C4\\u00DC\\u00DF\\u3042\\u5BFF\\u53F8")
doc.parsed_response['Hallo'].should eq('öäüÖÄÜßあ寿司')
ArangoDB.delete(location)
ArangoDB.size_collection(@cn).should eq(0)
@ -399,7 +399,7 @@ describe ArangoDB do
ArangoDB.size_collection(@cn).should eq(0)
end
it "creating a new not normalized umlaut document, setting compatibility header" do
cmd = "/_api/document?collection=#{@cn}"
body = "{ \"Hallo\" : \"Grüß Gott.\" }"
@ -469,7 +469,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
did.should eq("#{@cn}/#{@key}")
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -478,7 +478,7 @@ describe ArangoDB do
ArangoDB.delete("/_api/document/#{@cn}/#{@key}")
end
it "creating a document with an existing id, setting compatibility header" do
@key = "a_new_key"
@ -503,7 +503,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
did.should eq("#{@cn}/#{@key}")
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -512,7 +512,7 @@ describe ArangoDB do
ArangoDB.delete("/_api/document/#{@cn}/#{@key}")
end
it "creating a document with a duplicate existing id" do
@key = "a_new_key"
@ -568,7 +568,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -580,7 +580,7 @@ describe ArangoDB do
ArangoDB.size_collection(@cn).should eq(0)
end
it "creating a new document, setting compatibility header" do
cmd = "/_api/document?collection=#{@cn}"
body = "{ \"Hallo\" : \"World\" }"
@ -600,7 +600,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -612,7 +612,7 @@ describe ArangoDB do
ArangoDB.size_collection(@cn).should eq(0)
end
it "creating a new document, waitForSync URL param = false" do
cmd = "/_api/document?collection=#{@cn}&waitForSync=false"
body = "{ \"Hallo\" : \"World\" }"
@ -632,7 +632,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -644,7 +644,7 @@ describe ArangoDB do
ArangoDB.size_collection(@cn).should eq(0)
end
it "creating a new document, waitForSync URL param = false, setting compatibility header" do
cmd = "/_api/document?collection=#{@cn}&waitForSync=false"
body = "{ \"Hallo\" : \"World\" }"
@ -664,7 +664,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -676,7 +676,7 @@ describe ArangoDB do
ArangoDB.size_collection(@cn).should eq(0)
end
it "creating a new document, waitForSync URL param = true" do
cmd = "/_api/document?collection=#{@cn}&waitForSync=true"
body = "{ \"Hallo\" : \"World\" }"
@ -696,7 +696,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -708,7 +708,7 @@ describe ArangoDB do
ArangoDB.size_collection(@cn).should eq(0)
end
it "creating a new document, waitForSync URL param = true, setting compatibility header" do
cmd = "/_api/document?collection=#{@cn}&waitForSync=true"
body = "{ \"Hallo\" : \"World\" }"
@ -728,7 +728,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -755,7 +755,7 @@ describe ArangoDB do
after do
ArangoDB.drop_collection(@cn)
end
it "creating a new document" do
cmd = "/_api/document?collection=#{@cn}"
body = "{ \"Hallo\" : \"World\" }"
@ -775,7 +775,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -807,7 +807,7 @@ describe ArangoDB do
did = doc.parsed_response['_id']
did.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
@ -845,8 +845,213 @@ describe ArangoDB do
doc.parsed_response['code'].should eq(404)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
end
end
end
end
################################################################################
## known collection identifier, overwrite = true
################################################################################
context "known collection identifier, overwrite = true:" do
before do
@cn = "UnitTestsCollectionUnsynced"
@cid = ArangoDB.create_collection(@cn, false)
end
after do
ArangoDB.drop_collection(@cn)
end
it "replace a document by _key" do
cmd = "/_api/document?collection=#{@cn}"
body = "{ \"Hallo\" : \"World\" }"
doc = ArangoDB.log_post("#{prefix}-accept", cmd, :body => body, :headers => {})
doc.code.should eq(202)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
etag = doc.headers['etag']
etag.should be_kind_of(String)
location = doc.headers['location']
location.should be_kind_of(String)
rev = doc.parsed_response['_rev']
rev.should be_kind_of(String)
did = doc.parsed_response['_id']
did.should be_kind_of(String)
key = doc.parsed_response['_key']
key.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
etag.should eq("\"#{rev}\"")
location.should eq("/_db/_system/_api/document/#{did}")
cmd = "/_api/document?collection=#{@cn}&overwrite=true&waitForSync=false&returnOld=true"
body = "{ \"_key\" : \"#{key}\", \"Hallo\" : \"ULF\" }"
newdoc = ArangoDB.log_post("#{prefix}-accept", cmd, :body => body, :headers => {})
newrev = newdoc.parsed_response['_rev']
newrev.should be_kind_of(String)
newrev.should !eq(rev)
newoldrev = newdoc.parsed_response['_oldRev']
newoldrev.should be_kind_of(String)
newoldrev.should eq(rev)
newoldrev = newdoc.parsed_response['old']['Hallo']
newoldrev.should be_kind_of(String)
newoldrev.should eq("World")
newkey = newdoc.parsed_response['_key']
newkey.should be_kind_of(String)
newkey.should eq(key)
newdoc.code.should eq(202)
ArangoDB.delete(location)
ArangoDB.size_collection(@cn).should eq(0)
end
it "replace a document by _key, return new / old" do
cmd = "/_api/document?collection=#{@cn}"
body = "{ \"Hallo\" : \"World\" }"
doc = ArangoDB.log_post("#{prefix}-accept", cmd, :body => body, :headers => {})
doc.code.should eq(202)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
etag = doc.headers['etag']
etag.should be_kind_of(String)
location = doc.headers['location']
location.should be_kind_of(String)
rev = doc.parsed_response['_rev']
rev.should be_kind_of(String)
did = doc.parsed_response['_id']
did.should be_kind_of(String)
key = doc.parsed_response['_key']
key.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
etag.should eq("\"#{rev}\"")
location.should eq("/_db/_system/_api/document/#{did}")
cmd = "/_api/document?collection=#{@cn}&overwrite=true&returnNew=true&returnOld=true&waitForSync=true"
body = "{ \"_key\" : \"#{key}\", \"Hallo\" : \"ULF\" }"
newdoc = ArangoDB.log_post("#{prefix}-accept", cmd, :body => body, :headers => {})
newrev = newdoc.parsed_response['_rev']
newrev.should be_kind_of(String)
newrev.should !eq(rev)
newoldrev = newdoc.parsed_response['_oldRev']
newoldrev.should be_kind_of(String)
newoldrev.should eq(rev)
newkey = newdoc.parsed_response['_key']
newkey.should be_kind_of(String)
newkey.should eq(key)
newnew = newdoc.parsed_response['new']
newnew["_key"].should be_kind_of(String)
newnew["_key"].should eq(key)
newnew["_rev"].should eq(newrev)
newnew["Hallo"].should eq("ULF")
newold = newdoc.parsed_response['old']
newold["_key"].should eq(key)
newold["_rev"].should eq(newoldrev)
newold["Hallo"].should be_kind_of(String)
newold["Hallo"].should eq("World")
newdoc.code.should eq(201)
ArangoDB.delete(location)
ArangoDB.size_collection(@cn).should eq(0)
end
it "replace documents by _key" do
cmd = "/_api/document?collection=#{@cn}"
body = "{ \"Hallo\" : \"World\" }"
doc = ArangoDB.log_post("#{prefix}-accept", cmd, :body => body, :headers => {})
doc.code.should eq(202)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
etag = doc.headers['etag']
etag.should be_kind_of(String)
location = doc.headers['location']
location.should be_kind_of(String)
rev = doc.parsed_response['_rev']
rev.should be_kind_of(String)
did = doc.parsed_response['_id']
did.should be_kind_of(String)
key = doc.parsed_response['_key']
key.should be_kind_of(String)
match = didRegex.match(did)
match[1].should eq("#{@cn}")
etag.should eq("\"#{rev}\"")
location.should eq("/_db/_system/_api/document/#{did}")
cmd = "/_api/document?collection=#{@cn}&overwrite=true&returnNew=true&returnOld=true&waitForSync=true"
body = "[{ \"_key\" : \"#{key}\", \"Hallo\" : \"ULF\" }, { \"_key\" : \"#{key}\", \"Hallo\" : \"ULFINE\" }]"
newdoc = ArangoDB.log_post("#{prefix}-accept", cmd, :body => body, :headers => {})
newrev = newdoc.parsed_response[0]['_rev']
newrev.should be_kind_of(String)
newrev.should !eq(rev)
newoldrev = newdoc.parsed_response[0]['_oldRev']
newoldrev.should be_kind_of(String)
newoldrev.should eq(rev)
newkey = newdoc.parsed_response[0]['_key']
newkey.should be_kind_of(String)
newkey.should eq(key)
newnew = newdoc.parsed_response[0]['new']
newnew["_key"].should be_kind_of(String)
newnew["_key"].should eq(key)
newnew["_rev"].should eq(newrev)
newold = newdoc.parsed_response[0]['old']
newold["_key"].should eq(key)
newold["_rev"].should eq(newoldrev)
newrev = newdoc.parsed_response[1]['_rev']
newrev.should be_kind_of(String)
newrev.should !eq(rev)
newdoc.parsed_response[1]['new']['Hallo'].should eq("ULFINE")
newdoc.parsed_response[1]['old']['Hallo'].should eq("ULF")
newdoc.code.should eq(201)
ArangoDB.delete(location)
ArangoDB.size_collection(@cn).should eq(0)
end
end #overwrite - end
end # context - end
end # describe - end

View File

@ -29,6 +29,7 @@
#include "Aql/Function.h"
#include "Aql/Graphs.h"
#include "Aql/Query.h"
#include "Aql/ExecutionPlan.h"
#include "Basics/Exceptions.h"
#include "Basics/StringRef.h"
#include "Basics/StringUtils.h"
@ -46,7 +47,7 @@
using namespace arangodb;
using namespace arangodb::aql;
namespace {
namespace {
auto doNothingVisitor = [](AstNode const*) {};
}
@ -326,18 +327,32 @@ AstNode* Ast::createNodeInsert(AstNode const* expression,
AstNode const* collection,
AstNode const* options) {
AstNode* node = createNode(NODE_TYPE_INSERT);
node->reserve(4);
if (options == nullptr) {
// no options given. now use default options
options = &NopNode;
}
bool overwrite = false;
if (options->type == NODE_TYPE_OBJECT){
auto ops = ExecutionPlan::parseModificationOptions(options);
overwrite = ops.overwrite;
}
node->reserve(overwrite ? 5: 4);
node->addMember(options);
node->addMember(collection);
node->addMember(expression);
node->addMember(
createNodeVariable(TRI_CHAR_LENGTH_PAIR(Variable::NAME_NEW), false));
if(overwrite){
node->addMember(
createNodeVariable(TRI_CHAR_LENGTH_PAIR(Variable::NAME_OLD), false)
);
}
return node;
}
@ -1813,13 +1828,13 @@ void Ast::validateAndOptimize() {
};
TraversalContext context;
auto preVisitor = [&](AstNode const* node) -> bool {
auto ctx = &context;
if (ctx->filterDepth >= 0) {
++ctx->filterDepth;
}
if (node->type == NODE_TYPE_FILTER) {
TRI_ASSERT(ctx->filterDepth == -1);
ctx->filterDepth = 0;
@ -2209,13 +2224,13 @@ std::unordered_set<std::string> Ast::getReferencedAttributesForKeep(AstNode cons
}
}
return false;
};
std::unordered_set<std::string> result;
isSafeForOptimization = true;
std::function<bool(AstNode const*)> visitor = [&](AstNode const* node) {
if (!isSafeForOptimization) {
return false;
@ -2263,7 +2278,7 @@ std::unordered_set<std::string> Ast::getReferencedAttributesForKeep(AstNode cons
/// @brief determines the top-level attributes referenced in an expression for the
/// specified out variable
bool Ast::getReferencedAttributes(AstNode const* node,
bool Ast::getReferencedAttributes(AstNode const* node,
Variable const* variable,
std::unordered_set<std::string>& vars) {
// traversal state
@ -2275,7 +2290,7 @@ bool Ast::getReferencedAttributes(AstNode const* node,
if (node == nullptr || !isSafeForOptimization) {
return false;
}
if (node->type == NODE_TYPE_ATTRIBUTE_ACCESS) {
attributeName = node->getStringValue();
nameLength = node->getStringLength();
@ -2307,7 +2322,7 @@ bool Ast::getReferencedAttributes(AstNode const* node,
return true;
};
traverseReadOnly(node, visitor, doNothingVisitor);
traverseReadOnly(node, visitor, doNothingVisitor);
return isSafeForOptimization;
}
@ -2882,7 +2897,7 @@ AstNode* Ast::optimizeBinaryOperatorRelational(AstNode* node) {
Expression exp(nullptr, this, node);
FixedVarExpressionContext context;
bool mustDestroy;
AqlValue a = exp.execute(_query->trx(), &context, mustDestroy);
AqlValueGuard guard(a, mustDestroy);
@ -3158,7 +3173,7 @@ AstNode* Ast::optimizeFunctionCall(AstNode* node) {
// place. note that the transaction has not necessarily been
// started yet...
TRI_ASSERT(_query->trx() != nullptr);
if (node->willUseV8()) {
// if the expression is going to use V8 internally, we do not
// bother to optimize it here
@ -3168,7 +3183,7 @@ AstNode* Ast::optimizeFunctionCall(AstNode* node) {
Expression exp(nullptr, this, node);
FixedVarExpressionContext context;
bool mustDestroy;
AqlValue a = exp.execute(_query->trx(), &context, mustDestroy);
AqlValueGuard guard(a, mustDestroy);
@ -3540,7 +3555,7 @@ AstNode* Ast::traverseAndModify(
}
/// @brief traverse the AST, using pre- and post-order visitors
void Ast::traverseReadOnly(AstNode const* node,
void Ast::traverseReadOnly(AstNode const* node,
std::function<bool(AstNode const*)> const& preVisitor,
std::function<void(AstNode const*)> const& postVisitor) {
if (node == nullptr) {
@ -3619,4 +3634,4 @@ AstNode* Ast::createNode(AstNodeType type) {
// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------

View File

@ -55,7 +55,7 @@ Collection::Collection(std::string const& name, TRI_vocbase_t* vocbase,
TRI_voc_cid_t Collection::cid() const {
return getCollection()->id();
}
/// @brief count the number of documents in the collection
size_t Collection::count(transaction::Methods* trx) const {
if (numDocuments == UNINITIALIZED) {
@ -63,6 +63,7 @@ size_t Collection::count(transaction::Methods* trx) const {
if (res.fail()) {
THROW_ARANGO_EXCEPTION(res.result);
}
TRI_ASSERT(res.ok());
numDocuments = res.slice().getInt();
}

View File

@ -109,7 +109,7 @@ void ExecutionNode::validateType(int type) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_NOT_IMPLEMENTED, "unknown TypeID");
}
}
/// @brief add a dependency
void ExecutionNode::addDependency(ExecutionNode* ep) {
TRI_ASSERT(ep != nullptr);
@ -997,6 +997,13 @@ void ExecutionNode::RegisterPlan::after(ExecutionNode* en) {
nrRegs.emplace_back(registerId);
auto ep = ExecutionNode::castTo<InsertNode const*>(en);
if (ep->getOutVariableOld() != nullptr) {
nrRegsHere[depth]++;
nrRegs[depth]++;
varInfo.emplace(ep->getOutVariableOld()->id,
VarInfo(depth, totalNrRegs));
totalNrRegs++;
}
if (ep->getOutVariableNew() != nullptr) {
nrRegsHere[depth]++;
nrRegs[depth]++;
@ -1189,7 +1196,7 @@ void ExecutionNode::RegisterPlan::after(ExecutionNode* en) {
en->setRegsToClear(std::move(regsToClear));
}
}
/// @brief replace a dependency, returns true if the pointer was found and
/// replaced, please note that this does not delete oldNode!
bool ExecutionNode::replaceDependency(ExecutionNode* oldNode, ExecutionNode* newNode) {
@ -1226,7 +1233,7 @@ bool ExecutionNode::replaceDependency(ExecutionNode* oldNode, ExecutionNode* new
}
return false;
}
/// @brief remove a dependency, returns true if the pointer was found and
/// removed, please note that this does not delete ep!
bool ExecutionNode::removeDependency(ExecutionNode* ep) {
@ -1260,7 +1267,7 @@ bool ExecutionNode::removeDependency(ExecutionNode* ep) {
return false;
}
/// @brief remove all dependencies for the given node
void ExecutionNode::removeDependencies() {
for (auto& x : _dependencies) {
@ -1954,7 +1961,7 @@ double ReturnNode::estimateCost(size_t& nrItems) const {
void NoResultsNode::toVelocyPackHelper(VPackBuilder& nodes, unsigned flags) const {
// call base class method
ExecutionNode::toVelocyPackHelperGeneric(nodes, flags);
//And close it
nodes.close();
}

View File

@ -60,7 +60,7 @@ using namespace arangodb::aql;
using namespace arangodb::basics;
namespace {
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
/// @brief validate the counters of the plan
struct NodeCounter final : public WalkerWorker<ExecutionNode> {
@ -234,7 +234,7 @@ ExecutionPlan::~ExecutionPlan() {
::NodeCounter counter;
_root->walk(counter);
// and compare it to the number of nodes we have in our counters array
size_t j = 0;
for (auto const& it : _typeCounts) {
@ -269,7 +269,7 @@ ExecutionPlan* ExecutionPlan::instantiateFromAst(Ast* ast) {
ast->query()->queryOptions().fullCount) {
ExecutionNode::castTo<LimitNode*>(plan->_lastLimitNode)->setFullCount();
}
// set count flag for final RETURN node
if (plan->_root->getType() == ExecutionNode::RETURN) {
static_cast<ReturnNode*>(plan->_root)->setCount();
@ -279,7 +279,7 @@ ExecutionPlan* ExecutionPlan::instantiateFromAst(Ast* ast) {
return plan.release();
}
/// @brief whether or not the plan contains at least one node of this type
bool ExecutionPlan::contains(ExecutionNode::NodeType type) const {
TRI_ASSERT(_varUsageComputed);
@ -441,7 +441,7 @@ ExecutionNode* ExecutionPlan::createCalculation(
TRI_ASSERT(expression->numMembers() == 1);
expression = expression->getMember(0);
}
bool containsCollection = false;
// replace occurrences of collection names used as function call arguments
// (that are of type NODE_TYPE_COLLECTION) with their string equivalents
@ -469,7 +469,7 @@ ExecutionNode* ExecutionPlan::createCalculation(
} else if (node->type == NODE_TYPE_COLLECTION) {
containsCollection = true;
}
return node;
};
@ -478,7 +478,7 @@ ExecutionNode* ExecutionPlan::createCalculation(
if (containsCollection) {
// we found at least one occurence of NODE_TYPE_COLLECTION
// now replace them with proper (FOR doc IN collection RETURN doc)
// now replace them with proper (FOR doc IN collection RETURN doc)
// subqueries
auto visitor = [this, &previous](AstNode* node) {
if (node->type == NODE_TYPE_COLLECTION) {
@ -490,12 +490,12 @@ ExecutionNode* ExecutionPlan::createCalculation(
AstNode* forNode = _ast->createNodeFor(v, node);
// RETURN part
AstNode* returnNode = _ast->createNodeReturn(_ast->createNodeReference(v));
// add both nodes to subquery
rootNode->addMember(forNode);
rootNode->addMember(returnNode);
// produce the proper ExecutionNodes from the subquery AST
// produce the proper ExecutionNodes from the subquery AST
auto subquery = fromNode(rootNode);
if (subquery == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
@ -509,11 +509,11 @@ ExecutionNode* ExecutionPlan::createCalculation(
previous = en;
return _ast->createNodeReference(v);
}
return node;
};
// replace remaining NODE_TYPE_COLLECTION occurrences in the expression
// replace remaining NODE_TYPE_COLLECTION occurrences in the expression
node = Ast::traverseAndModify(node, visitor);
}
@ -642,8 +642,7 @@ CollectNode* ExecutionPlan::createAnonymousCollect(
return en;
}
/// @brief create modification options from an AST node
ModificationOptions ExecutionPlan::createModificationOptions(
ModificationOptions ExecutionPlan::parseModificationOptions(
AstNode const* node) {
ModificationOptions options;
@ -669,10 +668,19 @@ ModificationOptions ExecutionPlan::createModificationOptions(
options.nullMeansRemove = value->isFalse();
} else if (name == "mergeObjects") {
options.mergeObjects = value->isTrue();
} else if (name == "overwrite") {
options.overwrite = value->isTrue();
}
}
}
}
return options;
}
/// @brief create modification options from an AST node
ModificationOptions ExecutionPlan::createModificationOptions(
AstNode const* node) {
ModificationOptions options = parseModificationOptions(node);
// this means a data-modification query must first read the entire input data
// before starting with the modifications
@ -1543,7 +1551,8 @@ ExecutionNode* ExecutionPlan::fromNodeRemove(ExecutionNode* previous,
ExecutionNode* ExecutionPlan::fromNodeInsert(ExecutionNode* previous,
AstNode const* node) {
TRI_ASSERT(node != nullptr && node->type == NODE_TYPE_INSERT);
TRI_ASSERT(node->numMembers() == 4);
TRI_ASSERT(node->numMembers() > 3);
TRI_ASSERT(node->numMembers() < 6);
auto options = createModificationOptions(node->getMember(0));
std::string const collectionName = node->getMember(1)->getString();
@ -1555,6 +1564,12 @@ ExecutionNode* ExecutionPlan::fromNodeInsert(ExecutionNode* previous,
Variable const* outVariableNew =
static_cast<Variable*>(returnVarNode->getData());
Variable const* outVariableOld = nullptr;
if(node->numMembers() == 5) {
returnVarNode = node->getMember(4);
outVariableOld = static_cast<Variable*>(returnVarNode->getData());
}
ExecutionNode* en = nullptr;
if (expression->type == NODE_TYPE_REFERENCE) {
@ -1569,6 +1584,7 @@ ExecutionNode* ExecutionPlan::fromNodeInsert(ExecutionNode* previous,
collection,
options,
v,
outVariableOld,
outVariableNew
));
} else {
@ -1582,6 +1598,7 @@ ExecutionNode* ExecutionPlan::fromNodeInsert(ExecutionNode* previous,
collection,
options,
getOutVariable(calc),
outVariableOld,
outVariableNew
));
previous = calc;
@ -1933,8 +1950,8 @@ void ExecutionPlan::findNodesOfType(SmallVector<ExecutionNode*>& result,
void ExecutionPlan::findNodesOfType(
SmallVector<ExecutionNode*>& result,
std::vector<ExecutionNode::NodeType> const& types, bool enterSubqueries) {
// check if any of the node types is actually present in the plan
// check if any of the node types is actually present in the plan
for (auto const& type : types) {
if (contains(type)) {
// found a node type that is in the plan
@ -1981,7 +1998,7 @@ struct VarUsageFinder final : public WalkerWorker<ExecutionNode> {
bool before(ExecutionNode* en) override final {
// count the type of node found
en->plan()->increaseCounter(en->getType());
en->invalidateVarUsage();
en->setVarsUsedLater(_usedLater);
// Add variables used here to _usedLater:
@ -2173,7 +2190,7 @@ ExecutionNode* ExecutionPlan::fromSlice(VPackSlice const& slice) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"plan \"nodes\" attribute is not an array");
}
ExecutionNode* ret = nullptr;
// first, re-create all nodes from the Slice, using the node ids
@ -2285,4 +2302,4 @@ void ExecutionPlan::show() {
_root->walk(shower);
}
#endif
#endif

View File

@ -37,7 +37,7 @@ namespace arangodb {
namespace velocypack {
class Slice;
}
namespace aql {
class Ast;
struct AstNode;
@ -65,7 +65,7 @@ class ExecutionPlan {
/// @brief create an execution plan from VelocyPack
static ExecutionPlan* instantiateFromVelocyPack(
Ast* ast, arangodb::velocypack::Slice const);
ExecutionPlan* clone(Ast*);
/// @brief clone the plan by recursively cloning starting from the root
@ -74,15 +74,15 @@ class ExecutionPlan {
/// @brief create an execution plan identical to this one
/// keep the memory of the plan on the query object specified.
ExecutionPlan* clone(Query const&);
/// @brief export to VelocyPack
std::shared_ptr<arangodb::velocypack::Builder> toVelocyPack(Ast*, bool verbose) const;
void toVelocyPack(arangodb::velocypack::Builder&, Ast*, bool verbose) const;
/// @brief check if the plan is empty
inline bool empty() const { return (_root == nullptr); }
bool isResponsibleForInitialize() const { return _isResponsibleForInitialize; }
/// @brief note that an optimizer rule was applied
@ -140,7 +140,7 @@ class ExecutionPlan {
void excludeFromScatterGather(ExecutionNode const* node) {
_excludeFromScatterGather.emplace(node);
}
bool shouldExcludeFromScatterGather(ExecutionNode const* node) const {
return (_excludeFromScatterGather.find(node) != _excludeFromScatterGather.end());
}
@ -193,7 +193,7 @@ class ExecutionPlan {
/// @brief register a node with the plan
ExecutionNode* registerNode(std::unique_ptr<ExecutionNode>);
/// @brief add a node to the plan, will delete node if addition
/// fails and throw an exception
ExecutionNode* registerNode(ExecutionNode*);
@ -212,7 +212,7 @@ class ExecutionPlan {
/// <oldNode>).
/// <newNode> must be registered with the plan before this method is called.
void insertDependency(ExecutionNode* oldNode, ExecutionNode* newNode);
/// @brief insert node directly after previous
/// will remove previous as a dependency from its parents and
/// add newNode as a dependency. <newNode> must be registered with the plan
@ -223,10 +223,10 @@ class ExecutionPlan {
/// @brief creates an anonymous calculation node for an arbitrary expression
ExecutionNode* createTemporaryCalculation(AstNode const*, ExecutionNode*);
/// @brief create an execution plan from an abstract syntax tree node
ExecutionNode* fromNode(AstNode const*);
/// @brief create an execution plan from VPack
ExecutionNode* fromSlice(velocypack::Slice const& slice);
@ -235,7 +235,7 @@ class ExecutionPlan {
/// @brief increase the node counter for the type
void increaseCounter(ExecutionNode::NodeType type) noexcept;
private:
/// @brief creates a calculation node
ExecutionNode* createCalculation(Variable*, Variable const*, AstNode const*,
@ -251,9 +251,15 @@ class ExecutionPlan {
/// @brief creates an anonymous COLLECT node (for a DISTINCT)
CollectNode* createAnonymousCollect(CalculationNode const*);
/// @brief create modification options from an AST node
/// @brief create modification options by parsing an AST node
/// and adding plan specific options.
ModificationOptions createModificationOptions(AstNode const*);
public:
/// @brief parses modification options form an AST node
static ModificationOptions parseModificationOptions(AstNode const*);
private:
/// @brief create COLLECT options from an AST node
CollectOptions createCollectOptions(AstNode const*);
@ -329,7 +335,7 @@ class ExecutionPlan {
bool _varUsageComputed;
bool _isResponsibleForInitialize;
/// @brief current nesting level while building the plan
int _nestingLevel;
@ -344,7 +350,7 @@ class ExecutionPlan {
/// @brief a lookup map for all subqueries created
std::unordered_map<VariableId, ExecutionNode*> _subqueries;
/// @brief these nodes will be excluded from building scatter/gather "diamonds" later
std::unordered_set<ExecutionNode const*> _excludeFromScatterGather;

View File

@ -86,7 +86,7 @@ AqlItemBlock* ModificationBlock::getSome(size_t atMost) {
if (getPlanNode()->getType() == ExecutionNode::NodeType::UPSERT) {
atMost = 1;
}
std::vector<AqlItemBlock*> blocks;
std::unique_ptr<AqlItemBlock> replyBlocks;
@ -260,8 +260,8 @@ void ModificationBlock::handleBabyResult(std::unordered_map<int, size_t> const&
THROW_ARANGO_EXCEPTION(first->first);
}
RemoveBlock::RemoveBlock(ExecutionEngine* engine, RemoveNode const* ep)
: ModificationBlock(engine, ep) {}
@ -415,7 +415,7 @@ AqlItemBlock* RemoveBlock::work(std::vector<AqlItemBlock*>& blocks) {
// Do not send request just increase the row
dstRow += n;
}
// done with block. now unlink it and return it to block manager
(*it) = nullptr;
returnBlock(res);
@ -443,17 +443,21 @@ AqlItemBlock* InsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
RegisterId const registerId = it->second.registerId;
std::string errorMessage;
bool const producesOutput = (ep->_outVariableNew != nullptr);
bool const producesNew = (ep->_outVariableNew != nullptr);
bool const producesOld = (ep->_outVariableOld != nullptr);
bool const producesOutput = producesNew || producesOld;
result.reset(requestBlock(count, getPlanNode()->getRegisterPlan()->nrRegs[getPlanNode()->getDepth()]));
OperationOptions options;
// use "silent" mode if we do not access the results later on
options.silent = !producesOutput;
options.returnNew = producesNew;
options.returnOld = producesOld;
options.isRestore = ep->_options.useIsRestore;
options.waitForSync = ep->_options.waitForSync;
options.returnNew = producesOutput;
options.isRestore = ep->getOptions().useIsRestore;
options.overwrite = ep->_options.overwrite;
// loop over all blocks
size_t dstRow = 0;
for (auto it = blocks.begin(); it != blocks.end(); ++it) {
@ -463,7 +467,7 @@ AqlItemBlock* InsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
throwIfKilled(); // check if we were aborted
bool const isMultiple = (n > 1);
if (!isMultiple) {
if (!isMultiple) { // single - case
// loop over the complete block. Well it is one element only
for (size_t i = 0; i < n; ++i) {
AqlValue const& a = res->getValueReference(i, registerId);
@ -480,16 +484,26 @@ AqlItemBlock* InsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
} else {
if (!ep->_options.consultAqlWriteFilter ||
!_collection->getCollection()->skipForAqlWrite(a.slice(), "")) {
OperationResult opRes = _trx->insert(_collection->name, a.slice(), options);
OperationResult opRes = _trx->insert(_collection->name, a.slice(), options);
errorCode = opRes.errorNumber();
if (options.returnNew && errorCode == TRI_ERROR_NO_ERROR) {
// return $NEW
result->emplaceValue(dstRow, _outRegNew, opRes.slice().get("new"));
}
if (errorCode != TRI_ERROR_NO_ERROR) {
if (errorCode == TRI_ERROR_NO_ERROR) {
if (options.returnNew) {
// return $NEW
result->emplaceValue(dstRow, _outRegNew, opRes.slice().get("new"));
}
if (options.returnOld) {
// return $OLD
auto slice = opRes.slice().get("old");
if(slice.isNone()){
result->emplaceValue(dstRow, _outRegOld, VPackSlice::nullSlice());
} else {
result->emplaceValue(dstRow, _outRegOld, slice);
}
}
} else {
errorMessage.assign(opRes.errorMessage());
}
}
} else {
errorCode = TRI_ERROR_NO_ERROR;
}
@ -499,7 +513,7 @@ AqlItemBlock* InsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
++dstRow;
}
// done with a block
} else {
} else { // many - case
_tempBuilder.clear();
_tempBuilder.openArray();
for (size_t i = 0; i < n; ++i) {
@ -535,8 +549,19 @@ AqlItemBlock* InsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
bool wasError = arangodb::basics::VelocyPackHelper::getBooleanValue(
elm, "error", false);
if (!wasError) {
// return $NEW
result->emplaceValue(dstRow, _outRegNew, elm.get("new"));
if (producesNew) {
// store $NEW
result->emplaceValue(dstRow, _outRegNew, elm.get("new"));
}
if (producesOld) {
// store $OLD
auto slice = elm.get("old");
if(slice.isNone()){
result->emplaceValue(dstRow, _outRegOld, VPackSlice::nullSlice());
} else {
result->emplaceValue(dstRow, _outRegOld, slice);
}
}
}
++iter;
}
@ -548,8 +573,8 @@ AqlItemBlock* InsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
static_cast<size_t>(toSend.length()),
ep->_options.ignoreErrors);
}
}
} // single / many - case
// done with block. now unlink it and return it to block manager
(*it) = nullptr;
returnBlock(res);
@ -577,7 +602,7 @@ AqlItemBlock* UpdateBlock::work(std::vector<AqlItemBlock*>& blocks) {
bool const ignoreDocumentNotFound = ep->getOptions().ignoreDocumentNotFound;
bool producesOutput = (ep->_outVariableOld != nullptr || ep->_outVariableNew != nullptr);
if (!producesOutput && _isDBServer && ignoreDocumentNotFound) {
// on a DB server, when we are told to ignore missing documents, we must
// set this flag in order to not assert later on
@ -605,7 +630,7 @@ AqlItemBlock* UpdateBlock::work(std::vector<AqlItemBlock*>& blocks) {
options.returnNew = (producesOutput && ep->_outVariableNew != nullptr);
options.ignoreRevs = true;
options.isRestore = ep->getOptions().useIsRestore;
// loop over all blocks
size_t dstRow = 0;
for (auto it = blocks.begin(); it != blocks.end(); ++it) {
@ -619,7 +644,7 @@ AqlItemBlock* UpdateBlock::work(std::vector<AqlItemBlock*>& blocks) {
if (isMultiple) {
object.openArray();
}
std::string key;
// loop over the complete block
@ -688,10 +713,10 @@ AqlItemBlock* UpdateBlock::work(std::vector<AqlItemBlock*>& blocks) {
}
// fetch old revision
OperationResult opRes = _trx->update(_collection->name, toUpdate, options);
OperationResult opRes = _trx->update(_collection->name, toUpdate, options);
if (!isMultiple) {
int errorCode = opRes.errorNumber();
if (errorCode == TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND && _isDBServer &&
ignoreDocumentNotFound) {
// Ignore document not found on the DBserver:
@ -740,7 +765,7 @@ AqlItemBlock* UpdateBlock::work(std::vector<AqlItemBlock*>& blocks) {
// store $NEW
result->emplaceValue(dstRow, _outRegNew, elm.get("new"));
}
}
}
++iter;
if (wasError) {
@ -812,7 +837,7 @@ AqlItemBlock* UpsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
options.returnNew = producesOutput;
options.ignoreRevs = true;
options.isRestore = ep->getOptions().useIsRestore;
VPackBuilder insertBuilder;
VPackBuilder updateBuilder;
@ -821,15 +846,15 @@ AqlItemBlock* UpsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
std::vector<size_t> insRows;
std::vector<size_t> upRows;
for (auto it = blocks.begin(); it != blocks.end(); ++it) {
auto* res = *it;
auto* res = *it;
throwIfKilled(); // check if we were aborted
insertBuilder.clear();
updateBuilder.clear();
size_t const n = res->size();
bool const isMultiple = (n > 1);
if (isMultiple) {
insertBuilder.openArray();
@ -869,7 +894,7 @@ AqlItemBlock* UpsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
if (updateDoc.isObject()) {
tookThis = true;
VPackSlice toUpdate = updateDoc.slice();
_tempBuilder.clear();
_tempBuilder.openObject();
_tempBuilder.add(StaticStrings::KeyString, VPackValue(key));
@ -946,7 +971,7 @@ AqlItemBlock* UpsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
}
} else {
OperationResult opRes = _trx->insert(_collection->name, toInsert, options);
errorCode = opRes.errorNumber();
errorCode = opRes.errorNumber();
if (options.returnNew && errorCode == TRI_ERROR_NO_ERROR) {
result->emplaceValue(dstRow - 1, _outRegNew, opRes.slice().get("new"));
@ -1042,7 +1067,7 @@ AqlItemBlock* ReplaceBlock::work(std::vector<AqlItemBlock*>& blocks) {
bool const ignoreDocumentNotFound = ep->getOptions().ignoreDocumentNotFound;
bool producesOutput = (ep->_outVariableOld != nullptr || ep->_outVariableNew != nullptr);
if (!producesOutput && _isDBServer && ignoreDocumentNotFound) {
// on a DB server, when we are told to ignore missing documents, we must
// set this flag in order to not assert later on
@ -1070,7 +1095,7 @@ AqlItemBlock* ReplaceBlock::work(std::vector<AqlItemBlock*>& blocks) {
options.returnNew = (producesOutput && ep->_outVariableNew != nullptr);
options.ignoreRevs = true;
options.isRestore = ep->getOptions().useIsRestore;
// loop over all blocks
size_t dstRow = 0;
for (auto it = blocks.begin(); it != blocks.end(); ++it) {
@ -1084,7 +1109,7 @@ AqlItemBlock* ReplaceBlock::work(std::vector<AqlItemBlock*>& blocks) {
if (isMultiple) {
object.openArray();
}
std::string key;
// loop over the complete block
@ -1150,10 +1175,10 @@ AqlItemBlock* ReplaceBlock::work(std::vector<AqlItemBlock*>& blocks) {
continue;
}
// fetch old revision
OperationResult opRes = _trx->replace(_collection->name, toUpdate, options);
OperationResult opRes = _trx->replace(_collection->name, toUpdate, options);
if (!isMultiple) {
int errorCode = opRes.errorNumber();
if (errorCode == TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND && _isDBServer &&
ignoreDocumentNotFound) {
// Ignore document not found on the DBserver:
@ -1204,7 +1229,7 @@ AqlItemBlock* ReplaceBlock::work(std::vector<AqlItemBlock*>& blocks) {
}
}
++iter;
if (wasError) {
// do not increase dstRow here
continue;
@ -1221,7 +1246,7 @@ AqlItemBlock* ReplaceBlock::work(std::vector<AqlItemBlock*>& blocks) {
(*it) = nullptr;
returnBlock(res);
}
if (dstRow < result->size()) {
if (dstRow == 0) {
result.reset();

View File

@ -43,7 +43,7 @@ ModificationNode::ModificationNode(ExecutionPlan* plan,
_outVariableOld(
Variable::varFromVPack(plan->getAst(), base, "outVariableOld", Optional)),
_outVariableNew(
Variable::varFromVPack(plan->getAst(), base, "outVariableNew", Optional)),
Variable::varFromVPack(plan->getAst(), base, "outVariableNew", Optional)),
_countStats(base.get("countStats").getBool()),
_restrictedTo("") {
TRI_ASSERT(_vocbase != nullptr);
@ -59,7 +59,7 @@ void ModificationNode::toVelocyPackHelper(VPackBuilder& builder,
unsigned flags) const {
// call base class method
ExecutionNode::toVelocyPackHelperGeneric(builder, flags);
// Now put info about vocbase and cid in there
builder.add("database", VPackValue(_vocbase->name()));
builder.add("collection", VPackValue(_collection->getName()));
@ -173,6 +173,7 @@ std::unique_ptr<ExecutionBlock> InsertNode::createBlock(
/// @brief clone ExecutionNode recursively
ExecutionNode* InsertNode::clone(ExecutionPlan* plan, bool withDependencies,
bool withProperties) const {
auto outVariableOld = _outVariableOld;
auto outVariableNew = _outVariableNew;
auto inVariable = _inVariable;
@ -181,11 +182,15 @@ ExecutionNode* InsertNode::clone(ExecutionPlan* plan, bool withDependencies,
outVariableNew =
plan->getAst()->variables()->createVariable(outVariableNew);
}
if (_outVariableOld != nullptr) {
outVariableOld =
plan->getAst()->variables()->createVariable(outVariableOld);
}
inVariable = plan->getAst()->variables()->createVariable(inVariable);
}
auto c = std::make_unique<InsertNode>(plan, _id, _vocbase, _collection, _options,
inVariable, outVariableNew);
inVariable, outVariableOld, outVariableNew);
if (!_countStats) {
c->disableStatistics();
}
@ -204,7 +209,7 @@ UpdateNode::UpdateNode(ExecutionPlan* plan, arangodb::velocypack::Slice const& b
/// @brief toVelocyPack
void UpdateNode::toVelocyPackHelper(VPackBuilder& nodes, unsigned flags) const {
ModificationNode::toVelocyPackHelper(nodes, flags);
nodes.add(VPackValue("inDocVariable"));
_inDocVariable->toVelocyPack(nodes);

View File

@ -83,7 +83,7 @@ class ModificationNode : public ExecutionNode {
/// why we can make it final here.
double estimateCost(size_t&) const override final;
/// @brief data modification is non-deterministic
/// @brief data modification is non-deterministic
bool isDeterministic() override final { return false; }
/// @brief getOptions
@ -130,7 +130,7 @@ class ModificationNode : public ExecutionNode {
bool isModificationNode() const override { return true; }
/// @brief whether this node contributes to statistics. Only disabled in SmartGraph case
bool countStats() const { return _countStats; }
bool countStats() const { return _countStats; }
/// @brief Disable that this node is contributing to statistics. Only disabled in SmartGraph case
void disableStatistics() { _countStats = false; }
@ -248,9 +248,8 @@ class InsertNode : public ModificationNode {
public:
InsertNode(ExecutionPlan* plan, size_t id, TRI_vocbase_t* vocbase,
Collection* collection, ModificationOptions const& options,
Variable const* inVariable, Variable const* outVariableNew)
: ModificationNode(plan, id, vocbase, collection, options, nullptr,
outVariableNew),
Variable const* inVariable, Variable const* outVariableOld, Variable const* outVariableNew)
: ModificationNode(plan, id, vocbase, collection, options, outVariableOld, outVariableNew),
_inVariable(inVariable) {
TRI_ASSERT(_inVariable != nullptr);
// _outVariable might be a nullptr

View File

@ -44,6 +44,8 @@ ModificationOptions::ModificationOptions(VPackSlice const& slice) {
basics::VelocyPackHelper::getBooleanValue(obj, "useIsRestore", false);
consultAqlWriteFilter =
basics::VelocyPackHelper::getBooleanValue(obj, "consultAqlWriteFilter", false);
overwrite =
basics::VelocyPackHelper::getBooleanValue(obj, "overwrite", false);
}
void ModificationOptions::toVelocyPack(VPackBuilder& builder) const {
@ -56,4 +58,5 @@ void ModificationOptions::toVelocyPack(VPackBuilder& builder) const {
builder.add("readCompleteInput", VPackValue(readCompleteInput));
builder.add("useIsRestore", VPackValue(useIsRestore));
builder.add("consultAqlWriteFilter", VPackValue(consultAqlWriteFilter));
builder.add("overwrite", VPackValue(overwrite));
}

View File

@ -45,7 +45,9 @@ struct ModificationOptions {
ignoreDocumentNotFound(false),
readCompleteInput(true),
useIsRestore(false),
consultAqlWriteFilter(false) {}
consultAqlWriteFilter(false),
overwrite(false)
{}
void toVelocyPack(arangodb::velocypack::Builder&) const;
@ -57,6 +59,7 @@ struct ModificationOptions {
bool readCompleteInput;
bool useIsRestore;
bool consultAqlWriteFilter;
bool overwrite;
};
} // namespace arangodb::aql

View File

@ -275,7 +275,7 @@ Result auth::UserManager::storeUserInternal(auth::User const& entry, bool replac
if (userDoc.isExternal()) {
userDoc = userDoc.resolveExternal();
}
// parse user including document _key
auth::User created = auth::User::fromDocument(userDoc);
TRI_ASSERT(!created.key().empty() && created.rev() != 0);
@ -327,7 +327,7 @@ void auth::UserManager::createRootUser() {
}
TRI_ASSERT(_userCache.empty());
LOG_TOPIC(INFO, Logger::AUTHENTICATION) << "Creating user \"root\"";
try {
// Attention:
// the root user needs to have a specific rights grant
@ -506,7 +506,7 @@ Result auth::UserManager::updateUser(std::string const& name,
return r;
}
r = storeUserInternal(user, /*replace*/ true);
// cannot hold _userCacheLock while invalidating token cache
writeGuard.unlock();
if (r.ok() || r.is(TRI_ERROR_ARANGO_CONFLICT)) {
@ -537,7 +537,7 @@ Result auth::UserManager::accessUser(std::string const& user,
VPackBuilder auth::UserManager::serializeUser(std::string const& user) {
loadFromDB();
READ_LOCKER(readGuard, _userCacheLock);
UserMap::iterator const& it = _userCache.find(user);
@ -700,16 +700,16 @@ auth::Level auth::UserManager::databaseAuthLevel(std::string const& user,
if (dbname.empty()) {
return auth::Level::NONE;
}
loadFromDB();
READ_LOCKER(readGuard, _userCacheLock);
UserMap::iterator const& it = _userCache.find(user);
if (it == _userCache.end()) {
LOG_TOPIC(TRACE, Logger::AUTHORIZATION) << "User not found: " << user;
return auth::Level::NONE;
}
auth::Level level = it->second.databaseAuthLevel(dbname);
if (!configured) {
if (level > auth::Level::RO && !ServerState::writeOpsEnabled()) {
@ -727,16 +727,16 @@ auth::Level auth::UserManager::collectionAuthLevel(std::string const& user,
if (coll.empty()) {
return auth::Level::NONE;
}
loadFromDB();
READ_LOCKER(readGuard, _userCacheLock);
UserMap::iterator const& it = _userCache.find(user);
if (it == _userCache.end()) {
LOG_TOPIC(TRACE, Logger::AUTHORIZATION) << "User not found: " << user;
return auth::Level::NONE; // no user found
}
auth::Level level;
if (coll[0] >= '0' && coll[0] <= '9') {
std::string tmpColl = DatabaseFeature::DATABASE->translateCollectionName(dbname, coll);
@ -744,7 +744,7 @@ auth::Level auth::UserManager::collectionAuthLevel(std::string const& user,
} else {
level = it->second.collectionAuthLevel(dbname, coll);
}
if (!configured) {
static_assert(auth::Level::RO < auth::Level::RW, "ro < rw");
if (level > auth::Level::RO && !ServerState::writeOpsEnabled()) {

View File

@ -71,7 +71,7 @@ T addFigures(VPackSlice const& v1, VPackSlice const& v2, std::vector<std::string
if (found.isNumber()) {
value += found.getNumericValue<T>();
}
return value;
}
@ -88,23 +88,23 @@ void recursiveAdd(VPackSlice const& value, std::shared_ptr<VPackBuilder>& builde
updated.add("count", VPackValue(addFigures<size_t>(value, builder->slice(), { "alive", "count" })));
updated.add("size", VPackValue(addFigures<size_t>(value, builder->slice(), { "alive", "size" })));
updated.close();
updated.add("dead", VPackValue(VPackValueType::Object));
updated.add("count", VPackValue(addFigures<size_t>(value, builder->slice(), { "dead", "count" })));
updated.add("size", VPackValue(addFigures<size_t>(value, builder->slice(), { "dead", "size" })));
updated.add("deletion", VPackValue(addFigures<size_t>(value, builder->slice(), { "dead", "deletion" })));
updated.close();
updated.add("indexes", VPackValue(VPackValueType::Object));
updated.add("count", VPackValue(addFigures<size_t>(value, builder->slice(), { "indexes", "count" })));
updated.add("size", VPackValue(addFigures<size_t>(value, builder->slice(), { "indexes", "size" })));
updated.close();
updated.add("datafiles", VPackValue(VPackValueType::Object));
updated.add("count", VPackValue(addFigures<size_t>(value, builder->slice(), { "datafiles", "count" })));
updated.add("fileSize", VPackValue(addFigures<size_t>(value, builder->slice(), { "datafiles", "fileSize" })));
updated.close();
updated.add("journals", VPackValue(VPackValueType::Object));
updated.add("count", VPackValue(addFigures<size_t>(value, builder->slice(), { "journals", "count" })));
updated.add("fileSize", VPackValue(addFigures<size_t>(value, builder->slice(), { "journals", "fileSize" })));
@ -116,13 +116,13 @@ void recursiveAdd(VPackSlice const& value, std::shared_ptr<VPackBuilder>& builde
updated.close();
updated.add("documentReferences", VPackValue(addFigures<size_t>(value, builder->slice(), { "documentReferences" })));
updated.close();
TRI_ASSERT(updated.slice().isObject());
TRI_ASSERT(updated.isClosed());
builder.reset(new VPackBuilder(VPackCollection::merge(builder->slice(), updated.slice(), true, false)));
builder.reset(new VPackBuilder(VPackCollection::merge(builder->slice(), updated.slice(), true, false)));
TRI_ASSERT(builder->slice().isObject());
TRI_ASSERT(builder->isClosed());
}
@ -498,7 +498,7 @@ static std::shared_ptr<std::unordered_map<std::string, std::vector<std::string>>
// fetch a unique id for each shard to create
uint64_t const id = ci->uniqid(numberOfShards);
size_t leaderIndex = 0;
size_t followerIndex = 0;
for (uint64_t i = 0; i < numberOfShards; ++i) {
@ -1082,20 +1082,22 @@ int selectivityEstimatesOnCoordinator(
/// for their documents.
////////////////////////////////////////////////////////////////////////////////
int createDocumentOnCoordinator(
Result createDocumentOnCoordinator(
std::string const& dbname, std::string const& collname,
arangodb::OperationOptions const& options, VPackSlice const& slice,
arangodb::rest::ResponseCode& responseCode,
std::unordered_map<int, size_t>& errorCounter,
std::shared_ptr<VPackBuilder>& resultBody) {
// Set a few variables needed for our work:
ClusterInfo* ci = ClusterInfo::instance();
auto cc = ClusterComm::instance();
if (cc == nullptr) {
// nullptr happens only during controlled shutdown
// nullptr should only happen during controlled shutdown
return TRI_ERROR_SHUTTING_DOWN;
}
ClusterInfo* ci = ClusterInfo::instance();
TRI_ASSERT(ci != nullptr);
// First determine the collection ID from the name:
std::shared_ptr<LogicalCollection> collinfo;
try {
@ -1104,31 +1106,36 @@ int createDocumentOnCoordinator(
return TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND;
}
TRI_ASSERT(collinfo != nullptr);
auto collid = std::to_string(collinfo->id());
std::unordered_map<
ShardID, std::vector<std::pair<VPackValueLength, std::string>>> shardMap;
// create vars used in this function
bool const useMultiple = slice.isArray(); // insert more than one document
std::unordered_map< ShardID
, std::vector<std::pair<VPackValueLength, std::string>>
> shardMap;
std::vector<std::pair<ShardID, VPackValueLength>> reverseMapping;
bool useMultiple = slice.isArray();
int res = TRI_ERROR_NO_ERROR;
if (useMultiple) {
VPackValueLength length = slice.length();
for (VPackValueLength idx = 0; idx < length; ++idx) {
res = distributeBabyOnShards(shardMap, ci, collid, collinfo,
reverseMapping, slice.at(idx), idx,
options.isRestore);
{
// create shard map
int res = TRI_ERROR_NO_ERROR;
if (useMultiple) {
VPackValueLength length = slice.length();
for (VPackValueLength idx = 0; idx < length; ++idx) {
res = distributeBabyOnShards(shardMap, ci, collid, collinfo,
reverseMapping, slice.at(idx), idx,
options.isRestore);
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
}
} else {
res = distributeBabyOnShards(shardMap, ci, collid, collinfo, reverseMapping,
slice, 0, options.isRestore);
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
}
} else {
res = distributeBabyOnShards(shardMap, ci, collid, collinfo, reverseMapping,
slice, 0, options.isRestore);
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
}
std::string const baseUrl =
@ -1136,9 +1143,10 @@ int createDocumentOnCoordinator(
std::string const optsUrlPart =
std::string("&waitForSync=") + (options.waitForSync ? "true" : "false") +
"&returnNew=" + (options.returnNew ? "true" : "false") + "&returnOld=" +
(options.returnOld ? "true" : "false") + "&isRestore=" +
(options.isRestore ? "true" : "false");
"&returnNew=" + (options.returnNew ? "true" : "false") +
"&returnOld=" + (options.returnOld ? "true" : "false") +
"&isRestore=" + (options.isRestore ? "true" : "false") +
"&" + StaticStrings::OverWrite + "=" + (options.overwrite ? "true" : "false");
VPackBuilder reqBuilder;
@ -1181,7 +1189,7 @@ int createDocumentOnCoordinator(
"shard:" + it.first, arangodb::rest::RequestType::POST,
baseUrl + StringUtils::urlEncode(it.first) + optsUrlPart, body);
}
// Perform the requests
size_t nrDone = 0;
cc->performRequests(requests, CL_DEFAULT_TIMEOUT, nrDone, Logger::COMMUNICATION, true);
@ -1216,7 +1224,7 @@ int createDocumentOnCoordinator(
// the cluster operation was OK, however,
// the DBserver could have reported an error.
return TRI_ERROR_NO_ERROR;
return Result{};
}
////////////////////////////////////////////////////////////////////////////////
@ -1922,7 +1930,7 @@ int fetchEdgesFromEngines(
StringRef idRef(id);
auto resE = cache.find(idRef);
if (resE == cache.end()) {
// This edge is not yet cached.
// This edge is not yet cached.
allCached = false;
cache.emplace(idRef, e);
result.emplace_back(e);
@ -2617,6 +2625,7 @@ std::shared_ptr<LogicalCollection> ClusterMethods::persistCollectionInAgency(
LogicalCollection* col, bool ignoreDistributeShardsLikeErrors,
bool waitForSyncReplication, bool enforceReplicationFactor,
VPackSlice) {
std::string distributeShardsLike = col->distributeShardsLike();
std::vector<std::string> avoid = col->avoidServers();
ClusterInfo* ci = ClusterInfo::instance();
@ -2801,7 +2810,7 @@ int fetchEdgesFromEngines(
StringRef idRef(id);
auto resE = cache.find(idRef);
if (resE == cache.end()) {
// This edge is not yet cached.
// This edge is not yet cached.
allCached = false;
cache.emplace(idRef, e);
result.emplace_back(e);

View File

@ -101,7 +101,7 @@ int selectivityEstimatesOnCoordinator(std::string const& dbname, std::string con
/// @brief creates a document in a coordinator
////////////////////////////////////////////////////////////////////////////////
int createDocumentOnCoordinator(
Result createDocumentOnCoordinator(
std::string const& dbname, std::string const& collname,
OperationOptions const& options, arangodb::velocypack::Slice const& slice,
arangodb::rest::ResponseCode& responseCode,

View File

@ -116,6 +116,8 @@ bool RestDocumentHandler::createDocument() {
opOptions.waitForSync = _request->parsedValue(StaticStrings::WaitForSyncString, false);
opOptions.returnNew = _request->parsedValue(StaticStrings::ReturnNewString, false);
opOptions.silent = _request->parsedValue(StaticStrings::SilentString, false);
opOptions.overwrite = _request->parsedValue(StaticStrings::OverWrite, false);
opOptions.returnOld = _request->parsedValue(StaticStrings::ReturnOldString, false) && opOptions.overwrite;
extractStringParameter(StaticStrings::IsSynchronousReplicationString,
opOptions.isSynchronousReplicationFrom);
@ -124,7 +126,7 @@ bool RestDocumentHandler::createDocument() {
SingleCollectionTransaction trx(ctx, collectionName, AccessMode::Type::WRITE);
bool const isMultiple = body.isArray();
if (!isMultiple) {
if (!isMultiple && !opOptions.overwrite) {
trx.addHint(transaction::Hints::Hint::SINGLE_OPERATION);
}

View File

@ -241,7 +241,7 @@ std::string RestVocbaseBaseHandler::assembleDocumentId(
void RestVocbaseBaseHandler::generateSaved(
arangodb::OperationResult const& result, std::string const& collectionName,
TRI_col_type_e type, VPackOptions const* options, bool isMultiple) {
if (result.wasSynchronous) {
if (result._options.waitForSync) {
resetResponse(rest::ResponseCode::CREATED);
} else {
resetResponse(rest::ResponseCode::ACCEPTED);
@ -268,7 +268,7 @@ void RestVocbaseBaseHandler::generateSaved(
void RestVocbaseBaseHandler::generateDeleted(
arangodb::OperationResult const& result, std::string const& collectionName,
TRI_col_type_e type, VPackOptions const* options) {
if (result.wasSynchronous) {
if (result._options.waitForSync) {
resetResponse(rest::ResponseCode::OK);
} else {
resetResponse(rest::ResponseCode::ACCEPTED);

View File

@ -63,11 +63,11 @@ class RestVocbaseBaseHandler : public RestBaseHandler {
//////////////////////////////////////////////////////////////////////////////
static std::string const BATCH_PATH;
//////////////////////////////////////////////////////////////////////////////
/// @brief collection path
//////////////////////////////////////////////////////////////////////////////
static std::string const COLLECTION_PATH;
//////////////////////////////////////////////////////////////////////////////
@ -129,35 +129,35 @@ class RestVocbaseBaseHandler : public RestBaseHandler {
//////////////////////////////////////////////////////////////////////////////
static std::string const SIMPLE_QUERY_ALL_KEYS_PATH;
//////////////////////////////////////////////////////////////////////////////
/// @brief simple query by example path
//////////////////////////////////////////////////////////////////////////////
static std::string const SIMPLE_QUERY_BY_EXAMPLE;
//////////////////////////////////////////////////////////////////////////////
/// @brief simple query first example path
//////////////////////////////////////////////////////////////////////////////
static std::string const SIMPLE_FIRST_EXAMPLE;
//////////////////////////////////////////////////////////////////////////////
/// @brief simple query remove by example path
//////////////////////////////////////////////////////////////////////////////
static std::string const SIMPLE_REMOVE_BY_EXAMPLE;
//////////////////////////////////////////////////////////////////////////////
/// @brief simple query replace by example path
//////////////////////////////////////////////////////////////////////////////
static std::string const SIMPLE_REPLACE_BY_EXAMPLE;
//////////////////////////////////////////////////////////////////////////////
/// @brief simple query replace by example path
//////////////////////////////////////////////////////////////////////////////
static std::string const SIMPLE_UPDATE_BY_EXAMPLE;
//////////////////////////////////////////////////////////////////////////////

View File

@ -77,7 +77,7 @@ StringRef transaction::helpers::extractKeyPart(VPackSlice slice) {
return StringRef(); // fail
}
return StringRef(k);
}
}
if (slice.isString()) {
StringRef key(slice);
size_t pos = key.find('/');
@ -85,11 +85,11 @@ StringRef transaction::helpers::extractKeyPart(VPackSlice slice) {
return key;
}
return key.substr(pos + 1);
}
}
return StringRef();
}
/// @brief extract the _id attribute from a slice, and convert it into a
/// @brief extract the _id attribute from a slice, and convert it into a
/// string, static method
std::string transaction::helpers::extractIdString(CollectionNameResolver const* resolver,
VPackSlice slice,
@ -99,7 +99,7 @@ std::string transaction::helpers::extractIdString(CollectionNameResolver const*
if (slice.isExternal()) {
slice = slice.resolveExternal();
}
if (slice.isObject()) {
// extract id attribute from object
if (slice.isEmptyObject()) {
@ -113,7 +113,7 @@ std::string transaction::helpers::extractIdString(CollectionNameResolver const*
VPackSlice key = VPackSlice(p);
// skip over attribute value
p += key.byteSize();
if (*p == basics::VelocyPackHelper::IdAttribute) {
id = VPackSlice(p + 1);
if (id.isCustom()) {
@ -128,12 +128,12 @@ std::string transaction::helpers::extractIdString(CollectionNameResolver const*
}
}
// in case the quick access above did not work out, use the slow path...
// in case the quick access above did not work out, use the slow path...
id = slice.get(StaticStrings::IdString);
} else {
id = slice;
}
if (id.isString()) {
// already a string...
return id.copyString();
@ -156,7 +156,7 @@ std::string transaction::helpers::extractIdString(CollectionNameResolver const*
if (!key.isString()) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
return makeIdFromCustom(resolver, id, key);
}
@ -169,29 +169,29 @@ VPackSlice transaction::helpers::extractIdFromDocument(VPackSlice slice) {
slice = slice.resolveExternal();
}
TRI_ASSERT(slice.isObject());
if (slice.isEmptyObject()) {
return VPackSlice();
}
// a regular document must have at least the three attributes
// a regular document must have at least the three attributes
// _key, _id and _rev (in this order). _id must be the second attribute
uint8_t const* p = slice.begin() + slice.findDataOffset(slice.head());
if (*p == basics::VelocyPackHelper::KeyAttribute) {
// skip over _key
// skip over _key
++p;
// skip over _key value
p += VPackSlice(p).byteSize();
if (*p == basics::VelocyPackHelper::IdAttribute) {
// the + 1 is required so that we can skip over the attribute name
// and point to the attribute value
return VPackSlice(p + 1);
// and point to the attribute value
return VPackSlice(p + 1);
}
}
// fall back to the regular lookup method
return slice.get(StaticStrings::IdString);
return slice.get(StaticStrings::IdString);
}
/// @brief quick access to the _from attribute in a database document
@ -202,7 +202,7 @@ VPackSlice transaction::helpers::extractFromFromDocument(VPackSlice slice) {
slice = slice.resolveExternal();
}
TRI_ASSERT(slice.isObject());
if (slice.isEmptyObject()) {
return VPackSlice();
}
@ -215,7 +215,7 @@ VPackSlice transaction::helpers::extractFromFromDocument(VPackSlice slice) {
while (*p <= basics::VelocyPackHelper::FromAttribute && ++count <= 3) {
if (*p == basics::VelocyPackHelper::FromAttribute) {
// the + 1 is required so that we can skip over the attribute name
// and point to the attribute value
// and point to the attribute value
return VPackSlice(p + 1);
}
// skip over the attribute name
@ -225,7 +225,7 @@ VPackSlice transaction::helpers::extractFromFromDocument(VPackSlice slice) {
}
// fall back to the regular lookup method
return slice.get(StaticStrings::FromString);
return slice.get(StaticStrings::FromString);
}
/// @brief quick access to the _to attribute in a database document
@ -235,7 +235,7 @@ VPackSlice transaction::helpers::extractToFromDocument(VPackSlice slice) {
if (slice.isExternal()) {
slice = slice.resolveExternal();
}
if (slice.isEmptyObject()) {
return VPackSlice();
}
@ -247,7 +247,7 @@ VPackSlice transaction::helpers::extractToFromDocument(VPackSlice slice) {
while (*p <= basics::VelocyPackHelper::ToAttribute && ++count <= 4) {
if (*p == basics::VelocyPackHelper::ToAttribute) {
// the + 1 is required so that we can skip over the attribute name
// and point to the attribute value
// and point to the attribute value
return VPackSlice(p + 1);
}
// skip over the attribute name
@ -255,22 +255,22 @@ VPackSlice transaction::helpers::extractToFromDocument(VPackSlice slice) {
// skip over the attribute value
p += VPackSlice(p).byteSize();
}
// fall back to the regular lookup method
return slice.get(StaticStrings::ToString);
return slice.get(StaticStrings::ToString);
}
/// @brief extract _key and _rev from a document, in one go
/// this is an optimized version used when loading collections, WAL
/// this is an optimized version used when loading collections, WAL
/// collection and compaction
void transaction::helpers::extractKeyAndRevFromDocument(VPackSlice slice,
VPackSlice& keySlice,
void transaction::helpers::extractKeyAndRevFromDocument(VPackSlice slice,
VPackSlice& keySlice,
TRI_voc_rid_t& revisionId) {
if (slice.isExternal()) {
slice = slice.resolveExternal();
}
TRI_ASSERT(slice.isObject());
TRI_ASSERT(slice.length() >= 2);
TRI_ASSERT(slice.length() >= 2);
uint8_t const* p = slice.begin() + slice.findDataOffset(slice.head());
VPackValueLength count = 0;
@ -306,7 +306,7 @@ void transaction::helpers::extractKeyAndRevFromDocument(VPackSlice slice,
// fall back to regular lookup
{
keySlice = slice.get(StaticStrings::KeyString);
keySlice = slice.get(StaticStrings::KeyString);
VPackValueLength l;
char const* p = slice.get(StaticStrings::RevString).getString(l);
revisionId = TRI_StringToRid(p, l, false);
@ -316,7 +316,7 @@ void transaction::helpers::extractKeyAndRevFromDocument(VPackSlice slice,
/// @brief extract _rev from a database document
TRI_voc_rid_t transaction::helpers::extractRevFromDocument(VPackSlice slice) {
TRI_ASSERT(slice.isObject());
TRI_ASSERT(slice.length() >= 2);
TRI_ASSERT(slice.length() >= 2);
uint8_t const* p = slice.begin() + slice.findDataOffset(slice.head());
VPackValueLength count = 0;
@ -339,9 +339,9 @@ TRI_voc_rid_t transaction::helpers::extractRevFromDocument(VPackSlice slice) {
// skip over the attribute value
p += VPackSlice(p).byteSize();
}
// fall back to regular lookup
{
// fall back to regular lookup
{
VPackValueLength l;
char const* p = slice.get(StaticStrings::RevString).getString(l);
return TRI_StringToRid(p, l, false);
@ -350,7 +350,7 @@ TRI_voc_rid_t transaction::helpers::extractRevFromDocument(VPackSlice slice) {
VPackSlice transaction::helpers::extractRevSliceFromDocument(VPackSlice slice) {
TRI_ASSERT(slice.isObject());
TRI_ASSERT(slice.length() >= 2);
TRI_ASSERT(slice.length() >= 2);
uint8_t const* p = slice.begin() + slice.findDataOffset(slice.head());
VPackValueLength count = 0;
@ -365,10 +365,10 @@ VPackSlice transaction::helpers::extractRevSliceFromDocument(VPackSlice slice) {
p += VPackSlice(p).byteSize();
}
// fall back to regular lookup
// fall back to regular lookup
return slice.get(StaticStrings::RevString);
}
OperationResult transaction::helpers::buildCountResult(std::vector<std::pair<std::string, uint64_t>> const& count, bool aggregate) {
VPackBuilder resultBuilder;
@ -385,18 +385,18 @@ OperationResult transaction::helpers::buildCountResult(std::vector<std::pair<std
}
resultBuilder.close();
}
return OperationResult(Result(), resultBuilder.steal(), nullptr, false);
return OperationResult(Result(), resultBuilder.buffer(), nullptr);
}
/// @brief creates an id string from a custom _id value and the _key string
std::string transaction::helpers::makeIdFromCustom(CollectionNameResolver const* resolver,
VPackSlice const& id,
VPackSlice const& id,
VPackSlice const& key) {
TRI_ASSERT(id.isCustom() && id.head() == 0xf3);
TRI_ASSERT(key.isString());
uint64_t cid = encoding::readNumber<uint64_t>(id.begin() + 1, sizeof(uint64_t));
std::string resolved = resolver->getCollectionNameCluster(cid);
#ifdef USE_ENTERPRISE
if (resolved.compare(0, 7, "_local_") == 0) {
@ -419,40 +419,40 @@ std::string transaction::helpers::makeIdFromCustom(CollectionNameResolver const*
}
/// @brief constructor, leases a StringBuffer
transaction::StringBufferLeaser::StringBufferLeaser(transaction::Methods* trx)
transaction::StringBufferLeaser::StringBufferLeaser(transaction::Methods* trx)
: _transactionContext(trx->transactionContextPtr()),
_stringBuffer(_transactionContext->leaseStringBuffer(32)) {
}
/// @brief constructor, leases a StringBuffer
transaction::StringBufferLeaser::StringBufferLeaser(transaction::Context* transactionContext)
: _transactionContext(transactionContext),
transaction::StringBufferLeaser::StringBufferLeaser(transaction::Context* transactionContext)
: _transactionContext(transactionContext),
_stringBuffer(_transactionContext->leaseStringBuffer(32)) {
}
/// @brief destructor
transaction::StringBufferLeaser::~StringBufferLeaser() {
transaction::StringBufferLeaser::~StringBufferLeaser() {
_transactionContext->returnStringBuffer(_stringBuffer);
}
/// @brief constructor, leases a builder
transaction::BuilderLeaser::BuilderLeaser(transaction::Methods* trx)
: _transactionContext(trx->transactionContextPtr()),
transaction::BuilderLeaser::BuilderLeaser(transaction::Methods* trx)
: _transactionContext(trx->transactionContextPtr()),
_builder(_transactionContext->leaseBuilder()) {
TRI_ASSERT(_builder != nullptr);
}
/// @brief constructor, leases a builder
transaction::BuilderLeaser::BuilderLeaser(transaction::Context* transactionContext)
: _transactionContext(transactionContext),
transaction::BuilderLeaser::BuilderLeaser(transaction::Context* transactionContext)
: _transactionContext(transactionContext),
_builder(_transactionContext->leaseBuilder()) {
TRI_ASSERT(_builder != nullptr);
}
/// @brief destructor
transaction::BuilderLeaser::~BuilderLeaser() {
transaction::BuilderLeaser::~BuilderLeaser() {
if (_builder != nullptr) {
_transactionContext->returnBuilder(_builder);
_transactionContext->returnBuilder(_builder);
}
}

View File

@ -154,11 +154,11 @@ static void createBabiesError(VPackBuilder& builder,
}
}
static OperationResult emptyResult(bool waitForSync) {
static OperationResult emptyResult(OperationOptions const& options) {
VPackBuilder resultBuilder;
resultBuilder.openArray();
resultBuilder.close();
return OperationResult(Result(), resultBuilder.steal(), nullptr, waitForSync);
return OperationResult(Result(), resultBuilder.steal(), nullptr, options);
}
} // namespace
@ -512,7 +512,7 @@ std::pair<bool, bool> transaction::Methods::findIndexHandleForAndNode(
if (!supportsFilter && !supportsSort) {
continue;
}
double totalCost = filterCost;
if (!sortCondition->isEmpty()) {
// only take into account the costs for sorting if there is actually something to sort
@ -619,19 +619,19 @@ transaction::Methods::Methods(
// we are embedded but this is disallowed...
THROW_ARANGO_EXCEPTION(TRI_ERROR_TRANSACTION_NESTED);
}
_state = parent;
TRI_ASSERT(_state != nullptr);
_state->increaseNesting();
} else { // non-embedded
TRI_vocbase_t& vocbase = _transactionContextPtr->vocbase();
// now start our own transaction
StorageEngine* engine = EngineSelectorFeature::ENGINE;
_state = engine->createTransactionState(vocbase, options).release();
TRI_ASSERT(_state != nullptr);
// register the transaction in the context
_transactionContextPtr->registerTransaction(_state);
}
@ -713,6 +713,7 @@ void transaction::Methods::buildDocumentIdentity(
LogicalCollection* collection, VPackBuilder& builder, TRI_voc_cid_t cid,
StringRef const& key, TRI_voc_rid_t rid, TRI_voc_rid_t oldRid,
ManagedDocumentResult const* oldDoc, ManagedDocumentResult const* newDoc) {
std::string temp; // TODO: pass a string into this function
temp.reserve(64);
@ -881,7 +882,7 @@ OperationResult transaction::Methods::anyLocal(
if (cid == 0) {
throwCollectionNotFound(collectionName.c_str());
}
pinData(cid); // will throw when it fails
VPackBuilder resultBuilder;
@ -907,10 +908,10 @@ OperationResult transaction::Methods::anyLocal(
return OperationResult(res);
}
}
resultBuilder.close();
return OperationResult(Result(), resultBuilder.steal(), _transactionContextPtr->orderCustomTypeHandler(), false);
return OperationResult(Result(), resultBuilder.steal(), _transactionContextPtr->orderCustomTypeHandler());
}
TRI_voc_cid_t transaction::Methods::addCollectionAtRuntime(
@ -1018,7 +1019,7 @@ void transaction::Methods::invokeOnAllElements(
if (!lockResult.ok() && !lockResult.is(TRI_ERROR_LOCKED)) {
THROW_ARANGO_EXCEPTION(lockResult);
}
TRI_ASSERT(isLocked(collection, AccessMode::Type::READ));
collection->invokeOnAllElements(this, callback);
@ -1156,7 +1157,7 @@ OperationResult transaction::Methods::clusterResultDocument(
case rest::ResponseCode::PRECONDITION_FAILED:
return OperationResult(Result(responseCode == rest::ResponseCode::OK
? TRI_ERROR_NO_ERROR
: TRI_ERROR_ARANGO_CONFLICT), resultBody->steal(), nullptr, false, errorCounter);
: TRI_ERROR_ARANGO_CONFLICT), resultBody->steal(), nullptr, OperationOptions{}, errorCounter);
case rest::ResponseCode::NOT_FOUND:
return errorCodeFromClusterResult(resultBody, TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND);
default:
@ -1168,11 +1169,16 @@ OperationResult transaction::Methods::clusterResultDocument(
OperationResult transaction::Methods::clusterResultInsert(
rest::ResponseCode const& responseCode,
std::shared_ptr<VPackBuilder> const& resultBody,
OperationOptions const& options,
std::unordered_map<int, size_t> const& errorCounter) const {
switch (responseCode) {
case rest::ResponseCode::ACCEPTED:
case rest::ResponseCode::CREATED:
return OperationResult(Result(), resultBody->steal(), nullptr, responseCode == rest::ResponseCode::CREATED, errorCounter);
case rest::ResponseCode::CREATED: {
OperationOptions copy = options;
copy.waitForSync = (responseCode == rest::ResponseCode::CREATED); // wait for sync is abused here
// operationResult should get a return code.
return OperationResult(Result(), resultBody->steal(), nullptr, copy, errorCounter);
}
case rest::ResponseCode::PRECONDITION_FAILED:
return errorCodeFromClusterResult(resultBody, TRI_ERROR_ARANGO_CONFLICT);
case rest::ResponseCode::BAD:
@ -1202,10 +1208,11 @@ OperationResult transaction::Methods::clusterResultModify(
}
// Fall through
case rest::ResponseCode::ACCEPTED:
case rest::ResponseCode::CREATED:
return OperationResult(Result(errorCode), resultBody->steal(), nullptr,
responseCode == rest::ResponseCode::CREATED,
errorCounter);
case rest::ResponseCode::CREATED: {
OperationOptions options;
options.waitForSync = (responseCode == rest::ResponseCode::CREATED);
return OperationResult(Result(errorCode), resultBody->steal(), nullptr, options, errorCounter);
}
case rest::ResponseCode::BAD:
return errorCodeFromClusterResult(resultBody, TRI_ERROR_INTERNAL);
case rest::ResponseCode::NOT_FOUND:
@ -1223,13 +1230,16 @@ OperationResult transaction::Methods::clusterResultRemove(
switch (responseCode) {
case rest::ResponseCode::OK:
case rest::ResponseCode::ACCEPTED:
case rest::ResponseCode::PRECONDITION_FAILED:
case rest::ResponseCode::PRECONDITION_FAILED: {
OperationOptions options;
options.waitForSync = (responseCode != rest::ResponseCode::ACCEPTED);
return OperationResult(
Result(responseCode == rest::ResponseCode::PRECONDITION_FAILED
? TRI_ERROR_ARANGO_CONFLICT
: TRI_ERROR_NO_ERROR),
resultBody->steal(), nullptr,
responseCode != rest::ResponseCode::ACCEPTED, errorCounter);
options, errorCounter);
}
case rest::ResponseCode::BAD:
return errorCodeFromClusterResult(resultBody, TRI_ERROR_INTERNAL);
case rest::ResponseCode::NOT_FOUND:
@ -1370,7 +1380,7 @@ OperationResult transaction::Methods::documentLocal(
return OperationResult(std::move(res), resultBuilder.steal(),
_transactionContextPtr->orderCustomTypeHandler(),
options.waitForSync, countErrorCodes);
options, countErrorCodes);
}
/// @brief create one or multiple documents in a collection
@ -1386,7 +1396,7 @@ OperationResult transaction::Methods::insert(std::string const& collectionName,
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
if (value.isArray() && value.length() == 0) {
return emptyResult(options.waitForSync);
return emptyResult(options);
}
// Validate Edges
@ -1412,21 +1422,15 @@ OperationResult transaction::Methods::insertCoordinator(
rest::ResponseCode responseCode;
std::unordered_map<int, size_t> errorCounter;
auto resultBody = std::make_shared<VPackBuilder>();
int res = arangodb::createDocumentOnCoordinator(
vocbase().name(),
collectionName,
options,
value,
responseCode,
errorCounter,
resultBody
);
if (res == TRI_ERROR_NO_ERROR) {
return clusterResultInsert(responseCode, resultBody, errorCounter);
Result res = arangodb::createDocumentOnCoordinator(
vocbase().name(), collectionName, options, value, responseCode,
errorCounter, resultBody);
if (res.ok()) {
return clusterResultInsert(responseCode, resultBody, options, errorCounter);
}
return OperationResult(res);
return OperationResult(res, options);
}
#endif
@ -1464,18 +1468,18 @@ OperationResult transaction::Methods::insertLocal(
std::string theLeader = collection->followers()->getLeader();
if (theLeader.empty()) {
if (!options.isSynchronousReplicationFrom.empty()) {
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_REFUSES_REPLICATION);
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_REFUSES_REPLICATION, options);
}
} else { // we are a follower following theLeader
isFollower = true;
if (options.isSynchronousReplicationFrom.empty()) {
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED);
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED, options);
}
if (options.isSynchronousReplicationFrom != theLeader) {
return OperationResult(TRI_ERROR_CLUSTER_SHARD_FOLLOWER_REFUSES_OPERATION);
return OperationResult(TRI_ERROR_CLUSTER_SHARD_FOLLOWER_REFUSES_OPERATION, options);
}
}
}
} // isDBServer - early block
if (options.returnNew) {
pinData(cid); // will throw when it fails
@ -1489,13 +1493,28 @@ OperationResult transaction::Methods::insertLocal(
return Result(TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
ManagedDocumentResult result;
ManagedDocumentResult documentResult;
TRI_voc_tick_t resultMarkerTick = 0;
TRI_voc_rid_t revisionId = 0;
Result res =
collection->insert(this, value, result, options, resultMarkerTick,
!isLocked(collection, AccessMode::Type::WRITE), revisionId);
auto const needsLock = !isLocked(collection, AccessMode::Type::WRITE);
Result res = collection->insert( this, value, documentResult, options
, resultMarkerTick, needsLock, revisionId
);
ManagedDocumentResult previousDocumentResult; // return OLD
TRI_voc_rid_t previousRevisionId = 0;
if(options.overwrite && res.is(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED)){
// RepSert Case - unique_constraint violated -> maxTick has not changed -> try replace
resultMarkerTick = 0;
res = collection->replace( this, value, documentResult, options
, resultMarkerTick, needsLock, previousRevisionId
, previousDocumentResult);
if(res.ok()){
revisionId = TRI_ExtractRevisionId(VPackSlice(documentResult.vpack()));
}
}
if (resultMarkerTick > 0 && resultMarkerTick > maxTick) {
maxTick = resultMarkerTick;
@ -1507,16 +1526,28 @@ OperationResult transaction::Methods::insertLocal(
return res;
}
if (!options.silent || _state->isDBServer()) {
TRI_ASSERT(!result.empty());
TRI_ASSERT(!documentResult.empty());
StringRef keyString(transaction::helpers::extractKeyFromDocument(
VPackSlice(result.vpack())));
StringRef keyString(transaction::helpers::extractKeyFromDocument(
VPackSlice(documentResult.vpack())));
buildDocumentIdentity(collection, resultBuilder, cid, keyString, revisionId,
0, nullptr, options.returnNew ? &result : nullptr);
bool showReplaced = false;
if(options.returnOld && previousRevisionId){
showReplaced = true;
}
if(showReplaced){
TRI_ASSERT(!previousDocumentResult.empty());
}
buildDocumentIdentity(collection, resultBuilder
,cid, keyString, revisionId ,previousRevisionId
,showReplaced ? &previousDocumentResult : nullptr
,options.returnNew ? &documentResult : nullptr);
}
return Result();
};
@ -1557,12 +1588,10 @@ OperationResult transaction::Methods::insertLocal(
// Now replicate the good operations on all followers:
std::string path =
"/_db/" +
arangodb::basics::StringUtils::urlEncode(vocbase().name()) +
"/_api/document/" +
arangodb::basics::StringUtils::urlEncode(collection->name()) +
"?isRestore=true&isSynchronousReplication=" +
ServerState::instance()->getId();
"/_db/" + arangodb::basics::StringUtils::urlEncode(vocbase().name()) +
"/_api/document/" + arangodb::basics::StringUtils::urlEncode(collection->name()) +
"?isRestore=true&isSynchronousReplication=" + ServerState::instance()->getId();
"&" + StaticStrings::OverWrite + "=" + (options.overwrite ? "true" : "false");
VPackBuilder payload;
@ -1618,7 +1647,7 @@ OperationResult transaction::Methods::insertLocal(
// error (note that we use the follower version, since we have
// lost leadership):
if (findRefusal(requests)) {
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED);
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED, options);
}
// Otherwise we drop all followers that were not successful:
@ -1660,7 +1689,7 @@ OperationResult transaction::Methods::insertLocal(
resultBuilder.clear();
}
return OperationResult(std::move(res), resultBuilder.steal(), nullptr, options.waitForSync, countErrorCodes);
return OperationResult(std::move(res), resultBuilder.steal(), nullptr, options, countErrorCodes);
}
/// @brief update/patch one or multiple documents in a collection
@ -1676,7 +1705,7 @@ OperationResult transaction::Methods::update(std::string const& collectionName,
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
if (newValue.isArray() && newValue.length() == 0) {
return emptyResult(options.waitForSync);
return emptyResult(options);
}
OperationOptions optionsCopy = options;
@ -1737,7 +1766,7 @@ OperationResult transaction::Methods::replace(std::string const& collectionName,
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
if (newValue.isArray() && newValue.length() == 0) {
return emptyResult(options.waitForSync);
return emptyResult(options);
}
OperationOptions optionsCopy = options;
@ -1824,7 +1853,7 @@ OperationResult transaction::Methods::modifyLocal(
if (!lockResult.ok() && !lockResult.is(TRI_ERROR_LOCKED)) {
return OperationResult(lockResult);
}
VPackBuilder resultBuilder; // building the complete result
TRI_voc_tick_t maxTick = 0;
@ -1857,7 +1886,7 @@ OperationResult transaction::Methods::modifyLocal(
maxTick = resultMarkerTick;
}
if (res.errorNumber() == TRI_ERROR_ARANGO_CONFLICT) {
if (res.is(TRI_ERROR_ARANGO_CONFLICT)) {
// still return
if (!isBabies) {
StringRef key(newVal.get(StaticStrings::KeyString));
@ -1882,7 +1911,7 @@ OperationResult transaction::Methods::modifyLocal(
}
return res; // must be ok!
};
}; // workForOneDocument
///////////////////////
bool multiCase = newValue.isArray();
@ -2033,7 +2062,7 @@ OperationResult transaction::Methods::modifyLocal(
resultBuilder.clear();
}
return OperationResult(std::move(res), resultBuilder.steal(), nullptr, options.waitForSync, errorCounter);
return OperationResult(std::move(res), resultBuilder.steal(), nullptr, options, errorCounter);
}
/// @brief remove one or multiple documents in a collection
@ -2049,7 +2078,7 @@ OperationResult transaction::Methods::remove(std::string const& collectionName,
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
if (value.isArray() && value.length() == 0) {
return emptyResult(options.waitForSync);
return emptyResult(options);
}
OperationOptions optionsCopy = options;
@ -2162,7 +2191,7 @@ OperationResult transaction::Methods::removeLocal(
}
if (!res.ok()) {
if (res.errorNumber() == TRI_ERROR_ARANGO_CONFLICT && !isBabies) {
if (res.is(TRI_ERROR_ARANGO_CONFLICT) && !isBabies) {
buildDocumentIdentity(collection, resultBuilder, cid, key,
actualRevision, 0,
options.returnOld ? &previous : nullptr, nullptr);
@ -2321,7 +2350,7 @@ OperationResult transaction::Methods::removeLocal(
resultBuilder.clear();
}
return OperationResult(std::move(res), resultBuilder.steal(), nullptr, options.waitForSync, countErrorCodes);
return OperationResult(std::move(res), resultBuilder.steal(), nullptr, options, countErrorCodes);
}
/// @brief fetches all documents in a collection
@ -2353,7 +2382,7 @@ OperationResult transaction::Methods::allLocal(
TRI_voc_cid_t cid = addCollectionAtRuntime(collectionName);
pinData(cid); // will throw when it fails
VPackBuilder resultBuilder;
resultBuilder.openArray();
@ -2382,10 +2411,10 @@ OperationResult transaction::Methods::allLocal(
return OperationResult(res);
}
}
resultBuilder.close();
return OperationResult(Result(), resultBuilder.steal(), _transactionContextPtr->orderCustomTypeHandler(), false);
return OperationResult(Result(), resultBuilder.steal(), _transactionContextPtr->orderCustomTypeHandler());
}
/// @brief remove all documents in a collection
@ -2449,7 +2478,7 @@ OperationResult transaction::Methods::truncateLocal(
if (!lockResult.ok() && !lockResult.is(TRI_ERROR_LOCKED)) {
return OperationResult(lockResult);
}
TRI_ASSERT(isLocked(collection, AccessMode::Type::WRITE));
try {
@ -2612,7 +2641,6 @@ OperationResult transaction::Methods::countCoordinator(
if (res != TRI_ERROR_NO_ERROR) {
return OperationResult(res);
}
return buildCountResult(count, aggregate);
}
#endif
@ -2628,7 +2656,7 @@ OperationResult transaction::Methods::countLocal(
if (!lockResult.ok() && !lockResult.is(TRI_ERROR_LOCKED)) {
return OperationResult(lockResult);
}
TRI_ASSERT(isLocked(collection, AccessMode::Type::READ));
uint64_t num = collection->numberDocuments(this);
@ -2644,7 +2672,7 @@ OperationResult transaction::Methods::countLocal(
VPackBuilder resultBuilder;
resultBuilder.add(VPackValue(num));
return OperationResult(Result(), resultBuilder.steal(), nullptr, false);
return OperationResult(Result(), resultBuilder.steal(), nullptr);
}
/// @brief Gets the best fitting index for an AQL condition.

View File

@ -206,7 +206,7 @@ class Methods {
/// @brief finish a transaction (commit or abort), based on the previous state
Result finish(int errorNum);
Result finish(Result const& res);
/// @brief return the transaction id
TRI_voc_tid_t tid() const;
@ -414,6 +414,8 @@ class Methods {
/// @brief build a VPack object with _id, _key and _rev and possibly
/// oldRef (if given), the result is added to the builder in the
/// argument as a single object.
// TODO: should the options parameter be const?
void buildDocumentIdentity(arangodb::LogicalCollection* collection,
VPackBuilder& builder, TRI_voc_cid_t cid,
StringRef const& key, TRI_voc_rid_t rid,
@ -528,6 +530,7 @@ class Methods {
OperationResult clusterResultInsert(
rest::ResponseCode const& responseCode,
std::shared_ptr<arangodb::velocypack::Builder> const& resultBody,
OperationOptions const& options,
std::unordered_map<int, size_t> const& errorCounter) const;
/// @brief Helper create a Cluster Communication modify result

View File

@ -33,7 +33,7 @@ struct OperationOptions {
OperationOptions()
: recoveryData(nullptr), waitForSync(false), keepNull(true),
mergeObjects(true), silent(false), ignoreRevs(true),
returnOld(false), returnNew(false), isRestore(false),
returnOld(false), returnNew(false), isRestore(false), overwrite(false),
indexOperationMode(Index::OperationMode::normal) {}
// original marker, set by an engine's recovery procedure only!
@ -64,6 +64,9 @@ struct OperationOptions {
// this option is there to ensure _key values once set can be restored by replicated and arangorestore
bool isRestore;
// for insert operations: do not fail if _key exists but replace the document
bool overwrite;
// for synchronous replication operations, we have to mark them such that
// we can deny them if we are a (new) leader, and that we can deny other
// operation if we are merely a follower. Finally, we must deny replications

View File

@ -26,6 +26,7 @@
#include "Basics/Common.h"
#include "Basics/Result.h"
#include "Utils/OperationOptions.h"
#include <velocypack/Buffer.h>
#include <velocypack/Options.h>
@ -33,16 +34,19 @@
#include <velocypack/velocypack-aliases.h>
namespace arangodb {
struct OperationResult {
OperationResult() {}
OperationResult() {}
// create from integer status code
explicit OperationResult(int code) : result(code) {}
explicit OperationResult(int code, OperationOptions const& options) : result(code), _options(options) {}
// create from Result
explicit OperationResult(Result const& other) : result(other) {}
explicit OperationResult(Result const& other, OperationOptions const& options) : result(other), _options(options){}
explicit OperationResult(Result&& other) : result(std::move(other)) {}
explicit OperationResult(Result&& other, OperationOptions const& options) : result(std::move(other)), _options(options) {}
// copy
OperationResult(OperationResult const& other) = delete;
@ -55,24 +59,29 @@ struct OperationResult {
result = std::move(other.result);
buffer = std::move(other.buffer);
customTypeHandler = std::move(other.customTypeHandler);
wasSynchronous = other.wasSynchronous;
_options = other._options;
countErrorCodes = std::move(other.countErrorCodes);
}
return *this;
}
// create result with details
// create result with details
OperationResult(Result&& result,
std::shared_ptr<VPackBuffer<uint8_t>> const& buffer,
std::shared_ptr<VPackCustomTypeHandler> const& handler,
bool wasSynchronous,
OperationOptions const& options = {},
std::unordered_map<int, size_t> const& countErrorCodes = std::unordered_map<int, size_t>())
: result(std::move(result)),
buffer(buffer),
customTypeHandler(handler),
wasSynchronous(wasSynchronous),
countErrorCodes(countErrorCodes) {}
_options(options),
countErrorCodes(countErrorCodes) {
if(result.ok()){
TRI_ASSERT(buffer != nullptr);
TRI_ASSERT(buffer->data() != nullptr);
}
}
~OperationResult() = default;
// Result-like interface
@ -84,7 +93,7 @@ struct OperationResult {
std::string errorMessage() const { return result.errorMessage(); }
inline VPackSlice slice() const {
TRI_ASSERT(buffer != nullptr);
TRI_ASSERT(buffer != nullptr);
return VPackSlice(buffer->data());
}
@ -92,7 +101,7 @@ struct OperationResult {
// TODO: add a slice that points to either buffer or raw data
std::shared_ptr<VPackBuffer<uint8_t>> buffer;
std::shared_ptr<VPackCustomTypeHandler> customTypeHandler;
bool wasSynchronous = false;
OperationOptions _options;
// Executive summary for baby operations: reports all errors that did occur
// during these operations. Details are stored in the respective positions of

View File

@ -947,7 +947,7 @@ static void JS_DropVocbaseCol(v8::FunctionCallbackInfo<v8::Value> const& args) {
allowDropSystem = TRI_ObjectToBoolean(args[0]);
}
}
auto res =
methods::Collections::drop(&vocbase, collection, allowDropSystem, timeout);
@ -2199,6 +2199,10 @@ static void InsertVocbaseCol(v8::Isolate* isolate,
options.waitForSync =
TRI_ObjectToBoolean(optionsObject->Get(WaitForSyncKey));
}
TRI_GET_GLOBAL_STRING(OverwriteKey);
if (optionsObject->Has(OverwriteKey)) {
options.overwrite = TRI_ObjectToBoolean(optionsObject->Get(OverwriteKey));
}
TRI_GET_GLOBAL_STRING(SilentKey);
if (optionsObject->Has(SilentKey)) {
options.silent = TRI_ObjectToBoolean(optionsObject->Get(SilentKey));
@ -2207,6 +2211,10 @@ static void InsertVocbaseCol(v8::Isolate* isolate,
if (optionsObject->Has(ReturnNewKey)) {
options.returnNew = TRI_ObjectToBoolean(optionsObject->Get(ReturnNewKey));
}
TRI_GET_GLOBAL_STRING(ReturnOldKey);
if (optionsObject->Has(ReturnOldKey)) {
options.returnOld = TRI_ObjectToBoolean(optionsObject->Get(ReturnOldKey)) && options.overwrite;
}
TRI_GET_GLOBAL_STRING(IsRestoreKey);
if (optionsObject->Has(IsRestoreKey)) {
options.isRestore = TRI_ObjectToBoolean(optionsObject->Get(IsRestoreKey));
@ -2283,7 +2291,7 @@ static void InsertVocbaseCol(v8::Isolate* isolate,
transactionContext, collection->id(), AccessMode::Type::WRITE
);
if (!payloadIsArray) {
if (!payloadIsArray && !options.overwrite) {
trx.addHint(transaction::Hints::Hint::SINGLE_OPERATION);
}

View File

@ -299,10 +299,12 @@ class LogicalCollection: public LogicalDataSource {
ManagedDocumentResult& result, OperationOptions&,
TRI_voc_tick_t&, bool, TRI_voc_rid_t& prevRev,
ManagedDocumentResult& previous);
Result replace(transaction::Methods*, velocypack::Slice const,
ManagedDocumentResult& result, OperationOptions&,
TRI_voc_tick_t&, bool, TRI_voc_rid_t& prevRev,
TRI_voc_tick_t&, bool /*lock*/, TRI_voc_rid_t& prevRev,
ManagedDocumentResult& previous);
Result remove(transaction::Methods*, velocypack::Slice const,
OperationOptions&, TRI_voc_tick_t&, bool,
TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous);

View File

@ -891,10 +891,19 @@ ArangoCollection.prototype.save =
if (options.returnNew) {
url = this._appendBoolParameter(url, 'returnNew', options.returnNew);
}
if (options.returnOld) {
url = this._appendBoolParameter(url, 'returnOld', options.returnOld);
}
if (options.silent) {
url = this._appendBoolParameter(url, 'silent', options.silent);
}
if (options.overwrite) {
url = this._appendBoolParameter(url, 'overwrite', options.overwrite);
}
if (data === undefined || typeof data !== 'object') {
throw new ArangoError({
errorNum: internal.errors.ERROR_ARANGO_DOCUMENT_TYPE_INVALID.code,

View File

@ -1208,4 +1208,81 @@ describe('babies collection document', function () {
expect(b7[0]._oldRev).to.be.a('string');
});
});
// Tests for the insert `overwrite` option (RepSert): POSTing a document whose
// _key already exists must replace the stored document instead of failing with
// a unique-constraint violation.
describe('overwrite', function () {
let base_url = '/_api/document/' + cn;
// Single replace: insert one document, then re-insert with the same _key and
// overwrite=true; the response must carry the old revision and (via
// returnOld) the previous document body.
it('overwrite once', function () {
let url1 = base_url;
// initial insert (babies array with a single document)
let req1 = request.post(url1, extend(endpoint, {
body: JSON.stringify([{
'Hallo': 12
}])
}));
let b1 = JSON.parse(req1.rawBody);
let res1 = b1[0];
// second insert with the same _key; overwrite turns it into a replace
let url2 = base_url + '?overwrite=true&returnOld=true';
let req2 = request.post(url2, extend(endpoint, {
body: JSON.stringify([{
'_key': res1._key,
'ulf': 42
}])
}));
let b2 = JSON.parse(req2.rawBody);
let res2 = b2[0];
// 202 = accepted (no waitForSync); key is unchanged, _oldRev points at the
// first revision, and `old` holds the replaced document's attributes
expect(req2.statusCode).to.equal(202);
expect(res2._key).to.equal(res1._key);
expect(res2._oldRev).to.equal(res1._rev);
expect(res2.old.Hallo).to.equal(12);
});
// Babies replace: one request replaces the same _key three times in order;
// each result's `old` must chain to the previous result's `new` revision.
it('overwrite multi', function () {
let url1 = base_url;
// initial insert establishing the key to be overwritten
let req1 = request.post(url1, extend(endpoint, {
body: JSON.stringify([{
'Hallo': 12
}])
}));
let b1 = JSON.parse(req1.rawBody);
let res1 = b1[0];
let key1 = res1._key;
// three babies all targeting the same _key; request both old and new docs
let url2 = base_url + '?overwrite=true&returnOld=true&returnNew=true';
let req2 = request.post(url2, extend(endpoint, {
body: JSON.stringify([
{
'_key': key1,
'ulf': 42
},{
'_key': key1,
'ulf': 32
},{
'_key': key1,
'ulfine': 23
}
])
}));
expect(req2.statusCode).to.equal(202);
let b2 = JSON.parse(req2.rawBody);
// one result object per baby, all for the same key
expect(b2).to.be.instanceof(Array);
expect(b2).to.have.lengthOf(3);
expect(b2[0]).to.be.a('object');
expect(b2[1]).to.be.a('object');
expect(b2[2]).to.be.a('object');
expect(b2[0]._key).to.be.a('string');
expect(b2[0]._key).to.equal(key1);
expect(b2[1]._key).to.equal(key1);
expect(b2[2]._key).to.equal(key1);
// revision chaining: the document replaced by baby 3 is the one written by
// baby 2 (i.e. babies were applied sequentially, in array order)
expect(b2[1]._rev).to.equal(b2[2].old._rev);
expect(b2[2].old._rev).to.equal(b2[1].new._rev);
// final state is a full replace, not a merge: only `ulfine` survives
expect(b2[2].new.ulfine).to.equal(23);
expect(b2[2].new.ulf).to.equal(undefined);
});
}); // overwrite - end
});

View File

@ -219,7 +219,7 @@ function CollectionDocumentSuite () {
},
////////////////////////////////////////////////////////////////////////////////
/// @brief create a document w/ invalid type
/// @brief create a document w/ invalid type
////////////////////////////////////////////////////////////////////////////////
testSaveInvalidDocumentType : function () {
@ -235,7 +235,7 @@ function CollectionDocumentSuite () {
},
////////////////////////////////////////////////////////////////////////////////
/// @brief update a document w/ invalid type
/// @brief update a document w/ invalid type
////////////////////////////////////////////////////////////////////////////////
testUpdateInvalidDocumentType : function () {
@ -253,7 +253,7 @@ function CollectionDocumentSuite () {
},
////////////////////////////////////////////////////////////////////////////////
/// @brief replace a document w/ invalid type
/// @brief replace a document w/ invalid type
////////////////////////////////////////////////////////////////////////////////
testReplaceInvalidDocumentType : function () {
@ -309,7 +309,7 @@ function CollectionDocumentSuite () {
testSaveSpecialCharsDocumentKey : function () {
[ ":", "-", "_", "@", ".", "..", "...", "a@b", "a@b.c", "a-b-c", "_a", "@a", "@a-b", ":80", ":_", "@:_",
"0", "1", "123456", "0123456", "true", "false", "a", "A", "a1", "A1", "01ab01", "01AB01",
"abcd-efgh", "abcd_efgh", "Abcd_Efgh", "@@", "abc@foo.bar", "@..abc-@-foo__bar",
"abcd-efgh", "abcd_efgh", "Abcd_Efgh", "@@", "abc@foo.bar", "@..abc-@-foo__bar",
".foobar", "-foobar", "_foobar", "@foobar", "(valid)", "%valid", "$valid",
"$$bill,y'all", "'valid", "'a-key-is-a-key-is-a-key'", "m+ller", ";valid", ",valid", "!valid!",
":::", ":-:-:", ";", ";;;;;;;;;;", "(", ")", "()xoxo()", "%",
@ -319,7 +319,7 @@ function CollectionDocumentSuite () {
var doc1 = collection.save({ _key: key, value: key });
assertEqual(key, doc1._key);
assertEqual(cn + "/" + key, doc1._id);
assertTrue(collection.exists(key));
assertTrue(db._exists(cn + "/" + key));
@ -360,7 +360,7 @@ function CollectionDocumentSuite () {
catch (err) {
assertEqual(ERRORS.ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED.code, err.errorNum);
}
assertTypeOf("string", d1._id);
assertTypeOf("string", d1._key);
assertTypeOf("string", d1._rev);
@ -392,7 +392,7 @@ function CollectionDocumentSuite () {
catch (err) {
assertEqual(ERRORS.ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED.code, err.errorNum);
}
assertTypeOf("string", d1._id);
assertTypeOf("string", d1._key);
assertTypeOf("string", d1._rev);
@ -423,7 +423,7 @@ function CollectionDocumentSuite () {
catch (err) {
assertEqual(ERRORS.ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED.code, err.errorNum);
}
d1 = null;
d2 = null;
@ -435,7 +435,7 @@ function CollectionDocumentSuite () {
assertEqual("UnitTestsCollectionBasics/test", doc._id);
assertEqual("test", doc._key);
assertEqual(1, doc.value);
assertEqual(1, collection.count());
},
@ -445,7 +445,7 @@ function CollectionDocumentSuite () {
testSaveDocumentDuplicateSecondaryUnloadReload : function () {
var d1, d2, doc;
collection.ensureUniqueConstraint("value1");
try {
@ -469,7 +469,7 @@ function CollectionDocumentSuite () {
assertEqual("test1", doc._key);
assertEqual(1, doc.value1);
assertEqual(1, doc.value2);
assertEqual(1, collection.count());
},
@ -481,7 +481,7 @@ function CollectionDocumentSuite () {
var d, i;
for (i = 0; i < 10; ++i) {
d = collection.save({ _key: "test", value: i });
d = collection.save({ _key: "test", value: i });
collection.remove(d);
}
d = null;
@ -493,8 +493,8 @@ function CollectionDocumentSuite () {
collection.load();
assertEqual(0, collection.count());
d = collection.save({ _key: "test", value: 200 });
d = collection.save({ _key: "test", value: 200 });
assertTypeOf("string", d._id);
assertTypeOf("string", d._key);
assertTypeOf("string", d._rev);
@ -517,7 +517,7 @@ function CollectionDocumentSuite () {
var i;
for (i = 0; i < 10; ++i) {
var d = collection.save({ _key: "test", value: i });
var d = collection.save({ _key: "test", value: i });
collection.remove(d);
}
collection.save({ _key: "test", value: 99 });
@ -529,7 +529,7 @@ function CollectionDocumentSuite () {
collection.load();
assertEqual(1, collection.count());
var doc = collection.document("test");
assertEqual("UnitTestsCollectionBasics/test", doc._id);
assertEqual("test", doc._key);
@ -542,7 +542,7 @@ function CollectionDocumentSuite () {
testSaveDocumentDuplicatesViolationSurvive : function () {
var i;
try {
collection.remove("test");
fail("whoops");
@ -552,7 +552,7 @@ function CollectionDocumentSuite () {
for (i = 0; i < 10; ++i) {
try {
collection.save({ _key: "test", value: i });
collection.save({ _key: "test", value: i });
}
catch (e2) {
}
@ -570,7 +570,7 @@ function CollectionDocumentSuite () {
collection.load();
assertEqual(1, collection.count());
doc = collection.document("test");
assertEqual("UnitTestsCollectionBasics/test", doc._id);
assertEqual("test", doc._key);
@ -686,7 +686,7 @@ function CollectionDocumentSuite () {
assertTypeOf("string", r._id);
assertTypeOf("string", r._key);
assertTypeOf("string", r._rev);
var d2 = collection.save({ _key : "2" });
// string keys
assertFalse(collection.exists("1"));
@ -847,7 +847,7 @@ function CollectionDocumentSuite () {
assertNotEqual(a1._rev, a2._rev);
},
////////////////////////////////////////////////////////////////////////////////
/// @brief tests the replace function with new signature
/// @brief tests the replace function with new signature
////////////////////////////////////////////////////////////////////////////////
testReplaceWithNewSignatureDocumentSyncFalse : function () {
@ -879,7 +879,7 @@ function CollectionDocumentSuite () {
},
////////////////////////////////////////////////////////////////////////////////
/// @brief tests the replace function with new signature
/// @brief tests the replace function with new signature
////////////////////////////////////////////////////////////////////////////////
testReplaceDocumentSyncTrue2 : function () {
@ -934,11 +934,11 @@ function CollectionDocumentSuite () {
assertEqual(a1._id, doc4._id);
assertEqual(a4._rev, doc4._rev);
assertEqual(4, doc4.a);
var a5 = collection.update(a4, { b : 1, c : 2, d : "foo", e : null });
assertEqual(a1._id, a5._id);
assertNotEqual(a4._rev, a5._rev);
var doc5 = collection.document(a1._id);
assertEqual(a1._id, doc5._id);
assertEqual(a5._rev, doc5._rev);
@ -951,7 +951,7 @@ function CollectionDocumentSuite () {
var a6 = collection.update(a5, { f : null, b : null, a : null, g : 2, c : 4 });
assertEqual(a1._id, a6._id);
assertNotEqual(a5._rev, a6._rev);
var doc6 = collection.document(a1._id);
assertEqual(a1._id, doc6._id);
assertEqual(a6._rev, doc6._rev);
@ -962,11 +962,11 @@ function CollectionDocumentSuite () {
assertEqual(null, doc6.e);
assertEqual(null, doc6.f);
assertEqual(2, doc6.g);
var a7 = collection.update(a6, { a : null, b : null, c : null, g : null }, true, false);
assertEqual(a1._id, a7._id);
assertNotEqual(a6._rev, a7._rev);
var doc7 = collection.document(a1._id);
assertEqual(a1._id, doc7._id);
assertEqual(a7._rev, doc7._rev);
@ -977,33 +977,33 @@ function CollectionDocumentSuite () {
assertEqual(null, doc7.e);
assertEqual(null, doc7.f);
assertEqual(undefined, doc7.g);
var a8 = collection.update(a7, { d : { "one" : 1, "two" : 2, "three" : 3 }, e : { }, f : { "one" : 1 }} );
assertEqual(a1._id, a8._id);
assertNotEqual(a7._rev, a8._rev);
var doc8 = collection.document(a1._id);
assertEqual(a1._id, doc8._id);
assertEqual(a8._rev, doc8._rev);
assertEqual({"one": 1, "two": 2, "three": 3}, doc8.d);
assertEqual({}, doc8.e);
assertEqual({"one": 1}, doc8.f);
var a9 = collection.update(a8, { d : { "four" : 4 }, "e" : { "e1" : [ 1, 2 ], "e2" : 2 }, "f" : { "three" : 3 }} );
assertEqual(a1._id, a9._id);
assertNotEqual(a8._rev, a9._rev);
var doc9 = collection.document(a1._id);
assertEqual(a1._id, doc9._id);
assertEqual(a9._rev, doc9._rev);
assertEqual({"one": 1, "two": 2, "three": 3, "four": 4}, doc9.d);
assertEqual({"e2": 2, "e1": [ 1, 2 ]}, doc9.e);
assertEqual({"one": 1, "three": 3}, doc9.f);
var a10 = collection.update(a9, { d : { "one" : -1, "two": null, "four" : null, "five" : 5 }, "e" : { "e1" : 1, "e2" : null, "e3" : 3 }}, true, false);
assertEqual(a1._id, a10._id);
assertNotEqual(a9._rev, a10._rev);
var doc10 = collection.document(a1._id);
assertEqual(a1._id, doc10._id);
assertEqual(a10._rev, doc10._rev);
@ -1052,11 +1052,11 @@ function CollectionDocumentSuite () {
assertEqual(a1._id, doc4._id);
assertEqual(a4._rev, doc4._rev);
assertEqual(4, doc4.a);
var a5 = collection.update(a4, { b : 1, c : 2, d : "foo", e : null });
assertEqual(a1._id, a5._id);
assertNotEqual(a4._rev, a5._rev);
var doc5 = collection.document(a1._id);
assertEqual(a1._id, doc5._id);
assertEqual(a5._rev, doc5._rev);
@ -1069,7 +1069,7 @@ function CollectionDocumentSuite () {
var a6 = collection.update(a5, { f : null, b : null, a : null, g : 2, c : 4 });
assertEqual(a1._id, a6._id);
assertNotEqual(a5._rev, a6._rev);
var doc6 = collection.document(a1._id);
assertEqual(a1._id, doc6._id);
assertEqual(a6._rev, doc6._rev);
@ -1080,11 +1080,11 @@ function CollectionDocumentSuite () {
assertEqual(null, doc6.e);
assertEqual(null, doc6.f);
assertEqual(2, doc6.g);
var a7 = collection.update(a6, { a : null, b : null, c : null, g : null }, {"overwrite": true, "keepNull": false});
assertEqual(a1._id, a7._id);
assertNotEqual(a6._rev, a7._rev);
var doc7 = collection.document(a1._id);
assertEqual(a1._id, doc7._id);
assertEqual(a7._rev, doc7._rev);
@ -1095,33 +1095,33 @@ function CollectionDocumentSuite () {
assertEqual(null, doc7.e);
assertEqual(null, doc7.f);
assertEqual(undefined, doc7.g);
var a8 = collection.update(a7, { d : { "one" : 1, "two" : 2, "three" : 3 }, e : { }, f : { "one" : 1 }} );
assertEqual(a1._id, a8._id);
assertNotEqual(a7._rev, a8._rev);
var doc8 = collection.document(a1._id);
assertEqual(a1._id, doc8._id);
assertEqual(a8._rev, doc8._rev);
assertEqual({"one": 1, "two": 2, "three": 3}, doc8.d);
assertEqual({}, doc8.e);
assertEqual({"one": 1}, doc8.f);
var a9 = collection.update(a8, { d : { "four" : 4 }, "e" : { "e1" : [ 1, 2 ], "e2" : 2 }, "f" : { "three" : 3 }} );
assertEqual(a1._id, a9._id);
assertNotEqual(a8._rev, a9._rev);
var doc9 = collection.document(a1._id);
assertEqual(a1._id, doc9._id);
assertEqual(a9._rev, doc9._rev);
assertEqual({"one": 1, "two": 2, "three": 3, "four": 4}, doc9.d);
assertEqual({"e2": 2, "e1": [ 1, 2 ]}, doc9.e);
assertEqual({"one": 1, "three": 3}, doc9.f);
var a10 = collection.update(a9, { d : { "one" : -1, "two": null, "four" : null, "five" : 5 }, "e" : { "e1" : 1, "e2" : null, "e3" : 3 }}, {"overwrite": true, "keepNull": false});
assertEqual(a1._id, a10._id);
assertNotEqual(a9._rev, a10._rev);
var doc10 = collection.document(a1._id);
assertEqual(a1._id, doc10._id);
assertEqual(a10._rev, doc10._rev);
@ -1143,7 +1143,7 @@ function CollectionDocumentSuite () {
var doc = collection.document(doc1._key);
assertEqual({ first: "foo", last: "bar", middle: "baz" }, doc.name);
assertEqual({ evilCellPhone: [ 1 ], schabernack: true, pileOfBones: null }, doc.owns);
// explicitly specify mergeObjects
var doc2 = collection.save({ name: { first: "foo", last: "bar" }, owns: { evilCellPhone: [ 1 ] } });
collection.update(doc2, { name: { middle: "baz" }, owns: { schabernack: true, pileOfBones: null } }, { mergeObjects: true });
@ -1151,7 +1151,7 @@ function CollectionDocumentSuite () {
doc = collection.document(doc2._key);
assertEqual({ first: "foo", last: "bar", middle: "baz" }, doc.name);
assertEqual({ evilCellPhone: [ 1 ], schabernack: true, pileOfBones: null }, doc.owns);
// disable mergeObjects
var doc3 = collection.save({ name: { first: "foo", last: "bar" }, owns: { evilCellPhone: [ 1 ] } });
collection.update(doc3, { name: { middle: "baz" }, owns: { schabernack: true, pileOfBones: null } }, { mergeObjects: false });
@ -1577,7 +1577,7 @@ function DatabaseDocumentSuite () {
assertTrue(db._exists(cn + "/baz"));
// object key
assertTrue(db._exists(d1));
var d2 = collection.save({ _key : "2" });
// string keys
assertFalse(db._exists(cn + "/1"));
@ -1741,11 +1741,11 @@ function DatabaseDocumentSuite () {
assertEqual(a1._id, doc4._id);
assertEqual(a4._rev, doc4._rev);
assertEqual(4, doc4.a);
var a5 = db._update(a4, { b : 1, c : 2, d : "foo", e : null });
assertEqual(a1._id, a5._id);
assertNotEqual(a4._rev, a5._rev);
var doc5 = db._document(a1._id);
assertEqual(a1._id, doc5._id);
assertEqual(a5._rev, doc5._rev);
@ -1758,7 +1758,7 @@ function DatabaseDocumentSuite () {
var a6 = db._update(a5, { f : null, b : null, a : null, g : 2, c : 4 });
assertEqual(a1._id, a6._id);
assertNotEqual(a5._rev, a6._rev);
var doc6 = db._document(a1._id);
assertEqual(a1._id, doc6._id);
assertEqual(a6._rev, doc6._rev);
@ -1769,11 +1769,11 @@ function DatabaseDocumentSuite () {
assertEqual(null, doc6.e);
assertEqual(null, doc6.f);
assertEqual(2, doc6.g);
var a7 = db._update(a6, { a : null, b : null, c : null, g : null }, true, false);
assertEqual(a1._id, a7._id);
assertNotEqual(a6._rev, a7._rev);
var doc7 = db._document(a1._id);
assertEqual(a1._id, doc7._id);
assertEqual(a7._rev, doc7._rev);
@ -1784,33 +1784,33 @@ function DatabaseDocumentSuite () {
assertEqual(null, doc7.e);
assertEqual(null, doc7.f);
assertEqual(undefined, doc7.g);
var a8 = db._update(a7, { d : { "one" : 1, "two" : 2, "three" : 3 }, e : { }, f : { "one" : 1 }} );
assertEqual(a1._id, a8._id);
assertNotEqual(a7._rev, a8._rev);
var doc8 = db._document(a1._id);
assertEqual(a1._id, doc8._id);
assertEqual(a8._rev, doc8._rev);
assertEqual({"one": 1, "two": 2, "three": 3}, doc8.d);
assertEqual({}, doc8.e);
assertEqual({"one": 1}, doc8.f);
var a9 = db._update(a8, { d : { "four" : 4 }, "e" : { "e1" : [ 1, 2 ], "e2" : 2 }, "f" : { "three" : 3 }} );
assertEqual(a1._id, a9._id);
assertNotEqual(a8._rev, a9._rev);
var doc9 = db._document(a1._id);
assertEqual(a1._id, doc9._id);
assertEqual(a9._rev, doc9._rev);
assertEqual({"one": 1, "two": 2, "three": 3, "four": 4}, doc9.d);
assertEqual({"e2": 2, "e1": [ 1, 2 ]}, doc9.e);
assertEqual({"one": 1, "three": 3}, doc9.f);
var a10 = db._update(a9, { d : { "one" : -1, "two": null, "four" : null, "five" : 5 }, "e" : { "e1" : 1, "e2" : null, "e3" : 3 }}, true, false);
assertEqual(a1._id, a10._id);
assertNotEqual(a9._rev, a10._rev);
var doc10 = db._document(a1._id);
assertEqual(a1._id, doc10._id);
assertEqual(a10._rev, doc10._rev);
@ -1858,11 +1858,11 @@ function DatabaseDocumentSuite () {
assertEqual(a1._id, doc4._id);
assertEqual(a4._rev, doc4._rev);
assertEqual(4, doc4.a);
var a5 = db._update(a4, { b : 1, c : 2, d : "foo", e : null });
assertEqual(a1._id, a5._id);
assertNotEqual(a4._rev, a5._rev);
var doc5 = db._document(a1._id);
assertEqual(a1._id, doc5._id);
assertEqual(a5._rev, doc5._rev);
@ -1875,7 +1875,7 @@ function DatabaseDocumentSuite () {
var a6 = db._update(a5, { f : null, b : null, a : null, g : 2, c : 4 });
assertEqual(a1._id, a6._id);
assertNotEqual(a5._rev, a6._rev);
var doc6 = db._document(a1._id);
assertEqual(a1._id, doc6._id);
assertEqual(a6._rev, doc6._rev);
@ -1886,11 +1886,11 @@ function DatabaseDocumentSuite () {
assertEqual(null, doc6.e);
assertEqual(null, doc6.f);
assertEqual(2, doc6.g);
var a7 = db._update(a6, { a : null, b : null, c : null, g : null }, {"overwrite": true, "keepNull": false});
assertEqual(a1._id, a7._id);
assertNotEqual(a6._rev, a7._rev);
var doc7 = db._document(a1._id);
assertEqual(a1._id, doc7._id);
assertEqual(a7._rev, doc7._rev);
@ -1901,33 +1901,33 @@ function DatabaseDocumentSuite () {
assertEqual(null, doc7.e);
assertEqual(null, doc7.f);
assertEqual(undefined, doc7.g);
var a8 = db._update(a7, { d : { "one" : 1, "two" : 2, "three" : 3 }, e : { }, f : { "one" : 1 }} );
assertEqual(a1._id, a8._id);
assertNotEqual(a7._rev, a8._rev);
var doc8 = db._document(a1._id);
assertEqual(a1._id, doc8._id);
assertEqual(a8._rev, doc8._rev);
assertEqual({"one": 1, "two": 2, "three": 3}, doc8.d);
assertEqual({}, doc8.e);
assertEqual({"one": 1}, doc8.f);
var a9 = db._update(a8, { d : { "four" : 4 }, "e" : { "e1" : [ 1, 2 ], "e2" : 2 }, "f" : { "three" : 3 }} );
assertEqual(a1._id, a9._id);
assertNotEqual(a8._rev, a9._rev);
var doc9 = db._document(a1._id);
assertEqual(a1._id, doc9._id);
assertEqual(a9._rev, doc9._rev);
assertEqual({"one": 1, "two": 2, "three": 3, "four": 4}, doc9.d);
assertEqual({"e2": 2, "e1": [ 1, 2 ]}, doc9.e);
assertEqual({"one": 1, "three": 3}, doc9.f);
var a10 = db._update(a9, { d : { "one" : -1, "two": null, "four" : null, "five" : 5 }, "e" : { "e1" : 1, "e2" : null, "e3" : 3 }}, {"overwrite": true, "keepNull": false});
assertEqual(a1._id, a10._id);
assertNotEqual(a9._rev, a10._rev);
var doc10 = db._document(a1._id);
assertEqual(a1._id, doc10._id);
assertEqual(a10._rev, doc10._rev);
@ -2037,7 +2037,7 @@ function DatabaseDocumentSuite () {
},
////////////////////////////////////////////////////////////////////////////////
/// @brief create a very big document
/// @brief create a very big document
////////////////////////////////////////////////////////////////////////////////
testDocumentVeryLarge : function () {
@ -2338,6 +2338,52 @@ function DatabaseDocumentSuiteReturnStuff () {
assertTypeOf("string", res2._rev);
},
////////////////////////////////////////////////////////////////////////////////
/// @brief use overwrite option
////////////////////////////////////////////////////////////////////////////////
testInsertOverwrite : function () {
var docHandle = collection.insert({ a : 1});
var key = docHandle._key;
// normal insert with same key must fail!
try{
var res = collection.insert({a : 2, _key : key});
fail();
}
catch (err) {
assertEqual(ERRORS.ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED.code, err.errorNum);
}
// overwrite with same key must work
var rv = collection.insert({c : 3, _key: key},{overwrite:true, returnOld:true, returnNew:true});
var arr = collection.toArray();
assertEqual(arr.length, 1);
assertEqual(rv.new.c, 3);
assertFalse(rv.new.hasOwnProperty('a'));
assertEqual(rv.old.a, 1);
// overwrite (babies) with same key must work
collection.insert({b : 2, _key: key},{overwrite:true});
arr = collection.toArray();
assertEqual(arr.length, 1);
assertEqual(arr[0].b, 2);
// overwrite (babies) with same key must work
collection.insert([{a : 3, _key: key}, {a : 4, _key: key}, {a : 5, _key: key}], {overwrite:true});
arr = collection.toArray();
assertEqual(arr.length, 1);
assertEqual(arr[0].a, 5);
rv = collection.insert({x : 3},{overwrite:true, returnNew:true});
assertEqual(rv.new.x, 3);
assertTypeOf("string", rv._id);
assertTypeOf("string", rv._key);
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test new features from 3.0
////////////////////////////////////////////////////////////////////////////////

View File

@ -983,7 +983,7 @@ function ahuacatlInsertSuite () {
assertEqual(expected, sanitizeStats(actual.stats));
assertEqual(50, edge.count());
actual.json = actual.json.sort(function(l, r) {
return l.value[0] - r.value[0];
});
@ -1008,9 +1008,63 @@ function ahuacatlInsertSuite () {
}
db._drop("UnitTestsAhuacatlEdge");
}
},
};
////////////////////////////////////////////////////////////////////////////////
/// @brief test insert
////////////////////////////////////////////////////////////////////////////////
testInsertOverwrite : function () {
c1.truncate();
assertEqual(0, c1.count());
var rv1 = db._query(" INSERT { _key: '123', name: 'ulf' } IN @@cn OPTIONS { overwrite: false } RETURN NEW", { "@cn": cn1 });
assertEqual(1, c1.count());
var doc1 = rv1.toArray()[0];
assertEqual(doc1._key, '123');
assertEqual(doc1.name, 'ulf');
var rv2 = db._query(" INSERT { _key: '123', name: 'ulfine' } IN @@cn OPTIONS { overwrite: true } RETURN {old: OLD, new: NEW}", { "@cn": cn1 });
assertEqual(1, c1.count());
var doc2 = rv2.toArray()[0];
assertEqual(doc2.new._key, '123');
assertEqual(doc2.new.name, 'ulfine');
assertEqual(doc2.old._rev, doc1._rev);
assertEqual(doc2.old._key, doc1._key);
assertEqual(doc2.old.name, doc1.name);
var rv3 = db._query(`
LET x = (
FOR a IN 3..5
INSERT { _key: CONCAT('12',a), name: a }
IN @@cn
OPTIONS { overwrite: true }
RETURN {old: OLD, new: NEW}
)
FOR d IN x SORT d.new._key
RETURN d
`, { "@cn": cn1 });
var resultArray3 = rv3.toArray();
assertEqual(3, c1.count());
var doc3a = resultArray3[0];
var doc3b = resultArray3[1];
var doc3c = resultArray3[2];
assertEqual(doc3a.old._rev, doc2.new._rev);
assertEqual(doc3a.old._key, doc2.new._key);
assertEqual(doc3a.old.name, "ulfine");
assertEqual(doc3a.new.name, 3);
assertEqual(doc3b.old, null);
assertEqual(doc3b.new.name, 4);
assertEqual(doc3c.old, null);
assertEqual(doc3c.new.name, 5);
},
}; // end insert tests
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -61,6 +61,7 @@ std::string const StaticStrings::Group("group");
std::string const StaticStrings::Namespace("namespace");
std::string const StaticStrings::Prefix("prefix");
std::string const StaticStrings::ReplaceExisting("replaceExisting");
std::string const StaticStrings::OverWrite("overwrite");
// replication headers
std::string const StaticStrings::ReplicationHeaderCheckMore("x-arango-replication-checkmore");

View File

@ -65,7 +65,8 @@ class StaticStrings {
static std::string const Group;
static std::string const Namespace;
static std::string const ReplaceExisting;
static std::string const Prefix;;
static std::string const Prefix;
static std::string const OverWrite;
// replication headers
static std::string const ReplicationHeaderCheckMore;
@ -74,8 +75,8 @@ class StaticStrings {
static std::string const ReplicationHeaderLastTick;
static std::string const ReplicationHeaderFromPresent;
static std::string const ReplicationHeaderActive;
// database and collection names
// database and collection names
static std::string const SystemDatabase;
// LogicalDataSource definition fields

View File

@ -79,6 +79,7 @@ TRI_v8_global_t::TRI_v8_global_t(v8::Isolate* isolate)
MergeObjectsKey(),
NameKey(),
OperationIDKey(),
OverwriteKey(),
ParametersKey(),
PathKey(),
PrefixKey(),
@ -206,7 +207,7 @@ TRI_v8_global_t::TRI_v8_global_t(v8::Isolate* isolate)
_FromKey.Reset(isolate, TRI_V8_ASCII_STRING(isolate, "_from"));
_ToKey.Reset(isolate, TRI_V8_ASCII_STRING(isolate, "_to"));
}
TRI_v8_global_t::~TRI_v8_global_t() {}
/// @brief creates a global context
@ -248,7 +249,7 @@ void TRI_AddMethodVocbase(
/// @brief adds a global function to the given context
void TRI_AddGlobalFunctionVocbase(
v8::Isolate* isolate,
v8::Isolate* isolate,
v8::Handle<v8::String> name,
void (*func)(v8::FunctionCallbackInfo<v8::Value> const&), bool isHidden) {
// all global functions are read-only