1
0
Fork 0

Implement replace and update on cluster.

This still has a bug where a TRI_json_t is freed too early.
This commit is contained in:
Max Neunhoeffer 2014-01-28 16:49:44 +01:00
parent 59ad7cab9a
commit c3f01237b0
15 changed files with 405 additions and 65 deletions

View File

@ -607,40 +607,40 @@ BOOST_AUTO_TEST_CASE (tst_json_hashattributes_single) {
const char* v1[] = { "_key" };
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ }");
const uint64_t h1 = TRI_HashJsonByAttributes(json, v1, 1);
const uint64_t h1 = TRI_HashJsonByAttributes(json, v1, 1, true, NULL);
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"_key\": null }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 1));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 1, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": \"foobar\" }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 1));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 1, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": \"foobar\", \"_key\": null }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 1));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 1, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": \"foobar\", \"keys\": { \"_key\": \"foobar\" } }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 1));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 1, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": \"foobar\", \"KEY\": 1234, \"_KEY\": \"foobar\" }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 1));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 1, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"_key\": \"i-am-a-foo\" }");
const uint64_t h2 = TRI_HashJsonByAttributes(json, v1, 1);
const uint64_t h2 = TRI_HashJsonByAttributes(json, v1, 1, true, NULL);
BOOST_CHECK(h1 != h2);
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": \"foobar\", \"KEY\": 1234, \"_key\": \"i-am-a-foo\" }");
BOOST_CHECK_EQUAL(h2, TRI_HashJsonByAttributes(json, v1, 1));
BOOST_CHECK_EQUAL(h2, TRI_HashJsonByAttributes(json, v1, 1, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": [ \"foobar\" ], \"KEY\": { }, \"_key\": \"i-am-a-foo\" }");
BOOST_CHECK_EQUAL(h2, TRI_HashJsonByAttributes(json, v1, 1));
BOOST_CHECK_EQUAL(h2, TRI_HashJsonByAttributes(json, v1, 1, true, NULL));
FREE_JSON
}
@ -654,48 +654,48 @@ BOOST_AUTO_TEST_CASE (tst_json_hashattributes_mult1) {
const char* v1[] = { "a", "b" };
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ }");
const uint64_t h1 = TRI_HashJsonByAttributes(json, v1, 2);
const uint64_t h1 = TRI_HashJsonByAttributes(json, v1, 2, true, NULL);
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": null, \"b\": null }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"b\": null, \"a\": null }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": null }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"b\": null }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
// test if non-relevant attributes influence our hash
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": null, \"B\": 123 }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"B\": 1234, \"a\": null }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": null, \"A\": 123, \"B\": \"hihi\" }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"c\": null, \"d\": null }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"A\": 1, \"B\": 2, \" a\": \"bar\" }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"ab\": 1, \"ba\": 2 }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
}
@ -710,39 +710,80 @@ BOOST_AUTO_TEST_CASE (tst_json_hashattributes_mult2) {
const uint64_t h1 = 6369173190757857502ULL;
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": \"foo\", \"b\": \"bar\" }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"b\": \"bar\", \"a\": \"foo\" }");
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(h1, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": \"food\", \"b\": \"bar\" }");
BOOST_CHECK_EQUAL(720060016857102700ULL, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(720060016857102700ULL, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": \"foo\", \"b\": \"baz\" }");
BOOST_CHECK_EQUAL(6361520589827022742ULL, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(6361520589827022742ULL, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": \"FOO\", \"b\": \"BAR\" }");
BOOST_CHECK_EQUAL(3595137217367956894ULL, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(3595137217367956894ULL, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": \"foo\" }");
BOOST_CHECK_EQUAL(12739237936894360852ULL, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(12739237936894360852ULL, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": \"foo\", \"b\": \"meow\" }");
BOOST_CHECK_EQUAL(13378327204915572311ULL, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(13378327204915572311ULL, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"b\": \"bar\" }");
BOOST_CHECK_EQUAL(10085884912118216755ULL, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(10085884912118216755ULL, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"b\": \"bar\", \"a\": \"meow\" }");
BOOST_CHECK_EQUAL(15753579192430387496ULL, TRI_HashJsonByAttributes(json, v1, 2));
BOOST_CHECK_EQUAL(15753579192430387496ULL, TRI_HashJsonByAttributes(json, v1, 2, true, NULL));
FREE_JSON
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test hashing by attribute names with incomplete docs
///
/// Exercises the `docComplete == false` mode of TRI_HashJsonByAttributes:
/// if any sharding attribute is missing from the document, the function
/// must report TRI_ERROR_CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN via
/// the error out-parameter; otherwise TRI_ERROR_NO_ERROR.
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE (tst_json_hashattributes_mult3) {
TRI_json_t* json;
const char* v1[] = { "a", "b" };
int error;
// Both sharding attributes present -> no error.
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": \"foo\", \"b\": \"bar\" }");
TRI_HashJsonByAttributes(json, v1, 2, false, &error);
BOOST_CHECK_EQUAL(TRI_ERROR_NO_ERROR, error);
FREE_JSON
// "b" missing -> error expected.
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": \"foo\" }");
TRI_HashJsonByAttributes(json, v1, 2, false, &error);
BOOST_CHECK_EQUAL(TRI_ERROR_CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN, error);
FREE_JSON
// "a" missing -> error expected.
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"b\": \"bar\" }");
TRI_HashJsonByAttributes(json, v1, 2, false, &error);
BOOST_CHECK_EQUAL(TRI_ERROR_CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN, error);
FREE_JSON
// Empty document -> both attributes missing -> error expected.
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ }");
TRI_HashJsonByAttributes(json, v1, 2, false, &error);
BOOST_CHECK_EQUAL(TRI_ERROR_CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN, error);
FREE_JSON
// Only an unrelated attribute -> error expected.
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"c\": 12 }");
TRI_HashJsonByAttributes(json, v1, 2, false, &error);
BOOST_CHECK_EQUAL(TRI_ERROR_CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN, error);
FREE_JSON
// An explicit null value counts as "given" -> no error.
json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, "{ \"a\": 1, \"b\": null }");
TRI_HashJsonByAttributes(json, v1, 2, false, &error);
BOOST_CHECK_EQUAL(TRI_ERROR_NO_ERROR, error);
FREE_JSON
}

View File

@ -807,6 +807,8 @@ void ClusterInfo::loadCurrentCollections (bool acquireLock) {
(json, "DBServer", "");
if (DBserver != "") {
_shardIds.insert(make_pair(shardID, DBserver));
cout << "_shardIDs[" << shardID << "] == " << DBserver
<< DBserver.size() << endl;
}
}
_collectionsCurrentValid = true;
@ -1559,16 +1561,20 @@ ServerID ClusterInfo::getResponsibleServer (ShardID const& shardID) {
/// values for some of the sharding attributes are silently ignored
/// and treated as if these values were `null`. The second mode
/// (`docComplete`==false) leads to an error which is reported by
/// returning an empty string as the shardID. If the collection is
/// found, the flag `usesDefaultShardingAttributes` is used set to
/// `true` if `_key` is the one and only sharding attribute and is
/// set to `false` otherwise.
/// returning TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND, which is the only
/// error code that can be returned.
///
/// In either case, if the collection is found, the variable
/// shardID is set to the ID of the responsible shard and the flag
/// `usesDefaultShardingAttributes` is set to `true` if and only if
/// `_key` is the one and only sharding attribute.
////////////////////////////////////////////////////////////////////////////////
ShardID ClusterInfo::getResponsibleShard (CollectionID const& collectionID,
TRI_json_t const* json,
bool docComplete,
bool& usesDefaultShardingAttributes) {
int ClusterInfo::getResponsibleShard (CollectionID const& collectionID,
TRI_json_t const* json,
bool docComplete,
ShardID& shardID,
bool& usesDefaultShardingAttributes) {
// Note that currently we take the number of shards and the shardKeys
// from Plan, since they are immutable. Later we will have to switch
// this to Current, when we allow to add and remove shards.
@ -1580,6 +1586,7 @@ ShardID ClusterInfo::getResponsibleShard (CollectionID const& collectionID,
TRI_shared_ptr<vector<string> > shardKeysPtr;
char const** shardKeys = 0;
TRI_shared_ptr<vector<ShardID> > shards;
bool found = false;
while (++tries <= 2) {
{
@ -1601,6 +1608,7 @@ ShardID ClusterInfo::getResponsibleShard (CollectionID const& collectionID,
}
usesDefaultShardingAttributes = shardKeysPtr->size() == 1 &&
shardKeysPtr->at(0) == "_key";
found = true;
break; // all OK
}
}
@ -1608,15 +1616,18 @@ ShardID ClusterInfo::getResponsibleShard (CollectionID const& collectionID,
}
loadPlannedCollections();
}
if (0 == shardKeys) {
return string("");
if (!found) {
return TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND;
}
int error;
uint64_t hash = TRI_HashJsonByAttributes(json, shardKeys,
shardKeysPtr->size());
shardKeysPtr->size(),
docComplete, &error);
delete[] shardKeys;
return shards->at(hash % shards->size());
shardID = shards->at(hash % shards->size());
return error;
}

View File

@ -908,9 +908,10 @@ namespace triagens {
/// @brief find the shard that is responsible for a document
////////////////////////////////////////////////////////////////////////////////
ShardID getResponsibleShard (CollectionID const&, TRI_json_t const*,
bool docComplete,
bool& usesDefaultShardingAttributes);
int getResponsibleShard (CollectionID const&, TRI_json_t const*,
bool docComplete,
ShardID& shardID,
bool& usesDefaultShardingAttributes);
private:

View File

@ -92,11 +92,12 @@ int createDocumentOnCoordinator (
// Now find the responsible shard:
bool usesDefaultShardingAttributes;
ShardID shardID = ci->getResponsibleShard( collid, json, true,
usesDefaultShardingAttributes );
if (shardID == "") {
ShardID shardID;
int error = ci->getResponsibleShard( collid, json, true, shardID,
usesDefaultShardingAttributes );
if (error == TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND) {
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json);
return TRI_ERROR_SHARD_GONE;
return TRI_ERROR_CLUSTER_SHARD_GONE;
}
// Now perform the above mentioned check:
@ -180,8 +181,9 @@ int deleteDocumentOnCoordinator (
TRI_CreateStringReference2Json(TRI_UNKNOWN_MEM_ZONE,
key.c_str(), key.size()));
bool usesDefaultShardingAttributes;
ShardID shardID = ci->getResponsibleShard( collid, json, true,
usesDefaultShardingAttributes );
ShardID shardID;
int error = ci->getResponsibleShard( collid, json, true, shardID,
usesDefaultShardingAttributes );
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json);
// Some stuff to prepare cluster-intern requests:
@ -197,8 +199,8 @@ int deleteDocumentOnCoordinator (
if (usesDefaultShardingAttributes) {
// OK, this is the fast method, we only have to ask one shard:
if (shardID == "") {
return TRI_ERROR_SHARD_GONE;
if (error == TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND) {
return TRI_ERROR_CLUSTER_SHARD_GONE;
}
// Send a synchronous request to that shard using ClusterComm:
@ -311,8 +313,9 @@ int getDocumentOnCoordinator (
TRI_CreateStringReference2Json(TRI_UNKNOWN_MEM_ZONE,
key.c_str(), key.size()));
bool usesDefaultShardingAttributes;
ShardID shardID = ci->getResponsibleShard( collid, json, true,
usesDefaultShardingAttributes );
ShardID shardID;
int error = ci->getResponsibleShard( collid, json, true, shardID,
usesDefaultShardingAttributes );
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json);
// Some stuff to prepare cluster-intern requests:
@ -331,8 +334,8 @@ int getDocumentOnCoordinator (
if (usesDefaultShardingAttributes) {
// OK, this is the fast method, we only have to ask one shard:
if (shardID == "") {
return TRI_ERROR_SHARD_GONE;
if (error == TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND) {
return TRI_ERROR_CLUSTER_SHARD_GONE;
}
// Set up revision string or header:
@ -415,6 +418,179 @@ int getDocumentOnCoordinator (
// the DBserver could have reported an error.
}
////////////////////////////////////////////////////////////////////////////////
/// @brief modify a document in a coordinator
///
/// Covers both "replace" (isPatch == false, sent as HTTP PUT) and
/// "update" (isPatch == true, sent as HTTP PATCH). The responsible shard
/// is determined from the (possibly partial) document; if that fails
/// because not all sharding attributes are given, the request is
/// broadcast to all shards ("slow path") and exactly one of them is
/// expected to answer with something other than 404.
///
/// On success returns TRI_ERROR_NO_ERROR and fills `responseCode`,
/// `contentType` and `resultBody` with the DBserver's answer (which may
/// itself be an HTTP error that the caller forwards). `json` is always
/// freed inside this function.
///
/// NOTE(review): `policy` and `waitForSync` are accepted but not used in
/// the body below (neither appended to the query string nor to headers)
/// — presumably still to be wired up; TODO confirm.
////////////////////////////////////////////////////////////////////////////////
int modifyDocumentOnCoordinator (
string const& dbname,
string const& collname,
string const& key,
TRI_voc_rid_t const rev,
TRI_doc_update_policy_e policy,
bool waitForSync,
bool isPatch,
string const& keepNull, // only for isPatch == true
TRI_json_t* json,
triagens::rest::HttpResponse::HttpResponseCode& responseCode,
string& contentType,
string& resultBody) {
// Set a few variables needed for our work:
ClusterInfo* ci = ClusterInfo::instance();
ClusterComm* cc = ClusterComm::instance();
// First determine the collection ID from the name:
CollectionInfo collinfo = ci->getCollection(dbname, collname);
if (collinfo.empty()) {
return TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND;
}
string collid = StringUtils::itoa(collinfo.id());
// We have a fast path and a slow path. The fast path only asks one shard
// to do the job and the slow path asks them all and expects to get
// "not found" from all but one shard. We have to cover the following
// cases:
// isPatch == false (this is a "replace" operation)
// Here, the complete new document is given, we assume that we
// can read off the responsible shard, therefore can use the fast
// path, this is always true if _key is the one and only sharding
// attribute, however, if there is any other sharding attribute,
// it is possible that the user has changed the values in any of
// them, in that case we will get a "not found" or a "sharding
// attributes changed answer" in the fast path. In the latter case
// we have to delegate to the slow path.
// isPatch == true (this is an "update" operation)
// In this case we might or might not have all sharding attributes
// specified in the partial document given. If _key is the one and
// only sharding attribute, it is always given, if not all sharding
// attributes are explicitly given (at least as value `null`), we must
// assume that the fast path cannot be used. If all sharding attributes
// are given, we first try the fast path, but might, as above,
// have to use the slow path after all.
bool usesDefaultShardingAttributes;
ShardID shardID;
int error;
// For a patch the document may legitimately be incomplete
// (docComplete == false); for a replace it must be complete.
if (isPatch) {
error = ci->getResponsibleShard( collid, json, false, shardID,
usesDefaultShardingAttributes );
}
else {
error = ci->getResponsibleShard( collid, json, true, shardID,
usesDefaultShardingAttributes );
}
if (error == TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND) {
return error;
}
// Some stuff to prepare cluster-internal requests:
ClusterCommResult* res;
string revstr;
if (rev != 0) {
revstr = "?rev="+StringUtils::itoa(rev);
}
triagens::rest::HttpRequest::HttpRequestType reqType;
if (isPatch) {
reqType = triagens::rest::HttpRequest::HTTP_REQUEST_PATCH;
// keepNull is a PATCH-only option; append it to the query string,
// starting it with '?' or '&' depending on whether ?rev= is present.
if (!keepNull.empty()) {
if (revstr.empty()) {
revstr += "?keepNull=";
}
else {
revstr += "&keepNull=";
}
revstr += keepNull;
}
}
else {
reqType = triagens::rest::HttpRequest::HTTP_REQUEST_PUT;
}
// Serialise the body first, then free the JSON — after this point
// `json` must not be touched again.
string body = JsonHelper::toString(json);
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json);
if (!isPatch ||
error != TRI_ERROR_CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN) {
// This is the fast method, we only have to ask one shard, unless
// we are in isPatch==false and the user has actually changed the
// sharding attributes
// Set up revision string or header:
map<string, string> headers;
// Send a synchronous request to that shard using ClusterComm:
// NOTE(review): shardID is URL-encoded here but `key` is not —
// TODO confirm whether keys can contain characters needing encoding.
res = cc->syncRequest("", TRI_NewTickServer(), "shard:"+shardID, reqType,
"/_db/"+dbname+"/_api/document/"+
StringUtils::urlEncode(shardID)+"/"+key+
revstr, body.c_str(), body.size(), headers, 60.0);
if (res->status == CL_COMM_TIMEOUT) {
// No reply, we give up:
delete res;
return TRI_ERROR_CLUSTER_TIMEOUT;
}
if (res->status == CL_COMM_ERROR) {
// This could be a broken connection or an Http error:
if (!res->result->isComplete()) {
delete res;
return TRI_ERROR_CLUSTER_CONNECTION_LOST;
}
// In this case a proper HTTP error was reported by the DBserver,
// this can be 400 or 404, we simply forward the result.
// We intentionally fall through here.
}
// Now we have to distinguish whether we still have to go the slow way:
responseCode = static_cast<triagens::rest::HttpResponse::HttpResponseCode>
(res->result->getHttpReturnCode());
if (responseCode < triagens::rest::HttpResponse::BAD) {
// OK, we are done, let's report:
contentType = res->result->getContentType(false);
resultBody = res->result->getBody().str();
delete res;
return TRI_ERROR_NO_ERROR;
}
// Fast path answered with an error (e.g. "not found" because the
// sharding attributes changed) — fall through to the slow path.
delete res;
}
// If we get here, we have to do it the slow way and contact everybody:
map<ShardID, ServerID> shards = collinfo.shardIds();
map<ShardID, ServerID>::iterator it;
CoordTransactionID coordTransactionID = TRI_NewTickServer();
for (it = shards.begin(); it != shards.end(); ++it) {
// headers ownership is passed to asyncRequest (heap-allocated map).
map<string, string>* headers = new map<string, string>;
res = cc->asyncRequest("", coordTransactionID, "shard:"+it->first, reqType,
"/_db/"+dbname+"/_api/document/"+
StringUtils::urlEncode(it->first)+"/"+key+revstr,
body.c_str(), body.size(), headers, NULL, 60.0);
delete res;
}
// Now listen to the results:
int count;
int nrok = 0;
for (count = shards.size(); count > 0; count--) {
res = cc->wait( "", coordTransactionID, 0, "", 0.0);
if (res->status == CL_COMM_RECEIVED) {
// Accept the first non-404 answer; if every shard said 404, keep
// the very last answer so the caller still gets a response.
if (res->answer_code != triagens::rest::HttpResponse::NOT_FOUND ||
(nrok == 0 && count == 1)) {
nrok++;
responseCode = res->answer_code;
contentType = res->answer->header("content-type");
resultBody = string(res->answer->body(), res->answer->bodySize());
}
}
delete res;
}
// Note that nrok is always at least 1!
if (nrok > 1) {
return TRI_ERROR_CLUSTER_GOT_CONTRADICTING_ANSWERS;
}
return TRI_ERROR_NO_ERROR; // the cluster operation was OK, however,
// the DBserver could have reported an error.
}
} // namespace arango
} // namespace triagens

View File

@ -89,6 +89,24 @@ namespace triagens {
string& contentType,
string& resultBody);
////////////////////////////////////////////////////////////////////////////////
/// @brief modify a document in a coordinator
////////////////////////////////////////////////////////////////////////////////
int modifyDocumentOnCoordinator (
string const& dbname,
string const& collname,
string const& key,
TRI_voc_rid_t const rev,
TRI_doc_update_policy_e policy,
bool waitForSync,
bool isPatch,
string const& keepNull,
TRI_json_t* json,
triagens::rest::HttpResponse::HttpResponseCode& responseCode,
string& contentType,
string& resultBody);
// -----------------------------------------------------------------------------
// --SECTION-- public functions
// -----------------------------------------------------------------------------

View File

@ -1306,6 +1306,14 @@ bool RestDocumentHandler::modifyDocument (bool isPatch) {
const TRI_doc_update_policy_e policy = extractUpdatePolicy();
const bool waitForSync = extractWaitForSync();
#ifdef TRI_ENABLE_CLUSTER
if (ServerState::instance()->isCoordinator()) {
// json will be freed inside
return modifyDocumentCoordinator(collection, key, revision, policy,
waitForSync, isPatch, json);
}
#endif
TRI_doc_mptr_t document;
// find and load collection given by name or identifier
@ -1426,6 +1434,43 @@ bool RestDocumentHandler::modifyDocument (bool isPatch) {
return true;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief modifies a document, coordinator case in a cluster
///
/// Thin REST-handler wrapper: extracts the database name and the
/// `keepNull` URL parameter from the current request and delegates to
/// triagens::arango::modifyDocumentOnCoordinator, then forwards the
/// DBserver's response verbatim. `json` is freed inside the callee.
////////////////////////////////////////////////////////////////////////////////
#ifdef TRI_ENABLE_CLUSTER
bool RestDocumentHandler::modifyDocumentCoordinator (
string const& collname,
string const& key,
TRI_voc_rid_t const rev,
TRI_doc_update_policy_e policy,
bool waitForSync,
bool isPatch,
TRI_json_t* json) {
string const& dbname = _request->originalDatabaseName();
triagens::rest::HttpResponse::HttpResponseCode responseCode;
string contentType;
string resultBody;
string const keepNull = _request->value("keepNull");
int error = triagens::arango::modifyDocumentOnCoordinator(
dbname, collname, key, rev, policy, waitForSync, isPatch,
keepNull, json, responseCode, contentType, resultBody);
if (error != TRI_ERROR_NO_ERROR) {
generateTransactionError(collname, error);
return false;
}
// Essentially return the response we got from the DBserver, be it
// OK or an error:
_response = createResponse(responseCode);
_response->setContentType(contentType);
_response->body().appendText(resultBody.c_str(), resultBody.size());
// NOTE(review): this returns true exactly when responseCode >= 400,
// i.e. true on HTTP error — looks inverted relative to the usual
// "true on success" convention of these handlers; TODO confirm.
return responseCode >= triagens::rest::HttpResponse::BAD;
}
#endif
////////////////////////////////////////////////////////////////////////////////
/// @brief deletes a document
///

View File

@ -193,6 +193,18 @@ namespace triagens {
string const& key,
bool generateBody);
////////////////////////////////////////////////////////////////////////////////
/// @brief modify a single document, coordinator case in a cluster
////////////////////////////////////////////////////////////////////////////////
bool modifyDocumentCoordinator (string const& collname,
string const& key,
TRI_voc_rid_t const rev,
TRI_doc_update_policy_e policy,
bool waitForSync,
bool isPatch,
TRI_json_t* json);
};
}
}

View File

@ -438,7 +438,7 @@ void RestVocbaseBaseHandler::generateTransactionError (const string& collectionN
return;
#ifdef TRI_ENABLE_CLUSTER
case TRI_ERROR_SHARD_GONE:
case TRI_ERROR_CLUSTER_SHARD_GONE:
generateError(HttpResponse::SERVER_ERROR, res,
"coordinator: no responsible shard found");
return;

View File

@ -125,10 +125,11 @@
"ERROR_CLUSTER_COULD_NOT_CREATE_DATABASE" : { "code" : 1462, "message" : "could not create database" },
"ERROR_CLUSTER_COULD_NOT_REMOVE_DATABASE_IN_PLAN" : { "code" : 1463, "message" : "could not remove database from plan" },
"ERROR_CLUSTER_COULD_NOT_REMOVE_DATABASE_IN_CURRENT" : { "code" : 1464, "message" : "could not remove database from current" },
"ERROR_SHARD_GONE" : { "code" : 1465, "message" : "no responsible shard found" },
"ERROR_CLUSTER_SHARD_GONE" : { "code" : 1465, "message" : "no responsible shard found" },
"ERROR_CLUSTER_CONNECTION_LOST" : { "code" : 1466, "message" : "cluster internal HTTP connection broken" },
"ERROR_CLUSTER_MUST_NOT_SPECIFY_KEY" : { "code" : 1467, "message" : "must not specify _key for this collection" },
"ERROR_CLUSTER_GOT_CONTRADICTING_ANSWERS" : { "code" : 1468, "message" : "got contradicting answers from different shards" },
"ERROR_CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN" : { "code" : 1469, "message" : "not all sharding attributes given" },
"ERROR_QUERY_KILLED" : { "code" : 1500, "message" : "query killed" },
"ERROR_QUERY_PARSE" : { "code" : 1501, "message" : "%s" },
"ERROR_QUERY_EMPTY" : { "code" : 1502, "message" : "query is empty" },

View File

@ -125,10 +125,11 @@
"ERROR_CLUSTER_COULD_NOT_CREATE_DATABASE" : { "code" : 1462, "message" : "could not create database" },
"ERROR_CLUSTER_COULD_NOT_REMOVE_DATABASE_IN_PLAN" : { "code" : 1463, "message" : "could not remove database from plan" },
"ERROR_CLUSTER_COULD_NOT_REMOVE_DATABASE_IN_CURRENT" : { "code" : 1464, "message" : "could not remove database from current" },
"ERROR_SHARD_GONE" : { "code" : 1465, "message" : "no responsible shard found" },
"ERROR_CLUSTER_SHARD_GONE" : { "code" : 1465, "message" : "no responsible shard found" },
"ERROR_CLUSTER_CONNECTION_LOST" : { "code" : 1466, "message" : "cluster internal HTTP connection broken" },
"ERROR_CLUSTER_MUST_NOT_SPECIFY_KEY" : { "code" : 1467, "message" : "must not specify _key for this collection" },
"ERROR_CLUSTER_GOT_CONTRADICTING_ANSWERS" : { "code" : 1468, "message" : "got contradicting answers from different shards" },
"ERROR_CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN" : { "code" : 1469, "message" : "not all sharding attributes given" },
"ERROR_QUERY_KILLED" : { "code" : 1500, "message" : "query killed" },
"ERROR_QUERY_PARSE" : { "code" : 1501, "message" : "%s" },
"ERROR_QUERY_EMPTY" : { "code" : 1502, "message" : "query is empty" },

View File

@ -160,10 +160,11 @@ ERROR_CLUSTER_COULD_NOT_CREATE_DATABASE_IN_PLAN,1461,"could not create database
ERROR_CLUSTER_COULD_NOT_CREATE_DATABASE,1462,"could not create database","Will be raised when a coordinator in a cluster notices that some DBServers report problems when creating databases for a new cluster wide database."
ERROR_CLUSTER_COULD_NOT_REMOVE_DATABASE_IN_PLAN,1463,"could not remove database from plan","Will be raised when a coordinator in a cluster cannot remove an entry for a database in the Plan hierarchy in the agency."
ERROR_CLUSTER_COULD_NOT_REMOVE_DATABASE_IN_CURRENT,1464,"could not remove database from current","Will be raised when a coordinator in a cluster cannot remove an entry for a database in the Current hierarchy in the agency."
ERROR_SHARD_GONE,1465,"no responsible shard found","Will be raised when a coordinator in a cluster cannot determine the shard that is responsible for a given document."
ERROR_CLUSTER_SHARD_GONE,1465,"no responsible shard found","Will be raised when a coordinator in a cluster cannot determine the shard that is responsible for a given document."
ERROR_CLUSTER_CONNECTION_LOST,1466,"cluster internal HTTP connection broken","Will be raised when a coordinator in a cluster loses an HTTP connection to a DBserver in the cluster whilst transferring data."
ERROR_CLUSTER_MUST_NOT_SPECIFY_KEY,1467,"must not specify _key for this collection","Will be raised when a coordinator in a cluster finds that the _key attribute was specified in a sharded collection that uses not only _key as its sharding attribute."
ERROR_CLUSTER_GOT_CONTRADICTING_ANSWERS,1468,"got contradicting answers from different shards","Will be raised if a coordinator in a cluster gets conflicting results from different shards, which should never happen."
ERROR_CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN,1469,"not all sharding attributes given","Will be raised if a coordinator tries to find out which shard is responsible for a partial document, but cannot do this because not all sharding attributes are specified."
################################################################################
## ArangoDB query errors

View File

@ -858,19 +858,33 @@ uint64_t TRI_HashJson (TRI_json_t const* json) {
/// Note that all JSON values given for `json` that are not JSON arrays
/// hash to the same value, which is not the same value a JSON array gets
/// that does not contain any of the specified attributes.
/// If the flag `docComplete` is false, it is an error if the document
/// does not contain explicit values for all attributes. An error
/// is reported by setting *error to
/// TRI_ERROR_CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN instead of
/// TRI_ERROR_NO_ERROR. It is allowed to give NULL as error in which
/// case no error is reported.
////////////////////////////////////////////////////////////////////////////////
uint64_t TRI_HashJsonByAttributes (TRI_json_t const* json,
char const *attributes[],
int nrAttributes) {
int nrAttributes,
bool docComplete,
int* error) {
int i;
TRI_json_t const* subjson;
uint64_t hash;
if (NULL != error) {
*error = TRI_ERROR_NO_ERROR;
}
hash = TRI_FnvHashBlockInitial();
if (TRI_IsArrayJson(json)) {
for (i = 0; i < nrAttributes; i++) {
subjson = TRI_LookupArrayJson(json, attributes[i]);
if (NULL == subjson && !docComplete && NULL != error) {
*error = TRI_ERROR_CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN;
}
hash = HashJsonRecursive(hash, subjson);
}
}

View File

@ -150,7 +150,9 @@ uint64_t TRI_HashJson (TRI_json_t const* json);
uint64_t TRI_HashJsonByAttributes (TRI_json_t const* json,
char const *attributes[],
int nrAttributes);
int nrAttributes,
bool docComplete,
int* error);
////////////////////////////////////////////////////////////////////////////////
/// @}

View File

@ -121,10 +121,11 @@ void TRI_InitialiseErrorMessages (void) {
REG_ERROR(ERROR_CLUSTER_COULD_NOT_CREATE_DATABASE, "could not create database");
REG_ERROR(ERROR_CLUSTER_COULD_NOT_REMOVE_DATABASE_IN_PLAN, "could not remove database from plan");
REG_ERROR(ERROR_CLUSTER_COULD_NOT_REMOVE_DATABASE_IN_CURRENT, "could not remove database from current");
REG_ERROR(ERROR_SHARD_GONE, "no responsible shard found");
REG_ERROR(ERROR_CLUSTER_SHARD_GONE, "no responsible shard found");
REG_ERROR(ERROR_CLUSTER_CONNECTION_LOST, "cluster internal HTTP connection broken");
REG_ERROR(ERROR_CLUSTER_MUST_NOT_SPECIFY_KEY, "must not specify _key for this collection");
REG_ERROR(ERROR_CLUSTER_GOT_CONTRADICTING_ANSWERS, "got contradicting answers from different shards");
REG_ERROR(ERROR_CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN, "not all sharding attributes given");
REG_ERROR(ERROR_QUERY_KILLED, "query killed");
REG_ERROR(ERROR_QUERY_PARSE, "%s");
REG_ERROR(ERROR_QUERY_EMPTY, "query is empty");

View File

@ -293,6 +293,10 @@ extern "C" {
/// - 1468: @LIT{got contradicting answers from different shards}
/// Will be raised if a coordinator in a cluster gets conflicting results
/// from different shards, which should never happen.
/// - 1469: @LIT{not all sharding attributes given}
/// Will be raised if a coordinator tries to find out which shard is
/// responsible for a partial document, but cannot do this because not all
/// sharding attributes are specified.
/// - 1500: @LIT{query killed}
/// Will be raised when a running query is killed by an explicit admin
/// command.
@ -1638,7 +1642,7 @@ void TRI_InitialiseErrorMessages (void);
#define TRI_ERROR_CLUSTER_COULD_NOT_REMOVE_DATABASE_IN_CURRENT (1464)
////////////////////////////////////////////////////////////////////////////////
/// @brief 1465: ERROR_SHARD_GONE
/// @brief 1465: ERROR_CLUSTER_SHARD_GONE
///
/// no responsible shard found
///
@ -1646,7 +1650,7 @@ void TRI_InitialiseErrorMessages (void);
/// that is responsible for a given document.
////////////////////////////////////////////////////////////////////////////////
#define TRI_ERROR_SHARD_GONE (1465)
#define TRI_ERROR_CLUSTER_SHARD_GONE (1465)
////////////////////////////////////////////////////////////////////////////////
/// @brief 1466: ERROR_CLUSTER_CONNECTION_LOST
@ -1682,6 +1686,18 @@ void TRI_InitialiseErrorMessages (void);
#define TRI_ERROR_CLUSTER_GOT_CONTRADICTING_ANSWERS (1468)
////////////////////////////////////////////////////////////////////////////////
/// @brief 1469: ERROR_CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN
///
/// not all sharding attributes given
///
/// Will be raised if a coordinator tries to find out which shard is
/// responsible for a partial document, but cannot do this because not all
/// sharding attributes are specified.
////////////////////////////////////////////////////////////////////////////////
#define TRI_ERROR_CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN (1469)
////////////////////////////////////////////////////////////////////////////////
/// @brief 1500: ERROR_QUERY_KILLED
///