commit ef2fe19cbf
Merge branch 'devel' of https://github.com/arangodb/arangodb into devel

@@ -643,7 +643,7 @@ if (USE_MAINTAINER_MODE)
     OUTPUT
       ${CMAKE_SOURCE_DIR}/${m}
     COMMAND
-      ./utils/generateErrorfile.sh ./${ERRORS_DAT} ./${m}
+      PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE} ./utils/generateErrorfile.sh ./${ERRORS_DAT} ./${m}
     DEPENDS
       ${CMAKE_SOURCE_DIR}/${ERRORS_DAT}
     WORKING_DIRECTORY

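The only change in this CMake hunk is that the generator invocation is now prefixed with PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}, so the interpreter CMake detected is exported into the script's environment for just that one command. A minimal shell illustration of that env-prefix form (the interpreter path is made up for the example):

    # The VAR=value prefix exports VAR only into that command's environment:
    PYTHON_EXECUTABLE=/usr/bin/python3 sh -c 'echo "child sees: ${PYTHON_EXECUTABLE:-python}"'
    # Back in the calling shell the variable is still whatever it was before (usually unset):
    echo "caller sees: ${PYTHON_EXECUTABLE:-python}"
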
@@ -1,4 +1,4 @@
-.TH arangob 1 "3.0.0-devel" "ArangoDB" "ArangoDB"
+.TH arangob 1 "3.0.x-devel" "ArangoDB" "ArangoDB"
 .SH NAME
 arangob - the ArangoDB benchmark and test tool
 .SH SYNOPSIS

@@ -1,4 +1,4 @@
-.TH arangodump 1 "3.0.0-devel" "ArangoDB" "ArangoDB"
+.TH arangodump 1 "3.0.x-devel" "ArangoDB" "ArangoDB"
 .SH NAME
 arangodump - a tool to create logical dumps of an ArangoDB database
 .SH SYNOPSIS

@@ -1,4 +1,4 @@
-.TH arangoimp 1 "3.0.0-devel" "ArangoDB" "ArangoDB"
+.TH arangoimp 1 "3.0.x-devel" "ArangoDB" "ArangoDB"
 .SH NAME
 arangoimp - a bulk importer for the ArangoDB database
 .SH SYNOPSIS

@@ -854,6 +854,7 @@ void ClusterComm::asyncAnswer(std::string& coordinatorHeader,

 std::string ClusterComm::processAnswer(std::string& coordinatorHeader,
                                        arangodb::rest::HttpRequest* answer) {
+  TRI_ASSERT(answer != nullptr);
   // First take apart the header to get the operaitonID:
   OperationID operationID;
   size_t start = 0;
@@ -881,6 +882,7 @@ std::string ClusterComm::processAnswer(std::string& coordinatorHeader,
   ClusterComm::IndexIterator i;
   i = receivedByOpID.find(operationID);
   if (i != receivedByOpID.end()) {
+    TRI_ASSERT(answer != nullptr);
     ClusterCommOperation* op = *(i->second);
     op->result.answer.reset(answer);
     op->result.answer_code = rest::HttpResponse::responseCode(
@@ -904,6 +906,7 @@ std::string ClusterComm::processAnswer(std::string& coordinatorHeader,

   i = toSendByOpID.find(operationID);
   if (i != toSendByOpID.end()) {
+    TRI_ASSERT(answer != nullptr);
     ClusterCommOperation* op = *(i->second);
     op->result.answer.reset(answer);
     op->result.answer_code = rest::HttpResponse::responseCode(

@@ -1787,6 +1787,7 @@ static void Return_PrepareClusterCommResultForJS(
       // boring:

       // The headers:
+      TRI_ASSERT(res.result != nullptr);
       v8::Handle<v8::Object> h = v8::Object::New(isolate);
       for (auto const& i : res.result->getHeaderFields()) {
         h->Set(TRI_V8_STD_STRING(i.first), TRI_V8_STD_STRING(i.second));
@@ -1835,6 +1836,7 @@ static void Return_PrepareClusterCommResultForJS(
       v8::Handle<v8::Object> h = v8::Object::New(isolate);
       TRI_GET_GLOBAL_STRING(StatusKey);
       r->Set(StatusKey, TRI_V8_ASCII_STRING("RECEIVED"));
+      TRI_ASSERT(res.answer != nullptr);
       std::map<std::string, std::string> headers = res.answer->headers();
       std::map<std::string, std::string>::iterator i;
       for (i = headers.begin(); i != headers.end(); ++i) {

@@ -151,6 +151,7 @@
   "ERROR_ARANGO_DOCUMENT_NOT_FOUND_OR_SHARDING_ATTRIBUTES_CHANGED" : { "code" : 1475, "message" : "document not found or sharding attributes changed" },
   "ERROR_CLUSTER_COULD_NOT_DETERMINE_ID" : { "code" : 1476, "message" : "could not determine my ID from my local info" },
   "ERROR_CLUSTER_ONLY_ON_DBSERVER" : { "code" : 1477, "message" : "this operation is only valid on a DBserver in a cluster" },
+  "ERROR_CLUSTER_BACKEND_UNAVAILABLE" : { "code" : 1478, "message" : "A cluster backend which was required for the operation could not be reached" },
   "ERROR_QUERY_KILLED" : { "code" : 1500, "message" : "query killed" },
   "ERROR_QUERY_PARSE" : { "code" : 1501, "message" : "%s" },
   "ERROR_QUERY_EMPTY" : { "code" : 1502, "message" : "query is empty" },

@@ -187,6 +187,7 @@ ERROR_CLUSTER_AQL_COMMUNICATION,1474,"error in cluster internal communication fo
 ERROR_ARANGO_DOCUMENT_NOT_FOUND_OR_SHARDING_ATTRIBUTES_CHANGED,1475,"document not found or sharding attributes changed","Will be raised when a document with a given identifier or handle is unknown, or if the sharding attributes have been changed in a REPLACE operation in the cluster."
 ERROR_CLUSTER_COULD_NOT_DETERMINE_ID,1476,"could not determine my ID from my local info","Will be raised if a cluster server at startup could not determine its own ID from the local info provided."
 ERROR_CLUSTER_ONLY_ON_DBSERVER,1477,"this operation is only valid on a DBserver in a cluster","Will be raised if there is an attempt to run a DBserver-only operation on a different type of node."
+ERROR_CLUSTER_BACKEND_UNAVAILABLE,1478,"A cluster backend which was required for the operation could not be reached","Will be raised if a required db server can't be reached."

 ################################################################################
 ## ArangoDB query errors

@@ -147,6 +147,7 @@ void TRI_InitializeErrorMessages () {
   REG_ERROR(ERROR_ARANGO_DOCUMENT_NOT_FOUND_OR_SHARDING_ATTRIBUTES_CHANGED, "document not found or sharding attributes changed");
   REG_ERROR(ERROR_CLUSTER_COULD_NOT_DETERMINE_ID, "could not determine my ID from my local info");
   REG_ERROR(ERROR_CLUSTER_ONLY_ON_DBSERVER, "this operation is only valid on a DBserver in a cluster");
+  REG_ERROR(ERROR_CLUSTER_BACKEND_UNAVAILABLE, "A cluster backend which was required for the operation could not be reached");
   REG_ERROR(ERROR_QUERY_KILLED, "query killed");
   REG_ERROR(ERROR_QUERY_PARSE, "%s");
   REG_ERROR(ERROR_QUERY_EMPTY, "query is empty");

@@ -361,6 +361,8 @@
 /// - 1477: @LIT{this operation is only valid on a DBserver in a cluster}
 /// Will be raised if there is an attempt to run a DBserver-only operation on
 /// a different type of node.
+/// - 1478: @LIT{A cluster backend which was required for the operation could not be reached}
+/// Will be raised if a required db server can't be reached.
 /// - 1500: @LIT{query killed}
 /// Will be raised when a running query is killed by an explicit admin
 /// command.
@@ -2126,6 +2128,7 @@ void TRI_InitializeErrorMessages ();
 ///
 /// A cluster backend which was required for the operation could not be reached
 ///
+/// Will be raised if a required db server can't be reached.
 ////////////////////////////////////////////////////////////////////////////////

 #define TRI_ERROR_CLUSTER_BACKEND_UNAVAILABLE (1478)

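The same 1478 entry lands in four files (the errors.dat source, the JavaScript error map, the REG_ERROR registration table, and the header with the TRI_ERROR_* define) because the derived tables are generated from errors.dat by ./utils/generateErrorfile.sh, which the CMake rule at the top of this diff drives in maintainer mode. A quick consistency check one might run after such a change; it assumes the working directory is the repository root, and it deliberately searches by symbol name since this diff view hides the file paths:

    # Sketch: confirm the new error symbol appears in the .dat source and in every
    # generated table (paths are not shown in this diff, so grep recursively).
    grep -rn "ERROR_CLUSTER_BACKEND_UNAVAILABLE" . \
        --include='*.dat' --include='*.js' --include='*.h' --include='*.c' --include='*.cpp'
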
@@ -57,6 +57,7 @@ start() {
     --cluster.my-role $ROLE \
     --log.file cluster/$PORT.log \
     --log.requests-file cluster/$PORT.req \
+    --log.level TRACE \
     --server.disable-statistics true \
     --server.foxx-queues false \
     --javascript.startup-directory ./js \

@@ -3,9 +3,11 @@
 SOURCE="$1"
 DEST="$2"

+PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE:-python}
+
 SCRIPT="`dirname $0`/generateErrorfile.py"

-python "$SCRIPT" "$SOURCE" "$DEST.tmp"
+${PYTHON_EXECUTABLE} "$SCRIPT" "$SOURCE" "$DEST.tmp"

 if cmp -s $DEST ${DEST}.tmp; then
   rm ${DEST}.tmp
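
With this change the script picks its Python interpreter from the PYTHON_EXECUTABLE environment variable and falls back to plain python when it is unset, which is exactly what the PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE} prefix in the CMake hunk above relies on. A hedged usage sketch; the source and destination paths below are illustrative guesses, not taken from this diff:

    # Regenerate a derived error table with an explicit interpreter (paths assumed):
    PYTHON_EXECUTABLE=/usr/bin/python3 \
        ./utils/generateErrorfile.sh ./lib/Basics/errors.dat ./js/common/bootstrap/errors.js

    # With PYTHON_EXECUTABLE unset, the ${PYTHON_EXECUTABLE:-python} default keeps the
    # previous behaviour and runs plain "python":
    ./utils/generateErrorfile.sh ./lib/Basics/errors.dat ./js/common/bootstrap/errors.js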