improve error messages (#7697)

Author: Jan
Committed: 2018-12-10 10:37:57 +01:00 (via GitHub)
Parent: c78e3c89e0
Commit: d22997b01c
2 changed files with 44 additions and 22 deletions

Changed file 1 of 2 (DumpFeature)

@@ -133,6 +133,7 @@ std::pair<arangodb::Result, uint64_t> startBatch(
       body.c_str(), body.size()));
   auto check = ::checkHttpResponse(client, response);
   if (check.fail()) {
+    LOG_TOPIC(ERR, arangodb::Logger::DUMP) << "An error occurred while creating dump context: " << check.errorMessage();
     return {check, 0};
   }
@@ -200,7 +201,7 @@ void flushWal(arangodb::httpclient::SimpleHttpClient& client) {
   if (check.fail()) {
     // TODO should we abort early here?
     LOG_TOPIC(ERR, arangodb::Logger::DUMP)
-        << "got invalid response from server: " + check.errorMessage();
+        << "Got invalid response from server when flushing WAL: " + check.errorMessage();
   }
 }
@@ -212,8 +213,8 @@ bool isIgnoredHiddenEnterpriseCollection(
       strncmp(name.c_str(), "_from_", 6) == 0 ||
       strncmp(name.c_str(), "_to_", 4) == 0) {
     LOG_TOPIC(INFO, arangodb::Logger::DUMP)
-        << "Dump ignoring collection " << name
-        << ". Will be created via SmartGraphs of a full dump. If you want to "
+        << "Dump is ignoring collection '" << name
+        << "'. Will be created via SmartGraphs of a full dump. If you want to "
           "dump this collection anyway use 'arangodump --force'. "
           "However this is not recommended and you should instead dump "
           "the EdgeCollection of the SmartGraph instead.";
@@ -256,11 +257,12 @@ arangodb::Result dumpCollection(arangodb::httpclient::SimpleHttpClient& client,
   using arangodb::basics::StringUtils::boolean;
   using arangodb::basics::StringUtils::itoa;
   using arangodb::basics::StringUtils::uint64;
+  using arangodb::basics::StringUtils::urlEncode;

   uint64_t fromTick = minTick;
   uint64_t chunkSize =
       jobData.options.initialChunkSize;  // will grow adaptively up to max
-  std::string baseUrl = "/_api/replication/dump?collection=" + name +
+  std::string baseUrl = "/_api/replication/dump?collection=" + urlEncode(name) +
                         "&batchId=" + itoa(batchId) + "&ticks=false";
   if (jobData.options.clusterMode) {
     // we are in cluster mode, must specify dbserver
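Why this hunk matters: a collection name can contain characters that are not safe in a URL query string, and splicing it in unencoded would corrupt the dump request. The following standalone sketch illustrates the effect of percent-encoding the name before building baseUrl; the percentEncode helper and the sample name are illustrative stand-ins, not ArangoDB's actual StringUtils::urlEncode implementation.

// Illustrative only: a minimal percent-encoder standing in for
// arangodb::basics::StringUtils::urlEncode (the real implementation may
// differ in which characters it leaves unescaped).
#include <cctype>
#include <cstdio>
#include <iostream>
#include <sstream>
#include <string>

std::string percentEncode(std::string const& value) {
  std::ostringstream out;
  for (unsigned char c : value) {
    // keep unreserved characters as-is, escape everything else as %XX
    if (std::isalnum(c) || c == '-' || c == '_' || c == '.' || c == '~') {
      out << c;
    } else {
      char buf[4];
      std::snprintf(buf, sizeof(buf), "%%%02X", c);
      out << buf;
    }
  }
  return out.str();
}

int main() {
  // hypothetical collection name containing characters unsafe in a query string
  std::string name = "my collection&x=1";
  std::string baseUrl =
      "/_api/replication/dump?collection=" + percentEncode(name) +
      "&batchId=12345&ticks=false";
  std::cout << baseUrl << "\n";  // ...collection=my%20collection%26x%3D1...
  return 0;
}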
@@ -284,6 +286,7 @@ arangodb::Result dumpCollection(arangodb::httpclient::SimpleHttpClient& client,
       client.request(arangodb::rest::RequestType::GET, url, nullptr, 0));
   auto check = ::checkHttpResponse(client, response);
   if (check.fail()) {
+    LOG_TOPIC(ERR, arangodb::Logger::DUMP) << "An error occurred while dumping collection '" << name << "': " << check.errorMessage();
     return check;
   }
@@ -314,7 +317,7 @@ arangodb::Result dumpCollection(arangodb::httpclient::SimpleHttpClient& client,
     }
   if (!headerExtracted) {  // NOT else, fallthrough from outer or inner above
     return {TRI_ERROR_REPLICATION_INVALID_RESPONSE,
-            "got invalid response from server: required header is missing"};
+            std::string("got invalid response from server: required header is missing while dumping collection '") + name + "'"};
   }

   // now actually write retrieved data to dump file
@@ -672,6 +675,7 @@ Result DumpFeature::runDump(httpclient::SimpleHttpClient& client,
       client.request(rest::RequestType::GET, url, nullptr, 0));
   auto check = ::checkHttpResponse(client, response);
   if (check.fail()) {
+    LOG_TOPIC(ERR, arangodb::Logger::DUMP) << "An error occurred while fetching inventory: " << check.errorMessage();
     return check;
   }
@@ -792,6 +796,7 @@ Result DumpFeature::runClusterDump(httpclient::SimpleHttpClient& client,
       client.request(rest::RequestType::GET, url, nullptr, 0));
   auto check = ::checkHttpResponse(client, response);
   if (check.fail()) {
+    LOG_TOPIC(ERR, arangodb::Logger::DUMP) << "An error occurred while fetching inventory: " << check.errorMessage();
     return check;
   }
@@ -1057,8 +1062,7 @@ void DumpFeature::start() {
   std::tie(result, _options.clusterMode) =
       _clientManager.getArangoIsCluster(*httpClient);
   if (result.fail()) {
-    LOG_TOPIC(FATAL, Logger::FIXME)
-        << "Error: could not detect ArangoDB instance type";
+    LOG_TOPIC(FATAL, arangodb::Logger::RESTORE) << "Error: could not detect ArangoDB instance type: " << result.errorMessage();
     FATAL_ERROR_EXIT();
   }
@@ -1090,9 +1094,12 @@ void DumpFeature::start() {
       } else {
         res = runClusterDump(*httpClient, dbName);
       }
+    } catch (basics::Exception const& ex) {
+      LOG_TOPIC(ERR, Logger::FIXME) << "caught exception: " << ex.what();
+      res = {ex.code(), ex.what()};
     } catch (std::exception const& ex) {
       LOG_TOPIC(ERR, Logger::FIXME) << "caught exception: " << ex.what();
-      res = {TRI_ERROR_INTERNAL};
+      res = {TRI_ERROR_INTERNAL, ex.what()};
     } catch (...) {
       LOG_TOPIC(ERR, Logger::FIXME) << "caught unknown exception";
       res = {TRI_ERROR_INTERNAL};
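The new catch clause for basics::Exception matters because the std::exception handler alone would flatten every failure into TRI_ERROR_INTERNAL; catching the code-carrying exception type first lets the original error code and message travel back in the returned result. Below is a minimal sketch of that ordering, using stand-in types rather than the real arangodb::Result and basics::Exception interfaces; the error codes and messages are sample values.

// Minimal sketch of the catch-ordering idea with stand-in types; the real
// arangodb::Result and arangodb::basics::Exception have richer interfaces.
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>

constexpr int TRI_ERROR_INTERNAL = 4;  // placeholder value for illustration

struct Result {
  int code = 0;
  std::string message;
};

// stand-in for basics::Exception: an exception that carries an error code
struct CodedException : std::runtime_error {
  int errorCode;
  CodedException(int code, std::string const& msg)
      : std::runtime_error(msg), errorCode(code) {}
};

Result runDumpLike(bool raiseCoded) {
  Result res;
  try {
    if (raiseCoded) {
      throw CodedException(1228, "database not found");  // sample values
    }
    throw std::runtime_error("out of memory");
  } catch (CodedException const& ex) {
    // the more specific type comes first, so its error code survives
    res = {ex.errorCode, ex.what()};
  } catch (std::exception const& ex) {
    // generic fallback: keep at least the message alongside a generic code
    res = {TRI_ERROR_INTERNAL, ex.what()};
  }
  return res;
}

int main() {
  Result a = runDumpLike(true);
  Result b = runDumpLike(false);
  std::cout << a.code << ": " << a.message << "\n";  // 1228: database not found
  std::cout << b.code << ": " << b.message << "\n";  // 4: out of memory
  return 0;
}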

Changed file 2 of 2 (RestoreFeature)

@@ -361,9 +361,13 @@ arangodb::Result recreateCollection(
   result = ::sendRestoreCollection(httpClient, jobData.options,
                                    jobData.collection, cname);
-  if (result.fail() && jobData.options.force) {
-    LOG_TOPIC(ERR, Logger::RESTORE) << result.errorMessage();
-    result.reset();
+  if (result.fail()) {
+    if (jobData.options.force) {
+      LOG_TOPIC(WARN, Logger::RESTORE) << "Error while creating " << collectionType << " collection '" << cname << "': " << result.errorMessage();
+      result.reset();
+    } else {
+      LOG_TOPIC(ERR, Logger::RESTORE) << "Error while creating " << collectionType << " collection '" << cname << "': " << result.errorMessage();
+    }
   }
   return result;
 }
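This recreateCollection hunk and the index/data hunks further down follow the same --force convention: a failure is logged as a warning and wiped with result.reset() so the restore carries on, while without --force it is logged as an error and propagated to the caller. A compact sketch of that branch with stand-in types; Result, Options, restoreStep, and recreateLike are illustrative names, not the actual arangosh code.

// Sketch of the --force handling pattern with stand-in types.
#include <iostream>
#include <string>

struct Result {
  int code = 0;
  std::string message;
  bool fail() const { return code != 0; }
  void reset() { code = 0; message.clear(); }
};

struct Options {
  bool force = false;  // corresponds to the --force command line flag
};

Result restoreStep() {
  return Result{1, "duplicate name"};  // sample failure
}

Result recreateLike(Options const& opts, std::string const& cname) {
  Result result = restoreStep();
  if (result.fail()) {
    if (opts.force) {
      // --force: report as a warning and clear the error so restore continues
      std::cerr << "WARN: Error while creating collection '" << cname
                << "': " << result.message << "\n";
      result.reset();
    } else {
      // no --force: report as an error; the caller sees the failure
      std::cerr << "ERR: Error while creating collection '" << cname
                << "': " << result.message << "\n";
    }
  }
  return result;
}

int main() {
  Result withForce = recreateLike(Options{true}, "users");
  Result without = recreateLike(Options{false}, "users");
  std::cout << "with --force fail(): " << withForce.fail() << "\n";   // 0
  std::cout << "without --force fail(): " << without.fail() << "\n";  // 1
  return 0;
}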
@@ -382,20 +386,25 @@ arangodb::Result restoreIndexes(
     // we actually have indexes
     if (jobData.options.progress) {
       std::string const cname =
-          arangodb::basics::VelocyPackHelper::getStringValue(parameters, "name",
-                                                             "");
+          arangodb::basics::VelocyPackHelper::getStringValue(parameters, "name", "");
       LOG_TOPIC(INFO, Logger::RESTORE)
           << "# Creating indexes for collection '" << cname << "'...";
     }

     result =
         ::sendRestoreIndexes(httpClient, jobData.options, jobData.collection);
+    if (result.fail()) {
+      std::string const cname = arangodb::basics::VelocyPackHelper::getStringValue(parameters, "name", "");
+      if (jobData.options.force) {
+        LOG_TOPIC(WARN, Logger::RESTORE) << "Error while creating indexes for collection '" << cname << "': " << result.errorMessage();
+        result.reset();
+      } else {
+        LOG_TOPIC(ERR, Logger::RESTORE) << "Error while creating indexes for collection '" << cname << "': " << result.errorMessage();
+      }
+    }
   }

-  if (result.fail() && jobData.options.force) {
-    LOG_TOPIC(ERR, Logger::RESTORE) << result.errorMessage();
-    result.reset();
-  }
   return result;
 }
@@ -476,6 +485,7 @@ arangodb::Result restoreData(arangodb::httpclient::SimpleHttpClient& httpClient,
       length = found - buffer.begin();  // found a \n somewhere; break at line
     }
     jobData.stats.totalBatches++;
     result = ::sendRestoreData(httpClient, jobData.options, cname,
                                buffer.begin(), length);
@@ -483,9 +493,11 @@ arangodb::Result restoreData(arangodb::httpclient::SimpleHttpClient& httpClient,
     if (result.fail()) {
       if (jobData.options.force) {
-        LOG_TOPIC(ERR, Logger::RESTORE) << result.errorMessage();
+        LOG_TOPIC(WARN, Logger::RESTORE) << "Error while restoring data into collection '" << cname << "': " << result.errorMessage();
         result.reset();
         continue;
+      } else {
+        LOG_TOPIC(ERR, Logger::RESTORE) << "Error while restoring data into collection '" << cname << "': " << result.errorMessage();
       }
       return result;
     }
@@ -759,7 +771,7 @@ arangodb::Result processInputDirectory(
     // if we get here we need to trigger foxx heal
     Result res = ::triggerFoxxHeal(httpClient);
     if (res.fail()) {
-      LOG_TOPIC(WARN, Logger::RESTORE) << "Reloading of Foxx failed. In the cluster Foxx Services will be available eventually, On SingleServers send a POST to '/_api/foxx/_local/heal' on the current database, with an empty body.";
+      LOG_TOPIC(WARN, Logger::RESTORE) << "Reloading of Foxx services failed. In the cluster Foxx services will be available eventually, On single servers send a POST to '/_api/foxx/_local/heal' on the current database, with an empty body.";
     }
   }
@@ -1047,7 +1059,7 @@ void RestoreFeature::start() {
   std::tie(result, _options.clusterMode) =
       _clientManager.getArangoIsCluster(*httpClient);
   if (result.fail()) {
-    LOG_TOPIC(ERR, arangodb::Logger::RESTORE) << result.errorMessage();
+    LOG_TOPIC(FATAL, arangodb::Logger::RESTORE) << "Error: could not detect ArangoDB instance type: " << result.errorMessage();
     _exitCode = EXIT_FAILURE;
     return;
   }
@@ -1055,7 +1067,7 @@ void RestoreFeature::start() {
   std::tie(result, _options.indexesFirst) =
       _clientManager.getArangoIsUsingEngine(*httpClient, "rocksdb");
   if (result.fail()) {
-    LOG_TOPIC(ERR, arangodb::Logger::RESTORE) << result.errorMessage();
+    LOG_TOPIC(FATAL, arangodb::Logger::RESTORE) << "Error while trying to determine server storage engine: " << result.errorMessage();
     _exitCode = EXIT_FAILURE;
     return;
   }
@@ -1075,9 +1087,12 @@ void RestoreFeature::start() {
   try {
     result = ::processInputDirectory(*httpClient, _clientTaskQueue, *this,
                                      _options, *_directory, _stats);
+  } catch (basics::Exception const& ex) {
+    LOG_TOPIC(ERR, arangodb::Logger::RESTORE) << "caught exception: " << ex.what();
+    result = {ex.code(), ex.what()};
   } catch (std::exception const& ex) {
     LOG_TOPIC(ERR, arangodb::Logger::RESTORE) << "caught exception: " << ex.what();
-    result = {TRI_ERROR_INTERNAL};
+    result = {TRI_ERROR_INTERNAL, ex.what()};
   } catch (...) {
     LOG_TOPIC(ERR, arangodb::Logger::RESTORE)
         << "caught unknown exception";