diff --git a/arangosh/Dump/DumpFeature.cpp b/arangosh/Dump/DumpFeature.cpp index 0838e00a22..f936825010 100644 --- a/arangosh/Dump/DumpFeature.cpp +++ b/arangosh/Dump/DumpFeature.cpp @@ -133,6 +133,7 @@ std::pair startBatch( body.c_str(), body.size())); auto check = ::checkHttpResponse(client, response); if (check.fail()) { + LOG_TOPIC(ERR, arangodb::Logger::DUMP) << "An error occurred while creating dump context: " << check.errorMessage(); return {check, 0}; } @@ -200,7 +201,7 @@ void flushWal(arangodb::httpclient::SimpleHttpClient& client) { if (check.fail()) { // TODO should we abort early here? LOG_TOPIC(ERR, arangodb::Logger::DUMP) - << "got invalid response from server: " + check.errorMessage(); + << "Got invalid response from server when flushing WAL: " + check.errorMessage(); } } @@ -212,8 +213,8 @@ bool isIgnoredHiddenEnterpriseCollection( strncmp(name.c_str(), "_from_", 6) == 0 || strncmp(name.c_str(), "_to_", 4) == 0) { LOG_TOPIC(INFO, arangodb::Logger::DUMP) - << "Dump ignoring collection " << name - << ". Will be created via SmartGraphs of a full dump. If you want to " + << "Dump is ignoring collection '" << name + << "'. Will be created via SmartGraphs of a full dump. If you want to " "dump this collection anyway use 'arangodump --force'. 
" "However this is not recommended and you should instead dump " "the EdgeCollection of the SmartGraph instead."; @@ -256,11 +257,12 @@ arangodb::Result dumpCollection(arangodb::httpclient::SimpleHttpClient& client, using arangodb::basics::StringUtils::boolean; using arangodb::basics::StringUtils::itoa; using arangodb::basics::StringUtils::uint64; + using arangodb::basics::StringUtils::urlEncode; uint64_t fromTick = minTick; uint64_t chunkSize = jobData.options.initialChunkSize; // will grow adaptively up to max - std::string baseUrl = "/_api/replication/dump?collection=" + name + + std::string baseUrl = "/_api/replication/dump?collection=" + urlEncode(name) + "&batchId=" + itoa(batchId) + "&ticks=false"; if (jobData.options.clusterMode) { // we are in cluster mode, must specify dbserver @@ -284,6 +286,7 @@ arangodb::Result dumpCollection(arangodb::httpclient::SimpleHttpClient& client, client.request(arangodb::rest::RequestType::GET, url, nullptr, 0)); auto check = ::checkHttpResponse(client, response); if (check.fail()) { + LOG_TOPIC(ERR, arangodb::Logger::DUMP) << "An error occurred while dumping collection '" << name << "': " << check.errorMessage(); return check; } @@ -314,7 +317,7 @@ arangodb::Result dumpCollection(arangodb::httpclient::SimpleHttpClient& client, } if (!headerExtracted) { // NOT else, fallthrough from outer or inner above return {TRI_ERROR_REPLICATION_INVALID_RESPONSE, - "got invalid response from server: required header is missing"}; + std::string("got invalid response from server: required header is missing while dumping collection '") + name + "'"}; } // now actually write retrieved data to dump file @@ -672,6 +675,7 @@ Result DumpFeature::runDump(httpclient::SimpleHttpClient& client, client.request(rest::RequestType::GET, url, nullptr, 0)); auto check = ::checkHttpResponse(client, response); if (check.fail()) { + LOG_TOPIC(ERR, arangodb::Logger::DUMP) << "An error occurred while fetching inventory: " << check.errorMessage(); return check; } 
@@ -792,6 +796,7 @@ Result DumpFeature::runClusterDump(httpclient::SimpleHttpClient& client, client.request(rest::RequestType::GET, url, nullptr, 0)); auto check = ::checkHttpResponse(client, response); if (check.fail()) { + LOG_TOPIC(ERR, arangodb::Logger::DUMP) << "An error occurred while fetching inventory: " << check.errorMessage(); return check; } @@ -1057,8 +1062,7 @@ void DumpFeature::start() { std::tie(result, _options.clusterMode) = _clientManager.getArangoIsCluster(*httpClient); if (result.fail()) { - LOG_TOPIC(FATAL, Logger::FIXME) - << "Error: could not detect ArangoDB instance type"; + LOG_TOPIC(FATAL, arangodb::Logger::DUMP) << "Error: could not detect ArangoDB instance type: " << result.errorMessage(); FATAL_ERROR_EXIT(); } @@ -1090,9 +1094,12 @@ void DumpFeature::start() { } else { res = runClusterDump(*httpClient, dbName); } + } catch (basics::Exception const& ex) { + LOG_TOPIC(ERR, Logger::FIXME) << "caught exception: " << ex.what(); + res = {ex.code(), ex.what()}; } catch (std::exception const& ex) { LOG_TOPIC(ERR, Logger::FIXME) << "caught exception: " << ex.what(); - res = {TRI_ERROR_INTERNAL}; + res = {TRI_ERROR_INTERNAL, ex.what()}; } catch (...) 
{ LOG_TOPIC(ERR, Logger::FIXME) << "caught unknown exception"; res = {TRI_ERROR_INTERNAL}; diff --git a/arangosh/Restore/RestoreFeature.cpp b/arangosh/Restore/RestoreFeature.cpp index b5c9661602..6ea9ae995b 100644 --- a/arangosh/Restore/RestoreFeature.cpp +++ b/arangosh/Restore/RestoreFeature.cpp @@ -361,9 +361,13 @@ arangodb::Result recreateCollection( result = ::sendRestoreCollection(httpClient, jobData.options, jobData.collection, cname); - if (result.fail() && jobData.options.force) { - LOG_TOPIC(ERR, Logger::RESTORE) << result.errorMessage(); - result.reset(); + if (result.fail()) { + if (jobData.options.force) { + LOG_TOPIC(WARN, Logger::RESTORE) << "Error while creating " << collectionType << " collection '" << cname << "': " << result.errorMessage(); + result.reset(); + } else { + LOG_TOPIC(ERR, Logger::RESTORE) << "Error while creating " << collectionType << " collection '" << cname << "': " << result.errorMessage(); + } } return result; } @@ -382,20 +386,25 @@ arangodb::Result restoreIndexes( // we actually have indexes if (jobData.options.progress) { std::string const cname = - arangodb::basics::VelocyPackHelper::getStringValue(parameters, "name", - ""); + arangodb::basics::VelocyPackHelper::getStringValue(parameters, "name", ""); LOG_TOPIC(INFO, Logger::RESTORE) << "# Creating indexes for collection '" << cname << "'..."; } result = ::sendRestoreIndexes(httpClient, jobData.options, jobData.collection); + + if (result.fail()) { + std::string const cname = arangodb::basics::VelocyPackHelper::getStringValue(parameters, "name", ""); + if (jobData.options.force) { + LOG_TOPIC(WARN, Logger::RESTORE) << "Error while creating indexes for collection '" << cname << "': " << result.errorMessage(); + result.reset(); + } else { + LOG_TOPIC(ERR, Logger::RESTORE) << "Error while creating indexes for collection '" << cname << "': " << result.errorMessage(); + } + } } - if (result.fail() && jobData.options.force) { - LOG_TOPIC(ERR, Logger::RESTORE) << 
result.errorMessage(); - result.reset(); - } return result; } @@ -476,6 +485,7 @@ arangodb::Result restoreData(arangodb::httpclient::SimpleHttpClient& httpClient, length = found - buffer.begin(); // found a \n somewhere; break at line } + jobData.stats.totalBatches++; result = ::sendRestoreData(httpClient, jobData.options, cname, buffer.begin(), length); @@ -483,9 +493,11 @@ arangodb::Result restoreData(arangodb::httpclient::SimpleHttpClient& httpClient, if (result.fail()) { if (jobData.options.force) { - LOG_TOPIC(ERR, Logger::RESTORE) << result.errorMessage(); + LOG_TOPIC(WARN, Logger::RESTORE) << "Error while restoring data into collection '" << cname << "': " << result.errorMessage(); result.reset(); continue; + } else { + LOG_TOPIC(ERR, Logger::RESTORE) << "Error while restoring data into collection '" << cname << "': " << result.errorMessage(); } return result; } @@ -759,7 +771,7 @@ arangodb::Result processInputDirectory( // if we get here we need to trigger foxx heal Result res = ::triggerFoxxHeal(httpClient); if (res.fail()) { - LOG_TOPIC(WARN, Logger::RESTORE) << "Reloading of Foxx failed. In the cluster Foxx Services will be available eventually, On SingleServers send a POST to '/_api/foxx/_local/heal' on the current database, with an empty body."; + LOG_TOPIC(WARN, Logger::RESTORE) << "Reloading of Foxx services failed. 
In the cluster Foxx services will be available eventually. On single servers, send a POST to '/_api/foxx/_local/heal' on the current database, with an empty body."; } } @@ -1047,7 +1059,7 @@ void RestoreFeature::start() { std::tie(result, _options.clusterMode) = _clientManager.getArangoIsCluster(*httpClient); if (result.fail()) { - LOG_TOPIC(ERR, arangodb::Logger::RESTORE) << result.errorMessage(); + LOG_TOPIC(FATAL, arangodb::Logger::RESTORE) << "Error: could not detect ArangoDB instance type: " << result.errorMessage(); _exitCode = EXIT_FAILURE; return; } @@ -1055,7 +1067,7 @@ void RestoreFeature::start() { std::tie(result, _options.indexesFirst) = _clientManager.getArangoIsUsingEngine(*httpClient, "rocksdb"); if (result.fail()) { - LOG_TOPIC(ERR, arangodb::Logger::RESTORE) << result.errorMessage(); + LOG_TOPIC(FATAL, arangodb::Logger::RESTORE) << "Error while trying to determine server storage engine: " << result.errorMessage(); _exitCode = EXIT_FAILURE; return; } @@ -1075,9 +1087,12 @@ void RestoreFeature::start() { try { result = ::processInputDirectory(*httpClient, _clientTaskQueue, *this, _options, *_directory, _stats); + } catch (basics::Exception const& ex) { + LOG_TOPIC(ERR, arangodb::Logger::RESTORE) << "caught exception: " << ex.what(); + result = {ex.code(), ex.what()}; } catch (std::exception const& ex) { LOG_TOPIC(ERR, arangodb::Logger::RESTORE) << "caught exception: " << ex.what(); - result = {TRI_ERROR_INTERNAL}; + result = {TRI_ERROR_INTERNAL, ex.what()}; } catch (...) { LOG_TOPIC(ERR, arangodb::Logger::RESTORE) << "caught unknown exception";