
Port 3.4 changes that give libcurl time to retry a failed DNS query. Also add changes to vocbase.cpp that were missed in a previous PR. (#7132)

Matthew Von-Maszewski 2018-10-30 16:00:13 -04:00 committed by Max Neunhöffer
parent c8961b2faa
commit a054e31f73
3 changed files with 47 additions and 29 deletions

View File

@@ -1,6 +1,9 @@
 devel
 -----
+* force connection timeout to be 7 seconds to allow libcurl time to retry lost DNS
+  queries.
 * fixes a routing issue within the web ui after the use of views
 * fixes some graph data parsing issues in the ui, e.g. cleaning up duplicate

View File

@@ -872,7 +872,10 @@ void TRI_vocbase_t::shutdown() {
   // starts unloading of collections
   for (auto& collection : collections) {
-    collection->close(); // required to release indexes
+    {
+      WRITE_LOCKER_EVENTUAL(locker, collection->lock());
+      collection->close(); // required to release indexes
+    }
     unloadCollection(collection.get(), true);
   }
@@ -1828,6 +1831,7 @@ TRI_vocbase_t::~TRI_vocbase_t() {
   // do a final cleanup of collections
   for (auto& it : _collections) {
+    WRITE_LOCKER_EVENTUAL(locker, it->lock());
     it->close(); // required to release indexes
   }
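
The vocbase.cpp change wraps each close() in a scoped write lock so that releasing the indexes cannot race with concurrent readers of the collection. As a rough illustration of the pattern only (not ArangoDB's WRITE_LOCKER_EVENTUAL macro, whose eventual-acquire semantics are internal), a generic C++ sketch with a hypothetical Collection type and std::shared_mutex might look like this:

#include <memory>
#include <mutex>
#include <shared_mutex>
#include <vector>

// Hypothetical stand-in for the collection type used in the diff.
struct Collection {
  std::shared_mutex mutex;  // readers take shared locks; close() needs exclusive
  void close() { /* release indexes */ }
};

void shutdownCollections(std::vector<std::unique_ptr<Collection>>& collections) {
  for (auto& collection : collections) {
    {
      // Scoped exclusive lock, analogous to the block added in the diff:
      // held only for the duration of close().
      std::unique_lock<std::shared_mutex> locker(collection->mutex);
      collection->close();
    }
    // Unloading happens outside the lock scope, as in the patched code.
  }
}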

View File

@@ -382,8 +382,11 @@ void Communicator::createRequestInProgress(NewRequest&& newRequest) {
   // in doubt change the timeout to _MS below and hardcode it to 999 and see if
   // the requests immediately fail
   // if not this hack can go away
-  if (connectTimeout <= 0) {
-    connectTimeout = 5;
+  if (connectTimeout <= 7) {
+    // matthewv: previously arangod default was 1. libcurl flushes its DNS cache
+    // every 60 seconds. Tests showed DNS packets lost under high load. libcurl
+    // retries DNS after 5 seconds. 7 seconds allows for one retry plus a little padding.
+    connectTimeout = 7;
   }

   curl_easy_setopt(
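
The net effect of this hunk is that the curl easy handle always ends up with a connect timeout of at least 7 seconds, which (per the comment above) leaves room for one roughly 5-second DNS retry plus some padding. A minimal standalone libcurl sketch of the same setting, using a placeholder URL rather than anything from the Communicator, could look like:

#include <curl/curl.h>
#include <iostream>

int main() {
  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURL* handle = curl_easy_init();
  if (handle != nullptr) {
    curl_easy_setopt(handle, CURLOPT_URL, "http://example.org/");  // placeholder URL
    // 7 seconds: enough for libcurl to retry a lost DNS query (~5 s) and still connect.
    curl_easy_setopt(handle, CURLOPT_CONNECTTIMEOUT, 7L);
    CURLcode rc = curl_easy_perform(handle);
    if (rc != CURLE_OK) {
      std::cerr << "request failed: " << curl_easy_strerror(rc) << std::endl;
    }
    curl_easy_cleanup(handle);
  }
  curl_global_cleanup();
  return 0;
}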
@@ -485,6 +488,14 @@ void Communicator::handleResult(CURL* handle, CURLcode rc) {
         << ::buildPrefix(rip->_ticketId) << "curl error details: " << rip->_errorBuffer;
   }

+  double namelookup;
+  curl_easy_getinfo(handle, CURLINFO_NAMELOOKUP_TIME, &namelookup);
+  if (5.0 <= namelookup) {
+    LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "libcurl DNS lookup took "
+        << namelookup << " seconds. Consider using static IP addresses.";
+  }  // if
+
   switch (rc) {
     case CURLE_OK: {
       long httpStatusCode = 200;
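
This new block reads libcurl's name-resolution timing so that slow DNS shows up in the log instead of surfacing only as a connect timeout. The same query can be made against any easy handle that has just completed a transfer; a small hedged example outside the Communicator class (plain stderr instead of LOG_TOPIC) would be:

#include <curl/curl.h>
#include <cstdio>

// Assumes 'handle' has just finished a transfer via curl_easy_perform().
void warnOnSlowDns(CURL* handle) {
  double namelookup = 0.0;
  if (curl_easy_getinfo(handle, CURLINFO_NAMELOOKUP_TIME, &namelookup) == CURLE_OK &&
      namelookup >= 5.0) {
    // A lookup of 5 s or more implies at least one DNS query was lost and retried.
    std::fprintf(stderr, "libcurl DNS lookup took %.1f seconds. "
                         "Consider using static IP addresses.\n", namelookup);
  }
}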