mirror of https://gitee.com/bigwinds/arangodb
more trace logging
This commit is contained in:
parent af0fcc187c
commit 47b77150f3
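The hunks below add LOG_TRACE calls where jobs are rejected, statistics slots run out, deadlocks are detected, and indexes are resized. As a rough, hedged illustration of the printf-style pattern these calls follow (MY_TRACE here is a simplified stand-in, not ArangoDB's actual LOG_TRACE macro, which also records severity, file and line):

#include <cstdio>

// Simplified stand-in for a printf-style trace macro (illustration only).
#define MY_TRACE(...)                      \
  do {                                     \
    std::fprintf(stderr, "TRACE ");        \
    std::fprintf(stderr, __VA_ARGS__);     \
    std::fputc('\n', stderr);              \
  } while (0)

int main() {
  void* queue = nullptr;  // hypothetical pointer, for illustration only
  MY_TRACE("cannot add job %p to queue %p. queue is full", queue, queue);
  return 0;
}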
@@ -123,7 +123,7 @@ int Dispatcher::addJob(std::unique_ptr<Job>& job) {
   job->requestStatisticsAgentSetQueueStart();
 
   // do not start new jobs if we are already shutting down
-  if (_stopping.load(memory_order_relaxed)) {
+  if (_stopping.load(std::memory_order_relaxed)) {
     return TRI_ERROR_DISPATCHER_IS_STOPPING;
   }
 
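The only change in the hunk above qualifies memory_order_relaxed with its std:: namespace. A minimal sketch of the same pattern, an atomic stopping flag checked with a relaxed load before accepting new work (the names and return values here are illustrative, not ArangoDB's):

#include <atomic>

struct Dispatcher {
  std::atomic<bool> _stopping{false};

  // Reject new jobs once shutdown has started; a relaxed load suffices
  // because only the flag's value matters, not ordering with other data.
  bool addJob() {
    if (_stopping.load(std::memory_order_relaxed)) {
      return false;  // stand-in for TRI_ERROR_DISPATCHER_IS_STOPPING
    }
    // ... enqueue the job ...
    return true;
  }
};

int main() {
  Dispatcher d;
  d._stopping.store(true, std::memory_order_relaxed);
  return d.addJob() ? 1 : 0;
}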
@@ -98,6 +98,7 @@ int DispatcherQueue::addJob(std::unique_ptr<Job>& job) {
   size_t pos;
 
   if (!_jobPositions.pop(pos)) {
+    LOG_TRACE("cannot add job %p to queue %p. queue is full", (void*) job.get(), (void*) this);
     return TRI_ERROR_QUEUE_FULL;
   }
 
@@ -254,6 +254,8 @@ bool HttpServer::handleRequest(HttpCommTask* task,
   // use a dispatcher queue, handler belongs to the job
   std::unique_ptr<Job> job = std::make_unique<HttpServerJob>(this, handler);
 
+  LOG_TRACE("HttpCommTask %p created HttpServerJob %p", (void*) task, (void*) job.get());
+
   // add the job to the dispatcher
   int res = _dispatcher->addJob(job);
 
@@ -22,6 +22,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 #include "statistics.h"
+#include "Basics/logging.h"
 #include "Basics/Mutex.h"
 #include "Basics/MutexLocker.h"
 #include "Basics/threads.h"
@@ -140,6 +141,8 @@ TRI_request_statistics_t* TRI_AcquireRequestStatistics() {
     return statistics;
   }
 
+  LOG_TRACE("no free element on statistics queue");
+
   return nullptr;
 }
 
@@ -989,6 +989,8 @@ static bool CompactifyDocumentCollection(TRI_document_collection_t* document) {
     return false;
   }
 
+  LOG_TRACE("inspecting datafiles of collection '%s' for compaction opportunities", document->_info.namec_str());
+
   size_t start = document->getNextCompactionStartIndex();
 
   // number of documents is protected by the same lock
@@ -1029,7 +1031,7 @@ static bool CompactifyDocumentCollection(TRI_document_collection_t* document) {
     DatafileStatisticsContainer dfi = document->_datafileStatistics.get(df->_fid);
 
     if (dfi.numberUncollected > 0) {
-      LOG_TRACE("cannot compact file %llu because it still has uncollected entries", (unsigned long long) df->_fid);
+      LOG_TRACE("cannot compact datafile %llu of collection '%s' because it still has uncollected entries", (unsigned long long) df->_fid, document->_info.namec_str());
       start = i + 1;
       break;
     }
@@ -259,6 +259,7 @@ int TRI_document_collection_t::beginReadTimed(uint64_t timeout,
       // insert reader
       if (_vocbase->_deadlockDetector.setReaderBlocked(this)) {
         // deadlock
+        LOG_TRACE("deadlock detected while trying to acquire read-lock on collection '%s'", _info.name().c_str());
         return TRI_ERROR_DEADLOCK;
       }
       wasBlocked = true;
@@ -270,6 +271,7 @@ int TRI_document_collection_t::beginReadTimed(uint64_t timeout,
       if (_vocbase->_deadlockDetector.isDeadlocked(this)) {
         // deadlock
         _vocbase->_deadlockDetector.setReaderUnblocked(this);
+        LOG_TRACE("deadlock detected while trying to acquire read-lock on collection '%s'", _info.name().c_str());
         return TRI_ERROR_DEADLOCK;
       }
     }
@@ -338,6 +340,7 @@ int TRI_document_collection_t::beginWriteTimed(uint64_t timeout,
      // insert writer (with method named "setReaderBlocked"..., but it works)
      if (_vocbase->_deadlockDetector.setReaderBlocked(this)) {
        // deadlock
+       LOG_TRACE("deadlock detected while trying to acquire write-lock on collection '%s'", _info.name().c_str());
        return TRI_ERROR_DEADLOCK;
      }
      wasBlocked = true;
@@ -349,6 +352,7 @@ int TRI_document_collection_t::beginWriteTimed(uint64_t timeout,
       if (_vocbase->_deadlockDetector.isDeadlocked(this)) {
         // deadlock
         _vocbase->_deadlockDetector.setReaderUnblocked(this);
+        LOG_TRACE("deadlock detected while trying to acquire write-lock on collection '%s'", _info.name().c_str());
         return TRI_ERROR_DEADLOCK;
       }
     }
 
@@ -1086,6 +1086,9 @@ class AssocMulti {
   //////////////////////////////////////////////////////////////////////////////
 
   void resizeInternal(UserData* userData, Bucket& b, size_t size) {
+    LOG_TRACE("resizing index %s, target size: %llu", _contextCallback().c_str(),
+              (unsigned long long)size);
+
     LOG_ACTION("index-resize %s, target size: %llu", _contextCallback().c_str(),
                (unsigned long long)size);
     double start = TRI_microtime();
@@ -1159,6 +1162,8 @@ class AssocMulti {
     }
 
     delete[] oldTable;
 
+    LOG_TRACE("resizing index %s done", _contextCallback().c_str());
+
     LOG_TIMER((TRI_microtime() - start), "index-resize, %s, target size: %llu",
               _contextCallback().c_str(), (unsigned long long)size);
@@ -193,6 +193,10 @@ class AssocUnique {
     // entries
     static uint64_t const NotificationSizeThreshold = 131072;
 
+    LOG_TRACE("resizing index %s, target size: %llu",
+              _contextCallback().c_str(),
+              (unsigned long long) targetSize);
+
     double start = TRI_microtime();
     if (targetSize > NotificationSizeThreshold) {
       LOG_ACTION("index-resize %s, target size: %llu",
@@ -249,6 +253,8 @@ class AssocUnique {
     }
 
     delete[] oldTable;
 
+    LOG_TRACE("resizing index %s done", _contextCallback().c_str());
+
     LOG_TIMER((TRI_microtime() - start), "index-resize %s, target size: %llu",
               _contextCallback().c_str(), (unsigned long long)targetSize);
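The AssocMulti and AssocUnique hunks bracket each resize: a trace message before the work, then a "done" trace plus a LOG_TIMER of the elapsed wall-clock time afterwards. A rough sketch of that bracket-and-time pattern using std::chrono (illustrative only; the function name is hypothetical and the real code uses TRI_microtime() and ArangoDB's logging macros):

#include <chrono>
#include <cstdio>

// Illustrative resize wrapper: log intent, do the work, then log completion
// and the elapsed time, mirroring the LOG_TRACE / LOG_TIMER bracket above.
void resizeIndex(const char* name, unsigned long long targetSize) {
  std::fprintf(stderr, "resizing index %s, target size: %llu\n", name, targetSize);

  auto start = std::chrono::steady_clock::now();
  // ... allocate the new table and rehash all entries into it ...
  double elapsed = std::chrono::duration<double>(
      std::chrono::steady_clock::now() - start).count();

  std::fprintf(stderr, "resizing index %s done\n", name);
  std::fprintf(stderr, "index-resize %s, target size: %llu took %f s\n",
               name, targetSize, elapsed);
}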