
Merge branch 'devel' of https://github.com/arangodb/arangodb into engine-api

jsteemann 2017-02-15 00:09:42 +01:00
commit 328ff8a09e
16 changed files with 212 additions and 164 deletions

View File

@ -25,7 +25,7 @@ devel
* added data export tool, arangoexport.
arangoexport can be used to export collections to json and jsonl
arangoexport can be used to export collections to json, jsonl or xml
and export a graph or collections to xgmml.

View File

@ -50,6 +50,13 @@ Export JSONL
This exports the collection *test* into the output directory *export* as jsonl. Every line in the export is one document from the collection *test* as json.
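As a minimal illustration (hypothetical documents, system attributes omitted for brevity), a jsonl export contains one JSON object per line:

{"_key":"1","name":"Alice"}
{"_key":"2","name":"Bob"}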
Export XML
----------
unix> arangoexport --type xml --collection test
This exports the collection *test* into the output directory *export* as generic XML. The root element of the generated XML file is named *collection*. Each document in the collection is exported as a *doc* XML element. Each document attribute is exported as a generic *att* element, which has a *type* attribute indicating the attribute value's type and a *value* attribute containing the attribute's value.
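A minimal sketch of what such a file could look like, assuming a single document with a *name* attribute (the exact *att* markup, including a possible *name* attribute, is produced by the shared XGMML attribute writer and may differ in detail):

<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<collection name="test">
<doc key="12345">
  <att name="name" type="string" value="Alice"/>
</doc>
</collection>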
Export XGMML
------------

View File

@ -308,7 +308,12 @@ MMFilesCollection::MMFilesCollection(LogicalCollection* collection)
, _ditches(collection)
, _initialCount(0), _lastRevision(0)
, _uncollectedLogfileEntries(0)
{}
, _nextCompactionStartIndex(0)
, _lastCompactionStatus(nullptr)
, _lastCompactionStamp(0.0)
{
setCompactionStatus("compaction not yet started");
}
MMFilesCollection::~MMFilesCollection() {
try {
@ -875,6 +880,37 @@ bool MMFilesCollection::closeDatafiles(std::vector<MMFilesDatafile*> const& file
}
void MMFilesCollection::figures(std::shared_ptr<arangodb::velocypack::Builder>& builder) {
// fills in compaction status
char const* lastCompactionStatus = "-";
char lastCompactionStampString[21];
lastCompactionStampString[0] = '-';
lastCompactionStampString[1] = '\0';
double lastCompactionStamp;
{
MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
lastCompactionStatus = _lastCompactionStatus;
lastCompactionStamp = _lastCompactionStamp;
}
if (lastCompactionStatus != nullptr) {
if (lastCompactionStamp == 0.0) {
lastCompactionStamp = TRI_microtime();
}
struct tm tb;
time_t tt = static_cast<time_t>(lastCompactionStamp);
TRI_gmtime(tt, &tb);
strftime(&lastCompactionStampString[0], sizeof(lastCompactionStampString),
"%Y-%m-%dT%H:%M:%SZ", &tb);
}
builder->add("compactionStatus", VPackValue(VPackValueType::Object));
builder->add("message", VPackValue(lastCompactionStatus));
builder->add("time", VPackValue(&lastCompactionStampString[0]));
builder->close(); // compactionStatus
builder->add("documentReferences", VPackValue(_ditches.numDocumentDitches()));
char const* waitingForDitch = _ditches.head();
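For reference, a rough sketch of the fragment this adds to the figures output, with illustrative values (the message is the status string set in the constructor; the timestamp is formatted by the strftime call above):

"compactionStatus" : {
  "message" : "compaction not yet started",
  "time" : "2017-02-15T00:09:42Z"
}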

View File

@ -152,12 +152,38 @@ class MMFilesCollection final : public PhysicalCollection {
/// @brief report extra memory used by indexes etc.
size_t memory() const override;
void preventCompaction() override;
bool tryPreventCompaction() override;
void allowCompaction() override;
void lockForCompaction() override;
bool tryLockForCompaction() override;
void finishCompaction() override;
//void preventCompaction() override;
//bool tryPreventCompaction() override;
//void allowCompaction() override;
//void lockForCompaction() override;
//bool tryLockForCompaction() override;
//void finishCompaction() override;
void preventCompaction();
bool tryPreventCompaction();
void allowCompaction();
void lockForCompaction();
bool tryLockForCompaction();
void finishCompaction();
void setNextCompactionStartIndex(size_t index){
MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
_nextCompactionStartIndex = index;
}
size_t getNextCompactionStartIndex(){
MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
return _nextCompactionStartIndex;
}
void setCompactionStatus(char const* reason){
TRI_ASSERT(reason != nullptr);
MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
_lastCompactionStatus = reason;
}
double lastCompactionStamp() const { return _lastCompactionStamp; }
void lastCompactionStamp(double value) { _lastCompactionStamp = value; }
Ditches* ditches() const override { return &_ditches; }
@ -356,8 +382,24 @@ class MMFilesCollection final : public PhysicalCollection {
std::atomic<int64_t> _uncollectedLogfileEntries;
Mutex _compactionStatusLock;
size_t _nextCompactionStartIndex;
char const* _lastCompactionStatus;
double _lastCompactionStamp;
};
inline MMFilesCollection* physicalToMMFiles(PhysicalCollection* physical){
auto rv = dynamic_cast<MMFilesCollection*>(physical);
assert(rv != nullptr);
return rv;
}
inline MMFilesCollection* logicalToMMFiles(LogicalCollection* logical){
auto phys = logical->getPhysical();
assert(phys);
return physicalToMMFiles(phys);
}
}
#endif

View File

@ -637,7 +637,7 @@ int MMFilesCollectorThread::processCollectionOperations(MMFilesCollectorCache* c
// first try to read-lock the compactor-lock, afterwards try to write-lock the
// collection
// if any locking attempt fails, release and try again next time
TryCompactionPreventer compactionPreventer(collection);
TryCompactionPreventer compactionPreventer(logicalToMMFiles(collection));
if (!compactionPreventer.isLocked()) {
return TRI_ERROR_LOCK_TIMEOUT;

View File

@ -627,7 +627,7 @@ bool MMFilesCompactorThread::compactCollection(LogicalCollection* collection, bo
// we already have created a compactor file in progress.
// if this happens, then a previous compaction attempt for this collection
// failed or is not finished yet
collection->setCompactionStatus(ReasonCompactionBlocked);
logicalToMMFiles(collection)->setCompactionStatus(ReasonCompactionBlocked);
wasBlocked = true;
return false;
}
@ -637,7 +637,7 @@ bool MMFilesCompactorThread::compactCollection(LogicalCollection* collection, bo
if (datafiles.empty()) {
// collection has no datafiles
collection->setCompactionStatus(ReasonNoDatafiles);
logicalToMMFiles(collection)->setCompactionStatus(ReasonNoDatafiles);
return false;
}
@ -648,7 +648,7 @@ bool MMFilesCompactorThread::compactCollection(LogicalCollection* collection, bo
size_t const n = datafiles.size();
LOG_TOPIC(DEBUG, Logger::COMPACTOR) << "inspecting datafiles of collection '" << collection->name() << "' for compaction opportunities";
size_t start = collection->getNextCompactionStartIndex();
size_t start = logicalToMMFiles(collection)->getNextCompactionStartIndex();
// get number of documents from collection
uint64_t const numDocuments = getNumberOfDocuments(collection);
@ -807,10 +807,10 @@ bool MMFilesCompactorThread::compactCollection(LogicalCollection* collection, bo
if (toCompact.empty()) {
// nothing to compact. now reset start index
collection->setNextCompactionStartIndex(0);
logicalToMMFiles(collection)->setNextCompactionStartIndex(0);
// cleanup local variables
collection->setCompactionStatus(ReasonNothingToCompact);
logicalToMMFiles(collection)->setCompactionStatus(ReasonNothingToCompact);
LOG_TOPIC(DEBUG, Logger::COMPACTOR) << "inspecting datafiles of collection yielded: " << ReasonNothingToCompact;
return false;
}
@ -818,8 +818,8 @@ bool MMFilesCompactorThread::compactCollection(LogicalCollection* collection, bo
// handle datafiles with dead objects
TRI_ASSERT(toCompact.size() >= 1);
TRI_ASSERT(reason != nullptr);
collection->setCompactionStatus(reason);
collection->setNextCompactionStartIndex(start);
logicalToMMFiles(collection)->setCompactionStatus(reason);
logicalToMMFiles(collection)->setNextCompactionStartIndex(start);
compactDatafiles(collection, toCompact);
return true;
@ -872,7 +872,7 @@ void MMFilesCompactorThread::run() {
// check whether someone else holds a read-lock on the compaction
// lock
TryCompactionLocker compactionLocker(collection);
TryCompactionLocker compactionLocker(logicalToMMFiles(collection));
if (!compactionLocker.isLocked()) {
// someone else is holding the compactor lock, we'll not compact
@ -881,7 +881,7 @@ void MMFilesCompactorThread::run() {
try {
double const now = TRI_microtime();
if (collection->lastCompactionStamp() + compactionCollectionInterval() <= now) {
if (logicalToMMFiles(collection)->lastCompactionStamp() + compactionCollectionInterval() <= now) {
auto ce = collection->ditches()->createCompactionDitch(__FILE__,
__LINE__);
@ -895,7 +895,7 @@ void MMFilesCompactorThread::run() {
if (!worked && !wasBlocked) {
// set compaction stamp
collection->lastCompactionStamp(now);
logicalToMMFiles(collection)->lastCompactionStamp(now);
}
// if we worked or were blocked, then we don't set the compaction stamp to
// force another round of compaction

View File

@ -243,7 +243,7 @@ int MMFilesTransactionCollection::use(int nestingLevel) {
// read-lock the compaction lock
if (!_transaction->_hints.has(transaction::Hints::Hint::NO_COMPACTION_LOCK)) {
if (!_compactionLocked) {
_collection->preventCompaction();
logicalToMMFiles(_collection)->preventCompaction();
_compactionLocked = true;
}
}
@ -284,7 +284,7 @@ void MMFilesTransactionCollection::unuse(int nestingLevel) {
if (!_transaction->_hints.has(transaction::Hints::Hint::NO_COMPACTION_LOCK)) {
if (AccessMode::isWriteOrExclusive(_accessType) && _compactionLocked) {
// read-unlock the compaction lock
_collection->allowCompaction();
logicalToMMFiles(_collection)->allowCompaction();
_compactionLocked = false;
}
}

View File

@ -25,13 +25,13 @@
#define ARANGOD_VOCBASE_COMPACTION_LOCKER_H 1
#include "Basics/Common.h"
#include "VocBase/LogicalCollection.h"
#include "MMFiles/MMFilesCollection.h"
namespace arangodb {
class CompactionPreventer {
public:
explicit CompactionPreventer(LogicalCollection* collection)
explicit CompactionPreventer(MMFilesCollection* collection)
: _collection(collection) {
_collection->preventCompaction();
}
@ -39,12 +39,12 @@ class CompactionPreventer {
~CompactionPreventer() { _collection->allowCompaction(); }
private:
LogicalCollection* _collection;
MMFilesCollection* _collection;
};
class TryCompactionPreventer {
public:
explicit TryCompactionPreventer(LogicalCollection* collection)
explicit TryCompactionPreventer(MMFilesCollection* collection)
: _collection(collection), _isLocked(false) {
_isLocked = _collection->tryPreventCompaction();
}
@ -58,13 +58,13 @@ class TryCompactionPreventer {
bool isLocked() const { return _isLocked; }
private:
LogicalCollection* _collection;
MMFilesCollection* _collection;
bool _isLocked;
};
class CompactionLocker {
public:
explicit CompactionLocker(LogicalCollection* collection)
explicit CompactionLocker(MMFilesCollection* collection)
: _collection(collection) {
_collection->lockForCompaction();
}
@ -74,12 +74,12 @@ class CompactionLocker {
}
private:
LogicalCollection* _collection;
MMFilesCollection* _collection;
};
class TryCompactionLocker {
public:
explicit TryCompactionLocker(LogicalCollection* collection)
explicit TryCompactionLocker(MMFilesCollection* collection)
: _collection(collection), _isLocked(false) {
_isLocked = _collection->tryLockForCompaction();
}
@ -93,7 +93,7 @@ class TryCompactionLocker {
bool isLocked() const { return _isLocked; }
private:
LogicalCollection* _collection;
MMFilesCollection* _collection;
bool _isLocked;
};

View File

@ -227,9 +227,6 @@ LogicalCollection::LogicalCollection(LogicalCollection const& other)
_useSecondaryIndexes(true),
_maxTick(0),
_keyGenerator(),
_nextCompactionStartIndex(0),
_lastCompactionStatus(nullptr),
_lastCompactionStamp(0.0),
_isInitialIteration(false),
_revisionError(false) {
_keyGenerator.reset(KeyGenerator::factory(other.keyOptions()));
@ -245,7 +242,6 @@ LogicalCollection::LogicalCollection(LogicalCollection const& other)
_indexes.emplace_back(idx);
}
setCompactionStatus("compaction not yet started");
}
// @brief Constructor used in coordinator case.
@ -291,9 +287,6 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,
_useSecondaryIndexes(true),
_maxTick(0),
_keyGenerator(),
_nextCompactionStartIndex(0),
_lastCompactionStatus(nullptr),
_lastCompactionStamp(0.0),
_isInitialIteration(false),
_revisionError(false) {
if (!IsAllowedName(info)) {
@ -514,7 +507,6 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,
// update server's tick value
TRI_UpdateTickServer(static_cast<TRI_voc_tick_t>(_cid));
setCompactionStatus("compaction not yet started");
}
LogicalCollection::~LogicalCollection() {}
@ -1179,36 +1171,6 @@ std::shared_ptr<arangodb::velocypack::Builder> LogicalCollection::figures() {
)
);
// fills in compaction status
char const* lastCompactionStatus = "-";
char lastCompactionStampString[21];
lastCompactionStampString[0] = '-';
lastCompactionStampString[1] = '\0';
double lastCompactionStamp;
{
MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
lastCompactionStatus = _lastCompactionStatus;
lastCompactionStamp = _lastCompactionStamp;
}
if (lastCompactionStatus != nullptr) {
if (lastCompactionStamp == 0.0) {
lastCompactionStamp = TRI_microtime();
}
struct tm tb;
time_t tt = static_cast<time_t>(lastCompactionStamp);
TRI_gmtime(tt, &tb);
strftime(&lastCompactionStampString[0], sizeof(lastCompactionStampString),
"%Y-%m-%dT%H:%M:%SZ", &tb);
}
builder->add("compactionStatus", VPackValue(VPackValueType::Object));
builder->add("message", VPackValue(lastCompactionStatus));
builder->add("time", VPackValue(&lastCompactionStampString[0]));
builder->close(); // compactionStatus
// add engine-specific figures
getPhysical()->figures(builder);
builder->close();

View File

@ -112,25 +112,6 @@ class LogicalCollection {
// TODO: MOVE TO PHYSICAL?
bool isFullyCollected(); //should not be exposed
void setNextCompactionStartIndex(size_t index) {
MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
_nextCompactionStartIndex = index;
}
size_t getNextCompactionStartIndex() {
MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
return _nextCompactionStartIndex;
}
void setCompactionStatus(char const* reason) {
TRI_ASSERT(reason != nullptr);
MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
_lastCompactionStatus = reason;
}
double lastCompactionStamp() const { return _lastCompactionStamp; }
void lastCompactionStamp(double value) { _lastCompactionStamp = value; }
void setRevisionError() { _revisionError = true; }
// SECTION: Meta Information
@ -309,16 +290,16 @@ class LogicalCollection {
return getPhysical()->applyForTickRange(dataMin, dataMax, callback);
}
/// @brief disallow starting the compaction of the collection
void preventCompaction() { getPhysical()->preventCompaction(); }
bool tryPreventCompaction() { return getPhysical()->tryPreventCompaction(); }
/// @brief re-allow starting the compaction of the collection
void allowCompaction() { getPhysical()->allowCompaction(); }
// /// @brief disallow starting the compaction of the collection
// void preventCompaction() { getPhysical()->preventCompaction(); }
// bool tryPreventCompaction() { return getPhysical()->tryPreventCompaction(); }
// /// @brief re-allow starting the compaction of the collection
// void allowCompaction() { getPhysical()->allowCompaction(); }
/// @brief compaction finished
void lockForCompaction() { getPhysical()->lockForCompaction(); }
bool tryLockForCompaction() { return getPhysical()->tryLockForCompaction(); }
void finishCompaction() { getPhysical()->finishCompaction(); }
// /// @brief compaction finished
// void lockForCompaction() { getPhysical()->lockForCompaction(); }
// bool tryLockForCompaction() { return getPhysical()->tryLockForCompaction(); }
// void finishCompaction() { getPhysical()->finishCompaction(); }
void sizeHint(transaction::Methods* trx, int64_t hint);
@ -565,11 +546,6 @@ class LogicalCollection {
mutable basics::ReadWriteLock
_infoLock; // lock protecting the info
Mutex _compactionStatusLock;
size_t _nextCompactionStartIndex;
char const* _lastCompactionStatus;
double _lastCompactionStamp;
/// @brief: flag that is set to true when the documents are
/// initial enumerated and the primary index is built
bool _isInitialIteration;

View File

@ -76,26 +76,26 @@ class PhysicalCollection {
/// @brief report extra memory used by indexes etc.
virtual size_t memory() const = 0;
/// @brief disallow compaction of the collection
/// after this call it is guaranteed that no compaction will be started until allowCompaction() is called
virtual void preventCompaction() = 0;
// /// @brief disallow compaction of the collection
// /// after this call it is guaranteed that no compaction will be started until allowCompaction() is called
// virtual void preventCompaction() = 0;
/// @brief try disallowing compaction of the collection
/// returns true if compaction is disallowed, and false if not
virtual bool tryPreventCompaction() = 0;
// /// @brief try disallowing compaction of the collection
// /// returns true if compaction is disallowed, and false if not
// virtual bool tryPreventCompaction() = 0;
/// @brief re-allow compaction of the collection
virtual void allowCompaction() = 0;
/// @brief exclusively lock the collection for compaction
virtual void lockForCompaction() = 0;
/// @brief try to exclusively lock the collection for compaction
/// after this call it is guaranteed that no compaction will be started until allowCompaction() is called
virtual bool tryLockForCompaction() = 0;
// /// @brief re-allow compaction of the collection
// virtual void allowCompaction() = 0;
//
// /// @brief exclusively lock the collection for compaction
// virtual void lockForCompaction() = 0;
//
// /// @brief try to exclusively lock the collection for compaction
// /// after this call it is guaranteed that no compaction will be started until allowCompaction() is called
// virtual bool tryLockForCompaction() = 0;
/// @brief signal that compaction is finished
virtual void finishCompaction() = 0;
// /// @brief signal that compaction is finished
// virtual void finishCompaction() = 0;
/// @brief iterate all markers of a collection on load
virtual int iterateMarkersOnLoad(transaction::Methods* trx) = 0;

View File

@ -27,15 +27,16 @@
#include "Basics/StringRef.h"
#include "Basics/VPackStringBufferAdapter.h"
#include "Logger/Logger.h"
#include "MMFiles/MMFilesCollection.h"
#include "MMFiles/MMFilesDatafile.h"
#include "MMFiles/MMFilesDatafileHelper.h"
#include "MMFiles/MMFilesLogfileManager.h"
#include "MMFiles/MMFilesWalLogfile.h"
#include "MMFiles/MMFilesWalMarker.h"
#include "VocBase/CompactionLocker.h"
#include "VocBase/Ditch.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/vocbase.h"
#include "MMFiles/MMFilesWalLogfile.h"
#include <velocypack/Dumper.h>
#include <velocypack/Options.h>
@ -620,7 +621,7 @@ int TRI_DumpCollectionReplication(TRI_replication_dump_t* dump,
// block compaction
int res;
{
CompactionPreventer compactionPreventer(collection);
CompactionPreventer compactionPreventer(logicalToMMFiles(collection));
try {
res = DumpCollection(dump, collection, collection->vocbase()->id(),

View File

@ -92,14 +92,9 @@ void ExportFeature::collectOptions(
options->addOption("--progress", "show progress",
new BooleanParameter(&_progress));
std::unordered_set<std::string> exportsWithUpperCase = {"json", "jsonl", "xgmml",
"JSON", "JSONL", "XGMML"};
std::unordered_set<std::string> exports = {"json", "jsonl", "xgmml"};
std::vector<std::string> exportsVector(exports.begin(), exports.end());
std::string exportsJoined = StringUtils::join(exportsVector, ", ");
std::unordered_set<std::string> exports = {"json", "jsonl", "xgmml", "xml"};
options->addOption(
"--type", "type of export (" + exportsJoined + ")",
new DiscreteValuesParameter<StringParameter>(&_typeExport, exportsWithUpperCase));
"--type", "type of export", new DiscreteValuesParameter<StringParameter>(&_typeExport, exports));
}
void ExportFeature::validateOptions(
@ -128,8 +123,6 @@ void ExportFeature::validateOptions(
FATAL_ERROR_EXIT();
}
std::transform(_typeExport.begin(), _typeExport.end(), _typeExport.begin(), ::tolower);
if (_typeExport == "xgmml" && _graphName.empty() ) {
LOG_TOPIC(FATAL, Logger::CONFIG) << "expecting a graph name to dump a graph";
FATAL_ERROR_EXIT();
@ -216,11 +209,11 @@ void ExportFeature::start() {
uint64_t exportedSize = 0;
if (_typeExport == "json" || _typeExport == "jsonl") {
if (_typeExport == "json" || _typeExport == "jsonl" || _typeExport == "xml") {
if (_collections.size()) {
collectionExport(httpClient.get());
for(auto const& collection : _collections) {
for (auto const& collection : _collections) {
std::string filePath = _outputDirectory + TRI_DIR_SEPARATOR_STR + collection + "." + _typeExport;
int64_t fileSize = TRI_SizeFile(filePath.c_str());
@ -262,9 +255,6 @@ void ExportFeature::collectionExport(SimpleHttpClient* httpClient) {
TRI_UnlinkFile(fileName.c_str());
}
int fd = -1;
TRI_DEFER(TRI_CLOSE(fd));
std::string const url = "_api/cursor";
VPackBuilder post;
@ -278,32 +268,44 @@ void ExportFeature::collectionExport(SimpleHttpClient* httpClient) {
std::shared_ptr<VPackBuilder> parsedBody = httpCall(httpClient, url, rest::RequestType::POST, post.toJson());
VPackSlice body = parsedBody->slice();
fd = TRI_CREATE(fileName.c_str(), O_CREAT | O_EXCL | O_RDWR | TRI_O_CLOEXEC,
int fd = TRI_CREATE(fileName.c_str(), O_CREAT | O_EXCL | O_RDWR | TRI_O_CLOEXEC,
S_IRUSR | S_IWUSR);
if (fd < 0) {
errorMsg = "cannot write to file '" + fileName + "'";
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CANNOT_WRITE_FILE, errorMsg);
}
TRI_DEFER(TRI_CLOSE(fd));
_firstLine = true;
if (_typeExport == "json") {
std::string openingBracket = "[\n";
std::string openingBracket = "[";
writeToFile(fd, openingBracket, fileName);
} else if (_typeExport == "xml") {
std::string xmlHeader = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n"
"<collection name=\"";
xmlHeader.append(encode_char_entities(collection));
xmlHeader.append("\">\n");
writeToFile(fd, xmlHeader, fileName);
}
writeCollectionBatch(fd, VPackArrayIterator(body.get("result")), fileName);
while (body.hasKey("id")) {
std::string const url = "/_api/cursor/"+body.get("id").copyString();
std::string const url = "/_api/cursor/" + body.get("id").copyString();
parsedBody = httpCall(httpClient, url, rest::RequestType::PUT);
body = parsedBody->slice();
writeCollectionBatch(fd, VPackArrayIterator(body.get("result")), fileName);
}
if (_typeExport == "json") {
std::string closingBracket = "]\n";
writeToFile(fd, closingBracket , fileName);
std::string closingBracket = "\n]";
writeToFile(fd, closingBracket, fileName);
} else if (_typeExport == "xml") {
std::string xmlFooter = "</collection>";
writeToFile(fd, xmlFooter, fileName);
}
}
}
@ -312,18 +314,39 @@ void ExportFeature::writeCollectionBatch(int fd, VPackArrayIterator it, std::str
std::string line;
line.reserve(1024);
for (auto const& doc : it) {
line.clear();
if (_firstLine && _typeExport == "json") {
_firstLine = false;
} else if (!_firstLine && _typeExport == "json") {
line.push_back(',');
if (_typeExport == "jsonl") {
for (auto const& doc : it) {
line.clear();
line += doc.toJson();
line.push_back('\n');
writeToFile(fd, line, fileName);
}
} else if (_typeExport == "json") {
for (auto const& doc : it) {
line.clear();
if (!_firstLine) {
line.append(",\n ", 4);
} else {
line.append("\n ", 3);
_firstLine = false;
}
line += doc.toJson();
writeToFile(fd, line, fileName);
}
} else if (_typeExport == "xml") {
for (auto const& doc : it) {
line.clear();
line.append("<doc key=\"");
line.append(encode_char_entities(doc.get("_key").copyString()));
line.append("\">\n");
writeToFile(fd, line, fileName);
for (auto const& att : VPackObjectIterator(doc)) {
xgmmlWriteOneAtt(fd, fileName, att.value, att.key.copyString(), 2);
}
line.clear();
line.append("</doc>\n");
writeToFile(fd, line, fileName);
}
line += doc.toJson();
line.push_back('\n');
writeToFile(fd, line, fileName);
}
}
@ -497,8 +520,7 @@ void ExportFeature::writeGraphBatch(int fd, VPackArrayIterator it, std::string c
writeToFile(fd, xmlTag, fileName);
for (auto const& it : VPackObjectIterator(doc)) {
xmlTag = encode_char_entities(it.key.copyString());
xgmmlWriteOneAtt(fd, fileName, it.value, xmlTag);
xgmmlWriteOneAtt(fd, fileName, it.value, it.key.copyString());
}
xmlTag = "</edge>\n";
@ -518,8 +540,7 @@ void ExportFeature::writeGraphBatch(int fd, VPackArrayIterator it, std::string c
writeToFile(fd, xmlTag, fileName);
for (auto const& it : VPackObjectIterator(doc)) {
xmlTag = encode_char_entities(it.key.copyString());
xgmmlWriteOneAtt(fd, fileName, it.value, xmlTag);
xgmmlWriteOneAtt(fd, fileName, it.value, it.key.copyString());
}
xmlTag = "</node>\n";

View File

@ -104,7 +104,7 @@ std::pair<std::shared_ptr<LogAppender>, LogTopic*> LogAppender::buildAppender(
}
}
auto key = make_pair(output, contentFilter);
auto key = std::make_pair(output, contentFilter);
#ifdef ARANGODB_ENABLE_SYSLOG
if (StringUtils::isPrefix(output, "syslog://")) {

View File

@ -271,13 +271,13 @@ void Logger::log(char const* function, char const* file, long int line,
}
// output prefix
if (! _outputPrefix.empty()) {
if (!_outputPrefix.empty()) {
out << _outputPrefix << " ";
}
// append the process / thread identifier
{
char processPrefix[128];
char processPrefix[48];
TRI_pid_t processId = Thread::currentProcessId();
@ -294,7 +294,7 @@ void Logger::log(char const* function, char const* file, long int line,
}
// log level
out << Logger::translateLogLevel(level) << " ";
out << Logger::translateLogLevel(level) << ' ';
// check if we must display the line number
if (_showLineNumber) {
@ -307,7 +307,7 @@ void Logger::log(char const* function, char const* file, long int line,
filename = shortened + 1;
}
}
out << "[" << filename << ":" << line << "] ";
out << '[' << filename << ':' << line << "] ";
}
// generate the complete message
@ -315,16 +315,19 @@ void Logger::log(char const* function, char const* file, long int line,
size_t offset = out.str().size() - message.size();
auto msg = std::make_unique<LogMessage>(level, topicId, out.str(), offset);
bool const isDirectLogLevel = (level == LogLevel::FATAL || level == LogLevel::ERR || level == LogLevel::WARN);
// now either queue or output the message
if (_threaded) {
if (_threaded && !isDirectLogLevel) {
try {
_loggingThread->log(msg);
return;
} catch (...) {
LogAppender::log(msg.get());
// fall-through to non-threaded logging
}
} else {
LogAppender::log(msg.get());
}
LogAppender::log(msg.get());
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -86,7 +86,7 @@ struct Option {
} else {
std::string description = parameter->description();
if (!description.empty()) {
value.push_back(' ');
value.append(". ");
value.append(description);
}
value += " (default: " + parameter->valueString() + ")";