1
0
Fork 0

Merge branch 'generic-col-types' of github.com:arangodb/arangodb into generic-col-types

This commit is contained in:
Michael Hackstein 2016-09-08 09:15:46 +02:00
commit 0f86530bbf
16 changed files with 241 additions and 211 deletions

1
.gitignore vendored
View File

@ -25,6 +25,7 @@ core.*
*.lnk
Thumbs.db
enterprise
compile_commands.json
instanceinfo.json
testresult.json

View File

@ -49,6 +49,15 @@ if (USE_DEV_TIMERS)
add_definitions("-DUSE_DEV_TIMERS=1")
endif ()
# enable enterprise features
set(ENTERPRISE_INCLUDE_DIR "enterprise")
option(USE_ENTERPRISE OFF)
if (USE_ENTERPRISE)
add_definitions("-DUSE_ENTERPRISE=1")
include_directories(${ENTERPRISE_INCLUDE_DIR})
endif ()
################################################################################
## ARANGODB
################################################################################

View File

@ -245,7 +245,7 @@ JOB_STATUS AddFollower::status () {
if (status == PENDING) {
std::string curPath =
curColPrefix + _database + "/" + _collection + "/" + _shard + "/servers";
Slice current = _snapshot(curPath).slice();
for (auto const& srv : VPackArrayIterator(current)) {
if (srv.copyString() == _newFollower) {
@ -254,6 +254,7 @@ JOB_STATUS AddFollower::status () {
}
}
}
}
return status;

View File

@ -127,6 +127,7 @@ TraverserEngine::TraverserEngine(TRI_vocbase_t* vocbase,
}
TraverserEngine::~TraverserEngine() {
/*
auto resolver = _trx->resolver();
// TODO Do we need this or will delete trx do this already?
for (auto const& shard : _locked) {
@ -141,9 +142,10 @@ TraverserEngine::~TraverserEngine() {
<< TRI_errno_string(res);
}
}
if (_trx != nullptr) {
*/
if (_trx) {
_trx->commit();
delete _trx;
_trx = nullptr;
}
if (_query != nullptr) {
delete _query;
@ -178,7 +180,7 @@ void TraverserEngine::getEdges(VPackSlice vertex, size_t depth, VPackBuilder& bu
// Result now contains all valid edges, probably multiples.
}
} else if (vertex.isString()) {
auto edgeCursor = _opts->nextCursor(vertex, depth);
std::unique_ptr<arangodb::traverser::EdgeCursor> edgeCursor(_opts->nextCursor(vertex, depth));
while(edgeCursor->next(result, cursorId)) {
if (!_opts->evaluateEdgeExpression(result.back(), vertex, depth, cursorId)) {

View File

@ -42,37 +42,21 @@ TraverserEngineRegistry::EngineInfo::~EngineInfo() {
}
TraverserEngineRegistry::~TraverserEngineRegistry() {
std::vector<TraverserEngineID> toDelete;
{
WRITE_LOCKER(writeLocker, _lock);
try {
for (auto const& it : _engines) {
toDelete.emplace_back(it.first);
}
} catch (...) {
// the emplace_back() above might fail
// prevent throwing exceptions in the destructor
}
}
// note: destroy() will acquire _lock itself, so it must be called without
// holding the lock
for (auto& p : toDelete) {
try { // just in case
destroy(p);
} catch (...) {
}
WRITE_LOCKER(writeLocker, _lock);
for (auto const& it : _engines) {
destroy(it.first, false);
}
}
/// @brief Create a new Engine and return its id
TraverserEngineID TraverserEngineRegistry::createNew(TRI_vocbase_t* vocbase,
VPackSlice engineInfo) {
WRITE_LOCKER(writeLocker, _lock);
TraverserEngineID id = TRI_NewTickServer();
TRI_ASSERT(id != 0);
TRI_ASSERT(_engines.find(id) == _engines.end());
auto info = std::make_unique<EngineInfo>(vocbase, engineInfo);
WRITE_LOCKER(writeLocker, _lock);
TRI_ASSERT(_engines.find(id) == _engines.end());
_engines.emplace(id, info.get());
info.release();
return id;
@ -80,23 +64,7 @@ TraverserEngineID TraverserEngineRegistry::createNew(TRI_vocbase_t* vocbase,
/// @brief Destroy the engine with the given id
void TraverserEngineRegistry::destroy(TraverserEngineID id) {
WRITE_LOCKER(writeLocker, _lock);
auto e = _engines.find(id);
if (e == _engines.end()) {
// Nothing to destroy
// TODO: Should we throw an error instead?
return;
}
// TODO what about shard locking?
// TODO what about multiple dbs?
if (e->second->_isInUse) {
// Someone is still working with this engine.
// TODO can we just delete it? Or throw an error?
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEADLOCK);
}
delete e->second;
_engines.erase(id);
destroy(id, true);
}
/// @brief Get the engine with the given id
@ -131,3 +99,30 @@ void TraverserEngineRegistry::returnEngine(TraverserEngineID id) {
}
// TODO Should we throw an error if we are not allowed to return this
}
/// @brief Destroy the engine with the given id, worker function
/// @param id     id of the engine to remove from the registry
/// @param doLock whether to acquire _lock here; callers that already hold
///               the write lock (e.g. the registry destructor) pass false
void TraverserEngineRegistry::destroy(TraverserEngineID id, bool doLock) {
EngineInfo* engine = nullptr;
{
// lock is taken conditionally so this worker can run both standalone
// and from code paths that already hold _lock
CONDITIONAL_WRITE_LOCKER(writeLocker, _lock, doLock);
auto e = _engines.find(id);
if (e == _engines.end()) {
// Nothing to destroy
// TODO: Should we throw an error instead?
return;
}
// TODO what about shard locking?
// TODO what about multiple dbs?
if (e->second->_isInUse) {
// Someone is still working with this engine.
// TODO can we just delete it? Or throw an error?
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEADLOCK);
}
// detach the engine from the registry while still under the lock ...
engine = e->second;
_engines.erase(id);
}
// ... but run the destructor outside the lock, so engine teardown
// cannot block or deadlock other registry operations
delete engine;
}

View File

@ -64,6 +64,8 @@ class TraverserEngineRegistry {
void returnEngine(TraverserEngineID);
private:
void destroy(TraverserEngineID, bool doLock);
struct EngineInfo {
bool _isInUse; // Flag if this engine is in use

View File

@ -35,6 +35,10 @@
#include "RestServer/DatabaseFeature.h"
#include "V8Server/V8DealerFeature.h"
#ifdef USE_ENTERPRISE
#include "Enterprise/Version.h"
#endif
using namespace arangodb;
using namespace arangodb::application_features;
using namespace arangodb::options;
@ -155,11 +159,17 @@ void BootstrapFeature::start() {
// Start service properly:
rest::RestHandlerFactory::setMaintenance(false);
#ifdef USE_ENTERPRISE
LOG(INFO) << "ArangoDB (enterprise version " << ARANGODB_VERSION_FULL
<< " / " << ENTERPRISE_VERSION << ") is ready for business. Have fun!";
#else
LOG(INFO) << "ArangoDB (version " << ARANGODB_VERSION_FULL
<< ") is ready for business. Have fun!";
#endif
if (_bark) {
LOG(INFO) << "der Hund so: wau wau!";
LOG(INFO) << "The dog says: wau wau!";
}
_isReady = true;

View File

@ -306,50 +306,7 @@ static bool OpenIterator(TRI_df_marker_t const* marker, OpenIteratorState* data,
return (res == TRI_ERROR_NO_ERROR);
}
/// @brief iterate all markers of the collection
static int IterateMarkersCollection(arangodb::Transaction* trx,
LogicalCollection* collection) {
// initialize state for iteration
OpenIteratorState openState(collection);
if (collection->getPhysical()->initialCount() != -1) {
auto primaryIndex = collection->primaryIndex();
int res = primaryIndex->resize(
trx, static_cast<size_t>(collection->getPhysical()->initialCount() * 1.1));
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
openState._initialCount = collection->getPhysical()->initialCount();
}
// read all documents and fill primary index
auto cb = [&openState](TRI_df_marker_t const* marker, TRI_datafile_t* datafile) -> bool {
return OpenIterator(marker, &openState, datafile);
};
collection->iterateDatafiles(cb);
LOG(TRACE) << "found " << openState._documents << " document markers, "
<< openState._deletions << " deletion markers for collection '" << collection->name() << "'";
// update the real statistics for the collection
try {
for (auto& it : openState._stats) {
collection->createStats(it.first, *(it.second));
}
} catch (basics::Exception const& ex) {
return ex.code();
} catch (...) {
return TRI_ERROR_INTERNAL;
}
return TRI_ERROR_NO_ERROR;
}
}
} // namespace
MMFilesCollection::MMFilesCollection(LogicalCollection* collection)
: PhysicalCollection(collection), _ditches(collection), _initialCount(0), _revision(0) {}
@ -1138,94 +1095,44 @@ void MMFilesCollection::finishCompaction() {
_compactionLock.unlock();
}
/// @brief opens an existing collection
void MMFilesCollection::open(bool ignoreErrors) {
TRI_vocbase_t* vocbase = _logicalCollection->vocbase();
/// @brief iterate all markers of the collection
int MMFilesCollection::iterateMarkersOnLoad(arangodb::Transaction* trx) {
// initialize state for iteration
OpenIteratorState openState(_logicalCollection);
VPackBuilder builder;
StorageEngine* engine = EngineSelectorFeature::ENGINE;
engine->getCollectionInfo(vocbase, _logicalCollection->cid(), builder, false, 0);
if (_initialCount != -1) {
auto primaryIndex = _logicalCollection->primaryIndex();
double start = TRI_microtime();
int res = primaryIndex->resize(
trx, static_cast<size_t>(_initialCount * 1.1));
LOG_TOPIC(TRACE, Logger::PERFORMANCE)
<< "open-document-collection { collection: " << vocbase->name() << "/"
<< _logicalCollection->name() << " }";
int res = _logicalCollection->open(ignoreErrors);
if (res != TRI_ERROR_NO_ERROR) {
LOG(ERR) << "cannot open document collection from path '" << _logicalCollection->path() << "'";
THROW_ARANGO_EXCEPTION(res);
}
res = _logicalCollection->createInitialIndexes();
if (res != TRI_ERROR_NO_ERROR) {
LOG(ERR) << "cannot initialize document collection: " << TRI_errno_string(res);
THROW_ARANGO_EXCEPTION(res);
}
arangodb::SingleCollectionTransaction trx(
arangodb::StandaloneTransactionContext::Create(vocbase),
_logicalCollection->cid(), TRI_TRANSACTION_WRITE);
// build the primary index
res = TRI_ERROR_INTERNAL;
try {
double start = TRI_microtime();
LOG_TOPIC(TRACE, Logger::PERFORMANCE)
<< "iterate-markers { collection: " << vocbase->name() << "/"
<< _logicalCollection->name() << " }";
// iterate over all markers of the collection
res = IterateMarkersCollection(&trx, _logicalCollection);
LOG_TOPIC(TRACE, Logger::PERFORMANCE) << "[timer] " << Logger::FIXED(TRI_microtime() - start) << " s, iterate-markers { collection: " << vocbase->name() << "/" << _logicalCollection->name() << " }";
} catch (arangodb::basics::Exception const& ex) {
res = ex.code();
} catch (std::bad_alloc const&) {
res = TRI_ERROR_OUT_OF_MEMORY;
} catch (...) {
res = TRI_ERROR_INTERNAL;
}
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION_MESSAGE(res, std::string("cannot iterate data of document collection: ") + TRI_errno_string(res));
}
// build the indexes meta-data, but do not fill the indexes yet
{
auto old = _logicalCollection->useSecondaryIndexes();
// turn filling of secondary indexes off. we're now only interested in getting
// the indexes' definition. we'll fill them below ourselves.
_logicalCollection->useSecondaryIndexes(false);
try {
_logicalCollection->detectIndexes(&trx);
_logicalCollection->useSecondaryIndexes(old);
} catch (basics::Exception const& ex) {
_logicalCollection->useSecondaryIndexes(old);
THROW_ARANGO_EXCEPTION_MESSAGE(ex.code(), std::string("cannot initialize collection indexes: ") + ex.what());
} catch (std::exception const& ex) {
_logicalCollection->useSecondaryIndexes(old);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, std::string("cannot initialize collection indexes: ") + ex.what());
} catch (...) {
_logicalCollection->useSecondaryIndexes(old);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot initialize collection indexes: unknown exception");
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
openState._initialCount = _initialCount;
}
if (!arangodb::wal::LogfileManager::instance()->isInRecovery()) {
// build the index structures, and fill the indexes
_logicalCollection->fillIndexes(&trx);
// read all documents and fill primary index
auto cb = [&openState](TRI_df_marker_t const* marker, TRI_datafile_t* datafile) -> bool {
return OpenIterator(marker, &openState, datafile);
};
iterateDatafiles(cb);
LOG(TRACE) << "found " << openState._documents << " document markers, "
<< openState._deletions << " deletion markers for collection '" << _logicalCollection->name() << "'";
// update the real statistics for the collection
try {
for (auto& it : openState._stats) {
createStats(it.first, *(it.second));
}
} catch (basics::Exception const& ex) {
return ex.code();
} catch (...) {
return TRI_ERROR_INTERNAL;
}
LOG_TOPIC(TRACE, Logger::PERFORMANCE)
<< "[timer] " << Logger::FIXED(TRI_microtime() - start)
<< " s, open-document-collection { collection: " << vocbase->name() << "/"
<< _logicalCollection->name() << " }";
return TRI_ERROR_NO_ERROR;
}

View File

@ -95,9 +95,6 @@ class MMFilesCollection final : public PhysicalCollection {
/// @brief seal a datafile
int sealDatafile(TRI_datafile_t* datafile, bool isCompactor);
/// @brief iterates over a collection
bool iterateDatafiles(std::function<bool(TRI_df_marker_t const*, TRI_datafile_t*)> const& cb) override;
/// @brief increase dead stats for a datafile, if it exists
void increaseDeadStats(TRI_voc_fid_t fid, int64_t number, int64_t size) override {
_datafileStatistics.increaseDead(fid, number, size);
@ -108,11 +105,6 @@ class MMFilesCollection final : public PhysicalCollection {
_datafileStatistics.update(fid, values);
}
/// @brief create statistics for a datafile, using the stats provided
void createStats(TRI_voc_fid_t fid, DatafileStatisticsContainer const& values) override {
_datafileStatistics.create(fid, values);
}
/// @brief order a new master pointer
TRI_doc_mptr_t* requestMasterpointer() override;
@ -129,11 +121,20 @@ class MMFilesCollection final : public PhysicalCollection {
bool tryLockForCompaction() override;
void finishCompaction() override;
void open(bool ignoreErrors) override;
Ditches* ditches() const override { return &_ditches; }
/// @brief iterate all markers of a collection on load
int iterateMarkersOnLoad(arangodb::Transaction* trx) override;
private:
/// @brief create statistics for a datafile, using the stats provided
void createStats(TRI_voc_fid_t fid, DatafileStatisticsContainer const& values) {
_datafileStatistics.create(fid, values);
}
/// @brief iterates over a collection
bool iterateDatafiles(std::function<bool(TRI_df_marker_t const*, TRI_datafile_t*)> const& cb);
/// @brief creates a datafile
TRI_datafile_t* createDatafile(TRI_voc_fid_t fid,
TRI_voc_size_t journalSize,

View File

@ -653,7 +653,7 @@ static void CreateCollectionCoordinator(
uint64_t replicationFactor = 1;
// default shard key
shardKeys.push_back("_key");
shardKeys.push_back(StaticStrings::KeyString);
std::string distributeShardsLike;

View File

@ -48,6 +48,8 @@
#include "Utils/CollectionNameResolver.h"
#include "Utils/CollectionReadLocker.h"
#include "Utils/CollectionWriteLocker.h"
#include "Utils/SingleCollectionTransaction.h"
#include "Utils/StandaloneTransactionContext.h"
#include "VocBase/PhysicalCollection.h"
#include "VocBase/IndexPoolFeature.h"
#include "VocBase/KeyGenerator.h"
@ -1115,7 +1117,97 @@ PhysicalCollection* LogicalCollection::createPhysical() {
}
/// @brief opens an existing collection
int LogicalCollection::open(bool ignoreErrors) {
void LogicalCollection::open(bool ignoreErrors) {
VPackBuilder builder;
StorageEngine* engine = EngineSelectorFeature::ENGINE;
engine->getCollectionInfo(_vocbase, cid(), builder, false, 0);
double start = TRI_microtime();
LOG_TOPIC(TRACE, Logger::PERFORMANCE)
<< "open-document-collection { collection: " << _vocbase->name() << "/"
<< _name << " }";
int res = openWorker(ignoreErrors);
if (res != TRI_ERROR_NO_ERROR) {
LOG(ERR) << "cannot open document collection from path '" << path() << "'";
THROW_ARANGO_EXCEPTION(res);
}
res = createInitialIndexes();
if (res != TRI_ERROR_NO_ERROR) {
LOG(ERR) << "cannot initialize document collection: " << TRI_errno_string(res);
THROW_ARANGO_EXCEPTION(res);
}
arangodb::SingleCollectionTransaction trx(
arangodb::StandaloneTransactionContext::Create(_vocbase),
cid(), TRI_TRANSACTION_WRITE);
// build the primary index
res = TRI_ERROR_INTERNAL;
try {
double start = TRI_microtime();
LOG_TOPIC(TRACE, Logger::PERFORMANCE)
<< "iterate-markers { collection: " << _vocbase->name() << "/"
<< _name << " }";
// iterate over all markers of the collection
res = getPhysical()->iterateMarkersOnLoad(&trx);
LOG_TOPIC(TRACE, Logger::PERFORMANCE) << "[timer] " << Logger::FIXED(TRI_microtime() - start) << " s, iterate-markers { collection: " << _vocbase->name() << "/" << _name << " }";
} catch (arangodb::basics::Exception const& ex) {
res = ex.code();
} catch (std::bad_alloc const&) {
res = TRI_ERROR_OUT_OF_MEMORY;
} catch (...) {
res = TRI_ERROR_INTERNAL;
}
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION_MESSAGE(res, std::string("cannot iterate data of document collection: ") + TRI_errno_string(res));
}
// build the indexes meta-data, but do not fill the indexes yet
{
auto old = useSecondaryIndexes();
// turn filling of secondary indexes off. we're now only interested in getting
// the indexes' definition. we'll fill them below ourselves.
useSecondaryIndexes(false);
try {
detectIndexes(&trx);
useSecondaryIndexes(old);
} catch (basics::Exception const& ex) {
useSecondaryIndexes(old);
THROW_ARANGO_EXCEPTION_MESSAGE(ex.code(), std::string("cannot initialize collection indexes: ") + ex.what());
} catch (std::exception const& ex) {
useSecondaryIndexes(old);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, std::string("cannot initialize collection indexes: ") + ex.what());
} catch (...) {
useSecondaryIndexes(old);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot initialize collection indexes: unknown exception");
}
}
if (!arangodb::wal::LogfileManager::instance()->isInRecovery()) {
// build the index structures, and fill the indexes
fillIndexes(&trx);
}
LOG_TOPIC(TRACE, Logger::PERFORMANCE)
<< "[timer] " << Logger::FIXED(TRI_microtime() - start)
<< " s, open-document-collection { collection: " << _vocbase->name() << "/"
<< _name << " }";
}
/// @brief opens an existing collection
int LogicalCollection::openWorker(bool ignoreErrors) {
StorageEngine* engine = EngineSelectorFeature::ENGINE;
double start = TRI_microtime();

View File

@ -237,7 +237,7 @@ class LogicalCollection {
/// @brief opens an existing collection
int open(bool ignoreErrors);
void open(bool ignoreErrors);
/// @brief closes an open collection
int close();
@ -249,11 +249,6 @@ class LogicalCollection {
return getPhysical()->rotateActiveJournal();
}
/// @brief iterates over a collection
bool iterateDatafiles(std::function<bool(TRI_df_marker_t const*, TRI_datafile_t*)> const& callback) {
return getPhysical()->iterateDatafiles(callback);
}
/// @brief increase dead stats for a datafile, if it exists
void increaseDeadStats(TRI_voc_fid_t fid, int64_t number, int64_t size) {
return getPhysical()->increaseDeadStats(fid, number, size);
@ -264,12 +259,6 @@ class LogicalCollection {
return getPhysical()->updateStats(fid, values);
}
/// @brief create statistics for a datafile, using the stats provided
void createStats(TRI_voc_fid_t fid, DatafileStatisticsContainer const& values) {
return getPhysical()->createStats(fid, values);
}
int applyForTickRange(TRI_voc_tick_t dataMin, TRI_voc_tick_t dataMax,
std::function<bool(TRI_voc_tick_t foundTick, TRI_df_marker_t const* marker)> const& callback) {
return getPhysical()->applyForTickRange(dataMin, dataMax, callback);
@ -284,7 +273,7 @@ class LogicalCollection {
void releaseMasterpointer(TRI_doc_mptr_t* mptr) {
getPhysical()->releaseMasterpointer(mptr);
}
/// @brief disallow starting the compaction of the collection
void preventCompaction() { getPhysical()->preventCompaction(); }
bool tryPreventCompaction() { return getPhysical()->tryPreventCompaction(); }
@ -369,11 +358,11 @@ class LogicalCollection {
// SECTION: Index creation
/// @brief creates the initial indexes for the collection
public:
// FIXME Should be private
int createInitialIndexes();
private:
/// @brief creates the initial indexes for the collection
int createInitialIndexes();
int openWorker(bool ignoreErrors);
bool removeIndex(TRI_idx_iid_t iid);

View File

@ -37,6 +37,7 @@ struct TRI_doc_mptr_t;
namespace arangodb {
class Ditches;
class LogicalCollection;
class Transaction;
class PhysicalCollection {
protected:
@ -66,18 +67,12 @@ class PhysicalCollection {
virtual int applyForTickRange(TRI_voc_tick_t dataMin, TRI_voc_tick_t dataMax,
std::function<bool(TRI_voc_tick_t foundTick, TRI_df_marker_t const* marker)> const& callback) = 0;
/// @brief iterates over a collection
virtual bool iterateDatafiles(std::function<bool(TRI_df_marker_t const*, TRI_datafile_t*)> const& cb) = 0;
/// @brief increase dead stats for a datafile, if it exists
virtual void increaseDeadStats(TRI_voc_fid_t fid, int64_t number, int64_t size) = 0;
/// @brief increase dead stats for a datafile, if it exists
virtual void updateStats(TRI_voc_fid_t fid, DatafileStatisticsContainer const& values) = 0;
/// @brief create statistics for a datafile, using the stats provided
virtual void createStats(TRI_voc_fid_t fid, DatafileStatisticsContainer const& values) = 0;
/// @brief report extra memory used by indexes etc.
virtual size_t memory() const = 0;
@ -107,9 +102,10 @@ class PhysicalCollection {
/// @brief signal that compaction is finished
virtual void finishCompaction() = 0;
virtual void open(bool ignoreErrors) = 0;
/// @brief iterate all markers of a collection on load
virtual int iterateMarkersOnLoad(arangodb::Transaction* trx) = 0;
protected:
LogicalCollection* _logicalCollection;
};

View File

@ -448,7 +448,7 @@ int TRI_vocbase_t::loadCollection(arangodb::LogicalCollection* collection,
}
try {
collection->getPhysical()->open(ignoreDatafileErrors);
collection->open(ignoreDatafileErrors);
} catch (...) {
collection->setStatus(TRI_VOC_COL_STATUS_CORRUPTED);
return TRI_ERROR_ARANGO_CORRUPTED_COLLECTION;

View File

@ -614,6 +614,22 @@ function agencyTestSuite () {
assertEqual(readAndCheck([["/bumms", "/bummsfallera"]]), [{bumms:"fallera", bummsfallera: "lalalala"}]);
}
/*
testHiddenAgencyWrite: function() {
var res = writeAgency([[{".agency": {"op":"set","new":"fallera"}}]]);
assertEqual(res.statusCode, 400);
},
testHiddenAgencyWriteSlash: function() {
var res = writeAgency([[{"/.agency": {"op":"set","new":"fallera"}}]]);
assertEqual(res.statusCode, 400);
},
testHiddenAgencyWriteDeep: function() {
var res = writeAgency([[{"/.agency/hans": {"op":"set","new":"fallera"}}]]);
assertEqual(res.statusCode, 400);
}
*/
};
}

View File

@ -44,6 +44,10 @@
#include <rocksdb/version.h>
#endif
#ifdef USE_ENTERPRISE
#include "Enterprise/Version.h"
#endif
using namespace arangodb::rest;
std::map<std::string, std::string> Version::Values;
@ -96,9 +100,6 @@ void Version::initialize() {
Values["asm-crc32"] = (ENABLE_ASM_CRC32) ? "true" : "false";
Values["boost-version"] = getBoostVersion();
Values["build-date"] = getBuildDate();
#if HAVE_ARANGODB_BUILD_REPOSITORY
Values["build-repository"] = getBuildRepository();
#endif
Values["compiler"] = getCompiler();
Values["endianness"] = getEndianness();
Values["fd-setsize"] = arangodb::basics::StringUtils::itoa(FD_SETSIZE);
@ -113,6 +114,14 @@ void Version::initialize() {
Values["zlib-version"] = getZLibVersion();
#if USE_ENTERPRISE
Values["enterprise-version"] = ENTERPRISE_VERSION;
#endif
#if HAVE_ARANGODB_BUILD_REPOSITORY
Values["build-repository"] = getBuildRepository();
#endif
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
Values["assertions"] = "true";
#else