mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of https://github.com/arangodb/arangodb into engine-api
Commit: 1ce4d4a97e
@@ -4,6 +4,17 @@
# External Projects used by ArangoDB
# ------------------------------------------------------------------------------


if(NOT EXISTS V8/v8/LICENSE OR
   NOT EXISTS V8/v8/testing/gtest/LICENSE)
  message(FATAL_ERROR "Git submodules not checked out properly - aborting! Run:
   git submodule update --recursive
   git submodule update --init --recursive
On Windows you need to make sure git is recent enough and is allowed to create symlinks!
")
endif()


include(ExternalProject)

################################################################################
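The check above only tests for two files; if it fails, the recovery is the submodule update the message names. A minimal sketch of that recovery, run from the source root (the `ls` verification line is an illustrative addition, not part of the build system):

```sh
# fetch/update all git submodules recursively
git submodule update --recursive
git submodule update --init --recursive

# the two license files the CMake check tests for should now exist
ls V8/v8/LICENSE V8/v8/testing/gtest/LICENSE
```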
@@ -52,6 +52,9 @@ This exports the collection *test* into the output directory *export* as jsonl.

Export XGMML
------------

[XGMML](https://en.wikipedia.org/wiki/XGMML) is an XML application based on [GML](https://en.wikipedia.org/wiki/Graph_Modelling_Language). To view the XGMML file you can use, for example, [Cytoscape](http://cytoscape.org).

## XGMML-specific options

*--xgmml-label-attribute* specifies the name of the attribute that will become the label in the XGMML file.
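For illustration, an export invocation using this option might look like the sketch below; the graph name `mygraph` and the label attribute `name` are placeholders, and `--type`, `--graph-name`, and `--output-directory` are assumed to behave as in the other arangoexport examples:

```sh
# export the named graph as XGMML, using the "name" attribute as node label
arangoexport --type xgmml --graph-name mygraph --output-directory "export" --xgmml-label-attribute name
```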
@@ -500,8 +500,9 @@ generate
- `./utils/generateExamples.sh --onlyThisOne geoIndexSelect` will only produce one example - *geoIndexSelect*
- `./utils/generateExamples.sh --onlyThisOne 'MOD.*'` will only produce the examples matching that regex. Note that
  examples with enumerations in their name may be based on others in their series, so you should generate the whole group.
- `./utils/generateExamples.sh --server.endpoint tcp://127.0.0.1:8529` will use an existing arangod instead of starting a new one.
  This seriously cuts down the execution time.
- Running `onlyThisOne` in conjunction with a pre-started server cuts down the execution time even more (see the sketch after this list).
  In addition to `--onlyThisOne ...`, specify e.g. `--server.endpoint tcp://127.0.0.1:8529` to use your already running arangod.
  Please note that examples may collide with existing collections like 'test', so make sure your server is sufficiently clean.
- You can use generateExamples like this:
  `./utils/generateExamples.sh \
    --server.endpoint 'tcp://127.0.0.1:8529' \
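A combined invocation along the lines described above, restricted to a single example and reusing an already running server, might look like this sketch (example name and endpoint are the ones used above; adjust them to your setup):

```sh
# generate only the geoIndexSelect example against a pre-started arangod
./utils/generateExamples.sh \
  --onlyThisOne geoIndexSelect \
  --server.endpoint 'tcp://127.0.0.1:8529'
```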
@@ -401,12 +401,6 @@ bool ServerState::integrateIntoCluster(ServerState::RoleEnum role,
    FATAL_ERROR_EXIT();
  }

  const std::string agencyKey = roleToAgencyKey(role);
  const std::string planKey = "Plan/" + agencyKey + "/" + id;
  const std::string currentKey = "Current/" + agencyKey + "/" + id;

  _id = id;

  findAndSetRoleBlocking();
  LOG_TOPIC(DEBUG, Logger::CLUSTER) << "We successfully announced ourselves as "
                                    << roleToString(role) << " and our id is "
@@ -299,7 +299,11 @@ bool MMFilesCollection::OpenIterator(TRI_df_marker_t const* marker, MMFilesColle
}

MMFilesCollection::MMFilesCollection(LogicalCollection* collection)
    : PhysicalCollection(collection), _ditches(collection), _initialCount(0), _lastRevision(0) {}
    : PhysicalCollection(collection)
    , _ditches(collection)
    , _initialCount(0), _lastRevision(0)
    , _uncollectedLogfileEntries(0)
{}

MMFilesCollection::~MMFilesCollection() {
  try {
@@ -1241,6 +1245,11 @@ int MMFilesCollection::insert(transaction::Methods* trx,
  return res;
}

bool MMFilesCollection::isFullyCollected() const {
  int64_t uncollected = _uncollectedLogfileEntries.load();
  return (uncollected == 0);
}

MMFilesDocumentPosition MMFilesCollection::lookupRevision(TRI_voc_rid_t revisionId) const {
  TRI_ASSERT(revisionId != 0);
  MMFilesDocumentPosition const old = _revisionsCache.lookup(revisionId);
@@ -161,9 +161,6 @@ class MMFilesCollection final : public PhysicalCollection {

  Ditches* ditches() const override { return &_ditches; }

  /// @brief iterate all markers of a collection on load
  int iterateMarkersOnLoad(transaction::Methods* trx) override;

  ////////////////////////////////////
  // -- SECTION DML Operations --
  ///////////////////////////////////
@@ -173,6 +170,26 @@ class MMFilesCollection final : public PhysicalCollection {
                     arangodb::ManagedDocumentResult& result,
                     OperationOptions& options, TRI_voc_tick_t& resultMarkerTick,
                     bool lock) override;

  /// @brief iterate all markers of a collection on load
  int iterateMarkersOnLoad(arangodb::transaction::Methods* trx) override;

  virtual bool isFullyCollected() const override;

  int64_t uncollectedLogfileEntries() const {
    return _uncollectedLogfileEntries.load();
  }

  void increaseUncollectedLogfileEntries(int64_t value) {
    _uncollectedLogfileEntries += value;
  }

  void decreaseUncollectedLogfileEntries(int64_t value) {
    _uncollectedLogfileEntries -= value;
    if (_uncollectedLogfileEntries < 0) {
      _uncollectedLogfileEntries = 0;
    }
  }

 private:
  static int OpenIteratorHandleDocumentMarker(TRI_df_marker_t const* marker,
@@ -258,6 +275,9 @@ class MMFilesCollection final : public PhysicalCollection {
  TRI_voc_rid_t _lastRevision;

  MMFilesRevisionsCache _revisionsCache;

  std::atomic<int64_t> _uncollectedLogfileEntries;

};

}
@@ -30,6 +30,7 @@
#include "Basics/hashes.h"
#include "Basics/memory-map.h"
#include "Logger/Logger.h"
#include "MMFiles/MMFilesCollection.h"
#include "MMFiles/MMFilesDatafileHelper.h"
#include "MMFiles/MMFilesLogfileManager.h"
#include "MMFiles/MMFilesIndexElement.h"
@@ -678,7 +679,7 @@ int MMFilesCollectorThread::processCollectionOperations(MMFilesCollectorCache* c
      << collection->name() << "'";
  updateDatafileStatistics(collection, cache);

  collection->decreaseUncollectedLogfileEntries(cache->totalOperationsCount);
  static_cast<arangodb::MMFilesCollection*>(collection->getPhysical())->decreaseUncollectedLogfileEntries(cache->totalOperationsCount);

  res = TRI_ERROR_NO_ERROR;
} catch (arangodb::basics::Exception const& ex) {
@@ -125,10 +125,10 @@ std::string const MMFilesEngine::FeatureName("MMFilesEngine");

// create the storage engine
MMFilesEngine::MMFilesEngine(application_features::ApplicationServer* server)
    : StorageEngine(server, EngineName, FeatureName, new MMFilesIndexFactory()),
      _isUpgrade(false),
      _maxTick(0) {
}
    : StorageEngine(server, EngineName, FeatureName, new MMFilesIndexFactory())
    , _isUpgrade(false)
    , _maxTick(0)
{}

MMFilesEngine::~MMFilesEngine() {
}
@@ -257,7 +257,7 @@ public:
  /// @brief Add engine specific AQL functions.

  void addAqlFunctions() const override;


 private:
  /// @brief: check the initial markers in a datafile
  bool checkDatafileHeader(MMFilesDatafile* datafile, std::string const& filename) const;
@@ -25,6 +25,7 @@
#include "Basics/Exceptions.h"
#include "Logger/Logger.h"
#include "MMFiles/MMFilesDocumentOperation.h"
#include "MMFiles/MMFilesCollection.h"
#include "StorageEngine/TransactionState.h"
#include "Transaction/Methods.h"
#include "Transaction/Hints.h"
@@ -140,7 +141,8 @@ void MMFilesTransactionCollection::freeOperations(transaction::Methods* activeTr
      _collection->setRevision(_originalRevision, true);
    } else if (!_collection->isVolatile() && !isSingleOperationTransaction) {
      // only count logfileEntries if the collection is durable
      _collection->increaseUncollectedLogfileEntries(_operations->size());
      arangodb::PhysicalCollection* collPtr = _collection->getPhysical();
      static_cast<arangodb::MMFilesCollection*>(collPtr)->increaseUncollectedLogfileEntries(_operations->size());
    }

    delete _operations;
@@ -25,6 +25,7 @@
#include "Aql/QueryCache.h"
#include "Logger/Logger.h"
#include "Basics/Exceptions.h"
#include "MMFiles/MMFilesCollection.h"
#include "MMFiles/MMFilesDatafileHelper.h"
#include "MMFiles/MMFilesDocumentOperation.h"
#include "MMFiles/MMFilesLogfileManager.h"
@@ -298,7 +299,8 @@ int MMFilesTransactionState::addOperation(TRI_voc_rid_t revisionId,
      arangodb::aql::QueryCache::instance()->invalidate(
          _vocbase, collection->name());

      collection->increaseUncollectedLogfileEntries(1);
      auto cptr = collection->getPhysical();
      static_cast<arangodb::MMFilesCollection*>(cptr)->increaseUncollectedLogfileEntries(1);
    } else {
      // operation is buffered and might be rolled back
      TransactionCollection* trxCollection = this->collection(collection->cid(), AccessMode::Type::WRITE);
@@ -44,6 +44,7 @@
#include "StorageEngine/EngineSelectorFeature.h"
#include "MMFiles/MMFilesDocumentOperation.h"
//#include "MMFiles/MMFilesLogfileManager.h"
#include "MMFiles/MMFilesCollection.h" //remove
#include "MMFiles/MMFilesPrimaryIndex.h"
#include "MMFiles/MMFilesIndexElement.h"
#include "MMFiles/MMFilesToken.h"
@@ -230,7 +231,6 @@ LogicalCollection::LogicalCollection(LogicalCollection const& other)
      _nextCompactionStartIndex(0),
      _lastCompactionStatus(nullptr),
      _lastCompactionStamp(0.0),
      _uncollectedLogfileEntries(0),
      _isInitialIteration(false),
      _revisionError(false) {
  _keyGenerator.reset(KeyGenerator::factory(other.keyOptions()));
@@ -295,7 +295,6 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,
      _nextCompactionStartIndex(0),
      _lastCompactionStatus(nullptr),
      _lastCompactionStamp(0.0),
      _uncollectedLogfileEntries(0),
      _isInitialIteration(false),
      _revisionError(false) {
  if (!IsAllowedName(info)) {
@@ -602,9 +601,7 @@ bool LogicalCollection::IsAllowedName(bool allowSystem,

/// @brief whether or not a collection is fully collected
bool LogicalCollection::isFullyCollected() {
  int64_t uncollected = _uncollectedLogfileEntries.load();

  return (uncollected == 0);
  return getPhysical()->isFullyCollected();
}

void LogicalCollection::setNextCompactionStartIndex(size_t index) {
@@ -1193,7 +1190,12 @@ std::shared_ptr<arangodb::velocypack::Builder> LogicalCollection::figures() {

  builder->add("lastTick", VPackValue(_maxTick));
  builder->add("uncollectedLogfileEntries",
               VPackValue(_uncollectedLogfileEntries));
               VPackValue(
                   //MOVE TO PHYSICAL
                   static_cast<arangodb::MMFilesCollection*>(getPhysical())
                       ->uncollectedLogfileEntries()
               )
  );

  // fills in compaction status
  char const* lastCompactionStatus = "-";
@@ -111,28 +111,27 @@ class LogicalCollection {
  void isInitialIteration(bool value) { _isInitialIteration = value; }

  // TODO: MOVE TO PHYSICAL?
  bool isFullyCollected();
  int64_t uncollectedLogfileEntries() const {
    return _uncollectedLogfileEntries.load();
  bool isFullyCollected(); //should not be exposed

  void setNextCompactionStartIndex(size_t index){
    MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
    _nextCompactionStartIndex = index;
  }

  void increaseUncollectedLogfileEntries(int64_t value) {
    _uncollectedLogfileEntries += value;
  size_t getNextCompactionStartIndex(){
    MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
    return _nextCompactionStartIndex;
  }

  void decreaseUncollectedLogfileEntries(int64_t value) {
    _uncollectedLogfileEntries -= value;
    if (_uncollectedLogfileEntries < 0) {
      _uncollectedLogfileEntries = 0;
    }
  void setCompactionStatus(char const* reason){
    TRI_ASSERT(reason != nullptr);
    MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
    _lastCompactionStatus = reason;
  }

  void setNextCompactionStartIndex(size_t);
  size_t getNextCompactionStartIndex();
  void setCompactionStatus(char const*);
  double lastCompactionStamp() const { return _lastCompactionStamp; }
  void lastCompactionStamp(double value) { _lastCompactionStamp = value; }


  void setRevisionError() { _revisionError = true; }

  // SECTION: Meta Information
@@ -615,8 +614,6 @@ class LogicalCollection {
  char const* _lastCompactionStatus;
  double _lastCompactionStamp;

  std::atomic<int64_t> _uncollectedLogfileEntries;

  /// @brief: flag that is set to true when the documents are
  /// initial enumerated and the primary index is built
  bool _isInitialIteration;
@@ -106,6 +106,8 @@ class PhysicalCollection {
  virtual void updateRevision(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal) = 0;
  virtual bool updateRevisionConditional(TRI_voc_rid_t revisionId, TRI_df_marker_t const* oldPosition, TRI_df_marker_t const* newPosition, TRI_voc_fid_t newFid, bool isInWal) = 0;
  virtual void removeRevision(TRI_voc_rid_t revisionId, bool updateStats) = 0;

  virtual bool isFullyCollected() const = 0;

  virtual int insert(arangodb::transaction::Methods* trx,
                     arangodb::velocypack::Slice const newSlice,
@@ -111,8 +111,12 @@ function main(argv) {
  }

  if (options.hasOwnProperty('server.endpoint')) {
    if (scriptArguments.hasOwnProperty('onlyThisOne')) {
      throw("don't run the full suite on pre-existing servers");
    }
    startServer = false;
    serverEndpoint = options['server.endpoint'];

  }

  let args = [theScript].concat(internal.toArgv(scriptArguments));