1
0
Fork 0

Merge branch 'devel' of github.com:arangodb/arangodb into devel

* 'devel' of github.com:arangodb/arangodb:
  generated API docs
  don't let exceptions escape normal processing
  updated documentation
  fixes for tick handling
  documenting lock in agency
  gv colors
  gruntfile cleanup
  Make ssl stuff copy pastable
  exit release script on error.
  Fix commandline parsing of release script.
  config access to lastAckedAgo protected
  config access to lastAckedAgo protected
  fixed windows compile errors
  simplify
  fix struct/class mismatch
  fix compile warnings because of mismatched class/struct usages
This commit is contained in:
Jan Christoph Uhde 2016-10-24 16:08:18 +02:00
commit fe21ab1bbe
27 changed files with 661 additions and 532 deletions

View File

@ -5,14 +5,14 @@
drops a collection
`collection.drop()`
Drops a *collection* and all its indexes.
`collection.drop(options)`
Drops a *collection* and all its indexes and data.
In order to drop a system collection, an *options* object
with attribute *isSystem* set to *true* must be specified.
**Examples**
@startDocuBlockInline collectionDrop
@EXAMPLE_ARANGOSH_OUTPUT{collectionDrop}
~ db._create("example");
@ -23,6 +23,15 @@ Drops a *collection* and all its indexes.
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock collectionDrop
@startDocuBlockInline collectionDropSystem
@EXAMPLE_ARANGOSH_OUTPUT{collectionDropSystem}
~ db._create("_example", { isSystem: true });
col = db._example;
col.drop({ isSystem: true });
col;
~ db._drop("example", { isSystem: true });
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock collectionDropSystem
!SUBSECTION Truncate

View File

@ -284,18 +284,25 @@ default properties.
drops a collection
`db._drop(collection)`
Drops a *collection* and all its indexes.
Drops a *collection* and all its indexes and data.
`db._drop(collection-identifier)`
Drops a collection identified by *collection-identifier* and all its
indexes. No error is thrown if there is no such collection.
Drops a collection identified by *collection-identifier* with all its
indexes and data. No error is thrown if there is no such collection.
`db._drop(collection-name)`
Drops a collection named *collection-name* and all its indexes. No error
is thrown if there is no such collection.
`db._drop(collection-name, options)`
In order to drop a system collection, one must specify an *options* object
with attribute *isSystem* set to *true*. Otherwise it is not possible to
drop system collections.
*Examples*
Drops a collection:
@ -321,7 +328,16 @@ Drops a collection identified by name:
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock collectionDatabaseDropName
Drops a system collection
@startDocuBlockInline collectionDatabaseDropSystem
@EXAMPLE_ARANGOSH_OUTPUT{collectionDatabaseDropSystem}
~ db._create("_example", { isSystem: true });
col = db._example;
db._drop("_example", { isSystem: true });
col;
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock collectionDatabaseDropSystem
!SUBSECTION Truncate
<!-- js/server/modules/@arangodb/arango-database.js -->

View File

@ -2,13 +2,19 @@
@startDocuBlock JSF_delete_api_collection
@brief drops a collection
@RESTHEADER{DELETE /_api/collection/{collection-name}, Drops collection}
@RESTHEADER{DELETE /_api/collection/{collection-name}, Drops a collection}
@RESTURLPARAMETERS
@RESTURLPARAM{collection-name,string,required}
The name of the collection to drop.
@RESTQUERYPARAMETERS
@RESTQUERYPARAM{isSystem,bool,optional}
Whether or not the collection to drop is a system collection. This parameter
must be set to *true* in order to drop a system collection.
@RESTDESCRIPTION
Drops the collection identified by *collection-name*.
@ -60,5 +66,22 @@ Using a name:
logJsonResponse(response);
@END_EXAMPLE_ARANGOSH_RUN
Dropping a system collection
@EXAMPLE_ARANGOSH_RUN{RestCollectionDeleteCollectionSystem}
var cn = "_example";
db._drop(cn, { isSystem: true });
db._create(cn, { isSystem: true });
var url = "/_api/collection/_example?isSystem=true";
var response = logCurlRequest('DELETE', url);
db[cn] = undefined;
assert(response.code === 200);
logJsonResponse(response);
@END_EXAMPLE_ARANGOSH_RUN
@endDocuBlock

View File

@ -3,7 +3,7 @@
@brief keyfile containing server certificate
`--ssl.keyfile filename`
If SSL encryption is used, this option must be used to specify the filename
If SSL encryption is used, this option must be used to specify the filename
of the server private key. The file must be PEM formatted and contain both
the certificate and the server's private key.
@ -23,7 +23,7 @@ cp server.key server.key.org
openssl rsa -in server.key.org -out server.key
# sign the csr with the key, creates certificate PEM file "server.crt"
openssl x509 -req -days 365 -in server.csr -signkey server.key -out
openssl x509 -req -days 365 -in server.csr -signkey server.key -out \
server.crt
# combine certificate and key into single PEM file "server.pem"

View File

@ -9,40 +9,50 @@ EXAMPLES=1
LINT=1
while [ "$#" -gt 1 ]; do
if [ "$1" == "--no-lint" ]; then
LINT=0
shift
fi
case "$1" in
--no-lint)
LINT=0
shift
;;
if [ "$1" == "--no-build" ]; then
BUILD=0
shift
fi
--no-build)
BUILD=0
shift
;;
if [ "$1" == "--recycle-build" ]; then
BUILD=2
shift
fi
--recycle-build)
BUILD=2
shift
;;
if [ "$1" == "--no-swagger" ]; then
SWAGGER=0
shift
fi
--no-swagger)
SWAGGER=0
shift
;;
if [ "$1" == "--no-examples" ]; then
EXAMPLES=0
shift
fi
--no-examples)
EXAMPLES=0
shift
;;
if [ "$1" == "--no-commit" ]; then
TAG=0
shift
fi
--no-commit)
TAG=0
shift
;;
if [ "$1" == "--no-book" ]; then
BOOK=0
shift
fi
--no-book)
BOOK=0
shift
;;
*)
if test -n "${VERSION}"; then
echo "we already have a version ${VERSION} aborting because of $1"
exit 1
fi
VERSION="$1"
shift
;;
esac
done
if [ "$#" -ne 1 ]; then
@ -50,7 +60,6 @@ if [ "$#" -ne 1 ]; then
exit 1
fi
VERSION="$1"
if echo ${VERSION} | grep -q -- '-'; then
echo "${VERSION} mustn't contain minuses! "

View File

@ -75,7 +75,7 @@ bool Agent::id(std::string const& id) {
/// Merge command line and persisted configurations
bool Agent::mergeConfiguration(VPackSlice const& persisted) {
return _config.merge(persisted);
return _config.merge(persisted); // Concurrency managed in merge
}
/// Dtor shuts down thread
@ -164,6 +164,7 @@ bool Agent::waitFor(index_t index, double timeout) {
return true;
}
// Get condition variable to notice commits
CONDITION_LOCKER(guard, _waitForCV);
// Wait until woken up through AgentCallback
@ -192,6 +193,7 @@ bool Agent::waitFor(index_t index, double timeout) {
void Agent::reportIn(std::string const& id, index_t index) {
{
// Enforce _lastCommitIndex, _readDB and compaction to progress atomically
MUTEX_LOCKER(mutexLocker, _ioLock);
// Update last acknowledged answer
@ -227,9 +229,9 @@ void Agent::reportIn(std::string const& id, index_t index) {
}
}
}
} // MUTEX_LOCKER
{
{ // Wake up rest handler
CONDITION_LOCKER(guard, _waitForCV);
guard.broadcast();
}
@ -251,6 +253,7 @@ bool Agent::recvAppendEntriesRPC(
return false;
}
// State machine, _lastCommitIndex to advance atomically
MUTEX_LOCKER(mutexLocker, _ioLock);
if (!_constituent.checkLeader(term, leaderId, prevIndex, prevTerm)) {
@ -293,6 +296,8 @@ bool Agent::recvAppendEntriesRPC(
/// Leader's append entries
void Agent::sendAppendEntriesRPC() {
// _lastSent, _lastHighest and _confirmed only accessed in main thread
for (auto const& followerId : _config.active()) {
if (followerId != id()) {
@ -316,8 +321,7 @@ void Agent::sendAppendEntriesRPC() {
index_t highest = unconfirmed.back().index;
duration<double> m =
system_clock::now() - _lastSent[followerId];
duration<double> m = system_clock::now() - _lastSent[followerId];
if (highest == _lastHighest[followerId] &&
m.count() < 0.5 * _config.minPing()) {
@ -353,10 +357,6 @@ void Agent::sendAppendEntriesRPC() {
}
// Send request
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Sending AppendEntriesRPC with " << unconfirmed.size() - 1
<< " entries to " << followerId << "...";
auto headerFields =
std::make_unique<std::unordered_map<std::string, std::string>>();
arangodb::ClusterComm::instance()->asyncRequest(
@ -367,7 +367,7 @@ void Agent::sendAppendEntriesRPC() {
0.7 * _config.minPing(), true);
{
MUTEX_LOCKER(mutexLocker, _ioLock);
MUTEX_LOCKER(mutexLocker, _ioLock); // KV: Not sure if needed?
_lastSent[followerId] = system_clock::now();
_lastHighest[followerId] = highest;
}
@ -392,15 +392,14 @@ query_t Agent::activate(query_t const& everything) {
Slice slice = everything->slice();
if (slice.isObject()) {
if (active()) {
ret->add("success", VPackValue(false));
} else {
MUTEX_LOCKER(mutexLocker, _ioLock);
MUTEX_LOCKER(mutexLocker, _ioLock); // Atomicity
Slice compact = slice.get("compact");
Slice logs = slice.get("logs");
Slice logs = slice.get("logs");
if (!compact.isEmptyArray()) {
_readDB = compact.get("readDB");
@ -432,7 +431,7 @@ query_t Agent::activate(query_t const& everything) {
}
/// @brief
/// @brief Activate agency (Inception thread for multi-host, main thread else)
bool Agent::activateAgency() {
if (_config.activeEmpty()) {
size_t count = 0;
@ -442,20 +441,20 @@ bool Agent::activateAgency() {
break;
}
}
bool persisted = false;
bool persisted = false;
try {
persisted = _state.persistActiveAgents(_config.activeToBuilder(),
_config.poolToBuilder());
} catch (std::exception const& e) {
LOG_TOPIC(FATAL, Logger::AGENCY) << "Failed to persist active agency: "
<< e.what();
LOG_TOPIC(FATAL, Logger::AGENCY)
<< "Failed to persist active agency: " << e.what();
}
return persisted;
}
return true;
}
/// Load persistent state
/// Load persistent state called once
bool Agent::load() {
DatabaseFeature* database =
@ -513,26 +512,35 @@ bool Agent::load() {
}
/// Challenge my own leadership
/// Still leading? Under MUTEX from ::read or ::write
bool Agent::challengeLeadership() {
// Still leading?
size_t good = 0;
for (auto const& i : _lastAcked) {
duration<double> m = system_clock::now() - i.second;
if (0.9 * _config.minPing() > m.count()) {
++good;
}
}
return (good < size() / 2); // not counting myself
}
/// Get last acknowlwdged responses on leader
/// Get last acknowledged responses on leader
query_t Agent::lastAckedAgo() const {
query_t ret = std::make_shared<Builder>();
std::map<std::string, TimePoint> lastAcked;
{
MUTEX_LOCKER(mutexLocker, _ioLock);
lastAcked = _lastAcked;
}
auto ret = std::make_shared<Builder>();
ret->openObject();
if (leading()) {
for (auto const& i : _lastAcked) {
for (auto const& i : lastAcked) {
ret->add(i.first, VPackValue(
1.0e-2 * std::floor(
(i.first!=id() ?
@ -541,7 +549,9 @@ query_t Agent::lastAckedAgo() const {
}
}
ret->close();
return ret;
}
@ -635,6 +645,7 @@ void Agent::run() {
}
}
}
@ -655,7 +666,7 @@ void Agent::reportActivated(
_activator.reset(nullptr);
}
// Agency configuration
// Agency configuration
auto agency = std::make_shared<Builder>();
agency->openArray();
agency->openArray();
@ -685,12 +696,18 @@ void Agent::failedActivation(
void Agent::detectActiveAgentFailures() {
// Detect faulty agent if pool larger than agency
std::map<std::string, TimePoint> lastAcked;
{
MUTEX_LOCKER(mutexLocker, _ioLock);
lastAcked = _lastAcked;
}
if (_config.poolSize() > _config.size()) {
std::vector<std::string> active = _config.active();
for (auto const& id : active) {
if (id != this->id()) {
auto ds = duration<double>(
system_clock::now() - _lastAcked.at(id)).count();
system_clock::now() - lastAcked.at(id)).count();
if (ds > 180.0) {
std::string repl = _config.nextAgentInLine();
LOG_TOPIC(DEBUG, Logger::AGENCY) << "Active agent " << id << " has failed. << "
@ -751,8 +768,12 @@ bool Agent::lead() {
guard.broadcast();
}
for (auto const& i : _config.active()) {
_lastAcked[i] = system_clock::now();
// Reset last acknowledged
{
MUTEX_LOCKER(mutexLocker, _ioLock);
for (auto const& i : _config.active()) {
_lastAcked[i] = system_clock::now();
}
}
// Agency configuration
@ -993,6 +1014,7 @@ query_t Agent::gossip(query_t const& in, bool isCallback) {
void Agent::ready(bool b) {
// From main thread of Inception
_ready = b;
}

View File

@ -251,7 +251,7 @@ class Agent : public arangodb::Thread {
std::map<std::string, TimePoint> _lastAcked;
std::map<std::string, TimePoint> _lastSent;
arangodb::Mutex _ioLock; /**< @brief Read/Write lock */
mutable arangodb::Mutex _ioLock; /**< @brief Read/Write lock */
/// @brief Server active agents rest handler
bool _serveActiveAgent;

View File

@ -55,16 +55,16 @@ const std::vector<std::string> roleStr({"Follower", "Candidate", "Leader"});
/// Configure with agent's configuration
void Constituent::configure(Agent* agent) {
MUTEX_LOCKER(guard, _castLock);
_agent = agent;
TRI_ASSERT(_agent != nullptr);
if (size() == 1) {
_role = LEADER;
LOG_TOPIC(DEBUG, Logger::AGENCY) << "Set _role to LEADER in term "
<< _term;
} else {
_id = _agent->config().id();
LOG_TOPIC(DEBUG, Logger::AGENCY) << "Set _role to LEADER in term " << _term;
}
}
// Default ctor
@ -89,7 +89,7 @@ bool Constituent::waitForSync() const { return _agent->config().waitForSync(); }
/// Random sleep times in election process
duration_t Constituent::sleepFor(double min_t, double max_t) {
int32_t left = static_cast<int32_t>(1000.0 * min_t),
right = static_cast<int32_t>(1000.0 * max_t);
right = static_cast<int32_t>(1000.0 * max_t);
return duration_t(static_cast<long>(RandomGenerator::interval(left, right)));
}
@ -199,7 +199,7 @@ void Constituent::lead(term_t term,
_leaderID = _id;
}
// give some debug output
// give some debug output _id never is changed after
if (!votes.empty()) {
std::stringstream ss;
ss << _id << ": Converted to leader in term " << _term << " with votes: ";
@ -492,6 +492,8 @@ bool Constituent::start(TRI_vocbase_t* vocbase,
/// Get persisted information and run election process
void Constituent::run() {
// single instance
_id = _agent->config().id();
TRI_ASSERT(_vocbase != nullptr);

View File

@ -892,7 +892,7 @@ VPackSlice AstNode::computeValue() const {
/// @brief compute the value for a constant value node
/// the value is owned by the node and must not be freed by the caller
VPackSlice AstNode::computeValue(Transaction* trx) const {
VPackSlice AstNode::computeValue(arangodb::Transaction* trx) const {
TRI_ASSERT(isConstant());
if (computedValue == nullptr) {

View File

@ -29,12 +29,12 @@
using namespace arangodb::aql;
CollectionScanner::CollectionScanner(arangodb::Transaction* trx,
ManagedDocumentResult* mmdr,
arangodb::ManagedDocumentResult* mmdr,
std::string const& collection,
bool readRandom)
: _cursor(trx->indexScan(collection,
(readRandom ? Transaction::CursorType::ANY
: Transaction::CursorType::ALL),
(readRandom ? arangodb::Transaction::CursorType::ANY
: arangodb::Transaction::CursorType::ALL),
Transaction::IndexHandle(), VPackSlice(), mmdr, 0,
UINT64_MAX, 1000, false)) {
TRI_ASSERT(_cursor->successful());
@ -42,7 +42,7 @@ CollectionScanner::CollectionScanner(arangodb::Transaction* trx,
CollectionScanner::~CollectionScanner() {}
void CollectionScanner::scan(std::vector<IndexLookupResult>& result, size_t batchSize) {
void CollectionScanner::scan(std::vector<arangodb::IndexLookupResult>& result, size_t batchSize) {
result.clear();
if (!_cursor->hasMore()) {

View File

@ -29,7 +29,7 @@
namespace arangodb {
class ManagedDocumentResult;
class OperationCursor;
struct OperationCursor;
class Transaction;
namespace aql {

View File

@ -41,8 +41,8 @@ class IndexLookupContext;
/// by the datafile the element is in. If the last byte in data[] is 1, then
/// value.data contains the actual VelocyPack data in place.
struct IndexElementValue {
friend class HashIndexElement;
friend class SkiplistIndexElement;
friend struct HashIndexElement;
friend struct SkiplistIndexElement;
public:
IndexElementValue() {}

View File

@ -231,10 +231,12 @@ void RestSimpleHandler::removeByKeys(VPackSlice const& slice) {
if (queryResult.code != TRI_ERROR_NO_ERROR) {
if (queryResult.code == TRI_ERROR_REQUEST_CANCELED ||
(queryResult.code == TRI_ERROR_QUERY_KILLED && wasCanceled())) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_REQUEST_CANCELED);
generateError(GeneralResponse::responseCode(TRI_ERROR_REQUEST_CANCELED), TRI_ERROR_REQUEST_CANCELED);
return;
}
THROW_ARANGO_EXCEPTION_MESSAGE(queryResult.code, queryResult.details);
generateError(GeneralResponse::responseCode(queryResult.code), queryResult.code, queryResult.details);
return;
}
{
@ -341,10 +343,12 @@ void RestSimpleHandler::lookupByKeys(VPackSlice const& slice) {
if (queryResult.code != TRI_ERROR_NO_ERROR) {
if (queryResult.code == TRI_ERROR_REQUEST_CANCELED ||
(queryResult.code == TRI_ERROR_QUERY_KILLED && wasCanceled())) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_REQUEST_CANCELED);
generateError(GeneralResponse::responseCode(TRI_ERROR_REQUEST_CANCELED), TRI_ERROR_REQUEST_CANCELED);
return;
}
THROW_ARANGO_EXCEPTION_MESSAGE(queryResult.code, queryResult.details);
generateError(GeneralResponse::responseCode(queryResult.code), queryResult.code, queryResult.details);
return;
}
size_t resultSize = 10;

View File

@ -40,6 +40,7 @@
#include "VocBase/KeyGenerator.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/datafile.h"
#include "VocBase/ticks.h"
#include "Wal/LogfileManager.h"
using namespace arangodb;
@ -1088,7 +1089,7 @@ int MMFilesCollection::iterateMarkersOnLoad(arangodb::Transaction* trx) {
_lastRevision >= static_cast<TRI_voc_rid_t>(2016 - 1970) * 1000 * 60 * 60 * 24 * 365 &&
application_features::ApplicationServer::server->getFeature<DatabaseFeature>("Database")->check30Revisions()) {
// a collection from 3.0 or earlier with a _rev value that is higher than we can handle safely
LOG(FATAL) << "collection '" << _logicalCollection->name() << "' contains _rev values that are higher than expected for an ArangoDB 3.0 database. If this collection was created or used with a pre-release ArangoDB 3.1, please restart the server with option '--database.check-30-revisions false` to suppress this warning.";
LOG(FATAL) << "collection '" << _logicalCollection->name() << "' contains _rev values that are higher than expected for an ArangoDB 3.0 database. If this collection was created or used with a pre-release ArangoDB 3.1, please restart the server with option '--database.check-30-revisions false' to suppress this warning.";
FATAL_ERROR_EXIT();
}

View File

@ -266,9 +266,8 @@ static inline void InitMarker(TRI_df_marker_t* marker,
TRI_ASSERT(size > 0);
marker->setSize(size);
marker->setType(type);
marker->setTypeAndTick(type, tick);
marker->setCrc(0);
marker->setTick(tick);
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -261,12 +261,14 @@ bool Ditches::contains(Ditch::DitchType type) {
void Ditches::freeDitch(Ditch* ditch) {
TRI_ASSERT(ditch != nullptr);
bool const isDocumentDitch = (ditch->type() == Ditch::TRI_DITCH_DOCUMENT);
{
MUTEX_LOCKER(mutexLocker, _lock);
unlink(ditch);
if (ditch->type() == Ditch::TRI_DITCH_DOCUMENT) {
if (isDocumentDitch) {
// decrease counter
--_numDocumentDitches;
}
@ -415,16 +417,15 @@ void Ditches::link(Ditch* ditch) {
// empty list
if (_end == nullptr) {
_begin = ditch;
_end = ditch;
}
// add to the end
else {
ditch->_prev = _end;
_end->_next = ditch;
_end = ditch;
}
_end = ditch;
if (isDocumentDitch) {
// increase counter

View File

@ -62,7 +62,7 @@ EdgeCollectionInfo::EdgeCollectionInfo(arangodb::Transaction* trx,
std::unique_ptr<arangodb::OperationCursor> EdgeCollectionInfo::getEdges(
std::string const& vertexId,
ManagedDocumentResult* mmdr) {
arangodb::ManagedDocumentResult* mmdr) {
_searchBuilder.clear();
EdgeIndex::buildSearchValue(_forwardDir, vertexId, _searchBuilder);
return _trx->indexScan(_collectionName,
@ -72,7 +72,7 @@ std::unique_ptr<arangodb::OperationCursor> EdgeCollectionInfo::getEdges(
std::unique_ptr<arangodb::OperationCursor> EdgeCollectionInfo::getEdges(
VPackSlice const& vertexId,
ManagedDocumentResult* mmdr) {
arangodb::ManagedDocumentResult* mmdr) {
_searchBuilder.clear();
EdgeIndex::buildSearchValue(_forwardDir, vertexId, _searchBuilder);
return _trx->indexScan(_collectionName,
@ -102,7 +102,7 @@ int EdgeCollectionInfo::getEdgesCoordinator(VPackSlice const& vertexId,
std::unique_ptr<arangodb::OperationCursor> EdgeCollectionInfo::getReverseEdges(
std::string const& vertexId,
ManagedDocumentResult* mmdr) {
arangodb::ManagedDocumentResult* mmdr) {
_searchBuilder.clear();
EdgeIndex::buildSearchValue(_backwardDir, vertexId, _searchBuilder);
return _trx->indexScan(_collectionName,
@ -112,7 +112,7 @@ std::unique_ptr<arangodb::OperationCursor> EdgeCollectionInfo::getReverseEdges(
std::unique_ptr<arangodb::OperationCursor> EdgeCollectionInfo::getReverseEdges(
VPackSlice const& vertexId,
ManagedDocumentResult* mmdr) {
arangodb::ManagedDocumentResult* mmdr) {
_searchBuilder.clear();
EdgeIndex::buildSearchValue(_backwardDir, vertexId, _searchBuilder);
return _trx->indexScan(_collectionName,

View File

@ -50,7 +50,7 @@ typedef std::string ShardID; // ID of a shard
typedef std::unordered_map<ShardID, std::vector<ServerID>> ShardMap;
class CollectionRevisionsCache;
class DatafileStatisticsContainer;
struct DatafileStatisticsContainer;
class Ditches;
class FollowerInfo;
class Index;
@ -59,7 +59,6 @@ class ManagedDocumentResult;
struct OperationOptions;
class PhysicalCollection;
class PrimaryIndex;
class SimpleIndexElement;
class StringRef;
class Transaction;

View File

@ -332,7 +332,7 @@ void Traverser::UniqueVertexGetter::reset(VPackSlice startVertex) {
}
Traverser::Traverser(arangodb::traverser::TraverserOptions* opts, arangodb::Transaction* trx,
ManagedDocumentResult* mmdr)
arangodb::ManagedDocumentResult* mmdr)
: _trx(trx),
_mmdr(mmdr),
_startIdBuilder(trx),

View File

@ -420,8 +420,8 @@ TRI_datafile_t* TRI_datafile_t::create(std::string const& filename, TRI_voc_fid_
/// @brief returns the name for a marker
////////////////////////////////////////////////////////////////////////////////
char const* TRI_NameMarkerDatafile(TRI_df_marker_t const* marker) {
switch (marker->getType()) {
char const* TRI_NameMarkerDatafile(TRI_df_marker_type_t type) {
switch (type) {
// general markers
case TRI_DF_MARKER_HEADER:
return "datafile header";
@ -682,6 +682,9 @@ bool TRI_IterateDatafile(TRI_datafile_t* datafile,
return false;
}
TRI_voc_tick_t maxTick = 0;
TRI_DEFER(TRI_UpdateTickServer(maxTick));
while (ptr < end) {
auto const* marker = reinterpret_cast<TRI_df_marker_t const*>(ptr);
@ -689,6 +692,12 @@ bool TRI_IterateDatafile(TRI_datafile_t* datafile,
return true;
}
TRI_voc_tick_t tick = marker->getTick();
if (tick > maxTick) {
maxTick = tick;
}
// update the tick statistics
TRI_UpdateTicksDatafile(datafile, marker);
@ -717,12 +726,20 @@ bool TRI_IterateDatafile(TRI_datafile_t* datafile,
return false;
}
TRI_voc_tick_t maxTick = 0;
TRI_DEFER(TRI_UpdateTickServer(maxTick));
while (ptr < end) {
auto const* marker = reinterpret_cast<TRI_df_marker_t const*>(ptr);
if (marker->getSize() == 0) {
return true;
}
TRI_voc_tick_t tick = marker->getTick();
if (tick > maxTick) {
maxTick = tick;
}
// update the tick statistics
TRI_UpdateTicksDatafile(datafile, marker);
@ -1212,9 +1229,7 @@ bool TRI_datafile_t::check(bool ignoreFailures) {
}
TRI_voc_tick_t maxTick = 0;
auto updateTick =
[](TRI_voc_tick_t maxTick) -> void { TRI_UpdateTickServer(maxTick); };
TRI_DEFER(TRI_UpdateTickServer(maxTick));
while (ptr < end) {
TRI_df_marker_t const* marker = reinterpret_cast<TRI_df_marker_t const*>(ptr);
@ -1232,8 +1247,6 @@ bool TRI_datafile_t::check(bool ignoreFailures) {
_currentSize = currentSize;
_next = _data + _currentSize;
updateTick(maxTick);
return true;
}
@ -1249,8 +1262,6 @@ bool TRI_datafile_t::check(bool ignoreFailures) {
LOG(WARN) << "marker in datafile '" << getName() << "' too small, size " << size << ", should be at least " << sizeof(TRI_df_marker_t);
updateTick(maxTick);
return false;
}
@ -1271,8 +1282,6 @@ bool TRI_datafile_t::check(bool ignoreFailures) {
}
printMarker(marker, end - ptr, _data, end);
updateTick(maxTick);
return false;
}
@ -1298,8 +1307,6 @@ bool TRI_datafile_t::check(bool ignoreFailures) {
}
printMarker(marker, size, _data, end);
updateTick(maxTick);
return false;
}
}
@ -1384,8 +1391,6 @@ bool TRI_datafile_t::check(bool ignoreFailures) {
LOG(WARN) << "data directly following this marker cannot be analyzed";
}
updateTick(maxTick);
return false;
}
}
@ -1404,7 +1409,6 @@ bool TRI_datafile_t::check(bool ignoreFailures) {
_currentSize = currentSize;
_next = _data + _currentSize;
updateTick(maxTick);
return true;
}
@ -1412,7 +1416,6 @@ bool TRI_datafile_t::check(bool ignoreFailures) {
ptr += alignedSize;
}
updateTick(maxTick);
return true;
}

View File

@ -370,7 +370,7 @@ struct TRI_df_marker_t {
}
inline void setTick(TRI_voc_tick_t tick) noexcept {
_typeAndTick &= 0xff00000000000000ULL;
_typeAndTick |= tick;
_typeAndTick |= tick & 0x00ffffffffffffffULL;
}
inline TRI_df_marker_type_t getType() const noexcept {
return static_cast<TRI_df_marker_type_t>((_typeAndTick & 0xff00000000000000ULL) >> 56);
@ -452,7 +452,11 @@ struct TRI_col_header_marker_t {
/// @brief returns the name for a marker
////////////////////////////////////////////////////////////////////////////////
char const* TRI_NameMarkerDatafile(TRI_df_marker_t const*);
char const* TRI_NameMarkerDatafile(TRI_df_marker_type_t);
static inline char const* TRI_NameMarkerDatafile(TRI_df_marker_t const* marker) {
return TRI_NameMarkerDatafile(marker->getType());
}
////////////////////////////////////////////////////////////////////////////////
/// @brief checks whether a marker is valid

View File

@ -34,7 +34,7 @@ static std::atomic<uint64_t> CurrentTick(0);
static HybridLogicalClock hybridLogicalClock;
/// @brief create a new tick, using a hybrid logical clock
TRI_voc_tick_t TRI_HybridLogicalClock(void) {
TRI_voc_tick_t TRI_HybridLogicalClock() {
return hybridLogicalClock.getTimeStamp();
}

View File

@ -19,11 +19,7 @@
"frontend/js/lib/bootstrap-min.js",
"frontend/js/lib/d3.min.js",
"frontend/js/lib/nv.d3.min.js",
"frontend/js/lib/dygraph-combined.min.js",
"frontend/js/lib/jquery-2.1.0.min.js",
"frontend/js/lib/underscore-min.js",
"frontend/js/lib/backbone-min.js",
"frontend/js/lib/bootstrap-min.js"
"frontend/js/lib/dygraph-combined.min.js"
],
css: [
"frontend/css/swagger/hightlight.default.css",

View File

@ -510,6 +510,7 @@ authRouter.get('/graph/:name', function (req, res) {
}
}
}
edgeObj.sortColor = edgeObj.color;
edgesObj[edge._id] = edgeObj;
});
@ -548,6 +549,7 @@ authRouter.get('/graph/:name', function (req, res) {
label: nodeLabel,
size: nodeSize || 3,
color: config.nodeColor || '#2ecc71',
sortColor: undefined,
x: Math.random(),
y: Math.random()
};
@ -572,6 +574,7 @@ authRouter.get('/graph/:name', function (req, res) {
}
}
nodeObj.sortColor = nodeObj.color;
nodesObj[node._id] = nodeObj;
}
});

File diff suppressed because one or more lines are too long

View File

@ -350,7 +350,11 @@
if (value === 'true') {
window.App.graphViewer.switchNodeColorByCollection(true);
} else {
window.App.graphViewer.switchNodeColorByCollection(false);
if ($('#g_nodeColorAttribute').is(':disabled')) {
window.App.graphViewer.switchNodeColorByCollection(false);
} else {
window.App.graphViewer.switchNodeColorByCollection(false, true);
}
}
return;
// EDGES COLORING
@ -359,7 +363,11 @@
if (value === 'true') {
window.App.graphViewer.switchEdgeColorByCollection(true);
} else {
window.App.graphViewer.switchEdgeColorByCollection(false);
if ($('#g_nodeColorAttribute').is(':disabled')) {
window.App.graphViewer.switchEdgeColorByCollection(false);
} else {
window.App.graphViewer.switchEdgeColorByCollection(false, true);
}
}
return;
}

View File

@ -259,24 +259,31 @@
this.killCurrentGraph();
// TODO add WebGL features
this.renderGraph(this.graphData.modified, null, false, layout, 'canvas');
if ($('#g_nodeColorByCollection').val() === 'true') {
this.switchNodeColorByCollection(true);
} else {
if (this.ncolor) {
this.updateColors(true, true, this.ncolor, this.ecolor);
if ($('#g_nodeColor').is(':disabled')) {
this.updateColors(true, true, null, null, true);
} else {
this.updateColors(true, true, '#2ecc71', '#2ecc71');
if (this.ncolor) {
this.updateColors(true, true, this.ncolor, this.ecolor);
} else {
this.updateColors(true, true, '#2ecc71', '#2ecc71');
}
}
}
if ($('#g_edgeColorByCollection').val() === 'true') {
this.switchEdgeColorByCollection(true);
} else {
if (this.ecolor) {
this.updateColors(true, true, this.ncolor, this.ecolor);
if ($('#g_edgeColor').is(':disabled')) {
this.updateColors(true, true, null, null, true);
} else {
this.updateColors(true, true, '#2ecc71', '#2ecc71');
if (this.ecolor) {
this.updateColors(true, true, this.ncolor, this.ecolor);
} else {
this.updateColors(true, true, '#2ecc71', '#2ecc71');
}
}
}
},
@ -315,7 +322,7 @@
}
},
switchNodeColorByCollection: function (boolean) {
switchNodeColorByCollection: function (boolean, origin) {
var self = this;
self.buildCollectionColors();
if (boolean) {
@ -325,15 +332,19 @@
self.currentGraph.refresh();
} else {
if (this.ncolor) {
this.updateColors(true, null, this.ncolor, this.ecolor);
if (origin) {
this.updateColors(true, null, null, null, origin);
} else {
this.updateColors(true, null, '#2ecc71', '#2ecc71');
if (this.ncolor) {
this.updateColors(true, null, this.ncolor, this.ecolor);
} else {
this.updateColors(true, null, '#2ecc71', '#2ecc71');
}
}
}
},
switchEdgeColorByCollection: function (boolean) {
switchEdgeColorByCollection: function (boolean, origin) {
var self = this;
self.buildCollectionColors();
@ -344,10 +355,14 @@
self.currentGraph.refresh();
} else {
if (this.ecolor) {
this.updateColors(null, true, this.ncolor, this.ecolor);
if (origin) {
this.updateColors(true, null, null, null, origin);
} else {
this.updateColors(null, true, '#2ecc71', '#2ecc71');
if (this.ecolor) {
this.updateColors(null, true, this.ncolor, this.ecolor);
} else {
this.updateColors(null, true, '#2ecc71', '#2ecc71');
}
}
}
},
@ -1084,7 +1099,7 @@
}
},
updateColors: function (nodes, edges, ncolor, ecolor) {
updateColors: function (nodes, edges, ncolor, ecolor, origin) {
var combinedName = frontendConfig.db + '_' + this.name;
var self = this;
@ -1101,7 +1116,11 @@
self.graphConfig = data.toJSON().graphs[combinedName];
try {
self.currentGraph.graph.nodes().forEach(function (n) {
n.color = ncolor;
if (origin) {
n.color = n.sortColor;
} else {
n.color = ncolor;
}
});
} catch (e) {
self.graphNotInitialized = true;
@ -1112,7 +1131,11 @@
if (edges === true) {
try {
self.currentGraph.graph.edges().forEach(function (e) {
e.color = ecolor;
if (origin) {
e.color = e.sortColor;
} else {
e.color = ecolor;
}
});
} catch (ignore) {
self.graphNotInitialized = true;