mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of github.com:arangodb/arangodb into 3.5
commit 0684deb4b9
@@ -99,6 +99,8 @@ js/apps/system/_admin/aardvark/APP/node_modules/*
js/apps/system/_admin/aardvark/APP/frontend/build/app.js
js/apps/system/_admin/aardvark/APP/frontend/build/app.js.gz
js/apps/system/_admin/aardvark/APP/frontend/build/templates.js
js/apps/system/_admin/aardvark/APP/frontend/build/templates.js.gz
js/apps/system/_admin/aardvark/APP/frontend/build/extra.css
js/apps/system/_admin/aardvark/APP/frontend/build/extra.css.gz
js/apps/system/_admin/aardvark/APP/frontend/build/index.html
@@ -1,6 +1,13 @@
v3.5.0-preview.1 (2019-05-14)
-----------------------------

* fixed a bug during start up with a single agent that led to a dbserver crash.

* fixed issue #7011: description when replacing a Foxx application was misleading

* fixed issue #8841: Graph Viewer dropped ability to edit an edge after
  rerendering.

* upgraded arangodb starter version to 0.14.3

* ArangoQueryStreamCursor.prototype.id needs to be a string, v8 32 bit integers
@@ -161,7 +161,7 @@ train connections in Europe and North America.

@startDocuBlockInline GRAPHKSP_01_create_graph
@ FIXME EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_01_create_graph}
@EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_01_create_graph}
~addIgnoreCollection("places");
~addIgnoreCollection("connections");
var examples = require("@arangodb/graph-examples/example-graph.js");

@@ -177,25 +177,25 @@ SHORTEST_PATH and K_SHORTEST_PATH with LIMIT 1 should return a path of the same
length (or weight), they do not need to return the same path.

@startDocuBlockInline GRAPHKSP_02_Aberdeen_to_London
@ FIXME EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_02_Aberdeen_to_London}
db._query("FOR v, e IN OUTBOUND SHORTEST_PATH 'places/Aberdeen' TO 'places/London' GRAPH 'shortestPathsGraph' RETURN [v,e]");
db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/Aberdeen' TO 'places/London' GRAPH 'shortestPathsGraph' LIMIT 1 RETURN p");
@EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_02_Aberdeen_to_London}
db._query("FOR v, e IN OUTBOUND SHORTEST_PATH 'places/Aberdeen' TO 'places/London' GRAPH 'kShortestPathsGraph' RETURN [v,e]");
db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/Aberdeen' TO 'places/London' GRAPH 'kShortestPathsGraph' LIMIT 1 RETURN p");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock GRAPHKSP_02_Aberdeen_to_London

Next, we can ask for more than one option for a route:

@startDocuBlockInline GRAPHKSP_03_Aberdeen_to_London
@ FIXME EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_03_Aberdeen_to_London}
db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/Aberdeen' TO 'places/London' GRAPH 'shortestPathsGraph' LIMIT 3 RETURN p");
@EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_03_Aberdeen_to_London}
db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/Aberdeen' TO 'places/London' GRAPH 'kShortestPathsGraph' LIMIT 3 RETURN p");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock GRAPHKSP_03_Aberdeen_to_London

If we ask for routes that don't exist we get an empty result:

@startDocuBlockInline GRAPHKSP_04_Aberdeen_to_Toronto
@ FIXME EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_04_Aberdeen_to_Toronto}
db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/Aberdeen' TO 'places/Toronto' GRAPH 'shortestPathsGraph' LIMIT 3 RETURN p");
@EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_04_Aberdeen_to_Toronto}
db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/Aberdeen' TO 'places/Toronto' GRAPH 'kShortestPathsGraph' LIMIT 3 RETURN p");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock GRAPHKSP_04_Aberdeen_to_Toronto

@@ -203,15 +203,15 @@ We can use the attribute *travelTime* that connections have as edge weights to
take into account which connections are quicker:

@startDocuBlockInline GRAPHKSP_05_StAndrews_to_Cologne
@ FIXME EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_05_StAndrews_to_Cologne}
db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/StAndrews' TO 'places/Cologne' GRAPH 'shortestPathsGraph' OPTIONS { 'weightAttribute': 'travelTime', defaultWeight: '15'} LIMIT 3 RETURN p");
@EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_05_StAndrews_to_Cologne}
db._query("FOR p IN OUTBOUND K_SHORTEST_PATHS 'places/StAndrews' TO 'places/Cologne' GRAPH 'kShortestPathsGraph' OPTIONS { 'weightAttribute': 'travelTime', defaultWeight: '15'} LIMIT 3 RETURN p");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock GRAPHKSP_05_StAndrews_to_Cologne

And finally clean up by removing the named graph:

@startDocuBlockInline GRAPHKSP_99_drop_graph
@ FIXME EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_99_drop_graph}
@EXAMPLE_ARANGOSH_OUTPUT{GRAPHKSP_99_drop_graph}
var examples = require("@arangodb/graph-examples/example-graph.js");
examples.dropGraph("kShortestPathsGraph");
~removeIgnoreCollection("places");
@@ -155,7 +155,7 @@ bool ActiveFailoverJob::start(bool&) {
if (jobId.second && !abortable(_snapshot, jobId.first)) {
return false;
} else if (jobId.second) {
JobContext(PENDING, jobId.first, _snapshot, _agent).abort();
JobContext(PENDING, jobId.first, _snapshot, _agent).abort("ActiveFailoverJob requests abort");
}

// Todo entry

@@ -236,7 +236,7 @@ JOB_STATUS ActiveFailoverJob::status() {
return _status;
}

arangodb::Result ActiveFailoverJob::abort() {
arangodb::Result ActiveFailoverJob::abort(std::string const& reason) {
// We can assume that the job is in ToDo or not there:
if (_status == NOTFOUND || _status == FINISHED || _status == FAILED) {
return Result(TRI_ERROR_SUPERVISION_GENERAL_FAILURE,

@@ -246,7 +246,7 @@ arangodb::Result ActiveFailoverJob::abort() {
Result result;
// Can now only be TODO or PENDING
if (_status == TODO) {
finish("", "", false, "job aborted");
finish("", "", false, "job aborted: " + reason);
return result;
}
@@ -42,7 +42,7 @@ struct ActiveFailoverJob final : public Job {
virtual void run(bool&) override final;
virtual bool create(std::shared_ptr<VPackBuilder> envelope = nullptr) override final;
virtual bool start(bool&) override final;
virtual Result abort() override final;
virtual Result abort(std::string const& reason) override final;

private:
std::string findBestFollower();
@@ -322,7 +322,7 @@ JOB_STATUS AddFollower::status() {
return _status;
}

arangodb::Result AddFollower::abort() {
arangodb::Result AddFollower::abort(std::string const& reason) {
// We can assume that the job is in ToDo or not there:
if (_status == NOTFOUND || _status == FINISHED || _status == FAILED) {
return Result(TRI_ERROR_SUPERVISION_GENERAL_FAILURE,

@@ -332,7 +332,7 @@ arangodb::Result AddFollower::abort() {
Result result;
// Can now only be TODO or PENDING
if (_status == TODO) {
finish("", "", false, "job aborted");
finish("", "", false, "job aborted:" + reason);
return result;
}
@@ -44,7 +44,7 @@ struct AddFollower : public Job {
virtual bool create(std::shared_ptr<VPackBuilder> envelope = nullptr) override final;
virtual void run(bool&) override final;
virtual bool start(bool&) override final;
virtual Result abort() override final;
virtual Result abort(std::string const& reason) override final;

std::string _database;
std::string _collection;
@@ -1309,6 +1309,14 @@ AgencyCommResult AgencyComm::sendWithFailover(arangodb::rest::RequestType method
double const timeout,
std::string const& initialUrl,
VPackSlice inBody) {
AgencyCommResult result;
if (!AgencyCommManager::isEnabled()) {
LOG_TOPIC("42fae", ERR, Logger::AGENCYCOMM)
<< "No AgencyCommManager. Inappropriate agent usage?";
result.set(503, "No AgencyCommManager. Inappropriate agent usage?");
return result;
} // if

std::string endpoint;
std::unique_ptr<GeneralClientConnection> connection =
AgencyCommManager::MANAGER->acquire(endpoint);

@@ -1326,7 +1334,6 @@ AgencyCommResult AgencyComm::sendWithFailover(arangodb::rest::RequestType method
}
}
}
AgencyCommResult result;
std::string url;

std::chrono::duration<double> waitInterval(.0); // seconds
@@ -233,12 +233,14 @@ void AgencyFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
// the selected storage engine
// - ArangoSearch: not needed by agency even if MMFiles is the selected
// storage engine
// - IResearchAnalyzer: analyzers are not needed by agency
// - Statistics: turn off statistics gathering for agency
// - Action/Script/FoxxQueues/Frontend: Foxx and JavaScript APIs

std::vector<std::string> disabledFeatures(
{"MMFilesPersistentIndex", "ArangoSearch", "Statistics", "Action",
"Script", "FoxxQueues", "Frontend"});
std::vector<std::string> disabledFeatures({
"MMFilesPersistentIndex", "ArangoSearch", "IResearchAnalyzer",
"Statistics", "Action", "Script", "FoxxQueues", "Frontend"});

if (!result.touched("console") || !*(options->get<BooleanParameter>("console")->ptr)) {
// specifying --console requires JavaScript, so we can only turn it off
// if not specified
@@ -88,7 +88,7 @@ JOB_STATUS CleanOutServer::status() {
Supervision::TimePoint timeCreated = stringToTimepoint(timeCreatedString);
Supervision::TimePoint now(std::chrono::system_clock::now());
if (now - timeCreated > std::chrono::duration<double>(86400.0)) { // 1 day
abort();
abort("job timed out");
return FAILED;
}
return PENDING;

@@ -103,7 +103,7 @@ JOB_STATUS CleanOutServer::status() {
}

if (failedFound > 0) {
abort();
abort("child job failed");
return FAILED;
}

@@ -502,7 +502,7 @@ bool CleanOutServer::checkFeasibility() {
return true;
}

arangodb::Result CleanOutServer::abort() {
arangodb::Result CleanOutServer::abort(std::string const& reason) {
// We can assume that the job is either in ToDo or in Pending.
Result result;

@@ -514,7 +514,7 @@ arangodb::Result CleanOutServer::abort() {

// Can now only be TODO or PENDING
if (_status == TODO) {
finish("", "", false, "job aborted");
finish("", "", false, "job aborted:" + reason);
return result;
}

@@ -522,14 +522,16 @@ arangodb::Result CleanOutServer::abort() {
Node::Children const& todos = _snapshot.hasAsChildren(toDoPrefix).first;
Node::Children const& pends = _snapshot.hasAsChildren(pendingPrefix).first;

std::string childAbortReason = "parent job aborted - reason: " + reason;

for (auto const& subJob : todos) {
if (subJob.first.compare(0, _jobId.size() + 1, _jobId + "-") == 0) {
JobContext(TODO, subJob.first, _snapshot, _agent).abort();
JobContext(TODO, subJob.first, _snapshot, _agent).abort(childAbortReason);
}
}
for (auto const& subJob : pends) {
if (subJob.first.compare(0, _jobId.size() + 1, _jobId + "-") == 0) {
JobContext(PENDING, subJob.first, _snapshot, _agent).abort();
JobContext(PENDING, subJob.first, _snapshot, _agent).abort(childAbortReason);
}
}

@@ -544,7 +546,7 @@ arangodb::Result CleanOutServer::abort() {
}
}

finish(_server, "", false, "job aborted", payload);
finish(_server, "", false, "job aborted: " + reason, payload);

return result;
}
@@ -44,7 +44,7 @@ struct CleanOutServer : public Job {
virtual bool create(std::shared_ptr<VPackBuilder> envelope = nullptr) override final;
virtual void run(bool&) override final;
virtual bool start(bool&) override final;
virtual Result abort() override final;
virtual Result abort(std::string const& reason) override final;

// Check if all shards' replication factors can be satisfied after clean out.
bool checkFeasibility();
@@ -273,7 +273,7 @@ bool FailedFollower::start(bool& aborts) {
return false;
} else if (jobId.second) {
aborts = true;
JobContext(PENDING, jobId.first, _snapshot, _agent).abort();
JobContext(PENDING, jobId.first, _snapshot, _agent).abort("failed follower requests abort");
return false;
}
}

@@ -340,7 +340,7 @@ JOB_STATUS FailedFollower::status() {
return TODO;
}

arangodb::Result FailedFollower::abort() {
arangodb::Result FailedFollower::abort(std::string const& reason) {
// We can assume that the job is in ToDo or not there:
if (_status == NOTFOUND || _status == FINISHED || _status == FAILED) {
return Result(TRI_ERROR_SUPERVISION_GENERAL_FAILURE,

@@ -350,7 +350,7 @@ arangodb::Result FailedFollower::abort() {
Result result;
// Can now only be TODO
if (_status == TODO) {
finish("", "", false, "job aborted");
finish("", "", false, "job aborted: " + reason);
return result;
}
@@ -47,7 +47,7 @@ struct FailedFollower : public Job {
virtual void run(bool&) override final;
virtual bool start(bool&) override final;
virtual JOB_STATUS status() override final;
virtual Result abort() override final;
virtual Result abort(std::string const& reason) override final;

std::string _database;
std::string _collection;
@@ -310,7 +310,7 @@ bool FailedLeader::start(bool& aborts) {
return false;
} else if (jobId.second) {
aborts = true;
JobContext(PENDING, jobId.first, _snapshot, _agent).abort();
JobContext(PENDING, jobId.first, _snapshot, _agent).abort("failed leader requests abort");
return false;
}
}

@@ -442,13 +442,13 @@ JOB_STATUS FailedLeader::status() {
return _status;
}

arangodb::Result FailedLeader::abort() {
arangodb::Result FailedLeader::abort(std::string const& reason) {
// job is only abortable when it is in ToDo
if (_status != TODO) {
return Result(TRI_ERROR_SUPERVISION_GENERAL_FAILURE,
"Failed aborting failedFollower job beyond todo stage");
} else {
finish("", "", false, "job aborted");
finish("", "", false, "job aborted: " + reason);
return Result();
}
}
@@ -47,7 +47,7 @@ struct FailedLeader : public Job {
virtual bool start(bool&) override final;
virtual JOB_STATUS status() override final;
virtual void run(bool&) override final;
virtual Result abort() override final;
virtual Result abort(std::string const& reason) override final;
void rollback();

std::string _database;
@@ -85,7 +85,7 @@ bool FailedServer::start(bool& aborts) {
return false;
} else if (jobId.second) {
aborts = true;
JobContext(PENDING, jobId.first, _snapshot, _agent).abort();
JobContext(PENDING, jobId.first, _snapshot, _agent).abort("failed server");
return false;
}

@@ -364,8 +364,9 @@ JOB_STATUS FailedServer::status() {
return _status;
}

arangodb::Result FailedServer::abort() {
arangodb::Result FailedServer::abort(std::string const& reason) {
Result result;
return result;
// FIXME: No abort procedure, simply throw error or so
// ??????????????
}
@@ -44,7 +44,7 @@ struct FailedServer : public Job {
virtual bool create(std::shared_ptr<VPackBuilder> b = nullptr) override final;
virtual JOB_STATUS status() override final;
virtual void run(bool&) override final;
virtual Result abort() override final;
virtual Result abort(std::string const& reason) override final;

std::string _server;
};
@@ -102,7 +102,7 @@ struct Job {
}
}

virtual Result abort() = 0;
virtual Result abort(std::string const& reason) = 0;

virtual bool finish(std::string const& server, std::string const& shard,
bool success = true, std::string const& reason = std::string(),
@@ -85,8 +85,8 @@ void JobContext::run(bool& aborts) {
}
}

void JobContext::abort() {
void JobContext::abort(std::string const& reason) {
if (_job != nullptr) {
_job->abort();
_job->abort(reason);
}
}
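For context, the pattern this commit introduces across the supervision jobs can be reduced to a minimal standalone C++ sketch. The names below are simplified stand-ins, not the actual arangodb::consensus classes; the point is only that abort() now carries a human-readable reason that ends up in the recorded job state (e.g. "job aborted: <reason>").

// Illustrative sketch only -- simplified stand-ins for the supervision classes.
#include <iostream>
#include <string>

struct Result {
  int code = 0;
  std::string message;
};

struct Job {
  virtual ~Job() = default;
  // Every job implementation now has to explain why it was aborted.
  virtual Result abort(std::string const& reason) = 0;
};

struct DemoJob final : public Job {
  Result abort(std::string const& reason) override {
    // The reason is appended to the stored job record,
    // as in finish(..., "job aborted: " + reason) above.
    return Result{0, "job aborted: " + reason};
  }
};

int main() {
  DemoJob job;
  // A parent job aborting its children forwards a descriptive reason.
  Result r = job.abort("parent job aborted - reason: job timed out");
  std::cout << r.message << "\n";
}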
@@ -50,7 +50,7 @@ class JobContext {
void run(bool& aborts);

/// @brief Abort job
void abort();
void abort(std::string const& reason);

private:
/// @brief Actual job context
@@ -431,7 +431,7 @@ JOB_STATUS MoveShard::pendingLeader() {
Supervision::TimePoint timeCreated = stringToTimepoint(timeCreatedString);
Supervision::TimePoint now(std::chrono::system_clock::now());
if (now - timeCreated > std::chrono::duration<double>(43200.0)) { // 12h
abort();
abort("MoveShard timed out in pending leader");
return true;
}
return false;

@@ -683,7 +683,7 @@ JOB_STATUS MoveShard::pendingFollower() {
Supervision::TimePoint timeCreated = stringToTimepoint(timeCreatedString);
Supervision::TimePoint now(std::chrono::system_clock::now());
if (now - timeCreated > std::chrono::duration<double>(10000.0)) {
abort();
abort("MoveShard timed out in pending follower");
return FAILED;
}
return PENDING;

@@ -745,7 +745,7 @@ JOB_STATUS MoveShard::pendingFollower() {
return PENDING;
}

arangodb::Result MoveShard::abort() {
arangodb::Result MoveShard::abort(std::string const& reason) {
arangodb::Result result;

// We can assume that the job is either in ToDo or in Pending.

@@ -773,7 +773,7 @@ arangodb::Result MoveShard::abort() {
}
}

if (finish("", "", true, "job aborted", todoPrec)) {
if (finish("", "", true, "job aborted (1): " + reason, todoPrec)) {
return result;
}
_status = PENDING;

@@ -794,7 +794,7 @@ arangodb::Result MoveShard::abort() {
if (cur.second && cur.first[0].copyString() == _to) {
LOG_TOPIC("72a82", INFO, Logger::SUPERVISION) <<
"MoveShard can no longer abort through reversion to where it started. Flight forward";
finish(_to, _shard, true, "job aborted - new leader already in place");
finish(_to, _shard, true, "job aborted (2) - new leader already in place: " + reason);
return result;
}
}

@@ -843,7 +843,7 @@ arangodb::Result MoveShard::abort() {
addRemoveJobFromSomewhere(trx, "Pending", _jobId);
Builder job;
_snapshot.hasAsBuilder(pendingPrefix + _jobId, job);
addPutJobIntoSomewhere(trx, "Failed", job.slice(), "job aborted");
addPutJobIntoSomewhere(trx, "Failed", job.slice(), "job aborted (3): " + reason);
addReleaseShard(trx, _shard);
addReleaseServer(trx, _to);
addIncreasePlanVersion(trx);

@@ -871,7 +871,7 @@ arangodb::Result MoveShard::abort() {
// Tough luck. Things have changed. We'll move on
LOG_TOPIC("513e6", INFO, Logger::SUPERVISION) <<
"MoveShard can no longer abort through reversion to where it started. Flight forward";
finish(_to, _shard, true, "job aborted - new leader already in place");
finish(_to, _shard, true, "job aborted (4) - new leader already in place: " + reason);
return result;
}
result = Result(
@@ -50,7 +50,7 @@ struct MoveShard : public Job {
virtual void run(bool&) override final;
virtual bool create(std::shared_ptr<VPackBuilder> envelope = nullptr) override final;
virtual bool start(bool&) override final;
virtual Result abort() override;
virtual Result abort(std::string const& reason) override;
JOB_STATUS pendingLeader();
JOB_STATUS pendingFollower();
@@ -435,7 +435,7 @@ JOB_STATUS RemoveFollower::status() {
return _status;
}

arangodb::Result RemoveFollower::abort() {
arangodb::Result RemoveFollower::abort(std::string const& reason) {
Result result;
// We can assume that the job is in ToDo or not there:
if (_status == NOTFOUND || _status == FINISHED || _status == FAILED) {

@@ -445,7 +445,7 @@ arangodb::Result RemoveFollower::abort() {
}
// Can now only be TODO or PENDING
if (_status == TODO) {
finish("", "", false, "job aborted");
finish("", "", false, "job aborted:" + reason);
return result;
}
@@ -44,7 +44,7 @@ struct RemoveFollower : public Job {
virtual bool create(std::shared_ptr<VPackBuilder> envelope = nullptr) override final;
virtual void run(bool& aborts) override final;
virtual bool start(bool&) override final;
virtual Result abort() override final;
virtual Result abort(std::string const& reason) override final;

std::string _database;
std::string _collection;
@@ -403,7 +403,7 @@ RestStatus RestAgencyHandler::handleInquire() {
}

// Leadership established?
if (_agent->size() > 1 && _agent->leaderID() == NO_LEADER) {
if (_agent->leaderID() == NO_LEADER) {
return reportMessage(rest::ResponseCode::SERVICE_UNAVAILABLE, "No leader");
}
@@ -423,7 +423,7 @@ SharedAqlItemBlockPtr AqlItemBlock::slice(size_t from, size_t to) const {
/// @brief slice/clone, this does a deep copy of all entries
SharedAqlItemBlockPtr AqlItemBlock::slice(size_t row,
std::unordered_set<RegisterId> const& registers,
size_t newNrRegs) const {
RegisterCount newNrRegs) const {
TRI_ASSERT(_nrRegs <= newNrRegs);

std::unordered_set<AqlValue> cache;
@@ -332,7 +332,7 @@ class AqlItemBlock {
/// @brief create an AqlItemBlock with a single row, with copies of the
/// specified registers from the current block
SharedAqlItemBlockPtr slice(size_t row, std::unordered_set<RegisterId> const& registers,
size_t newNrRegs) const;
RegisterCount newNrRegs) const;

/// @brief slice/clone chosen rows for a subset, this does a deep copy
/// of all entries
@@ -93,7 +93,7 @@ class ConstrainedSortExecutor {
Fetcher& _fetcher;
ExecutionState _state;
size_t _returnNext;
std::vector<uint32_t> _rows;
std::vector<size_t> _rows;
size_t _rowsPushed;
SharedAqlItemBlockPtr _heapBuffer;
std::unique_ptr<ConstrainedLessThan> _cmpHeap; // in pointer to avoid
@@ -173,13 +173,14 @@ std::tuple<ExecutionState, EnumerateCollectionStats, size_t> EnumerateCollection
std::tie(_state, _input) = _fetcher.fetchRow();

if (_state == ExecutionState::WAITING) {
return {_state, stats, 0};
return std::make_tuple(_state, stats, 0); // tuple, cannot use initializer list due to build failure
}

if (!_input) {
TRI_ASSERT(_state == ExecutionState::DONE);
return {_state, stats, 0};
return std::make_tuple(_state, stats, 0); // tuple, cannot use initializer list due to build failure
}

_cursor->reset();
_cursorHasMore = _cursor->hasMore();
}

@@ -192,10 +193,10 @@ std::tuple<ExecutionState, EnumerateCollectionStats, size_t> EnumerateCollection
stats.incrScanned(actuallySkipped);

if (_state == ExecutionState::DONE && !_cursorHasMore) {
return {ExecutionState::DONE, stats, actuallySkipped};
return std::make_tuple(ExecutionState::DONE, stats, actuallySkipped); // tuple, cannot use initializer list due to build failure
}

return {ExecutionState::HASMORE, stats, actuallySkipped};
return std::make_tuple(ExecutionState::HASMORE, stats, actuallySkipped); // tuple, cannot use initializer list due to build failure
}

void EnumerateCollectionExecutor::initializeCursor() {
@@ -229,7 +229,7 @@ struct ExecuteSkipVariant<SkipVariants::FETCHER> {
static std::tuple<ExecutionState, typename Executor::Stats, size_t> executeSkip(
Executor& executor, typename Executor::Fetcher& fetcher, size_t toSkip) {
auto res = fetcher.skipRows(toSkip);
return {res.first, typename Executor::Stats{}, res.second};
return std::make_tuple(res.first, typename Executor::Stats{}, res.second); // tuple, cannot use initializer list due to build failure
}
};

@@ -250,7 +250,7 @@ struct ExecuteSkipVariant<SkipVariants::DEFAULT> {
// this function should never be executed
TRI_ASSERT(false);
// Make MSVC happy:
return {ExecutionState::DONE, {}, 0};
return std::make_tuple(ExecutionState::DONE, typename Executor::Stats{}, 0); // tuple, cannot use initializer list due to build failure
}
};
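The repeated "cannot use initializer list due to build failure" comments refer to returning a std::tuple. A hedged, simplified sketch of the likely underlying issue follows (assuming it is the well-known behaviour of older standard libraries where std::tuple's element-wise constructor is explicit, so a braced return is rejected; the real code returns executor-specific Stats types, here replaced by std::string).

// Sketch only: simplified types, not the actual AQL executor code.
#include <cstddef>
#include <iostream>
#include <string>
#include <tuple>

enum class ExecutionState { DONE, HASMORE, WAITING };

std::tuple<ExecutionState, std::string, size_t> skipResult(std::string stats, size_t skipped) {
  // return {ExecutionState::DONE, stats, skipped};  // may fail to build on older toolchains
  return std::make_tuple(ExecutionState::DONE, std::move(stats), skipped);  // portable spelling
}

int main() {
  auto res = skipResult("no rows scanned", 0);
  std::cout << static_cast<int>(std::get<0>(res)) << " " << std::get<2>(res) << "\n";
}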
@@ -498,7 +498,7 @@ std::pair<ExecutionState, Result> ExecutionBlockImpl<SubqueryExecutor<false>>::s

template <class Executor>
std::pair<ExecutionState, SharedAqlItemBlockPtr> ExecutionBlockImpl<Executor>::requestWrappedBlock(
size_t nrItems, RegisterId nrRegs) {
size_t nrItems, RegisterCount nrRegs) {
SharedAqlItemBlockPtr block;
if /* constexpr */ (Executor::Properties::allowsBlockPassthrough) {
// If blocks can be passed through, we do not create new blocks.
@@ -208,7 +208,7 @@ class ExecutionBlockImpl final : public ExecutionBlock {
Executor& executor() { return _executor; }

/// @brief request an AqlItemBlock from the memory manager
SharedAqlItemBlockPtr requestBlock(size_t nrItems, RegisterId nrRegs);
SharedAqlItemBlockPtr requestBlock(size_t nrItems, RegisterCount nrRegs);

private:
/**
@@ -294,17 +294,17 @@ IResearchViewExecutorBase<Impl, Traits>::skipRows(size_t toSkip) {
if (!_inputRow.isInitialized()) {
if (_upstreamState == ExecutionState::DONE) {
// There will be no more rows, stop fetching.
return {ExecutionState::DONE, stats, 0};
return std::make_tuple(ExecutionState::DONE, stats, 0); // tuple, cannot use initializer list due to build failure
}

std::tie(_upstreamState, _inputRow) = _fetcher.fetchRow();

if (_upstreamState == ExecutionState::WAITING) {
return {_upstreamState, stats, 0};
return std::make_tuple(_upstreamState, stats, 0); // tuple, cannot use initializer list due to build failure
}

if (!_inputRow.isInitialized()) {
return {ExecutionState::DONE, stats, 0};
return std::make_tuple(ExecutionState::DONE, stats, 0); // tuple, cannot use initializer list due to build failure
}

// reset must be called exactly after we've got a new and valid input row.

@@ -321,7 +321,7 @@ IResearchViewExecutorBase<Impl, Traits>::skipRows(size_t toSkip) {
_inputRow = InputAqlItemRow{CreateInvalidInputRowHint{}};
}

return {ExecutionState::HASMORE, stats, skipped};
return std::make_tuple(ExecutionState::HASMORE, stats, skipped); // tuple, cannot use initializer list due to build failure
}

template <typename Impl, typename Traits>
@@ -537,21 +537,25 @@ std::tuple<ExecutionState, IndexExecutor::Stats, size_t> IndexExecutor::skipRows
if (!_input) {
if (_state == ExecutionState::DONE) {
size_t skipped = _skipped;

_skipped = 0;
return {_state, stats, skipped};

return std::make_tuple(_state, stats, skipped); // tuple, cannot use initializer list due to build failure
}

std::tie(_state, _input) = _fetcher.fetchRow();

if (_state == ExecutionState::WAITING) {
return {_state, stats, 0};
return std::make_tuple(_state, stats, 0); // tuple, cannot use initializer list due to build failure
}

if (!_input) {
TRI_ASSERT(_state == ExecutionState::DONE);
size_t skipped = _skipped;

_skipped = 0;
return {_state, stats, skipped};

return std::make_tuple(_state, stats, skipped); // tuple, cannot use initializer list due to build failure
}

initIndexes(_input);

@@ -577,10 +581,12 @@ std::tuple<ExecutionState, IndexExecutor::Stats, size_t> IndexExecutor::skipRows
}

size_t skipped = _skipped;

_skipped = 0;

if (_state == ExecutionState::DONE && !_input) {
return {ExecutionState::DONE, stats, skipped};
} else {
return {ExecutionState::HASMORE, stats, skipped};
return std::make_tuple(ExecutionState::DONE, stats, skipped); // tuple, cannot use initializer list due to build failure
}

return std::make_tuple(ExecutionState::HASMORE, stats, skipped); // tuple, cannot use initializer list due to build failure
}
@@ -35,6 +35,7 @@ typedef uint32_t VariableId;

/// @brief type for register numbers/ids
typedef unsigned int RegisterId;
typedef RegisterId RegisterCount;

/// @brief type of a query id
typedef uint64_t QueryId;
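A small sketch of what the new alias is for (the helper below is hypothetical, not from the commit): RegisterCount documents that a parameter is a number of registers rather than a particular register id, even though both share the same underlying integer type.

#include <iostream>

typedef unsigned int RegisterId;   // identifies a single register
typedef RegisterId RegisterCount;  // counts registers; same underlying type

// Hypothetical helper illustrating the intent of signatures such as the
// changed requestBlock(size_t nrItems, RegisterCount nrRegs).
bool isValidRegister(RegisterId reg, RegisterCount nrRegs) {
  return reg < nrRegs;  // a register id must be smaller than the register count
}

int main() {
  std::cout << std::boolalpha << isValidRegister(3, 5) << "\n";  // prints: true
}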
@@ -38,10 +38,55 @@
#include "VocBase/ticks.h"

#include <thread>
#include <iomanip>

using namespace arangodb;
using namespace arangodb::communicator;

namespace {
std::stringstream createRequestInfo(NewRequest const& request) {
bool trace = Logger::CLUSTERCOMM.level() == LogLevel::TRACE;
std::stringstream ss;
ss << "id: " << std::setw(8) << std::setiosflags(std::ios::left)
<< request._ticketId << std::resetiosflags(std::ios::adjustfield)
<< " --> " << request._destination
<< " -- " << arangodb::GeneralRequest::translateMethod(request._request->requestType())
<< ": " << ( request._request->fullUrl().empty() ? "url unknown" : request._request->fullUrl())
;
if(trace){
try {
ss << " -- payload: '" << request._request->payload().toJson() << "'";
} catch (...) {
ss << " -- can not show payload";
}
}
return ss;
}

std::stringstream createResponseInfo(ClusterCommResult const* result) {
bool trace = Logger::CLUSTERCOMM.level() == LogLevel::TRACE;
std::stringstream ss;
ss << "id: " << std::setw(8) << std::setiosflags(std::ios::left)
<< result->operationID << std::resetiosflags(std::ios::adjustfield)
<< " <-- " << result->endpoint
<< " -- " << result->serverID << ":" << (result->shardID.empty() ? "unknown ShardID" : result->shardID)
;
if(trace){
try {
if(result->result){
ss << " -- payload: '" << result->result->getBody() << "'";
} else {
ss << " -- payload: no result";
}
} catch (...) {
ss << "can not show payload";
}
}
return ss;
}
}

/// @brief empty map with headers
std::unordered_map<std::string, std::string> const ClusterCommRequest::noHeaders;

@@ -450,6 +495,7 @@ OperationID ClusterComm::asyncRequest(
}
}
result->fromError(errorCode, std::move(response));
LOG_TOPIC("2345c", DEBUG, Logger::CLUSTERCOMM) << createResponseInfo(result.get()).rdbuf();
if (result->status == CL_COMM_BACKEND_UNAVAILABLE) {
logConnectionError(doLogConnectionErrors, result.get(), initTimeout, __LINE__);
}

@@ -468,6 +514,7 @@ OperationID ClusterComm::asyncRequest(
}
TRI_ASSERT(response.get() != nullptr);
result->fromResponse(std::move(response));
LOG_TOPIC("23457", DEBUG, Logger::CLUSTERCOMM) << createResponseInfo(result.get()).rdbuf();
/*bool ret =*/((*callback.get())(result.get()));
// TRI_ASSERT(ret == true);
};

@@ -478,6 +525,7 @@ OperationID ClusterComm::asyncRequest(
// having a shared_ptr So it will be gone after this callback
CONDITION_LOCKER(locker, somethingReceived);
result->fromError(errorCode, std::move(response));
LOG_TOPIC("23458", DEBUG, Logger::CLUSTERCOMM) << createResponseInfo(result.get()).rdbuf();
if (result->status == CL_COMM_BACKEND_UNAVAILABLE) {
logConnectionError(doLogConnectionErrors, result.get(), initTimeout, __LINE__);
}

@@ -489,6 +537,7 @@ OperationID ClusterComm::asyncRequest(
TRI_ASSERT(response.get() != nullptr);
CONDITION_LOCKER(locker, somethingReceived);
result->fromResponse(std::move(response));
LOG_TOPIC("23459", DEBUG, Logger::CLUSTERCOMM) << createResponseInfo(result.get()).rdbuf();
somethingReceived.broadcast();
};
}

@@ -502,6 +551,7 @@ OperationID ClusterComm::asyncRequest(
std::move(callbacks),
opt);

LOG_TOPIC("2345a", DEBUG, Logger::CLUSTERCOMM) << createRequestInfo(*newRequest).rdbuf();
CONDITION_LOCKER(locker, somethingReceived);
auto ticketId = communicatorPtr->addRequest(std::move(newRequest));

@@ -581,6 +631,7 @@ std::unique_ptr<ClusterCommResult> ClusterComm::syncRequest(
callbacks,
opt);

LOG_TOPIC("34567", TRACE, Logger::CLUSTERCOMM) << createRequestInfo(*newRequest).rdbuf();
CONDITION_LOCKER(isen, cv);
// can't move callbacks here
communicator()->addRequest(std::move(newRequest));

@@ -588,6 +639,8 @@ std::unique_ptr<ClusterCommResult> ClusterComm::syncRequest(
while (!wasSignaled) {
cv.wait(100000);
}

LOG_TOPIC("2345b", DEBUG, Logger::CLUSTERCOMM) << createResponseInfo(result.get()).rdbuf();
return result;
}
@@ -49,7 +49,6 @@

using namespace arangodb;
using namespace arangodb::graph;
using UserTransaction = transaction::Methods;

std::shared_ptr<transaction::Context> GraphOperations::ctx() const {
return transaction::StandaloneContext::Create(_vocbase);

@@ -686,7 +685,7 @@ OperationResult GraphOperations::createEdge(const std::string& definitionName,
transaction::Options trxOptions;
trxOptions.waitForSync = waitForSync;

UserTransaction trx(ctx(), readCollections, writeCollections, {}, trxOptions);
transaction::Methods trx(ctx(), readCollections, writeCollections, {}, trxOptions);

Result res = trx.begin();
if (!res.ok()) {

@@ -728,7 +727,7 @@ OperationResult GraphOperations::createVertex(const std::string& collectionName,

std::vector<std::string> writeCollections;
writeCollections.emplace_back(collectionName);
UserTransaction trx(ctx(), {}, writeCollections, {}, trxOptions);
transaction::Methods trx(ctx(), {}, writeCollections, {}, trxOptions);

Result res = trx.begin();

@@ -788,7 +787,7 @@ OperationResult GraphOperations::removeEdgeOrVertex(const std::string& collectio
auto ctx = std::make_shared<transaction::StandaloneSmartContext>(_vocbase);
transaction::Options trxOptions;
trxOptions.waitForSync = waitForSync;
UserTransaction trx{ctx, {}, trxCollections, {}, trxOptions};
transaction::Methods trx{ctx, {}, trxCollections, {}, trxOptions};

res = trx.begin();

@@ -799,7 +798,7 @@ OperationResult GraphOperations::removeEdgeOrVertex(const std::string& collectio
OperationResult result = trx.remove(collectionName, search, options);

{
aql::QueryString const queryString = aql::QueryString{
aql::QueryString const queryString{
"FOR e IN @@collection "
"FILTER e._from == @toDeleteId "
"OR e._to == @toDeleteId "

@@ -808,8 +807,7 @@ OperationResult GraphOperations::removeEdgeOrVertex(const std::string& collectio
std::string const toDeleteId = collectionName + "/" + key;

for (auto const& edgeCollection : edgeCollections) {
std::shared_ptr<VPackBuilder> bindVars{std::make_shared<VPackBuilder>()};

auto bindVars = std::make_shared<VPackBuilder>();
bindVars->add(VPackValue(VPackValueType::Object));
bindVars->add("@collection", VPackValue(edgeCollection));
bindVars->add("toDeleteId", VPackValue(toDeleteId));
@@ -39,7 +39,6 @@
#include <velocypack/StringRef.h>
#include <velocypack/velocypack-aliases.h>

using namespace arangodb;
using namespace arangodb::graph;

@@ -58,7 +57,6 @@ bool KShortestPathsFinder::startKShortestPathsTraversal(
_shortestPaths.clear();
_candidatePaths.clear();

TRI_IF_FAILURE("TraversalOOMInitialize") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}

@@ -93,7 +91,9 @@ bool KShortestPathsFinder::computeShortestPath(VertexRef const& start, VertexRef
return found;
}

void KShortestPathsFinder::computeNeighbourhoodOfVertexCache(VertexRef vertex, Direction direction, std::vector<Step>*& res) {
void KShortestPathsFinder::computeNeighbourhoodOfVertexCache(VertexRef vertex,
Direction direction,
std::vector<Step>*& res) {
auto lookup = _vertexCache.emplace(vertex, FoundVertex(vertex)).first;
auto& cache = lookup->second; // want to update the cached vertex in place

@@ -209,8 +209,8 @@ bool KShortestPathsFinder::advanceFrontier(Ball& source, Ball const& target,
}
} else {
source._frontier.insert(s._vertex,
std::make_unique<DijkstraInfo>(s._vertex,
std::move(s._edge), vr, weight));
std::make_unique<DijkstraInfo>(s._vertex, std::move(s._edge),
vr, weight));
}
}
}

@@ -303,8 +303,11 @@ bool KShortestPathsFinder::computeNextShortestPath(Path& result) {
candidate.append(tmpPath, 0, tmpPath.length() - 1);
candidate._branchpoint = i;

auto it = find_if(_candidatePaths.begin(), _candidatePaths.end(), [candidate](Path const& v) { return v._weight >= candidate._weight; } );
if (!(*it == candidate)) {
auto it = find_if(_candidatePaths.begin(), _candidatePaths.end(),
[candidate](Path const& v) {
return v._weight >= candidate._weight;
});
if (it == _candidatePaths.end() || !(*it == candidate)) {
_candidatePaths.emplace(it, candidate);
}
}
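The change above guards the iterator returned by find_if before it is dereferenced. A simplified, self-contained sketch of that fix follows (types reduced to the minimum; not the actual KShortestPathsFinder code).

// Sketch: sorted insertion of a candidate path with an end() check.
#include <algorithm>
#include <cassert>
#include <vector>

struct Path {
  double _weight;
  bool operator==(Path const& other) const { return _weight == other._weight; }
};

void insertCandidate(std::vector<Path>& candidates, Path const& candidate) {
  auto it = std::find_if(candidates.begin(), candidates.end(),
                         [&candidate](Path const& v) {
                           return v._weight >= candidate._weight;
                         });
  // Dereferencing 'it' unconditionally would be undefined behaviour when no
  // heavier path exists yet; checking against end() makes the duplicate test safe.
  if (it == candidates.end() || !(*it == candidate)) {
    candidates.emplace(it, candidate);
  }
}

int main() {
  std::vector<Path> candidates;
  insertCandidate(candidates, Path{2.0});  // empty list: find_if returns end()
  insertCandidate(candidates, Path{1.0});
  insertCandidate(candidates, Path{2.0});  // duplicate weight: skipped
  assert(candidates.size() == 2);
}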
@@ -1603,7 +1603,9 @@ arangodb::Result IResearchAnalyzerFeature::loadAnalyzers( // load
}

void IResearchAnalyzerFeature::prepare() {
ApplicationFeature::prepare();
if (!isEnabled()) {
return;
}

// load all known analyzers
::iresearch::analysis::analyzers::init();

@@ -1619,7 +1621,7 @@ arangodb::Result IResearchAnalyzerFeature::remove( // remove analyzer
if (split.first.null()) {
return arangodb::Result( // result
TRI_ERROR_FORBIDDEN, // code
"static analyzers cannot be removed" // message
"built-in analyzers cannot be removed" // message
);
}

@@ -1774,7 +1776,9 @@ arangodb::Result IResearchAnalyzerFeature::remove( // remove analyzer
}

void IResearchAnalyzerFeature::start() {
ApplicationFeature::start();
if (!isEnabled()) {
return;
}

// register analyzer functions
{

@@ -1801,14 +1805,16 @@ void IResearchAnalyzerFeature::start() {
}

void IResearchAnalyzerFeature::stop() {
if (!isEnabled()) {
return;
}

{
WriteMutex mutex(_mutex);
SCOPED_LOCK(mutex); // '_analyzers' can be asynchronously read

_analyzers = getStaticAnalyzers(); // clear cache and reload static analyzers
}

ApplicationFeature::stop();
}

arangodb::Result IResearchAnalyzerFeature::storeAnalyzer(AnalyzerPool& pool) {
@@ -50,6 +50,12 @@

namespace {

////////////////////////////////////////////////////////////////////////////////
/// @brief the suffix appended to the index_meta filename to generate the
/// backup filename to be used for renaming
////////////////////////////////////////////////////////////////////////////////
const irs::string_ref IRESEARCH_BACKUP_SUFFIX(".backup");

////////////////////////////////////////////////////////////////////////////////
/// @brief the suffix appended to the index_meta filename to generate the
/// corresponding checkpoint file

@@ -1003,7 +1009,7 @@ arangodb::Result IResearchLink::initDataStore(InitCallback const& initCallback,
try {
recovery_reader = irs::directory_reader::open(*(_dataStore._directory));
} catch (irs::index_not_found const&) {
// ingore
// ignore
}
}

@@ -1011,19 +1017,54 @@ arangodb::Result IResearchLink::initDataStore(InitCallback const& initCallback,
// '.checkpoint' file for the last state of the data store
// if it's missing then probably the WAL tail was lost
if (recovery_reader) {
auto& checkpoint = recovery_reader.meta().filename;
auto checkpointFile = checkpoint + std::string(IRESEARCH_CHECKPOINT_SUFFIX);
auto ref = irs::directory_utils::reference( // create a reference
irs::index_file_refs::ref_t ref;

// find the latest segment state with a checkpoint file
for(;;) {
auto& filename = recovery_reader.meta().filename; // segment state filename
auto checkpointFile = // checkpoint filename
filename + std::string(IRESEARCH_CHECKPOINT_SUFFIX);

ref = irs::directory_utils::reference( // create a reference
*(_dataStore._directory), checkpointFile, false // args
);

if (!ref) {
if (ref) {
break; // found checkpoint file for latest state
}

auto src = _dataStore._path;
auto& srcFilename = filename;
auto dst = src;
auto dstFilename = filename + std::string(IRESEARCH_BACKUP_SUFFIX);

src /= srcFilename;
dst /= dstFilename;

// move segment state file without a matching checkpoint out of the way
if (!src.rename(dst)) {
return arangodb::Result( // result
TRI_ERROR_ARANGO_ILLEGAL_STATE, // code
std::string("failed to rename the latest data store state file for arangosearch link '") + std::to_string(id()) + "', source '" + srcFilename + "' destination '" + dstFilename + "' in path: " + _dataStore._path.utf8()
);
}

try {
recovery_reader.reset(); // unset to allow for checking for success below
recovery_reader = irs::directory_reader::open(*(_dataStore._directory)); // retry opening
} catch (irs::index_not_found const&) {
// ignore
}

if (!recovery_reader) {
return arangodb::Result( // result
TRI_ERROR_ARANGO_ILLEGAL_STATE, // code
std::string("failed to find checkpoint file matching the latest data store state for arangosearch link '") + std::to_string(id()) + "', expecting file '" + checkpointFile + "' in path: " + _dataStore._path.utf8()
);
}
}

auto& checkpointFile = *ref; // ref non-null ensured by above loop
auto in = _dataStore._directory->open( // open checkpoint file
checkpointFile, irs::IOAdvice::NORMAL // args, use 'NORMAL' since the file could be empty
);
@@ -103,20 +103,29 @@ arangodb::Result canUseAnalyzers( // validate
return arangodb::Result();
}

bool createLink( // create link
arangodb::Result createLink( // create link
arangodb::LogicalCollection& collection, // link collection
arangodb::LogicalView const& view, // link view
arangodb::velocypack::Slice definition // link definition
) {
try {
bool isNew = false;
auto link = collection.createIndex(definition, isNew);
LOG_TOPIC_IF("2c861", DEBUG, arangodb::iresearch::TOPIC, link)
<< "added link '" << link->id() << "'";

return link && isNew;
if (!(link && isNew)) {
return arangodb::Result( // result
TRI_ERROR_INTERNAL, // code
std::string("failed to create link between arangosearch view '") + view.name() + "' and collection '" + collection.name() + "'"
);
}
} catch (arangodb::basics::Exception const& e) {
return arangodb::Result(e.code(), e.what());
}

bool createLink( // create link
return arangodb::Result();
}

arangodb::Result createLink( // create link
arangodb::LogicalCollection& collection, // link collection
arangodb::iresearch::IResearchViewCoordinator const& view, // link view
arangodb::velocypack::Slice definition // link definition

@@ -141,7 +150,10 @@ bool createLink( // create link
);

if (!arangodb::iresearch::mergeSliceSkipKeys(builder, definition, acceptor)) {
return false;
return arangodb::Result( // result
TRI_ERROR_INTERNAL, // code
std::string("failed to generate definition while creating link between arangosearch view '") + view.name() + "' and collection '" + collection.name() + "'"
);
}

builder.close();

@@ -150,20 +162,27 @@ bool createLink( // create link

return arangodb::methods::Indexes::ensureIndex( // ensure index
&collection, builder.slice(), true, tmp // args
).ok();
);
}

template<typename ViewType>
bool dropLink( // drop link
arangodb::Result dropLink( // drop link
arangodb::LogicalCollection& collection, // link collection
arangodb::iresearch::IResearchLink const& link // link to drop
) {
// don't need to create an extra transaction inside arangodb::methods::Indexes::drop(...)
return collection.dropIndex(link.id());
if (!collection.dropIndex(link.id())) {
return arangodb::Result( // result
TRI_ERROR_INTERNAL, // code
std::string("failed to drop link '") + std::to_string(link.id()) + "' from collection '" + collection.name() + "'"
);
}

return arangodb::Result();
}

template<>
bool dropLink<arangodb::iresearch::IResearchViewCoordinator>( // drop link
arangodb::Result dropLink<arangodb::iresearch::IResearchViewCoordinator>( // drop link
arangodb::LogicalCollection& collection, // link collection
arangodb::iresearch::IResearchLink const& link // link to drop
) {

@@ -176,7 +195,7 @@ bool dropLink<arangodb::iresearch::IResearchViewCoordinator>( // drop link
);
builder.close();

return arangodb::methods::Indexes::drop(&collection, builder.slice()).ok();
return arangodb::methods::Indexes::drop(&collection, builder.slice());
}

template <typename ViewType>
@@ -195,17 +214,15 @@ arangodb::Result modifyLinks( // modify links

struct State {
std::shared_ptr<arangodb::LogicalCollection> _collection;
size_t _collectionsToLockOffset; // std::numeric_limits<size_t>::max() ==
// removal only
size_t _collectionsToLockOffset; // std::numeric_limits<size_t>::max() == removal only
std::shared_ptr<arangodb::iresearch::IResearchLink> _link;
size_t _linkDefinitionsOffset;
arangodb::Result _result; // operation result
bool _stale = false; // request came from the stale list
bool _valid = true;
explicit State(size_t collectionsToLockOffset)
: State(collectionsToLockOffset, std::numeric_limits<size_t>::max()) {}
State(size_t collectionsToLockOffset, size_t linkDefinitionsOffset)
: _collectionsToLockOffset(collectionsToLockOffset),
_linkDefinitionsOffset(linkDefinitionsOffset) {}
: _collectionsToLockOffset(collectionsToLockOffset), _linkDefinitionsOffset(linkDefinitionsOffset) {}
};
std::vector<std::string> collectionsToLock;
std::vector<std::pair<arangodb::velocypack::Builder, arangodb::iresearch::IResearchLinkMeta>> linkDefinitions;

@@ -215,12 +232,10 @@ arangodb::Result modifyLinks( // modify links
auto collection = linksItr.key();

if (!collection.isString()) {
return arangodb::Result(TRI_ERROR_BAD_PARAMETER,
std::string(
"error parsing link parameters from json for "
"arangosearch view '") +
view.name() + "' offset '" +
arangodb::basics::StringUtils::itoa(linksItr.index()) + '"');
return arangodb::Result( // result
TRI_ERROR_BAD_PARAMETER, // code
std::string("error parsing link parameters from json for arangosearch view '") + view.name() + "' offset '" + arangodb::basics::StringUtils::itoa(linksItr.index()) + '"'
);
}

auto link = linksItr.value();

@@ -465,19 +480,20 @@ arangodb::Result modifyLinks( // modify links
// execute removals
for (auto& state : linkModifications) {
if (state._link) { // link removal or recreate request
LOG_TOPIC("9da74", DEBUG, arangodb::iresearch::TOPIC)
<< "removed link '" << state._link->id() << "'";
state._valid = dropLink<ViewType>(*(state._collection), *(state._link));
state._result = dropLink<ViewType>(*(state._collection), *(state._link));
modified.emplace(state._collection->id());
}
}

// execute additions
for (auto& state: linkModifications) {
if (state._valid && state._linkDefinitionsOffset < linkDefinitions.size()) {
state._valid =
createLink(*(state._collection), view,
linkDefinitions[state._linkDefinitionsOffset].first.slice());
if (state._result.ok() // valid state (unmodified or after removal)
&& state._linkDefinitionsOffset < linkDefinitions.size()) {
state._result = createLink( // create link
*(state._collection), // collection
view, // view
linkDefinitions[state._linkDefinitionsOffset].first.slice() // definition
);
modified.emplace(state._collection->id());
}
}

@@ -486,8 +502,11 @@ arangodb::Result modifyLinks( // modify links

// validate success
for (auto& state: linkModifications) {
if (!state._valid) {
error.append(error.empty() ? "" : ", ").append(collectionsToLock[state._collectionsToLockOffset]);
if (!state._result.ok()) {
error.append(error.empty() ? "" : ", ") // separator
.append(collectionsToLock[state._collectionsToLockOffset]) // collection name
.append(": ").append(std::to_string(state._result.errorNumber())) // error code
.append(" ").append(state._result.errorMessage()); // error message
}
}

@@ -495,11 +514,10 @@ arangodb::Result modifyLinks( // modify links
return arangodb::Result();
}

return arangodb::Result(
TRI_ERROR_ARANGO_ILLEGAL_STATE,
std::string("failed to update links while updating arangosearch view '") +
view.name() +
"', retry same request or examine errors for collections: " + error);
return arangodb::Result( // result
TRI_ERROR_ARANGO_ILLEGAL_STATE, // code
std::string("failed to update links while updating arangosearch view '") + view.name() + "', retry same request or examine errors for collections: " + error
);
}

} // namespace
@@ -36,10 +36,11 @@

namespace {

const std::string POLICY_BYTES_ACCUM =
"bytes_accum"; // {threshold} > (segment_bytes +
// sum_of_merge_candidate_segment_bytes) / all_segment_bytes
const std::string POLICY_TIER = "tier"; // scoring policy based on byte size and live docs
// {threshold} > (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes
const std::string POLICY_BYTES_ACCUM = "bytes_accum";

// scoring policy based on byte size and live docs
const std::string POLICY_TIER = "tier";

template <typename T>
arangodb::iresearch::IResearchViewMeta::ConsolidationPolicy createConsolidationPolicy(

@@ -158,14 +159,30 @@ arangodb::iresearch::IResearchViewMeta::ConsolidationPolicy createConsolidationP
}
}

{
// optional double
static const std::string fieldName("minScore");

if (slice.hasKey(fieldName)) {
auto field = slice.get(fieldName);

if (!field.isNumber<double_t>()) {
errorField = fieldName;

return arangodb::iresearch::IResearchViewMeta::ConsolidationPolicy();
}

options.min_score = field.getNumber<double_t>();
}
}

properties.openObject();
properties.add("type", arangodb::velocypack::Value(POLICY_TIER));
properties.add("lookahead", arangodb::velocypack::Value(size_t(1))); // FIXME remove in 3.5
properties.add("segmentsBytesFloor",
arangodb::velocypack::Value(options.floor_segment_bytes));
properties.add("segmentsBytesMax", arangodb::velocypack::Value(options.max_segments_bytes));
properties.add("segmentsMax", arangodb::velocypack::Value(options.max_segments));
properties.add("segmentsMin", arangodb::velocypack::Value(options.min_segments));
properties.add("type", VPackValue(POLICY_TIER));
properties.add("segmentsBytesFloor", VPackValue(options.floor_segment_bytes));
properties.add("segmentsBytesMax", VPackValue(options.max_segments_bytes));
properties.add("segmentsMax", VPackValue(options.max_segments));
properties.add("segmentsMin", VPackValue(options.min_segments));
properties.add("minScore", VPackValue(options.min_score));
properties.close();

return arangodb::iresearch::IResearchViewMeta::ConsolidationPolicy{

@@ -202,9 +219,9 @@ IResearchViewMeta::IResearchViewMeta()
std::string errorField;

_consolidationPolicy =
createConsolidationPolicy<irs::index_utils::consolidate_bytes_accum>(
createConsolidationPolicy<irs::index_utils::consolidate_tier>(
arangodb::velocypack::Parser::fromJson(
"{ \"type\": \"bytes_accum\", \"threshold\": 0.1 }")
"{ \"type\": \"tier\" }")
->slice(),
errorField);
assert(_consolidationPolicy.policy()); // ensure above syntax is correct
@ -53,9 +53,6 @@ RestTransactionHandler::RestTransactionHandler(GeneralRequest* request, GeneralR
RestStatus RestTransactionHandler::execute() {
  switch (_request->requestType()) {
    case rest::RequestType::GET:
      executeGetState();
      break;

    case rest::RequestType::POST:
      if (_request->suffixes().size() == 1 &&

@ -76,6 +73,10 @@ RestStatus RestTransactionHandler::execute() {
      executeAbort();
      break;

    case rest::RequestType::GET:
      executeGetState();
      break;

    default:
      generateError(rest::ResponseCode::METHOD_NOT_ALLOWED, TRI_ERROR_HTTP_METHOD_NOT_ALLOWED);
      break;

@ -131,7 +132,7 @@ void RestTransactionHandler::executeBegin() {
    TRI_ASSERT(tid != 0);
    TRI_ASSERT(!transaction::isLegacyTransactionId(tid));
  } else {
    if (!(ServerState::isCoordinator(role) || ServerState::isSingleServer(role))) {
    if (!ServerState::isCoordinator(role) && !ServerState::isSingleServer(role)) {
      generateError(rest::ResponseCode::BAD, TRI_ERROR_NOT_IMPLEMENTED,
                    "Not supported on this server type");
      return;
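The handler above serves the stream transaction REST API, including querying a transaction's state via GET. A minimal arangosh sketch of the corresponding client-side flow; the collection name "demo" is an assumption:

// arangosh sketch of a stream transaction (assumes a collection "demo" exists)
var trx = db._createTransaction({ collections: { write: ["demo"] } });
try {
  trx.collection("demo").save({ _key: "doc1" });
  print(trx.status());   // inspect the state of the running transaction
  trx.commit();
} catch (err) {
  trx.abort();           // roll the transaction back on error
  throw err;
}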
@ -63,7 +63,7 @@ ManagerFeature::ManagerFeature(application_features::ApplicationServer& server)
}

void ManagerFeature::prepare() {
  TRI_ASSERT(MANAGER == nullptr);
  TRI_ASSERT(MANAGER.get() == nullptr);
  TRI_ASSERT(EngineSelectorFeature::ENGINE != nullptr);
  MANAGER = EngineSelectorFeature::ENGINE->createTransactionManager();
}
@ -561,11 +561,11 @@ void JS_List(v8::FunctionCallbackInfo<v8::Value> const& args) {
  for (size_t i = 0, count = result.size(); i < count; ++i) {
    auto analyzer = WrapAnalyzer(isolate, result[i]);

    if (analyzer.IsEmpty()) {
    if (analyzer.IsEmpty() || i > std::numeric_limits<uint32_t>::max()) {
      TRI_V8_THROW_EXCEPTION_MEMORY();
    }

    v8Result->Set(i, analyzer);
    v8Result->Set(static_cast<uint32_t>(i), analyzer); // cast safe because of check above
  }

  TRI_V8_RETURN(v8Result);
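JS_List above is the V8 binding behind listing analyzers from JavaScript. A minimal arangosh sketch of how it is typically exercised through the @arangodb/analyzers module; the output shown in the comment is illustrative:

// arangosh sketch: enumerate all analyzers visible in the current database
var analyzers = require("@arangodb/analyzers");
analyzers.toArray().forEach(function (a) {
  print(a.name(), a.type());   // e.g. "text_en text"
});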
@ -1,3 +1,4 @@
leak:create_conn
leak:curl_multi_perform
leak:curl_multi_init
leak:CRYPTO_zalloc
@ -85,11 +85,9 @@ actions.defineHttp({
    let msg = "";
    let used = [];
    while (++count <= 60) {
      let preconditions = {};
      preconditions['/arango/Supervision/Health/' + serverId + '/Status'] = {'old': 'FAILED'};
      // need to make sure it is not responsible for anything
      used = [];
      preconditions = reducePlanServers(function (data, agencyKey, servers) {
      let preconditions = reducePlanServers(function (data, agencyKey, servers) {
        data[agencyKey] = {'old': servers};
        if (servers.indexOf(serverId) !== -1) {
          used.push(agencyKey);

@ -104,6 +102,7 @@ actions.defineHttp({
        return data;
      }, preconditions);

      preconditions['/arango/Supervision/Health/' + serverId + '/Status'] = {'old': 'FAILED'};
      preconditions["/arango/Supervision/DBServers/" + serverId]
        = { "oldEmpty": true };

@ -112,6 +111,7 @@ actions.defineHttp({
      operations['/arango/Plan/Coordinators/' + serverId] = {'op': 'delete'};
      operations['/arango/Plan/DBServers/' + serverId] = {'op': 'delete'};
      operations['/arango/Current/ServersRegistered/' + serverId] = {'op': 'delete'};
      operations['/arango/Current/DBServers/' + serverId] = {'op': 'delete'};
      operations['/arango/Supervision/Health/' + serverId] = {'op': 'delete'};
      operations['/arango/Target/MapUniqueToShortID/' + serverId] = {'op': 'delete'};
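For context, the operations and preconditions built above are submitted to the agency as a write transaction of the shape [operations, preconditions]. A minimal sketch of that structure with a placeholder server id; the helper that actually submits it is outside this diff:

// sketch of the agency write transaction assembled by the handler above;
// 'SERVER-ID' is a placeholder, not a real server id
var serverId = 'SERVER-ID';

var operations = {};
operations['/arango/Plan/DBServers/' + serverId] = { op: 'delete' };
operations['/arango/Supervision/Health/' + serverId] = { op: 'delete' };

var preconditions = {};
preconditions['/arango/Supervision/Health/' + serverId + '/Status'] = { old: 'FAILED' };
preconditions['/arango/Supervision/DBServers/' + serverId] = { oldEmpty: true };

var agencyTransaction = [operations, preconditions];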
@ -34,7 +34,6 @@
      "frontend/css/highlightjs.css",
      "frontend/css/jsoneditor.css",
      "frontend/css/grids-responsive-min.css",
      "frontend/css/tippy.css",
      "frontend/css/dygraph.css",
      "frontend/css/leaflet.css",
      "frontend/css/nv.d3.css",

@ -59,6 +58,7 @@
      "frontend/js/lib/numeral.min.js",
      "frontend/js/lib/moment.min.js",
      "frontend/js/lib/randomColor.js",
      "frontend/js/lib/popper.js",
      "frontend/js/lib/tippy.js",
      // START SIGMA LIBRARIES
      "frontend/js/lib/sigma.min.js",

@ -182,7 +182,7 @@
      },
      files: [{
        expand: true,
        src: ['frontend/build/app.min.js', 'frontend/build/libs.min.js'],
        src: ['frontend/build/app.min.js', 'frontend/build/libs.min.js', 'frontend/build/templates.min.js'],
        dest: '.',
        ext: '.min.js.gz'
      }]

@ -193,7 +193,7 @@
      },
      files: [{
        expand: true,
        src: ['frontend/build/app.js', 'frontend/build/libs.min.js'],
        src: ['frontend/build/app.js', 'frontend/build/libs.min.js', 'frontend/build/templates.min.js'],
        dest: '.',
        ext: '.js.gz'
      }]

@ -292,7 +292,6 @@
      src: [
        "frontend/html/start.html.part",
        "frontend/html/head.html.part",
        "frontend/js/templates/*.ejs",
        "frontend/html/body.html.part",
        "frontend/build/scripts.html.part",
        "frontend/html/end.html.part"

@ -345,10 +344,30 @@
      ]
    },

    jst: {
      compile: {
        options: {
          //namespace: "anotherNameThanJST", //Default: 'JST'
          prettify: false, //Default: false|true
          amdWrapper: false, //Default: false|true
          templateSettings: {
          },
          processName: function(filename) {
            //Shortens the file path for the template.
            return filename.slice(filename.indexOf("template"), filename.length);
          }
        },
        files: {
          'frontend/build/templates.js': ['frontend/js/templates/*.ejs']
        }
      }
    },

    uglify: {
      default1: {
        files: {
          'frontend/build/app.min.js': 'frontend/build/app.js'
          'frontend/build/app.min.js': 'frontend/build/app.js',
          'frontend/build/templates.min.js': 'frontend/build/templates.js'
        }
      },
      libs2: {

@ -393,8 +412,7 @@
      },
      html: {
        files: [
          'frontend/html/*',
          'frontend/js/templates/*.ejs'
          'frontend/html/*'
        ],
        tasks: [
          'concat_in_order:htmlStandalone',

@ -407,6 +425,7 @@
  grunt.loadNpmTasks("grunt-babel");
  grunt.loadNpmTasks("grunt-sass");
  grunt.loadNpmTasks('grunt-contrib-jst');
  grunt.loadNpmTasks("grunt-contrib-imagemin");
  grunt.loadNpmTasks('grunt-contrib-cssmin');
  grunt.loadNpmTasks('grunt-contrib-compress');

@ -420,6 +439,7 @@
  grunt.registerTask('default', [
    'eslint',
    'jst',
    'sass:dev',
    'replace',
    'concat',

@ -433,6 +453,7 @@
  grunt.registerTask('devel', [
    'sass:dev',
    'jst',
    'replace',
    'concat',
    'concat_in_order:default',

@ -444,6 +465,7 @@
  grunt.registerTask('deploy', [
    'sass:dev',
    'eslint',
    'jst',
    'replace',
    'imagemin',
    'concat',
File diff suppressed because one or more lines are too long
Binary file not shown.

@ -1,2 +1,3 @@
<script src="libs.js?version=__VERSION"></script>
<script src="app.js?version=__VERSION"></script>
<script src="templates.js?version=__VERSION"></script>
@ -1,5 +1,5 @@
/* jshint unused: false */
/* global Blob, window, Joi, sigma, $, Tippy, document, _, arangoHelper, frontendConfig, arangoHelper, sessionStorage, localStorage, XMLHttpRequest */
/* global Blob, window, Joi, sigma, $, tippy, document, _, arangoHelper, frontendConfig, arangoHelper, sessionStorage, localStorage, XMLHttpRequest */

(function () {
  'use strict';

@ -230,14 +230,16 @@
      var settings = {
        arrow: true,
        animation: 'fade',
        animateFill: false,
        multiple: false,
        hideDuration: 1
        content: function (reference) {
          var title = reference.getAttribute('title');
          reference.removeAttribute('title');
          return title;
        }
      };

      if (position) {
        settings.position = position;
        settings.placement = position;
      }

      if (!selector) {

@ -246,16 +248,16 @@
      if (typeof selector === 'object') {
        _.each(selector, function (elem) {
          self.lastTooltips = new Tippy(elem, settings);
          self.lastTooltips = new tippy(elem, settings);
        });
      } else {
        if (selector.indexOf(',') > -1) {
          var selectors = selector.split(',');
          _.each(selectors, function (elem) {
            self.lastTooltips = new Tippy(elem, settings);
            self.lastTooltips = new tippy(elem, settings);
          });
        }
        this.lastTooltips = new Tippy(selector, settings);
        this.lastTooltips = new tippy(selector, settings);
      }
    },

@ -1208,9 +1210,9 @@
      tableContent.push(
        window.modalView.createCheckboxEntry(
          'new-app-replace',
          'Keep configuration and dependency files?',
          'Discard configuration and dependency files?',
          true,
          "Should this app's configuration be saved before replacing the app?",
          "Should this service's existing configuration and settings be removed completely before replacing the service?",
          false
        )
      );
@ -11,10 +11,7 @@
    var template = $('#' + id.replace('.', '\\.')).html();
    return {
      render: function (params) {
        var tmp = _.template(template);
        tmp = tmp(params);

        return tmp;
        return window['JST']["templates/" + id](params);
      }
    };
  };
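A short sketch of how aardvark views typically consume this helper; the selector '#content' and the template name 'loginView.ejs' are illustrative assumptions, not taken from this diff:

// sketch: rendering a template through templateEngine in an aardvark view
var tmpl = templateEngine.createTemplate('loginView.ejs');
$('#content').html(tmpl.render({ /* template parameters */ }));
// with this change, render() looks the template up in the precompiled
// window.JST map (built by the grunt-contrib-jst task configured above)
// instead of re-compiling the inline <script type="text/template"> block
// with _.template() on every call.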
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long

@ -1,4 +1,3 @@
<script id="applicationDetailView.ejs" type="text/template">
<div class="application-detail-view">
<div class="headerBar" style="width: 100%">
@ -151,4 +150,3 @@
</dl>
</aside>-->
</div>
</script>

@ -1,4 +1,3 @@
<script id="applicationListView.ejs" type="text/template">
<tr class="foxx-store-row">
<td class="foxx-store-main">
<div class="foxx-name"><%=name%><%=legacy?' (legacy)':''%></div>
@ -12,4 +11,3 @@
<button class="button-success install-app" appId="<%=name %>" appVersion="<%=latestVersion %>">Install</button>
</td>
</tr>
</script>

@ -1,4 +1,3 @@
<script id="applicationsView.ejs" type="text/template">
<div class="headerBar">
<div class="headerButtonBar">
<ul class="headerButtonList">
@ -54,4 +53,3 @@
</div>
</div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="applierView.ejs" type="text/template">
<div id="applierContent" class="innerContent applierContent replicationContent">
<div class="repl-applier">
@ -47,4 +46,3 @@
</div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="arangoTabbar.ejs" type="text/template">
<div class="arango-tabbar" id="<%=content.id%>">
<% _.each(content.titles, function(k,v) { %>
<% var name = content.titles[v][0]; %>
@ -6,4 +5,3 @@
<button class="arangodb-tabbar" id="<%=elid%>"><%=name%></button>
<%});%>
</div>
</script>

@ -1,4 +1,3 @@
<script id="arangoTable.ejs" type="text/template">
<div class="tableWrapper">
<% var type = type; %>
<table class="arango-table" id="<%=content.id%>">
@ -42,4 +41,3 @@
</tbody>
</table>
</div>
</script>

@ -1,4 +1,3 @@
<script id="clusterView.ejs" type="text/template">
<div class="headerBar" style="margin-top: -60px">
<div class="headerButtonBar">
@ -69,4 +68,3 @@
</div>
</script>

@ -1,4 +1,3 @@
<script id="collectionsItemView.ejs" type="text/template">
<div class="paddingBox">
<div class="borderBox"></div>
<i class="collection-type-icon fa <%= model.get('picture') %>"></i>
@ -37,4 +36,3 @@
<h5 class="collectionName"><%= model.get('name') %></h5>
<% } %>
</div>
</script>

@ -1,4 +1,3 @@
<script id="collectionsView.ejs" type="text/template">
<div class="headerBar">
<div class="search-field">
<input type="text" id="searchInput" class="search-input" placeholder="Search..."/>
@ -113,4 +112,3 @@
</div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="dashboardView.ejs" type="text/template">
<% var subBar = function(title) { %>
<div class="dashboard-sub-bar">
<div class="dashboard-sub-bar-title"><%= title %></div>
@ -145,4 +144,3 @@
</div>
</script>

@ -1,4 +1,3 @@
<script id="databaseView.ejs" type="text/template">
<div class="headerBar">
<div class="search-field">
<input type="text" value="<%=searchString%>" id="databaseSearchInput" class="search-input" placeholder="Search..."/>
@ -71,4 +70,3 @@
</div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="dbSelectionView.ejs" type="text/template">
<a href="#" class="tab disabled" id="dbselection"><div class="dbselection"><i class="fa fa-database"></i><span class="db-name">DB: </span> <%=current%>
<!-- <i class="fa fa-caret-square-o-down"></i> -->
</div>
@ -34,4 +33,3 @@ if (list.length > 0) {
<% } %>
</ul>
-->
</script>

@ -1,4 +1,3 @@
<script id="documentView.ejs" type="text/template">
<div class="headerBar">
<div class="headerButtonBar">
@ -65,4 +64,3 @@
<div class="pull-left shortcuts showHotkeyHelp"></div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="documentsView.ejs" type="text/template">
<div id="transparentHeader" class="headerBar marginTop5">
<div id="documentsToolbar" class="headerButtonBar">
@ -141,4 +140,3 @@
</div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="edgeDefinitionTable.ejs" type="text/template">
<tr class="tableRow" id="row_newEdgeDefinitions<%= number%>">
<th class="collectionTh">Edge definitions*:</th>
<th class="collectionTh">
@ -27,4 +26,3 @@
</span>
</th>
</tr>
</script>

@ -1,4 +1,3 @@
<script id="editListEntryView.ejs" type="text/template">
<td class="writable sorting_1">
<% if (isReadOnly) { %>
<span class="key"><%=key%></span>
@ -19,4 +18,3 @@
<span class="icon_arangodb_roundminus" data-original-title="Delete attribute"></span>
</a>
</td>
</script>

@ -1,4 +1,3 @@
<script id="filterSelect.ejs" type="text/template">
<div class="filterSelectBox">
<div class="filterLabel"><%=name%><span><i class="fa fa-close" id="closeFilter"></i></span></div>
@ -33,4 +32,3 @@
</div>
</script>

@ -1,4 +1,3 @@
<script id="footerView.ejs" type="text/template">
<%
var n,v,db;
if (name) {
@ -24,4 +23,3 @@
<div class="footer-right">
<p><% if(n) { %><a><%=n%> <%=v%> <% } %></a></p>
</div>
</script>

@ -1,4 +1,3 @@
<script id="foxxActiveView.ejs" type="text/template">
<div class="paddingBox">
<div class="foxxDesc">
<p><span class="foxxMount"><%=model.get("mount")%></span></p>
@ -27,4 +26,3 @@
</div>
<% } %>
</div>
</script>

@ -1,4 +1,3 @@
<script id="foxxEditView.ejs" type="text/template">
<%var appInfos = attributes.app.split(":"); %>
<div id="change-foxx" class="modal hide fade" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true" style="display:none">
<div class="modal-header">
@ -13,7 +12,7 @@
</tr>
<tr>
<th class="collectionTh">Documentation:</th>
<th class="collectionTh"><%=documentationJsonUrl</th>
<th class="collectionTh"><%=documentationJsonUrl%></th>
</tr>
<tr>
<th class="collectionTh">Mount:</th>
@ -61,4 +60,3 @@
</div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="foxxMountView.ejs" type="text/template">
<div id="install-foxx" class="modal hide fade" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true" style="display:none">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-hidden="true">×</button>
@ -40,4 +39,3 @@
<button id="cancel" class="button-danger pull-right">Cancel</button>
</div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="foxxRepoView.ejs" type="text/template">
<div class="paddingBox">
<div class="foxxDesc">
<p><span class="foxxName"><%=model.name%></span></p>
@ -15,4 +14,3 @@
<div class="borderBox"></div>
<img src="<%= thumbnail %>" height="50" width="50" alt="Icon for Service" class="icon">
</div>
</script>

@ -1,4 +1,3 @@
<script id="graphManagementView.ejs" type="text/template">
<div class="headerBar">
<div class="search-field">
<input type="text" value="<%=searchString%>" id="graphManagementSearchInput" class="search-input" placeholder="Search..."/>
@ -73,4 +72,3 @@
</div>
</div>
</script>

@ -1,10 +1,8 @@
<script id="graphSettingsView.ejs" type="text/template">
<% var genClass = 'pure-u-1-3'; %>
<% var genClass2 = 'pure-u-2-3'; %>
<% var formatName = function(name) { %>
<% var formattedName = %>
<% return name.charAt(0).toUpperCase() + string.slice(1);%>
<% }; %>
@ -122,4 +120,3 @@
</div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="graphViewGroupByEntry.ejs" type="text/template">
<div class="control-group">
<label for="<%=type %>_<%=id%>" class="control-label">Attribute <%=id%></label>
<div class="controls">
@ -6,4 +5,3 @@
<button id="remove_<%=type %>_<%=id%>" class="graphViewer-icon-button gv_internal_remove_line gv-icon-small delete" />
</div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="graphViewer2.ejs" type="text/template">
<div class="graphContent" id="graphContainer">
<div class="headerBar">
@ -49,4 +48,3 @@
<div id="graph-container" oncontextmenu="return false;"></div>
</div>
</script>

@ -1,7 +1,5 @@
<script id="helpUsView.ejs" type="text/template">
<div class="helpUs">
<iframe src="https://docs.google.com/forms/d/1vsIwy0mJSeToEnfo_jnBaQebewbcURL730IkZIrkyEE/viewform?embedded=true" scrolling="no" width="100%" height="1300px" frameborder="0" marginheight="0" marginwidth="0">Loading...</iframe>
</div>
</script>

@ -1,4 +1,3 @@
<script id="indicesView.ejs" type="text/template">
<% if (typeof supported !== 'undefined') { %>
<div class="contentIn" id="indexHeaderContent">
<div id="indexEditView">
@ -469,4 +468,3 @@
</div>
</div>
<% } %>
</script>

@ -1,4 +1,3 @@
<script id="lineChartDetailView.ejs" type="text/template">
<div id="lineChartDetail" class="modal hide fade modal-chart-detail" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true" style="display:none">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-hidden="true">×</button>
@ -6,4 +5,3 @@
</div>
<div id="dashboardDetailedLineChart" class="dashboardDetailChart" style="position: absolute"></div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="loadingTableView.ejs" type="text/template">
<thead>
<tr role="row">
<th class="sorting_disabled docsFirstCol">Content</th>
@ -14,4 +13,3 @@
</tr>
</tbody>
</script>

@ -1,4 +1,3 @@
<script id="loggerView.ejs" type="text/template">
<div id="loggerContent" class="logger-content-id innerContent">
@ -56,4 +55,3 @@
<div class="logBorder"></div>
</div>
<% }); %>
</script>

@ -1,4 +1,3 @@
<script id="loginView.ejs" type="text/template">
<div class="loginFixedWindow">
<div id="loginWindow" class="login-window">
@ -33,4 +32,3 @@
</div>
</div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="modalApplicationMount.ejs" type="text/template">
<table>
<tr class="tableRow">
<% if (content === true) { %>
@ -172,4 +171,3 @@ Upload a Foxx service bundle. The Foxx service bundle should be a zip archive co
<div id="upload-foxx-zip">Upload File</div>
</div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="modalBase.ejs" type="text/template">
<div id="modal-dialog" class="modal hide fade createModalDialog" tabindex="-1" role="dialog"
  aria-labelledby="myModalLabel" aria-hidden="true">
<% if (title !== null) { %>
@ -65,4 +64,3 @@
<button id="modal-abort-delete" class="button-neutral pull-right">No</button>
</div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="modalCollectionInfo.ejs" type="text/template">
<%
var figuresData = content.figures;
var revision = content.revision;
@ -361,4 +360,3 @@
</table>
<% } %>
</script>

@ -1,6 +1,4 @@
<script id="modalDownloadFoxx.ejs" type="text/template">
<div>
Your new Foxx Service is ready for download.
You can edit it on your local system and repack it in a zip file to publish it on ArangoDB.
</div>
</script>

@ -1,8 +1,6 @@
<script id="modalGraph.ejs" type="text/template">
<div class="detail-chart">
<div id="lineChartDetail" class="modal-inner-detail"></div>
<div class="modal-dashboard-legend">
<div class="dashboard-legend-inner" id="detailLegend"></div>
</div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="modalGraphTable.ejs" type="text/template">
<ul id="graphTab" class="nav nav-tabs">
<li class="active"><a href="#createGraph" data-toggle="tab" id="tab-createGraph">Graph</a></li>
@ -181,4 +180,3 @@
</div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="modalHotkeys.ejs" type="text/template">
<ul class="hotkeysList">
<% _.each(content, function(categories) { %>
@ -13,4 +12,3 @@
<% }); %>
<ul>
</script>

@ -1,4 +1,3 @@
<script id="modalTable.ejs" type="text/template">
<%
var createTR = function(row) {
var mandatory = '';
@ -115,4 +114,3 @@
</div>
</div>
<% } %>
</script>

@ -1,4 +1,3 @@
<script id="modalTestResults.ejs" type="text/template">
<%
function createSuite(suite) {
%>
@ -74,4 +73,3 @@
</div>
<% } %>
</div>
</script>

@ -1,4 +1,3 @@
<script id="navigationView.ejs" type="text/template">
<ul class="navlist arango-collection-ul" id="arangoCollectionUl">
<% if (isCluster) { %>
<li class="cluster-menu"><a id="cluster" class="tab" href="#cluster"><i class="fa fa-circle-o"></i>Cluster</a></li>
@ -102,4 +101,3 @@
<p><a href="https://groups.google.com/group/arangodb" target="_blank"><i class="fa fa-google"></i></a></p>
</div>
</script>

@ -1,8 +1,6 @@
<script id="nodeView.ejs" type="text/template">
<div id="nodeContent" class="innerContent">
</div>
</script>

@ -1,4 +1,3 @@
<script id="nodesView.ejs" type="text/template">
<div id="nodesContent" class="innerContent">
@ -172,4 +171,3 @@
</div>
</div>
</script>

@ -1,4 +1,3 @@
<script id="notificationItem.ejs" type="text/template">
<% notifications.forEach(function(n) { %>
<li class="dropdown-item">
<div class="notificationItem">
@ -13,4 +12,3 @@
</div>
</li>
<% }); %>
</script>

@ -1,4 +1,3 @@
<script id="notificationView.ejs" type="text/template">
<ul class="navlist" id="notificationViewUl">
<div class="navlogo">
@ -14,4 +13,3 @@
</li>
</ul>
</script>
Some files were not shown because too many files have changed in this diff.