1
0
Fork 0

Merge branch 'engine-api' of github.com:arangodb/arangodb into engine-api

This commit is contained in:
Michael Hackstein 2017-03-09 16:37:51 +02:00
commit 046793815c
95 changed files with 1847 additions and 1386 deletions

View File

@ -4,7 +4,7 @@
# General
# ------------------------------------------------------------------------------
cmake_minimum_required(VERSION 2.8)
cmake_minimum_required(VERSION 3.2)
if (POLICY CMP0037)
cmake_policy(SET CMP0037 NEW)
@ -31,7 +31,9 @@ if (NOT (CMAKE_BUILD_TYPE STREQUAL "Debug"
message(FATAL_ERROR "expecting CMAKE_BUILD_TYPE: None Debug Release RelWithDebInfo MinSizeRel, got ${CMAKE_BUILD_TYPE}.")
endif ()
set(CMAKE_OSX_DEPLOYMENT_TARGET "10.11" CACHE STRING "deployment target for MacOSX; adjust to your sysem")
if (NOT CMAKE_OSX_DEPLOYMENT_TARGET)
set(CMAKE_OSX_DEPLOYMENT_TARGET "10.11" CACHE STRING "deployment target for MacOSX; adjust to your sysem")
endif ()
if (WIN32)
project(arangodb3 CXX C)
@ -630,7 +632,7 @@ if (CMAKE_COMPILER_IS_GNUCC)
set(CMAKE_C_FLAGS_RELEASE "-O3 -fomit-frame-pointer" CACHE INTERNAL "C release flags")
set(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -g -fno-omit-frame-pointer" CACHE INTERNAL "C release with debug info flags")
set(CMAKE_CXX_FLAGS "-g" CACHE INTERNAL "default C++ compiler flags")
set(CMAKE_CXX_FLAGS "-g -Wnon-virtual-dtor" CACHE INTERNAL "default C++ compiler flags")
set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g -D_DEBUG=1" CACHE INTERNAL "C++ debug flags")
set(CMAKE_CXX_FLAGS_MINSIZEREL "-Os" CACHE INTERNAL "C++ minimal size flags")
set(CMAKE_CXX_FLAGS_RELEASE "-O3 -fomit-frame-pointer" CACHE INTERNAL "C++ release flags")
@ -649,7 +651,7 @@ elseif (CMAKE_COMPILER_IS_CLANG)
set(CMAKE_C_FLAGS_RELEASE "-O3 -fomit-frame-pointer" CACHE INTERNAL "C release flags")
set(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -g -fno-omit-frame-pointer" CACHE INTERNAL "C release with debug info flags")
set(CMAKE_CXX_FLAGS "-g" CACHE INTERNAL "default C++ compiler flags")
set(CMAKE_CXX_FLAGS "-g -Wnon-virtual-dtor" CACHE INTERNAL "default C++ compiler flags")
set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g -D_DEBUG=1" CACHE INTERNAL "C++ debug flags")
set(CMAKE_CXX_FLAGS_MINSIZEREL "-Os" CACHE INTERNAL "C++ minimal size flags")
set(CMAKE_CXX_FLAGS_RELEASE "-O3 -fomit-frame-pointer" CACHE INTERNAL "C++ release flags")

View File

@ -1,4 +1,17 @@
Administration
==============
Most administration can be managed using the *arangosh*.
Most administration can be managed using the *arangosh*.
Filesystems
===========
As one would expect for a database, we recommend a locally mounted filesystem.
NFS or similar network filesystems will not work.
On Linux we recommend the use of ext4fs, on Windows NTFS and on MacOS HFS+.
We recommend **not** using BTRFS on Linux; it is known to not work well in conjunction with ArangoDB.
We have experienced ArangoDB facing latency issues when accessing its database files on BTRFS partitions.
In conjunction with BTRFS and AUFS we also saw data loss on restart.

View File

@ -118,10 +118,6 @@ is returned if the import would trigger a unique key violation and
is returned if the server cannot auto-generate a document key (out of keys
error) for a document with no user-defined key.
@RESTRETURNCODE{501}
The server will respond with *HTTP 501* if this API is called on a cluster
coordinator.
@EXAMPLES
Importing two documents, with attributes `_key`, `value1` and `value2` each. One

View File

@ -125,10 +125,6 @@ is returned if the import would trigger a unique key violation and
is returned if the server cannot auto-generate a document key (out of keys
error) for a document with no user-defined key.
@RESTRETURNCODE{501}
The server will respond with *HTTP 501* if this API is called on a cluster
coordinator.
@EXAMPLES
Importing documents with heterogeneous attributes from a JSON array

View File

@ -16,7 +16,7 @@ warning:
@echo " cmake .. -DCMAKE_BUILD_TYPE=Release"
@echo " make"
@echo ""
@if test `uname` == 'Darwin'; then make warning-mac; fi
@if test "`uname`" = 'Darwin'; then make warning-mac; fi
@echo "Use 'make help' to see more options."
warning-mac:

View File

@ -89,24 +89,13 @@ Other features of ArangoDB include:
For more in-depth information read the [design goals of ArangoDB](https://www.arangodb.com/2012/03/07/avocadodbs-design-objectives)
Latest Release - ArangoDB 3.0
-----------------------------
Latest Release
--------------
The [What's new in ArangoDB 3.0](https://docs.arangodb.com/3.0/Manual/ReleaseNotes/NewFeatures30.html) can be found in the documentation.
Packages for all supported platforms can be downloaded from [https://www.arangodb.com/download](https://www.arangodb.com/download/).
Key features of the 3.0 release are:
Please also check [what's new in ArangoDB](https://docs.arangodb.com/latest/Manual/ReleaseNotes/).
- Use of VelocyPack as internal storage format
- AQL improvements
- Much better cluster state management
- Synchronous replication (master/master)
- Unified APIs for CRUD operations
- Persistent indexes
- Upgraded version of V8
- New web admin interface
- Foxx improvements
- Logging improvements
- Improved documentation
More Information
----------------

View File

@ -95,7 +95,8 @@ describe ArangoDB do
found.should have_key("runTime")
found["runTime"].should be_kind_of(Numeric)
found.should have_key("started")
found["state"].should eq("executing");
found.should have_key("state")
found["state"].should be_kind_of(String)
end
it "should track running queries, with bind parameters" do
@ -113,7 +114,8 @@ describe ArangoDB do
found.should have_key("runTime")
found["runTime"].should be_kind_of(Numeric)
found.should have_key("started")
found["state"].should eq("executing")
found.should have_key("state")
found["state"].should be_kind_of(String)
end
it "should track slow queries by threshold" do

View File

@ -198,14 +198,14 @@ void TRI_InitV8Agency(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
v8g->AgentTempl.Reset(isolate, rt);
ft->SetClassName(TRI_V8_ASCII_STRING("ArangoAgentCtor"));
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("ArangoAgentCtor"),
ft->GetFunction(), true);
// register the global object
v8::Handle<v8::Object> aa = rt->NewInstance();
if (!aa.IsEmpty()) {
TRI_AddGlobalVariableVocbase(isolate, context,
TRI_AddGlobalVariableVocbase(isolate,
TRI_V8_ASCII_STRING("ArangoAgent"), aa);
}
}

View File

@ -60,6 +60,7 @@ GatherBlock::GatherBlock(ExecutionEngine* engine, GatherNode const* en)
: ExecutionBlock(engine, en),
_sortRegisters(),
_isSimple(en->getElements().empty()) {
if (!_isSimple) {
for (auto const& p : en->getElements()) {
// We know that planRegisters has been run, so
@ -156,7 +157,11 @@ int GatherBlock::initializeCursor(AqlItemBlock* items, size_t pos) {
}
}
_done = false;
if (_dependencies.empty()) {
_done = true;
} else {
_done = false;
}
return TRI_ERROR_NO_ERROR;
// cppcheck-suppress style
@ -201,7 +206,7 @@ int64_t GatherBlock::remaining() {
/// otherwise.
bool GatherBlock::hasMore() {
DEBUG_BEGIN_BLOCK();
if (_done) {
if (_done || _dependencies.empty()) {
return false;
}
@ -232,6 +237,11 @@ bool GatherBlock::hasMore() {
AqlItemBlock* GatherBlock::getSome(size_t atLeast, size_t atMost) {
DEBUG_BEGIN_BLOCK();
traceGetSomeBegin();
if (_dependencies.empty()) {
_done = true;
}
if (_done) {
traceGetSomeEnd(nullptr);
return nullptr;
@ -256,6 +266,9 @@ AqlItemBlock* GatherBlock::getSome(size_t atLeast, size_t atMost) {
size_t index = 0; // an index of a non-empty buffer
// pull more blocks from dependencies . . .
TRI_ASSERT(_gatherBlockBuffer.size() == _dependencies.size());
TRI_ASSERT(_gatherBlockBuffer.size() == _gatherBlockPos.size());
for (size_t i = 0; i < _dependencies.size(); i++) {
if (_gatherBlockBuffer.at(i).empty()) {
if (getBlock(i, atLeast, atMost)) {
@ -266,7 +279,7 @@ AqlItemBlock* GatherBlock::getSome(size_t atLeast, size_t atMost) {
index = i;
}
auto cur = _gatherBlockBuffer.at(i);
auto const& cur = _gatherBlockBuffer.at(i);
if (!cur.empty()) {
available += cur.at(0)->size() - _gatherBlockPos.at(i).second;
for (size_t j = 1; j < cur.size(); j++) {
@ -428,14 +441,11 @@ bool GatherBlock::getBlock(size_t i, size_t atLeast, size_t atMost) {
DEBUG_BEGIN_BLOCK();
TRI_ASSERT(i < _dependencies.size());
TRI_ASSERT(!_isSimple);
AqlItemBlock* docs = _dependencies.at(i)->getSome(atLeast, atMost);
std::unique_ptr<AqlItemBlock> docs(_dependencies.at(i)->getSome(atLeast, atMost));
if (docs != nullptr) {
try {
_gatherBlockBuffer.at(i).emplace_back(docs);
} catch (...) {
delete docs;
throw;
}
_gatherBlockBuffer.at(i).emplace_back(docs.get());
docs.release();
return true;
}
@ -449,10 +459,10 @@ bool GatherBlock::getBlock(size_t i, size_t atLeast, size_t atMost) {
bool GatherBlock::OurLessThan::operator()(std::pair<size_t, size_t> const& a,
std::pair<size_t, size_t> const& b) {
// nothing in the buffer is maximum!
if (_gatherBlockBuffer.at(a.first).empty()) {
if (_gatherBlockBuffer[a.first].empty()) {
return false;
}
if (_gatherBlockBuffer.at(b.first).empty()) {
if (_gatherBlockBuffer[b.first].empty()) {
return true;
}
@ -463,14 +473,14 @@ bool GatherBlock::OurLessThan::operator()(std::pair<size_t, size_t> const& a,
if (reg.attributePath.empty()) {
cmp = AqlValue::Compare(
_trx,
_gatherBlockBuffer.at(a.first).front()->getValue(a.second, reg.reg),
_gatherBlockBuffer.at(b.first).front()->getValue(b.second, reg.reg),
_gatherBlockBuffer[a.first].front()->getValue(a.second, reg.reg),
_gatherBlockBuffer[b.first].front()->getValue(b.second, reg.reg),
true);
} else {
// Take attributePath into consideration:
AqlValue topA = _gatherBlockBuffer.at(a.first).front()->getValue(a.second,
AqlValue topA = _gatherBlockBuffer[a.first].front()->getValue(a.second,
reg.reg);
AqlValue topB = _gatherBlockBuffer.at(b.first).front()->getValue(b.second,
AqlValue topB = _gatherBlockBuffer[b.first].front()->getValue(b.second,
reg.reg);
bool mustDestroyA;
AqlValue aa = topA.get(_trx, reg.attributePath, mustDestroyA, false);
@ -658,6 +668,9 @@ int ScatterBlock::shutdown(int errorCode) {
/// @brief hasMoreForShard: any more for shard <shardId>?
bool ScatterBlock::hasMoreForShard(std::string const& shardId) {
DEBUG_BEGIN_BLOCK();
TRI_ASSERT(_nrClients != 0);
size_t clientId = getClientId(shardId);
if (_doneForClient.at(clientId)) {
@ -684,6 +697,7 @@ bool ScatterBlock::hasMoreForShard(std::string const& shardId) {
/// in the buffer and _dependencies[0]->remaining()
int64_t ScatterBlock::remainingForShard(std::string const& shardId) {
DEBUG_BEGIN_BLOCK();
size_t clientId = getClientId(shardId);
if (_doneForClient.at(clientId)) {
return 0;

View File

@ -108,10 +108,35 @@ std::shared_ptr<std::vector<std::string>> Collection::shardIds() const {
}
return res;
}
return clusterInfo->getShardList(
arangodb::basics::StringUtils::itoa(getPlanId()));
}
/// @brief returns the filtered list of shard ids of a collection
std::shared_ptr<std::vector<std::string>> Collection::shardIds(std::unordered_set<std::string> const& includedShards) const {
// use the simple method first
auto copy = shardIds();
if (includedShards.empty()) {
// no shards given => return them all!
return copy;
}
// copy first as we will modify the result
auto result = std::make_shared<std::vector<std::string>>();
// post-filter the result
for (auto const& it : *copy) {
if (includedShards.find(it) == includedShards.end()) {
continue;
}
result->emplace_back(it);
}
return result;
}
/// @brief returns the shard keys of a collection
std::vector<std::string> Collection::shardKeys() const {
auto coll = getCollection();

View File

@ -74,6 +74,9 @@ struct Collection {
/// @brief returns the shard ids of a collection
std::shared_ptr<std::vector<std::string>> shardIds() const;
/// @brief returns the filtered list of shard ids of a collection
std::shared_ptr<std::vector<std::string>> shardIds(std::unordered_set<std::string> const& includedShards) const;
/// @brief returns the shard keys of a collection
std::vector<std::string> shardKeys() const;

View File

@ -57,7 +57,8 @@ using namespace arangodb::aql;
/// @brief helper function to create a block
static ExecutionBlock* CreateBlock(
ExecutionEngine* engine, ExecutionNode const* en,
std::unordered_map<ExecutionNode*, ExecutionBlock*> const& cache) {
std::unordered_map<ExecutionNode*, ExecutionBlock*> const& cache,
std::unordered_set<std::string> const& includedShards) {
switch (en->getType()) {
case ExecutionNode::SINGLETON: {
return new SingletonBlock(engine, static_cast<SingletonNode const*>(en));
@ -142,13 +143,13 @@ static ExecutionBlock* CreateBlock(
}
case ExecutionNode::SCATTER: {
auto shardIds =
static_cast<ScatterNode const*>(en)->collection()->shardIds();
static_cast<ScatterNode const*>(en)->collection()->shardIds(includedShards);
return new ScatterBlock(engine, static_cast<ScatterNode const*>(en),
*shardIds);
}
case ExecutionNode::DISTRIBUTE: {
auto shardIds =
static_cast<DistributeNode const*>(en)->collection()->shardIds();
static_cast<DistributeNode const*>(en)->collection()->shardIds(includedShards);
return new DistributeBlock(
engine, static_cast<DistributeNode const*>(en), *shardIds,
static_cast<DistributeNode const*>(en)->collection());
@ -210,7 +211,7 @@ struct Instanciator final : public WalkerWorker<ExecutionNode> {
// We have to prepare the options before we build the block
static_cast<TraversalNode*>(en)->prepareOptions();
}
std::unique_ptr<ExecutionBlock> eb(CreateBlock(engine, en, cache));
std::unique_ptr<ExecutionBlock> eb(CreateBlock(engine, en, cache, std::unordered_set<std::string>()));
if (eb == nullptr) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "illegal node type");
@ -348,8 +349,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
idOfRemoteNode(idOfRemoteNode),
collection(nullptr),
auxiliaryCollections(),
populated(false) {
}
populated(false) {}
void populate() {
// mop: compiler should inline that I suppose :S
@ -419,6 +419,11 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
bool populated;
// in the original plan that needs this engine
};
void includedShards(std::unordered_set<std::string> const& allowed) {
_includedShards = allowed;
}
Query* query;
QueryRegistry* queryRegistry;
@ -429,6 +434,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
std::vector<size_t> engineStack; // stack of engine ids, used for
// RemoteNodes
std::unordered_set<std::string> collNamesSeenOnDBServer;
std::unordered_set<std::string> _includedShards;
// names of sharded collections that we have already seen on a DBserver
// this is relevant to decide whether or not the engine there is a main
// query or a dependent one.
@ -603,7 +609,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
arangodb::CoordTransactionID& coordTransactionID,
Collection* collection) {
// pick up the remote query ids
auto shardIds = collection->shardIds();
auto shardIds = collection->shardIds(_includedShards);
std::string error;
int count = 0;
@ -648,10 +654,11 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
}
}
}
size_t numShards = shardIds->size();
//LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "GOT ALL RESPONSES FROM DB SERVERS: " << nrok << "\n";
if (nrok != (int)shardIds->size()) {
if (nrok != static_cast<int>(numShards)) {
if (errorCode == TRI_ERROR_NO_ERROR) {
errorCode = TRI_ERROR_INTERNAL; // must have an error
}
@ -677,7 +684,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
// nullptr only happens on controlled shutdown
// iterate over all shards of the collection
size_t nr = 0;
auto shardIds = collection->shardIds();
auto shardIds = collection->shardIds(_includedShards);
for (auto const& shardId : *shardIds) {
// inject the current shard id into the collection
VPackBuilder b;
@ -727,7 +734,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
}
// for all node types but REMOTEs, we create blocks
ExecutionBlock* eb = CreateBlock(engine.get(), (*en), cache);
ExecutionBlock* eb = CreateBlock(engine.get(), (*en), cache, _includedShards);
if (eb == nullptr) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
@ -763,7 +770,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
auto gatherNode = static_cast<GatherNode const*>(*en);
Collection const* collection = gatherNode->collection();
auto shardIds = collection->shardIds();
auto shardIds = collection->shardIds(_includedShards);
for (auto const& shardId : *shardIds) {
std::string theId =
arangodb::basics::StringUtils::itoa(remoteNode->id()) + ":" +
@ -854,7 +861,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
info.first.resize(length);
}
for (size_t i = 0; i < length; ++i) {
auto shardIds = edges[i]->shardIds();
auto shardIds = edges[i]->shardIds(_includedShards);
for (auto const& shard : *shardIds) {
auto serverList = clusterInfo->getResponsibleServer(shard);
TRI_ASSERT(!serverList->empty());
@ -881,7 +888,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
if (knownEdges.find(collection.second->getName()) == knownEdges.end()) {
// This collection is not one of the edge collections used in this
// graph.
auto shardIds = collection.second->shardIds();
auto shardIds = collection.second->shardIds(_includedShards);
for (auto const& shard : *shardIds) {
auto serverList = clusterInfo->getResponsibleServer(shard);
TRI_ASSERT(!serverList->empty());
@ -897,7 +904,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
entry.second.second.emplace(it->getName(),
std::vector<ShardID>());
}
auto shardIds = it->shardIds();
auto shardIds = it->shardIds(_includedShards);
for (auto const& shard : *shardIds) {
auto serverList = clusterInfo->getResponsibleServer(shard);
TRI_ASSERT(!serverList->empty());
@ -1185,6 +1192,9 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
// instantiate the engine on the coordinator
auto inst =
std::make_unique<CoordinatorInstanciator>(query, queryRegistry);
// optionally restrict query to certain shards
inst->includedShards(query->includedShards());
plan->root()->walk(inst.get());
try {

View File

@ -1208,7 +1208,9 @@ double EnumerateCollectionNode::estimateCost(size_t& nrItems) const {
nrItems = incoming * count;
// We do a full collection scan for each incoming item.
// random iteration is slightly more expensive than linear iteration
return depCost + nrItems * (_random ? 1.005 : 1.0);
// we also penalize each EnumerateCollectionNode slightly (and do not
// do the same for IndexNodes) so IndexNodes will be preferred
return depCost + nrItems * (_random ? 1.005 : 1.0) + 1.0;
}
EnumerateListNode::EnumerateListNode(ExecutionPlan* plan,

View File

@ -56,9 +56,6 @@ void OptimizerRulesFeature::prepare() {
addRules();
}
void OptimizerRulesFeature::unprepare() {
}
/// @brief register a rule
void OptimizerRulesFeature::registerRule(std::string const& name, RuleFunction func,
OptimizerRule::RuleLevel level, bool canCreateAdditionalPlans,

View File

@ -20,8 +20,8 @@
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_AQL_AQL_OPTIMIZER_RULES_FEATURE_H
#define ARANGOD_AQL_AQL_OPTIMIZER_RULES_FEATURE_H 1
#ifndef ARANGOD_AQL_OPTIMIZER_RULES_FEATURE_H
#define ARANGOD_AQL_OPTIMIZER_RULES_FEATURE_H 1
#include "ApplicationFeatures/ApplicationFeature.h"
#include "Aql/OptimizerRule.h"
@ -37,7 +37,6 @@ class OptimizerRulesFeature final : public application_features::ApplicationFeat
public:
void prepare() override final;
void unprepare() override final;
/// @brief translate a list of rule ids into rule name
static std::vector<std::string> translateRules(std::vector<int> const&);

View File

@ -1160,6 +1160,36 @@ bool Query::getBooleanOption(char const* option, bool defaultValue) const {
return value.getBool();
}
/// @brief return the included shards from the options
std::unordered_set<std::string> Query::includedShards() const {
std::unordered_set<std::string> result;
if (_options == nullptr) {
return result;
}
VPackSlice options = _options->slice();
if (!options.isObject()) {
return result;
}
VPackSlice value = options.get("shardIds");
if (!value.isArray()) {
return result;
}
VPackArrayIterator it(value);
while (it.valid()) {
VPackSlice value = it.value();
if (value.isString()) {
result.emplace(value.copyString());
}
it.next();
}
return result;
}
/// @brief convert the list of warnings to VelocyPack.
/// Will add a new entry { ..., warnings: <warnings>, } if there are
/// warnings. If there are none it will not modify the builder

View File

@ -259,14 +259,16 @@ class Query {
/// @brief fetch a boolean value from the options
bool getBooleanOption(char const*, bool) const;
std::unordered_set<std::string> includedShards() const;
/// @brief add the list of warnings to VelocyPack.
/// Will add a new entry { ..., warnings: <warnings>, } if there are
/// warnings. If there are none it will not modify the builder
void addWarningsToVelocyPackObject(arangodb::velocypack::Builder&) const;
void addWarningsToVelocyPackObject(arangodb::velocypack::Builder&) const;
/// @brief transform the list of warnings to VelocyPack.
/// NOTE: returns nullptr if there are no warnings.
std::shared_ptr<arangodb::velocypack::Builder> warningsToVelocyPack() const;
std::shared_ptr<arangodb::velocypack::Builder> warningsToVelocyPack() const;
/// @brief fetch the query memory limit
static uint64_t MemoryLimit() { return MemoryLimitValue; }

View File

@ -45,6 +45,12 @@ static_assert(sizeof(StateNames) / sizeof(std::string) ==
static_cast<size_t>(QueryExecutionState::ValueType::INVALID_STATE) + 1,
"invalid number of ExecutionState values");
QueryExecutionState::ValueType QueryExecutionState::fromNumber(size_t value) {
TRI_ASSERT(value < static_cast<size_t>(QueryExecutionState::ValueType::INVALID_STATE));
return static_cast<QueryExecutionState::ValueType>(value);
}
/// @brief get a description of the query's current state
std::string QueryExecutionState::toString(QueryExecutionState::ValueType state) {
return StateNames[static_cast<int>(state)];

View File

@ -47,6 +47,7 @@ enum class ValueType {
INVALID_STATE
};
QueryExecutionState::ValueType fromNumber(size_t value);
std::string toString(QueryExecutionState::ValueType state);
std::string toStringWithPrefix(QueryExecutionState::ValueType state);

View File

@ -25,6 +25,7 @@
#include "Aql/Query.h"
#include "Aql/QueryList.h"
#include "Basics/EnumIterator.h"
#include "VocBase/vocbase.h"
#include <velocypack/Builder.h>
@ -35,7 +36,11 @@ using namespace arangodb::aql;
/// @brief create a profile
QueryProfile::QueryProfile(Query* query)
: query(query), results(), stamp(query->startTime()), tracked(false) {
: query(query), stamp(query->startTime()), tracked(false) {
for (auto& it : timers) {
it = 0.0; // reset timers
}
auto queryList = query->vocbase()->queryList();
try {
@ -63,7 +68,7 @@ void QueryProfile::setDone(QueryExecutionState::ValueType state) {
if (state != QueryExecutionState::ValueType::INVALID_STATE) {
// record duration of state
results.emplace_back(state, now - stamp);
timers[static_cast<int>(state)] = now - stamp;
}
// set timestamp
@ -75,9 +80,13 @@ std::shared_ptr<VPackBuilder> QueryProfile::toVelocyPack() {
auto result = std::make_shared<VPackBuilder>();
{
VPackObjectBuilder b(result.get());
for (auto const& it : results) {
result->add(QueryExecutionState::toString(it.first),
VPackValue(it.second));
for (auto state : ENUM_ITERATOR(QueryExecutionState::ValueType, INITIALIZATION, FINISHED)) {
double const value = timers[static_cast<size_t>(state)];
if (value > 0.0) {
result->add(QueryExecutionState::toString(state), VPackValue(value));
}
}
}
return result;

View File

@ -27,7 +27,7 @@
#include "Basics/Common.h"
#include "Aql/QueryExecutionState.h"
#include <velocypack/Builder.h>
#include <array>
namespace arangodb {
@ -52,11 +52,16 @@ struct QueryProfile {
std::shared_ptr<arangodb::velocypack::Builder> toVelocyPack();
Query* query;
std::vector<std::pair<QueryExecutionState::ValueType, double>> results;
std::array<double, static_cast<size_t>(QueryExecutionState::ValueType::INVALID_STATE)> timers;
double stamp;
bool tracked;
};
// we want the number of execution states to be quite low
// as we reserve a statically sized array for it
static_assert(static_cast<int>(QueryExecutionState::ValueType::INITIALIZATION) == 0, "unexpected min QueryExecutionState enum value");
static_assert(static_cast<int>(QueryExecutionState::ValueType::INVALID_STATE) < 10, "unexpected max QueryExecutionState enum value");
}
}

View File

@ -220,48 +220,6 @@ SET(ARANGOD_SOURCES
Indexes/IndexIterator.cpp
Indexes/SimpleAttributeEqualityMatcher.cpp
InternalRestHandler/InternalRestTraverserHandler.cpp
MMFiles/fulltext-handles.cpp
MMFiles/fulltext-index.cpp
MMFiles/fulltext-list.cpp
MMFiles/fulltext-query.cpp
MMFiles/fulltext-result.cpp
MMFiles/geo-index.cpp
MMFiles/MMFilesAllocatorThread.cpp
MMFiles/MMFilesAqlFunctions.cpp
MMFiles/MMFilesCleanupThread.cpp
MMFiles/MMFilesCollection.cpp
MMFiles/MMFilesCollectorThread.cpp
MMFiles/MMFilesCompactorThread.cpp
MMFiles/MMFilesDatafile.cpp
MMFiles/MMFilesDatafileStatistics.cpp
MMFiles/MMFilesDitch.cpp
MMFiles/MMFilesDocumentOperation.cpp
MMFiles/MMFilesEdgeIndex.cpp
MMFiles/MMFilesEngine.cpp
MMFiles/MMFilesIndexElement.cpp
MMFiles/MMFilesIndexFactory.cpp
MMFiles/MMFilesLogfileManager.cpp
MMFiles/MMFilesFulltextIndex.cpp
MMFiles/MMFilesGeoIndex.cpp
MMFiles/MMFilesHashIndex.cpp
MMFiles/MMFilesOptimizerRules.cpp
MMFiles/MMFilesPathBasedIndex.cpp
MMFiles/MMFilesPersistentIndexFeature.cpp
MMFiles/MMFilesPersistentIndex.cpp
MMFiles/MMFilesPersistentIndexKeyComparator.cpp
MMFiles/MMFilesPrimaryIndex.cpp
MMFiles/MMFilesRemoverThread.cpp
MMFiles/MMFilesRevisionsCache.cpp
MMFiles/MMFilesSkiplistIndex.cpp
MMFiles/MMFilesSynchronizerThread.cpp
MMFiles/MMFilesTransactionCollection.cpp
MMFiles/MMFilesTransactionContextData.cpp
MMFiles/MMFilesTransactionState.cpp
MMFiles/MMFilesWalLogfile.cpp
MMFiles/MMFilesWalRecoverState.cpp
MMFiles/MMFilesWalRecoveryFeature.cpp
MMFiles/MMFilesWalSlot.cpp
MMFiles/MMFilesWalSlots.cpp
Replication/ContinuousSyncer.cpp
Replication/InitialSyncer.cpp
Replication/Syncer.cpp
@ -384,6 +342,54 @@ SET(ARANGOD_SOURCES
${ADDITIONAL_BIN_ARANGOD_SOURCES}
)
# add sources for mmfiles engine
set(ARANGOD_SOURCES
${ARANGOD_SOURCES}
MMFiles/fulltext-handles.cpp
MMFiles/fulltext-index.cpp
MMFiles/fulltext-list.cpp
MMFiles/fulltext-query.cpp
MMFiles/fulltext-result.cpp
MMFiles/geo-index.cpp
MMFiles/MMFilesAllocatorThread.cpp
MMFiles/MMFilesAqlFunctions.cpp
MMFiles/MMFilesCleanupThread.cpp
MMFiles/MMFilesCollection.cpp
MMFiles/MMFilesCollectorThread.cpp
MMFiles/MMFilesCompactorThread.cpp
MMFiles/MMFilesDatafile.cpp
MMFiles/MMFilesDatafileStatistics.cpp
MMFiles/MMFilesDitch.cpp
MMFiles/MMFilesDocumentOperation.cpp
MMFiles/MMFilesEdgeIndex.cpp
MMFiles/MMFilesEngine.cpp
MMFiles/MMFilesIndexElement.cpp
MMFiles/MMFilesIndexFactory.cpp
MMFiles/MMFilesLogfileManager.cpp
MMFiles/MMFilesFulltextIndex.cpp
MMFiles/MMFilesGeoIndex.cpp
MMFiles/MMFilesHashIndex.cpp
MMFiles/MMFilesOptimizerRules.cpp
MMFiles/MMFilesPathBasedIndex.cpp
MMFiles/MMFilesPersistentIndexFeature.cpp
MMFiles/MMFilesPersistentIndex.cpp
MMFiles/MMFilesPersistentIndexKeyComparator.cpp
MMFiles/MMFilesPrimaryIndex.cpp
MMFiles/MMFilesRemoverThread.cpp
MMFiles/MMFilesRevisionsCache.cpp
MMFiles/MMFilesSkiplistIndex.cpp
MMFiles/MMFilesSynchronizerThread.cpp
MMFiles/MMFilesTransactionCollection.cpp
MMFiles/MMFilesTransactionContextData.cpp
MMFiles/MMFilesTransactionState.cpp
MMFiles/MMFilesV8Functions.cpp
MMFiles/MMFilesWalLogfile.cpp
MMFiles/MMFilesWalRecoverState.cpp
MMFiles/MMFilesWalRecoveryFeature.cpp
MMFiles/MMFilesWalSlot.cpp
MMFiles/MMFilesWalSlots.cpp
)
if (NOT MSVC)
set(ARANGOD_SOURCES ${ARANGOD_SOURCES} Scheduler/AcceptorUnixDomain.cpp Scheduler/SocketUnixDomain.cpp)
endif()

View File

@ -2025,14 +2025,14 @@ void TRI_InitV8Cluster(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
v8g->AgencyTempl.Reset(isolate, rt);
ft->SetClassName(TRI_V8_ASCII_STRING("ArangoAgencyCtor"));
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("ArangoAgencyCtor"),
ft->GetFunction(), true);
// register the global object
v8::Handle<v8::Object> aa = rt->NewInstance();
if (!aa.IsEmpty()) {
TRI_AddGlobalVariableVocbase(isolate, context,
TRI_AddGlobalVariableVocbase(isolate,
TRI_V8_ASCII_STRING("ArangoAgency"), aa);
}
@ -2075,14 +2075,14 @@ void TRI_InitV8Cluster(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
JS_UniqidClusterInfo);
v8g->ClusterInfoTempl.Reset(isolate, rt);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("ArangoClusterInfoCtor"),
ft->GetFunction(), true);
// register the global object
v8::Handle<v8::Object> ci = rt->NewInstance();
if (!ci.IsEmpty()) {
TRI_AddGlobalVariableVocbase(isolate, context,
TRI_AddGlobalVariableVocbase(isolate,
TRI_V8_ASCII_STRING("ArangoClusterInfo"), ci);
}
@ -2146,14 +2146,14 @@ void TRI_InitV8Cluster(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
JS_StatusServerState);
v8g->ServerStateTempl.Reset(isolate, rt);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("ArangoServerStateCtor"),
ft->GetFunction(), true);
// register the global object
v8::Handle<v8::Object> ss = rt->NewInstance();
if (!ss.IsEmpty()) {
TRI_AddGlobalVariableVocbase(isolate, context,
TRI_AddGlobalVariableVocbase(isolate,
TRI_V8_ASCII_STRING("ArangoServerState"), ss);
}
@ -2177,17 +2177,17 @@ void TRI_InitV8Cluster(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("getId"), JS_GetId);
v8g->ClusterCommTempl.Reset(isolate, rt);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("ArangoClusterCommCtor"),
ft->GetFunction(), true);
// register the global object
ss = rt->NewInstance();
if (!ss.IsEmpty()) {
TRI_AddGlobalVariableVocbase(isolate, context,
TRI_AddGlobalVariableVocbase(isolate,
TRI_V8_ASCII_STRING("ArangoClusterComm"), ss);
}
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("SYS_CLUSTER_DOWNLOAD"),
isolate, TRI_V8_ASCII_STRING("SYS_CLUSTER_DOWNLOAD"),
JS_ClusterDownload);
}

View File

@ -423,8 +423,7 @@ AqlValue MMFilesAqlFunctions::Within(
return buildGeoResult(trx, index->collection(), query, cors, cid, attributeName);
}
void MMFilesAqlFunctions::RegisterFunctions() {
void MMFilesAqlFunctions::registerResources() {
auto functions = AqlFunctionFeature::AQLFUNCTIONS;
TRI_ASSERT(functions != nullptr);

View File

@ -35,13 +35,13 @@ struct MMFilesAqlFunctions : public aql::Functions {
static aql::AqlValue Fulltext(arangodb::aql::Query*, transaction::Methods*,
aql::VPackFunctionParameters const&);
static aql::AqlValue Near(arangodb::aql::Query*, transaction::Methods*,
aql::VPackFunctionParameters const&);
static aql::AqlValue Near(arangodb::aql::Query*, transaction::Methods*,
aql::VPackFunctionParameters const&);
static aql::AqlValue Within(arangodb::aql::Query*, transaction::Methods*,
aql::VPackFunctionParameters const&);
static aql::AqlValue Within(arangodb::aql::Query*, transaction::Methods*,
aql::VPackFunctionParameters const&);
static void RegisterFunctions();
static void registerResources();
};
} // namespace arangodb

View File

@ -71,9 +71,9 @@ using Helper = arangodb::basics::VelocyPackHelper;
namespace {
/// @brief helper class for filling indexes
class IndexFillerTask : public basics::LocalTask {
class MMFilesIndexFillerTask : public basics::LocalTask {
public:
IndexFillerTask(
MMFilesIndexFillerTask(
basics::LocalTaskQueue* queue, transaction::Methods* trx,
Index* idx,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents)
@ -1436,8 +1436,8 @@ void MMFilesCollection::fillIndex(
try {
// move task into thread pool
std::shared_ptr<::IndexFillerTask> worker;
worker.reset(new ::IndexFillerTask(queue, trx, idx, documents));
std::shared_ptr<::MMFilesIndexFillerTask> worker;
worker.reset(new ::MMFilesIndexFillerTask(queue, trx, idx, documents));
queue->enqueue(worker);
} catch (...) {
// set error code

View File

@ -398,7 +398,7 @@ int MMFilesCollectorThread::collectLogfiles(bool& worked) {
// reset collector status
broadcastCollectorResult(res);
PersistentIndexFeature::syncWal();
MMFilesPersistentIndexFeature::syncWal();
_logfileManager->setCollectionDone(logfile);
} else {

View File

@ -44,6 +44,7 @@
#include "MMFiles/MMFilesTransactionCollection.h"
#include "MMFiles/MMFilesTransactionContextData.h"
#include "MMFiles/MMFilesTransactionState.h"
#include "MMFiles/MMFilesV8Functions.h"
#include "Random/RandomGenerator.h"
#include "RestServer/DatabaseFeature.h"
#include "RestServer/DatabasePathFeature.h"
@ -130,19 +131,16 @@ std::string const MMFilesEngine::FeatureName("MMFilesEngine");
MMFilesEngine::MMFilesEngine(application_features::ApplicationServer* server)
: StorageEngine(server, EngineName, FeatureName, new MMFilesIndexFactory())
, _isUpgrade(false)
, _maxTick(0)
{
startsAfter("PersistentIndex");
}
MMFilesEngine::~MMFilesEngine() {
, _maxTick(0) {
startsAfter("MMFilesPersistentIndex");
}
MMFilesEngine::~MMFilesEngine() {}
// perform a physical deletion of the database
void MMFilesEngine::dropDatabase(Database* database, int& status) {
// delete persistent indexes for this database
PersistentIndexFeature::dropDatabase(database->id());
MMFilesPersistentIndexFeature::dropDatabase(database->id());
// To shutdown the database (which destroys all LogicalCollection
// objects of all collections) we need to make sure that the
@ -382,7 +380,7 @@ void MMFilesEngine::getDatabases(arangodb::velocypack::Builder& result) {
// delete persistent indexes for this database
TRI_voc_tick_t id = static_cast<TRI_voc_tick_t>(
basics::StringUtils::uint64(idSlice.copyString()));
PersistentIndexFeature::dropDatabase(id);
MMFilesPersistentIndexFeature::dropDatabase(id);
dropDatabaseDirectory(directory);
continue;
@ -813,7 +811,7 @@ void MMFilesEngine::destroyCollection(TRI_vocbase_t* vocbase, arangodb::LogicalC
unregisterCollectionPath(vocbase->id(), collection->cid());
// delete persistent indexes
PersistentIndexFeature::dropCollection(vocbase->id(), collection->cid());
MMFilesPersistentIndexFeature::dropCollection(vocbase->id(), collection->cid());
// rename collection directory
if (physical->path().empty()) {
@ -2246,13 +2244,18 @@ int MMFilesEngine::transferMarkers(LogicalCollection* collection,
}
/// @brief Add engine-specific AQL functions.
void MMFilesEngine::addAqlFunctions() const {
MMFilesAqlFunctions::RegisterFunctions();
void MMFilesEngine::addAqlFunctions() {
MMFilesAqlFunctions::registerResources();
}
/// @brief Add engine-specific optimizer rules
void MMFilesEngine::addOptimizerRules() const {
MMFilesOptimizerRules::RegisterRules();
void MMFilesEngine::addOptimizerRules() {
MMFilesOptimizerRules::registerResources();
}
/// @brief Add engine-specific V8 functions
void MMFilesEngine::addV8Functions() {
MMFilesV8Functions::registerResources();
}
/// @brief transfer markers into a collection, actual work

View File

@ -267,16 +267,19 @@ public:
int openCollection(TRI_vocbase_t* vocbase, LogicalCollection* collection, bool ignoreErrors) override;
/// @brief Add engine-specific AQL functions.
void addAqlFunctions() override;
/// @brief Add engine-specific optimizer rules
void addOptimizerRules() override;
/// @brief Add engine-specific V8 functions
void addV8Functions() override;
/// @brief transfer markers into a collection
int transferMarkers(LogicalCollection* collection, MMFilesCollectorCache*,
MMFilesOperationsType const&);
/// @brief Add engine-specific AQL functions.
void addAqlFunctions() const override;
/// @brief Add engine-specific optimizer rules
void addOptimizerRules() const override;
private:
/// @brief: check the initial markers in a datafile
bool checkDatafileHeader(MMFilesDatafile* datafile, std::string const& filename) const;

View File

@ -43,7 +43,7 @@
using namespace arangodb;
LookupBuilder::LookupBuilder(
MMFilesHashIndexLookupBuilder::MMFilesHashIndexLookupBuilder(
transaction::Methods* trx, arangodb::aql::AstNode const* node,
arangodb::aql::Variable const* reference,
std::vector<std::vector<arangodb::basics::AttributeName>> const& fields)
@ -138,9 +138,9 @@ LookupBuilder::LookupBuilder(
buildNextSearchValue();
}
VPackSlice LookupBuilder::lookup() { return _builder->slice(); }
VPackSlice MMFilesHashIndexLookupBuilder::lookup() { return _builder->slice(); }
bool LookupBuilder::hasAndGetNext() {
bool MMFilesHashIndexLookupBuilder::hasAndGetNext() {
_builder->clear();
if (!_usesIn || _isEmpty) {
return false;
@ -152,7 +152,7 @@ bool LookupBuilder::hasAndGetNext() {
return true;
}
void LookupBuilder::reset() {
void MMFilesHashIndexLookupBuilder::reset() {
if (_isEmpty) {
return;
}
@ -164,7 +164,7 @@ void LookupBuilder::reset() {
buildNextSearchValue();
}
bool LookupBuilder::incrementInPosition() {
bool MMFilesHashIndexLookupBuilder::incrementInPosition() {
size_t i = _coveredFields - 1;
while (true) {
auto it = _inPosition.find(i);
@ -185,7 +185,7 @@ bool LookupBuilder::incrementInPosition() {
}
}
void LookupBuilder::buildNextSearchValue() {
void MMFilesHashIndexLookupBuilder::buildNextSearchValue() {
if (_isEmpty) {
return;
}

View File

@ -50,7 +50,7 @@ class LocalTaskQueue;
class MMFilesHashIndex;
/// @brief Class to build Slice lookups out of AST Conditions
class LookupBuilder {
class MMFilesHashIndexLookupBuilder {
private:
transaction::BuilderLeaser _builder;
bool _usesIn;
@ -64,7 +64,7 @@ class LookupBuilder {
transaction::BuilderLeaser _inStorage;
public:
LookupBuilder(
MMFilesHashIndexLookupBuilder(
transaction::Methods*, arangodb::aql::AstNode const*,
arangodb::aql::Variable const*,
std::vector<std::vector<arangodb::basics::AttributeName>> const&);
@ -100,7 +100,7 @@ class MMFilesHashIndexIterator final : public IndexIterator {
private:
MMFilesHashIndex const* _index;
LookupBuilder _lookups;
MMFilesHashIndexLookupBuilder _lookups;
std::vector<MMFilesHashIndexElement*> _buffer;
size_t _posInBuffer;
};

View File

@ -421,7 +421,7 @@ std::shared_ptr<Index> MMFilesIndexFactory::prepareIndexFromSlice(
break;
}
case arangodb::Index::TRI_IDX_TYPE_ROCKSDB_INDEX: {
newIdx.reset(new arangodb::PersistentIndex(iid, col, info));
newIdx.reset(new arangodb::MMFilesPersistentIndex(iid, col, info));
break;
}
case arangodb::Index::TRI_IDX_TYPE_FULLTEXT_INDEX: {

View File

@ -40,7 +40,7 @@ using namespace arangodb;
using namespace arangodb::aql;
using EN = arangodb::aql::ExecutionNode;
void MMFilesOptimizerRules::RegisterRules() {
void MMFilesOptimizerRules::registerResources() {
// patch update statements
OptimizerRulesFeature::registerRule("geo-index-optimizer", geoIndexRule,
OptimizerRule::applyMMFilesGeoIndexRule, false, true);

View File

@ -34,7 +34,7 @@ struct OptimizerRule;
}
struct MMFilesOptimizerRules {
static void RegisterRules();
static void registerResources();
static void geoIndexRule(aql::Optimizer* opt, std::unique_ptr<aql::ExecutionPlan> plan, aql::OptimizerRule const* rule);
};

View File

@ -86,10 +86,10 @@ static size_t sortWeight(arangodb::aql::AstNode const* node) {
// lists: lexicographically and within each slot according to these rules.
// ...........................................................................
PersistentIndexIterator::PersistentIndexIterator(LogicalCollection* collection,
MMFilesPersistentIndexIterator::MMFilesPersistentIndexIterator(LogicalCollection* collection,
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
arangodb::PersistentIndex const* index,
arangodb::MMFilesPersistentIndex const* index,
arangodb::MMFilesPrimaryIndex* primaryIndex,
rocksdb::OptimisticTransactionDB* db,
bool reverse,
@ -102,17 +102,17 @@ PersistentIndexIterator::PersistentIndexIterator(LogicalCollection* collection,
_probe(false) {
TRI_idx_iid_t const id = index->id();
std::string const prefix = PersistentIndex::buildPrefix(
std::string const prefix = MMFilesPersistentIndex::buildPrefix(
trx->vocbase()->id(), _primaryIndex->collection()->cid(), id);
TRI_ASSERT(prefix.size() == PersistentIndex::keyPrefixSize());
TRI_ASSERT(prefix.size() == MMFilesPersistentIndex::keyPrefixSize());
_leftEndpoint.reset(new arangodb::velocypack::Buffer<char>());
_leftEndpoint->reserve(PersistentIndex::keyPrefixSize() + left.byteSize());
_leftEndpoint->reserve(MMFilesPersistentIndex::keyPrefixSize() + left.byteSize());
_leftEndpoint->append(prefix.c_str(), prefix.size());
_leftEndpoint->append(left.startAs<char const>(), left.byteSize());
_rightEndpoint.reset(new arangodb::velocypack::Buffer<char>());
_rightEndpoint->reserve(PersistentIndex::keyPrefixSize() + right.byteSize());
_rightEndpoint->reserve(MMFilesPersistentIndex::keyPrefixSize() + right.byteSize());
_rightEndpoint->append(prefix.c_str(), prefix.size());
_rightEndpoint->append(right.startAs<char const>(), right.byteSize());
@ -129,7 +129,7 @@ PersistentIndexIterator::PersistentIndexIterator(LogicalCollection* collection,
}
/// @brief Reset the cursor
void PersistentIndexIterator::reset() {
void MMFilesPersistentIndexIterator::reset() {
if (_reverse) {
_probe = true;
_cursor->Seek(rocksdb::Slice(_rightEndpoint->data(), _rightEndpoint->size()));
@ -141,8 +141,8 @@ void PersistentIndexIterator::reset() {
}
}
bool PersistentIndexIterator::next(TokenCallback const& cb, size_t limit) {
auto comparator = PersistentIndexFeature::instance()->comparator();
bool MMFilesPersistentIndexIterator::next(TokenCallback const& cb, size_t limit) {
auto comparator = MMFilesPersistentIndexFeature::instance()->comparator();
while (limit > 0) {
if (!_cursor->Valid()) {
// We are exhausted already, sorry
@ -150,10 +150,10 @@ bool PersistentIndexIterator::next(TokenCallback const& cb, size_t limit) {
}
rocksdb::Slice key = _cursor->key();
// LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "cursor key: " << VPackSlice(key.data() + PersistentIndex::keyPrefixSize()).toJson();
// LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "cursor key: " << VPackSlice(key.data() + MMFilesPersistentIndex::keyPrefixSize()).toJson();
int res = comparator->Compare(key, rocksdb::Slice(_leftEndpoint->data(), _leftEndpoint->size()));
// LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "comparing: " << VPackSlice(key.data() + PersistentIndex::keyPrefixSize()).toJson() << " with " << VPackSlice((char const*) _leftEndpoint->data() + PersistentIndex::keyPrefixSize()).toJson() << " - res: " << res;
// LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "comparing: " << VPackSlice(key.data() + MMFilesPersistentIndex::keyPrefixSize()).toJson() << " with " << VPackSlice((char const*) _leftEndpoint->data() + MMFilesPersistentIndex::keyPrefixSize()).toJson() << " - res: " << res;
if (res < 0) {
if (_reverse) {
@ -166,7 +166,7 @@ bool PersistentIndexIterator::next(TokenCallback const& cb, size_t limit) {
}
res = comparator->Compare(key, rocksdb::Slice(_rightEndpoint->data(), _rightEndpoint->size()));
// LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "comparing: " << VPackSlice(key.data() + PersistentIndex::keyPrefixSize()).toJson() << " with " << VPackSlice((char const*) _rightEndpoint->data() + PersistentIndex::keyPrefixSize()).toJson() << " - res: " << res;
// LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "comparing: " << VPackSlice(key.data() + MMFilesPersistentIndex::keyPrefixSize()).toJson() << " with " << VPackSlice((char const*) _rightEndpoint->data() + MMFilesPersistentIndex::keyPrefixSize()).toJson() << " - res: " << res;
if (res <= 0) {
@ -207,21 +207,21 @@ bool PersistentIndexIterator::next(TokenCallback const& cb, size_t limit) {
}
/// @brief create the index
PersistentIndex::PersistentIndex(TRI_idx_iid_t iid,
MMFilesPersistentIndex::MMFilesPersistentIndex(TRI_idx_iid_t iid,
arangodb::LogicalCollection* collection,
arangodb::velocypack::Slice const& info)
: MMFilesPathBasedIndex(iid, collection, info, 0, true),
_db(PersistentIndexFeature::instance()->db()) {}
_db(MMFilesPersistentIndexFeature::instance()->db()) {}
/// @brief destroy the index
PersistentIndex::~PersistentIndex() {}
MMFilesPersistentIndex::~MMFilesPersistentIndex() {}
size_t PersistentIndex::memory() const {
size_t MMFilesPersistentIndex::memory() const {
return 0; // TODO
}
/// @brief return a VelocyPack representation of the index
void PersistentIndex::toVelocyPack(VPackBuilder& builder,
void MMFilesPersistentIndex::toVelocyPack(VPackBuilder& builder,
bool withFigures) const {
Index::toVelocyPack(builder, withFigures);
builder.add("unique", VPackValue(_unique));
@ -229,15 +229,15 @@ void PersistentIndex::toVelocyPack(VPackBuilder& builder,
}
/// @brief return a VelocyPack representation of the index figures
void PersistentIndex::toVelocyPackFigures(VPackBuilder& builder) const {
void MMFilesPersistentIndex::toVelocyPackFigures(VPackBuilder& builder) const {
TRI_ASSERT(builder.isOpenObject());
builder.add("memory", VPackValue(memory()));
}
/// @brief inserts a document into the index
int PersistentIndex::insert(transaction::Methods* trx, TRI_voc_rid_t revisionId,
int MMFilesPersistentIndex::insert(transaction::Methods* trx, TRI_voc_rid_t revisionId,
VPackSlice const& doc, bool isRollback) {
auto comparator = PersistentIndexFeature::instance()->comparator();
auto comparator = MMFilesPersistentIndexFeature::instance()->comparator();
std::vector<MMFilesSkiplistIndexElement*> elements;
int res;
@ -394,7 +394,7 @@ int PersistentIndex::insert(transaction::Methods* trx, TRI_voc_rid_t revisionId,
}
/// @brief removes a document from the index
int PersistentIndex::remove(transaction::Methods* trx, TRI_voc_rid_t revisionId,
int MMFilesPersistentIndex::remove(transaction::Methods* trx, TRI_voc_rid_t revisionId,
VPackSlice const& doc, bool isRollback) {
std::vector<MMFilesSkiplistIndexElement*> elements;
@ -460,21 +460,21 @@ int PersistentIndex::remove(transaction::Methods* trx, TRI_voc_rid_t revisionId,
return res;
}
int PersistentIndex::unload() {
int MMFilesPersistentIndex::unload() {
// nothing to do
return TRI_ERROR_NO_ERROR;
}
/// @brief called when the index is dropped
int PersistentIndex::drop() {
return PersistentIndexFeature::instance()->dropIndex(_collection->vocbase()->id(),
int MMFilesPersistentIndex::drop() {
return MMFilesPersistentIndexFeature::instance()->dropIndex(_collection->vocbase()->id(),
_collection->cid(), _iid);
}
/// @brief attempts to locate an entry in the index
/// Warning: who ever calls this function is responsible for destroying
/// the PersistentIndexIterator* results
PersistentIndexIterator* PersistentIndex::lookup(transaction::Methods* trx,
/// the MMFilesPersistentIndexIterator* results
MMFilesPersistentIndexIterator* MMFilesPersistentIndex::lookup(transaction::Methods* trx,
ManagedDocumentResult* mmdr,
VPackSlice const searchValues,
bool reverse) const {
@ -573,17 +573,17 @@ PersistentIndexIterator* PersistentIndex::lookup(transaction::Methods* trx,
// Same for the iterator
auto physical = static_cast<MMFilesCollection*>(_collection->getPhysical());
auto idx = physical->primaryIndex();
return new PersistentIndexIterator(_collection, trx, mmdr, this, idx, _db, reverse, leftBorder, rightBorder);
return new MMFilesPersistentIndexIterator(_collection, trx, mmdr, this, idx, _db, reverse, leftBorder, rightBorder);
}
bool PersistentIndex::accessFitsIndex(
bool MMFilesPersistentIndex::accessFitsIndex(
arangodb::aql::AstNode const* access, arangodb::aql::AstNode const* other,
arangodb::aql::AstNode const* op, arangodb::aql::Variable const* reference,
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>>&
found,
std::unordered_set<std::string>& nonNullAttributes,
bool isExecution) const {
if (!this->canUseConditionPart(access, other, op, reference, nonNullAttributes, isExecution)) {
if (!canUseConditionPart(access, other, op, reference, nonNullAttributes, isExecution)) {
return false;
}
@ -674,7 +674,7 @@ bool PersistentIndex::accessFitsIndex(
return false;
}
void PersistentIndex::matchAttributes(
void MMFilesPersistentIndex::matchAttributes(
arangodb::aql::AstNode const* node,
arangodb::aql::Variable const* reference,
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>>&
@ -716,7 +716,7 @@ void PersistentIndex::matchAttributes(
}
}
bool PersistentIndex::supportsFilterCondition(
bool MMFilesPersistentIndex::supportsFilterCondition(
arangodb::aql::AstNode const* node,
arangodb::aql::Variable const* reference, size_t itemsInIndex,
size_t& estimatedItems, double& estimatedCost) const {
@ -816,7 +816,7 @@ bool PersistentIndex::supportsFilterCondition(
return false;
}
bool PersistentIndex::supportsSortCondition(
bool MMFilesPersistentIndex::supportsSortCondition(
arangodb::aql::SortCondition const* sortCondition,
arangodb::aql::Variable const* reference, size_t itemsInIndex,
double& estimatedCost, size_t& coveredAttributes) const {
@ -862,7 +862,7 @@ bool PersistentIndex::supportsSortCondition(
return false;
}
IndexIterator* PersistentIndex::iteratorForCondition(
IndexIterator* MMFilesPersistentIndex::iteratorForCondition(
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
arangodb::aql::AstNode const* node,
@ -1054,7 +1054,7 @@ IndexIterator* PersistentIndex::iteratorForCondition(
}
/// @brief specializes the condition for use with the index
arangodb::aql::AstNode* PersistentIndex::specializeCondition(
arangodb::aql::AstNode* MMFilesPersistentIndex::specializeCondition(
arangodb::aql::AstNode* node,
arangodb::aql::Variable const* reference) const {
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>> found;
@ -1116,7 +1116,7 @@ arangodb::aql::AstNode* PersistentIndex::specializeCondition(
return node;
}
bool PersistentIndex::isDuplicateOperator(
bool MMFilesPersistentIndex::isDuplicateOperator(
arangodb::aql::AstNode const* node,
std::unordered_set<int> const& operatorsFound) const {
auto type = node->type;

View File

@ -50,28 +50,27 @@ struct Variable;
class LogicalCollection;
class MMFilesPrimaryIndex;
class PersistentIndex;
class MMFilesPersistentIndex;
namespace transaction {
class Methods;
}
;
/// @brief Iterator structure for RocksDB. We require a start and stop node
class PersistentIndexIterator final : public IndexIterator {
class MMFilesPersistentIndexIterator final : public IndexIterator {
private:
friend class PersistentIndex;
friend class MMFilesPersistentIndex;
public:
PersistentIndexIterator(LogicalCollection* collection, transaction::Methods* trx,
MMFilesPersistentIndexIterator(LogicalCollection* collection, transaction::Methods* trx,
ManagedDocumentResult* mmdr,
arangodb::PersistentIndex const* index,
arangodb::MMFilesPersistentIndex const* index,
arangodb::MMFilesPrimaryIndex* primaryIndex,
rocksdb::OptimisticTransactionDB* db,
bool reverse,
arangodb::velocypack::Slice const& left,
arangodb::velocypack::Slice const& right);
~PersistentIndexIterator() = default;
~MMFilesPersistentIndexIterator() = default;
public:
@ -93,16 +92,16 @@ class PersistentIndexIterator final : public IndexIterator {
bool _probe;
};
class PersistentIndex final : public MMFilesPathBasedIndex {
friend class PersistentIndexIterator;
class MMFilesPersistentIndex final : public MMFilesPathBasedIndex {
friend class MMFilesPersistentIndexIterator;
public:
PersistentIndex() = delete;
MMFilesPersistentIndex() = delete;
PersistentIndex(TRI_idx_iid_t, LogicalCollection*,
MMFilesPersistentIndex(TRI_idx_iid_t, LogicalCollection*,
arangodb::velocypack::Slice const&);
~PersistentIndex();
~MMFilesPersistentIndex();
public:
IndexType type() const override {
@ -164,8 +163,8 @@ class PersistentIndex final : public MMFilesPathBasedIndex {
/// @brief attempts to locate an entry in the index
///
/// Warning: who ever calls this function is responsible for destroying
/// the velocypack::Slice and the PersistentIndexIterator* results
PersistentIndexIterator* lookup(transaction::Methods*,
/// the velocypack::Slice and the MMFilesPersistentIndexIterator* results
MMFilesPersistentIndexIterator* lookup(transaction::Methods*,
ManagedDocumentResult* mmdr,
arangodb::velocypack::Slice const,
bool reverse) const;

View File

@ -50,11 +50,11 @@ using namespace arangodb;
using namespace arangodb::application_features;
using namespace arangodb::options;
static PersistentIndexFeature* Instance = nullptr;
static MMFilesPersistentIndexFeature* Instance = nullptr;
PersistentIndexFeature::PersistentIndexFeature(
MMFilesPersistentIndexFeature::MMFilesPersistentIndexFeature(
application_features::ApplicationServer* server)
: application_features::ApplicationFeature(server, "PersistentIndex"),
: application_features::ApplicationFeature(server, "MMFilesPersistentIndex"),
_db(nullptr), _comparator(nullptr), _path(), _active(true),
_writeBufferSize(0), _maxWriteBufferNumber(2),
_delayedWriteRate(2 * 1024 * 1024), _minWriteBufferNumberToMerge(1),
@ -69,7 +69,7 @@ PersistentIndexFeature::PersistentIndexFeature(
startsAfter("DatabasePath");
}
PersistentIndexFeature::~PersistentIndexFeature() {
MMFilesPersistentIndexFeature::~MMFilesPersistentIndexFeature() {
try {
delete _db;
} catch (...) {
@ -80,7 +80,7 @@ PersistentIndexFeature::~PersistentIndexFeature() {
}
}
void PersistentIndexFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
void MMFilesPersistentIndexFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
options->addSection("rocksdb", "Configure the RocksDB engine");
options->addOption(
@ -175,7 +175,7 @@ void PersistentIndexFeature::collectOptions(std::shared_ptr<ProgramOptions> opti
new UInt64Parameter(&_compactionReadaheadSize));
}
void PersistentIndexFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
void MMFilesPersistentIndexFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
if (!_active) {
forceDisable();
} else {
@ -201,7 +201,7 @@ void PersistentIndexFeature::validateOptions(std::shared_ptr<ProgramOptions> opt
}
}
void PersistentIndexFeature::start() {
void MMFilesPersistentIndexFeature::start() {
Instance = this;
if (!isEnabled()) {
@ -214,7 +214,7 @@ void PersistentIndexFeature::start() {
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "initializing rocksdb, path: " << _path;
_comparator = new RocksDBKeyComparator();
_comparator = new MMFilesPersistentIndexKeyComparator();
rocksdb::BlockBasedTableOptions tableOptions;
tableOptions.cache_index_and_filter_blocks = true;
@ -222,7 +222,7 @@ void PersistentIndexFeature::start() {
// TODO: using the prefix extractor will lead to the comparator being
// called with just the key prefix (which the comparator currently cannot handle)
// _options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(PersistentIndex::minimalPrefixSize()));
// _options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(MMFilesPersistentIndex::minimalPrefixSize()));
// _options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(tableOptions));
_options.create_if_missing = true;
@ -264,7 +264,7 @@ void PersistentIndexFeature::start() {
}
}
void PersistentIndexFeature::unprepare() {
void MMFilesPersistentIndexFeature::unprepare() {
if (!isEnabled()) {
return;
}
@ -283,11 +283,11 @@ void PersistentIndexFeature::unprepare() {
syncWal();
}
PersistentIndexFeature* PersistentIndexFeature::instance() {
MMFilesPersistentIndexFeature* MMFilesPersistentIndexFeature::instance() {
return Instance;
}
int PersistentIndexFeature::syncWal() {
int MMFilesPersistentIndexFeature::syncWal() {
#ifndef _WIN32
// SyncWAL() always reports a "not implemented" error on Windows
if (Instance == nullptr || !Instance->isEnabled()) {
@ -306,31 +306,31 @@ int PersistentIndexFeature::syncWal() {
return TRI_ERROR_NO_ERROR;
}
int PersistentIndexFeature::dropDatabase(TRI_voc_tick_t databaseId) {
int MMFilesPersistentIndexFeature::dropDatabase(TRI_voc_tick_t databaseId) {
if (Instance == nullptr) {
return TRI_ERROR_INTERNAL;
}
// LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "dropping RocksDB database: " << databaseId;
return Instance->dropPrefix(PersistentIndex::buildPrefix(databaseId));
return Instance->dropPrefix(MMFilesPersistentIndex::buildPrefix(databaseId));
}
int PersistentIndexFeature::dropCollection(TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId) {
int MMFilesPersistentIndexFeature::dropCollection(TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId) {
if (Instance == nullptr) {
return TRI_ERROR_INTERNAL;
}
// LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "dropping RocksDB database: " << databaseId << ", collection: " << collectionId;
return Instance->dropPrefix(PersistentIndex::buildPrefix(databaseId, collectionId));
return Instance->dropPrefix(MMFilesPersistentIndex::buildPrefix(databaseId, collectionId));
}
int PersistentIndexFeature::dropIndex(TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId, TRI_idx_iid_t indexId) {
int MMFilesPersistentIndexFeature::dropIndex(TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId, TRI_idx_iid_t indexId) {
if (Instance == nullptr) {
return TRI_ERROR_INTERNAL;
}
// LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "dropping RocksDB database: " << databaseId << ", collection: " << collectionId << ", index: " << indexId;
return Instance->dropPrefix(PersistentIndex::buildPrefix(databaseId, collectionId, indexId));
return Instance->dropPrefix(MMFilesPersistentIndex::buildPrefix(databaseId, collectionId, indexId));
}
int PersistentIndexFeature::dropPrefix(std::string const& prefix) {
int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) {
if (!isEnabled()) {
return TRI_ERROR_NO_ERROR;
}
@ -349,7 +349,7 @@ int PersistentIndexFeature::dropPrefix(std::string const& prefix) {
l.reserve(prefix.size() + builder.slice().byteSize());
l.append(prefix);
// extend the prefix to at least 24 bytes
while (l.size() < PersistentIndex::keyPrefixSize()) {
while (l.size() < MMFilesPersistentIndex::keyPrefixSize()) {
uint64_t value = 0;
l.append(reinterpret_cast<char const*>(&value), sizeof(uint64_t));
}
@ -364,7 +364,7 @@ int PersistentIndexFeature::dropPrefix(std::string const& prefix) {
u.reserve(prefix.size() + builder.slice().byteSize());
u.append(prefix);
// extend the prefix to at least 24 bytes
while (u.size() < PersistentIndex::keyPrefixSize()) {
while (u.size() < MMFilesPersistentIndex::keyPrefixSize()) {
uint64_t value = UINT64_MAX;
u.append(reinterpret_cast<char const*>(&value), sizeof(uint64_t));
}
@ -381,7 +381,7 @@ int PersistentIndexFeature::dropPrefix(std::string const& prefix) {
}
}
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "dropping RocksDB range: " << VPackSlice(l.c_str() + PersistentIndex::keyPrefixSize()).toJson() << " - " << VPackSlice(u.c_str() + PersistentIndex::keyPrefixSize()).toJson();
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "dropping RocksDB range: " << VPackSlice(l.c_str() + MMFilesPersistentIndex::keyPrefixSize()).toJson() << " - " << VPackSlice(u.c_str() + MMFilesPersistentIndex::keyPrefixSize()).toJson();
#endif
// delete files in range lower..upper
@ -401,7 +401,7 @@ int PersistentIndexFeature::dropPrefix(std::string const& prefix) {
// go on and delete the remaining keys (delete files in range does not necessarily
// find them all, just complete files)
auto comparator = PersistentIndexFeature::instance()->comparator();
auto comparator = MMFilesPersistentIndexFeature::instance()->comparator();
rocksdb::DB* db = _db->GetBaseDB();
rocksdb::WriteBatch batch;

View File

@ -21,8 +21,8 @@
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_INDEXES_ROCKS_DB_FEATURE_H
#define ARANGOD_INDEXES_ROCKS_DB_FEATURE_H 1
#ifndef ARANGOD_MMFILES_MMFILES_PERSISTENT_INDEX_FEATURE_H
#define ARANGOD_MMFILES_MMFILES_PERSISTENT_INDEX_FEATURE_H 1
#include "ApplicationFeatures/ApplicationFeature.h"
#include "Basics/Common.h"
@ -35,12 +35,12 @@ class OptimisticTransactionDB;
}
namespace arangodb {
class RocksDBKeyComparator;
class MMFilesPersistentIndexKeyComparator;
class PersistentIndexFeature final : public application_features::ApplicationFeature {
class MMFilesPersistentIndexFeature final : public application_features::ApplicationFeature {
public:
explicit PersistentIndexFeature(application_features::ApplicationServer* server);
~PersistentIndexFeature();
explicit MMFilesPersistentIndexFeature(application_features::ApplicationServer* server);
~MMFilesPersistentIndexFeature();
void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;
void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
@ -48,14 +48,14 @@ class PersistentIndexFeature final : public application_features::ApplicationFea
void unprepare() override final;
inline rocksdb::OptimisticTransactionDB* db() const { return _db; }
inline RocksDBKeyComparator* comparator() const { return _comparator; }
inline MMFilesPersistentIndexKeyComparator* comparator() const { return _comparator; }
static int syncWal();
static int dropDatabase(TRI_voc_tick_t);
static int dropCollection(TRI_voc_tick_t, TRI_voc_cid_t);
static int dropIndex(TRI_voc_tick_t, TRI_voc_cid_t, TRI_idx_iid_t);
static PersistentIndexFeature* instance();
static MMFilesPersistentIndexFeature* instance();
private:
@ -65,7 +65,7 @@ class PersistentIndexFeature final : public application_features::ApplicationFea
rocksdb::OptimisticTransactionDB* _db;
rocksdb::Options _options;
RocksDBKeyComparator* _comparator;
MMFilesPersistentIndexKeyComparator* _comparator;
std::string _path;
bool _active;
uint64_t _writeBufferSize;

View File

@ -32,12 +32,12 @@
using namespace arangodb;
int RocksDBKeyComparator::Compare(rocksdb::Slice const& lhs, rocksdb::Slice const& rhs) const {
int MMFilesPersistentIndexKeyComparator::Compare(rocksdb::Slice const& lhs, rocksdb::Slice const& rhs) const {
TRI_ASSERT(lhs.size() > 8);
TRI_ASSERT(rhs.size() > 8);
// compare by index id first
int res = memcmp(lhs.data(), rhs.data(), PersistentIndex::keyPrefixSize());
int res = memcmp(lhs.data(), rhs.data(), MMFilesPersistentIndex::keyPrefixSize());
if (res != 0) {
return res;

View File

@ -21,8 +21,8 @@
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_INDEXES_ROCKS_DB_KEY_COMPARATOR_H
#define ARANGOD_INDEXES_ROCKS_DB_KEY_COMPARATOR_H 1
#ifndef ARANGOD_MMFILES_MMFILES_PERSISTENT_INDEX_KEY_COMPARATOR_H
#define ARANGOD_MMFILES_MMFILES_PERSISTENT_INDEX_KEY_COMPARATOR_H 1
#include "Basics/Common.h"
#include "MMFiles/MMFilesPersistentIndex.h"
@ -34,13 +34,13 @@
namespace arangodb {
class RocksDBKeyComparator : public rocksdb::Comparator {
class MMFilesPersistentIndexKeyComparator final : public rocksdb::Comparator {
public:
RocksDBKeyComparator() = default;
~RocksDBKeyComparator() = default;
MMFilesPersistentIndexKeyComparator() = default;
~MMFilesPersistentIndexKeyComparator() = default;
static inline arangodb::velocypack::Slice extractKeySlice(rocksdb::Slice const& slice) {
return arangodb::velocypack::Slice(slice.data() + PersistentIndex::keyPrefixSize());
return arangodb::velocypack::Slice(slice.data() + MMFilesPersistentIndex::keyPrefixSize());
}
int Compare(rocksdb::Slice const& lhs, rocksdb::Slice const& rhs) const;

View File

@ -128,7 +128,7 @@ bool MMFilesPrimaryIndexIterator::next(TokenCallback const& cb, size_t limit) {
void MMFilesPrimaryIndexIterator::reset() { _iterator.reset(); }
AllIndexIterator::AllIndexIterator(LogicalCollection* collection,
MMFilesAllIndexIterator::MMFilesAllIndexIterator(LogicalCollection* collection,
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
MMFilesPrimaryIndex const* index,
@ -136,7 +136,7 @@ AllIndexIterator::AllIndexIterator(LogicalCollection* collection,
bool reverse)
: IndexIterator(collection, trx, mmdr, index), _index(indexImpl), _reverse(reverse), _total(0) {}
bool AllIndexIterator::next(TokenCallback const& cb, size_t limit) {
bool MMFilesAllIndexIterator::next(TokenCallback const& cb, size_t limit) {
while (limit > 0) {
MMFilesSimpleIndexElement element;
if (_reverse) {
@ -154,15 +154,15 @@ bool AllIndexIterator::next(TokenCallback const& cb, size_t limit) {
return true;
}
void AllIndexIterator::reset() { _position.reset(); }
void MMFilesAllIndexIterator::reset() { _position.reset(); }
AnyIndexIterator::AnyIndexIterator(LogicalCollection* collection, transaction::Methods* trx,
MMFilesAnyIndexIterator::MMFilesAnyIndexIterator(LogicalCollection* collection, transaction::Methods* trx,
ManagedDocumentResult* mmdr,
MMFilesPrimaryIndex const* index,
MMFilesPrimaryIndexImpl const* indexImpl)
: IndexIterator(collection, trx, mmdr, index), _index(indexImpl), _step(0), _total(0) {}
bool AnyIndexIterator::next(TokenCallback const& cb, size_t limit) {
bool MMFilesAnyIndexIterator::next(TokenCallback const& cb, size_t limit) {
while (limit > 0) {
MMFilesSimpleIndexElement element =
_index->findRandom(&_context, _initial, _position, _step, _total);
@ -176,7 +176,7 @@ bool AnyIndexIterator::next(TokenCallback const& cb, size_t limit) {
return true;
}
void AnyIndexIterator::reset() {
void MMFilesAnyIndexIterator::reset() {
_step = 0;
_total = 0;
_position = _initial;
@ -313,7 +313,7 @@ MMFilesSimpleIndexElement MMFilesPrimaryIndex::lookupSequential(
IndexIterator* MMFilesPrimaryIndex::allIterator(transaction::Methods* trx,
ManagedDocumentResult* mmdr,
bool reverse) const {
return new AllIndexIterator(_collection, trx, mmdr, this, _primaryIndex, reverse);
return new MMFilesAllIndexIterator(_collection, trx, mmdr, this, _primaryIndex, reverse);
}
/// @brief request an iterator over all elements in the index in
@ -321,7 +321,7 @@ IndexIterator* MMFilesPrimaryIndex::allIterator(transaction::Methods* trx,
/// exactly once unless the collection is modified.
IndexIterator* MMFilesPrimaryIndex::anyIterator(transaction::Methods* trx,
ManagedDocumentResult* mmdr) const {
return new AnyIndexIterator(_collection, trx, mmdr, this, _primaryIndex);
return new MMFilesAnyIndexIterator(_collection, trx, mmdr, this, _primaryIndex);
}
/// @brief a method to iterate over all elements in the index in

View File

@ -68,16 +68,16 @@ class MMFilesPrimaryIndexIterator final : public IndexIterator {
arangodb::velocypack::ArrayIterator _iterator;
};
class AllIndexIterator final : public IndexIterator {
class MMFilesAllIndexIterator final : public IndexIterator {
public:
AllIndexIterator(LogicalCollection* collection,
MMFilesAllIndexIterator(LogicalCollection* collection,
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
MMFilesPrimaryIndex const* index,
MMFilesPrimaryIndexImpl const* indexImpl,
bool reverse);
~AllIndexIterator() {}
~MMFilesAllIndexIterator() {}
char const* typeName() const override { return "all-index-iterator"; }
@ -92,14 +92,14 @@ class AllIndexIterator final : public IndexIterator {
uint64_t _total;
};
class AnyIndexIterator final : public IndexIterator {
class MMFilesAnyIndexIterator final : public IndexIterator {
public:
AnyIndexIterator(LogicalCollection* collection, transaction::Methods* trx,
MMFilesAnyIndexIterator(LogicalCollection* collection, transaction::Methods* trx,
ManagedDocumentResult* mmdr,
MMFilesPrimaryIndex const* index,
MMFilesPrimaryIndexImpl const* indexImpl);
~AnyIndexIterator() {}
~MMFilesAnyIndexIterator() {}
char const* typeName() const override { return "any-index-iterator"; }

View File

@ -104,25 +104,25 @@ static int CompareElementElement(void* userData,
return arangodb::basics::VelocyPackHelper::compare(l, r, true);
}
bool BaseSkiplistLookupBuilder::isEquality() const { return _isEquality; }
bool MMFilesBaseSkiplistLookupBuilder::isEquality() const { return _isEquality; }
VPackSlice const* BaseSkiplistLookupBuilder::getLowerLookup() const {
VPackSlice const* MMFilesBaseSkiplistLookupBuilder::getLowerLookup() const {
return &_lowerSlice;
}
bool BaseSkiplistLookupBuilder::includeLower() const { return _includeLower; }
bool MMFilesBaseSkiplistLookupBuilder::includeLower() const { return _includeLower; }
VPackSlice const* BaseSkiplistLookupBuilder::getUpperLookup() const {
VPackSlice const* MMFilesBaseSkiplistLookupBuilder::getUpperLookup() const {
return &_upperSlice;
}
bool BaseSkiplistLookupBuilder::includeUpper() const { return _includeUpper; }
bool MMFilesBaseSkiplistLookupBuilder::includeUpper() const { return _includeUpper; }
SkiplistLookupBuilder::SkiplistLookupBuilder(
MMFilesSkiplistLookupBuilder::MMFilesSkiplistLookupBuilder(
transaction::Methods* trx,
std::vector<std::vector<arangodb::aql::AstNode const*>>& ops,
arangodb::aql::Variable const* var, bool reverse)
: BaseSkiplistLookupBuilder(trx) {
: MMFilesBaseSkiplistLookupBuilder(trx) {
_lowerBuilder->openArray();
if (ops.empty()) {
// We only use this skiplist to sort. use empty array for lookup
@ -249,17 +249,17 @@ SkiplistLookupBuilder::SkiplistLookupBuilder(
}
}
bool SkiplistLookupBuilder::next() {
bool MMFilesSkiplistLookupBuilder::next() {
// The first search value is created during creation.
// So next is always false.
return false;
}
SkiplistInLookupBuilder::SkiplistInLookupBuilder(
MMFilesSkiplistInLookupBuilder::MMFilesSkiplistInLookupBuilder(
transaction::Methods* trx,
std::vector<std::vector<arangodb::aql::AstNode const*>>& ops,
arangodb::aql::Variable const* var, bool reverse)
: BaseSkiplistLookupBuilder(trx), _dataBuilder(trx), _done(false) {
: MMFilesBaseSkiplistLookupBuilder(trx), _dataBuilder(trx), _done(false) {
TRI_ASSERT(!ops.empty()); // We certainly do not need IN here
transaction::BuilderLeaser tmp(trx);
std::set<VPackSlice, arangodb::basics::VelocyPackHelper::VPackSorted<true>>
@ -422,7 +422,7 @@ SkiplistInLookupBuilder::SkiplistInLookupBuilder(
buildSearchValues();
}
bool SkiplistInLookupBuilder::next() {
bool MMFilesSkiplistInLookupBuilder::next() {
if (_done || !forwardInPosition()) {
return false;
}
@ -430,7 +430,7 @@ bool SkiplistInLookupBuilder::next() {
return true;
}
bool SkiplistInLookupBuilder::forwardInPosition() {
bool MMFilesSkiplistInLookupBuilder::forwardInPosition() {
std::list<PosStruct>::reverse_iterator it = _inPositions.rbegin();
while (it != _inPositions.rend()) {
it->current++;
@ -448,7 +448,7 @@ bool SkiplistInLookupBuilder::forwardInPosition() {
return false;
}
void SkiplistInLookupBuilder::buildSearchValues() {
void MMFilesSkiplistInLookupBuilder::buildSearchValues() {
auto inPos = _inPositions.begin();
_lowerBuilder->clear();
_lowerBuilder->openArray();
@ -507,7 +507,7 @@ MMFilesSkiplistIterator::MMFilesSkiplistIterator(LogicalCollection* collection,
TRI_Skiplist const* skiplist, size_t numPaths,
std::function<int(void*, MMFilesSkiplistIndexElement const*, MMFilesSkiplistIndexElement const*,
MMFilesSkiplistCmpType)> const& CmpElmElm,
bool reverse, BaseSkiplistLookupBuilder* builder)
bool reverse, MMFilesBaseSkiplistLookupBuilder* builder)
: IndexIterator(collection, trx, mmdr, index),
_skiplistIndex(skiplist),
_numPaths(numPaths),
@ -1235,14 +1235,14 @@ IndexIterator* MMFilesSkiplistIndex::iteratorForCondition(
}
if (usesIn) {
auto builder = std::make_unique<SkiplistInLookupBuilder>(
auto builder = std::make_unique<MMFilesSkiplistInLookupBuilder>(
trx, mapping, reference, reverse);
return new MMFilesSkiplistIterator(_collection, trx, mmdr, this,
_skiplistIndex, numPaths(), CmpElmElm,
reverse, builder.release());
}
auto builder =
std::make_unique<SkiplistLookupBuilder>(trx, mapping, reference, reverse);
std::make_unique<MMFilesSkiplistLookupBuilder>(trx, mapping, reference, reverse);
return new MMFilesSkiplistIterator(_collection, trx, mmdr, this,
_skiplistIndex, numPaths(), CmpElmElm,
reverse, builder.release());

View File

@ -47,12 +47,9 @@ class MMFilesSkiplistIndex;
namespace transaction {
class Methods;
}
;
/// @brief Abstract Builder for lookup values in skiplist index
class BaseSkiplistLookupBuilder {
class MMFilesBaseSkiplistLookupBuilder {
protected:
bool _isEquality;
bool _includeLower;
@ -65,9 +62,8 @@ class BaseSkiplistLookupBuilder {
arangodb::velocypack::Slice _upperSlice;
public:
explicit BaseSkiplistLookupBuilder(transaction::Methods* trx) :
_lowerBuilder(trx), _upperBuilder(trx)
{
explicit MMFilesBaseSkiplistLookupBuilder(transaction::Methods* trx) :
_lowerBuilder(trx), _upperBuilder(trx) {
_isEquality = true;
_includeUpper = true;
_includeLower = true;
@ -76,7 +72,7 @@ class BaseSkiplistLookupBuilder {
_upperBuilder->clear();
}
virtual ~BaseSkiplistLookupBuilder() {};
virtual ~MMFilesBaseSkiplistLookupBuilder() {}
/// @brief Compute the next lookup values
/// If returns false there is no further lookup
@ -109,15 +105,15 @@ class BaseSkiplistLookupBuilder {
/// returned in the correct ordering. And no
/// lookup is returned twice.
class SkiplistLookupBuilder : public BaseSkiplistLookupBuilder {
class MMFilesSkiplistLookupBuilder final : public MMFilesBaseSkiplistLookupBuilder {
public:
SkiplistLookupBuilder(
MMFilesSkiplistLookupBuilder(
transaction::Methods* trx,
std::vector<std::vector<arangodb::aql::AstNode const*>>&,
arangodb::aql::Variable const*, bool);
~SkiplistLookupBuilder() {}
~MMFilesSkiplistLookupBuilder() {}
/// @brief Compute the next lookup values
/// If returns false there is no further lookup
@ -125,7 +121,7 @@ class SkiplistLookupBuilder : public BaseSkiplistLookupBuilder {
};
class SkiplistInLookupBuilder : public BaseSkiplistLookupBuilder {
class MMFilesSkiplistInLookupBuilder final : public MMFilesBaseSkiplistLookupBuilder {
private:
@ -145,12 +141,12 @@ class SkiplistInLookupBuilder : public BaseSkiplistLookupBuilder {
bool _done;
public:
SkiplistInLookupBuilder(
MMFilesSkiplistInLookupBuilder(
transaction::Methods* trx,
std::vector<std::vector<arangodb::aql::AstNode const*>>&,
arangodb::aql::Variable const*, bool);
~SkiplistInLookupBuilder() {}
~MMFilesSkiplistInLookupBuilder() {}
/// @brief Compute the next lookup values
/// If returns false there is no further lookup
@ -190,7 +186,7 @@ class MMFilesSkiplistIterator final : public IndexIterator {
std::vector<std::pair<Node*, Node*>> _intervals;
size_t _currentInterval;
BaseSkiplistLookupBuilder* _builder;
MMFilesBaseSkiplistLookupBuilder* _builder;
std::function<int(void*, MMFilesSkiplistIndexElement const*, MMFilesSkiplistIndexElement const*,
MMFilesSkiplistCmpType)> _CmpElmElm;
@ -202,7 +198,7 @@ class MMFilesSkiplistIterator final : public IndexIterator {
TRI_Skiplist const* skiplist, size_t numPaths,
std::function<int(void*, MMFilesSkiplistIndexElement const*, MMFilesSkiplistIndexElement const*,
MMFilesSkiplistCmpType)> const& CmpElmElm,
bool reverse, BaseSkiplistLookupBuilder* builder);
bool reverse, MMFilesBaseSkiplistLookupBuilder* builder);
~MMFilesSkiplistIterator() {
delete _builder;

View File

@ -64,7 +64,7 @@ MMFilesTransactionState::~MMFilesTransactionState() {
/// @brief get (or create) a rocksdb WriteTransaction
rocksdb::Transaction* MMFilesTransactionState::rocksTransaction() {
if (_rocksTransaction == nullptr) {
_rocksTransaction = PersistentIndexFeature::instance()->db()->BeginTransaction(
_rocksTransaction = MMFilesPersistentIndexFeature::instance()->db()->BeginTransaction(
rocksdb::WriteOptions(), rocksdb::OptimisticTransactionOptions());
}
return _rocksTransaction;
@ -278,7 +278,7 @@ int MMFilesTransactionState::addOperation(TRI_voc_rid_t revisionId,
}
if (localWaitForSync) {
// also sync RocksDB WAL
PersistentIndexFeature::syncWal();
MMFilesPersistentIndexFeature::syncWal();
}
operation.setTick(slotInfo.tick);
fid = slotInfo.logfileId;
@ -473,7 +473,7 @@ int MMFilesTransactionState::writeCommitMarker() {
if (_waitForSync) {
// also sync RocksDB WAL
PersistentIndexFeature::syncWal();
MMFilesPersistentIndexFeature::syncWal();
}
TRI_IF_FAILURE("TransactionWriteCommitMarkerThrow") {

View File

@ -0,0 +1,520 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#include "MMFilesV8Functions.h"
#include "Basics/Exceptions.h"
#include "Cluster/ClusterMethods.h"
#include "Cluster/ServerState.h"
#include "MMFiles/MMFilesEngine.h"
#include "MMFiles/MMFilesLogfileManager.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include "V8/v8-conv.h"
#include "V8/v8-globals.h"
#include "V8/v8-utils.h"
#include "V8Server/v8-vocbaseprivate.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/PhysicalCollection.h"
#include <v8.h>
using namespace arangodb;
/// @brief returns information about the datafiles
/// the collection must be unloaded.
static void JS_DatafilesVocbaseCol(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
arangodb::LogicalCollection* collection =
TRI_UnwrapClass<arangodb::LogicalCollection>(args.Holder(), WRP_VOCBASE_COL_TYPE);
if (collection == nullptr) {
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
}
TRI_THROW_SHARDING_COLLECTION_NOT_YET_IMPLEMENTED(collection);
// TODO: move this into engine
StorageEngine* engine = EngineSelectorFeature::ENGINE;
TRI_vocbase_col_status_e status = collection->getStatusLocked();
if (status != TRI_VOC_COL_STATUS_UNLOADED &&
status != TRI_VOC_COL_STATUS_CORRUPTED) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_UNLOADED);
}
MMFilesEngineCollectionFiles structure = dynamic_cast<MMFilesEngine*>(engine)->scanCollectionDirectory(collection->getPhysical()->path());
// build result
v8::Handle<v8::Object> result = v8::Object::New(isolate);
// journals
v8::Handle<v8::Array> journals = v8::Array::New(isolate);
result->Set(TRI_V8_ASCII_STRING("journals"), journals);
uint32_t i = 0;
for (auto& it : structure.journals) {
journals->Set(i++, TRI_V8_STD_STRING(it));
}
// compactors
v8::Handle<v8::Array> compactors = v8::Array::New(isolate);
result->Set(TRI_V8_ASCII_STRING("compactors"), compactors);
i = 0;
for (auto& it : structure.compactors) {
compactors->Set(i++, TRI_V8_STD_STRING(it));
}
// datafiles
v8::Handle<v8::Array> datafiles = v8::Array::New(isolate);
result->Set(TRI_V8_ASCII_STRING("datafiles"), datafiles);
i = 0;
for (auto& it : structure.datafiles) {
datafiles->Set(i++, TRI_V8_STD_STRING(it));
}
TRI_V8_RETURN(result);
TRI_V8_TRY_CATCH_END
}
/// @brief returns information about the datafiles
/// Returns information about the datafiles. The collection must be unloaded.
static void JS_DatafileScanVocbaseCol(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
arangodb::LogicalCollection* collection =
TRI_UnwrapClass<arangodb::LogicalCollection>(args.Holder(), WRP_VOCBASE_COL_TYPE);
if (collection == nullptr) {
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
}
if (args.Length() != 1) {
TRI_V8_THROW_EXCEPTION_USAGE("datafileScan(<path>)");
}
std::string path = TRI_ObjectToString(args[0]);
v8::Handle<v8::Object> result;
{
// TODO Check with JAN Okay to just remove the lock?
// READ_LOCKER(readLocker, collection->_lock);
TRI_vocbase_col_status_e status = collection->getStatusLocked();
if (status != TRI_VOC_COL_STATUS_UNLOADED &&
status != TRI_VOC_COL_STATUS_CORRUPTED) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_UNLOADED);
}
DatafileScan scan = MMFilesDatafile::scan(path);
// build result
result = v8::Object::New(isolate);
result->Set(TRI_V8_ASCII_STRING("currentSize"),
v8::Number::New(isolate, scan.currentSize));
result->Set(TRI_V8_ASCII_STRING("maximalSize"),
v8::Number::New(isolate, scan.maximalSize));
result->Set(TRI_V8_ASCII_STRING("endPosition"),
v8::Number::New(isolate, scan.endPosition));
result->Set(TRI_V8_ASCII_STRING("numberMarkers"),
v8::Number::New(isolate, scan.numberMarkers));
result->Set(TRI_V8_ASCII_STRING("status"),
v8::Number::New(isolate, scan.status));
result->Set(TRI_V8_ASCII_STRING("isSealed"),
v8::Boolean::New(isolate, scan.isSealed));
v8::Handle<v8::Array> entries = v8::Array::New(isolate);
result->Set(TRI_V8_ASCII_STRING("entries"), entries);
uint32_t i = 0;
for (auto const& entry : scan.entries) {
v8::Handle<v8::Object> o = v8::Object::New(isolate);
o->Set(TRI_V8_ASCII_STRING("position"),
v8::Number::New(isolate, entry.position));
o->Set(TRI_V8_ASCII_STRING("size"),
v8::Number::New(isolate, entry.size));
o->Set(TRI_V8_ASCII_STRING("realSize"),
v8::Number::New(isolate, entry.realSize));
o->Set(TRI_V8_ASCII_STRING("tick"), V8TickId(isolate, entry.tick));
o->Set(TRI_V8_ASCII_STRING("type"),
v8::Number::New(isolate, static_cast<int>(entry.type)));
o->Set(TRI_V8_ASCII_STRING("status"),
v8::Number::New(isolate, static_cast<int>(entry.status)));
if (!entry.key.empty()) {
o->Set(TRI_V8_ASCII_STRING("key"), TRI_V8_STD_STRING(entry.key));
}
if (entry.typeName != nullptr) {
o->Set(TRI_V8_ASCII_STRING("typeName"),
TRI_V8_ASCII_STRING(entry.typeName));
}
if (!entry.diagnosis.empty()) {
o->Set(TRI_V8_ASCII_STRING("diagnosis"),
TRI_V8_STD_STRING(entry.diagnosis));
}
entries->Set(i++, o);
}
}
TRI_V8_RETURN(result);
TRI_V8_TRY_CATCH_END
}
/// @brief tries to repair a datafile
static void JS_TryRepairDatafileVocbaseCol(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
arangodb::LogicalCollection* collection =
TRI_UnwrapClass<arangodb::LogicalCollection>(args.Holder(), WRP_VOCBASE_COL_TYPE);
if (collection == nullptr) {
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
}
TRI_THROW_SHARDING_COLLECTION_NOT_YET_IMPLEMENTED(collection);
if (args.Length() != 1) {
TRI_V8_THROW_EXCEPTION_USAGE("tryRepairDatafile(<datafile>)");
}
std::string path = TRI_ObjectToString(args[0]);
TRI_vocbase_col_status_e status = collection->getStatusLocked();
if (status != TRI_VOC_COL_STATUS_UNLOADED &&
status != TRI_VOC_COL_STATUS_CORRUPTED) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_UNLOADED);
}
bool result = MMFilesDatafile::tryRepair(path);
if (result) {
TRI_V8_RETURN_TRUE();
}
TRI_V8_RETURN_FALSE();
TRI_V8_TRY_CATCH_END
}
/// @brief truncates a datafile
static void JS_TruncateDatafileVocbaseCol(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
arangodb::LogicalCollection* collection =
TRI_UnwrapClass<arangodb::LogicalCollection>(args.Holder(), WRP_VOCBASE_COL_TYPE);
if (collection == nullptr) {
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
}
TRI_THROW_SHARDING_COLLECTION_NOT_YET_IMPLEMENTED(collection);
if (args.Length() != 2) {
TRI_V8_THROW_EXCEPTION_USAGE("truncateDatafile(<datafile>, <size>)");
}
std::string path = TRI_ObjectToString(args[0]);
size_t size = (size_t)TRI_ObjectToInt64(args[1]);
TRI_vocbase_col_status_e status = collection->getStatusLocked();
if (status != TRI_VOC_COL_STATUS_UNLOADED &&
status != TRI_VOC_COL_STATUS_CORRUPTED) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_UNLOADED);
}
int res = MMFilesDatafile::truncate(path, static_cast<TRI_voc_size_t>(size));
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_THROW_EXCEPTION_MESSAGE(res, "cannot truncate datafile");
}
TRI_V8_RETURN_UNDEFINED();
TRI_V8_TRY_CATCH_END
}
static void JS_PropertiesWal(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
if (args.Length() > 1 || (args.Length() == 1 && !args[0]->IsObject())) {
TRI_V8_THROW_EXCEPTION_USAGE("properties(<object>)");
}
auto l = MMFilesLogfileManager::instance();
if (args.Length() == 1) {
// set the properties
v8::Handle<v8::Object> object = v8::Handle<v8::Object>::Cast(args[0]);
if (object->Has(TRI_V8_ASCII_STRING("allowOversizeEntries"))) {
bool value = TRI_ObjectToBoolean(
object->Get(TRI_V8_ASCII_STRING("allowOversizeEntries")));
l->allowOversizeEntries(value);
}
if (object->Has(TRI_V8_ASCII_STRING("logfileSize"))) {
uint32_t value = static_cast<uint32_t>(TRI_ObjectToUInt64(
object->Get(TRI_V8_ASCII_STRING("logfileSize")), true));
l->filesize(value);
}
if (object->Has(TRI_V8_ASCII_STRING("historicLogfiles"))) {
uint32_t value = static_cast<uint32_t>(TRI_ObjectToUInt64(
object->Get(TRI_V8_ASCII_STRING("historicLogfiles")), true));
l->historicLogfiles(value);
}
if (object->Has(TRI_V8_ASCII_STRING("reserveLogfiles"))) {
uint32_t value = static_cast<uint32_t>(TRI_ObjectToUInt64(
object->Get(TRI_V8_ASCII_STRING("reserveLogfiles")), true));
l->reserveLogfiles(value);
}
if (object->Has(TRI_V8_ASCII_STRING("throttleWait"))) {
uint64_t value = TRI_ObjectToUInt64(
object->Get(TRI_V8_ASCII_STRING("throttleWait")), true);
l->maxThrottleWait(value);
}
if (object->Has(TRI_V8_ASCII_STRING("throttleWhenPending"))) {
uint64_t value = TRI_ObjectToUInt64(
object->Get(TRI_V8_ASCII_STRING("throttleWhenPending")), true);
l->throttleWhenPending(value);
}
}
v8::Handle<v8::Object> result = v8::Object::New(isolate);
result->Set(TRI_V8_ASCII_STRING("allowOversizeEntries"),
v8::Boolean::New(isolate, l->allowOversizeEntries()));
result->Set(TRI_V8_ASCII_STRING("logfileSize"),
v8::Number::New(isolate, l->filesize()));
result->Set(TRI_V8_ASCII_STRING("historicLogfiles"),
v8::Number::New(isolate, l->historicLogfiles()));
result->Set(TRI_V8_ASCII_STRING("reserveLogfiles"),
v8::Number::New(isolate, l->reserveLogfiles()));
result->Set(TRI_V8_ASCII_STRING("syncInterval"),
v8::Number::New(isolate, (double)l->syncInterval()));
result->Set(TRI_V8_ASCII_STRING("throttleWait"),
v8::Number::New(isolate, (double)l->maxThrottleWait()));
result->Set(TRI_V8_ASCII_STRING("throttleWhenPending"),
v8::Number::New(isolate, (double)l->throttleWhenPending()));
TRI_V8_RETURN(result);
TRI_V8_TRY_CATCH_END
}
static void JS_FlushWal(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
bool waitForSync = false;
bool waitForCollector = false;
bool writeShutdownFile = false;
if (args.Length() > 0) {
if (args[0]->IsObject()) {
v8::Handle<v8::Object> obj = args[0]->ToObject();
if (obj->Has(TRI_V8_ASCII_STRING("waitForSync"))) {
waitForSync =
TRI_ObjectToBoolean(obj->Get(TRI_V8_ASCII_STRING("waitForSync")));
}
if (obj->Has(TRI_V8_ASCII_STRING("waitForCollector"))) {
waitForCollector = TRI_ObjectToBoolean(
obj->Get(TRI_V8_ASCII_STRING("waitForCollector")));
}
if (obj->Has(TRI_V8_ASCII_STRING("writeShutdownFile"))) {
writeShutdownFile = TRI_ObjectToBoolean(
obj->Get(TRI_V8_ASCII_STRING("writeShutdownFile")));
}
} else {
waitForSync = TRI_ObjectToBoolean(args[0]);
if (args.Length() > 1) {
waitForCollector = TRI_ObjectToBoolean(args[1]);
if (args.Length() > 2) {
writeShutdownFile = TRI_ObjectToBoolean(args[2]);
}
}
}
}
int res;
if (ServerState::instance()->isCoordinator()) {
res = flushWalOnAllDBServers(waitForSync, waitForCollector);
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_THROW_EXCEPTION(res);
}
TRI_V8_RETURN_TRUE();
}
res = MMFilesLogfileManager::instance()->flush(
waitForSync, waitForCollector, writeShutdownFile);
if (res != TRI_ERROR_NO_ERROR) {
if (res == TRI_ERROR_LOCK_TIMEOUT) {
// improved diagnostic message for this special case
TRI_V8_THROW_EXCEPTION_MESSAGE(res, "timed out waiting for WAL flush operation");
}
TRI_V8_THROW_EXCEPTION(res);
}
TRI_V8_RETURN_TRUE();
TRI_V8_TRY_CATCH_END
}
/// @brief wait for WAL collector to finish operations for the specified
/// collection
static void JS_WaitCollectorWal(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
if (ServerState::instance()->isCoordinator()) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
}
TRI_vocbase_t* vocbase = GetContextVocBase(isolate);
if (vocbase == nullptr) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
}
if (args.Length() < 1) {
TRI_V8_THROW_EXCEPTION_USAGE(
"WAL_WAITCOLLECTOR(<collection-id>, <timeout>)");
}
std::string const name = TRI_ObjectToString(args[0]);
arangodb::LogicalCollection* col = vocbase->lookupCollection(name);
if (col == nullptr) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND);
}
double timeout = 30.0;
if (args.Length() > 1) {
timeout = TRI_ObjectToDouble(args[1]);
}
int res = MMFilesLogfileManager::instance()->waitForCollectorQueue(
col->cid(), timeout);
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_THROW_EXCEPTION(res);
}
TRI_V8_RETURN_TRUE();
TRI_V8_TRY_CATCH_END
}
/// @brief get information about the currently running transactions
static void JS_TransactionsWal(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
auto const& info =
MMFilesLogfileManager::instance()->runningTransactions();
v8::Handle<v8::Object> result = v8::Object::New(isolate);
result->ForceSet(
TRI_V8_ASCII_STRING("runningTransactions"),
v8::Number::New(isolate, static_cast<double>(std::get<0>(info))));
// lastCollectedId
{
auto value = std::get<1>(info);
if (value == UINT64_MAX) {
result->ForceSet(TRI_V8_ASCII_STRING("minLastCollected"),
v8::Null(isolate));
} else {
result->ForceSet(TRI_V8_ASCII_STRING("minLastCollected"),
V8TickId(isolate, static_cast<TRI_voc_tick_t>(value)));
}
}
// lastSealedId
{
auto value = std::get<2>(info);
if (value == UINT64_MAX) {
result->ForceSet(TRI_V8_ASCII_STRING("minLastSealed"), v8::Null(isolate));
} else {
result->ForceSet(TRI_V8_ASCII_STRING("minLastSealed"),
V8TickId(isolate, static_cast<TRI_voc_tick_t>(value)));
}
}
TRI_V8_RETURN(result);
TRI_V8_TRY_CATCH_END
}
void MMFilesV8Functions::registerResources() {
ISOLATE;
v8::HandleScope scope(isolate);
TRI_GET_GLOBALS();
// patch ArangoCollection object
v8::Handle<v8::ObjectTemplate> rt = v8::Handle<v8::ObjectTemplate>::New(isolate, v8g->VocbaseColTempl);
TRI_ASSERT(!rt.IsEmpty());
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("truncateDatafile"),
JS_TruncateDatafileVocbaseCol, true);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("tryRepairDatafile"),
JS_TryRepairDatafileVocbaseCol, true);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("datafiles"),
JS_DatafilesVocbaseCol, true);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("datafileScan"),
JS_DatafileScanVocbaseCol, true);
// add global WAL handling functions
TRI_AddGlobalFunctionVocbase(
isolate, TRI_V8_ASCII_STRING("WAL_FLUSH"), JS_FlushWal, true);
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("WAL_WAITCOLLECTOR"),
JS_WaitCollectorWal, true);
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("WAL_PROPERTIES"),
JS_PropertiesWal, true);
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("WAL_TRANSACTIONS"),
JS_TransactionsWal, true);
}

View File

@ -1,7 +1,8 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
@ -17,16 +18,20 @@
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#include "FileResult.h"
#ifndef ARANGOD_MMFILES_MMFILES_V8_FUNCTIONS_H
#define ARANGOD_MMFILES_MMFILES_V8_FUNCTIONS_H 1
#include "Aql/Functions.h"
namespace arangodb {
FileResult::FileResult(bool state)
: Result(), _state(state), _sysErrorNumber(0) {}
FileResult::FileResult(bool state, int sysErrorNumber)
: Result(TRI_ERROR_SYS_ERROR, strerror(sysErrorNumber)),
_state(state), _sysErrorNumber(sysErrorNumber) {}
}
struct MMFilesV8Functions {
static void registerResources();
};
} // namespace arangodb
#endif

View File

@ -790,7 +790,7 @@ bool MMFilesWalRecoverState::ReplayMarker(TRI_df_marker_t const* marker,
auto physical = static_cast<MMFilesCollection*>(col->getPhysical());
TRI_ASSERT(physical != nullptr);
PersistentIndexFeature::dropIndex(databaseId, collectionId, indexId);
MMFilesPersistentIndexFeature::dropIndex(databaseId, collectionId, indexId);
std::string const indexName("index-" + std::to_string(indexId) +
".json");
@ -868,7 +868,7 @@ bool MMFilesWalRecoverState::ReplayMarker(TRI_df_marker_t const* marker,
vocbase->dropCollection(collection, true);
}
PersistentIndexFeature::dropCollection(databaseId, collectionId);
MMFilesPersistentIndexFeature::dropCollection(databaseId, collectionId);
// check if there is another collection with the same name as the one
// that
@ -987,7 +987,7 @@ bool MMFilesWalRecoverState::ReplayMarker(TRI_df_marker_t const* marker,
state->databaseFeature->dropDatabase(nameString, true, false);
}
PersistentIndexFeature::dropDatabase(databaseId);
MMFilesPersistentIndexFeature::dropDatabase(databaseId);
vocbase = nullptr;
/* TODO: check what TRI_ERROR_ARANGO_DATABASE_NOT_FOUND means here
@ -1052,7 +1052,7 @@ bool MMFilesWalRecoverState::ReplayMarker(TRI_df_marker_t const* marker,
TRI_ASSERT(physical != nullptr);
col->dropIndex(indexId);
PersistentIndexFeature::dropIndex(databaseId, collectionId, indexId);
MMFilesPersistentIndexFeature::dropIndex(databaseId, collectionId, indexId);
// additionally remove the index file
std::string const indexName("index-" + std::to_string(indexId) +
@ -1094,7 +1094,7 @@ bool MMFilesWalRecoverState::ReplayMarker(TRI_df_marker_t const* marker,
if (collection != nullptr) {
vocbase->dropCollection(collection, true);
}
PersistentIndexFeature::dropCollection(databaseId, collectionId);
MMFilesPersistentIndexFeature::dropCollection(databaseId, collectionId);
break;
}
@ -1114,7 +1114,7 @@ bool MMFilesWalRecoverState::ReplayMarker(TRI_df_marker_t const* marker,
state->databaseFeature->dropDatabase(databaseId, true, false);
}
PersistentIndexFeature::dropDatabase(databaseId);
MMFilesPersistentIndexFeature::dropDatabase(databaseId);
break;
}

View File

@ -41,7 +41,7 @@ MMFilesWalRecoveryFeature::MMFilesWalRecoveryFeature(ApplicationServer* server)
requiresElevatedPrivileges(false);
startsAfter("Database");
startsAfter("MMFilesLogfileManager");
startsAfter("PersistentIndex");
startsAfter("MMFilesPersistentIndex");
}
/// @brief run the recovery procedure

View File

@ -1013,6 +1013,10 @@ void DatabaseFeature::updateContexts() {
TRI_InitV8Queries(isolate, context);
TRI_InitV8Cluster(isolate, context);
TRI_InitV8Agency(isolate, context);
StorageEngine* engine = EngineSelectorFeature::ENGINE;
TRI_ASSERT(engine != nullptr); // Engine not loaded. Startup broken
engine->addV8Functions();
},
vocbase);
}

View File

@ -78,16 +78,12 @@
#include "Statistics/StatisticsFeature.h"
#include "StorageEngine/EngineSelectorFeature.h"
// TODO - the following MMFiles includes should probably be removed
// TODO - move the following MMFiles includes to the storage engine
#include "MMFiles/MMFilesLogfileManager.h"
#include "MMFiles/MMFilesPersistentIndexFeature.h"
#include "MMFiles/MMFilesWalRecoveryFeature.h"
// #include "StorageEngine/RocksDBEngine.h" // enable when adding Rocksdb Engine
// this include will be disabled until
// we begin to implement the RocksDB
// engine
#include "MMFiles/MMFilesEngine.h"
#include "V8Server/FoxxQueuesFeature.h"
#include "V8Server/V8DealerFeature.h"
@ -152,16 +148,13 @@ static int runServer(int argc, char** argv) {
server.addFeature(new InitDatabaseFeature(&server, nonServerFeatures));
server.addFeature(new LanguageFeature(&server));
server.addFeature(new LockfileFeature(&server));
server.addFeature(new MMFilesLogfileManager(&server));
server.addFeature(new LoggerBufferFeature(&server));
server.addFeature(new LoggerFeature(&server, true));
server.addFeature(new NonceFeature(&server));
server.addFeature(new PageSizeFeature(&server));
server.addFeature(new PrivilegeFeature(&server));
server.addFeature(new QueryRegistryFeature(&server));
server.addFeature(new TraverserEngineRegistryFeature(&server));
server.addFeature(new RandomFeature(&server));
server.addFeature(new PersistentIndexFeature(&server));
server.addFeature(new QueryRegistryFeature(&server));
server.addFeature(new SchedulerFeature(&server));
server.addFeature(new ScriptFeature(&server, &ret));
server.addFeature(new ServerFeature(&server, &ret));
@ -171,6 +164,7 @@ static int runServer(int argc, char** argv) {
server.addFeature(new StatisticsFeature(&server));
server.addFeature(new TempFeature(&server, name));
server.addFeature(new TransactionManagerFeature(&server));
server.addFeature(new TraverserEngineRegistryFeature(&server));
server.addFeature(new UnitTestsFeature(&server, &ret));
server.addFeature(new UpgradeFeature(&server, &ret, nonServerFeatures));
server.addFeature(new V8DealerFeature(&server));
@ -196,8 +190,8 @@ static int runServer(int argc, char** argv) {
// storage engines
server.addFeature(new MMFilesEngine(&server));
server.addFeature(new MMFilesWalRecoveryFeature(&server));
// server.addFeature(new RocksDBEngine(&server)); //enable RocksDB storage
// here
server.addFeature(new MMFilesLogfileManager(&server));
server.addFeature(new MMFilesPersistentIndexFeature(&server));
try {
server.run(argc, argv);

View File

@ -333,10 +333,13 @@ class StorageEngine : public application_features::ApplicationFeature {
// -------------
/// @brief Add engine-specific AQL functions.
virtual void addAqlFunctions() const = 0;
virtual void addAqlFunctions() = 0;
/// @brief Add engine-specific optimizer rules
virtual void addOptimizerRules() const = 0;
virtual void addOptimizerRules() = 0;
/// @brief Add engine-specific V8 functions
virtual void addV8Functions() = 0;
protected:
void registerCollection(TRI_vocbase_t* vocbase,

View File

@ -1003,7 +1003,7 @@ void V8DealerFeature::initializeContext(size_t i) {
{
v8::HandleScope scope(isolate);
TRI_AddGlobalVariableVocbase(isolate, localContext,
TRI_AddGlobalVariableVocbase(isolate,
TRI_V8_ASCII_STRING("APP_PATH"),
TRI_V8_STD_STRING(_appPath));

View File

@ -75,8 +75,6 @@ class V8DealerFeature final : public application_features::ApplicationFeature {
void setNumberContexts(size_t nr) { _forceNrContexts = nr; }
void increaseContexts() { ++_nrAdditionalContexts; }
void shutdownContexts();
void defineBoolean(std::string const& name, bool value) {
_definedBooleans[name] = value;
}
@ -90,6 +88,7 @@ class V8DealerFeature final : public application_features::ApplicationFeature {
void loadJavascriptFiles(TRI_vocbase_t*, std::string const&, size_t);
private:
void shutdownContexts();
V8Context* pickFreeContextForGc();
void initializeContext(size_t);
void shutdownV8Instance(V8Context*);

View File

@ -1353,23 +1353,23 @@ void TRI_InitV8Actions(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
// create the global functions
// .............................................................................
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_DEFINE_ACTION"),
JS_DefineAction);
TRI_AddGlobalFunctionVocbase(
isolate, context,
isolate,
TRI_V8_ASCII_STRING("SYS_EXECUTE_GLOBAL_CONTEXT_FUNCTION"),
JS_ExecuteGlobalContextFunction);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_GET_CURRENT_REQUEST"),
JS_GetCurrentRequest);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_GET_CURRENT_RESPONSE"),
JS_GetCurrentResponse);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_RAW_REQUEST_BODY"),
JS_RawRequestBody, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_REQUEST_PARTS"),
JS_RequestParts, true);
}
@ -1586,17 +1586,17 @@ void TRI_InitV8DebugUtils(v8::Isolate* isolate, v8::Handle<v8::Context> context,
std::string const& startupPath,
std::string const& modules) {
// debugging functions
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_DEBUG_CLEAR_FAILAT"),
JS_DebugClearFailAt);
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_DEBUG_SEGFAULT"),
JS_DebugSegfault);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_DEBUG_SET_FAILAT"),
JS_DebugSetFailAt);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_DEBUG_REMOVE_FAILAT"),
JS_DebugRemoveFailAt);
#endif

View File

@ -37,8 +37,6 @@
#include "Cluster/ClusterInfo.h"
#include "Cluster/FollowerInfo.h"
#include "Cluster/ClusterMethods.h"
#include "MMFiles/MMFilesDatafile.h"
#include "MMFiles/MMFilesEngine.h"
#include "RestServer/DatabaseFeature.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
@ -2342,88 +2340,6 @@ static void JS_TruncateVocbaseCol(
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief truncates a datafile
////////////////////////////////////////////////////////////////////////////////
static void JS_TruncateDatafileVocbaseCol(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
arangodb::LogicalCollection* collection =
TRI_UnwrapClass<arangodb::LogicalCollection>(args.Holder(), WRP_VOCBASE_COL_TYPE);
if (collection == nullptr) {
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
}
TRI_THROW_SHARDING_COLLECTION_NOT_YET_IMPLEMENTED(collection);
if (args.Length() != 2) {
TRI_V8_THROW_EXCEPTION_USAGE("truncateDatafile(<datafile>, <size>)");
}
std::string path = TRI_ObjectToString(args[0]);
size_t size = (size_t)TRI_ObjectToInt64(args[1]);
TRI_vocbase_col_status_e status = collection->getStatusLocked();
if (status != TRI_VOC_COL_STATUS_UNLOADED &&
status != TRI_VOC_COL_STATUS_CORRUPTED) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_UNLOADED);
}
int res = MMFilesDatafile::truncate(path, static_cast<TRI_voc_size_t>(size));
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_THROW_EXCEPTION_MESSAGE(res, "cannot truncate datafile");
}
TRI_V8_RETURN_UNDEFINED();
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief truncates a datafile
////////////////////////////////////////////////////////////////////////////////
static void JS_TryRepairDatafileVocbaseCol(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
arangodb::LogicalCollection* collection =
TRI_UnwrapClass<arangodb::LogicalCollection>(args.Holder(), WRP_VOCBASE_COL_TYPE);
if (collection == nullptr) {
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
}
TRI_THROW_SHARDING_COLLECTION_NOT_YET_IMPLEMENTED(collection);
if (args.Length() != 1) {
TRI_V8_THROW_EXCEPTION_USAGE("tryRepairDatafile(<datafile>)");
}
std::string path = TRI_ObjectToString(args[0]);
TRI_vocbase_col_status_e status = collection->getStatusLocked();
if (status != TRI_VOC_COL_STATUS_UNLOADED &&
status != TRI_VOC_COL_STATUS_CORRUPTED) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_UNLOADED);
}
bool result = MMFilesDatafile::tryRepair(path);
if (result) {
TRI_V8_RETURN_TRUE();
}
TRI_V8_RETURN_FALSE();
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief was docuBlock collectionType
////////////////////////////////////////////////////////////////////////////////
@ -2844,171 +2760,6 @@ static void JS_CountVocbaseCol(
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief returns information about the datafiles
/// `collection.datafiles()`
///
/// Returns information about the datafiles. The collection must be unloaded.
////////////////////////////////////////////////////////////////////////////////
static void JS_DatafilesVocbaseCol(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
arangodb::LogicalCollection* collection =
TRI_UnwrapClass<arangodb::LogicalCollection>(args.Holder(), WRP_VOCBASE_COL_TYPE);
if (collection == nullptr) {
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
}
TRI_THROW_SHARDING_COLLECTION_NOT_YET_IMPLEMENTED(collection);
// TODO: move this into engine
StorageEngine* engine = EngineSelectorFeature::ENGINE;
if (std::string(engine->typeName()) != MMFilesEngine::EngineName) {
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "operation only supported in MMFiles engine");
}
TRI_vocbase_col_status_e status = collection->getStatusLocked();
if (status != TRI_VOC_COL_STATUS_UNLOADED &&
status != TRI_VOC_COL_STATUS_CORRUPTED) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_UNLOADED);
}
MMFilesEngineCollectionFiles structure = dynamic_cast<MMFilesEngine*>(engine)->scanCollectionDirectory(collection->getPhysical()->path());
// build result
v8::Handle<v8::Object> result = v8::Object::New(isolate);
// journals
v8::Handle<v8::Array> journals = v8::Array::New(isolate);
result->Set(TRI_V8_ASCII_STRING("journals"), journals);
uint32_t i = 0;
for (auto& it : structure.journals) {
journals->Set(i++, TRI_V8_STD_STRING(it));
}
// compactors
v8::Handle<v8::Array> compactors = v8::Array::New(isolate);
result->Set(TRI_V8_ASCII_STRING("compactors"), compactors);
i = 0;
for (auto& it : structure.compactors) {
compactors->Set(i++, TRI_V8_STD_STRING(it));
}
// datafiles
v8::Handle<v8::Array> datafiles = v8::Array::New(isolate);
result->Set(TRI_V8_ASCII_STRING("datafiles"), datafiles);
i = 0;
for (auto& it : structure.datafiles) {
datafiles->Set(i++, TRI_V8_STD_STRING(it));
}
TRI_V8_RETURN(result);
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief returns information about the datafiles
///
/// @FUN{@FA{collection}.datafileScan(@FA{path})}
///
/// Returns information about the datafiles. The collection must be unloaded.
////////////////////////////////////////////////////////////////////////////////
static void JS_DatafileScanVocbaseCol(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
arangodb::LogicalCollection* collection =
TRI_UnwrapClass<arangodb::LogicalCollection>(args.Holder(), WRP_VOCBASE_COL_TYPE);
if (collection == nullptr) {
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
}
if (args.Length() != 1) {
TRI_V8_THROW_EXCEPTION_USAGE("datafileScan(<path>)");
}
std::string path = TRI_ObjectToString(args[0]);
v8::Handle<v8::Object> result;
{
// TODO Check with JAN Okay to just remove the lock?
// READ_LOCKER(readLocker, collection->_lock);
TRI_vocbase_col_status_e status = collection->getStatusLocked();
if (status != TRI_VOC_COL_STATUS_UNLOADED &&
status != TRI_VOC_COL_STATUS_CORRUPTED) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_UNLOADED);
}
DatafileScan scan = MMFilesDatafile::scan(path);
// build result
result = v8::Object::New(isolate);
result->Set(TRI_V8_ASCII_STRING("currentSize"),
v8::Number::New(isolate, scan.currentSize));
result->Set(TRI_V8_ASCII_STRING("maximalSize"),
v8::Number::New(isolate, scan.maximalSize));
result->Set(TRI_V8_ASCII_STRING("endPosition"),
v8::Number::New(isolate, scan.endPosition));
result->Set(TRI_V8_ASCII_STRING("numberMarkers"),
v8::Number::New(isolate, scan.numberMarkers));
result->Set(TRI_V8_ASCII_STRING("status"),
v8::Number::New(isolate, scan.status));
result->Set(TRI_V8_ASCII_STRING("isSealed"),
v8::Boolean::New(isolate, scan.isSealed));
v8::Handle<v8::Array> entries = v8::Array::New(isolate);
result->Set(TRI_V8_ASCII_STRING("entries"), entries);
uint32_t i = 0;
for (auto const& entry : scan.entries) {
v8::Handle<v8::Object> o = v8::Object::New(isolate);
o->Set(TRI_V8_ASCII_STRING("position"),
v8::Number::New(isolate, entry.position));
o->Set(TRI_V8_ASCII_STRING("size"),
v8::Number::New(isolate, entry.size));
o->Set(TRI_V8_ASCII_STRING("realSize"),
v8::Number::New(isolate, entry.realSize));
o->Set(TRI_V8_ASCII_STRING("tick"), V8TickId(isolate, entry.tick));
o->Set(TRI_V8_ASCII_STRING("type"),
v8::Number::New(isolate, static_cast<int>(entry.type)));
o->Set(TRI_V8_ASCII_STRING("status"),
v8::Number::New(isolate, static_cast<int>(entry.status)));
if (!entry.key.empty()) {
o->Set(TRI_V8_ASCII_STRING("key"), TRI_V8_STD_STRING(entry.key));
}
if (entry.typeName != nullptr) {
o->Set(TRI_V8_ASCII_STRING("typeName"),
TRI_V8_ASCII_STRING(entry.typeName));
}
if (!entry.diagnosis.empty()) {
o->Set(TRI_V8_ASCII_STRING("diagnosis"),
TRI_V8_STD_STRING(entry.diagnosis));
}
entries->Set(i++, o);
}
}
TRI_V8_RETURN(result);
TRI_V8_TRY_CATCH_END
}
// .............................................................................
// generate the arangodb::LogicalCollection template
// .............................................................................
@ -3052,10 +2803,6 @@ void TRI_InitV8Collection(v8::Handle<v8::Context> context,
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("count"),
JS_CountVocbaseCol);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("datafiles"),
JS_DatafilesVocbaseCol);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("datafileScan"),
JS_DatafileScanVocbaseCol, true);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("document"),
JS_DocumentVocbaseCol);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("drop"),
@ -3101,10 +2848,6 @@ void TRI_InitV8Collection(v8::Handle<v8::Context> context,
JS_StatusVocbaseCol);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("TRUNCATE"),
JS_TruncateVocbaseCol, true);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("truncateDatafile"),
JS_TruncateDatafileVocbaseCol, true);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("tryRepairDatafile"),
JS_TryRepairDatafileVocbaseCol, true);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("type"),
JS_TypeVocbaseCol);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("unload"),
@ -3114,10 +2857,10 @@ void TRI_InitV8Collection(v8::Handle<v8::Context> context,
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("version"),
JS_VersionVocbaseCol);
TRI_InitV8indexCollection(isolate, rt);
TRI_InitV8IndexCollection(isolate, rt);
v8g->VocbaseColTempl.Reset(isolate, rt);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("ArangoCollection"),
ft->GetFunction());
}

View File

@ -612,15 +612,15 @@ void TRI_InitV8Dispatcher(v8::Isolate* isolate,
v8::HandleScope scope(isolate);
// we need a scheduler and a dispatcher to define periodic tasks
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_REGISTER_TASK"),
JS_RegisterTask);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_UNREGISTER_TASK"),
JS_UnregisterTask);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_GET_TASK"), JS_GetTask);
}

View File

@ -839,37 +839,37 @@ void TRI_InitV8Replication(v8::Isolate* isolate,
TRI_vocbase_t* vocbase,
size_t threadNumber, TRI_v8_global_t* v8g) {
// replication functions. not intended to be used by end users
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("REPLICATION_LOGGER_STATE"),
JS_StateLoggerReplication, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("REPLICATION_LOGGER_LAST"),
JS_LastLoggerReplication, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("REPLICATION_LOGGER_TICK_RANGES"),
isolate, TRI_V8_ASCII_STRING("REPLICATION_LOGGER_TICK_RANGES"),
JS_TickRangesLoggerReplication, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("REPLICATION_LOGGER_FIRST_TICK"),
isolate, TRI_V8_ASCII_STRING("REPLICATION_LOGGER_FIRST_TICK"),
JS_FirstTickLoggerReplication, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("REPLICATION_SYNCHRONIZE"),
JS_SynchronizeReplication, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("REPLICATION_SERVER_ID"),
JS_ServerIdReplication, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("REPLICATION_APPLIER_CONFIGURE"),
isolate, TRI_V8_ASCII_STRING("REPLICATION_APPLIER_CONFIGURE"),
JS_ConfigureApplierReplication, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("REPLICATION_APPLIER_START"),
JS_StartApplierReplication, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("REPLICATION_APPLIER_SHUTDOWN"),
isolate, TRI_V8_ASCII_STRING("REPLICATION_APPLIER_SHUTDOWN"),
JS_ShutdownApplierReplication, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("REPLICATION_APPLIER_STATE"),
JS_StateApplierReplication, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("REPLICATION_APPLIER_FORGET"),
isolate, TRI_V8_ASCII_STRING("REPLICATION_APPLIER_FORGET"),
JS_ForgetApplierReplication, true);
}

View File

@ -254,30 +254,30 @@ void TRI_InitV8Statistics(v8::Isolate* isolate,
// create the global functions
// .............................................................................
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_ENABLED_STATISTICS"),
JS_EnabledStatistics);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_CLIENT_STATISTICS"),
JS_ClientStatistics);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_HTTP_STATISTICS"),
JS_HttpStatistics);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_SERVER_STATISTICS"),
JS_ServerStatistics);
TRI_AddGlobalVariableVocbase(
isolate, context, TRI_V8_ASCII_STRING("CONNECTION_TIME_DISTRIBUTION"),
isolate, TRI_V8_ASCII_STRING("CONNECTION_TIME_DISTRIBUTION"),
DistributionList(isolate,
TRI_ConnectionTimeDistributionVectorStatistics));
TRI_AddGlobalVariableVocbase(
isolate, context, TRI_V8_ASCII_STRING("REQUEST_TIME_DISTRIBUTION"),
isolate, TRI_V8_ASCII_STRING("REQUEST_TIME_DISTRIBUTION"),
DistributionList(isolate, TRI_RequestTimeDistributionVectorStatistics));
TRI_AddGlobalVariableVocbase(
isolate, context, TRI_V8_ASCII_STRING("BYTES_SENT_DISTRIBUTION"),
isolate, TRI_V8_ASCII_STRING("BYTES_SENT_DISTRIBUTION"),
DistributionList(isolate, TRI_BytesSentDistributionVectorStatistics));
TRI_AddGlobalVariableVocbase(
isolate, context, TRI_V8_ASCII_STRING("BYTES_RECEIVED_DISTRIBUTION"),
isolate, TRI_V8_ASCII_STRING("BYTES_RECEIVED_DISTRIBUTION"),
DistributionList(isolate, TRI_BytesReceivedDistributionVectorStatistics));
}

View File

@ -2618,63 +2618,63 @@ void TRI_InitV8UserStructures(v8::Isolate* isolate,
// NOTE: the following functions are all experimental and might
// change without further notice
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("KEYSPACE_CREATE"),
JS_KeyspaceCreate, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("KEYSPACE_DROP"),
JS_KeyspaceDrop, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("KEYSPACE_COUNT"),
JS_KeyspaceCount, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("KEYSPACE_EXISTS"),
JS_KeyspaceExists, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("KEYSPACE_KEYS"),
JS_KeyspaceKeys, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("KEYSPACE_REMOVE"),
JS_KeyspaceRemove, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("KEYSPACE_GET"),
JS_KeyspaceGet, true);
TRI_AddGlobalFunctionVocbase(isolate, context, TRI_V8_ASCII_STRING("KEY_SET"),
TRI_AddGlobalFunctionVocbase(isolate, TRI_V8_ASCII_STRING("KEY_SET"),
JS_KeySet, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("KEY_SET_CAS"), JS_KeySetCas, true);
TRI_AddGlobalFunctionVocbase(isolate, context, TRI_V8_ASCII_STRING("KEY_GET"),
isolate, TRI_V8_ASCII_STRING("KEY_SET_CAS"), JS_KeySetCas, true);
TRI_AddGlobalFunctionVocbase(isolate, TRI_V8_ASCII_STRING("KEY_GET"),
JS_KeyGet, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("KEY_REMOVE"), JS_KeyRemove, true);
isolate, TRI_V8_ASCII_STRING("KEY_REMOVE"), JS_KeyRemove, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("KEY_EXISTS"), JS_KeyExists, true);
isolate, TRI_V8_ASCII_STRING("KEY_EXISTS"), JS_KeyExists, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("KEY_TYPE"), JS_KeyType, true);
isolate, TRI_V8_ASCII_STRING("KEY_TYPE"), JS_KeyType, true);
// numeric functions
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("KEY_INCR"), JS_KeyIncr, true);
isolate, TRI_V8_ASCII_STRING("KEY_INCR"), JS_KeyIncr, true);
// list / array functions
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("KEY_UPDATE"), JS_KeyUpdate, true);
isolate, TRI_V8_ASCII_STRING("KEY_UPDATE"), JS_KeyUpdate, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("KEY_KEYS"), JS_KeyKeys, true);
isolate, TRI_V8_ASCII_STRING("KEY_KEYS"), JS_KeyKeys, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("KEY_VALUES"), JS_KeyValues, true);
isolate, TRI_V8_ASCII_STRING("KEY_VALUES"), JS_KeyValues, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("KEY_COUNT"), JS_KeyCount, true);
isolate, TRI_V8_ASCII_STRING("KEY_COUNT"), JS_KeyCount, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("KEY_PUSH"), JS_KeyPush, true);
TRI_AddGlobalFunctionVocbase(isolate, context, TRI_V8_ASCII_STRING("KEY_POP"),
isolate, TRI_V8_ASCII_STRING("KEY_PUSH"), JS_KeyPush, true);
TRI_AddGlobalFunctionVocbase(isolate, TRI_V8_ASCII_STRING("KEY_POP"),
JS_KeyPop, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("KEY_TRANSFER"),
JS_KeyTransfer, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("KEY_GET_AT"), JS_KeyGetAt, true);
isolate, TRI_V8_ASCII_STRING("KEY_GET_AT"), JS_KeyGetAt, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("KEY_SET_AT"), JS_KeySetAt, true);
isolate, TRI_V8_ASCII_STRING("KEY_SET_AT"), JS_KeySetAt, true);
}

View File

@ -411,244 +411,6 @@ static void JS_Transaction(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief was docuBlock walPropertiesGet
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// @brief was docuBlock walPropertiesSet
////////////////////////////////////////////////////////////////////////////////
static void JS_PropertiesWal(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
if (args.Length() > 1 || (args.Length() == 1 && !args[0]->IsObject())) {
TRI_V8_THROW_EXCEPTION_USAGE("properties(<object>)");
}
auto l = MMFilesLogfileManager::instance();
if (args.Length() == 1) {
// set the properties
v8::Handle<v8::Object> object = v8::Handle<v8::Object>::Cast(args[0]);
if (object->Has(TRI_V8_ASCII_STRING("allowOversizeEntries"))) {
bool value = TRI_ObjectToBoolean(
object->Get(TRI_V8_ASCII_STRING("allowOversizeEntries")));
l->allowOversizeEntries(value);
}
if (object->Has(TRI_V8_ASCII_STRING("logfileSize"))) {
uint32_t value = static_cast<uint32_t>(TRI_ObjectToUInt64(
object->Get(TRI_V8_ASCII_STRING("logfileSize")), true));
l->filesize(value);
}
if (object->Has(TRI_V8_ASCII_STRING("historicLogfiles"))) {
uint32_t value = static_cast<uint32_t>(TRI_ObjectToUInt64(
object->Get(TRI_V8_ASCII_STRING("historicLogfiles")), true));
l->historicLogfiles(value);
}
if (object->Has(TRI_V8_ASCII_STRING("reserveLogfiles"))) {
uint32_t value = static_cast<uint32_t>(TRI_ObjectToUInt64(
object->Get(TRI_V8_ASCII_STRING("reserveLogfiles")), true));
l->reserveLogfiles(value);
}
if (object->Has(TRI_V8_ASCII_STRING("throttleWait"))) {
uint64_t value = TRI_ObjectToUInt64(
object->Get(TRI_V8_ASCII_STRING("throttleWait")), true);
l->maxThrottleWait(value);
}
if (object->Has(TRI_V8_ASCII_STRING("throttleWhenPending"))) {
uint64_t value = TRI_ObjectToUInt64(
object->Get(TRI_V8_ASCII_STRING("throttleWhenPending")), true);
l->throttleWhenPending(value);
}
}
v8::Handle<v8::Object> result = v8::Object::New(isolate);
result->Set(TRI_V8_ASCII_STRING("allowOversizeEntries"),
v8::Boolean::New(isolate, l->allowOversizeEntries()));
result->Set(TRI_V8_ASCII_STRING("logfileSize"),
v8::Number::New(isolate, l->filesize()));
result->Set(TRI_V8_ASCII_STRING("historicLogfiles"),
v8::Number::New(isolate, l->historicLogfiles()));
result->Set(TRI_V8_ASCII_STRING("reserveLogfiles"),
v8::Number::New(isolate, l->reserveLogfiles()));
result->Set(TRI_V8_ASCII_STRING("syncInterval"),
v8::Number::New(isolate, (double)l->syncInterval()));
result->Set(TRI_V8_ASCII_STRING("throttleWait"),
v8::Number::New(isolate, (double)l->maxThrottleWait()));
result->Set(TRI_V8_ASCII_STRING("throttleWhenPending"),
v8::Number::New(isolate, (double)l->throttleWhenPending()));
TRI_V8_RETURN(result);
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief was docuBlock walFlush
////////////////////////////////////////////////////////////////////////////////
static void JS_FlushWal(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
bool waitForSync = false;
bool waitForCollector = false;
bool writeShutdownFile = false;
if (args.Length() > 0) {
if (args[0]->IsObject()) {
v8::Handle<v8::Object> obj = args[0]->ToObject();
if (obj->Has(TRI_V8_ASCII_STRING("waitForSync"))) {
waitForSync =
TRI_ObjectToBoolean(obj->Get(TRI_V8_ASCII_STRING("waitForSync")));
}
if (obj->Has(TRI_V8_ASCII_STRING("waitForCollector"))) {
waitForCollector = TRI_ObjectToBoolean(
obj->Get(TRI_V8_ASCII_STRING("waitForCollector")));
}
if (obj->Has(TRI_V8_ASCII_STRING("writeShutdownFile"))) {
writeShutdownFile = TRI_ObjectToBoolean(
obj->Get(TRI_V8_ASCII_STRING("writeShutdownFile")));
}
} else {
waitForSync = TRI_ObjectToBoolean(args[0]);
if (args.Length() > 1) {
waitForCollector = TRI_ObjectToBoolean(args[1]);
if (args.Length() > 2) {
writeShutdownFile = TRI_ObjectToBoolean(args[2]);
}
}
}
}
int res;
if (ServerState::instance()->isCoordinator()) {
res = flushWalOnAllDBServers(waitForSync, waitForCollector);
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_THROW_EXCEPTION(res);
}
TRI_V8_RETURN_TRUE();
}
res = MMFilesLogfileManager::instance()->flush(
waitForSync, waitForCollector, writeShutdownFile);
if (res != TRI_ERROR_NO_ERROR) {
if (res == TRI_ERROR_LOCK_TIMEOUT) {
// improved diagnostic message for this special case
TRI_V8_THROW_EXCEPTION_MESSAGE(res, "timed out waiting for WAL flush operation");
}
TRI_V8_THROW_EXCEPTION(res);
}
TRI_V8_RETURN_TRUE();
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief wait for WAL collector to finish operations for the specified
/// collection
////////////////////////////////////////////////////////////////////////////////
static void JS_WaitCollectorWal(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
if (ServerState::instance()->isCoordinator()) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
}
TRI_vocbase_t* vocbase = GetContextVocBase(isolate);
if (vocbase == nullptr) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
}
if (args.Length() < 1) {
TRI_V8_THROW_EXCEPTION_USAGE(
"WAL_WAITCOLLECTOR(<collection-id>, <timeout>)");
}
std::string const name = TRI_ObjectToString(args[0]);
arangodb::LogicalCollection* col = vocbase->lookupCollection(name);
if (col == nullptr) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND);
}
double timeout = 30.0;
if (args.Length() > 1) {
timeout = TRI_ObjectToDouble(args[1]);
}
int res = MMFilesLogfileManager::instance()->waitForCollectorQueue(
col->cid(), timeout);
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_THROW_EXCEPTION(res);
}
TRI_V8_RETURN_TRUE();
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief get information about the currently running transactions
////////////////////////////////////////////////////////////////////////////////
static void JS_TransactionsWal(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
auto const& info =
MMFilesLogfileManager::instance()->runningTransactions();
v8::Handle<v8::Object> result = v8::Object::New(isolate);
result->ForceSet(
TRI_V8_ASCII_STRING("runningTransactions"),
v8::Number::New(isolate, static_cast<double>(std::get<0>(info))));
// lastCollectedId
{
auto value = std::get<1>(info);
if (value == UINT64_MAX) {
result->ForceSet(TRI_V8_ASCII_STRING("minLastCollected"),
v8::Null(isolate));
} else {
result->ForceSet(TRI_V8_ASCII_STRING("minLastCollected"),
V8TickId(isolate, static_cast<TRI_voc_tick_t>(value)));
}
}
// lastSealedId
{
auto value = std::get<2>(info);
if (value == UINT64_MAX) {
result->ForceSet(TRI_V8_ASCII_STRING("minLastSealed"), v8::Null(isolate));
} else {
result->ForceSet(TRI_V8_ASCII_STRING("minLastSealed"),
V8TickId(isolate, static_cast<TRI_voc_tick_t>(value)));
}
}
TRI_V8_RETURN(result);
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief normalize UTF 16 strings
////////////////////////////////////////////////////////////////////////////////
@ -699,7 +461,7 @@ static void JS_Debug(v8::FunctionCallbackInfo<v8::Value> const& args) {
v8::Local<v8::Object> callerScope;
if (args.Length() >= 1) {
TRI_AddGlobalVariableVocbase(isolate, isolate->GetCurrentContext(), debug,
TRI_AddGlobalVariableVocbase(isolate, debug,
args[0]);
}
@ -2914,12 +2676,12 @@ void TRI_InitV8VocBridge(v8::Isolate* isolate, v8::Handle<v8::Context> context,
TRI_InitV8Statistics(isolate, context);
TRI_InitV8indexArangoDB(isolate, ArangoNS);
TRI_InitV8IndexArangoDB(isolate, ArangoNS);
TRI_InitV8Collection(context, vocbase, threadNumber, v8g, isolate, ArangoNS);
v8g->VocbaseTempl.Reset(isolate, ArangoNS);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("ArangoDatabase"),
ft->GetFunction());
@ -2930,123 +2692,112 @@ void TRI_InitV8VocBridge(v8::Isolate* isolate, v8::Handle<v8::Context> context,
// .............................................................................
// AQL functions. not intended to be used directly by end users
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("AQL_EXECUTE"),
JS_ExecuteAql, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("AQL_EXECUTEJSON"),
JS_ExecuteAqlJson, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("AQL_EXPLAIN"),
JS_ExplainAql, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("AQL_PARSE"), JS_ParseAql, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
isolate, TRI_V8_ASCII_STRING("AQL_PARSE"), JS_ParseAql, true);
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("AQL_WARNING"),
JS_WarningAql, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("AQL_QUERIES_PROPERTIES"),
JS_QueriesPropertiesAql, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("AQL_QUERIES_CURRENT"),
JS_QueriesCurrentAql, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("AQL_QUERIES_SLOW"),
JS_QueriesSlowAql, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("AQL_QUERIES_KILL"),
JS_QueriesKillAql, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("AQL_QUERY_SLEEP"),
JS_QuerySleepAql, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("AQL_QUERY_IS_KILLED"),
JS_QueryIsKilledAql, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("AQL_QUERY_CACHE_PROPERTIES"),
isolate, TRI_V8_ASCII_STRING("AQL_QUERY_CACHE_PROPERTIES"),
JS_QueryCachePropertiesAql, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("AQL_QUERY_CACHE_INVALIDATE"),
isolate, TRI_V8_ASCII_STRING("AQL_QUERY_CACHE_INVALIDATE"),
JS_QueryCacheInvalidateAql, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("OBJECT_HASH"),
JS_ObjectHash, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("THROW_COLLECTION_NOT_LOADED"),
isolate, TRI_V8_ASCII_STRING("THROW_COLLECTION_NOT_LOADED"),
JS_ThrowCollectionNotLoaded, true);
TRI_InitV8Replication(isolate, context, vocbase, threadNumber, v8g);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("COMPARE_STRING"),
JS_CompareString);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("NORMALIZE_STRING"),
JS_NormalizeString);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("TIMEZONES"), JS_GetIcuTimezones);
TRI_AddGlobalFunctionVocbase(isolate, context, TRI_V8_ASCII_STRING("LOCALES"),
isolate, TRI_V8_ASCII_STRING("TIMEZONES"), JS_GetIcuTimezones);
TRI_AddGlobalFunctionVocbase(isolate, TRI_V8_ASCII_STRING("LOCALES"),
JS_GetIcuLocales);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("FORMAT_DATETIME"),
JS_FormatDatetime);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("PARSE_DATETIME"),
JS_ParseDatetime);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("ENDPOINTS"), JS_Endpoints, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
isolate, TRI_V8_ASCII_STRING("ENDPOINTS"), JS_Endpoints, true);
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("RELOAD_AUTH"),
JS_ReloadAuth, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("TRANSACTION"),
JS_Transaction, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("WAL_FLUSH"), JS_FlushWal, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_V8_ASCII_STRING("WAL_WAITCOLLECTOR"),
JS_WaitCollectorWal, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_V8_ASCII_STRING("WAL_PROPERTIES"),
JS_PropertiesWal, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_V8_ASCII_STRING("WAL_TRANSACTIONS"),
JS_TransactionsWal, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("ENABLE_NATIVE_BACKTRACES"),
JS_EnableNativeBacktraces, true);
TRI_AddGlobalFunctionVocbase(isolate, context, TRI_V8_ASCII_STRING("Debug"),
TRI_AddGlobalFunctionVocbase(isolate, TRI_V8_ASCII_STRING("Debug"),
JS_Debug, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("CLEAR_TIMERS"),
JS_ClearTimers, true);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("GET_TIMERS"), JS_GetTimers, true);
isolate, TRI_V8_ASCII_STRING("GET_TIMERS"), JS_GetTimers, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("AUTHENTICATION_ENABLED"),
JS_AuthenticationEnabled, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("TRUSTED_PROXIES"),
JS_TrustedProxies, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_IS_ENTERPRISE"),
JS_IsEnterprise);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("DECODE_REV"),
JS_DecodeRev);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("ARANGODB_CONTEXT"),
JS_ArangoDBContext,
true);
@ -3059,7 +2810,7 @@ void TRI_InitV8VocBridge(v8::Isolate* isolate, v8::Handle<v8::Context> context,
if (v.IsEmpty()) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "out of memory when initializing VocBase";
} else {
TRI_AddGlobalVariableVocbase(isolate, context, TRI_V8_ASCII_STRING("db"),
TRI_AddGlobalVariableVocbase(isolate, TRI_V8_ASCII_STRING("db"),
v);
}

View File

@ -211,10 +211,10 @@ void TRI_InitV8cursor(v8::Handle<v8::Context> context, TRI_v8_global_t* v8g) {
v8::Isolate* isolate = v8::Isolate::GetCurrent();
// cursor functions. not intended to be used by end users
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("CREATE_CURSOR"),
JS_CreateCursor, true);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("JSON_CURSOR"),
JS_JsonCursor, true);
}

View File

@ -777,7 +777,7 @@ static void JS_CreateEdgeCollectionVocbase(
TRI_V8_TRY_CATCH_END
}
void TRI_InitV8indexArangoDB(v8::Isolate* isolate,
void TRI_InitV8IndexArangoDB(v8::Isolate* isolate,
v8::Handle<v8::ObjectTemplate> rt) {
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("_create"),
JS_CreateVocbase, true);
@ -789,7 +789,7 @@ void TRI_InitV8indexArangoDB(v8::Isolate* isolate,
JS_CreateDocumentCollectionVocbase);
}
void TRI_InitV8indexCollection(v8::Isolate* isolate,
void TRI_InitV8IndexCollection(v8::Isolate* isolate,
v8::Handle<v8::ObjectTemplate> rt) {
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("dropIndex"),
JS_DropIndexVocbaseCol);

View File

@ -43,10 +43,10 @@ std::shared_ptr<arangodb::Index> TRI_LookupIndexByHandle(
arangodb::LogicalCollection const* collection, v8::Handle<v8::Value> const val,
bool ignoreNotFound);
void TRI_InitV8indexArangoDB(v8::Isolate* isolate,
void TRI_InitV8IndexArangoDB(v8::Isolate* isolate,
v8::Handle<v8::ObjectTemplate> ArangoDBNS);
void TRI_InitV8indexCollection(v8::Isolate* isolate,
void TRI_InitV8IndexCollection(v8::Isolate* isolate,
v8::Handle<v8::ObjectTemplate> rt);
int EnsureIndexCoordinator(std::string const& dbName, std::string const& cid,

View File

@ -597,6 +597,25 @@ std::shared_ptr<ShardMap> LogicalCollection::shardIds() const {
return _shardIds;
}
// return a filtered list of the collection's shards
std::shared_ptr<ShardMap> LogicalCollection::shardIds(std::unordered_set<std::string> const& includedShards) const {
if (includedShards.empty()) {
return _shardIds;
}
std::shared_ptr<ShardMap> copy = _shardIds;
auto result = std::make_shared<ShardMap>();
for (auto const& it : *copy) {
if (includedShards.find(it.first) == includedShards.end()) {
// a shard we are not interested in
continue;
}
result->emplace(it.first, it.second);
}
return result;
}
void LogicalCollection::setShardMap(std::shared_ptr<ShardMap>& map) {
_shardIds = map;
}

View File

@ -212,6 +212,8 @@ class LogicalCollection {
virtual bool usesDefaultShardKeys() const;
std::vector<std::string> const& shardKeys() const;
std::shared_ptr<ShardMap> shardIds() const;
// return a filtered list of the collection's shards
std::shared_ptr<ShardMap> shardIds(std::unordered_set<std::string> const& includedShards) const;
void setShardMap(std::shared_ptr<ShardMap>& map);
/// @brief a method to skip certain documents in AQL write operations,

View File

@ -67,14 +67,14 @@ DumpFeature::DumpFeature(application_features::ApplicationServer* server,
_result(result),
_batchId(0),
_clusterMode(false),
_stats{ 0, 0, 0 } {
_stats{0, 0, 0} {
requiresElevatedPrivileges(false);
setOptional(false);
startsAfter("Client");
startsAfter("Logger");
_outputDirectory =
FileUtils::buildFilename(FileUtils::currentDirectory(), "dump");
FileUtils::buildFilename(FileUtils::currentDirectory().result(), "dump");
}
void DumpFeature::collectOptions(
@ -117,8 +117,9 @@ void DumpFeature::collectOptions(
options->addOption("--tick-end", "last tick to be included in data dump",
new UInt64Parameter(&_tickEnd));
options->addOption("--compat28", "produce a dump compatible with ArangoDB 2.8",
options->addOption("--compat28",
"produce a dump compatible with ArangoDB 2.8",
new BooleanParameter(&_compat28));
}
@ -130,8 +131,9 @@ void DumpFeature::validateOptions(
if (1 == n) {
_outputDirectory = positionals[0];
} else if (1 < n) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "expecting at most one directory, got " +
StringUtils::join(positionals, ", ");
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "expecting at most one directory, got " +
StringUtils::join(positionals, ", ");
FATAL_ERROR_EXIT();
}
@ -144,7 +146,8 @@ void DumpFeature::validateOptions(
}
if (_tickStart < _tickEnd) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid values for --tick-start or --tick-end";
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "invalid values for --tick-start or --tick-end";
FATAL_ERROR_EXIT();
}
@ -165,23 +168,28 @@ void DumpFeature::prepare() {
isDirectory = TRI_IsDirectory(_outputDirectory.c_str());
if (isDirectory) {
std::vector<std::string> files(TRI_FullTreeDirectory(_outputDirectory.c_str()));
std::vector<std::string> files(
TRI_FullTreeDirectory(_outputDirectory.c_str()));
// we don't care if the target directory is empty
isEmptyDirectory = (files.size() <= 1); // TODO: TRI_FullTreeDirectory always returns at least one element (""), even if directory is empty?
isEmptyDirectory = (files.size() <= 1); // TODO: TRI_FullTreeDirectory
// always returns at least one
// element (""), even if
// directory is empty?
}
}
if (_outputDirectory.empty() ||
(TRI_ExistsFile(_outputDirectory.c_str()) && !isDirectory)) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot write to output directory '" << _outputDirectory
<< "'";
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "cannot write to output directory '" << _outputDirectory << "'";
FATAL_ERROR_EXIT();
}
if (isDirectory && !isEmptyDirectory && !_overwrite) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "output directory '" << _outputDirectory
<< "' already exists. use \"--overwrite true\" to "
"overwrite data in it";
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "output directory '" << _outputDirectory
<< "' already exists. use \"--overwrite true\" to "
"overwrite data in it";
FATAL_ERROR_EXIT();
}
@ -192,8 +200,9 @@ void DumpFeature::prepare() {
errorMessage);
if (res != TRI_ERROR_NO_ERROR) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "unable to create output directory '" << _outputDirectory
<< "': " << errorMessage;
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "unable to create output directory '" << _outputDirectory
<< "': " << errorMessage;
FATAL_ERROR_EXIT();
}
}
@ -209,9 +218,8 @@ int DumpFeature::startBatch(std::string DBserver, std::string& errorMsg) {
urlExt = "?DBserver=" + DBserver;
}
std::unique_ptr<SimpleHttpResult> response(
_httpClient->request(rest::RequestType::POST, url + urlExt,
body.c_str(), body.size()));
std::unique_ptr<SimpleHttpResult> response(_httpClient->request(
rest::RequestType::POST, url + urlExt, body.c_str(), body.size()));
if (response == nullptr || !response->isComplete()) {
errorMsg =
@ -262,9 +270,8 @@ void DumpFeature::extendBatch(std::string DBserver) {
urlExt = "?DBserver=" + DBserver;
}
std::unique_ptr<SimpleHttpResult> response(
_httpClient->request(rest::RequestType::PUT, url + urlExt,
body.c_str(), body.size()));
std::unique_ptr<SimpleHttpResult> response(_httpClient->request(
rest::RequestType::PUT, url + urlExt, body.c_str(), body.size()));
// ignore any return value
}
@ -294,8 +301,8 @@ int DumpFeature::dumpCollection(int fd, std::string const& cid,
std::string& errorMsg) {
uint64_t chunkSize = _chunkSize;
std::string const baseUrl = "/_api/replication/dump?collection=" + cid +
"&ticks=false&flush=false";
std::string const baseUrl =
"/_api/replication/dump?collection=" + cid + "&ticks=false&flush=false";
uint64_t fromTick = _tickStart;
@ -313,8 +320,8 @@ int DumpFeature::dumpCollection(int fd, std::string const& cid,
_stats._totalBatches++;
std::unique_ptr<SimpleHttpResult> response(_httpClient->request(
rest::RequestType::GET, url, nullptr, 0));
std::unique_ptr<SimpleHttpResult> response(
_httpClient->request(rest::RequestType::GET, url, nullptr, 0));
if (response == nullptr || !response->isComplete()) {
errorMsg =
@ -543,7 +550,8 @@ int DumpFeature::runDump(std::string& dbName, std::string& errorMsg) {
return TRI_ERROR_INTERNAL;
}
uint64_t const cid = arangodb::basics::VelocyPackHelper::extractIdValue(parameters);
uint64_t const cid =
arangodb::basics::VelocyPackHelper::extractIdValue(parameters);
std::string const name = arangodb::basics::VelocyPackHelper::getStringValue(
parameters, "name", "");
bool const deleted = arangodb::basics::VelocyPackHelper::getBooleanValue(
@ -642,7 +650,8 @@ int DumpFeature::runDump(std::string& dbName, std::string& errorMsg) {
}
extendBatch("");
int res = dumpCollection(fd, std::to_string(cid), name, maxTick, errorMsg);
int res =
dumpCollection(fd, std::to_string(cid), name, maxTick, errorMsg);
TRI_CLOSE(fd);
@ -664,8 +673,7 @@ int DumpFeature::dumpShard(int fd, std::string const& DBserver,
std::string const& name, std::string& errorMsg) {
std::string const baseUrl = "/_api/replication/dump?DBserver=" + DBserver +
"&collection=" + name + "&chunkSize=" +
StringUtils::itoa(_chunkSize) +
"&ticks=false";
StringUtils::itoa(_chunkSize) + "&ticks=false";
uint64_t fromTick = 0;
uint64_t maxTick = UINT64_MAX;
@ -679,8 +687,8 @@ int DumpFeature::dumpShard(int fd, std::string const& DBserver,
_stats._totalBatches++;
std::unique_ptr<SimpleHttpResult> response(_httpClient->request(
rest::RequestType::GET, url, nullptr, 0));
std::unique_ptr<SimpleHttpResult> response(
_httpClient->request(rest::RequestType::GET, url, nullptr, 0));
if (response == nullptr || !response->isComplete()) {
errorMsg =
@ -825,7 +833,8 @@ int DumpFeature::runClusterDump(std::string& errorMsg) {
return TRI_ERROR_INTERNAL;
}
uint64_t const cid = arangodb::basics::VelocyPackHelper::extractIdValue(parameters);
uint64_t const cid =
arangodb::basics::VelocyPackHelper::extractIdValue(parameters);
std::string const name = arangodb::basics::VelocyPackHelper::getStringValue(
parameters, "name", "");
bool const deleted = arangodb::basics::VelocyPackHelper::getBooleanValue(
@ -967,7 +976,9 @@ int DumpFeature::runClusterDump(std::string& errorMsg) {
}
void DumpFeature::start() {
ClientFeature* client = application_features::ApplicationServer::getFeature<ClientFeature>("Client");
ClientFeature* client =
application_features::ApplicationServer::getFeature<ClientFeature>(
"Client");
int ret = EXIT_SUCCESS;
*_result = ret;
@ -975,22 +986,26 @@ void DumpFeature::start() {
try {
_httpClient = client->createHttpClient();
} catch (...) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot create server connection, giving up!";
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "cannot create server connection, giving up!";
FATAL_ERROR_EXIT();
}
std::string dbName = client->databaseName();
_httpClient->setLocationRewriter(static_cast<void*>(client), &rewriteLocation);
_httpClient->setLocationRewriter(static_cast<void*>(client),
&rewriteLocation);
_httpClient->setUserNamePassword("/", client->username(), client->password());
std::string const versionString = _httpClient->getServerVersion();
if (!_httpClient->isConnected()) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Could not connect to endpoint '" << client->endpoint()
<< "', database: '" << dbName << "', username: '"
<< client->username() << "'";
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Error message: '" << _httpClient->getErrorMessage() << "'";
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "Could not connect to endpoint '" << client->endpoint()
<< "', database: '" << dbName << "', username: '" << client->username()
<< "'";
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "Error message: '" << _httpClient->getErrorMessage() << "'";
FATAL_ERROR_EXIT();
}
@ -1003,8 +1018,8 @@ void DumpFeature::start() {
if (version.first < 3) {
// we can connect to 3.x
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Error: got incompatible server version '" << versionString
<< "'";
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "Error: got incompatible server version '" << versionString << "'";
if (!_force) {
FATAL_ERROR_EXIT();
@ -1015,16 +1030,19 @@ void DumpFeature::start() {
if (_clusterMode) {
if (_tickStart != 0 || _tickEnd != 0) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Error: cannot use tick-start or tick-end on a cluster";
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "Error: cannot use tick-start or tick-end on a cluster";
FATAL_ERROR_EXIT();
}
}
if (!_httpClient->isConnected()) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Lost connection to endpoint '" << client->endpoint()
<< "', database: '" << dbName << "', username: '"
<< client->username() << "'";
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Error message: '" << _httpClient->getErrorMessage() << "'";
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "Lost connection to endpoint '" << client->endpoint()
<< "', database: '" << dbName << "', username: '" << client->username()
<< "'";
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "Error message: '" << _httpClient->getErrorMessage() << "'";
FATAL_ERROR_EXIT();
}
@ -1063,7 +1081,8 @@ void DumpFeature::start() {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "caught exception " << ex.what();
res = TRI_ERROR_INTERNAL;
} catch (...) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Error: caught unknown exception";
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "Error: caught unknown exception";
res = TRI_ERROR_INTERNAL;
}

View File

@ -32,9 +32,9 @@
#include "SimpleHttpClient/SimpleHttpClient.h"
#include "SimpleHttpClient/SimpleHttpResult.h"
#include <regex>
#include <boost/property_tree/detail/xml_parser_utils.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/property_tree/detail/xml_parser_utils.hpp>
#include <regex>
using namespace arangodb;
using namespace arangodb::basics;
@ -67,8 +67,8 @@ ExportFeature::ExportFeature(application_features::ApplicationServer* server,
startsAfter("Config");
startsAfter("Logger");
_outputDirectory =
FileUtils::buildFilename(FileUtils::currentDirectory(), "export");
_outputDirectory = FileUtils::buildFilename(
FileUtils::currentDirectory().result(), "export");
}
void ExportFeature::collectOptions(
@ -84,7 +84,8 @@ void ExportFeature::collectOptions(
options->addOption("--xgmml-label-only", "export only xgmml label",
new BooleanParameter(&_xgmmlLabelOnly));
options->addOption("--xgmml-label-attribute", "specify document attribute that will be the xgmml label",
options->addOption("--xgmml-label-attribute",
"specify document attribute that will be the xgmml label",
new StringParameter(&_xgmmlLabelAttribute));
options->addOption("--output-directory", "output directory",
@ -96,12 +97,15 @@ void ExportFeature::collectOptions(
options->addOption("--progress", "show progress",
new BooleanParameter(&_progress));
options->addOption("--fields", "comma separated list of fileds to export into a csv file",
options->addOption("--fields",
"comma separated list of fileds to export into a csv file",
new StringParameter(&_csvFieldOptions));
std::unordered_set<std::string> exports = {"csv", "json", "jsonl", "xgmml", "xml"};
std::unordered_set<std::string> exports = {"csv", "json", "jsonl", "xgmml",
"xml"};
options->addOption(
"--type", "type of export", new DiscreteValuesParameter<StringParameter>(&_typeExport, exports));
"--type", "type of export",
new DiscreteValuesParameter<StringParameter>(&_typeExport, exports));
}
void ExportFeature::validateOptions(
@ -112,8 +116,9 @@ void ExportFeature::validateOptions(
if (1 == n) {
_outputDirectory = positionals[0];
} else if (1 < n) {
LOG_TOPIC(FATAL, Logger::CONFIG) << "expecting at most one directory, got " +
StringUtils::join(positionals, ", ");
LOG_TOPIC(FATAL, Logger::CONFIG)
<< "expecting at most one directory, got " +
StringUtils::join(positionals, ", ");
FATAL_ERROR_EXIT();
}
@ -126,24 +131,28 @@ void ExportFeature::validateOptions(
}
if (_graphName.empty() && _collections.empty()) {
LOG_TOPIC(FATAL, Logger::CONFIG) << "expecting at least one collection or one graph name";
LOG_TOPIC(FATAL, Logger::CONFIG)
<< "expecting at least one collection or one graph name";
FATAL_ERROR_EXIT();
}
if (_typeExport == "xgmml" && _graphName.empty()) {
LOG_TOPIC(FATAL, Logger::CONFIG) << "expecting a graph name to dump a graph";
LOG_TOPIC(FATAL, Logger::CONFIG)
<< "expecting a graph name to dump a graph";
FATAL_ERROR_EXIT();
}
if ( (_typeExport == "json" || _typeExport == "jsonl" || _typeExport == "csv") &&
_collections.empty()) {
LOG_TOPIC(FATAL, Logger::CONFIG) << "expecting at least one collection";
FATAL_ERROR_EXIT();
if ((_typeExport == "json" || _typeExport == "jsonl" ||
_typeExport == "csv") &&
_collections.empty()) {
LOG_TOPIC(FATAL, Logger::CONFIG) << "expecting at least one collection";
FATAL_ERROR_EXIT();
}
if (_typeExport == "csv") {
if (_csvFieldOptions.empty()) {
LOG_TOPIC(FATAL, Logger::CONFIG) << "expecting at least one field definition";
LOG_TOPIC(FATAL, Logger::CONFIG)
<< "expecting at least one field definition";
FATAL_ERROR_EXIT();
}
@ -159,23 +168,28 @@ void ExportFeature::prepare() {
isDirectory = TRI_IsDirectory(_outputDirectory.c_str());
if (isDirectory) {
std::vector<std::string> files(TRI_FullTreeDirectory(_outputDirectory.c_str()));
std::vector<std::string> files(
TRI_FullTreeDirectory(_outputDirectory.c_str()));
// we don't care if the target directory is empty
isEmptyDirectory = (files.size() <= 1); // TODO: TRI_FullTreeDirectory always returns at least one element (""), even if directory is empty?
isEmptyDirectory = (files.size() <= 1); // TODO: TRI_FullTreeDirectory
// always returns at least one
// element (""), even if
// directory is empty?
}
}
if (_outputDirectory.empty() ||
(TRI_ExistsFile(_outputDirectory.c_str()) && !isDirectory)) {
LOG_TOPIC(FATAL, Logger::SYSCALL) << "cannot write to output directory '" << _outputDirectory
<< "'";
LOG_TOPIC(FATAL, Logger::SYSCALL) << "cannot write to output directory '"
<< _outputDirectory << "'";
FATAL_ERROR_EXIT();
}
if (isDirectory && !isEmptyDirectory && !_overwrite) {
LOG_TOPIC(FATAL, Logger::SYSCALL) << "output directory '" << _outputDirectory
<< "' already exists. use \"--overwrite true\" to "
"overwrite data in it";
LOG_TOPIC(FATAL, Logger::SYSCALL)
<< "output directory '" << _outputDirectory
<< "' already exists. use \"--overwrite true\" to "
"overwrite data in it";
FATAL_ERROR_EXIT();
}
@ -186,15 +200,18 @@ void ExportFeature::prepare() {
errorMessage);
if (res != TRI_ERROR_NO_ERROR) {
LOG_TOPIC(ERR, Logger::SYSCALL) << "unable to create output directory '" << _outputDirectory
<< "': " << errorMessage;
LOG_TOPIC(ERR, Logger::SYSCALL) << "unable to create output directory '"
<< _outputDirectory
<< "': " << errorMessage;
FATAL_ERROR_EXIT();
}
}
}
void ExportFeature::start() {
ClientFeature* client = application_features::ApplicationServer::getFeature<ClientFeature>("Client");
ClientFeature* client =
application_features::ApplicationServer::getFeature<ClientFeature>(
"Client");
int ret = EXIT_SUCCESS;
*_result = ret;
@ -204,7 +221,8 @@ void ExportFeature::start() {
try {
httpClient = client->createHttpClient();
} catch (...) {
LOG_TOPIC(FATAL, Logger::COMMUNICATION) << "cannot create server connection, giving up!";
LOG_TOPIC(FATAL, Logger::COMMUNICATION)
<< "cannot create server connection, giving up!";
FATAL_ERROR_EXIT();
}
@ -215,10 +233,12 @@ void ExportFeature::start() {
httpClient->getServerVersion();
if (!httpClient->isConnected()) {
LOG_TOPIC(ERR, Logger::COMMUNICATION) << "Could not connect to endpoint '" << client->endpoint()
<< "', database: '" << client->databaseName() << "', username: '"
<< client->username() << "'";
LOG_TOPIC(FATAL, Logger::COMMUNICATION) << httpClient->getErrorMessage() << "'";
LOG_TOPIC(ERR, Logger::COMMUNICATION)
<< "Could not connect to endpoint '" << client->endpoint()
<< "', database: '" << client->databaseName() << "', username: '"
<< client->username() << "'";
LOG_TOPIC(FATAL, Logger::COMMUNICATION) << httpClient->getErrorMessage()
<< "'";
FATAL_ERROR_EXIT();
}
@ -231,12 +251,14 @@ void ExportFeature::start() {
uint64_t exportedSize = 0;
if (_typeExport == "json" || _typeExport == "jsonl" || _typeExport == "xml" || _typeExport == "csv") {
if (_typeExport == "json" || _typeExport == "jsonl" || _typeExport == "xml" ||
_typeExport == "csv") {
if (_collections.size()) {
collectionExport(httpClient.get());
for (auto const& collection : _collections) {
std::string filePath = _outputDirectory + TRI_DIR_SEPARATOR_STR + collection + "." + _typeExport;
std::string filePath = _outputDirectory + TRI_DIR_SEPARATOR_STR +
collection + "." + _typeExport;
int64_t fileSize = TRI_SizeFile(filePath.c_str());
if (0 < fileSize) {
@ -246,7 +268,8 @@ void ExportFeature::start() {
}
} else if (_typeExport == "xgmml" && _graphName.size()) {
graphExport(httpClient.get());
std::string filePath = _outputDirectory + TRI_DIR_SEPARATOR_STR + _graphName + "." + _typeExport;
std::string filePath = _outputDirectory + TRI_DIR_SEPARATOR_STR +
_graphName + "." + _typeExport;
int64_t fileSize = TRI_SizeFile(filePath.c_str());
if (0 < fileSize) {
@ -254,7 +277,9 @@ void ExportFeature::start() {
}
}
std::cout << "Processed " << _collections.size() << " collection(s), wrote " << exportedSize << " byte(s), " << _httpRequestsDone << " HTTP request(s)" << std::endl;
std::cout << "Processed " << _collections.size() << " collection(s), wrote "
<< exportedSize << " byte(s), " << _httpRequestsDone
<< " HTTP request(s)" << std::endl;
*_result = ret;
}
@ -264,13 +289,14 @@ void ExportFeature::collectionExport(SimpleHttpClient* httpClient) {
for (auto const& collection : _collections) {
if (_progress) {
std::cout << "# Exporting collection '" << collection << "'..." << std::endl;
std::cout << "# Exporting collection '" << collection << "'..."
<< std::endl;
}
_currentCollection = collection;
std::string fileName =
_outputDirectory + TRI_DIR_SEPARATOR_STR + collection + "." + _typeExport;
std::string fileName = _outputDirectory + TRI_DIR_SEPARATOR_STR +
collection + "." + _typeExport;
// remove an existing file first
if (TRI_ExistsFile(fileName.c_str())) {
@ -287,17 +313,19 @@ void ExportFeature::collectionExport(SimpleHttpClient* httpClient) {
post.close();
post.close();
std::shared_ptr<VPackBuilder> parsedBody = httpCall(httpClient, url, rest::RequestType::POST, post.toJson());
std::shared_ptr<VPackBuilder> parsedBody =
httpCall(httpClient, url, rest::RequestType::POST, post.toJson());
VPackSlice body = parsedBody->slice();
int fd = TRI_CREATE(fileName.c_str(), O_CREAT | O_EXCL | O_RDWR | TRI_O_CLOEXEC,
S_IRUSR | S_IWUSR);
int fd =
TRI_CREATE(fileName.c_str(), O_CREAT | O_EXCL | O_RDWR | TRI_O_CLOEXEC,
S_IRUSR | S_IWUSR);
if (fd < 0) {
errorMsg = "cannot write to file '" + fileName + "'";
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CANNOT_WRITE_FILE, errorMsg);
}
TRI_DEFER(TRI_CLOSE(fd));
_firstLine = true;
@ -306,8 +334,9 @@ void ExportFeature::collectionExport(SimpleHttpClient* httpClient) {
writeToFile(fd, openingBracket, fileName);
} else if (_typeExport == "xml") {
std::string xmlHeader = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n"
"<collection name=\"";
std::string xmlHeader =
"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n"
"<collection name=\"";
xmlHeader.append(encode_char_entities(collection));
xmlHeader.append("\">\n");
writeToFile(fd, xmlHeader, fileName);
@ -315,7 +344,7 @@ void ExportFeature::collectionExport(SimpleHttpClient* httpClient) {
} else if (_typeExport == "csv") {
std::string firstLine = "";
bool isFirstValue = true;
for(auto const& str : _csvFields) {
for (auto const& str : _csvFields) {
if (isFirstValue) {
firstLine += str;
isFirstValue = false;
@ -334,7 +363,8 @@ void ExportFeature::collectionExport(SimpleHttpClient* httpClient) {
parsedBody = httpCall(httpClient, url, rest::RequestType::PUT);
body = parsedBody->slice();
writeCollectionBatch(fd, VPackArrayIterator(body.get("result")), fileName);
writeCollectionBatch(fd, VPackArrayIterator(body.get("result")),
fileName);
}
if (_typeExport == "json") {
@ -347,7 +377,8 @@ void ExportFeature::collectionExport(SimpleHttpClient* httpClient) {
}
}
void ExportFeature::writeCollectionBatch(int fd, VPackArrayIterator it, std::string const& fileName) {
void ExportFeature::writeCollectionBatch(int fd, VPackArrayIterator it,
std::string const& fileName) {
std::string line;
line.reserve(1024);
@ -375,7 +406,7 @@ void ExportFeature::writeCollectionBatch(int fd, VPackArrayIterator it, std::str
line.clear();
bool isFirstValue = true;
for(auto const& key : _csvFields) {
for (auto const& key : _csvFields) {
std::string value = "";
if (isFirstValue) {
@ -399,7 +430,8 @@ void ExportFeature::writeCollectionBatch(int fd, VPackArrayIterator it, std::str
value = std::regex_replace(value, std::regex("\""), "\"\"");
if (value.find(",") != std::string::npos || value.find("\"\"") != std::string::npos) {
if (value.find(",") != std::string::npos ||
value.find("\"\"") != std::string::npos) {
value = "\"" + value;
value.append("\"");
}
@ -426,14 +458,17 @@ void ExportFeature::writeCollectionBatch(int fd, VPackArrayIterator it, std::str
}
}
void ExportFeature::writeToFile(int fd, std::string const& line, std::string const& fileName) {
void ExportFeature::writeToFile(int fd, std::string const& line,
std::string const& fileName) {
if (!TRI_WritePointer(fd, line.c_str(), line.size())) {
std::string errorMsg = "cannot write to file '" + fileName + "'";
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CANNOT_WRITE_FILE, errorMsg);
}
}
std::shared_ptr<VPackBuilder> ExportFeature::httpCall(SimpleHttpClient* httpClient, std::string const& url, rest::RequestType requestType, std::string postBody) {
std::shared_ptr<VPackBuilder> ExportFeature::httpCall(
SimpleHttpClient* httpClient, std::string const& url,
rest::RequestType requestType, std::string postBody) {
std::string errorMsg;
std::unique_ptr<SimpleHttpResult> response(
@ -449,12 +484,13 @@ std::shared_ptr<VPackBuilder> ExportFeature::httpCall(SimpleHttpClient* httpClie
std::shared_ptr<VPackBuilder> parsedBody;
if (response->wasHttpError()) {
if (response->getHttpReturnCode() == 404) {
if (_currentGraph.size()) {
LOG_TOPIC(FATAL, Logger::CONFIG) << "Graph '" << _currentGraph << "' not found.";
LOG_TOPIC(FATAL, Logger::CONFIG) << "Graph '" << _currentGraph
<< "' not found.";
} else if (_currentCollection.size()) {
LOG_TOPIC(FATAL, Logger::CONFIG) << "Collection " << _currentCollection << " not found.";
LOG_TOPIC(FATAL, Logger::CONFIG) << "Collection " << _currentCollection
<< " not found.";
}
FATAL_ERROR_EXIT();
@ -462,8 +498,8 @@ std::shared_ptr<VPackBuilder> ExportFeature::httpCall(SimpleHttpClient* httpClie
parsedBody = response->getBodyVelocyPack();
std::cout << parsedBody->toJson() << std::endl;
errorMsg = "got invalid response from server: HTTP " +
StringUtils::itoa(response->getHttpReturnCode()) + ": " +
response->getHttpReturnMessage();
StringUtils::itoa(response->getHttpReturnCode()) + ": " +
response->getHttpReturnMessage();
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, errorMsg);
}
}
@ -494,20 +530,23 @@ void ExportFeature::graphExport(SimpleHttpClient* httpClient) {
if (_progress) {
std::cout << "# Export graph '" << _graphName << "'" << std::endl;
}
std::string const url = "/_api/gharial/" + StringUtils::urlEncode(_graphName);
std::shared_ptr<VPackBuilder> parsedBody = httpCall(httpClient, url, rest::RequestType::GET);
std::string const url =
"/_api/gharial/" + StringUtils::urlEncode(_graphName);
std::shared_ptr<VPackBuilder> parsedBody =
httpCall(httpClient, url, rest::RequestType::GET);
VPackSlice body = parsedBody->slice();
std::unordered_set<std::string> collections;
for(auto const& edgeDefs : VPackArrayIterator(body.get("graph").get("edgeDefinitions"))) {
for (auto const& edgeDefs :
VPackArrayIterator(body.get("graph").get("edgeDefinitions"))) {
collections.insert(edgeDefs.get("collection").copyString());
for(auto const& from : VPackArrayIterator(edgeDefs.get("from"))) {
for (auto const& from : VPackArrayIterator(edgeDefs.get("from"))) {
collections.insert(from.copyString());
}
for(auto const& to : VPackArrayIterator(edgeDefs.get("to"))) {
for (auto const& to : VPackArrayIterator(edgeDefs.get("to"))) {
collections.insert(to.copyString());
}
}
@ -517,18 +556,23 @@ void ExportFeature::graphExport(SimpleHttpClient* httpClient) {
}
} else {
if (_progress) {
std::cout << "# Export graph with collections " << StringUtils::join(_collections, ", ") << " as '" << _graphName << "'" << std::endl;
std::cout << "# Export graph with collections "
<< StringUtils::join(_collections, ", ") << " as '"
<< _graphName << "'" << std::endl;
}
}
std::string fileName = _outputDirectory + TRI_DIR_SEPARATOR_STR + _graphName + "." + _typeExport;
std::string fileName =
_outputDirectory + TRI_DIR_SEPARATOR_STR + _graphName + "." + _typeExport;
// remove an existing file first
if (TRI_ExistsFile(fileName.c_str())) {
TRI_UnlinkFile(fileName.c_str());
}
int fd = TRI_CREATE(fileName.c_str(), O_CREAT | O_EXCL | O_RDWR | TRI_O_CLOEXEC, S_IRUSR | S_IWUSR);
int fd =
TRI_CREATE(fileName.c_str(), O_CREAT | O_EXCL | O_RDWR | TRI_O_CLOEXEC,
S_IRUSR | S_IWUSR);
if (fd < 0) {
errorMsg = "cannot write to file '" + fileName + "'";
@ -536,7 +580,8 @@ void ExportFeature::graphExport(SimpleHttpClient* httpClient) {
}
TRI_DEFER(TRI_CLOSE(fd));
std::string xmlHeader = R"(<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
std::string xmlHeader =
R"(<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<graph label=")";
writeToFile(fd, xmlHeader, fileName);
writeToFile(fd, _graphName, fileName);
@ -549,7 +594,8 @@ directed="1">
for (auto const& collection : _collections) {
if (_progress) {
std::cout << "# Exporting collection '" << collection << "'..." << std::endl;
std::cout << "# Exporting collection '" << collection << "'..."
<< std::endl;
}
std::string const url = "_api/cursor";
@ -562,7 +608,8 @@ directed="1">
post.close();
post.close();
std::shared_ptr<VPackBuilder> parsedBody = httpCall(httpClient, url, rest::RequestType::POST, post.toJson());
std::shared_ptr<VPackBuilder> parsedBody =
httpCall(httpClient, url, rest::RequestType::POST, post.toJson());
VPackSlice body = parsedBody->slice();
writeGraphBatch(fd, VPackArrayIterator(body.get("result")), fileName);
@ -579,17 +626,26 @@ directed="1">
writeToFile(fd, closingGraphTag, fileName);
if (_skippedDeepNested) {
std::cout << "skipped " << _skippedDeepNested << " deep nested objects / arrays" << std::endl;
std::cout << "skipped " << _skippedDeepNested
<< " deep nested objects / arrays" << std::endl;
}
}
void ExportFeature::writeGraphBatch(int fd, VPackArrayIterator it, std::string const& fileName) {
void ExportFeature::writeGraphBatch(int fd, VPackArrayIterator it,
std::string const& fileName) {
std::string xmlTag;
for(auto const& doc : it) {
for (auto const& doc : it) {
if (doc.hasKey("_from")) {
xmlTag = "<edge label=\"" + encode_char_entities(doc.hasKey(_xgmmlLabelAttribute) && doc.get(_xgmmlLabelAttribute).isString() ? doc.get(_xgmmlLabelAttribute).copyString() : "Default-Label") +
"\" source=\"" + encode_char_entities(doc.get("_from").copyString()) + "\" target=\"" + encode_char_entities(doc.get("_to").copyString()) + "\"";
xmlTag =
"<edge label=\"" +
encode_char_entities(doc.hasKey(_xgmmlLabelAttribute) &&
doc.get(_xgmmlLabelAttribute).isString()
? doc.get(_xgmmlLabelAttribute).copyString()
: "Default-Label") +
"\" source=\"" + encode_char_entities(doc.get("_from").copyString()) +
"\" target=\"" + encode_char_entities(doc.get("_to").copyString()) +
"\"";
writeToFile(fd, xmlTag, fileName);
if (!_xgmmlLabelOnly) {
xmlTag = ">\n";
@ -608,8 +664,13 @@ void ExportFeature::writeGraphBatch(int fd, VPackArrayIterator it, std::string c
}
} else {
xmlTag = "<node label=\"" + encode_char_entities(doc.hasKey(_xgmmlLabelAttribute) && doc.get(_xgmmlLabelAttribute).isString() ? doc.get(_xgmmlLabelAttribute).copyString() : "Default-Label") +
"\" id=\"" + encode_char_entities(doc.get("_id").copyString()) + "\"";
xmlTag =
"<node label=\"" +
encode_char_entities(doc.hasKey(_xgmmlLabelAttribute) &&
doc.get(_xgmmlLabelAttribute).isString()
? doc.get(_xgmmlLabelAttribute).copyString()
: "Default-Label") +
"\" id=\"" + encode_char_entities(doc.get("_id").copyString()) + "\"";
writeToFile(fd, xmlTag, fileName);
if (!_xgmmlLabelOnly) {
xmlTag = ">\n";
@ -630,11 +691,13 @@ void ExportFeature::writeGraphBatch(int fd, VPackArrayIterator it, std::string c
}
}
void ExportFeature::xgmmlWriteOneAtt(int fd, std::string const& fileName, VPackSlice const& slice, std::string const& name, int deep) {
void ExportFeature::xgmmlWriteOneAtt(int fd, std::string const& fileName,
VPackSlice const& slice,
std::string const& name, int deep) {
std::string value, type, xmlTag;
if (deep == 0 &&
(name == "_id" || name == "_key" || name == "_rev" || name == "_from" || name == "_to")) {
if (deep == 0 && (name == "_id" || name == "_key" || name == "_rev" ||
name == "_from" || name == "_to")) {
return;
}
@ -664,17 +727,21 @@ void ExportFeature::xgmmlWriteOneAtt(int fd, std::string const& fileName, VPackS
}
} else {
xmlTag = " <att name=\"" + encode_char_entities(name) + "\" type=\"string\" value=\"" + encode_char_entities(slice.toString()) + "\"/>\n";
xmlTag = " <att name=\"" + encode_char_entities(name) +
"\" type=\"string\" value=\"" +
encode_char_entities(slice.toString()) + "\"/>\n";
writeToFile(fd, xmlTag, fileName);
return;
}
if (!type.empty()) {
xmlTag = " <att name=\"" + encode_char_entities(name) + "\" type=\"" + type + "\" value=\"" + encode_char_entities(value) + "\"/>\n";
xmlTag = " <att name=\"" + encode_char_entities(name) + "\" type=\"" +
type + "\" value=\"" + encode_char_entities(value) + "\"/>\n";
writeToFile(fd, xmlTag, fileName);
} else if (slice.isArray()) {
xmlTag = " <att name=\"" + encode_char_entities(name) + "\" type=\"list\">\n";
xmlTag =
" <att name=\"" + encode_char_entities(name) + "\" type=\"list\">\n";
writeToFile(fd, xmlTag, fileName);
for (auto const& val : VPackArrayIterator(slice)) {
@ -685,7 +752,8 @@ void ExportFeature::xgmmlWriteOneAtt(int fd, std::string const& fileName, VPackS
writeToFile(fd, xmlTag, fileName);
} else if (slice.isObject()) {
xmlTag = " <att name=\"" + encode_char_entities(name) + "\" type=\"list\">\n";
xmlTag =
" <att name=\"" + encode_char_entities(name) + "\" type=\"list\">\n";
writeToFile(fd, xmlTag, fileName);
for (auto const& it : VPackObjectIterator(slice)) {

View File

@ -68,14 +68,14 @@ RestoreFeature::RestoreFeature(application_features::ApplicationServer* server,
_defaultNumberOfShards(1),
_defaultReplicationFactor(1),
_result(result),
_stats{ 0, 0, 0 } {
_stats{0, 0, 0} {
requiresElevatedPrivileges(false);
setOptional(false);
startsAfter("Client");
startsAfter("Logger");
_inputDirectory =
FileUtils::buildFilename(FileUtils::currentDirectory(), "dump");
FileUtils::buildFilename(FileUtils::currentDirectory().result(), "dump");
}
void RestoreFeature::collectOptions(
@ -112,8 +112,7 @@ void RestoreFeature::collectOptions(
options->addOption("--overwrite", "overwrite collections if they exist",
new BooleanParameter(&_overwrite));
options->addOption("--recycle-ids",
"recycle collection ids from dump",
options->addOption("--recycle-ids", "recycle collection ids from dump",
new BooleanParameter(&_recycleIds));
options->addOption("--default-number-of-shards",
@ -137,8 +136,9 @@ void RestoreFeature::validateOptions(
if (1 == n) {
_inputDirectory = positionals[0];
} else if (1 < n) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "expecting at most one directory, got " +
StringUtils::join(positionals, ", ");
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "expecting at most one directory, got " +
StringUtils::join(positionals, ", ");
FATAL_ERROR_EXIT();
}
@ -162,7 +162,8 @@ void RestoreFeature::prepare() {
// .............................................................................
if (_inputDirectory == "" || !TRI_IsDirectory(_inputDirectory.c_str())) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "input directory '" << _inputDirectory << "' does not exist";
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "input directory '" << _inputDirectory << "' does not exist";
FATAL_ERROR_EXIT();
}
@ -188,9 +189,8 @@ int RestoreFeature::tryCreateDatabase(ClientFeature* client,
std::string const body = builder.slice().toJson();
std::unique_ptr<SimpleHttpResult> response(
_httpClient->request(rest::RequestType::POST, "/_api/database",
body.c_str(), body.size()));
std::unique_ptr<SimpleHttpResult> response(_httpClient->request(
rest::RequestType::POST, "/_api/database", body.c_str(), body.size()));
if (response == nullptr || !response->isComplete()) {
return TRI_ERROR_INTERNAL;
@ -202,7 +202,7 @@ int RestoreFeature::tryCreateDatabase(ClientFeature* client,
returnCode == static_cast<int>(rest::ResponseCode::CREATED)) {
// all ok
return TRI_ERROR_NO_ERROR;
}
}
if (returnCode == static_cast<int>(rest::ResponseCode::UNAUTHORIZED) ||
returnCode == static_cast<int>(rest::ResponseCode::FORBIDDEN)) {
// invalid authorization
@ -238,14 +238,14 @@ int RestoreFeature::sendRestoreCollection(VPackSlice const& slice,
<< _defaultNumberOfShards << std::endl;
url += "&numberOfShards=" + std::to_string(_defaultNumberOfShards);
}
if (!slice.hasKey(std::vector<std::string>({"parameters", "replicationFactor"}))) {
if (!slice.hasKey(
std::vector<std::string>({"parameters", "replicationFactor"}))) {
// No replication factor given, so take the default:
std::cerr << "# no replication information specified for collection '"
<< name << "', using default replication factor "
<< _defaultReplicationFactor << std::endl;
url += "&replicationFactor=" + std::to_string(_defaultReplicationFactor);
}
}
std::string const body = slice.toJson();
@ -312,8 +312,8 @@ int RestoreFeature::sendRestoreData(std::string const& cname,
(_recycleIds ? "true" : "false") + "&force=" +
(_force ? "true" : "false");
std::unique_ptr<SimpleHttpResult> response(_httpClient->request(
rest::RequestType::PUT, url, buffer, bufferSize));
std::unique_ptr<SimpleHttpResult> response(
_httpClient->request(rest::RequestType::PUT, url, buffer, bufferSize));
if (response == nullptr || !response->isComplete()) {
errorMsg =
@ -344,7 +344,7 @@ static bool SortCollections(VPackSlice const& l, VPackSlice const& r) {
// We first have to create collections defining the distribution.
VPackSlice leftDist = left.get("distributeShardsLike");
VPackSlice rightDist = right.get("distributeShardsLike");
if (leftDist.isNone() && !rightDist.isNone()) {
return true;
}
@ -471,7 +471,7 @@ int RestoreFeature::processInputDirectory(std::string& errorMsg) {
}
std::sort(collections.begin(), collections.end(), SortCollections);
StringBuffer buffer(TRI_UNKNOWN_MEM_ZONE);
// step2: run the actual import
@ -654,7 +654,9 @@ int RestoreFeature::processInputDirectory(std::string& errorMsg) {
}
void RestoreFeature::start() {
ClientFeature* client = application_features::ApplicationServer::getFeature<ClientFeature>("Client");
ClientFeature* client =
application_features::ApplicationServer::getFeature<ClientFeature>(
"Client");
int ret = EXIT_SUCCESS;
*_result = ret;
@ -662,13 +664,15 @@ void RestoreFeature::start() {
try {
_httpClient = client->createHttpClient();
} catch (...) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot create server connection, giving up!";
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "cannot create server connection, giving up!";
FATAL_ERROR_EXIT();
}
std::string dbName = client->databaseName();
_httpClient->setLocationRewriter(static_cast<void*>(client), &rewriteLocation);
_httpClient->setLocationRewriter(static_cast<void*>(client),
&rewriteLocation);
_httpClient->setUserNamePassword("/", client->username(), client->password());
int err = TRI_ERROR_NO_ERROR;
@ -683,8 +687,10 @@ void RestoreFeature::start() {
int res = tryCreateDatabase(client, dbName);
if (res != TRI_ERROR_NO_ERROR) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Could not create database '" << dbName << "'";
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << _httpClient->getErrorMessage() << "'";
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Could not create database '"
<< dbName << "'";
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< _httpClient->getErrorMessage() << "'";
FATAL_ERROR_EXIT();
}
@ -696,9 +702,11 @@ void RestoreFeature::start() {
}
if (!_httpClient->isConnected()) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Could not connect to endpoint "
<< _httpClient->getEndpointSpecification();
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << _httpClient->getErrorMessage() << "'";
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "Could not connect to endpoint "
<< _httpClient->getEndpointSpecification();
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << _httpClient->getErrorMessage()
<< "'";
FATAL_ERROR_EXIT();
}
@ -710,7 +718,8 @@ void RestoreFeature::start() {
if (version.first < 3) {
// we can connect to 3.x
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "got incompatible server version '" << versionString << "'";
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "got incompatible server version '" << versionString << "'";
if (!_force) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "giving up!";
@ -735,7 +744,8 @@ void RestoreFeature::start() {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "caught exception " << ex.what();
res = TRI_ERROR_INTERNAL;
} catch (...) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Error: caught unknown exception";
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "Error: caught unknown exception";
res = TRI_ERROR_INTERNAL;
}
@ -759,6 +769,6 @@ void RestoreFeature::start() {
<< std::endl;
}
}
*_result = ret;
}

View File

@ -1698,14 +1698,14 @@ void V8ClientConnection::initServer(v8::Isolate* isolate,
connection_inst->SetInternalFieldCount(2);
TRI_AddGlobalVariableVocbase(isolate, context,
TRI_AddGlobalVariableVocbase(isolate,
TRI_V8_ASCII_STRING("ArangoConnection"),
connection_proto->NewInstance());
ConnectionTempl.Reset(isolate, connection_inst);
// add the client connection to the context:
TRI_AddGlobalVariableVocbase(isolate, context,
TRI_AddGlobalVariableVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_ARANGO"),
WrapV8ClientConnection(isolate, this));
}

View File

@ -659,7 +659,7 @@ bool V8ShellFeature::runUnitTests(std::vector<std::string> const& files,
++i;
}
TRI_AddGlobalVariableVocbase(_isolate, context,
TRI_AddGlobalVariableVocbase(_isolate,
TRI_V8_ASCII_STRING2(_isolate, "SYS_UNIT_TESTS"),
sysTestFiles);
@ -838,30 +838,30 @@ void V8ShellFeature::initGlobals() {
// set pretty print default
TRI_AddGlobalVariableVocbase(
_isolate, context, TRI_V8_ASCII_STRING2(_isolate, "PRETTY_PRINT"),
_isolate, TRI_V8_ASCII_STRING2(_isolate, "PRETTY_PRINT"),
v8::Boolean::New(_isolate, _console->prettyPrint()));
// add colors for print.js
TRI_AddGlobalVariableVocbase(_isolate, context,
TRI_AddGlobalVariableVocbase(_isolate,
TRI_V8_ASCII_STRING2(_isolate, "COLOR_OUTPUT"),
v8::Boolean::New(_isolate, _console->colors()));
// string functions
TRI_AddGlobalVariableVocbase(
_isolate, context, TRI_V8_ASCII_STRING2(_isolate, "NORMALIZE_STRING"),
_isolate, TRI_V8_ASCII_STRING2(_isolate, "NORMALIZE_STRING"),
v8::FunctionTemplate::New(_isolate, JS_NormalizeString)->GetFunction());
TRI_AddGlobalVariableVocbase(
_isolate, context, TRI_V8_ASCII_STRING2(_isolate, "COMPARE_STRING"),
_isolate, TRI_V8_ASCII_STRING2(_isolate, "COMPARE_STRING"),
v8::FunctionTemplate::New(_isolate, JS_CompareString)->GetFunction());
TRI_AddGlobalVariableVocbase(
_isolate, context,
_isolate,
TRI_V8_ASCII_STRING2(_isolate, "ARANGODB_CLIENT_VERSION"),
v8::FunctionTemplate::New(_isolate, JS_VersionClient)->GetFunction());
// is quite
TRI_AddGlobalVariableVocbase(_isolate, context,
TRI_AddGlobalVariableVocbase(_isolate,
TRI_V8_ASCII_STRING2(_isolate, "ARANGO_QUIET"),
v8::Boolean::New(_isolate, _console->quiet()));
@ -894,7 +894,7 @@ void V8ShellFeature::initGlobals() {
}
if (_currentModuleDirectory) {
modules += sep + FileUtils::currentDirectory();
modules += sep + FileUtils::currentDirectory().result();
}
// we take the last entry in _startupDirectory as global path;
@ -908,25 +908,23 @@ void V8ShellFeature::initGlobals() {
v8::Local<v8::Value> console = v8::External::New(_isolate, _console);
TRI_AddGlobalVariableVocbase(
_isolate, context, TRI_V8_ASCII_STRING2(_isolate, "SYS_OUTPUT"),
_isolate, TRI_V8_ASCII_STRING2(_isolate, "SYS_OUTPUT"),
v8::FunctionTemplate::New(_isolate, JS_PagerOutput, console)
->GetFunction());
TRI_AddGlobalVariableVocbase(
_isolate, context, TRI_V8_ASCII_STRING2(_isolate, "SYS_START_PAGER"),
_isolate, TRI_V8_ASCII_STRING2(_isolate, "SYS_START_PAGER"),
v8::FunctionTemplate::New(_isolate, JS_StartOutputPager, console)
->GetFunction());
TRI_AddGlobalVariableVocbase(
_isolate, context, TRI_V8_ASCII_STRING2(_isolate, "SYS_STOP_PAGER"),
_isolate, TRI_V8_ASCII_STRING2(_isolate, "SYS_STOP_PAGER"),
v8::FunctionTemplate::New(_isolate, JS_StopOutputPager, console)
->GetFunction());
}
void V8ShellFeature::initMode(ShellFeature::RunMode runMode,
std::vector<std::string> const& positionals) {
auto context = _isolate->GetCurrentContext();
// add positional arguments
v8::Handle<v8::Array> p = v8::Array::New(_isolate, (int)positionals.size());
@ -934,31 +932,31 @@ void V8ShellFeature::initMode(ShellFeature::RunMode runMode,
p->Set(i, TRI_V8_STD_STRING2(_isolate, positionals[i]));
}
TRI_AddGlobalVariableVocbase(_isolate, context,
TRI_AddGlobalVariableVocbase(_isolate,
TRI_V8_ASCII_STRING2(_isolate, "ARGUMENTS"), p);
// set mode flags
TRI_AddGlobalVariableVocbase(
_isolate, context, TRI_V8_ASCII_STRING2(_isolate, "IS_EXECUTE_SCRIPT"),
_isolate, TRI_V8_ASCII_STRING2(_isolate, "IS_EXECUTE_SCRIPT"),
v8::Boolean::New(_isolate,
runMode == ShellFeature::RunMode::EXECUTE_SCRIPT));
TRI_AddGlobalVariableVocbase(
_isolate, context, TRI_V8_ASCII_STRING2(_isolate, "IS_EXECUTE_STRING"),
_isolate, TRI_V8_ASCII_STRING2(_isolate, "IS_EXECUTE_STRING"),
v8::Boolean::New(_isolate,
runMode == ShellFeature::RunMode::EXECUTE_STRING));
TRI_AddGlobalVariableVocbase(
_isolate, context, TRI_V8_ASCII_STRING2(_isolate, "IS_CHECK_SCRIPT"),
_isolate, TRI_V8_ASCII_STRING2(_isolate, "IS_CHECK_SCRIPT"),
v8::Boolean::New(_isolate,
runMode == ShellFeature::RunMode::CHECK_SYNTAX));
TRI_AddGlobalVariableVocbase(
_isolate, context, TRI_V8_ASCII_STRING2(_isolate, "IS_UNIT_TESTS"),
_isolate, TRI_V8_ASCII_STRING2(_isolate, "IS_UNIT_TESTS"),
v8::Boolean::New(_isolate, runMode == ShellFeature::RunMode::UNIT_TESTS));
TRI_AddGlobalVariableVocbase(
_isolate, context, TRI_V8_ASCII_STRING2(_isolate, "IS_JS_LINT"),
_isolate, TRI_V8_ASCII_STRING2(_isolate, "IS_JS_LINT"),
v8::Boolean::New(_isolate, runMode == ShellFeature::RunMode::JSLINT));
}

View File

@ -11,7 +11,7 @@ endif()
# Global macros ----------------------------------------------------------------
macro (generate_root_config name)
message(INFO "reading configuration file ${PROJECT_SOURCE_DIR}/etc/arangodb3/${name}.conf.in")
message(STATUS "reading configuration file ${PROJECT_SOURCE_DIR}/etc/arangodb3/${name}.conf.in")
FILE(READ ${PROJECT_SOURCE_DIR}/etc/arangodb3/${name}.conf.in FileContent)
STRING(REPLACE "@PKGDATADIR@" "@ROOTDIR@/${CMAKE_INSTALL_DATAROOTDIR_ARANGO}"

View File

@ -1,4 +1,5 @@
/* jshint strict: false */
/*global ArangoClusterInfo */
// //////////////////////////////////////////////////////////////////////////////
// / @brief querying and managing collections
@ -414,10 +415,17 @@ function get_api_collection (req, res) {
result = collectionRepresentation(collection, false, false, false);
result.revision = collection.revision();
actions.resultOk(req, res, actions.HTTP_OK, result);
}
else if (sub === 'shards') {
result = collectionRepresentation(collection, false, false, false);
result.shards = Object.keys(ArangoClusterInfo.getCollectionInfo(arangodb.db._name(), collection.name()).shardShorts);
actions.resultOk(req, res, actions.HTTP_OK, result);
} else {
actions.resultNotFound(req, res, arangodb.ERROR_HTTP_NOT_FOUND,
"expecting one of the resources 'count',"
+ " 'figures', 'properties', 'parameter'");
"expecting one of the resources 'checksum', 'count',"
+ " 'figures', 'properties', 'revision', 'shards'");
}
} else {
actions.resultBad(req, res, arangodb.ERROR_HTTP_BAD_PARAMETER,

View File

@ -2320,7 +2320,7 @@
var callback = function (error) {
if (error) {
arangoHelper.arangoError('Query', 'Could not reload Queries');
arangoHelper.arangoError('Query', 'Could not reload queries');
} else {
self.updateLocalQueries();
self.updateQueryTable();
@ -2379,21 +2379,31 @@
};
var first = true;
var part = [];
// self.tableDescription.rows.push(;
var headers = {}; // quick lookup cache
var pos = 0;
_.each(data.original, function (obj) {
if (first === true) {
tableDescription.titles = Object.keys(obj);
tableDescription.titles.forEach(function (t) {
headers[String(t)] = pos++;
});
first = false;
}
_.each(obj, function (val) {
var part = Array(pos);
_.each(obj, function (val, key) {
if (!headers.hasOwnProperty(key)) {
// different attribute
return;
}
if (typeof val === 'object') {
val = JSON.stringify(val);
}
part.push(val);
part[headers[key]] = val;
});
tableDescription.rows.push(part);
part = [];
});
$('#outputTable' + counter).append(this.table.render({content: tableDescription}));

View File

@ -214,6 +214,14 @@ ArangoCollection.prototype._edgesQuery = function (vertex, direction) {
return requestResult.edges;
};
ArangoCollection.prototype.shards = function () {
var requestResult = this._database._connection.GET(this._baseurl('shards'), '');
arangosh.checkRequestResult(requestResult);
return requestResult.shards;
};
// //////////////////////////////////////////////////////////////////////////////
// / @brief converts into an array
// //////////////////////////////////////////////////////////////////////////////

View File

@ -60,6 +60,32 @@ function ClusterCollectionSuite () {
}
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test create, single shard
////////////////////////////////////////////////////////////////////////////////
testCreateSingleShard : function () {
var c = db._create("UnitTestsClusterCrud");
assertEqual("UnitTestsClusterCrud", c.name());
assertEqual(2, c.type());
assertEqual(1, c.shards().length);
assertTrue(typeof c.shards()[0] === 'string');
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test create, multiple shards
////////////////////////////////////////////////////////////////////////////////
testCreateMultipleShards : function () {
var c = db._create("UnitTestsClusterCrud", { numberOfShards: 8 });
assertEqual("UnitTestsClusterCrud", c.name());
assertEqual(2, c.type());
assertEqual(8, c.shards().length);
for (var i = 0; i < 8; ++i) {
assertTrue(typeof c.shards()[i] === 'string');
}
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test create
////////////////////////////////////////////////////////////////////////////////
@ -88,6 +114,21 @@ function ClusterCollectionSuite () {
assertEqual(c.name(), db._collection("UnitTestsClusterCrud").name());
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test create
////////////////////////////////////////////////////////////////////////////////
testCreateEdgeMultipleShards : function () {
var c = db._createEdgeCollection("UnitTestsClusterCrud", { numberOfShards: 8 });
assertEqual("UnitTestsClusterCrud", c.name());
assertEqual(3, c.type());
assertEqual(3, c.status());
assertTrue(c.hasOwnProperty("_id"));
assertEqual(8, c.shards().length);
assertEqual(c.name(), db._collection("UnitTestsClusterCrud").name());
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test create / drop
////////////////////////////////////////////////////////////////////////////////

View File

@ -43,6 +43,20 @@ var ERRORS = arangodb.errors;
function CollectionSuite () {
'use strict';
return {
testShards : function () {
var cn = "example";
db._drop(cn);
var c = db._create(cn);
try {
c.shards();
fail();
} catch (err) {
assertEqual(ERRORS.ERROR_INTERNAL.code, err.errorNum);
}
db._drop(cn);
},
////////////////////////////////////////////////////////////////////////////////
/// @brief rotate

View File

@ -1,5 +1,5 @@
/*jshint strict: false */
/*global ArangoClusterComm, require, exports, module */
/*global ArangoClusterInfo, ArangoClusterComm, require, exports, module */
// //////////////////////////////////////////////////////////////////////////////
// / @brief ArangoCollection
@ -104,6 +104,11 @@ var simple = require('@arangodb/simple-query');
var ArangoError = require('@arangodb').ArangoError;
var ArangoDatabase = require('@arangodb/arango-database').ArangoDatabase;
ArangoCollection.prototype.shards = function () {
return Object.keys(ArangoClusterInfo.getCollectionInfo(require('internal').db._name(), this.name()).shardShorts);
};
// //////////////////////////////////////////////////////////////////////////////
// / @brief was docuBlock collectionToArray
// //////////////////////////////////////////////////////////////////////////////

View File

@ -148,9 +148,9 @@ void ConfigFeature::loadConfigFile(std::shared_ptr<ProgramOptions> options,
locations.emplace_back(location);
}
locations.emplace_back(FileUtils::currentDirectory());
locations.emplace_back(FileUtils::buildFilename(FileUtils::currentDirectory(),
"etc", "relative"));
std::string current = FileUtils::currentDirectory().result();
locations.emplace_back(current);
locations.emplace_back(FileUtils::buildFilename(current, "etc", "relative"));
locations.emplace_back(
FileUtils::buildFilename(FileUtils::homeDirectory(), ".arangodb"));
locations.emplace_back(FileUtils::configDirectory(binaryPath));

View File

@ -77,8 +77,7 @@ void DaemonFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
logger->setBackgrounded(true);
// make the pid filename absolute
int err = 0;
std::string currentDir = FileUtils::currentDirectory(&err);
std::string currentDir = FileUtils::currentDirectory().result();
char* absoluteFile =
TRI_GetAbsolutePath(_pidFile.c_str(), currentDir.c_str());
@ -235,14 +234,16 @@ int DaemonFeature::forkProcess() {
}
// store current working directory
int err = 0;
_current = FileUtils::currentDirectory(&err);
FileResultString cwd = FileUtils::currentDirectory();
if (err != 0) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot get current directory";
if (!cwd.ok()) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "cannot get current directory: " << cwd.errorMessage();
FATAL_ERROR_EXIT();
}
_current = cwd.result();
// change the current working directory
if (!_workingDirectory.empty()) {
FileResult res = FileUtils::changeDirectory(_workingDirectory);

67
lib/Basics/EnumIterator.h Normal file
View File

@ -0,0 +1,67 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGODB_BASICS_ENUM_ITERATOR_H
#define ARANGODB_BASICS_ENUM_ITERATOR_H 1
#include "Basics/Common.h"
#include <type_traits>
#define ENUM_ITERATOR(type, start, end) arangodb::EnumIterator<type, type::start, type::end>()
namespace arangodb {
template <typename T, T beginValue, T endValue>
class EnumIterator {
typedef typename std::underlying_type<T>::type ValueType;
public:
EnumIterator(T const& current) noexcept
: current(static_cast<ValueType>(current)) {}
EnumIterator()
: current(static_cast<ValueType>(beginValue)) {}
EnumIterator operator++() noexcept {
++current;
return *this;
}
T operator*() noexcept { return static_cast<T>(current); }
EnumIterator begin() noexcept { return *this; }
EnumIterator end() noexcept {
static const EnumIterator endIter = ++EnumIterator(endValue);
return endIter;
}
bool operator!=(EnumIterator const& other) const noexcept {
return current != other.current;
}
private:
int current;
};
}
#endif

View File

@ -28,15 +28,16 @@
namespace arangodb {
class FileResult : public Result {
public:
explicit FileResult(bool state);
FileResult(bool state, int sysErrorNumber);
FileResult() : Result(), _sysErrorNumber(0) {}
explicit FileResult(int sysErrorNumber)
: Result(TRI_ERROR_SYS_ERROR, strerror(sysErrorNumber)),
_sysErrorNumber(sysErrorNumber) {}
public:
bool state() const { return _state; }
int sysErrorNumber() const { return _sysErrorNumber; }
private:
bool const _state;
protected:
int const _sysErrorNumber;
};
}

View File

@ -0,0 +1,47 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGODB_BASICS_FILE_RESULT_STRING_H
#define ARANGODB_BASICS_FILE_RESULT_STRING_H 1
#include "Basics/Result.h"
namespace arangodb {
// A FileResult that additionally carries a string payload, e.g. the value
// produced by a filesystem query such as currentDirectory().
class FileResultString : public FileResult {
 public:
  // error case with no payload: sysErrorNumber describes the failure
  FileResultString(int sysErrorNumber)
      : FileResult(sysErrorNumber), _result() {}

  // error case carrying a (possibly partial) payload alongside the error
  FileResultString(int sysErrorNumber, std::string value)
      : FileResult(sysErrorNumber), _result(value) {}

  // success case: no error, just the payload
  FileResultString(std::string value) : FileResult(), _result(value) {}

 public:
  // read-only access to the payload
  std::string const& result() const { return _result; }

 protected:
  std::string const _result;
};
}
#endif

View File

@ -34,8 +34,8 @@
#include "Basics/Exceptions.h"
#include "Basics/StringBuffer.h"
#include "Basics/files.h"
#include "Logger/Logger.h"
#include "Basics/tri-strings.h"
#include "Logger/Logger.h"
#if defined(_WIN32) && defined(_MSC_VER)
@ -91,7 +91,8 @@ std::string buildFilename(char const* path, char const* name) {
}
if (!result.empty() && *name == TRI_DIR_SEPARATOR_CHAR) {
// skip initial forward slash in name to avoid having two forward slashes in result
// skip initial forward slash in name to avoid having two forward slashes in
// result
result.append(name + 1);
} else {
result.append(name);
@ -109,7 +110,8 @@ std::string buildFilename(std::string const& path, std::string const& name) {
}
if (!result.empty() && !name.empty() && name[0] == TRI_DIR_SEPARATOR_CHAR) {
// skip initial forward slash in name to avoid having two forward slashes in result
// skip initial forward slash in name to avoid having two forward slashes in
// result
result.append(name.c_str() + 1, name.size() - 1);
} else {
result.append(name);
@ -119,7 +121,7 @@ std::string buildFilename(std::string const& path, std::string const& name) {
return result;
}
void throwFileReadError(int fd, std::string const& filename) {
static void throwFileReadError(int fd, std::string const& filename) {
TRI_set_errno(TRI_ERROR_SYS_ERROR);
int res = TRI_errno();
@ -134,21 +136,6 @@ void throwFileReadError(int fd, std::string const& filename) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_SYS_ERROR);
}
void throwFileWriteError(int fd, std::string const& filename) {
TRI_set_errno(TRI_ERROR_SYS_ERROR);
int res = TRI_errno();
if (fd >= 0) {
TRI_CLOSE(fd);
}
std::string message("write failed for file '" + filename + "': " +
strerror(res));
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "" << message;
THROW_ARANGO_EXCEPTION(TRI_ERROR_SYS_ERROR);
}
std::string slurp(std::string const& filename) {
int fd = TRI_OPEN(filename.c_str(), O_RDONLY | TRI_O_CLOEXEC);
@ -212,6 +199,21 @@ void slurp(std::string const& filename, StringBuffer& result) {
TRI_CLOSE(fd);
}
// Reports a failed write to 'filename': records the system error, closes
// 'fd' if it is a valid descriptor, logs a TRACE message, and throws
// TRI_ERROR_SYS_ERROR. Never returns.
static void throwFileWriteError(int fd, std::string const& filename) {
TRI_set_errno(TRI_ERROR_SYS_ERROR);
// read the error code before TRI_CLOSE below — presumably TRI_errno()
// reflects errno, which the close could clobber; confirm TRI_errno semantics
int res = TRI_errno();
if (fd >= 0) {
TRI_CLOSE(fd);
}
std::string message("write failed for file '" + filename + "': " +
strerror(res));
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "" << message;
THROW_ARANGO_EXCEPTION(TRI_ERROR_SYS_ERROR);
}
void spit(std::string const& filename, char const* ptr, size_t len) {
int fd =
TRI_CREATE(filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC | TRI_O_CLOEXEC,
@ -356,11 +358,10 @@ bool copyRecursive(std::string const& source, std::string const& target,
bool copyDirectoryRecursive(std::string const& source,
std::string const& target, std::string& error) {
bool rc = true;
auto isSubDirectory = [](std::string const& name) -> bool {
return isDirectory(name);
return isDirectory(name);
};
#ifdef TRI_HAVE_WIN32_LIST_FILES
struct _finddata_t oneItem;
@ -386,7 +387,8 @@ bool copyDirectoryRecursive(std::string const& source,
struct dirent* oneItem = nullptr;
// do not use readdir_r() here anymore as it is not safe and deprecated
// in newer versions of libc: http://man7.org/linux/man-pages/man3/readdir_r.3.html
// in newer versions of libc:
// http://man7.org/linux/man-pages/man3/readdir_r.3.html
// the man page recommends to use plain readdir() because it can be expected
// to be thread-safe in reality, and newer versions of POSIX may require its
// thread-safety formally, and in addition obsolete readdir_r() altogether
@ -561,41 +563,28 @@ FileResult changeDirectory(std::string const& path) {
int res = TRI_CHDIR(path.c_str());
if (res == 0) {
return FileResult(true);
return FileResult();
} else {
return FileResult(false, errno);
return FileResult(errno);
}
}
std::string currentDirectory(int* errorNumber) {
if (errorNumber != 0) {
*errorNumber = 0;
}
FileResultString currentDirectory() {
size_t len = 1000;
char* current = new char[len];
std::unique_ptr<char[]> current(new char[len]);
while (TRI_GETCWD(current, (int)len) == nullptr) {
while (TRI_GETCWD(current.get(), (int)len) == nullptr) {
if (errno == ERANGE) {
len += 1000;
delete[] current;
current = new char[len];
current.reset(new char[len]);
} else {
delete[] current;
if (errorNumber != 0) {
*errorNumber = errno;
}
return ".";
return FileResultString(errno, ".");
}
}
std::string result = current;
std::string result = current.get();
delete[] current;
return result;
return FileResultString(result);
}
std::string homeDirectory() {
@ -610,7 +599,7 @@ std::string configDirectory(char const* binaryPath) {
char* dir = TRI_LocateConfigDirectory(binaryPath);
if (dir == nullptr) {
return currentDirectory();
return currentDirectory().result();
}
std::string result = dir;
@ -631,15 +620,12 @@ std::string dirname(std::string const& name) {
return base;
}
void makePathAbsolute(std::string &path) {
int err = 0;
std::string cwd = FileUtils::currentDirectory(&err);
char * p = TRI_GetAbsolutePath(path.c_str(), cwd.c_str());
// Replaces 'path' in place with its absolute form, resolved against the
// current working directory.
void makePathAbsolute(std::string& path) {
// on failure currentDirectory() yields "." as its result (see above), so
// resolution falls back to a relative base rather than aborting
std::string cwd = FileUtils::currentDirectory().result();
char* p = TRI_GetAbsolutePath(path.c_str(), cwd.c_str());
// NOTE(review): if TRI_GetAbsolutePath can return nullptr, this assignment
// is undefined behavior — confirm the helper's contract
path = p;
TRI_FreeString(TRI_CORE_MEM_ZONE, p);
}
}
}
}

View File

@ -26,8 +26,9 @@
#include "Basics/Common.h"
#include "Basics/files.h"
#include "Basics/FileResult.h"
#include "Basics/FileResultString.h"
#include "Basics/files.h"
namespace arangodb {
namespace basics {
@ -55,12 +56,6 @@ inline std::string buildFilename(std::string path, std::string name, Args... arg
return buildFilename(buildFilename(path, name), args...);
}
// throws a read error
void throwFileReadError(int fd, std::string const& filename);
// throws a write error
void throwFileWriteError(int fd, std::string const& filename);
// reads file into string
std::string slurp(std::string const& filename);
void slurp(std::string const& filename, StringBuffer&);
@ -72,6 +67,8 @@ void spit(std::string const& filename, StringBuffer const& content);
// returns true if a file could be removed
bool remove(std::string const& fileName, int* errorNumber = 0);
// returns true if a file could be renamed
bool rename(std::string const& oldName, std::string const& newName,
int* errorNumber = 0);
@ -112,7 +109,7 @@ std::string stripExtension(std::string const& path,
FileResult changeDirectory(std::string const& path);
// returns the current directory
std::string currentDirectory(int* errorNumber = 0);
FileResultString currentDirectory();
// returns the home directory
std::string homeDirectory();

View File

@ -29,20 +29,26 @@ namespace arangodb {
class Result {
public:
Result() : _errorNumber(TRI_ERROR_NO_ERROR) {}
Result(int errorNumber, std::string const& errorMessage)
Result(int errorNumber)
: _errorNumber(errorNumber),
_errorMessage(TRI_errno_string(errorNumber)) {}
Result(int errorNumber, std::string const& errorMessage)
: _errorNumber(errorNumber), _errorMessage(errorMessage) {}
Result(int errorNumber, std::string&& errorMessage)
Result(int errorNumber, std::string&& errorMessage)
: _errorNumber(errorNumber), _errorMessage(std::move(errorMessage)) {}
virtual ~Result() {}
public:
// the default implementations are const, but subclasses might
// really do more work to compute - for example - the error
// string.
bool ok() const { return _errorNumber == TRI_ERROR_NO_ERROR; }
int errorNumber() const { return _errorNumber; }
// the default implementations is const, but sub-classes might
// really do more work to compute.
virtual bool ok() { return _errorNumber == TRI_ERROR_NO_ERROR; }
virtual int errorNumber() { return _errorNumber; }
virtual std::string errorMessage() { return _errorMessage; }
protected:

View File

@ -130,7 +130,6 @@ add_library(${LIB_ARANGO} STATIC
Basics/ConditionVariable.cpp
Basics/DataProtector.cpp
Basics/Exceptions.cpp
Basics/FileResult.cpp
Basics/FileUtils.cpp
Basics/HybridLogicalClock.cpp
Basics/LocalTaskQueue.cpp

View File

@ -1710,7 +1710,7 @@ void TRI_InitV8Buffer(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
TRI_V8_AddMethod(isolate, exports, TRI_V8_ASCII_STRING("SlowBuffer"), ft);
TRI_AddGlobalVariableVocbase(
isolate, context, TRI_V8_ASCII_STRING("EXPORTS_SLOW_BUFFER"), exports);
isolate, TRI_V8_ASCII_STRING("EXPORTS_SLOW_BUFFER"), exports);
v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
heap_profiler->SetWrapperClassInfoProvider(TRI_V8_BUFFER_CID, WrapperInfo);

View File

@ -215,6 +215,6 @@ void TRI_InitV8Env(v8::Isolate* isolate, v8::Handle<v8::Context> context,
rt->SetNamedPropertyHandler(EnvGetter, EnvSetter, EnvQuery, EnvDeleter,
EnvEnumerator, v8::Object::New(isolate));
v8g->EnvTempl.Reset(isolate, rt);
TRI_AddGlobalFunctionVocbase(isolate, context, TRI_V8_ASCII_STRING("ENV"),
TRI_AddGlobalFunctionVocbase(isolate, TRI_V8_ASCII_STRING("ENV"),
ft->GetFunction());
}

View File

@ -250,16 +250,16 @@ void TRI_AddMethodVocbase(
/// @brief adds a global function to the given context
void TRI_AddGlobalFunctionVocbase(
v8::Isolate* isolate, v8::Handle<v8::Context> context,
v8::Isolate* isolate,
v8::Handle<v8::String> name,
void (*func)(v8::FunctionCallbackInfo<v8::Value> const&), bool isHidden) {
// all global functions are read-only
if (isHidden) {
context->Global()->ForceSet(
isolate->GetCurrentContext()->Global()->ForceSet(
name, v8::FunctionTemplate::New(isolate, func)->GetFunction(),
static_cast<v8::PropertyAttribute>(v8::ReadOnly | v8::DontEnum));
} else {
context->Global()->ForceSet(
isolate->GetCurrentContext()->Global()->ForceSet(
name, v8::FunctionTemplate::New(isolate, func)->GetFunction(),
v8::ReadOnly);
}
@ -267,24 +267,22 @@ void TRI_AddGlobalFunctionVocbase(
/// @brief adds a global function to the given context
void TRI_AddGlobalFunctionVocbase(v8::Isolate* isolate,
v8::Handle<v8::Context> context,
v8::Handle<v8::String> name,
v8::Handle<v8::Function> func,
bool isHidden) {
// all global functions are read-only
if (isHidden) {
context->Global()->ForceSet(name, func, static_cast<v8::PropertyAttribute>(
isolate->GetCurrentContext()->Global()->ForceSet(name, func, static_cast<v8::PropertyAttribute>(
v8::ReadOnly | v8::DontEnum));
} else {
context->Global()->ForceSet(name, func, v8::ReadOnly);
isolate->GetCurrentContext()->Global()->ForceSet(name, func, v8::ReadOnly);
}
}
/// @brief adds a global read-only variable to the given context
void TRI_AddGlobalVariableVocbase(v8::Isolate* isolate,
v8::Handle<v8::Context> context,
v8::Handle<v8::String> name,
v8::Handle<v8::Value> value) {
// all global variables are read-only
context->Global()->ForceSet(name, value, v8::ReadOnly);
isolate->GetCurrentContext()->Global()->ForceSet(name, value, v8::ReadOnly);
}

View File

@ -704,21 +704,19 @@ void TRI_AddMethodVocbase(
/// @brief adds a global function to the given context
void TRI_AddGlobalFunctionVocbase(
v8::Isolate* isolate, v8::Handle<v8::Context> context,
v8::Isolate* isolate,
v8::Handle<v8::String> name,
void (*func)(v8::FunctionCallbackInfo<v8::Value> const&),
bool isHidden = false);
/// @brief adds a global function to the given context
void TRI_AddGlobalFunctionVocbase(v8::Isolate* isolate,
v8::Handle<v8::Context> context,
v8::Handle<v8::String> name,
v8::Handle<v8::Function> func,
bool isHidden = false);
/// @brief adds a global read-only variable to the given context
void TRI_AddGlobalVariableVocbase(v8::Isolate* isolate,
v8::Handle<v8::Context> context,
v8::Handle<v8::String> name,
v8::Handle<v8::Value> value);

View File

@ -305,10 +305,10 @@ void TRI_InitV8Shell(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
// create the global functions
// .............................................................................
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_PROCESS_CSV_FILE"),
JS_ProcessCsvFile);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_PROCESS_JSON_FILE"),
JS_ProcessJsonFile);
@ -419,6 +419,6 @@ void TRI_InitV8Shell(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
: v8::String::Empty(isolate),
v8::ReadOnly);
TRI_AddGlobalVariableVocbase(isolate, context, TRI_V8_ASCII_STRING("COLORS"),
TRI_AddGlobalVariableVocbase(isolate, TRI_V8_ASCII_STRING("COLORS"),
colors);
}

View File

@ -27,9 +27,9 @@
#include "Basics/win-utils.h"
#endif
#include <signal.h>
#include <fstream>
#include <iostream>
#include <signal.h>
#include "3rdParty/valgrind/valgrind.h"
#include "unicode/normalizer2.h"
@ -112,7 +112,8 @@ static void CreateErrorObject(v8::Isolate* isolate, int errorNumber,
std::string const& message) noexcept {
try {
if (errorNumber == TRI_ERROR_OUT_OF_MEMORY) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "encountered out-of-memory error";
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "encountered out-of-memory error";
}
v8::Handle<v8::String> errorMessage = TRI_V8_STD_STRING(message);
@ -137,7 +138,7 @@ static void CreateErrorObject(v8::Isolate* isolate, int errorNumber,
}
errorObject->Set(TRI_V8_ASCII_STRING("errorNum"),
v8::Number::New(isolate, errorNumber));
v8::Number::New(isolate, errorNumber));
errorObject->Set(TRI_V8_ASCII_STRING("errorMessage"), errorMessage);
TRI_GET_GLOBALS();
@ -169,8 +170,9 @@ static bool LoadJavaScriptFile(v8::Isolate* isolate, char const* filename,
char* content = TRI_SlurpFile(TRI_UNKNOWN_MEM_ZONE, filename, &length);
if (content == nullptr) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot load java script file '" << filename
<< "': " << TRI_last_error();
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot load java script file '"
<< filename
<< "': " << TRI_last_error();
return false;
}
@ -205,8 +207,9 @@ static bool LoadJavaScriptFile(v8::Isolate* isolate, char const* filename,
}
if (content == nullptr) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot load java script file '" << filename
<< "': " << TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY);
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "cannot load java script file '" << filename
<< "': " << TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY);
return false;
}
@ -227,8 +230,9 @@ static bool LoadJavaScriptFile(v8::Isolate* isolate, char const* filename,
// compilation failed, print errors that happened during compilation
if (script.IsEmpty()) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot load java script file '" << filename
<< "': compilation failed.";
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot load java script file '"
<< filename
<< "': compilation failed.";
return false;
}
@ -246,7 +250,8 @@ static bool LoadJavaScriptFile(v8::Isolate* isolate, char const* filename,
}
}
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "loaded java script file: '" << filename << "'";
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "loaded java script file: '"
<< filename << "'";
return true;
}
@ -260,7 +265,8 @@ static bool LoadJavaScriptDirectory(v8::Isolate* isolate, char const* path,
v8::HandleScope scope(isolate);
bool result;
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "loading JavaScript directory: '" << path << "'";
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "loading JavaScript directory: '"
<< path << "'";
std::vector<std::string> files = TRI_FilesDirectory(path);
@ -412,10 +418,13 @@ static void JS_Parse(v8::FunctionCallbackInfo<v8::Value> const& args) {
// compilation failed, we have caught an exception
if (tryCatch.HasCaught()) {
if (tryCatch.CanContinue()) {
v8::Local<v8::Object> exceptionObj = tryCatch.Exception().As<v8::Object>();
v8::Local<v8::Object> exceptionObj =
tryCatch.Exception().As<v8::Object>();
v8::Handle<v8::Message> message = tryCatch.Message();
exceptionObj->Set(TRI_V8_ASCII_STRING("lineNumber"), v8::Number::New(isolate, message->GetLineNumber()));
exceptionObj->Set(TRI_V8_ASCII_STRING("columnNumber"), v8::Number::New(isolate, message->GetStartColumn()));
exceptionObj->Set(TRI_V8_ASCII_STRING("lineNumber"),
v8::Number::New(isolate, message->GetLineNumber()));
exceptionObj->Set(TRI_V8_ASCII_STRING("columnNumber"),
v8::Number::New(isolate, message->GetStartColumn()));
exceptionObj->Set(TRI_V8_ASCII_STRING("fileName"), filename->ToString());
tryCatch.ReThrow();
return;
@ -472,19 +481,21 @@ static void JS_ParseFile(v8::FunctionCallbackInfo<v8::Value> const& args) {
}
v8::TryCatch tryCatch;
v8::Handle<v8::Script> script =
v8::Script::Compile(TRI_V8_PAIR_STRING(content, (int)length),
args[0]->ToString());
v8::Handle<v8::Script> script = v8::Script::Compile(
TRI_V8_PAIR_STRING(content, (int)length), args[0]->ToString());
TRI_FreeString(TRI_UNKNOWN_MEM_ZONE, content);
// compilation failed, we have caught an exception
if (tryCatch.HasCaught()) {
if (tryCatch.CanContinue()) {
v8::Local<v8::Object> exceptionObj = tryCatch.Exception().As<v8::Object>();
v8::Local<v8::Object> exceptionObj =
tryCatch.Exception().As<v8::Object>();
v8::Handle<v8::Message> message = tryCatch.Message();
exceptionObj->Set(TRI_V8_ASCII_STRING("lineNumber"), v8::Number::New(isolate, message->GetLineNumber()));
exceptionObj->Set(TRI_V8_ASCII_STRING("columnNumber"), v8::Number::New(isolate, message->GetStartColumn()));
exceptionObj->Set(TRI_V8_ASCII_STRING("lineNumber"),
v8::Number::New(isolate, message->GetLineNumber()));
exceptionObj->Set(TRI_V8_ASCII_STRING("columnNumber"),
v8::Number::New(isolate, message->GetStartColumn()));
exceptionObj->Set(TRI_V8_ASCII_STRING("fileName"), args[0]);
tryCatch.ReThrow();
return;
@ -659,7 +670,7 @@ void JS_Download(v8::FunctionCallbackInfo<v8::Value> const& args) {
if (options.IsEmpty()) {
TRI_V8_THROW_EXCEPTION_USAGE(signature);
}
// ssl protocol
if (options->Has(TRI_V8_ASCII_STRING("sslProtocol"))) {
if (sslProtocol >= SSL_LAST) {
@ -667,14 +678,15 @@ void JS_Download(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER,
"invalid option value for sslProtocol");
}
sslProtocol = TRI_ObjectToUInt64(options->Get(TRI_V8_ASCII_STRING("sslProtocol")), false);
sslProtocol = TRI_ObjectToUInt64(
options->Get(TRI_V8_ASCII_STRING("sslProtocol")), false);
}
// method
if (options->Has(TRI_V8_ASCII_STRING("method"))) {
std::string methodString =
TRI_ObjectToString(isolate, options->Get(TRI_V8_ASCII_STRING("method")));
std::string methodString = TRI_ObjectToString(
isolate, options->Get(TRI_V8_ASCII_STRING("method")));
method = HttpRequest::translateMethod(methodString);
}
@ -819,8 +831,9 @@ void JS_Download(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_THROW_SYNTAX_ERROR("unsupported URL specified");
}
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "downloading file. endpoint: " << endpoint
<< ", relative URL: " << url;
LOG_TOPIC(TRACE, arangodb::Logger::FIXME)
<< "downloading file. endpoint: " << endpoint
<< ", relative URL: " << url;
std::unique_ptr<Endpoint> ep(Endpoint::clientFactory(endpoint));
@ -829,7 +842,8 @@ void JS_Download(v8::FunctionCallbackInfo<v8::Value> const& args) {
}
std::unique_ptr<GeneralClientConnection> connection(
GeneralClientConnection::factory(ep.get(), timeout, timeout, 3, sslProtocol));
GeneralClientConnection::factory(ep.get(), timeout, timeout, 3,
sslProtocol));
if (connection == nullptr) {
TRI_V8_THROW_EXCEPTION_MEMORY();
@ -993,8 +1007,8 @@ static void JS_Execute(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_Utf8ValueNFC keyName(TRI_UNKNOWN_MEM_ZONE, key);
if (*keyName != nullptr) {
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "copying key '" << *keyName
<< "' from sandbox to context";
LOG_TOPIC(TRACE, arangodb::Logger::FIXME)
<< "copying key '" << *keyName << "' from sandbox to context";
}
}
@ -1023,11 +1037,15 @@ static void JS_Execute(v8::FunctionCallbackInfo<v8::Value> const& args) {
}
if (tryCatch.CanContinue()) {
v8::Local<v8::Object> exceptionObj = tryCatch.Exception().As<v8::Object>();
v8::Local<v8::Object> exceptionObj =
tryCatch.Exception().As<v8::Object>();
v8::Handle<v8::Message> message = tryCatch.Message();
exceptionObj->Set(TRI_V8_ASCII_STRING("lineNumber"), v8::Number::New(isolate, message->GetLineNumber()));
exceptionObj->Set(TRI_V8_ASCII_STRING("columnNumber"), v8::Number::New(isolate, message->GetStartColumn()));
exceptionObj->Set(TRI_V8_ASCII_STRING("fileName"), filename->ToString());
exceptionObj->Set(TRI_V8_ASCII_STRING("lineNumber"),
v8::Number::New(isolate, message->GetLineNumber()));
exceptionObj->Set(TRI_V8_ASCII_STRING("columnNumber"),
v8::Number::New(isolate, message->GetStartColumn()));
exceptionObj->Set(TRI_V8_ASCII_STRING("fileName"),
filename->ToString());
tryCatch.ReThrow();
return;
} else {
@ -1071,8 +1089,8 @@ static void JS_Execute(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_Utf8ValueNFC keyName(TRI_UNKNOWN_MEM_ZONE, key);
if (*keyName != nullptr) {
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "copying key '" << *keyName
<< "' from context to sandbox";
LOG_TOPIC(TRACE, arangodb::Logger::FIXME)
<< "copying key '" << *keyName << "' from context to sandbox";
}
}
@ -1361,20 +1379,22 @@ static void JS_MakeAbsolute(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_THROW_TYPE_ERROR("<path> must be a string");
}
int err = 0;
std::string cwd = arangodb::basics::FileUtils::currentDirectory(&err);
if (0 != err) {
TRI_V8_THROW_EXCEPTION_MESSAGE(err, "cannot get current working directory");
FileResultString cwd = FileUtils::currentDirectory();
if (!cwd.ok()) {
TRI_V8_THROW_EXCEPTION_MESSAGE(
cwd.sysErrorNumber(),
"cannot get current working directory: " + cwd.errorMessage());
}
char* abs = TRI_GetAbsolutePath(*name, cwd.c_str());
char* abs = TRI_GetAbsolutePath(*name, cwd.result().c_str());
v8::Handle<v8::String> res;
if (0 != abs) {
if (nullptr != abs) {
res = TRI_V8_STRING(abs);
TRI_Free(TRI_UNKNOWN_MEM_ZONE, abs);
} else {
res = TRI_V8_STD_STRING(cwd);
res = TRI_V8_STD_STRING(cwd.result());
}
// return result
@ -3183,7 +3203,7 @@ static void JS_PBKDF2HS1(v8::FunctionCallbackInfo<v8::Value> const& args) {
std::string result =
SslInterface::sslPBKDF2HS1(salt.c_str(), salt.size(), password.c_str(),
password.size(), iterations, keyLength);
password.size(), iterations, keyLength);
TRI_V8_RETURN_STD_STRING(result);
TRI_V8_TRY_CATCH_END
}
@ -3205,7 +3225,8 @@ static void JS_PBKDF2(v8::FunctionCallbackInfo<v8::Value> const& args) {
if (args.Length() < 4 || !args[0]->IsString() || !args[1]->IsString() ||
!args[2]->IsNumber() || !args[3]->IsNumber()) {
TRI_V8_THROW_EXCEPTION_USAGE(
"PBKDF2_SHA(<salt>, <password>, <iterations>, <keyLength>, <algorithm>)");
"PBKDF2_SHA(<salt>, <password>, <iterations>, <keyLength>, "
"<algorithm>)");
}
SslInterface::Algorithm al = SslInterface::Algorithm::ALGORITHM_SHA1;
@ -3650,7 +3671,8 @@ static void JS_KillExternal(v8::FunctionCallbackInfo<v8::Value> const& args) {
// extract the arguments
if (args.Length() < 1 || args.Length() > 2) {
TRI_V8_THROW_EXCEPTION_USAGE("killExternal(<external-identifier>, <signal>)");
TRI_V8_THROW_EXCEPTION_USAGE(
"killExternal(<external-identifier>, <signal>)");
}
int signal = SIGTERM;
if (args.Length() == 2) {
@ -3812,11 +3834,13 @@ static void JS_V8ToVPack(v8::FunctionCallbackInfo<v8::Value> const& args) {
}
VPackSlice slice = builder.slice();
V8Buffer* buffer = V8Buffer::New(isolate, slice.startAs<char const>(), slice.byteSize());
v8::Local<v8::Object> bufferObject = v8::Local<v8::Object>::New(isolate, buffer->_handle);
V8Buffer* buffer =
V8Buffer::New(isolate, slice.startAs<char const>(), slice.byteSize());
v8::Local<v8::Object> bufferObject =
v8::Local<v8::Object>::New(isolate, buffer->_handle);
TRI_V8_RETURN(bufferObject);
TRI_V8_RETURN_FALSE();
TRI_V8_TRY_CATCH_END
}
@ -3834,9 +3858,9 @@ static void JS_VPackToV8(v8::FunctionCallbackInfo<v8::Value> const& args) {
if (args[0]->IsString() || args[0]->IsStringObject()) {
// supplied argument is a string
std::string const value = TRI_ObjectToString(isolate, args[0]);
VPackValidator validator;
validator.validate(value.c_str(), value.size(), false);
validator.validate(value.c_str(), value.size(), false);
VPackSlice slice(value.c_str());
v8::Handle<v8::Value> result = TRI_VPackToV8(isolate, slice);
@ -3847,16 +3871,17 @@ static void JS_VPackToV8(v8::FunctionCallbackInfo<v8::Value> const& args) {
size_t size = V8Buffer::length(args[0].As<v8::Object>());
VPackValidator validator;
validator.validate(data, size, false);
validator.validate(data, size, false);
VPackSlice slice(data);
v8::Handle<v8::Value> result = TRI_VPackToV8(isolate, slice);
TRI_V8_RETURN(result);
} else {
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER, "invalid argument type for VPACK_TO_V8()");
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER,
"invalid argument type for VPACK_TO_V8()");
}
TRI_V8_RETURN_FALSE();
TRI_V8_TRY_CATCH_END
}
@ -4092,7 +4117,8 @@ void TRI_LogV8Exception(v8::Isolate* isolate, v8::TryCatch* tryCatch) {
if (exceptionString == nullptr) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "JavaScript exception";
} else {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "JavaScript exception: " << exceptionString;
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "JavaScript exception: "
<< exceptionString;
}
} else {
TRI_Utf8ValueNFC filename(TRI_UNKNOWN_MEM_ZONE,
@ -4108,16 +4134,18 @@ void TRI_LogV8Exception(v8::Isolate* isolate, v8::TryCatch* tryCatch) {
if (exceptionString == nullptr) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "JavaScript exception";
} else {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "JavaScript exception: " << exceptionString;
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "JavaScript exception: "
<< exceptionString;
}
} else {
if (exceptionString == nullptr) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "JavaScript exception in file '" << filenameString
<< "' at " << linenum << "," << start;
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "JavaScript exception in file '" << filenameString << "' at "
<< linenum << "," << start;
} else {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "JavaScript exception in file '" << filenameString
<< "' at " << linenum << "," << start << ": "
<< exceptionString;
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "JavaScript exception in file '" << filenameString << "' at "
<< linenum << "," << start << ": " << exceptionString;
}
}
@ -4225,7 +4253,8 @@ v8::Handle<v8::Value> TRI_ExecuteJavaScriptString(
}
}
} else {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "no output function defined in Javascript context";
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "no output function defined in Javascript context";
}
}
@ -4462,7 +4491,7 @@ void TRI_InitV8Utils(v8::Isolate* isolate, v8::Handle<v8::Context> context,
->SetPrototype(ErrorPrototype);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("ArangoError"), ArangoErrorFunc);
isolate, TRI_V8_ASCII_STRING("ArangoError"), ArangoErrorFunc);
rt = ft->InstanceTemplate();
v8g->ArangoErrorTempl.Reset(isolate, rt);
@ -4471,213 +4500,213 @@ void TRI_InitV8Utils(v8::Isolate* isolate, v8::Handle<v8::Context> context,
// create the global functions
// .............................................................................
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("FS_CHMOD"), JS_ChMod);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("FS_EXISTS"), JS_Exists);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("FS_FILESIZE"), JS_SizeFile);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("FS_GET_TEMP_FILE"),
JS_GetTempFile);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("FS_GET_TEMP_PATH"),
JS_GetTempPath);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("FS_IS_DIRECTORY"), JS_IsDirectory);
TRI_AddGlobalFunctionVocbase(isolate, context,
isolate, TRI_V8_ASCII_STRING("FS_IS_DIRECTORY"), JS_IsDirectory);
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("FS_IS_FILE"), JS_IsFile);
TRI_AddGlobalFunctionVocbase(isolate, context, TRI_V8_ASCII_STRING("FS_LIST"),
TRI_AddGlobalFunctionVocbase(isolate, TRI_V8_ASCII_STRING("FS_LIST"),
JS_List);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("FS_LIST_TREE"), JS_ListTree);
TRI_AddGlobalFunctionVocbase(isolate, context,
isolate, TRI_V8_ASCII_STRING("FS_LIST_TREE"), JS_ListTree);
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("FS_MAKE_ABSOLUTE"),
JS_MakeAbsolute);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("FS_MAKE_DIRECTORY"),
JS_MakeDirectory);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("FS_MAKE_DIRECTORY_RECURSIVE"),
isolate, TRI_V8_ASCII_STRING("FS_MAKE_DIRECTORY_RECURSIVE"),
JS_MakeDirectoryRecursive);
TRI_AddGlobalFunctionVocbase(isolate, context, TRI_V8_ASCII_STRING("FS_MOVE"),
TRI_AddGlobalFunctionVocbase(isolate, TRI_V8_ASCII_STRING("FS_MOVE"),
JS_MoveFile);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("FS_COPY_RECURSIVE"),
JS_CopyRecursive);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("FS_COPY_FILE"), JS_CopyFile);
isolate, TRI_V8_ASCII_STRING("FS_COPY_FILE"), JS_CopyFile);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("FS_MTIME"), JS_MTime);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("FS_REMOVE"), JS_Remove);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("FS_REMOVE_DIRECTORY"),
JS_RemoveDirectory);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("FS_REMOVE_RECURSIVE_DIRECTORY"),
isolate, TRI_V8_ASCII_STRING("FS_REMOVE_RECURSIVE_DIRECTORY"),
JS_RemoveRecursiveDirectory);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("FS_UNZIP_FILE"), JS_UnzipFile);
TRI_AddGlobalFunctionVocbase(isolate, context,
isolate, TRI_V8_ASCII_STRING("FS_UNZIP_FILE"), JS_UnzipFile);
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("FS_ZIP_FILE"), JS_ZipFile);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_APPEND"), JS_Append);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_BASE64DECODE"),
JS_Base64Decode);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_BASE64ENCODE"),
JS_Base64Encode);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_CHECK_AND_MARK_NONCE"),
JS_MarkNonce);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_CREATE_NONCE"),
JS_CreateNonce);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("SYS_DOWNLOAD"), JS_Download);
TRI_AddGlobalFunctionVocbase(isolate, context,
isolate, TRI_V8_ASCII_STRING("SYS_DOWNLOAD"), JS_Download);
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_EXECUTE"), JS_Execute);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_EXECUTE_EXTERNAL"),
JS_ExecuteExternal);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("SYS_EXECUTE_EXTERNAL_AND_WAIT"),
isolate, TRI_V8_ASCII_STRING("SYS_EXECUTE_EXTERNAL_AND_WAIT"),
JS_ExecuteAndWaitExternal);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("SYS_GEN_RANDOM_ALPHA_NUMBERS"),
isolate, TRI_V8_ASCII_STRING("SYS_GEN_RANDOM_ALPHA_NUMBERS"),
JS_RandomAlphaNum);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_GEN_RANDOM_NUMBERS"),
JS_RandomNumbers);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_GEN_RANDOM_SALT"),
JS_RandomSalt);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_GETLINE"), JS_Getline);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_HMAC"), JS_HMAC);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_IS_IP"), JS_IsIP);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_SPLIT_WORDS_ICU"),
JS_SplitWordlist);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_KILL_EXTERNAL"),
JS_KillExternal);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_SUSPEND_EXTERNAL"),
JS_SuspendExternal);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_CONTINUE_EXTERNAL"),
JS_ContinueExternal);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_LOAD"), JS_Load);
TRI_AddGlobalFunctionVocbase(isolate, context, TRI_V8_ASCII_STRING("SYS_LOG"),
TRI_AddGlobalFunctionVocbase(isolate, TRI_V8_ASCII_STRING("SYS_LOG"),
JS_Log);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("SYS_LOG_LEVEL"), JS_LogLevel);
TRI_AddGlobalFunctionVocbase(isolate, context, TRI_V8_ASCII_STRING("SYS_MD5"),
isolate, TRI_V8_ASCII_STRING("SYS_LOG_LEVEL"), JS_LogLevel);
TRI_AddGlobalFunctionVocbase(isolate, TRI_V8_ASCII_STRING("SYS_MD5"),
JS_Md5);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_OPTIONS"), JS_Options);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_OUTPUT"), JS_Output);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_PARSE"), JS_Parse);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("SYS_PARSE_FILE"), JS_ParseFile);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_V8_ASCII_STRING("SYS_PBKDF2HS1"), JS_PBKDF2HS1);
TRI_AddGlobalFunctionVocbase(isolate, context,
isolate, TRI_V8_ASCII_STRING("SYS_PARSE_FILE"), JS_ParseFile);
TRI_AddGlobalFunctionVocbase(
isolate, TRI_V8_ASCII_STRING("SYS_PBKDF2HS1"), JS_PBKDF2HS1);
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_PBKDF2"), JS_PBKDF2);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_PROCESS_STATISTICS"),
JS_ProcessStatistics);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_RAND"), JS_Rand);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_READ"), JS_Read);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_READ64"), JS_Read64);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("SYS_READ_BUFFER"), JS_ReadBuffer);
TRI_AddGlobalFunctionVocbase(isolate, context,
isolate, TRI_V8_ASCII_STRING("SYS_READ_BUFFER"), JS_ReadBuffer);
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_SHA1"), JS_Sha1);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_SHA224"), JS_Sha224);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_SHA256"), JS_Sha256);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_SHA384"), JS_Sha384);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_SHA512"), JS_Sha512);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_SLEEP"), JS_Sleep);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_SPRINTF"), JS_SPrintF);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_STATUS_EXTERNAL"),
JS_StatusExternal);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("SYS_TEST_PORT"), JS_TestPort);
TRI_AddGlobalFunctionVocbase(isolate, context,
isolate, TRI_V8_ASCII_STRING("SYS_TEST_PORT"), JS_TestPort);
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_TIME"), JS_Time);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_WAIT"), JS_Wait);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_WRITE"), JS_Write);
TRI_AddGlobalFunctionVocbase(isolate, context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_DEBUG_CAN_USE_FAILAT"),
JS_DebugCanUseFailAt);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("SYS_IS_STOPPING"), JS_IsStopping);
isolate, TRI_V8_ASCII_STRING("SYS_IS_STOPPING"), JS_IsStopping);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("V8_TO_VPACK"), JS_V8ToVPack);
isolate, TRI_V8_ASCII_STRING("V8_TO_VPACK"), JS_V8ToVPack);
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("VPACK_TO_V8"), JS_VPackToV8);
isolate, TRI_V8_ASCII_STRING("VPACK_TO_V8"), JS_VPackToV8);
// .............................................................................
// create the global variables
// .............................................................................
TRI_AddGlobalVariableVocbase(isolate, context, TRI_V8_ASCII_STRING("HOME"),
TRI_AddGlobalVariableVocbase(isolate, TRI_V8_ASCII_STRING("HOME"),
TRI_V8_STD_STRING(FileUtils::homeDirectory()));
TRI_AddGlobalVariableVocbase(isolate, context,
TRI_AddGlobalVariableVocbase(isolate,
TRI_V8_ASCII_STRING("MODULES_PATH"),
V8PathList(isolate, modules));
TRI_AddGlobalVariableVocbase(isolate, context,
TRI_AddGlobalVariableVocbase(isolate,
TRI_V8_ASCII_STRING("STARTUP_PATH"),
TRI_V8_STD_STRING(startupPath));
TRI_AddGlobalVariableVocbase(isolate, context,
TRI_AddGlobalVariableVocbase(isolate,
TRI_V8_ASCII_STRING("PATH_SEPARATOR"),
TRI_V8_ASCII_STRING(TRI_DIR_SEPARATOR_STR));
TRI_AddGlobalVariableVocbase(
isolate, context, TRI_V8_ASCII_STRING("VALGRIND"),
isolate, TRI_V8_ASCII_STRING("VALGRIND"),
v8::Boolean::New(isolate, (RUNNING_ON_VALGRIND > 0)));
#ifdef COVERAGE
TRI_AddGlobalVariableVocbase(
isolate, context, TRI_V8_ASCII_STRING("COVERAGE"), v8::True(isolate));
isolate, TRI_V8_ASCII_STRING("COVERAGE"), v8::True(isolate));
#else
TRI_AddGlobalVariableVocbase(
isolate, context, TRI_V8_ASCII_STRING("COVERAGE"), v8::False(isolate));
isolate, TRI_V8_ASCII_STRING("COVERAGE"), v8::False(isolate));
#endif
TRI_AddGlobalVariableVocbase(isolate, context, TRI_V8_ASCII_STRING("VERSION"),
TRI_AddGlobalVariableVocbase(isolate, TRI_V8_ASCII_STRING("VERSION"),
TRI_V8_ASCII_STRING(ARANGODB_VERSION));
TRI_AddGlobalVariableVocbase(isolate, context,
TRI_AddGlobalVariableVocbase(isolate,
TRI_V8_ASCII_STRING("SYS_PLATFORM"),
TRI_V8_ASCII_STRING(TRI_PLATFORM));

View File

@ -52,6 +52,11 @@ target_link_libraries(
target_include_directories(arangodbtests PRIVATE
${INCLUDE_DIRECTORIES}
)
# add these includes as system includes because otherwise
# the compiler will emit warnings for fakeit.hpp
target_include_directories(arangodbtests SYSTEM PRIVATE
${CMAKE_SOURCE_DIR}/3rdParty/catch
${CMAKE_SOURCE_DIR}/3rdParty/fakeit
)

View File

@ -1,3 +1,2 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include "fakeit.hpp"