1
0
Fork 0

Add engine specific collection APIs (#6977)

This commit is contained in:
Simon 2018-10-19 17:46:33 +02:00 committed by Jan
parent 042a95683a
commit 4c1e8819c2
33 changed files with 685 additions and 189 deletions

View File

@ -18,3 +18,6 @@ Modifying a Collection
<!-- js/actions/api-collection.js -->
@startDocuBlock put_api_collection_rotate
<!-- js/actions/api-collection.js -->
@startDocuBlock put_api_collection_recalculate_count

View File

@ -0,0 +1,29 @@
@startDocuBlock put_api_collection_recalculate_count
@brief recalculates the document count of a collection
@RESTHEADER{PUT /_api/collection/{collection-name}/recalculateCount, Recalculate count of a collection}
@RESTURLPARAMETERS
@RESTURLPARAM{collection-name,string,required}
The name of the collection.
@RESTDESCRIPTION
Recalculates the document count of a collection, if it ever becomes inconsistent.
It returns an object with the attributes
- *result*: will be *true* if recalculating the document count succeeded.
**Note**: this method is specific to the RocksDB storage engine
@RESTRETURNCODES
@RESTRETURNCODE{200}
If the document count was recalculated successfully, *HTTP 200* is returned.
@RESTRETURNCODE{404}
If the *collection-name* is unknown, then a *HTTP 404* is returned.
@endDocuBlock

View File

@ -1513,67 +1513,6 @@ int truncateCollectionOnCoordinator(std::string const& dbname,
return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief rotate the active journals for the collection on all DBServers
///
/// Coordinator-side fan-out: sends an asynchronous
/// PUT /_api/collection/<shard>/rotate to every server responsible for any
/// shard of the collection, then waits for all answers. Returns
/// TRI_ERROR_NO_ERROR only if every contacted server answered with HTTP 200.
////////////////////////////////////////////////////////////////////////////////
int rotateActiveJournalOnAllDBServers(std::string const& dbname,
std::string const& collname) {
// Set a few variables needed for our work:
ClusterInfo* ci = ClusterInfo::instance();
auto cc = ClusterComm::instance();
if (cc == nullptr) {
// nullptr happens only during controlled shutdown
return TRI_ERROR_SHUTTING_DOWN;
}
// First determine the collection ID from the name:
std::shared_ptr<LogicalCollection> collinfo;
try {
// getCollection throws if the database/collection is unknown
collinfo = ci->getCollection(dbname, collname);
} catch (...) {
return TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND;
}
TRI_ASSERT(collinfo != nullptr);
// Some stuff to prepare cluster-intern requests:
// We have to contact everybody:
unsigned int expected = 0;
auto shards = collinfo->shardIds();
CoordTransactionID coordTransactionID = TRI_NewTickServer();
std::unordered_map<std::string, std::string> headers;
// fire off one asynchronous rotate request per (shard, responsible server)
// pair; 600.0 is the per-request timeout in seconds
for (auto const& p : *shards) {
auto serverList = ci->getResponsibleServer(p.first);
for (auto& s : *serverList) {
cc->asyncRequest(coordTransactionID, "server:" + s,
arangodb::rest::RequestType::PUT,
"/_db/" + StringUtils::urlEncode(dbname) +
"/_api/collection/" + p.first + "/rotate",
std::shared_ptr<std::string>(), headers, nullptr, 600.0);
++expected;
}
}
// Now listen to the results:
unsigned int nrok = 0;
for (unsigned int count = expected; count > 0; count--) {
auto res = cc->wait(coordTransactionID, 0, "", 0.0);
if (res.status == CL_COMM_RECEIVED) {
if (res.answer_code == arangodb::rest::ResponseCode::OK) {
nrok++;
}
}
}
// nrok counts the requests answered with HTTP 200; the operation only
// succeeds if every contacted server reported success
if (nrok < expected) {
return TRI_ERROR_FAILED;
}
return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief get a document in a coordinator
////////////////////////////////////////////////////////////////////////////////

View File

@ -8,6 +8,7 @@ set(CLUSTER_ENGINE_SOURCES
ClusterEngine/ClusterEngine.cpp
ClusterEngine/ClusterIndex.cpp
ClusterEngine/ClusterIndexFactory.cpp
ClusterEngine/ClusterRestCollectionHandler.cpp
ClusterEngine/ClusterRestExportHandler.cpp
ClusterEngine/ClusterRestHandlers.cpp
ClusterEngine/ClusterRestReplicationHandler.cpp
@ -17,5 +18,7 @@ set(CLUSTER_ENGINE_SOURCES
ClusterEngine/ClusterTransactionContextData.h
ClusterEngine/ClusterTransactionState.cpp
ClusterEngine/ClusterV8Functions.cpp
ClusterEngine/MMFilesMethods.cpp
ClusterEngine/RocksDBMethods.cpp
)
set(CLUSTER_ENGINE_SOURCES ${CLUSTER_ENGINE_SOURCES} PARENT_SCOPE)

View File

@ -169,11 +169,10 @@ bool ClusterIndex::isSorted() const {
void ClusterIndex::updateProperties(velocypack::Slice const& slice) {
VPackBuilder merge;
merge.openObject();
ClusterEngine* ce =
static_cast<ClusterEngine*>(EngineSelectorFeature::ENGINE);
if (ce->isMMFiles()) {
if (_engineType == ClusterEngineType::MMFilesEngine) {
// nothing to update here
} else if (ce->isRocksDB()) {
} else if (_engineType == ClusterEngineType::RocksDBEngine) {
merge.add("cacheEnabled", VPackValue(Helper::readBooleanValue(
slice, "cacheEnabled", false)));

View File

@ -0,0 +1,55 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#include "ClusterRestCollectionHandler.h"
#include "ClusterEngine/MMFilesMethods.h"
#include "ClusterEngine/RocksDBMethods.h"
#include "VocBase/LogicalCollection.h"
using namespace arangodb;
ClusterRestCollectionHandler::ClusterRestCollectionHandler(GeneralRequest* request,
                                                           GeneralResponse* response)
    : RestCollectionHandler(request, response) {}

/// Coordinator side of the engine specific PUT sub-commands: forwards the
/// command to all DBServers and reports {"result": true} on success.
Result ClusterRestCollectionHandler::handleExtraCommandPut(LogicalCollection& coll,
                                                           std::string const& suffix,
                                                           velocypack::Builder& builder) {
  Result res;
  if (suffix == "recalculateCount") {
    // RocksDB specific: recount documents on every DBServer
    res = arangodb::rocksdb::recalculateCountsOnAllDBServers(_vocbase.name(), coll.name());
  } else if (suffix == "rotate") {
    // MMFiles specific: rotate the active journals on every DBServer
    res = arangodb::mmfiles::rotateActiveJournalOnAllDBServers(_vocbase.name(), coll.name());
  } else {
    // unknown sub-command; the generic handler produces the error response
    return TRI_ERROR_NOT_IMPLEMENTED;
  }
  if (res.ok()) {
    VPackObjectBuilder guard(&builder);
    builder.add("result", VPackValue(true));
  }
  return res;
}

View File

@ -0,0 +1,40 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_CLUSTER_CLUSTER_REST_COLLECTION_HANDLER_H
#define ARANGOD_CLUSTER_CLUSTER_REST_COLLECTION_HANDLER_H 1
#include "RestHandler/RestCollectionHandler.h"
namespace arangodb {
/// @brief coordinator variant of the /_api/collection handler; implements the
/// engine specific PUT sub-commands by forwarding them to the DBServers
class ClusterRestCollectionHandler : public arangodb::RestCollectionHandler {
public:
ClusterRestCollectionHandler(GeneralRequest*, GeneralResponse*);
protected:
/// @brief handle an engine specific PUT sub-command ("recalculateCount",
/// "rotate"); returns TRI_ERROR_NOT_IMPLEMENTED for unknown commands
Result handleExtraCommandPut(LogicalCollection& coll, std::string const& command,
velocypack::Builder& builder) override final;
};
}
#endif

View File

@ -34,6 +34,6 @@ ClusterRestExportHandler::ClusterRestExportHandler(GeneralRequest* request,
RestStatus ClusterRestExportHandler::execute() {
generateError(rest::ResponseCode::NOT_IMPLEMENTED,
TRI_ERROR_CLUSTER_UNSUPPORTED,
"'/_api/export' is not yet supported in a cluster");
"'/_api/export' is not supported in a cluster");
return RestStatus::DONE;
}

View File

@ -23,6 +23,7 @@
#include "ClusterRestHandlers.h"
#include "GeneralServer/RestHandlerFactory.h"
#include "RestHandler/RestHandlerCreator.h"
#include "ClusterEngine/ClusterRestCollectionHandler.h"
#include "ClusterEngine/ClusterRestExportHandler.h"
#include "ClusterEngine/ClusterRestReplicationHandler.h"
#include "ClusterEngine/ClusterRestWalHandler.h"
@ -31,13 +32,11 @@ using namespace arangodb;
void ClusterRestHandlers::registerResources(
rest::RestHandlerFactory* handlerFactory) {
handlerFactory->addPrefixHandler( "/_api/export",
handlerFactory->addPrefixHandler(RestVocbaseBaseHandler::COLLECTION_PATH,
RestHandlerCreator<ClusterRestCollectionHandler>::createNoData);
handlerFactory->addPrefixHandler("/_api/export",
RestHandlerCreator<ClusterRestExportHandler>::createNoData);
handlerFactory->addPrefixHandler(
"/_api/replication",
handlerFactory->addPrefixHandler("/_api/replication",
RestHandlerCreator<ClusterRestReplicationHandler>::createNoData);
handlerFactory->addPrefixHandler("/_admin/wal", RestHandlerCreator<ClusterRestWalHandler>::createNoData);
}

View File

@ -0,0 +1,88 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#include "MMFilesMethods.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterInfo.h"
#include "ClusterEngine/ClusterEngine.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "VocBase/LogicalCollection.h"
namespace arangodb {
namespace mmfiles {
////////////////////////////////////////////////////////////////////////////////
/// @brief rotate the active journals for the collection on all DBServers
///
/// Coordinator-side helper: issues one PUT .../rotate request per
/// (shard, responsible server) pair and succeeds only if every request
/// is answered successfully.
////////////////////////////////////////////////////////////////////////////////
int rotateActiveJournalOnAllDBServers(std::string const& dbname,
                                      std::string const& collname) {
  auto* engine = static_cast<ClusterEngine*>(EngineSelectorFeature::ENGINE);
  if (!engine->isMMFiles()) {
    // journal rotation only exists in the MMFiles engine
    return TRI_ERROR_NOT_IMPLEMENTED;
  }

  ClusterInfo* clusterInfo = ClusterInfo::instance();
  auto comm = ClusterComm::instance();
  if (comm == nullptr) {
    // nullptr happens only during controlled shutdown
    return TRI_ERROR_SHUTTING_DOWN;
  }

  // resolve the collection by name
  std::shared_ptr<LogicalCollection> collection;
  try {
    collection = clusterInfo->getCollection(dbname, collname);
  } catch (...) {
    return TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND;
  }
  TRI_ASSERT(collection != nullptr);

  std::string const prefix =
      "/_db/" + basics::StringUtils::urlEncode(dbname) + "/_api/collection/";
  std::shared_ptr<std::string> emptyBody;

  // build one request per (shard, responsible server) pair, covering both
  // leaders and followers
  std::vector<ClusterCommRequest> requests;
  std::shared_ptr<ShardMap> shards = collection->shardIds();
  for (auto const& entry : *shards) {
    for (ServerID const& srv : entry.second) {
      std::string url = prefix + basics::StringUtils::urlEncode(entry.first) + "/rotate";
      requests.emplace_back("server:" + srv, arangodb::rest::RequestType::PUT,
                            std::move(url), emptyBody);
    }
  }

  size_t done = 0;
  size_t good = comm->performRequests(requests, 600.0, done, Logger::ENGINES, false);
  if (good < requests.size()) {
    return TRI_ERROR_FAILED;
  }
  return TRI_ERROR_NO_ERROR;
}
}}

View File

@ -0,0 +1,38 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_CLUSTER_ENGINE_MMFILES_METHODS_H
#define ARANGOD_CLUSTER_ENGINE_MMFILES_METHODS_H 1
#include <string>
namespace arangodb {
namespace mmfiles {
////////////////////////////////////////////////////////////////////////////////
/// @brief rotate the active journals for the collection on all DBServers
///
/// @param dbname   name of the database the collection lives in
/// @param collname name of the collection whose journals are rotated
/// @return a TRI_ERROR_* status code; TRI_ERROR_NO_ERROR on success
////////////////////////////////////////////////////////////////////////////////
int rotateActiveJournalOnAllDBServers(std::string const& dbname,
std::string const& collname);
}}
#endif

View File

@ -0,0 +1,88 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#include "RocksDBMethods.h"
#include "Basics/StringUtils.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterInfo.h"
#include "ClusterEngine/ClusterEngine.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "VocBase/LogicalCollection.h"
namespace arangodb {
namespace rocksdb {
////////////////////////////////////////////////////////////////////////////////
/// @brief recalculate the collection count on all DBServers
///
/// Coordinator-side fan-out: issues one PUT .../recalculateCount request per
/// (shard, responsible server) pair and succeeds only if every request is
/// answered successfully.
////////////////////////////////////////////////////////////////////////////////
Result recalculateCountsOnAllDBServers(std::string const& dbname,
std::string const& collname) {
ClusterEngine* ce = static_cast<ClusterEngine*>(EngineSelectorFeature::ENGINE);
if (!ce->isRocksDB()) {
// count recalculation only exists in the RocksDB engine
return TRI_ERROR_NOT_IMPLEMENTED;
}
// Set a few variables needed for our work:
ClusterInfo* ci = ClusterInfo::instance();
auto cc = ClusterComm::instance();
if (cc == nullptr) {
// nullptr happens only during controlled shutdown
return TRI_ERROR_SHUTTING_DOWN;
}
// First determine the collection ID from the name:
std::shared_ptr<LogicalCollection> collinfo;
try {
collinfo = ci->getCollection(dbname, collname);
} catch (...) {
return TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND;
}
TRI_ASSERT(collinfo != nullptr);
std::string const baseUrl = "/_db/" + basics::StringUtils::urlEncode(dbname)
+ "/_api/collection/";
std::shared_ptr<std::string> body;
// now we notify all leader and follower shards
std::shared_ptr<ShardMap> shardList = collinfo->shardIds();
std::vector<ClusterCommRequest> requests;
for (auto const& shard : *shardList) {
for (ServerID const& server : shard.second) {
std::string uri = baseUrl + basics::StringUtils::urlEncode(shard.first) + "/recalculateCount";
requests.emplace_back("server:" + server, arangodb::rest::RequestType::PUT,
std::move(uri), body);
}
}
// fire off all requests and wait up to 600 seconds for the answers;
// the operation only succeeds if every request was answered successfully
size_t nrDone = 0;
size_t nrGood = cc->performRequests(requests, 600.0, nrDone, Logger::ENGINES, false);
if (nrGood < requests.size()) {
return TRI_ERROR_FAILED;
}
return TRI_ERROR_NO_ERROR;
}
}}

View File

@ -0,0 +1,44 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_CLUSTER_ENGINE_ROCKSDB_METHODS_H
#define ARANGOD_CLUSTER_ENGINE_ROCKSDB_METHODS_H 1

// include what we use: the declarations below take std::string parameters
// (previously only available via a transitive include of Basics/Result.h)
#include <string>

#include "Basics/Result.h"

namespace arangodb {
namespace rocksdb {

////////////////////////////////////////////////////////////////////////////////
/// @brief recalculate collection count on all DBServers
///
/// @param dbname   name of the database the collection lives in
/// @param collname name of the collection whose counts are recalculated
/// @return ok() on success; an error Result otherwise
////////////////////////////////////////////////////////////////////////////////
Result recalculateCountsOnAllDBServers(std::string const& dbname,
                                       std::string const& collname);

////////////////////////////////////////////////////////////////////////////////
/// @brief wait for estimator sync on all DBServers
////////////////////////////////////////////////////////////////////////////////
Result waitForEstimatorSync();

}}  // namespace arangodb::rocksdb
#endif

View File

@ -336,10 +336,6 @@ void GeneralServerFeature::defineHandlers() {
RestVocbaseBaseHandler::BATCH_PATH,
RestHandlerCreator<RestBatchHandler>::createNoData);
_handlerFactory->addPrefixHandler(
RestVocbaseBaseHandler::COLLECTION_PATH,
RestHandlerCreator<RestCollectionHandler>::createNoData);
_handlerFactory->addPrefixHandler(
RestVocbaseBaseHandler::CONTROL_PREGEL_PATH,
RestHandlerCreator<RestControlPregelHandler>::createNoData);

View File

@ -34,6 +34,7 @@ set(MMFILES_SOURCES
MMFiles/MMFilesPersistentIndexKeyComparator.cpp
MMFiles/MMFilesPrimaryIndex.cpp
MMFiles/MMFilesRemoverThread.cpp
MMFiles/MMFilesRestCollectionHandler.cpp
MMFiles/MMFilesRestExportHandler.cpp
MMFiles/MMFilesRestHandlers.cpp
MMFiles/MMFilesRestReplicationHandler.cpp

View File

@ -135,7 +135,7 @@ class MMFilesCollection final : public PhysicalCollection {
std::vector<MMFilesDatafile*>&& compactors);
/// @brief rotate the active journal - will do nothing if there is no journal
int rotateActiveJournal() override;
int rotateActiveJournal();
/// @brief sync the active journal - will do nothing if there is no journal
/// or if the journal is volatile

View File

@ -0,0 +1,69 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#include "MMFilesRestCollectionHandler.h"
#include "MMFiles/MMFilesCollection.h"
#include "Transaction/StandaloneContext.h"
#include "Utils/SingleCollectionTransaction.h"
using namespace arangodb;
MMFilesRestCollectionHandler::MMFilesRestCollectionHandler(GeneralRequest* request,
                                                           GeneralResponse* response)
    : RestCollectionHandler(request, response) {}

/// Handles the MMFiles specific PUT sub-command "rotate": rotates the active
/// journal of the collection inside a single-collection write transaction.
Result MMFilesRestCollectionHandler::handleExtraCommandPut(LogicalCollection& coll,
                                                           std::string const& command,
                                                           velocypack::Builder& builder) {
  if (command != "rotate") {
    // unknown sub-command; the generic handler produces the error response
    return TRI_ERROR_NOT_IMPLEMENTED;
  }

  auto ctx = transaction::StandaloneContext::Create(_vocbase);
  SingleCollectionTransaction trx(ctx, coll, AccessMode::Type::WRITE);
  Result result = trx.begin();
  if (result.ok()) {
    auto* physical = static_cast<MMFilesCollection*>(coll.getPhysical());
    try {
      result = physical->rotateActiveJournal();
    } catch (basics::Exception const& ex) {
      result.reset(ex.code(), ex.what());
    } catch (std::exception const& ex) {
      result.reset(TRI_ERROR_INTERNAL, ex.what());
    }
    // commit or abort depending on the rotation result
    result = trx.finish(result);
  }
  if (result.ok()) {
    builder.openObject();
    builder.add("result", VPackValue(true));
    builder.close();
  }
  return result;
}

View File

@ -0,0 +1,40 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_MMFILES_MMFILES_REST_COLLECTION_HANDLER_H
#define ARANGOD_MMFILES_MMFILES_REST_COLLECTION_HANDLER_H 1
#include "RestHandler/RestCollectionHandler.h"
namespace arangodb {
/// @brief MMFiles variant of the /_api/collection handler; adds the engine
/// specific PUT sub-command "rotate"
class MMFilesRestCollectionHandler : public arangodb::RestCollectionHandler {
public:
MMFilesRestCollectionHandler(GeneralRequest*, GeneralResponse*);
protected:
/// @brief handle an engine specific PUT sub-command; returns
/// TRI_ERROR_NOT_IMPLEMENTED for unknown commands
Result handleExtraCommandPut(LogicalCollection& coll, std::string const& command,
velocypack::Builder& builder) override final;
};
}
#endif

View File

@ -23,6 +23,7 @@
#include "MMFilesRestHandlers.h"
#include "GeneralServer/RestHandlerFactory.h"
#include "MMFiles/MMFilesRestCollectionHandler.h"
#include "MMFiles/MMFilesRestExportHandler.h"
#include "MMFiles/MMFilesRestReplicationHandler.h"
#include "MMFiles/MMFilesRestWalHandler.h"
@ -32,6 +33,8 @@ using namespace arangodb;
void MMFilesRestHandlers::registerResources(
rest::RestHandlerFactory* handlerFactory) {
handlerFactory->addPrefixHandler(RestVocbaseBaseHandler::COLLECTION_PATH,
RestHandlerCreator<MMFilesRestCollectionHandler>::createNoData);
handlerFactory->addPrefixHandler(
"/_admin/wal", RestHandlerCreator<MMFilesRestWalHandler>::createNoData);

View File

@ -72,10 +72,8 @@ static void JS_RotateVocbaseCol(
TRI_V8_THROW_EXCEPTION(res);
}
OperationResult result =
trx.rotateActiveJournal(collection->name(), OperationOptions());
res.reset(result.result);
MMFilesCollection* mcoll = static_cast<MMFilesCollection*>(collection->getPhysical());
res.reset(mcoll->rotateActiveJournal());
trx.finish(res);
if (!res.ok()) {

View File

@ -453,23 +453,6 @@ void RestCollectionHandler::handleCommandPut() {
/*showFigures*/ false, /*showCount*/ false,
/*detailedCount*/ true);
}
} else if (sub == "rotate") {
auto ctx = transaction::StandaloneContext::Create(_vocbase);
SingleCollectionTransaction trx(ctx, *coll, AccessMode::Type::WRITE);
res = trx.begin();
if (res.ok()) {
auto result =
trx.rotateActiveJournal(coll->name(), OperationOptions());
res = trx.finish(result.result);
}
builder.openObject();
builder.add("result", VPackValue(true));
builder.close();
} else if (sub == "loadIndexesIntoMemory") {
res = methods::Collections::warmup(_vocbase, *coll);
@ -477,9 +460,12 @@ void RestCollectionHandler::handleCommandPut() {
obj->add("result", VPackValue(res.ok()));
} else {
res.reset(TRI_ERROR_HTTP_NOT_FOUND,
"expecting one of the actions 'load', 'unload', 'truncate',"
" 'properties', 'rename', 'loadIndexesIntoMemory'");
res = handleExtraCommandPut(*coll, sub, builder);
if (res.is(TRI_ERROR_NOT_IMPLEMENTED)) {
res.reset(TRI_ERROR_HTTP_NOT_FOUND,
"expecting one of the actions 'load', 'unload', 'truncate',"
" 'properties', 'rename', 'loadIndexesIntoMemory'");
}
}
}
);

View File

@ -38,13 +38,10 @@ class RestCollectionHandler : public arangodb::RestVocbaseBaseHandler {
public:
char const* name() const override final { return "RestCollectionHandler"; }
RequestLane lane() const override final { return RequestLane::CLIENT_SLOW; }
RestStatus execute() override;
RestStatus execute() override final;
protected:
private:
void handleCommandGet();
void handleCommandPost();
void handleCommandPut();
void handleCommandDelete();
void collectionRepresentation(VPackBuilder& builder, std::string const& name,
bool showProperties, bool showFigures,
bool showCount, bool detailedCount);
@ -62,6 +59,15 @@ class RestCollectionHandler : public arangodb::RestVocbaseBaseHandler {
methods::Collections::Context& ctxt,
bool showProperties, bool showFigures,
bool showCount, bool detailedCount);
virtual Result handleExtraCommandPut(LogicalCollection& coll, std::string const& command,
velocypack::Builder& builder) = 0;
private:
void handleCommandGet();
void handleCommandPost();
void handleCommandPut();
void handleCommandDelete();
};
}

View File

@ -64,6 +64,7 @@ set(ROCKSDB_SOURCES
RocksDBEngine/RocksDBReplicationContext.cpp
RocksDBEngine/RocksDBReplicationManager.cpp
RocksDBEngine/RocksDBReplicationTailing.cpp
RocksDBEngine/RocksDBRestCollectionHandler.cpp
RocksDBEngine/RocksDBRestExportHandler.cpp
RocksDBEngine/RocksDBRestHandlers.cpp
RocksDBEngine/RocksDBRestReplicationHandler.cpp

View File

@ -0,0 +1,67 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#include "RocksDBRestCollectionHandler.h"
#include "ApplicationFeatures/ApplicationServer.h"
#include "RocksDBEngine/RocksDBCollection.h"
#include "VocBase/LogicalCollection.h"
using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::rest;
RocksDBRestCollectionHandler::RocksDBRestCollectionHandler(GeneralRequest* request,
                                                           GeneralResponse* response)
    : RestCollectionHandler(request, response) {}

/// Handles the RocksDB specific PUT sub-command "recalculateCount": recounts
/// the documents of the collection and reports the new count. Returns
/// TRI_ERROR_NOT_IMPLEMENTED for any other sub-command so the generic handler
/// can produce the proper "unknown action" error.
Result RocksDBRestCollectionHandler::handleExtraCommandPut(LogicalCollection& coll,
                                                           std::string const& suffix,
                                                           velocypack::Builder& builder) {
  if (suffix == "recalculateCount") {
    // recalculation modifies the stored counter, so require RW access
    if (ExecContext::CURRENT != nullptr) {
      if (!ExecContext::CURRENT->canUseCollection(coll.name(), auth::Level::RW)) {
        return Result(TRI_ERROR_FORBIDDEN);
      }
    }
    auto physical = toRocksDBCollection(coll.getPhysical());
    Result res;
    uint64_t count = 0;
    try {
      count = physical->recalculateCounts();
    } catch (basics::Exception const& e) {
      res.reset(e.code(), e.message());
    } catch (std::exception const& e) {
      // keep parity with the MMFiles handler: translate unexpected exceptions
      // into an internal error instead of letting them escape the handler
      res.reset(TRI_ERROR_INTERNAL, e.what());
    }
    if (res.ok()) {
      VPackObjectBuilder guard(&builder);
      builder.add("result", VPackValue(true));
      builder.add("count", VPackValue(count));
    }
    return res;
  }
  return TRI_ERROR_NOT_IMPLEMENTED;
}

View File

@ -0,0 +1,40 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_ROCKSDB_ROCKSDB_REST_COLLECTION_HANDLER_H
#define ARANGOD_ROCKSDB_ROCKSDB_REST_COLLECTION_HANDLER_H 1
#include "RestHandler/RestCollectionHandler.h"
namespace arangodb {
/// @brief RocksDB variant of the /_api/collection handler; adds the engine
/// specific PUT sub-command "recalculateCount"
class RocksDBRestCollectionHandler : public arangodb::RestCollectionHandler {
public:
RocksDBRestCollectionHandler(GeneralRequest*, GeneralResponse*);
protected:
/// @brief handle an engine specific PUT sub-command; returns
/// TRI_ERROR_NOT_IMPLEMENTED for unknown commands
Result handleExtraCommandPut(LogicalCollection& coll, std::string const& command,
velocypack::Builder& builder) override final;
};
}
#endif

View File

@ -27,6 +27,7 @@
#include "GeneralServer/RestHandlerFactory.h"
#include "RestHandler/RestHandlerCreator.h"
#include "RestServer/QueryRegistryFeature.h"
#include "RocksDBEngine/RocksDBRestCollectionHandler.h"
#include "RocksDBEngine/RocksDBRestExportHandler.h"
#include "RocksDBEngine/RocksDBRestReplicationHandler.h"
#include "RocksDBEngine/RocksDBRestWalHandler.h"
@ -35,17 +36,12 @@ using namespace arangodb;
void RocksDBRestHandlers::registerResources(
rest::RestHandlerFactory* handlerFactory) {
handlerFactory->addPrefixHandler(RestVocbaseBaseHandler::COLLECTION_PATH,
RestHandlerCreator<RocksDBRestCollectionHandler>::createNoData);
auto queryRegistry = QueryRegistryFeature::QUERY_REGISTRY.load();
handlerFactory->addPrefixHandler(
"/_api/export",
RestHandlerCreator<RocksDBRestExportHandler>::createData<aql::QueryRegistry*>,
queryRegistry);
handlerFactory->addPrefixHandler(
"/_api/replication",
RestHandlerCreator<RocksDBRestReplicationHandler>::createNoData);
handlerFactory->addPrefixHandler(
"/_admin/wal", RestHandlerCreator<RocksDBRestWalHandler>::createNoData);
handlerFactory->addPrefixHandler("/_api/export",
RestHandlerCreator<RocksDBRestExportHandler>::createData<aql::QueryRegistry*>, queryRegistry);
handlerFactory->addPrefixHandler("/_api/replication",
RestHandlerCreator<RocksDBRestReplicationHandler>::createNoData);
handlerFactory->addPrefixHandler("/_admin/wal", RestHandlerCreator<RocksDBRestWalHandler>::createNoData);
}

View File

@ -30,6 +30,7 @@
#include "RocksDBEngine/RocksDBCommon.h"
#include "RocksDBEngine/RocksDBEngine.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "Utils/ExecContext.h"
#include "V8/v8-conv.h"
#include "V8/v8-globals.h"
#include "V8/v8-utils.h"
@ -140,6 +141,13 @@ static void JS_RecalculateCounts(
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
}
if (ExecContext::CURRENT != nullptr) {
if (!ExecContext::CURRENT->canUseCollection(collection->name(), auth::Level::RW)) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_FORBIDDEN);
}
}
auto* physical = toRocksDBCollection(*collection);
v8::Handle<v8::Value> result = v8::Number::New(

View File

@ -71,9 +71,6 @@ class PhysicalCollection {
virtual void load() = 0;
virtual void unload() = 0;
/// @brief rotate the active journal - will do nothing if there is no journal
virtual int rotateActiveJournal() { return TRI_ERROR_NO_ERROR; }
// @brief Return the number of documents in this collection
virtual uint64_t numberDocuments(transaction::Methods* trx) const = 0;

View File

@ -2439,49 +2439,6 @@ OperationResult transaction::Methods::truncateLocal(
return OperationResult(res);
}
/// @brief rotate all active journals of a collection
/// Dispatches to the coordinator or local variant depending on where this
/// transaction is running. Requires the transaction to be in RUNNING state
/// (enforced by the assertion below).
OperationResult transaction::Methods::rotateActiveJournal(
std::string const& collectionName, OperationOptions const& options) {
TRI_ASSERT(_state->status() == transaction::Status::RUNNING);
OperationResult result;
// on a coordinator the rotation must be fanned out to the DB servers
if (_state->isCoordinator()) {
result = rotateActiveJournalCoordinator(collectionName, options);
} else {
result = rotateActiveJournalLocal(collectionName, options);
}
return result;
}
/// @brief rotate the journal of a collection (coordinator variant)
/// Forwards the rotation request to all DB servers holding the collection's
/// shards and wraps the resulting error code in an OperationResult.
/// NOTE(review): `options` is accepted but unused here.
OperationResult transaction::Methods::rotateActiveJournalCoordinator(
std::string const& collectionName, OperationOptions const& options) {
return OperationResult(
rotateActiveJournalOnAllDBServers(vocbase().name(), collectionName)
);
}
/// @brief rotate the journal of a collection (single-server / DB-server variant)
/// Resolves the collection within this transaction and asks its physical
/// collection to rotate the active journal. Storage-engine exceptions are
/// translated into error OperationResults instead of being allowed to escape.
/// NOTE(review): `options` is accepted but unused here.
OperationResult transaction::Methods::rotateActiveJournalLocal(
std::string const& collectionName, OperationOptions const& options) {
// registers the collection with the running transaction if necessary
TRI_voc_cid_t cid = addCollectionAtRuntime(collectionName);
LogicalCollection* collection = documentCollection(trxCollection(cid));
Result res;
try {
res.reset(collection->getPhysical()->rotateActiveJournal());
} catch (basics::Exception const& ex) {
// preserve the engine-specific error code
return OperationResult(Result(ex.code(), ex.what()));
} catch (std::exception const& ex) {
return OperationResult(Result(TRI_ERROR_INTERNAL, ex.what()));
}
return OperationResult(res);
}
/// @brief count the number of documents in a collection
OperationResult transaction::Methods::count(std::string const& collectionName,
transaction::CountType type) {

View File

@ -324,10 +324,6 @@ class Methods {
OperationResult truncate(std::string const& collectionName,
OperationOptions const& options);
/// @brief rotate all active journals of the collection
OperationResult rotateActiveJournal(std::string const& collectionName,
OperationOptions const& options);
/// @brief count the number of documents in a collection
virtual OperationResult count(std::string const& collectionName, CountType type);
@ -513,10 +509,6 @@ class Methods {
OperationResult rotateActiveJournalCoordinator(std::string const& collectionName,
OperationOptions const& options);
OperationResult rotateActiveJournalLocal(std::string const& collectionName,
OperationOptions const& options);
protected:
OperationResult countCoordinator(std::string const& collectionName,

View File

@ -405,6 +405,19 @@ ArangoCollection.prototype.rotate = function () {
return requestResult.result;
};
// //////////////////////////////////////////////////////////////////////////////
// / @brief recalculates the document count of a collection
// //////////////////////////////////////////////////////////////////////////////
ArangoCollection.prototype.recalculateCount = function () {
  var response = this._database._connection.PUT(
    this._baseurl('recalculateCount'), null);
  arangosh.checkRequestResult(response);
  return response.result;
};
// //////////////////////////////////////////////////////////////////////////////
// / @brief gets the figures of a collection
// //////////////////////////////////////////////////////////////////////////////

View File

@ -123,7 +123,7 @@ function getApplierState(endpoint) {
});
assertTrue(res instanceof request.Response);
assertTrue(res.hasOwnProperty('statusCode'));
assertEqual(res.statusCode, 200);
assertEqual(res.statusCode, 200, JSON.stringify(res));
assertTrue(res.hasOwnProperty('json'));
return arangosh.checkRequestResult(res.json);
}
@ -166,8 +166,8 @@ function checkData(server) {
});
assertTrue(res instanceof request.Response);
//assertTrue(res.hasOwnProperty('statusCode'));
assertTrue(res.statusCode === 200);
assertTrue(res.hasOwnProperty('statusCode'));
assertEqual(res.statusCode, 200, JSON.stringify(res));
return res.json.count;
}
@ -280,8 +280,8 @@ function ActiveFailoverSuite() {
assertTrue(checkInSync(currentLead, servers));
let endpoints = getClusterEndpoints();
assertTrue(endpoints.length === servers.length);
assertTrue(endpoints[0] === currentLead);
assertEqual(endpoints.length, servers.length);
assertEqual(endpoints[0], currentLead);
},
// Basic test if followers get in sync
@ -340,8 +340,8 @@ function ActiveFailoverSuite() {
// we assume the second leader is still the leader
let endpoints = getClusterEndpoints();
assertTrue(endpoints.length === servers.length);
assertTrue(endpoints[0] === currentLead);
assertEqual(endpoints.length, servers.length);
assertEqual(endpoints[0], currentLead);
print("Starting data creation task on ", currentLead, " (expect it to fail later)");
connectToServer(currentLead);
@ -405,7 +405,7 @@ function ActiveFailoverSuite() {
// await failover and check that follower get in sync
let oldLead = currentLead;
currentLead = checkForFailover(currentLead);
assertTrue(currentLead === nextLead, "Did not fail to best in-sync follower");
assertEqual(currentLead, nextLead, "Did not fail to best in-sync follower");
internal.wait(5); // settle down, heartbeat interval is 1s
let cc = checkData(currentLead);
@ -446,7 +446,7 @@ function ActiveFailoverSuite() {
// await failover and check that follower get in sync
currentLead = checkForFailover(currentLead);
assertTrue(currentLead === firstLeader, "Did not fail to original leader");
assertEqual(currentLead, firstLeader, "Did not fail to original leader");
suspended.forEach(arangod => {
print("Resuming: ", arangod.endpoint);

View File

@ -169,7 +169,7 @@ function checkData(server) {
assertTrue(res instanceof request.Response);
//assertTrue(res.hasOwnProperty('statusCode'));
assertTrue(res.statusCode === 200);
assertEqual(res.statusCode, 200);
return res.json.count;
}
@ -258,11 +258,12 @@ function setReadOnly(endpoint, ro) {
print(JSON.stringify(res));
assertTrue(res instanceof request.Response);
assertTrue(res.hasOwnProperty('statusCode') && res.statusCode === 200);
assertTrue(res.hasOwnProperty('statusCode'));
assertEqual(res.statusCode, 200, JSON.stringify(res));
assertTrue(res.hasOwnProperty('json'));
let json = arangosh.checkRequestResult(res.json);
assertTrue(json.hasOwnProperty('mode'));
assertTrue(json.mode === (ro ? "readonly" : "default"));
assertEqual(json.mode, (ro ? "readonly" : "default"));
}
// Testsuite that checks the read-only mode in the context
@ -306,8 +307,8 @@ function ActiveFailoverSuite() {
assertTrue(checkInSync(currentLead, servers));
let endpoints = getClusterEndpoints();
assertTrue(endpoints.length === servers.length);
assertTrue(endpoints[0] === currentLead);
assertEqual(endpoints.length, servers.length);
assertEqual(endpoints[0], currentLead);
},
testReadFromLeader: function () {