1
0
Fork 0

Merge branch 'devel' of github.com:arangodb/arangodb into feature/use-open-addressing-hash

This commit is contained in:
jsteemann 2019-09-17 11:06:39 +02:00
commit e3a417ba39
21 changed files with 219 additions and 202 deletions

View File

@@ -1232,10 +1232,12 @@ add_subdirectory(arangod)
if (USE_GOOGLE_TESTS)
add_subdirectory(tests)
endif()
endif ()
add_dependencies(arangobench zlibstatic)
add_dependencies(arangobackup zlibstatic)
if (USE_ENTERPRISE)
add_dependencies(arangobackup zlibstatic)
endif ()
add_dependencies(arangod zlibstatic)
add_dependencies(arangodump zlibstatic)
add_dependencies(arangoexport zlibstatic)
@ -1243,21 +1245,26 @@ add_dependencies(arangoimport zlibstatic)
add_dependencies(arangorestore zlibstatic)
add_dependencies(arangosh zlibstatic)
if(UNIX)
if (UNIX)
add_dependencies(arangobench man)
add_dependencies(arangobackup man)
if (USE_ENTERPRISE)
add_dependencies(arangobackup man)
endif ()
add_dependencies(arangod man)
add_dependencies(arangodump man)
add_dependencies(arangoexport man)
add_dependencies(arangoimport man)
add_dependencies(arangorestore man)
add_dependencies(arangosh man)
endif()
endif ()
if (NOT USE_PRECOMPILED_V8)
# all binaries depend on v8_build because it contains ICU as well
add_dependencies(arangobench v8_build)
add_dependencies(arangobackup v8_build)
if (USE_ENTERPRISE)
add_dependencies(arangobackup v8_build)
endif ()
add_dependencies(arangod v8_build)
add_dependencies(arangodump v8_build)
add_dependencies(arangoexport v8_build)
@ -1266,7 +1273,7 @@ if (NOT USE_PRECOMPILED_V8)
add_dependencies(arangosh v8_build)
if (USE_GOOGLE_TESTS)
add_dependencies(arangodbtests v8_build)
endif()
endif ()
endif ()
add_custom_target(packages
@ -1279,11 +1286,11 @@ add_custom_target(copy_packages
add_custom_target(clean_packages
DEPENDS ${CLEAN_PACKAGES_LIST}
)
)
add_custom_target(clean_autogenerated_files
DEPENDS ${CLEAN_AUTOGENERATED_FILES}
)
)
message(STATUS "building for git revision: ${ARANGODB_BUILD_REPOSITORY}")

View File

@ -1,3 +1,5 @@
STARTER_REV "0.14.12"
SYNCER_REV "0.6.5"
GCC_LINUX "8.3.0"
MSVC_WINDOWS "2019"
USE_RCLONE "true"

View File

@@ -351,30 +351,6 @@ static void mergeResultsAllShards(std::vector<VPackSlice> const& results,
}
}
////////////////////////////////////////////////////////////////////////////////
/// @brief Extract all baby-style error codes from the response headers of a
/// cluster-comm result and accumulate their occurrence counts in errorCounter.
/// When includeNotFound is false, DOCUMENT_NOT_FOUND codes are not counted.
////////////////////////////////////////////////////////////////////////////////
static void extractErrorCodes(ClusterCommResult const& res,
                              std::unordered_map<int, size_t>& errorCounter,
                              bool includeNotFound) {
  auto const& headers = res.answer->headers();
  auto it = headers.find(StaticStrings::ErrorCodes);
  if (it == headers.end()) {
    // no error-codes header present; nothing to collect
    return;
  }
  auto parsed = VPackParser::fromJson(it->second);
  VPackSlice slice = parsed->slice();
  TRI_ASSERT(slice.isObject());
  // the header value is a JSON object mapping error code -> count
  for (auto const& entry : VPackObjectIterator(slice)) {
    VPackValueLength len;
    char const* p = entry.key.getString(len);
    int errorNum = NumberUtils::atoi_zero<int>(p, p + len);
    bool const skip =
        !includeNotFound && errorNum == TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND;
    if (!skip) {
      errorCounter[errorNum] += entry.value.getNumericValue<size_t>();
    }
  }
}
////////////////////////////////////////////////////////////////////////////////
/// @brief Distribute one document onto a shard map. If this returns
/// TRI_ERROR_NO_ERROR the correct shard could be determined, if
@ -510,47 +486,6 @@ static int distributeBabyOnShards(
return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief Collect the results from all shards (fastpath variant)
/// All result bodies are stored in resultMap, keyed by shard id.
/// @param shardMap      documents that were sent, per shard; used to
///                      synthesize one error object per sent document when a
///                      shard did not answer at all
/// @param requests      the cluster-comm requests that were issued
/// @param errorCounter  accumulates baby-style error codes across all shards
/// @param resultMap     out-parameter: one velocypack body per shard
/// @param responseCode  out-parameter: answer code of the last shard that
///                      responded; SERVER_ERROR if none responded
////////////////////////////////////////////////////////////////////////////////
template <typename T>
static void collectResultsFromAllShards(
std::unordered_map<ShardID, std::vector<T>> const& shardMap,
std::vector<ClusterCommRequest>& requests, std::unordered_map<int, size_t>& errorCounter,
std::unordered_map<ShardID, std::shared_ptr<VPackBuilder>>& resultMap,
rest::ResponseCode& responseCode) {
// If none of the shards responds we return a SERVER_ERROR;
responseCode = rest::ResponseCode::SERVER_ERROR;
for (auto const& req : requests) {
auto res = req.result;
int commError = handleGeneralCommErrors(&res);
if (commError != TRI_ERROR_NO_ERROR) {
auto tmpBuilder = std::make_shared<VPackBuilder>();
// If there was no answer whatsoever, we cannot rely on the shardId
// being present in the result struct:
// strip the "shard:" prefix from the destination to recover the shard id
ShardID sId = req.destination.substr(6);
auto weSend = shardMap.find(sId);
TRI_ASSERT(weSend != shardMap.end());  // We send sth there earlier.
size_t count = weSend->second.size();
// fabricate one {error: true, errorNum: <commError>} object per document
// we sent to this shard, so callers see a per-document result
for (size_t i = 0; i < count; ++i) {
tmpBuilder->openObject();
tmpBuilder->add(StaticStrings::Error, VPackValue(true));
tmpBuilder->add(StaticStrings::ErrorNum, VPackValue(commError));
tmpBuilder->close();
}
resultMap.emplace(sId, tmpBuilder);
} else {
TRI_ASSERT(res.answer != nullptr);
// store the shard's answer body verbatim and fold its error codes
// (including NOT_FOUND) into the shared counter
resultMap.emplace(res.shardID, res.answer->toVelocyPackBuilderPtrNoUniquenessChecks());
extractErrorCodes(res, errorCounter, true);
responseCode = res.answer_code;
}
}
}
////////////////////////////////////////////////////////////////////////////////
/// @brief compute a shard distribution for a new collection, the list
/// dbServers must be a list of DBserver ids to distribute across.
@ -1213,6 +1148,29 @@ static void collectResponsesFromAllShards(
}
}
////////////////////////////////////////////////////////////////////////////////
/// @brief Inspect the responses of all shards and condense them into a single
/// OperationResult. A communication error on any shard stops the scan and is
/// reported directly; otherwise the last error body found in a shard answer
/// (if any) determines the result.
////////////////////////////////////////////////////////////////////////////////
static OperationResult checkResponsesFromAllShards(
    std::vector<futures::Try<arangodb::network::Response>>& responses) {
  Result summary;
  for (Try<arangodb::network::Response> const& entry : responses) {
    network::Response const& resp = entry.get();  // rethrows captured exceptions
    int err = network::fuerteToArangoErrorCode(resp);
    if (err != TRI_ERROR_NO_ERROR) {
      // communication failure: stop scanning and report it
      summary.reset(err);
      break;
    }
    std::vector<VPackSlice> const& bodies = resp.response->slices();
    if (bodies.empty()) {
      // shard sent no payload; nothing to inspect
      continue;
    }
    VPackSlice body = bodies[0];
    if (VelocyPackHelper::readBooleanValue(body, StaticStrings::Error, false)) {
      summary = network::resultFromBody(body, TRI_ERROR_NO_ERROR);
    }
  }
  return OperationResult(summary);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief creates one or many documents in a coordinator
///
@ -1577,14 +1535,15 @@ Future<OperationResult> removeDocumentOnCoordinator(arangodb::transaction::Metho
/// @brief truncate a cluster collection on a coordinator
////////////////////////////////////////////////////////////////////////////////
Result truncateCollectionOnCoordinator(transaction::Methods& trx, std::string const& collname) {
futures::Future<OperationResult> truncateCollectionOnCoordinator(transaction::Methods& trx,
std::string const& collname) {
Result res;
// Set a few variables needed for our work:
ClusterInfo* ci = ClusterInfo::instance();
auto cc = ClusterComm::instance();
if (cc == nullptr) {
// nullptr happens only during controlled shutdown
return res.reset(TRI_ERROR_SHUTTING_DOWN);
return futures::makeFuture(OperationResult(res.reset(TRI_ERROR_SHUTTING_DOWN)));
}
std::string const& dbname = trx.vocbase().name();
@ -1592,7 +1551,8 @@ Result truncateCollectionOnCoordinator(transaction::Methods& trx, std::string co
std::shared_ptr<LogicalCollection> collinfo;
collinfo = ci->getCollectionNT(dbname, collname);
if (collinfo == nullptr) {
return res.reset(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND);
return futures::makeFuture(
OperationResult(res.reset(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND)));
}
// Some stuff to prepare cluster-intern requests:
@ -1603,44 +1563,36 @@ Result truncateCollectionOnCoordinator(transaction::Methods& trx, std::string co
if (trx.state()->hasHint(transaction::Hints::Hint::GLOBAL_MANAGED)) {
res = ::beginTransactionOnAllLeaders(trx, *shardIds);
if (res.fail()) {
return res;
return futures::makeFuture(OperationResult(res));
}
}
CoordTransactionID coordTransactionID = TRI_NewTickServer();
std::unordered_map<std::string, std::string> headers;
std::vector<Future<network::Response>> futures;
futures.reserve(shardIds->size());
for (auto const& p : *shardIds) {
std::unordered_map<std::string, std::string> headers;
// handler expects valid velocypack body (empty object minimum)
VPackBuffer<uint8_t> buffer;
VPackBuilder builder(buffer);
builder.openObject();
builder.close();
network::Headers headers;
addTransactionHeaderForShard(trx, *shardIds, /*shard*/ p.first, headers);
cc->asyncRequest(coordTransactionID, "shard:" + p.first,
arangodb::rest::RequestType::PUT,
"/_db/" + StringUtils::urlEncode(dbname) +
"/_api/collection/" + p.first + "/truncate",
std::shared_ptr<std::string>(), headers, nullptr, 600.0);
}
// Now listen to the results:
unsigned int count;
unsigned int nrok = 0;
for (count = (unsigned int)shardIds->size(); count > 0; count--) {
auto ccRes = cc->wait(coordTransactionID, 0, "", 0.0);
if (ccRes.status == CL_COMM_RECEIVED) {
if (ccRes.answer_code == arangodb::rest::ResponseCode::OK) {
nrok++;
} else if (ccRes.answer->payload().isObject()) {
VPackSlice answer = ccRes.answer->payload();
return res.reset(VelocyPackHelper::readNumericValue(answer, StaticStrings::ErrorNum,
TRI_ERROR_TRANSACTION_INTERNAL),
VelocyPackHelper::getStringValue(answer, StaticStrings::ErrorMessage,
""));
}
}
auto future =
network::sendRequestRetry("shard:" + p.first, fuerte::RestVerb::Put,
"/_db/" + StringUtils::urlEncode(dbname) +
"/_api/collection/" + p.first +
"/truncate",
std::move(buffer), network::Timeout(600.0),
headers, /*retryNotFound*/ true);
futures.emplace_back(std::move(future));
}
// Note that nrok is always at least 1!
if (nrok < shardIds->size()) {
return res.reset(TRI_ERROR_CLUSTER_COULD_NOT_TRUNCATE_COLLECTION);
}
return res;
auto cb = [](std::vector<Try<network::Response>>&& results) -> OperationResult {
return checkResponsesFromAllShards(results);
};
return futures::collectAll(std::move(futures)).thenValue(std::move(cb));
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -227,7 +227,8 @@ futures::Future<OperationResult> modifyDocumentOnCoordinator(
/// @brief truncate a cluster collection on a coordinator
////////////////////////////////////////////////////////////////////////////////
Result truncateCollectionOnCoordinator(transaction::Methods& trx, std::string const& collname);
futures::Future<OperationResult> truncateCollectionOnCoordinator(transaction::Methods& trx,
std::string const& collname);
////////////////////////////////////////////////////////////////////////////////
/// @brief flush Wal on all DBservers

View File

@ -64,7 +64,7 @@ class ConnectionPool final {
/// @brief simple connection reference counter
struct Ref {
Ref(ConnectionPool::Connection* c);
explicit Ref(ConnectionPool::Connection* c);
Ref(Ref&& r);
Ref& operator=(Ref&&);
Ref(Ref const& other);
@ -78,7 +78,7 @@ class ConnectionPool final {
};
public:
ConnectionPool(ConnectionPool::Config const& config);
explicit ConnectionPool(ConnectionPool::Config const& config);
virtual ~ConnectionPool();
/// @brief request a connection for a specific endpoint
@ -105,7 +105,7 @@ class ConnectionPool final {
protected:
/// @brief connection container
struct Connection {
Connection(std::shared_ptr<fuerte::Connection> f)
explicit Connection(std::shared_ptr<fuerte::Connection> f)
: fuerte(std::move(f)),
numLeased(0),
lastUsed(std::chrono::steady_clock::now()) {}

View File

@ -150,6 +150,8 @@ VertexComputation<SCCValue, int8_t, SenderMessage<uint64_t>>* AsyncSCC::createCo
return new ASCCComputation();
}
namespace {
struct SCCGraphFormat : public GraphFormat<SCCValue, int8_t> {
const std::string _resultField;
uint64_t vertexIdRange = 0;
@ -188,6 +190,8 @@ struct SCCGraphFormat : public GraphFormat<SCCValue, int8_t> {
}
};
} // namespace
GraphFormat<SCCValue, int8_t>* AsyncSCC::inputFormat() const {
return new SCCGraphFormat(_resultField);
}

View File

@ -143,6 +143,8 @@ VertexComputation<SCCValue, int8_t, SenderMessage<uint64_t>>* SCC::createComputa
return new SCCComputation();
}
namespace {
struct SCCGraphFormat : public GraphFormat<SCCValue, int8_t> {
const std::string _resultField;
std::atomic<uint64_t> vertexIdRange;
@ -186,6 +188,8 @@ struct SCCGraphFormat : public GraphFormat<SCCValue, int8_t> {
}
};
} // namespace
GraphFormat<SCCValue, int8_t>* SCC::inputFormat() const {
return new SCCGraphFormat(_resultField);
}

View File

@ -27,6 +27,7 @@
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ServerState.h"
#include "Logger/LogMacros.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/PhysicalCollection.h"
#include "StorageEngine/StorageEngine.h"
@ -58,8 +59,7 @@ RestStatus RestCollectionHandler::execute() {
handleCommandPost();
break;
case rest::RequestType::PUT:
handleCommandPut();
break;
return handleCommandPut();
case rest::RequestType::DELETE_REQ:
handleCommandDelete();
break;
@ -70,6 +70,13 @@ RestStatus RestCollectionHandler::execute() {
return RestStatus::DONE;
}
void RestCollectionHandler::shutdownExecute(bool isFinalized) noexcept {
if (isFinalized) {
// reset the transaction so it releases all locks as early as possible
_activeTrx.reset();
}
}
void RestCollectionHandler::handleCommandGet() {
std::vector<std::string> const& suffixes = _request->decodedSuffixes();
VPackBuilder builder;
@ -354,18 +361,18 @@ void RestCollectionHandler::handleCommandPost() {
}
}
void RestCollectionHandler::handleCommandPut() {
RestStatus RestCollectionHandler::handleCommandPut() {
std::vector<std::string> const& suffixes = _request->decodedSuffixes();
if (suffixes.size() != 2) {
generateError(rest::ResponseCode::BAD, TRI_ERROR_HTTP_BAD_PARAMETER,
"expected PUT /_api/collection/<collection-name>/<action>");
return;
return RestStatus::DONE;
}
bool parseSuccess = false;
VPackSlice body = this->parseVPackBody(parseSuccess);
if (!parseSuccess) {
// error message generated in parseVPackBody
return;
return RestStatus::DONE;
}
std::string const& name = suffixes[0];
@ -380,6 +387,8 @@ void RestCollectionHandler::handleCommandPut() {
Result res;
VPackBuilder builder;
RestStatus status = RestStatus::DONE;
bool generateResponse = true;
auto found = methods::Collections::lookup( // find collection
_vocbase, // vocbase to search
name, // collection name to find
@ -455,33 +464,52 @@ void RestCollectionHandler::handleCommandPut() {
opts.isSynchronousReplicationFrom =
_request->value("isSynchronousReplication");
auto trx = createTransaction(coll->name(), AccessMode::Type::EXCLUSIVE);
trx->addHint(transaction::Hints::Hint::INTERMEDIATE_COMMITS);
trx->addHint(transaction::Hints::Hint::ALLOW_RANGE_DELETE);
res = trx->begin();
_activeTrx = createTransaction(coll->name(), AccessMode::Type::EXCLUSIVE);
_activeTrx->addHint(transaction::Hints::Hint::INTERMEDIATE_COMMITS);
_activeTrx->addHint(transaction::Hints::Hint::ALLOW_RANGE_DELETE);
res = _activeTrx->begin();
if (res.ok()) {
auto result = trx->truncate(coll->name(), opts);
res = trx->finish(result.result);
}
}
// wait for the transaction to finish first. only after that compact the
// data range(s) for the collection
// we shouldn't run compact() as part of the transaction, because the compact
// will be useless inside due to the snapshot the transaction has taken
coll->compact();
generateResponse = false;
status = waitForFuture(
_activeTrx->truncateAsync(coll->name(), opts).thenValue([this, coll](OperationResult&& opres) {
// Will commit if no error occured.
// or abort if an error occured.
// result stays valid!
Result res = _activeTrx->finish(opres.result);
if (opres.fail()) {
generateTransactionError(opres);
return;
}
if (res.ok()) {
if (ServerState::instance()->isCoordinator()) { // ClusterInfo::loadPlan eventually
// updates status
coll->setStatus(TRI_vocbase_col_status_e::TRI_VOC_COL_STATUS_LOADED);
}
if (res.fail()) {
generateTransactionError(coll->name(), res, "");
return;
}
collectionRepresentation(builder, *coll,
/*showProperties*/ false,
/*showFigures*/ false,
/*showCount*/ false,
/*detailedCount*/ true);
_activeTrx.reset();
// wait for the transaction to finish first. only after that compact the
// data range(s) for the collection
// we shouldn't run compact() as part of the transaction, because the compact
// will be useless inside due to the snapshot the transaction has taken
coll->compact();
if (ServerState::instance()->isCoordinator()) { // ClusterInfo::loadPlan eventually
// updates status
coll->setStatus(TRI_vocbase_col_status_e::TRI_VOC_COL_STATUS_LOADED);
}
VPackBuilder builder;
collectionRepresentation(builder, *coll,
/*showProperties*/ false,
/*showFigures*/ false,
/*showCount*/ false,
/*detailedCount*/ true);
generateOk(rest::ResponseCode::OK, builder);
_response->setHeaderNC(StaticStrings::Location, _request->requestPath());
}));
}
}
} else if (sub == "properties") {
// replication checks
@ -560,14 +588,19 @@ void RestCollectionHandler::handleCommandPut() {
}
});
if (found.fail()) {
generateError(found);
} else if (res.ok()) {
generateOk(rest::ResponseCode::OK, builder);
_response->setHeaderNC(StaticStrings::Location, _request->requestPath());
} else {
generateError(res);
if (generateResponse) {
if (found.fail()) {
generateError(found);
} else if (res.ok()) {
// TODO react to status?
generateOk(rest::ResponseCode::OK, builder);
_response->setHeaderNC(StaticStrings::Location, _request->requestPath());
} else {
generateError(res);
}
}
return status;
}
void RestCollectionHandler::handleCommandDelete() {

View File

@ -25,6 +25,7 @@
#define ARANGOD_REST_HANDLER_REST_COLLECTION_HANDLER_H 1
#include "RestHandler/RestVocbaseBaseHandler.h"
#include "Transaction/Methods.h"
#include "VocBase/Methods/Collections.h"
namespace arangodb {
@ -39,6 +40,8 @@ class RestCollectionHandler : public arangodb::RestVocbaseBaseHandler {
char const* name() const override final { return "RestCollectionHandler"; }
RequestLane lane() const override final { return RequestLane::CLIENT_SLOW; }
RestStatus execute() override final;
void shutdownExecute(bool isFinalized) noexcept override final;
protected:
void collectionRepresentation(VPackBuilder& builder, std::string const& name,
@ -59,8 +62,11 @@ class RestCollectionHandler : public arangodb::RestVocbaseBaseHandler {
private:
void handleCommandGet();
void handleCommandPost();
void handleCommandPut();
RestStatus handleCommandPut();
void handleCommandDelete();
private:
std::unique_ptr<transaction::Methods> _activeTrx;
};
} // namespace arangodb

View File

@ -26,6 +26,7 @@
#include "Basics/Common.h"
#include "RestHandler/RestVocbaseBaseHandler.h"
#include "Utils/SingleCollectionTransaction.h"
namespace arangodb {
namespace transaction {
@ -86,7 +87,6 @@ class RestDocumentHandler : public RestVocbaseBaseHandler {
private:
std::unique_ptr<transaction::Methods> _activeTrx;
std::string _cname;
};
} // namespace arangodb

View File

@ -58,7 +58,7 @@ using namespace arangodb::rest;
namespace {
class SimpleTransaction : public transaction::Methods {
public:
SimpleTransaction(std::shared_ptr<transaction::Context>&& transactionContext,
explicit SimpleTransaction(std::shared_ptr<transaction::Context>&& transactionContext,
transaction::Options&& options = transaction::Options())
: Methods(std::move(transactionContext), std::move(options)) {}
};

View File

@ -2404,34 +2404,33 @@ OperationResult transaction::Methods::allLocal(std::string const& collectionName
}
/// @brief remove all documents in a collection
OperationResult transaction::Methods::truncate(std::string const& collectionName,
OperationOptions const& options) {
Future<OperationResult> transaction::Methods::truncateAsync(std::string const& collectionName,
OperationOptions const& options) {
TRI_ASSERT(_state->status() == transaction::Status::RUNNING);
OperationOptions optionsCopy = options;
OperationResult result;
auto cb = [this, collectionName](OperationResult res) {
events::TruncateCollection(vocbase().name(), collectionName, res.errorNumber());
return res;
};
if (_state->isCoordinator()) {
result = truncateCoordinator(collectionName, optionsCopy);
} else {
result = truncateLocal(collectionName, optionsCopy);
return truncateCoordinator(collectionName, optionsCopy).thenValue(cb);
}
events::TruncateCollection(vocbase().name(), collectionName, result.errorNumber());
return result;
return truncateLocal(collectionName, optionsCopy).thenValue(cb);
}
/// @brief remove all documents in a collection, coordinator
#ifndef USE_ENTERPRISE
OperationResult transaction::Methods::truncateCoordinator(std::string const& collectionName,
OperationOptions& options) {
return OperationResult(arangodb::truncateCollectionOnCoordinator(*this, collectionName));
Future<OperationResult> transaction::Methods::truncateCoordinator(std::string const& collectionName,
OperationOptions& options) {
return arangodb::truncateCollectionOnCoordinator(*this, collectionName);
}
#endif
/// @brief remove all documents in a collection, local
OperationResult transaction::Methods::truncateLocal(std::string const& collectionName,
OperationOptions& options) {
Future<OperationResult> transaction::Methods::truncateLocal(std::string const& collectionName,
OperationOptions& options) {
TRI_voc_cid_t cid = addCollectionAtRuntime(collectionName, AccessMode::Type::WRITE);
auto const& collection = trxCollection(cid)->collection();
@ -2445,7 +2444,8 @@ OperationResult transaction::Methods::truncateLocal(std::string const& collectio
std::string theLeader = followerInfo->getLeader();
if (theLeader.empty()) {
if (!options.isSynchronousReplicationFrom.empty()) {
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_REFUSES_REPLICATION);
return futures::makeFuture(
OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_REFUSES_REPLICATION));
}
if (!followerInfo->allowedToWrite()) {
// We cannot fulfill minimum replication Factor.
@ -2455,7 +2455,7 @@ OperationResult transaction::Methods::truncateLocal(std::string const& collectio
<< basics::StringUtils::itoa(collection->minReplicationFactor())
<< " followers in sync. Shard " << collection->name()
<< " is temporarily in read-only mode.";
return OperationResult(TRI_ERROR_ARANGO_READ_ONLY, options);
return futures::makeFuture(OperationResult(TRI_ERROR_ARANGO_READ_ONLY, options));
}
// fetch followers
@ -2467,10 +2467,11 @@ OperationResult transaction::Methods::truncateLocal(std::string const& collectio
} else { // we are a follower following theLeader
replicationType = ReplicationType::FOLLOWER;
if (options.isSynchronousReplicationFrom.empty()) {
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED);
return futures::makeFuture(OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED));
}
if (options.isSynchronousReplicationFrom != theLeader) {
return OperationResult(TRI_ERROR_CLUSTER_SHARD_FOLLOWER_REFUSES_OPERATION);
return futures::makeFuture(
OperationResult(TRI_ERROR_CLUSTER_SHARD_FOLLOWER_REFUSES_OPERATION));
}
}
} // isDBServer - early block
@ -2480,7 +2481,7 @@ OperationResult transaction::Methods::truncateLocal(std::string const& collectio
Result lockResult = lockRecursive(cid, AccessMode::Type::WRITE);
if (!lockResult.ok() && !lockResult.is(TRI_ERROR_LOCKED)) {
return OperationResult(lockResult);
return futures::makeFuture(OperationResult(lockResult));
}
TRI_ASSERT(isLocked(collection.get(), AccessMode::Type::WRITE));
@ -2492,7 +2493,7 @@ OperationResult transaction::Methods::truncateLocal(std::string const& collectio
unlockRecursive(cid, AccessMode::Type::WRITE);
}
return OperationResult(res);
return futures::makeFuture(OperationResult(res));
}
// Now see whether or not we have to do synchronous replication:
@ -2530,7 +2531,7 @@ OperationResult transaction::Methods::truncateLocal(std::string const& collectio
// error (note that we use the follower version, since we have
// lost leadership):
if (findRefusal(requests)) {
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED);
return futures::makeFuture(OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED));
}
// we drop all followers that were not successful:
for (size_t i = 0; i < followers->size(); ++i) {
@ -2561,7 +2562,7 @@ OperationResult transaction::Methods::truncateLocal(std::string const& collectio
res = unlockRecursive(cid, AccessMode::Type::WRITE);
}
return OperationResult(res);
return futures::makeFuture(OperationResult(res));
}
/// @brief count the number of documents in a collection

View File

@ -355,8 +355,14 @@ class Methods {
ENTERPRISE_VIRT OperationResult all(std::string const& collectionName, uint64_t skip,
uint64_t limit, OperationOptions const& options);
/// @brief deprecated use async variant
OperationResult truncate(std::string const& collectionName, OperationOptions const& options) {
return this->truncateAsync(collectionName, options).get();
}
/// @brief remove all documents in a collection
OperationResult truncate(std::string const& collectionName, OperationOptions const& options);
Future<OperationResult> truncateAsync(std::string const& collectionName,
OperationOptions const& options);
/// @brief count the number of documents in a collection
virtual OperationResult count(std::string const& collectionName, CountType type);
@ -498,10 +504,11 @@ class Methods {
OperationResult anyLocal(std::string const& collectionName);
OperationResult truncateCoordinator(std::string const& collectionName,
OperationOptions& options);
Future<OperationResult> truncateCoordinator(std::string const& collectionName,
OperationOptions& options);
OperationResult truncateLocal(std::string const& collectionName, OperationOptions& options);
Future<OperationResult> truncateLocal(std::string const& collectionName,
OperationOptions& options);
OperationResult rotateActiveJournalCoordinator(std::string const& collectionName,
OperationOptions const& options);

View File

@ -65,6 +65,8 @@ endif()
## arangobackup
################################################################################
if (USE_ENTERPRISE)
if (MSVC AND NOT(SKIP_PACKAGING))
generate_product_version(ProductVersionFiles_arangobackup
NAME arangobackup
@ -120,6 +122,8 @@ if (DARWIN)
target_compile_options(arangobackup PRIVATE -Werror)
endif()
endif () # USE_ENTERPRISE
################################################################################
## arangodump
################################################################################

View File

@ -4,6 +4,9 @@
set(STRIP_DIR "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/cstrip")
add_custom_target(strip_install_client ALL)
if (${USE_ENTERPRISE})
strip_install_bin_and_config(arangobackup ${STRIP_DIR} ${CMAKE_INSTALL_BINDIR} strip_install_client)
endif ()
strip_install_bin_and_config(arangobench ${STRIP_DIR} ${CMAKE_INSTALL_BINDIR} strip_install_client)
strip_install_bin_and_config(arangodump ${STRIP_DIR} ${CMAKE_INSTALL_BINDIR} strip_install_client)
strip_install_bin_and_config(arangoimport ${STRIP_DIR} ${CMAKE_INSTALL_BINDIR} strip_install_client)

View File

@ -13,11 +13,13 @@ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "@CMAKE_BINARY_DIR@/bin")
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_X "@CMAKE_BINARY_DIR@/bin/$<CONFIG>/")
set(CMAKE_BUILD_TYPE @CMAKE_BUILD_TYPE@)
set(USE_ENTERPRISE @USE_ENTERPRISE@)
################################################################################
# Substitute the install binaries:
################################################################################
set(BIN_ARANGOBACKUP @BIN_ARANGOBACKUP@)
set(BIN_ARANGOBENCH @BIN_ARANGOBENCH@)
set(BIN_ARANGODUMP @BIN_ARANGODUMP@)
set(BIN_ARANGOEXPORT @BIN_ARANGOEXPORT@)
@ -72,7 +74,7 @@ set(INSTALL_ICU_DT_DEST "@INSTALL_ICU_DT_DEST@")
set(CPACK_PACKAGE_VERSION "${ARANGODB_VERSION}")
set(CPACK_PACKAGE_CONTACT ${ARANGODB_PACKAGE_CONTACT})
if (USE_ENTERPRISE)
if (${USE_ENTERPRISE})
set(CPACK_RESOURCE_FILE_LICENSE "${PROJECT_SOURCE_DIR}/enterprise/LICENSE")
else ()
set(CPACK_RESOURCE_FILE_LICENSE "${PROJECT_SOURCE_DIR}/LICENSE")
@ -139,6 +141,7 @@ set(CPACK_NSIS_MUI_ICON ${ARANGO_ICON})
set(CPACK_NSIS_MUI_UNIICON ${ARANGO_ICON})
set(CPACK_NSIS_INSTALLED_ICON_NAME ${RELATIVE_ARANGO_ICON})
message(STATUS "USE_ENTERPRISE: ${USE_ENTERPRISE}")
message(STATUS "RELATIVE_ARANGO_ICON: ${RELATIVE_ARANGO_ICON}")
message(STATUS "ARANGO_IMG: ${ARANGO_IMG}")
message(STATUS "ARANGO_ICON: ${ARANGO_ICON}")

View File

@ -267,14 +267,22 @@ function setupBinaries (builddir, buildType, configDir) {
LOGS_DIR = fs.join(TOP_DIR, 'logs');
let checkFiles = [
ARANGOBACKUP_BIN,
ARANGOBENCH_BIN,
ARANGODUMP_BIN,
ARANGOD_BIN,
ARANGOIMPORT_BIN,
ARANGORESTORE_BIN,
ARANGOEXPORT_BIN,
ARANGOSH_BIN];
ARANGOSH_BIN
];
if (global.ARANGODB_CLIENT_VERSION) {
let version = global.ARANGODB_CLIENT_VERSION(true);
if (version.hasOwnProperty('enterprise-version')) {
checkFiles.push(ARANGOBACKUP_BIN);
}
}
for (let b = 0; b < checkFiles.length; ++b) {
if (!fs.isFile(checkFiles[b])) {
throw new Error('unable to locate ' + checkFiles[b]);
@ -1025,7 +1033,8 @@ function abortSurvivors(arangod, options) {
print(Date() + " Killing in the name of: ");
print(arangod);
if (!arangod.hasOwnProperty('exitStatus')) {
killWithCoreDump(options, arangod);
// killWithCoreDump(options, arangod);
arangod.exitStatus = killExternal(arangod.pid, termSignal);
}
}
@ -1472,7 +1481,8 @@ function checkClusterAlive(options, instanceInfo, addArgs) {
instanceInfo.arangods.forEach(arangod => {
if (!arangod.hasOwnProperty('exitStatus') ||
(arangod.exitStatus.status === 'RUNNING')) {
killWithCoreDump(options, arangod);
// killWithCoreDump(options, arangod);
arangod.exitStatus = killExternal(arangod.pid, termSignal);
}
analyzeServerCrash(arangod, options, 'startup timeout; forcefully terminating ' + arangod.role + ' with pid: ' + arangod.pid);
});

View File

@ -530,18 +530,6 @@ char* TRI_SHA256String(char const* source, size_t sourceLen, size_t* dstLen) {
return (char*)dst;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief escapes special characters using C escapes
/// Allocating convenience overload: sizes a fresh buffer via
/// TRI_MaxLengthEscapeControlsCString and delegates to the buffer-based
/// overload. The returned buffer is owned by the caller (allocated with
/// TRI_Allocate — presumably to be released with the matching TRI free
/// routine; confirm against allocator conventions).
/// @param in             input bytes to escape (need not be NUL-terminated)
/// @param inLength       number of input bytes
/// @param outLength      out-parameter: length of the escaped output
/// @param appendNewline  whether to append a trailing newline
/// @return escaped, allocated string, or nullptr if allocation failed
////////////////////////////////////////////////////////////////////////////////
char* TRI_EscapeControlsCString(char const* in, size_t inLength,
                                size_t* outLength, bool appendNewline) {
  char* buffer =
      static_cast<char*>(TRI_Allocate(TRI_MaxLengthEscapeControlsCString(inLength)));
  if (buffer == nullptr) {
    // allocation failed: the buffer-based overload requires an already
    // allocated target buffer, so bail out instead of passing nullptr
    *outLength = 0;
    return nullptr;
  }
  return TRI_EscapeControlsCString(in, inLength, buffer, outLength, appendNewline);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief escapes special characters using C escapes
/// the target buffer must have been allocated already and big enough to hold

View File

@ -131,13 +131,6 @@ constexpr size_t TRI_MaxLengthEscapeControlsCString(size_t inLength) {
return (4 * inLength) + 2; // for newline and 0 byte
}
////////////////////////////////////////////////////////////////////////////////
/// @brief escapes special characters using C escapes
////////////////////////////////////////////////////////////////////////////////
char* TRI_EscapeControlsCString(char const* in, size_t inLength,
size_t* outLength, bool appendNewline);
////////////////////////////////////////////////////////////////////////////////
/// @brief escapes special characters using C escapes
/// the target buffer must have been allocated already and big enough to hold

View File

@ -66,7 +66,7 @@ size_t LogAppenderStream::determineOutputBufferSize(std::string const& message)
size_t LogAppenderStream::writeIntoOutputBuffer(std::string const& message) {
if (_escape) {
size_t escapedLength;
size_t escapedLength = 0;
// this is guaranteed to succeed given that we already have a buffer
TRI_EscapeControlsCString(message.data(), message.size(), _buffer.get(),
&escapedLength, true);

View File

@ -933,4 +933,3 @@ jsunity.run(function MovingShardsSuite_data() {
});
return jsunity.done();