
Port backup size to 3.5. (#10117)

* Port backup size to 3.5.

* Lars' fixes from devel.

* Fix compilation one more time.

* Lars' datetime fix from devel.
Max Neunhöffer 2019-10-01 15:45:28 +02:00 committed by KVS85
parent 13dae8cace
commit c067e17bb6
6 changed files with 140 additions and 16 deletions

View File

@ -5,7 +5,8 @@
@RESTDESCRIPTION
Download a specific local backup from a remote repository, or query
progress on a previously scheduled download operation.
progress on a previously scheduled download operation, or abort
a running download operation.
@RESTBODYPARAM{id,string,optional,string}
The identifier for this backup. This is required when a download
@ -24,8 +25,13 @@ attribute. See the description of the _arangobackup_ program in the manual
for a description of the `config` object.
@RESTBODYPARAM{downloadId,string,optional,string}
Download ID to specify for which download operation progress is queried.
If you specify this, leave out all other body parameters.
Download ID to specify for which download operation progress is queried, or
the download operation to abort.
If you specify this, leave out all the above body parameters.
@RESTBODYPARAM{abort,boolean,optional,boolean}
Set this to `true` if a running download operation should be aborted. In
this case, the only other body parameter which is needed is `downloadId`.
@RESTRETURNCODES
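
For illustration only (not part of the commit): a minimal sketch of how a client might build the request body documented above to abort a running download, using the same velocypack builder pattern as the implementation in this commit. The helper name and the placeholder ID are assumptions.

#include <string>
#include <velocypack/Builder.h>
#include <velocypack/velocypack-aliases.h>

// Hypothetical helper: body for aborting a previously scheduled download.
// Per the docs above, only downloadId and abort=true are sent.
VPackBuilder buildAbortDownloadBody(std::string const& downloadId) {
  VPackBuilder body;
  {
    VPackObjectBuilder o(&body);
    body.add("downloadId", VPackValue(downloadId));  // ID returned when the download was scheduled
    body.add("abort", VPackValue(true));             // turn the progress query into an abort request
  }
  return body;  // body.toJson() would yield e.g. {"downloadId":"10046","abort":true}
}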

View File

@ -5,7 +5,8 @@
@RESTDESCRIPTION
Upload a specific local backup to a remote repository, or query
progress on a previously scheduled upload operation.
progress on a previously scheduled upload operation, or abort
a running upload operation.
@RESTBODYPARAM{id,string,optional,string}
The identifier for this backup. This is required when an upload
@ -24,8 +25,13 @@ attribute. See the description of the _arangobackup_ program in the manual
for a description of the `config` object.
@RESTBODYPARAM{uploadId,string,optional,string}
Upload ID to specify for which upload operation progress is queried.
If you specify this, leave out all other body parameters.
Upload ID to specify for which upload operation progress is queried or
the upload operation to abort.
If you specify this, leave out all the above body parameters.
@RESTBODYPARAM{abort,boolean,optional,boolean}
Set this to `true` if a running upload operation should be aborted. In
this case, the only other body parameter which is needed is `uploadId`.
@RESTRETURNCODES
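
Analogously for uploads, a hedged sketch (again an assumption, not part of the commit) of a body that queries progress with only the uploadId, and aborts the running upload when the abort flag is set:

// Hypothetical helper: progress query for a scheduled upload; with abort=true
// the same call aborts the running upload instead.
VPackBuilder buildUploadStatusBody(std::string const& uploadId, bool abort) {
  VPackBuilder body;
  {
    VPackObjectBuilder o(&body);
    body.add("uploadId", VPackValue(uploadId));  // ID returned when the upload was scheduled
    if (abort) {
      body.add("abort", VPackValue(true));       // leave out to simply query progress
    }
  }
  return body;
}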

View File

@ -3285,6 +3285,8 @@ arangodb::Result hotBackupList(std::vector<ServerID> const& dbServers, VPackSlic
// check here that the backups are all made with the same version
std::string version;
size_t totalSize = 0;
size_t totalFiles = 0;
for (BackupMeta const& meta : i.second) {
if (version.empty()) {
@ -3299,10 +3301,15 @@ arangodb::Result hotBackupList(std::vector<ServerID> const& dbServers, VPackSlic
break;
}
}
totalSize += meta._sizeInBytes;
totalFiles += meta._nrFiles;
}
if (valid) {
BackupMeta& front = i.second.front();
front._sizeInBytes = totalSize;
front._nrFiles = totalFiles;
front._serverId = ""; // makes no sense for whole cluster
hotBackups.insert(std::make_pair(front._id, front));
}
}
@ -3859,7 +3866,8 @@ std::vector<std::string> idPath{"result", "id"};
arangodb::Result hotBackupDBServers(std::string const& backupId, std::string const& timeStamp,
std::vector<ServerID> dbServers,
VPackSlice agencyDump, bool force) {
VPackSlice agencyDump, bool force,
BackupMeta& meta) {
auto cc = ClusterComm::instance();
if (cc == nullptr) {
// nullptr happens only during controlled shutdown
@ -3873,6 +3881,7 @@ arangodb::Result hotBackupDBServers(std::string const& backupId, std::string con
builder.add("agency-dump", agencyDump);
builder.add("timestamp", VPackValue(timeStamp));
builder.add("allowInconsistent", VPackValue(force));
builder.add("nrDBServers", VPackValue(dbServers.size()));
}
auto body = std::make_shared<std::string>(builder.toJson());
@ -3889,6 +3898,10 @@ arangodb::Result hotBackupDBServers(std::string const& backupId, std::string con
LOG_TOPIC("478ef", DEBUG, Logger::BACKUP) << "Inquiring about backup " << backupId;
// Now listen to the results:
size_t totalSize = 0;
size_t totalFiles = 0;
std::string version;
bool sizeValid = true;
for (auto const& req : requests) {
auto res = req.result;
int commError = handleGeneralCommErrors(&res);
@ -3899,14 +3912,16 @@ arangodb::Result hotBackupDBServers(std::string const& backupId, std::string con
TRI_ASSERT(res.answer != nullptr);
auto resBody = res.answer->toVelocyPackBuilderPtrNoUniquenessChecks();
VPackSlice resSlice = resBody->slice();
if (!resSlice.isObject()) {
if (!resSlice.isObject() || !resSlice.hasKey("result")) {
// Response has invalid format
return arangodb::Result(TRI_ERROR_HTTP_CORRUPTED_JSON,
std::string("result to take snapshot on ") +
req.destination + " not an object");
req.destination + " not an object or has no 'result' attribute");
}
resSlice = resSlice.get("result");
if (!resSlice.hasKey(idPath) || !resSlice.get(idPath).isString()) {
if (!resSlice.hasKey(BackupMeta::ID) ||
!resSlice.get(BackupMeta::ID).isString()) {
LOG_TOPIC("6240a", ERR, Logger::BACKUP)
<< "DB server " << req.destination << "is missing backup " << backupId;
return arangodb::Result(TRI_ERROR_FILE_NOT_FOUND,
@ -3914,10 +3929,35 @@ arangodb::Result hotBackupDBServers(std::string const& backupId, std::string con
" on server " + req.destination);
}
if (resSlice.hasKey(BackupMeta::SIZEINBYTES)) {
totalSize += VelocyPackHelper::getNumericValue<size_t>(resSlice, BackupMeta::SIZEINBYTES, 0);
} else {
sizeValid = false;
}
if (resSlice.hasKey(BackupMeta::NRFILES)) {
totalFiles += VelocyPackHelper::getNumericValue<size_t>(resSlice, BackupMeta::NRFILES, 0);
} else {
sizeValid = false;
}
if (version.empty() && resSlice.hasKey(BackupMeta::VERSION)) {
VPackSlice verSlice = resSlice.get(BackupMeta::VERSION);
if (verSlice.isString()) {
version = verSlice.copyString();
}
}
LOG_TOPIC("b370d", DEBUG, Logger::BACKUP) << req.destination << " created local backup "
<< resSlice.get(idPath).copyString();
<< resSlice.get(BackupMeta::ID).copyString();
}
if (sizeValid) {
meta = BackupMeta(backupId, version, timeStamp, totalSize, totalFiles, static_cast<unsigned int>(dbServers.size()), "", force);
} else {
meta = BackupMeta(backupId, version, timeStamp, 0, 0, static_cast<unsigned int>(dbServers.size()), "", force);
LOG_TOPIC("54265", WARN, Logger::BACKUP)
<< "Could not determine total size of backup with id '" << backupId
<< "'!";
}
LOG_TOPIC("5c5e9", DEBUG, Logger::BACKUP) << "Have created backup " << backupId;
return arangodb::Result();
@ -4141,8 +4181,9 @@ arangodb::Result hotBackupCoordinator(VPackSlice const payload, VPackBuilder& re
return result;
}
BackupMeta meta(backupId, "", timeStamp, 0, 0, static_cast<unsigned int>(dbServers.size()), "", !gotLocks); // Temporary
result = hotBackupDBServers(backupId, timeStamp, dbServers, agency->slice(),
/* force */ !gotLocks);
/* force */ !gotLocks, meta);
if (!result.ok()) {
unlockDBServerTransactions(backupId, dbServers);
ci->agencyHotBackupUnlock(backupId, timeout, supervisionOff);
@ -4190,6 +4231,10 @@ arangodb::Result hotBackupCoordinator(VPackSlice const payload, VPackBuilder& re
{
VPackObjectBuilder o(&report);
report.add("id", VPackValue(timeStamp + "_" + backupId));
report.add("sizeInBytes", VPackValue(meta._sizeInBytes));
report.add("nrFiles", VPackValue(meta._nrFiles));
report.add("nrDBServers", VPackValue(meta._nrDBServers));
report.add("datetime", VPackValue(meta._datetime));
if (!gotLocks) {
report.add("potentiallyInconsistent", VPackValue(true));
}
@ -4200,7 +4245,7 @@ arangodb::Result hotBackupCoordinator(VPackSlice const payload, VPackBuilder& re
} catch (std::exception const& e) {
return arangodb::Result(
TRI_ERROR_HOT_BACKUP_INTERNAL,
std::string("caught exception cretaing cluster backup: ") + e.what());
std::string("caught exception creating cluster backup: ") + e.what());
}
}
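
To summarize the aggregation this file's hunks add, here is a condensed, hypothetical sketch (ServerBackupInfo and aggregateSizes are illustrative names, not taken from the commit): the totals are only treated as valid if every DB server reported both fields, mirroring the sizeValid handling above.

#include <cstddef>
#include <utility>
#include <vector>

// Illustrative stand-in for what each DB server reports back.
struct ServerBackupInfo {
  bool hasSize = false;
  size_t sizeInBytes = 0;
  bool hasFiles = false;
  size_t nrFiles = 0;
};

// Sum sizes and file counts across servers; the totals are only trustworthy
// (sizeValid == true) if every server supplied both values.
std::pair<size_t, size_t> aggregateSizes(std::vector<ServerBackupInfo> const& servers,
                                         bool& sizeValid) {
  size_t totalSize = 0;
  size_t totalFiles = 0;
  sizeValid = true;
  for (auto const& s : servers) {
    if (s.hasSize) { totalSize += s.sizeInBytes; } else { sizeValid = false; }
    if (s.hasFiles) { totalFiles += s.nrFiles; } else { sizeValid = false; }
  }
  return {totalSize, totalFiles};  // stored as _sizeInBytes / _nrFiles on the cluster-wide BackupMeta
}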

View File

@ -29,6 +29,9 @@
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
#include "Basics/VelocyPackHelper.h"
#include "Cluster/ServerState.h"
namespace arangodb {
constexpr char const* BAD_PARAMS_CREATE = "backup payload must be an object "
@ -43,10 +46,20 @@ struct BackupMeta {
std::string _id;
std::string _version;
std::string _datetime;
size_t _sizeInBytes;
size_t _nrFiles;
unsigned int _nrDBServers;
std::string _serverId;
bool _potentiallyInconsistent;
static constexpr const char *ID = "id";
static constexpr const char *VERSION = "version";
static constexpr const char *DATETIME = "datetime";
static constexpr const char *SIZEINBYTES = "sizeInBytes";
static constexpr const char *NRFILES = "nrFiles";
static constexpr const char *NRDBSERVERS = "nrDBServers";
static constexpr const char *SERVERID = "serverId";
static constexpr const char *POTENTIALLYINCONSISTENT = "potentiallyInconsistent";
void toVelocyPack(VPackBuilder &builder) const {
{
@ -54,6 +67,13 @@ struct BackupMeta {
builder.add(ID, VPackValue(_id));
builder.add(VERSION, VPackValue(_version));
builder.add(DATETIME, VPackValue(_datetime));
builder.add(SIZEINBYTES, VPackValue(_sizeInBytes));
builder.add(NRFILES, VPackValue(_nrFiles));
builder.add(NRDBSERVERS, VPackValue(_nrDBServers));
if (ServerState::instance()->isDBServer()) {
builder.add(SERVERID, VPackValue(_serverId));
}
builder.add(POTENTIALLYINCONSISTENT, VPackValue(_potentiallyInconsistent));
}
}
@ -63,14 +83,24 @@ struct BackupMeta {
meta._id = slice.get(ID).copyString();
meta._version = slice.get(VERSION).copyString();
meta._datetime = slice.get(DATETIME).copyString();
meta._sizeInBytes = basics::VelocyPackHelper::getNumericValue<size_t>(
slice, SIZEINBYTES, 0);
meta._nrFiles = basics::VelocyPackHelper::getNumericValue<size_t>(
slice, NRFILES, 0);
meta._nrDBServers = basics::VelocyPackHelper::getNumericValue<unsigned int>(
slice, NRDBSERVERS, 1);
meta._serverId = basics::VelocyPackHelper::getStringValue(slice, SERVERID, "");
meta._potentiallyInconsistent = basics::VelocyPackHelper::getBooleanValue(slice, POTENTIALLYINCONSISTENT, false);
return meta;
} catch (std::exception const& e) {
return ResultT<BackupMeta>::error(TRI_ERROR_BAD_PARAMETER, e.what());
}
}
BackupMeta(std::string const& id, std::string const& version, std::string const& datetime) :
_id(id), _version(version), _datetime(datetime) {}
BackupMeta(std::string const& id, std::string const& version, std::string const& datetime, size_t sizeInBytes, size_t nrFiles, unsigned int nrDBServers, std::string const& serverId, bool potentiallyInconsistent) :
_id(id), _version(version), _datetime(datetime),
_sizeInBytes(sizeInBytes), _nrFiles(nrFiles), _nrDBServers(nrDBServers),
_serverId(serverId), _potentiallyInconsistent(potentiallyInconsistent) {}
private:
BackupMeta() {}
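
A small usage sketch of the extended struct: all values are placeholders, the BackupMeta header is assumed to be included, and toVelocyPack is assumed to run inside an ArangoDB process, since it consults ServerState.

#include <iostream>
#include <velocypack/Builder.h>
#include <velocypack/velocypack-aliases.h>

void printBackupMetaExample() {
  // All figures here are placeholders, not real backup data.
  arangodb::BackupMeta meta(
      "2019-10-01T12.00.00Z_example",   // id
      "3.5.1",                          // version
      "2019-10-01T12:00:00Z",           // datetime
      1048576,                          // sizeInBytes
      42,                               // nrFiles
      3,                                // nrDBServers
      "",                               // serverId (empty outside a DB server)
      false);                           // potentiallyInconsistent
  VPackBuilder builder;
  meta.toVelocyPack(builder);  // now also emits sizeInBytes, nrFiles and nrDBServers
  std::cout << builder.toJson() << std::endl;
}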

View File

@ -158,6 +158,25 @@ struct ExecContextScope {
private:
ExecContext const* _old;
};
struct ExecContextSuperuserScope {
explicit ExecContextSuperuserScope()
: _old(ExecContext::CURRENT) {
ExecContext::CURRENT = ExecContext::superuser();
}
explicit ExecContextSuperuserScope(bool cond) : _old(ExecContext::CURRENT) {
if (cond) {
ExecContext::CURRENT = ExecContext::superuser();
}
}
~ExecContextSuperuserScope() { ExecContext::CURRENT = _old; }
private:
ExecContext const* _old;
};
} // namespace arangodb
#endif
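
For context, a hedged usage sketch of the new scope guard (doPrivilegedWork and runElevated are hypothetical names): while the guard is alive, ExecContext::CURRENT points at the superuser context, and the destructor restores the previous context even on early return or exception.

void doPrivilegedWork();  // hypothetical callee

void runElevated(bool needsSuperuser) {
  arangodb::ExecContextSuperuserScope guard(needsSuperuser);  // elevates only when true
  doPrivilegedWork();  // runs with superuser rights if needsSuperuser was set
}  // guard's destructor restores the previous ExecContext::CURRENT here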

View File

@ -242,6 +242,17 @@ arangodb::Result executeList(arangodb::httpclient::SimpleHttpClient& client,
if (meta.ok()) {
LOG_TOPIC("0f208", INFO, arangodb::Logger::BACKUP) << " version: " << meta.get()._version;
LOG_TOPIC("55af7", INFO, arangodb::Logger::BACKUP) << " date/time: " << meta.get()._datetime;
LOG_TOPIC("43522", INFO, arangodb::Logger::BACKUP) << " size in bytes: " << meta.get()._sizeInBytes;
LOG_TOPIC("12532", INFO, arangodb::Logger::BACKUP) << " number of files: " << meta.get()._nrFiles;
LOG_TOPIC("43212", INFO, arangodb::Logger::BACKUP) << " number of DBServers: " << meta.get()._nrDBServers;
if (!meta.get()._serverId.empty()) {
LOG_TOPIC("11112", INFO, arangodb::Logger::BACKUP) << " serverId: " << meta.get()._serverId;
}
if (meta.get()._potentiallyInconsistent) {
LOG_TOPIC("56241", INFO, arangodb::Logger::BACKUP) << " potentiallyInconsistent: true";
} else {
LOG_TOPIC("56242", INFO, arangodb::Logger::BACKUP) << " potentiallyInconsistent: false";
}
}
}
}
@ -315,7 +326,14 @@ arangodb::Result executeCreate(arangodb::httpclient::SimpleHttpClient& client,
LOG_TOPIC("c4d37", INFO, arangodb::Logger::BACKUP)
<< "Backup succeeded. Generated identifier '" << identifier.copyString() << "'";
VPackSlice sizeInBytes = resultObject.get("sizeInBytes");
VPackSlice nrFiles = resultObject.get("nrFiles");
if (sizeInBytes.isInteger() && nrFiles.isInteger()) {
uint64_t size = sizeInBytes.getNumber<uint64_t>();
uint64_t nr = nrFiles.getNumber<uint64_t>();
LOG_TOPIC("ce423", INFO, arangodb::Logger::BACKUP)
<< "Total size of backup: " << size << ", number of files: " << nr;
}
return result;
}