1
0
Fork 0

Merge branch 'devel' of github.com:arangodb/ArangoDB into bug-fix/speedup-tests

This commit is contained in:
Wilfried Goesgens 2019-10-01 16:28:13 +02:00
commit fd4496afb8
10 changed files with 210 additions and 77 deletions

View File

@@ -1,6 +1,11 @@
devel devel
----- -----
* Fixed Unintended multiple unlock commands from coordinator to
transaction locked db servers
* Use execvp instead of execv in hotbackup restore.
* DB server locking / unlocking for hot backup revisited and enhanced * DB server locking / unlocking for hot backup revisited and enhanced
* Re-enabled the AQL sort-limit optimization rule in conjunction with fullCount * Re-enabled the AQL sort-limit optimization rule in conjunction with fullCount

View File

@@ -5,7 +5,8 @@
@RESTDESCRIPTION @RESTDESCRIPTION
Download a specific local backup from a remote repository, or query Download a specific local backup from a remote repository, or query
progress on a previously scheduled download operation. progress on a previously scheduled download operation, or abort
a running download operation.
@RESTBODYPARAM{id,string,optional,string} @RESTBODYPARAM{id,string,optional,string}
The identifier for this backup. This is required when a download The identifier for this backup. This is required when a download
@@ -24,8 +25,13 @@ attribute. See the description of the _arangobackup_ program in the manual
for a description of the `config` object. for a description of the `config` object.
@RESTBODYPARAM{downloadId,string,optional,string} @RESTBODYPARAM{downloadId,string,optional,string}
Download ID to specify for which download operation progress is queried. Download ID to specify for which download operation progress is queried, or
If you specify this, leave out all other body parameters. the download operation to abort.
If you specify this, leave out all the above body parameters.
@RESTBODYPARAM{abort,boolean,optional,boolean}
Set this to `true` if a running download operation should be aborted. In
this case, the only other body parameter which is needed is `downloadId`.
@RESTRETURNCODES @RESTRETURNCODES

View File

@@ -5,7 +5,8 @@
@RESTDESCRIPTION @RESTDESCRIPTION
Upload a specific local backup to a remote repository, or query Upload a specific local backup to a remote repository, or query
progress on a previously scheduled upload operation. progress on a previously scheduled upload operation, or abort
a running upload operation.
@RESTBODYPARAM{id,string,optional,string} @RESTBODYPARAM{id,string,optional,string}
The identifier for this backup. This is required when an upload The identifier for this backup. This is required when an upload
@@ -24,8 +25,13 @@ attribute. See the description of the _arangobackup_ program in the manual
for a description of the `config` object. for a description of the `config` object.
@RESTBODYPARAM{uploadId,string,optional,string} @RESTBODYPARAM{uploadId,string,optional,string}
Upload ID to specify for which upload operation progress is queried. Upload ID to specify for which upload operation progress is queried or
If you specify this, leave out all other body parameters. the upload operation to abort.
If you specify this, leave out all the above body parameters.
@RESTBODYPARAM{abort,boolean,optional,boolean}
Set this to `true` if a running upload operation should be aborted. In
this case, the only other body parameter which is needed is `uploadId`.
@RESTRETURNCODES @RESTRETURNCODES

View File

@@ -3096,6 +3096,8 @@ arangodb::Result hotBackupList(std::vector<ServerID> const& dbServers, VPackSlic
// check here that the backups are all made with the same version // check here that the backups are all made with the same version
std::string version; std::string version;
size_t totalSize = 0;
size_t totalFiles = 0;
for (BackupMeta const& meta : i.second) { for (BackupMeta const& meta : i.second) {
if (version.empty()) { if (version.empty()) {
@@ -3110,10 +3112,15 @@ arangodb::Result hotBackupList(std::vector<ServerID> const& dbServers, VPackSlic
break; break;
} }
} }
totalSize += meta._sizeInBytes;
totalFiles += meta._nrFiles;
} }
if (valid) { if (valid) {
BackupMeta& front = i.second.front(); BackupMeta& front = i.second.front();
front._sizeInBytes = totalSize;
front._nrFiles = totalFiles;
front._serverId = ""; // makes no sense for whole cluster
hotBackups.insert(std::make_pair(front._id, front)); hotBackups.insert(std::make_pair(front._id, front));
} }
} }
@@ -3677,7 +3684,8 @@ std::vector<std::string> idPath{"result", "id"};
arangodb::Result hotBackupDBServers(std::string const& backupId, std::string const& timeStamp, arangodb::Result hotBackupDBServers(std::string const& backupId, std::string const& timeStamp,
std::vector<ServerID> dbServers, std::vector<ServerID> dbServers,
VPackSlice agencyDump, bool force) { VPackSlice agencyDump, bool force,
BackupMeta& meta) {
auto cc = ClusterComm::instance(); auto cc = ClusterComm::instance();
if (cc == nullptr) { if (cc == nullptr) {
// nullptr happens only during controlled shutdown // nullptr happens only during controlled shutdown
@@ -3691,6 +3699,7 @@ arangodb::Result hotBackupDBServers(std::string const& backupId, std::string con
builder.add("agency-dump", agencyDump); builder.add("agency-dump", agencyDump);
builder.add("timestamp", VPackValue(timeStamp)); builder.add("timestamp", VPackValue(timeStamp));
builder.add("allowInconsistent", VPackValue(force)); builder.add("allowInconsistent", VPackValue(force));
builder.add("nrDBServers", VPackValue(dbServers.size()));
} }
auto body = std::make_shared<std::string>(builder.toJson()); auto body = std::make_shared<std::string>(builder.toJson());
@@ -3707,6 +3716,10 @@ arangodb::Result hotBackupDBServers(std::string const& backupId, std::string con
LOG_TOPIC("478ef", DEBUG, Logger::BACKUP) << "Inquiring about backup " << backupId; LOG_TOPIC("478ef", DEBUG, Logger::BACKUP) << "Inquiring about backup " << backupId;
// Now listen to the results: // Now listen to the results:
size_t totalSize = 0;
size_t totalFiles = 0;
std::string version;
bool sizeValid = true;
for (auto const& req : requests) { for (auto const& req : requests) {
auto res = req.result; auto res = req.result;
int commError = handleGeneralCommErrors(&res); int commError = handleGeneralCommErrors(&res);
@@ -3717,14 +3730,16 @@ arangodb::Result hotBackupDBServers(std::string const& backupId, std::string con
TRI_ASSERT(res.answer != nullptr); TRI_ASSERT(res.answer != nullptr);
auto resBody = res.answer->toVelocyPackBuilderPtrNoUniquenessChecks(); auto resBody = res.answer->toVelocyPackBuilderPtrNoUniquenessChecks();
VPackSlice resSlice = resBody->slice(); VPackSlice resSlice = resBody->slice();
if (!resSlice.isObject()) { if (!resSlice.isObject() || !resSlice.hasKey("result")) {
// Response has invalid format // Response has invalid format
return arangodb::Result(TRI_ERROR_HTTP_CORRUPTED_JSON, return arangodb::Result(TRI_ERROR_HTTP_CORRUPTED_JSON,
std::string("result to take snapshot on ") + std::string("result to take snapshot on ") +
req.destination + " not an object"); req.destination + " not an object or has no 'result' attribute");
} }
resSlice = resSlice.get("result");
if (!resSlice.hasKey(idPath) || !resSlice.get(idPath).isString()) { if (!resSlice.hasKey(BackupMeta::ID) ||
!resSlice.get(BackupMeta::ID).isString()) {
LOG_TOPIC("6240a", ERR, Logger::BACKUP) LOG_TOPIC("6240a", ERR, Logger::BACKUP)
<< "DB server " << req.destination << "is missing backup " << backupId; << "DB server " << req.destination << "is missing backup " << backupId;
return arangodb::Result(TRI_ERROR_FILE_NOT_FOUND, return arangodb::Result(TRI_ERROR_FILE_NOT_FOUND,
@@ -3732,10 +3747,35 @@ arangodb::Result hotBackupDBServers(std::string const& backupId, std::string con
" on server " + req.destination); " on server " + req.destination);
} }
if (resSlice.hasKey(BackupMeta::SIZEINBYTES)) {
totalSize += VelocyPackHelper::getNumericValue<size_t>(resSlice, BackupMeta::SIZEINBYTES, 0);
} else {
sizeValid = false;
}
if (resSlice.hasKey(BackupMeta::NRFILES)) {
totalFiles += VelocyPackHelper::getNumericValue<size_t>(resSlice, BackupMeta::NRFILES, 0);
} else {
sizeValid = false;
}
if (version.empty() && resSlice.hasKey(BackupMeta::VERSION)) {
VPackSlice verSlice = resSlice.get(BackupMeta::VERSION);
if (verSlice.isString()) {
version = verSlice.copyString();
}
}
LOG_TOPIC("b370d", DEBUG, Logger::BACKUP) << req.destination << " created local backup " LOG_TOPIC("b370d", DEBUG, Logger::BACKUP) << req.destination << " created local backup "
<< resSlice.get(idPath).copyString(); << resSlice.get(BackupMeta::ID).copyString();
} }
if (sizeValid) {
meta = BackupMeta(backupId, version, timeStamp, totalSize, totalFiles, static_cast<unsigned int>(dbServers.size()), "", force);
} else {
meta = BackupMeta(backupId, version, timeStamp, 0, 0, static_cast<unsigned int>(dbServers.size()), "", force);
LOG_TOPIC("54265", WARN, Logger::BACKUP)
<< "Could not determine total size of backup with id '" << backupId
<< "'!";
}
LOG_TOPIC("5c5e9", DEBUG, Logger::BACKUP) << "Have created backup " << backupId; LOG_TOPIC("5c5e9", DEBUG, Logger::BACKUP) << "Have created backup " << backupId;
return arangodb::Result(); return arangodb::Result();
@@ -3931,6 +3971,7 @@ arangodb::Result hotBackupCoordinator(ClusterFeature& feature, VPackSlice const
result = lockDBServerTransactions(backupId, dbServers, lockWait, lockedServers); result = lockDBServerTransactions(backupId, dbServers, lockWait, lockedServers);
if (!result.ok()) { if (!result.ok()) {
unlockDBServerTransactions(backupId, lockedServers); unlockDBServerTransactions(backupId, lockedServers);
lockedServers.clear();
if (result.is(TRI_ERROR_LOCAL_LOCK_FAILED)) { // Unrecoverable if (result.is(TRI_ERROR_LOCAL_LOCK_FAILED)) { // Unrecoverable
ci.agencyHotBackupUnlock(backupId, timeout, supervisionOff); ci.agencyHotBackupUnlock(backupId, timeout, supervisionOff);
return result; return result;
@@ -3960,8 +4001,9 @@ arangodb::Result hotBackupCoordinator(ClusterFeature& feature, VPackSlice const
return result; return result;
} }
BackupMeta meta(backupId, "", timeStamp, 0, 0, static_cast<unsigned int>(dbServers.size()), "", !gotLocks); // Temporary
result = hotBackupDBServers(backupId, timeStamp, dbServers, agency->slice(), result = hotBackupDBServers(backupId, timeStamp, dbServers, agency->slice(),
/* force */ !gotLocks); /* force */ !gotLocks, meta);
if (!result.ok()) { if (!result.ok()) {
unlockDBServerTransactions(backupId, dbServers); unlockDBServerTransactions(backupId, dbServers);
ci.agencyHotBackupUnlock(backupId, timeout, supervisionOff); ci.agencyHotBackupUnlock(backupId, timeout, supervisionOff);
@@ -4009,6 +4051,10 @@ arangodb::Result hotBackupCoordinator(ClusterFeature& feature, VPackSlice const
{ {
VPackObjectBuilder o(&report); VPackObjectBuilder o(&report);
report.add("id", VPackValue(timeStamp + "_" + backupId)); report.add("id", VPackValue(timeStamp + "_" + backupId));
report.add("sizeInBytes", VPackValue(meta._sizeInBytes));
report.add("nrFiles", VPackValue(meta._nrFiles));
report.add("nrDBServers", VPackValue(meta._nrDBServers));
report.add("datetime", VPackValue(meta._datetime));
if (!gotLocks) { if (!gotLocks) {
report.add("potentiallyInconsistent", VPackValue(true)); report.add("potentiallyInconsistent", VPackValue(true));
} }
@@ -4019,7 +4065,7 @@ arangodb::Result hotBackupCoordinator(ClusterFeature& feature, VPackSlice const
} catch (std::exception const& e) { } catch (std::exception const& e) {
return arangodb::Result( return arangodb::Result(
TRI_ERROR_HOT_BACKUP_INTERNAL, TRI_ERROR_HOT_BACKUP_INTERNAL,
std::string("caught exception cretaing cluster backup: ") + e.what()); std::string("caught exception creating cluster backup: ") + e.what());
} }
} }

View File

@@ -429,6 +429,8 @@ int main(int argc, char* argv[]) {
if (res != 0) { if (res != 0) {
std::cerr << "WARNING: could not change into directory '" << workdir << "'" << std::endl; std::cerr << "WARNING: could not change into directory '" << workdir << "'" << std::endl;
} }
execv(argv[0], argv); if (execvp(argv[0], argv) == -1) {
std::cerr << "WARNING: could not execvp ourselves, restore will not work!" << std::endl;
}
#endif #endif
} }

View File

@@ -29,6 +29,9 @@
#include <velocypack/Slice.h> #include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h> #include <velocypack/velocypack-aliases.h>
#include "Basics/VelocyPackHelper.h"
#include "Cluster/ServerState.h"
namespace arangodb { namespace arangodb {
constexpr char const* BAD_PARAMS_CREATE = "backup payload must be an object " constexpr char const* BAD_PARAMS_CREATE = "backup payload must be an object "
@@ -43,10 +46,20 @@ struct BackupMeta {
std::string _id; std::string _id;
std::string _version; std::string _version;
std::string _datetime; std::string _datetime;
size_t _sizeInBytes;
size_t _nrFiles;
unsigned int _nrDBServers;
std::string _serverId;
bool _potentiallyInconsistent;
static constexpr const char *ID = "id"; static constexpr const char *ID = "id";
static constexpr const char *VERSION = "version"; static constexpr const char *VERSION = "version";
static constexpr const char *DATETIME = "datetime"; static constexpr const char *DATETIME = "datetime";
static constexpr const char *SIZEINBYTES = "sizeInBytes";
static constexpr const char *NRFILES = "nrFiles";
static constexpr const char *NRDBSERVERS = "nrDBServers";
static constexpr const char *SERVERID = "serverId";
static constexpr const char *POTENTIALLYINCONSISTENT = "potentiallyInconsistent";
void toVelocyPack(VPackBuilder &builder) const { void toVelocyPack(VPackBuilder &builder) const {
{ {
@@ -54,6 +67,13 @@ struct BackupMeta {
builder.add(ID, VPackValue(_id)); builder.add(ID, VPackValue(_id));
builder.add(VERSION, VPackValue(_version)); builder.add(VERSION, VPackValue(_version));
builder.add(DATETIME, VPackValue(_datetime)); builder.add(DATETIME, VPackValue(_datetime));
builder.add(SIZEINBYTES, VPackValue(_sizeInBytes));
builder.add(NRFILES, VPackValue(_nrFiles));
builder.add(NRDBSERVERS, VPackValue(_nrDBServers));
if (ServerState::instance()->isDBServer()) {
builder.add(SERVERID, VPackValue(_serverId));
}
builder.add(POTENTIALLYINCONSISTENT, VPackValue(_potentiallyInconsistent));
} }
} }
@@ -63,14 +83,24 @@ struct BackupMeta {
meta._id = slice.get(ID).copyString(); meta._id = slice.get(ID).copyString();
meta._version = slice.get(VERSION).copyString(); meta._version = slice.get(VERSION).copyString();
meta._datetime = slice.get(DATETIME).copyString(); meta._datetime = slice.get(DATETIME).copyString();
meta._sizeInBytes = basics::VelocyPackHelper::getNumericValue<size_t>(
slice, SIZEINBYTES, 0);
meta._nrFiles = basics::VelocyPackHelper::getNumericValue<size_t>(
slice, NRFILES, 0);
meta._nrDBServers = basics::VelocyPackHelper::getNumericValue<unsigned int>(
slice, NRDBSERVERS, 1);
meta._serverId = basics::VelocyPackHelper::getStringValue(slice, SERVERID, "");
meta._potentiallyInconsistent = basics::VelocyPackHelper::getBooleanValue(slice, POTENTIALLYINCONSISTENT, false);
return meta; return meta;
} catch (std::exception const& e) { } catch (std::exception const& e) {
return ResultT<BackupMeta>::error(TRI_ERROR_BAD_PARAMETER, e.what()); return ResultT<BackupMeta>::error(TRI_ERROR_BAD_PARAMETER, e.what());
} }
} }
BackupMeta(std::string const& id, std::string const& version, std::string const& datetime) : BackupMeta(std::string const& id, std::string const& version, std::string const& datetime, size_t sizeInBytes, size_t nrFiles, unsigned int nrDBServers, std::string const& serverId, bool potentiallyInconsistent) :
_id(id), _version(version), _datetime(datetime) {} _id(id), _version(version), _datetime(datetime),
_sizeInBytes(sizeInBytes), _nrFiles(nrFiles), _nrDBServers(nrDBServers),
_serverId(serverId), _potentiallyInconsistent(potentiallyInconsistent) {}
private: private:
BackupMeta() {} BackupMeta() {}

View File

@@ -36,7 +36,7 @@ size_t const CollectionKeysRepository::MaxCollectCount = 32;
/// @brief create a collection keys repository /// @brief create a collection keys repository
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
CollectionKeysRepository::CollectionKeysRepository() : _lock(), _keys() { CollectionKeysRepository::CollectionKeysRepository() : _lock(), _keys(), _stopped(false) {
_keys.reserve(64); _keys.reserve(64);
} }
@@ -82,7 +82,9 @@ CollectionKeysRepository::~CollectionKeysRepository() {
void CollectionKeysRepository::store(std::unique_ptr<arangodb::CollectionKeys> keys) { void CollectionKeysRepository::store(std::unique_ptr<arangodb::CollectionKeys> keys) {
MUTEX_LOCKER(mutexLocker, _lock); MUTEX_LOCKER(mutexLocker, _lock);
_keys.emplace(keys->id(), std::move(keys)); if (!_stopped) {
_keys.emplace(keys->id(), std::move(keys));
}
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////

View File

@@ -26,6 +26,7 @@
#include "Basics/Common.h" #include "Basics/Common.h"
#include "Basics/Mutex.h" #include "Basics/Mutex.h"
#include "Basics/MutexLocker.h"
#include "Utils/CollectionKeys.h" #include "Utils/CollectionKeys.h"
#include "VocBase/voc-types.h" #include "VocBase/voc-types.h"
@@ -90,6 +91,15 @@ class CollectionKeysRepository {
bool garbageCollect(bool force); bool garbageCollect(bool force);
//////////////////////////////////////////////////////////////////////////
/// @brief stop further stores, this is used on shutdown
//////////////////////////////////////////////////////////////////////////
void stopStores() {
MUTEX_LOCKER(mutexLocker, _lock);
_stopped = true;
}
private: private:
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
/// @brief mutex for the repository /// @brief mutex for the repository
@@ -108,6 +118,13 @@ class CollectionKeysRepository {
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
static size_t const MaxCollectCount; static size_t const MaxCollectCount;
////////////////////////////////////////////////////////////////////////////
/// @brief stopped flag, indicating that no more CollectionKeys can be
/// stored
////////////////////////////////////////////////////////////////////////////
bool _stopped;
}; };
} // namespace arangodb } // namespace arangodb

View File

@@ -766,6 +766,7 @@ void TRI_vocbase_t::stop() {
// soon, we have to retry, since some of these collection keys might currently // soon, we have to retry, since some of these collection keys might currently
// still being in use: // still being in use:
auto lastTime = TRI_microtime(); auto lastTime = TRI_microtime();
_collectionKeys->stopStores();
while (true) { while (true) {
if (!_collectionKeys->garbageCollect(true)) { if (!_collectionKeys->garbageCollect(true)) {
break; break;

View File

@@ -243,6 +243,17 @@ arangodb::Result executeList(arangodb::httpclient::SimpleHttpClient& client,
if (meta.ok()) { if (meta.ok()) {
LOG_TOPIC("0f208", INFO, arangodb::Logger::BACKUP) << " version: " << meta.get()._version; LOG_TOPIC("0f208", INFO, arangodb::Logger::BACKUP) << " version: " << meta.get()._version;
LOG_TOPIC("55af7", INFO, arangodb::Logger::BACKUP) << " date/time: " << meta.get()._datetime; LOG_TOPIC("55af7", INFO, arangodb::Logger::BACKUP) << " date/time: " << meta.get()._datetime;
LOG_TOPIC("43522", INFO, arangodb::Logger::BACKUP) << " size in bytes: " << meta.get()._sizeInBytes;
LOG_TOPIC("12532", INFO, arangodb::Logger::BACKUP) << " number of files: " << meta.get()._nrFiles;
LOG_TOPIC("43212", INFO, arangodb::Logger::BACKUP) << " number of DBServers: " << meta.get()._nrDBServers;
if (!meta.get()._serverId.empty()) {
LOG_TOPIC("11112", INFO, arangodb::Logger::BACKUP) << " serverId: " << meta.get()._serverId;
}
if (meta.get()._potentiallyInconsistent) {
LOG_TOPIC("56241", INFO, arangodb::Logger::BACKUP) << " potentiallyInconsistent: true";
} else {
LOG_TOPIC("56242", INFO, arangodb::Logger::BACKUP) << " potentiallyInconsistent: false";
}
} }
} }
} }
@@ -316,7 +327,14 @@ arangodb::Result executeCreate(arangodb::httpclient::SimpleHttpClient& client,
LOG_TOPIC("c4d37", INFO, arangodb::Logger::BACKUP) LOG_TOPIC("c4d37", INFO, arangodb::Logger::BACKUP)
<< "Backup succeeded. Generated identifier '" << identifier.copyString() << "'"; << "Backup succeeded. Generated identifier '" << identifier.copyString() << "'";
VPackSlice sizeInBytes = resultObject.get("sizeInBytes");
VPackSlice nrFiles = resultObject.get("nrFiles");
if (sizeInBytes.isInteger() && nrFiles.isInteger()) {
uint64_t size = sizeInBytes.getNumber<uint64_t>();
uint64_t nr = nrFiles.getNumber<uint64_t>();
LOG_TOPIC("ce423", INFO, arangodb::Logger::BACKUP)
<< "Total size of backup: " << size << ", number of files: " << nr;
}
return result; return result;
} }