mirror of https://gitee.com/bigwinds/arangodb
Bug fix 3.5/fix internal issue 4451 (#10538)
* Fix dump_authentication suite
* Fix typos
* use the correct attribute name
* properly reload user permissions after _users collection restore
* fixed foxx restore test
* changelog
* changed the order of index creation during restore for _users collection
This commit is contained in:
parent 76f84f2c6c
commit a7b64bd815
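The underlying problem: arangorestore can overwrite the _users collection, but the server keeps credentials and permissions in an in-memory cache, so authentication kept answering from stale data until that cache was revalidated. A minimal arangosh sketch of the idea (illustrative only, using the @arangodb/users module rather than the commit's C++ internals; "alice" is a hypothetical user):

var users = require("@arangodb/users");
users.save("alice", "secret");            // state captured by a dump
// ... dump taken here, password changed, dump restored via arangorestore ...
users.reload();                           // revalidate the server's user cache
if (!users.isValid("alice", "secret")) {  // the restored credentials must win
  throw new Error("user cache still serves stale credentials");
}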
@@ -1,6 +1,8 @@
 v3.5.3 (XXXX-XX-XX)
 -------------------
 
+* The _users collection is now properly restored when using arangorestore.
+
 * Allow the optimizer to use indexes when a collection attribute is compared to
   an expansion followed by an attribute name, e.g.
   `doc.value IN something[*].name`.

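For illustration, an arangosh query of the shape the second changelog entry describes (the collection and index here are hypothetical, not part of the commit): with the change, a persistent index on `value` can serve the `IN` comparison against the expansion `something[*].name`.

db._create("docs");
db.docs.ensureIndex({ type: "persistent", fields: ["value"] });
// the FILTER compares a collection attribute to an expansion + attribute name
db._query(
  "FOR doc IN docs FILTER doc.value IN @something[*].name RETURN doc",
  { something: [ { name: "a" }, { name: "b" } ] }).toArray();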
@@ -394,6 +394,12 @@ VPackBuilder auth::UserManager::allUsers() {
   return result;
 }
 
+void auth::UserManager::triggerCacheRevalidation() {
+  triggerLocalReload();
+  triggerGlobalReload();
+  loadFromDB();
+}
+
 /// Trigger eventual reload, user facing API call
 void auth::UserManager::triggerGlobalReload() {
   if (!ServerState::instance()->isCoordinator()) {

@@ -90,6 +90,9 @@ class UserManager {
   /// Trigger eventual reload on all other coordinators (and in TokenCache)
   void triggerGlobalReload();
 
+  /// Trigger cache revalidation after user restore
+  void triggerCacheRevalidation();
+
   /// Create the root user with a default password, will fail if the user
   /// already exists. Only ever call if you can guarantee to be in charge
   void createRootUser();

@@ -1455,7 +1455,7 @@ Result RestReplicationHandler::processRestoreUsersBatch(std::string const& colle
   VPackSlice allMarkersSlice = allMarkers.slice();
 
   std::string aql(
-      "FOR u IN @restored UPSERT {name: u.name} INSERT u REPLACE u "
+      "FOR u IN @restored UPSERT {user: u.user} INSERT u REPLACE u "
       "INTO @@collection OPTIONS {ignoreErrors: true, silent: true, "
       "waitForSync: false, isRestore: true}");
 

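Documents in _users store the username in the `user` attribute; there is no `name` attribute, so the old UPSERT never matched an existing document and restored users were inserted as duplicates instead of replaced. A hedged arangosh re-enactment against a scratch collection ("demoUsers" is hypothetical; the real statement above targets _users):

db._create("demoUsers");
db.demoUsers.save({ user: "alice", active: true });
db._query(
  "FOR u IN @restored UPSERT { user: u.user } INSERT u REPLACE u " +
  "IN demoUsers OPTIONS { ignoreErrors: true }",
  { restored: [ { user: "alice", active: false } ] });
// exactly one document for "alice" remains; matching on { name: u.name }
// would never have found it and would have inserted a second one
db.demoUsers.count();  // 1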
@@ -1508,8 +1508,7 @@ Result RestReplicationHandler::processRestoreUsersBatch(std::string const& colle
   AuthenticationFeature* af = AuthenticationFeature::instance();
   TRI_ASSERT(af->userManager() != nullptr);
   if (af->userManager() != nullptr) {
-    af->userManager()->triggerLocalReload();
-    af->userManager()->triggerGlobalReload();
+    af->userManager()->triggerCacheRevalidation();
   }
 
   return queryResult.result;

@@ -156,8 +156,8 @@ arangodb::Result checkHttpResponse(arangodb::httpclient::SimpleHttpClient& clien
   if (response == nullptr || !response->isComplete()) {
     return {TRI_ERROR_INTERNAL,
             "got invalid response from server: '" + client.getErrorMessage() +
-                "' while executing " + requestAction + (originalRequest.empty() ? "" : " with this payload: '" +
-                originalRequest + "'")};
+                "' while executing " + requestAction +
+                (originalRequest.empty() ? "" : " with this payload: '" + originalRequest + "'")};
   }
   if (response->wasHttpError()) {
     int errorNum = TRI_ERROR_INTERNAL;

@@ -168,9 +168,9 @@ arangodb::Result checkHttpResponse(arangodb::httpclient::SimpleHttpClient& clien
       errorNum = error.get(arangodb::StaticStrings::ErrorNum).getNumericValue<int>();
       errorMsg = error.get(arangodb::StaticStrings::ErrorMessage).copyString();
     }
-    return {errorNum, "got invalid response from server: HTTP " +
-                          itoa(response->getHttpReturnCode()) + ": '" +
-                          errorMsg + "' while executing '" + requestAction +
+    return {errorNum,
+            "got invalid response from server: HTTP " + itoa(response->getHttpReturnCode()) +
+                ": '" + errorMsg + "' while executing '" + requestAction +
                 (originalRequest.empty() ? "" : "' with this payload: '" + originalRequest + "'")};
   }
   return {TRI_ERROR_NO_ERROR};

@@ -191,12 +191,10 @@ bool sortCollectionsForCreation(VPackBuilder const& l, VPackBuilder const& r) {
   // NB: Dependency graph has depth at most 1, no need to manage complex DAG
   VPackSlice leftDist = left.get("distributeShardsLike");
   VPackSlice rightDist = right.get("distributeShardsLike");
-  if (leftDist.isNone() && rightDist.isString() &&
-      rightDist.copyString() == leftName) {
+  if (leftDist.isNone() && rightDist.isString() && rightDist.copyString() == leftName) {
     return true;
   }
-  if (rightDist.isNone() && leftDist.isString() &&
-      leftDist.copyString() == rightName) {
+  if (rightDist.isNone() && leftDist.isString() && leftDist.copyString() == rightName) {
     return false;
   }
 

@@ -212,12 +210,10 @@ bool sortCollectionsForCreation(VPackBuilder const& l, VPackBuilder const& r) {
 
   // Finally, sort by name so we have stable, reproducible results
   // Sort system collections first
-  if (!leftName.empty() && leftName[0] == '_' &&
-      !rightName.empty() && rightName[0] != '_') {
+  if (!leftName.empty() && leftName[0] == '_' && !rightName.empty() && rightName[0] != '_') {
     return true;
   }
-  if (!leftName.empty() && leftName[0] != '_' &&
-      !rightName.empty() && rightName[0] == '_') {
+  if (!leftName.empty() && leftName[0] != '_' && !rightName.empty() && rightName[0] == '_') {
     return false;
   }
   return strcasecmp(leftName.c_str(), rightName.c_str()) < 0;

@@ -546,11 +542,11 @@ arangodb::Result recreateCollection(arangodb::httpclient::SimpleHttpClient& http
   // re-create collection
   if (jobData.options.progress) {
     if (jobData.options.overwrite) {
-      LOG_TOPIC("9b414", INFO, Logger::RESTORE) << "# Re-creating " << collectionType
-                                                << " collection '" << cname << "'...";
+      LOG_TOPIC("9b414", INFO, Logger::RESTORE)
+          << "# Re-creating " << collectionType << " collection '" << cname << "'...";
     } else {
-      LOG_TOPIC("a9123", INFO, Logger::RESTORE) << "# Creating " << collectionType
-                                                << " collection '" << cname << "'...";
+      LOG_TOPIC("a9123", INFO, Logger::RESTORE)
+          << "# Creating " << collectionType << " collection '" << cname << "'...";
     }
   }
 

@@ -634,7 +630,8 @@ arangodb::Result restoreData(arangodb::httpclient::SimpleHttpClient& httpClient,
       cname + "_" + arangodb::rest::SslInterface::sslMD5(cname) + ".data.json");
   if (!datafile || datafile->status().fail()) {
     datafile = jobData.directory.readableFile(
-        cname + "_" + arangodb::rest::SslInterface::sslMD5(cname) + ".data.json.gz");
+        cname + "_" + arangodb::rest::SslInterface::sslMD5(cname) +
+        ".data.json.gz");
   }
   if (!datafile || datafile->status().fail()) {
     datafile = jobData.directory.readableFile(cname + ".data.json.gz");

@@ -643,7 +640,8 @@ arangodb::Result restoreData(arangodb::httpclient::SimpleHttpClient& httpClient,
     datafile = jobData.directory.readableFile(cname + ".data.json");
   }
   if (!datafile || datafile->status().fail()) {
-    result = {TRI_ERROR_CANNOT_READ_FILE, "could not open data file for collection '" + cname + "'"};
+    result = {TRI_ERROR_CANNOT_READ_FILE,
+              "could not open data file for collection '" + cname + "'"};
     return result;
   }
 

@@ -658,7 +656,8 @@ arangodb::Result restoreData(arangodb::httpclient::SimpleHttpClient& httpClient,
   int64_t numReadForThisCollection = 0;
   int64_t numReadSinceLastReport = 0;
 
-  bool const isGzip = (0 == datafile->path().substr(datafile->path().size() - 3).compare(".gz"));
+  bool const isGzip =
+      (0 == datafile->path().substr(datafile->path().size() - 3).compare(".gz"));
 
   buffer.clear();
   while (true) {

@@ -731,7 +730,8 @@ arangodb::Result restoreData(arangodb::httpclient::SimpleHttpClient& httpClient,
     } else {
       ofFilesize << " of " << fileSize;
       percentage << " ("
-                 << int(100. * double(numReadForThisCollection) / double(fileSize)) << " %)";
+                 << int(100. * double(numReadForThisCollection) / double(fileSize))
+                 << " %)";
     }  // else
 
     LOG_TOPIC("69a73", INFO, Logger::RESTORE)

@@ -1036,11 +1036,12 @@ arangodb::Result processInputDirectory(
     Result res = ::triggerFoxxHeal(httpClient);
     if (res.fail()) {
       LOG_TOPIC("47cd7", WARN, Logger::RESTORE)
-          << "Reloading of Foxx services failed: " << res.errorMessage()
-          << "- in the cluster Foxx services will be available eventually, On single servers send "
+          << "Reloading of Foxx services failed: " << res.errorMessage() << "- in the cluster Foxx services will be available eventually, On single servers send "
           << "a POST to '/_api/foxx/_local/heal' on the current database, "
-          << "with an empty body. Please note that any of this is not necessary if the Foxx APIs "
-          << "have been turned off on the server using the option `--foxx.api false`.";
+          << "with an empty body. Please note that any of this is not "
+             "necessary if the Foxx APIs "
+          << "have been turned off on the server using the option "
+             "`--foxx.api false`.";
     }
   }
 

@@ -1075,6 +1076,23 @@ arangodb::Result processInputDirectory(
 arangodb::Result processJob(arangodb::httpclient::SimpleHttpClient& httpClient,
                             arangodb::RestoreFeature::JobData& jobData) {
   arangodb::Result result;
+
+  VPackSlice const parameters = jobData.collection.get("parameters");
+  std::string const cname =
+      arangodb::basics::VelocyPackHelper::getStringValue(parameters, "name", "");
+
+  if (cname == "_users") {
+    // special case: never restore data in the _users collection first as it could
+    // potentially change user permissions. In that case index creation will fail.
+    result = ::restoreIndexes(httpClient, jobData);
+    if (result.fail()) {
+      return result;
+    }
+    result = ::restoreData(httpClient, jobData);
+    if (result.fail()) {
+      return result;
+    }
+  } else {
   if (jobData.options.indexesFirst && jobData.options.importStructure) {
     // restore indexes first if we are using rocksdb
     result = ::restoreIndexes(httpClient, jobData);

@@ -1095,6 +1113,7 @@ arangodb::Result processJob(arangodb::httpclient::SimpleHttpClient& httpClient,
       return result;
     }
   }
+  }
 
   ++jobData.stats.restoredCollections;
 

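The special case above reorders work for _users only: its indexes are created before any data is loaded, because restoring the data may change the permissions of the very user performing the restore, after which a later index-creation call could be rejected. Reduced to a plain JavaScript sketch (illustrative; restoreOrder is a hypothetical helper mirroring processJob's branching, not code from the commit):

function restoreOrder(collectionName, indexesFirst) {
  if (collectionName === "_users") {
    return [ "indexes", "data" ];   // always indexes first for _users
  }
  // other collections keep the engine-dependent order
  return indexesFirst ? [ "indexes", "data" ] : [ "data", "indexes" ];
}

restoreOrder("_users", false);      // => ["indexes", "data"]
restoreOrder("products", false);    // => ["data", "indexes"]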
@@ -1106,8 +1125,9 @@ arangodb::Result processJob(arangodb::httpclient::SimpleHttpClient& httpClient,
     int type = arangodb::basics::VelocyPackHelper::getNumericValue<int>(parameters,
                                                                         "type", 2);
     std::string const collectionType(type == 2 ? "document" : "edge");
-    LOG_TOPIC("6ae09", INFO, arangodb::Logger::RESTORE) << "# Successfully restored " << collectionType
-                                                        << " collection '" << cname << "'";
+    LOG_TOPIC("6ae09", INFO, arangodb::Logger::RESTORE)
+        << "# Successfully restored " << collectionType << " collection '"
+        << cname << "'";
   }
 
   return result;

@@ -1186,8 +1206,8 @@ void RestoreFeature::collectOptions(std::shared_ptr<options::ProgramOptions> opt
       "force usage of the same database name as in the source dump.json file",
       new BooleanParameter(&_options.forceSameDatabase));
 
-  options->addOption(
-      "--all-databases", "restore data to all databases",
+  options
+      ->addOption("--all-databases", "restore data to all databases",
                   new BooleanParameter(&_options.allDatabases))
       .setIntroducedIn(30500);
 

@@ -1279,13 +1299,15 @@ void RestoreFeature::validateOptions(std::shared_ptr<options::ProgramOptions> op
   if (_options.allDatabases) {
     if (options->processingResult().touched("server.database")) {
       LOG_TOPIC("94d22", FATAL, arangodb::Logger::RESTORE)
-          << "cannot use --server.database and --all-databases at the same time";
+          << "cannot use --server.database and --all-databases at the same "
+             "time";
       FATAL_ERROR_EXIT();
     }
 
     if (_options.forceSameDatabase) {
       LOG_TOPIC("fd66a", FATAL, arangodb::Logger::RESTORE)
-          << "cannot use --force-same-database and --all-databases at the same time";
+          << "cannot use --force-same-database and --all-databases at the same "
+             "time";
       FATAL_ERROR_EXIT();
     }
   }

@@ -1409,7 +1431,8 @@ void RestoreFeature::start() {
   // and we have to process users last of all. otherwise we risk updating the
   // credentials for the user which users the current arangorestore connection, and
   // this will make subsequent arangorestore calls to the server fail with "unauthorized"
-  std::sort(databases.begin(), databases.end(), [](std::string const& lhs, std::string const& rhs) {
+  std::sort(databases.begin(), databases.end(),
+            [](std::string const& lhs, std::string const& rhs) {
     if (lhs == "_system" && rhs != "_system") {
       return false;
     } else if (rhs == "_system" && lhs != "_system") {

@@ -1418,7 +1441,9 @@ void RestoreFeature::start() {
     return lhs < rhs;
   });
   if (databases.empty()) {
-    LOG_TOPIC("b41d9", FATAL, Logger::RESTORE) << "Unable to find per-database subdirectories in input directory '" << _options.inputPath << "'. No data will be restored!";
+    LOG_TOPIC("b41d9", FATAL, Logger::RESTORE)
+        << "Unable to find per-database subdirectories in input directory '"
+        << _options.inputPath << "'. No data will be restored!";
     FATAL_ERROR_EXIT();
   }
 } else {

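The comparator keeps `_system` at the very end of the list for the reason the comment above gives: `_system` holds the `_users` collection, and restoring it earlier could invalidate the credentials the running arangorestore session depends on. The same ordering rendered as a standalone JavaScript sketch (assumption: database names as plain strings):

var databases = [ "b", "_system", "a" ];
databases.sort(function (lhs, rhs) {
  if (lhs === "_system" && rhs !== "_system") { return 1; }   // _system last
  if (rhs === "_system" && lhs !== "_system") { return -1; }
  return lhs < rhs ? -1 : (lhs > rhs ? 1 : 0);                // otherwise by name
});
// => [ "a", "b", "_system" ]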
@@ -1430,8 +1455,8 @@ void RestoreFeature::start() {
   // final result
   Result result;
 
-  result = _clientManager.getConnectedClient(httpClient, _options.force,
-                                             true, !_options.createDatabase, false);
+  result = _clientManager.getConnectedClient(httpClient, _options.force, true,
+                                             !_options.createDatabase, false);
   if (result.is(TRI_ERROR_SIMPLE_CLIENT_COULD_NOT_CONNECT)) {
     LOG_TOPIC("c23bf", FATAL, Logger::RESTORE)
         << "cannot create server connection, giving up!";

@@ -1447,7 +1472,9 @@ void RestoreFeature::start() {
 
     Result res = ::tryCreateDatabase(dbName);
     if (res.fail()) {
-      LOG_TOPIC("b19db", FATAL, Logger::RESTORE) << "Could not create database '" << dbName << "': " << httpClient->getErrorMessage();
+      LOG_TOPIC("b19db", FATAL, Logger::RESTORE)
+          << "Could not create database '" << dbName
+          << "': " << httpClient->getErrorMessage();
       FATAL_ERROR_EXIT();
     }
 

@@ -1455,9 +1482,12 @@ void RestoreFeature::start() {
       client->setDatabaseName(dbName);
 
       // re-check connection and version
-      result = _clientManager.getConnectedClient(httpClient, _options.force, true, true, false);
+      result = _clientManager.getConnectedClient(httpClient, _options.force,
+                                                 true, true, false);
     } else {
-      LOG_TOPIC("ad95b", WARN, Logger::RESTORE) << "Database '" << dbName << "' does not exist on target endpoint. In order to create this database along with the restore, please use the --create-database option";
+      LOG_TOPIC("ad95b", WARN, Logger::RESTORE)
+          << "Database '"
+          << dbName << "' does not exist on target endpoint. In order to create this database along with the restore, please use the --create-database option";
     }
   }
 

@@ -1479,7 +1509,10 @@ void RestoreFeature::start() {
   }
 
   if (role == "DBSERVER" || role == "PRIMARY") {
-    LOG_TOPIC("1fc99", WARN, arangodb::Logger::RESTORE) << "You connected to a DBServer node, but operations in a cluster should be carried out via a Coordinator. This is an unsupported operation!";
+    LOG_TOPIC("1fc99", WARN, arangodb::Logger::RESTORE)
+        << "You connected to a DBServer node, but operations in a cluster "
+           "should be carried out via a Coordinator. This is an unsupported "
+           "operation!";
   }
 
   std::tie(result, _options.indexesFirst) =

@@ -1500,10 +1533,14 @@ void RestoreFeature::start() {
   // set up threads and workers
   _clientTaskQueue.spawnWorkers(_clientManager, _options.threadCount);
 
-  LOG_TOPIC("6bb3c", DEBUG, Logger::RESTORE) << "Using " << _options.threadCount << " worker thread(s)";
+  LOG_TOPIC("6bb3c", DEBUG, Logger::RESTORE)
+      << "Using " << _options.threadCount << " worker thread(s)";
 
   if (_options.allDatabases) {
-    LOG_TOPIC("7c10a", INFO, Logger::RESTORE) << "About to restore databases '" << basics::StringUtils::join(databases, "', '") << "' from dump directory '" << _options.inputPath << "'...";
+    LOG_TOPIC("7c10a", INFO, Logger::RESTORE)
+        << "About to restore databases '"
+        << basics::StringUtils::join(databases, "', '")
+        << "' from dump directory '" << _options.inputPath << "'...";
   }
 
   for (auto const& db : databases) {

@@ -1513,10 +1550,11 @@ void RestoreFeature::start() {
     // inject current database
     client->setDatabaseName(db);
     LOG_TOPIC("36075", INFO, Logger::RESTORE) << "Restoring database '" << db << "'";
-    _directory = std::make_unique<ManagedDirectory>(basics::FileUtils::buildFilename(_options.inputPath, db), false, false);
+    _directory = std::make_unique<ManagedDirectory>(
+        basics::FileUtils::buildFilename(_options.inputPath, db), false, false);
 
-    result = _clientManager.getConnectedClient(httpClient, _options.force,
-                                               false, !_options.createDatabase, false);
+    result = _clientManager.getConnectedClient(httpClient, _options.force, false,
+                                               !_options.createDatabase, false);
     if (result.is(TRI_ERROR_SIMPLE_CLIENT_COULD_NOT_CONNECT)) {
       LOG_TOPIC("3e715", FATAL, Logger::RESTORE)
           << "cannot create server connection, giving up!";

@@ -1532,7 +1570,9 @@ void RestoreFeature::start() {
 
     result = ::tryCreateDatabase(db);
     if (result.fail()) {
-      LOG_TOPIC("7a35f", ERR, Logger::RESTORE) << "Could not create database '" << db << "': " << httpClient->getErrorMessage();
+      LOG_TOPIC("7a35f", ERR, Logger::RESTORE)
+          << "Could not create database '" << db
+          << "': " << httpClient->getErrorMessage();
       break;
     }
 

@@ -1540,14 +1580,18 @@ void RestoreFeature::start() {
       client->setDatabaseName(db);
 
       // re-check connection and version
-      result = _clientManager.getConnectedClient(httpClient, _options.force, false, true, false);
+      result = _clientManager.getConnectedClient(httpClient, _options.force,
+                                                 false, true, false);
     } else {
-      LOG_TOPIC("be594", WARN, Logger::RESTORE) << "Database '" << db << "' does not exist on target endpoint. In order to create this database along with the restore, please use the --create-database option";
+      LOG_TOPIC("be594", WARN, Logger::RESTORE)
+          << "Database '"
+          << db << "' does not exist on target endpoint. In order to create this database along with the restore, please use the --create-database option";
     }
   }
 
   if (result.fail()) {
-    result.reset(result.errorNumber(), std::string("cannot create server connection: ") + result.errorMessage());
+    result.reset(result.errorNumber(),
+                 std::string("cannot create server connection: ") + result.errorMessage());
 
     if (!_options.force) {
       break;

@@ -1574,13 +1618,16 @@ void RestoreFeature::start() {
     result = ::processInputDirectory(*httpClient, _clientTaskQueue, *this,
                                      _options, *_directory, _stats);
   } catch (basics::Exception const& ex) {
-    LOG_TOPIC("52b22", ERR, arangodb::Logger::RESTORE) << "caught exception: " << ex.what();
+    LOG_TOPIC("52b22", ERR, arangodb::Logger::RESTORE)
+        << "caught exception: " << ex.what();
     result = {ex.code(), ex.what()};
   } catch (std::exception const& ex) {
-    LOG_TOPIC("8f13f", ERR, arangodb::Logger::RESTORE) << "caught exception: " << ex.what();
+    LOG_TOPIC("8f13f", ERR, arangodb::Logger::RESTORE)
+        << "caught exception: " << ex.what();
     result = {TRI_ERROR_INTERNAL, ex.what()};
   } catch (...) {
-    LOG_TOPIC("a74e8", ERR, arangodb::Logger::RESTORE) << "caught unknown exception";
+    LOG_TOPIC("a74e8", ERR, arangodb::Logger::RESTORE)
+        << "caught unknown exception";
     result = {TRI_ERROR_INTERNAL};
   }
 

@@ -113,6 +113,13 @@ class DumpRestoreHelper {
     print(CYAN + Date() + ': ' + this.which + ' and Restore - ' + s + RESET);
   }
 
+  adjustRestoreToDump()
+  {
+    this.restoreOptions = this.dumpOptions;
+    this.restoreConfig = pu.createBaseConfig('restore', this.dumpOptions, this.instanceInfo);
+    this.arangorestore = pu.run.arangoDumpRestoreWithConfig.bind(this, this.restoreConfig, this.restoreOptions, this.instanceInfo.rootDir, this.options.coreCheck);
+  }
+
   isAlive() {
     return pu.arangod.check.instanceAlive(this.instanceInfo, this.options);
   }

@@ -157,8 +164,14 @@ class DumpRestoreHelper {
     return this.validate(this.results.setup);
   }
 
-  dumpFrom(database) {
+  dumpFrom(database, separateDir = false) {
     this.print('dump');
+    if (separateDir) {
+      if (!fs.exists(fs.join(this.instanceInfo.rootDir, 'dump'))) {
+        fs.makeDirectory(fs.join(this.instanceInfo.rootDir, 'dump'));
+      }
+      this.dumpConfig.setOutputDirectory('dump' + fs.pathSeparator + database);
+    }
     if (!this.dumpConfig.haveSetAllDatabases()) {
       this.dumpConfig.setDatabase(database);
     }

@@ -166,8 +179,19 @@ class DumpRestoreHelper {
     return this.validate(this.results.dump);
   }
 
-  restoreTo(database) {
+  restoreTo(database, options = { separate: false, fromDir: '' }) {
     this.print('restore');
+
+    if (options.hasOwnProperty('separate') && options.separate === true) {
+      if (!options.hasOwnProperty('fromDir') || typeof options.fromDir !== 'string') {
+        options.fromDir = database;
+      }
+      if (!fs.exists(fs.join(this.instanceInfo.rootDir, 'dump'))) {
+        fs.makeDirectory(fs.join(this.instanceInfo.rootDir, 'dump'));
+      }
+      this.restoreConfig.setInputDirectory('dump' + fs.pathSeparator + options.fromDir, true);
+    }
+
     if (!this.restoreConfig.haveSetAllDatabases()) {
       this.restoreConfig.setDatabase(database);
     }

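Usage note on the new options (names taken from the diff): when `separate` is set and `fromDir` is omitted or not a string, the input directory defaults to a subdirectory named after the target database.

helper.restoreTo('UnitTestsDumpDst', { separate: true, fromDir: 'UnitTestsDumpSrc' });
// reads from dump/UnitTestsDumpSrc

helper.restoreTo('_system', { separate: true });
// fromDir omitted: defaults to the database name, i.e. dump/_system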
@@ -213,6 +237,7 @@ class DumpRestoreHelper {
   restoreFoxxComplete(database) {
     this.print('Foxx Apps with full restore');
     this.restoreConfig.setDatabase(database);
+    this.restoreConfig.setIncludeSystem(true);
     this.results.restoreFoxxComplete = this.arangorestore();
     return this.validate(this.results.restoreFoxxComplete);
   }

@@ -343,8 +368,21 @@ function dump_backend (options, serverAuthInfo, clientAuth, dumpOptions, restore
   const cleanupFile = tu.makePathUnix(fs.join(testPaths[which][0], tstFiles.dumpCleanup));
   const testFile = tu.makePathUnix(fs.join(testPaths[which][0], tstFiles.dumpAgain));
   const tearDownFile = tu.makePathUnix(fs.join(testPaths[which][0], tstFiles.dumpTearDown));
-  if (
-    !helper.runSetupSuite(setupFile) ||
+
+  if (options.hasOwnProperty("multipleDumps") && options.multipleDumps) {
+    if (!helper.runSetupSuite(setupFile) ||
+        !helper.dumpFrom('_system', true) ||
+        !helper.dumpFrom('UnitTestsDumpSrc', true) ||
+        !helper.runCleanupSuite(cleanupFile) ||
+        !helper.restoreTo('UnitTestsDumpDst', { separate: true, fromDir: 'UnitTestsDumpSrc'}) ||
+        !helper.restoreTo('_system', { separate: true }) ||
+        !helper.runTests(testFile,'UnitTestsDumpDst') ||
+        !helper.tearDown(tearDownFile)) {
+      return helper.extractResults();
+    }
+  }
+  else {
+    if (!helper.runSetupSuite(setupFile) ||
     !helper.dumpFrom('UnitTestsDumpSrc') ||
     !helper.runCleanupSuite(cleanupFile) ||
     !helper.restoreTo('UnitTestsDumpDst') ||

@@ -352,6 +390,7 @@ function dump_backend (options, serverAuthInfo, clientAuth, dumpOptions, restore
       !helper.tearDown(tearDownFile)) {
     return helper.extractResults();
   }
+  }
 
   if (tstFiles.hasOwnProperty("dumpCheckGraph")) {
     const notCluster = getClusterStrings(options).notCluster;

@@ -365,6 +404,10 @@ function dump_backend (options, serverAuthInfo, clientAuth, dumpOptions, restore
 
   if (tstFiles.hasOwnProperty("foxxTest")) {
     const foxxTestFile = tu.makePathUnix(fs.join(testPaths[which][0], tstFiles.foxxTest));
+    if (options.hasOwnProperty("multipleDumps") && options.multipleDumps) {
+      helper.adjustRestoreToDump();
+      helper.restoreConfig.setInputDirectory(fs.join('dump','UnitTestsDumpSrc'), true);
+    }
     if (!helper.restoreFoxxComplete('UnitTestsDumpFoxxComplete') ||
         !helper.testFoxxComplete(foxxTestFile, 'UnitTestsDumpFoxxComplete') ||
         !helper.restoreFoxxAppsBundle('UnitTestsDumpFoxxAppsBundle') ||

@@ -410,20 +453,6 @@ function dumpMultiple (options) {
 }
 
 function dumpAuthentication (options) {
-  if (options.cluster) {
-    if (options.extremeVerbosity) {
-      print(CYAN + 'Skipped because of cluster.' + RESET);
-    }
-
-    return {
-      'dump_authentication': {
-        'status': true,
-        'message': 'skipped because of cluster',
-        'skipped': true
-      }
-    };
-  }
-
   const clientAuth = {
     'server.authentication': 'true'
   };

@@ -438,16 +467,26 @@ function dumpAuthentication (options) {
     password: 'foobarpasswd'
   };
 
+  let restoreAuthOpts = {
+    username: 'foobaruser',
+    password: 'pinus'
+  };
+
   _.defaults(dumpAuthOpts, options);
+  _.defaults(restoreAuthOpts, options);
 
   let tstFiles = {
     dumpSetup: 'dump-authentication-setup.js',
-    dumpCleanup: 'cleanup-nothing.js',
+    dumpCleanup: 'cleanup-alter-user.js',
     dumpAgain: 'dump-authentication.js',
     dumpTearDown: 'dump-teardown.js',
     foxxTest: 'check-foxx.js'
   };
 
-  return dump_backend(options, serverAuthInfo, clientAuth, dumpAuthOpts, dumpAuthOpts, 'dump_authentication', tstFiles, function(){});
+  options.multipleDumps = true;
+  options['server.jwt-secret'] = 'haxxmann';
+
+  return dump_backend(options, serverAuthInfo, clientAuth, dumpAuthOpts, restoreAuthOpts, 'dump_authentication', tstFiles, function(){});
 }
 
 function dumpEncrypted (options) {

@@ -635,7 +674,7 @@ exports.setup = function (testFns, defaultFns, opts, fnDocs, optionsDoc, allTest
   defaultFns.push('dump_multiple');
 
   testFns['hot_backup'] = hotBackup;
-  defaultFns.push('hotBackup');
+  defaultFns.push('hot_backup');
 
   for (var attrname in functionsDocumentation) { fnDocs[attrname] = functionsDocumentation[attrname]; }
   for (var i = 0; i < optionsDocumentation.length; i++) { optionsDoc.push(optionsDocumentation[i]); }

@@ -0,0 +1,41 @@
+/*jshint globalstrict:false, strict:false */
+/* global db */
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief teardown for dump/reload tests
+///
+/// @file
+///
+/// DISCLAIMER
+///
+/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
+///
+/// Licensed under the Apache License, Version 2.0 (the "License");
+/// you may not use this file except in compliance with the License.
+/// You may obtain a copy of the License at
+///
+///     http://www.apache.org/licenses/LICENSE-2.0
+///
+/// Unless required by applicable law or agreed to in writing, software
+/// distributed under the License is distributed on an "AS IS" BASIS,
+/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+/// See the License for the specific language governing permissions and
+/// limitations under the License.
+///
+/// Copyright holder is ArangoDB Inc, Cologne, Germany
+///
+/// @author Wilfried Goesgens
+/// @author Copyright 2019, ArangoDB Inc, Cologne, Germany
+////////////////////////////////////////////////////////////////////////////////
+
+(function () {
+  'use strict';
+
+  var users = require("@arangodb/users");
+  users.update("foobaruser", "pinus", true);
+})();
+
+return {
+  status: true
+};

@@ -163,6 +163,8 @@ function dumpTestSuite () {
       assertEqual(users.permission(uName, "_system"), 'rw');
       assertEqual(users.permission(uName, "UnitTestsDumpSrc"), 'rw');
       assertEqual(users.permission(uName, "UnitTestsDumpEmpty"), 'rw');
+
+      assertTrue(users.isValid("foobaruser", "foobarpasswd"));
     }
 
   };

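Taken together, the reworked suite now exercises the full round trip; a condensed sketch of the flow (illustrative only, combining the pieces above):

var users = require("@arangodb/users");
// setup: create foobaruser with password 'foobarpasswd', then dump _system
// cleanup-alter-user.js: change the password after the dump was taken
users.update("foobaruser", "pinus", true);
// arangorestore (authenticating as foobaruser/pinus) restores _users ...
// ... and the final assertion proves the restored credentials took effect:
if (!users.isValid("foobaruser", "foobarpasswd")) {
  throw new Error("restored _users collection did not take effect");
}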