diff --git a/CHANGELOG b/CHANGELOG
index 26b3ac0444..ca60bb00ba 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,6 +1,8 @@
 v3.5.3 (XXXX-XX-XX)
 -------------------
 
+* The _users collection is now properly restored when using arangorestore.
+
 * Allow the optimizer to use indexes when a collection attribute is compared
   to an expansion followed by an attribute name, e.g.
   `doc.value IN something[*].name`.
diff --git a/arangod/Auth/UserManager.cpp b/arangod/Auth/UserManager.cpp
index 5ab0bb2653..fcb4a0097c 100644
--- a/arangod/Auth/UserManager.cpp
+++ b/arangod/Auth/UserManager.cpp
@@ -394,6 +394,12 @@ VPackBuilder auth::UserManager::allUsers() {
   return result;
 }
 
+void auth::UserManager::triggerCacheRevalidation() {
+  triggerLocalReload();
+  triggerGlobalReload();
+  loadFromDB();
+}
+
 /// Trigger eventual reload, user facing API call
 void auth::UserManager::triggerGlobalReload() {
   if (!ServerState::instance()->isCoordinator()) {
diff --git a/arangod/Auth/UserManager.h b/arangod/Auth/UserManager.h
index c4212cc9c3..a482ba31b4 100644
--- a/arangod/Auth/UserManager.h
+++ b/arangod/Auth/UserManager.h
@@ -90,6 +90,9 @@ class UserManager {
   /// Trigger eventual reload on all other coordinators (and in TokenCache)
   void triggerGlobalReload();
 
+  /// Trigger cache revalidation after user restore
+  void triggerCacheRevalidation();
+
   /// Create the root user with a default password, will fail if the user
   /// already exists. Only ever call if you can guarantee to be in charge
   void createRootUser();
diff --git a/arangod/RestHandler/RestReplicationHandler.cpp b/arangod/RestHandler/RestReplicationHandler.cpp
index f662e1aa9e..3601d422ee 100644
--- a/arangod/RestHandler/RestReplicationHandler.cpp
+++ b/arangod/RestHandler/RestReplicationHandler.cpp
@@ -1455,7 +1455,7 @@ Result RestReplicationHandler::processRestoreUsersBatch(std::string const& colle
   VPackSlice allMarkersSlice = allMarkers.slice();
 
   std::string aql(
-      "FOR u IN @restored UPSERT {name: u.name} INSERT u REPLACE u "
+      "FOR u IN @restored UPSERT {user: u.user} INSERT u REPLACE u "
       "INTO @@collection OPTIONS {ignoreErrors: true, silent: true, "
       "waitForSync: false, isRestore: true}");
@@ -1508,8 +1508,7 @@ Result RestReplicationHandler::processRestoreUsersBatch(std::string const& colle
   AuthenticationFeature* af = AuthenticationFeature::instance();
   TRI_ASSERT(af->userManager() != nullptr);
   if (af->userManager() != nullptr) {
-    af->userManager()->triggerLocalReload();
-    af->userManager()->triggerGlobalReload();
+    af->userManager()->triggerCacheRevalidation();
   }
 
   return queryResult.result;
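The heart of the server-side fix is the bind-attribute change above: documents in `_users` identify a user via their `user` attribute, not `name`, so the old query keyed the UPSERT lookup on an attribute that `_users` documents do not have and existing users were not reliably matched and replaced. A minimal arangosh sketch of the corrected query shape, run against a scratch collection rather than the real `_users` (the server additionally passes internal options such as `isRestore`, omitted here):

```js
// Minimal arangosh sketch (not the real _users schema): an UPSERT keyed on
// the unique `user` attribute replaces the stale document instead of
// missing it.
const db = require('@arangodb').db;

db._create('restoredUsers');
db.restoredUsers.insert({ user: 'alice', active: false }); // pre-existing entry

db._query(
  'FOR u IN @restored UPSERT {user: u.user} INSERT u REPLACE u INTO @@collection',
  { restored: [ { user: 'alice', active: true } ], '@collection': 'restoredUsers' }
);

require('internal').print(db.restoredUsers.count()); // 1 - replaced, not duplicated
db._drop('restoredUsers');
```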
"" : " with this payload: '" + originalRequest + "'")}; } if (response->wasHttpError()) { int errorNum = TRI_ERROR_INTERNAL; @@ -168,10 +168,10 @@ arangodb::Result checkHttpResponse(arangodb::httpclient::SimpleHttpClient& clien errorNum = error.get(arangodb::StaticStrings::ErrorNum).getNumericValue(); errorMsg = error.get(arangodb::StaticStrings::ErrorMessage).copyString(); } - return {errorNum, "got invalid response from server: HTTP " + - itoa(response->getHttpReturnCode()) + ": '" + - errorMsg + "' while executing '" + requestAction + - (originalRequest.empty() ? "" : "' with this payload: '" + originalRequest + "'")}; + return {errorNum, + "got invalid response from server: HTTP " + itoa(response->getHttpReturnCode()) + + ": '" + errorMsg + "' while executing '" + requestAction + + (originalRequest.empty() ? "" : "' with this payload: '" + originalRequest + "'")}; } return {TRI_ERROR_NO_ERROR}; } @@ -180,23 +180,21 @@ arangodb::Result checkHttpResponse(arangodb::httpclient::SimpleHttpClient& clien bool sortCollectionsForCreation(VPackBuilder const& l, VPackBuilder const& r) { VPackSlice const left = l.slice().get("parameters"); VPackSlice const right = r.slice().get("parameters"); - + std::string leftName = arangodb::basics::VelocyPackHelper::getStringValue(left, "name", ""); std::string rightName = arangodb::basics::VelocyPackHelper::getStringValue(right, "name", ""); - + // First we sort by shard distribution. // We first have to create the collections which have no dependencies. // NB: Dependency graph has depth at most 1, no need to manage complex DAG VPackSlice leftDist = left.get("distributeShardsLike"); VPackSlice rightDist = right.get("distributeShardsLike"); - if (leftDist.isNone() && rightDist.isString() && - rightDist.copyString() == leftName) { + if (leftDist.isNone() && rightDist.isString() && rightDist.copyString() == leftName) { return true; } - if (rightDist.isNone() && leftDist.isString() && - leftDist.copyString() == rightName) { + if (rightDist.isNone() && leftDist.isString() && leftDist.copyString() == rightName) { return false; } @@ -212,12 +210,10 @@ bool sortCollectionsForCreation(VPackBuilder const& l, VPackBuilder const& r) { // Finally, sort by name so we have stable, reproducible results // Sort system collections first - if (!leftName.empty() && leftName[0] == '_' && - !rightName.empty() && rightName[0] != '_') { + if (!leftName.empty() && leftName[0] == '_' && !rightName.empty() && rightName[0] != '_') { return true; } - if (!leftName.empty() && leftName[0] != '_' && - !rightName.empty() && rightName[0] == '_') { + if (!leftName.empty() && leftName[0] != '_' && !rightName.empty() && rightName[0] == '_') { return false; } return strcasecmp(leftName.c_str(), rightName.c_str()) < 0; @@ -546,11 +542,11 @@ arangodb::Result recreateCollection(arangodb::httpclient::SimpleHttpClient& http // re-create collection if (jobData.options.progress) { if (jobData.options.overwrite) { - LOG_TOPIC("9b414", INFO, Logger::RESTORE) << "# Re-creating " << collectionType - << " collection '" << cname << "'..."; + LOG_TOPIC("9b414", INFO, Logger::RESTORE) + << "# Re-creating " << collectionType << " collection '" << cname << "'..."; } else { - LOG_TOPIC("a9123", INFO, Logger::RESTORE) << "# Creating " << collectionType - << " collection '" << cname << "'..."; + LOG_TOPIC("a9123", INFO, Logger::RESTORE) + << "# Creating " << collectionType << " collection '" << cname << "'..."; } } @@ -634,19 +630,21 @@ arangodb::Result restoreData(arangodb::httpclient::SimpleHttpClient& 
@@ -634,19 +630,21 @@ arangodb::Result restoreData(arangodb::httpclient::SimpleHttpClient& httpClient,
       cname + "_" + arangodb::rest::SslInterface::sslMD5(cname) + ".data.json");
   if (!datafile || datafile->status().fail()) {
     datafile = jobData.directory.readableFile(
-        cname + "_" + arangodb::rest::SslInterface::sslMD5(cname) + ".data.json.gz");
+        cname + "_" + arangodb::rest::SslInterface::sslMD5(cname) +
+        ".data.json.gz");
   }
   if (!datafile || datafile->status().fail()) {
     datafile = jobData.directory.readableFile(cname + ".data.json.gz");
-  } 
+  }
   if (!datafile || datafile->status().fail()) {
     datafile = jobData.directory.readableFile(cname + ".data.json");
   }
   if (!datafile || datafile->status().fail()) {
-    result = {TRI_ERROR_CANNOT_READ_FILE, "could not open data file for collection '" + cname + "'"};
+    result = {TRI_ERROR_CANNOT_READ_FILE,
+              "could not open data file for collection '" + cname + "'"};
     return result;
   }
- 
+
   int64_t const fileSize = TRI_SizeFile(datafile->path().c_str());
 
   if (jobData.options.progress) {
@@ -657,8 +655,9 @@ arangodb::Result restoreData(arangodb::httpclient::SimpleHttpClient& httpClient,
 
   int64_t numReadForThisCollection = 0;
   int64_t numReadSinceLastReport = 0;
- 
-  bool const isGzip = (0 == datafile->path().substr(datafile->path().size() - 3).compare(".gz"));
+
+  bool const isGzip =
+      (0 == datafile->path().substr(datafile->path().size() - 3).compare(".gz"));
 
   buffer.clear();
   while (true) {
@@ -731,8 +730,9 @@ arangodb::Result restoreData(arangodb::httpclient::SimpleHttpClient& httpClient,
       } else {
         ofFilesize << " of " << fileSize;
         percentage << " ("
-                   << int(100. * double(numReadForThisCollection) / double(fileSize)) << " %)";
-      } // else
+                   << int(100. * double(numReadForThisCollection) / double(fileSize))
+                   << " %)";
+      }  // else
 
       LOG_TOPIC("69a73", INFO, Logger::RESTORE)
           << "# Still loading data into " << collectionType << " collection '"
@@ -913,7 +913,7 @@ arangodb::Result processInputDirectory(
     VPackSlice const collection = b.slice();
 
     LOG_TOPIC("c601a", DEBUG, Logger::RESTORE)
-          << "# Processing collection: " << collection.toJson();
+        << "# Processing collection: " << collection.toJson();
 
     VPackSlice params = collection.get("parameters");
     VPackSlice name = VPackSlice::emptyStringSlice();
@@ -937,9 +937,9 @@ arangodb::Result processInputDirectory(
         return result;
       }
     }
- 
+
     if (name.isString() && name.stringRef() == "_users") {
-      // special treatment for _users collection - this must be the very last, 
+      // special treatment for _users collection - this must be the very last,
       // and run isolated from all previous data loading operations - the
       // reason is that loading into the users collection may change the
       // credentials for the current arangorestore connection!
@@ -970,7 +970,7 @@ arangodb::Result processInputDirectory(
 
     for (auto const& viewDefinition : views) {
       LOG_TOPIC("c608d", DEBUG, Logger::RESTORE)
-        << "# Creating view: " << viewDefinition.toJson();
+          << "# Creating view: " << viewDefinition.toJson();
 
       auto res = ::restoreView(httpClient, options, viewDefinition.slice());
@@ -1036,11 +1036,12 @@ arangodb::Result processInputDirectory(
       Result res = ::triggerFoxxHeal(httpClient);
       if (res.fail()) {
         LOG_TOPIC("47cd7", WARN, Logger::RESTORE)
-            << "Reloading of Foxx services failed: " << res.errorMessage()
-            << "- in the cluster Foxx services will be available eventually, On single servers send "
+            << "Reloading of Foxx services failed: " << res.errorMessage()
+            << ". In the cluster, Foxx services will be available eventually. "
+               "On single servers send "
             << "a POST to '/_api/foxx/_local/heal' on the current database, "
-            << "with an empty body. Please note that any of this is not necessary if the Foxx APIs "
-            << "have been turned off on the server using the option `--foxx.api false`.";
+            << "with an empty body. Please note that any of this is not "
+               "necessary if the Foxx APIs "
+            << "have been turned off on the server using the option "
+               "`--foxx.api false`.";
       }
     }
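The restoreData changes above are mostly reflows, but the fallback chain for locating a collection's data file is worth spelling out: the md5-hashed filename is preferred, compressed variants are tried before giving up, and gzip handling is keyed off the `.gz` suffix. A hedged arangosh sketch of the same lookup order:

```js
// Hedged sketch of the data-file lookup order restoreData uses, transcribed
// to JS; the filename variants follow the dump format described above.
const fs = require('fs');
const crypto = require('@arangodb/crypto');

function findDataFile(dir, cname) {
  const md5 = crypto.md5(cname);
  const candidates = [
    cname + '_' + md5 + '.data.json',     // preferred: hashed, uncompressed
    cname + '_' + md5 + '.data.json.gz',  // hashed, gzip-compressed
    cname + '.data.json.gz',              // plain name, compressed
    cname + '.data.json'                  // plain name, uncompressed
  ];
  for (const c of candidates) {
    const p = fs.join(dir, c);
    if (fs.exists(p)) {
      return p; // gzip handling is keyed off the '.gz' suffix, as in isGzip
    }
  }
  return null;
}
```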
@@ -1075,24 +1076,42 @@ arangodb::Result processJob(arangodb::httpclient::SimpleHttpClient& httpClient,
                             arangodb::RestoreFeature::JobData& jobData) {
   arangodb::Result result;
-  if (jobData.options.indexesFirst && jobData.options.importStructure) {
-    // restore indexes first if we are using rocksdb
+
+  VPackSlice const parameters = jobData.collection.get("parameters");
+  std::string const cname =
+      arangodb::basics::VelocyPackHelper::getStringValue(parameters, "name", "");
+
+  if (cname == "_users") {
+    // special case: for the _users collection, always create the indexes
+    // before restoring the data, because restoring the data may change the
+    // permissions of the current user; index creation would then fail.
     result = ::restoreIndexes(httpClient, jobData);
     if (result.fail()) {
       return result;
     }
-  }
-  if (jobData.options.importData) {
     result = ::restoreData(httpClient, jobData);
     if (result.fail()) {
       return result;
     }
-  }
-  if (!jobData.options.indexesFirst && jobData.options.importStructure) {
-    // restore indexes second if we are using mmfiles
-    result = ::restoreIndexes(httpClient, jobData);
-    if (result.fail()) {
-      return result;
+  } else {
+    if (jobData.options.indexesFirst && jobData.options.importStructure) {
+      // restore indexes first if we are using rocksdb
+      result = ::restoreIndexes(httpClient, jobData);
+      if (result.fail()) {
+        return result;
+      }
+    }
+    if (jobData.options.importData) {
+      result = ::restoreData(httpClient, jobData);
+      if (result.fail()) {
+        return result;
+      }
+    }
+    if (!jobData.options.indexesFirst && jobData.options.importStructure) {
+      // restore indexes second if we are using mmfiles
+      result = ::restoreIndexes(httpClient, jobData);
+      if (result.fail()) {
+        return result;
+      }
     }
   }
@@ -1106,8 +1125,9 @@ arangodb::Result processJob(arangodb::httpclient::SimpleHttpClient& httpClient,
     int type = arangodb::basics::VelocyPackHelper::getNumericValue(parameters, "type", 2);
     std::string const collectionType(type == 2 ? "document" : "edge");
-    LOG_TOPIC("6ae09", INFO, arangodb::Logger::RESTORE) << "# Successfully restored " << collectionType
-                                                        << " collection '" << cname << "'";
+    LOG_TOPIC("6ae09", INFO, arangodb::Logger::RESTORE)
+        << "# Successfully restored " << collectionType << " collection '"
+        << cname << "'";
   }
 
   return result;
 }
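The processJob rework above is the client-side half of the fix. Restoring `_users` data can downgrade the permissions of the very user arangorestore is connected as; any DDL issued afterwards (index creation included) would then be rejected, so for `_users` the indexes are always created before the data, regardless of the `indexesFirst` heuristic. A compact JS rendering of the new control flow (the `restore*` stubs stand in for the C++ helpers):

```js
// JS rendering of the new processJob ordering; the stubs stand in for the
// C++ helpers ::restoreIndexes and ::restoreData.
function restoreIndexes(job) { /* create indexes via the restore API */ }
function restoreData(job) { /* stream .data.json chunks to the server */ }

function processJob(job) {
  if (job.collectionName === '_users') {
    restoreIndexes(job); // while the current user still has DDL permissions
    restoreData(job);    // may replace the credentials we connected with
    return;
  }
  if (job.options.indexesFirst && job.options.importStructure) {
    restoreIndexes(job); // rocksdb: indexes first
  }
  if (job.options.importData) {
    restoreData(job);
  }
  if (!job.options.indexesFirst && job.options.importStructure) {
    restoreIndexes(job); // mmfiles: indexes after the data
  }
}
```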
"document" : "edge"); - LOG_TOPIC("6ae09", INFO, arangodb::Logger::RESTORE) << "# Successfully restored " << collectionType - << " collection '" << cname << "'"; + LOG_TOPIC("6ae09", INFO, arangodb::Logger::RESTORE) + << "# Successfully restored " << collectionType << " collection '" + << cname << "'"; } return result; @@ -1186,9 +1206,9 @@ void RestoreFeature::collectOptions(std::shared_ptr opt "force usage of the same database name as in the source dump.json file", new BooleanParameter(&_options.forceSameDatabase)); - options->addOption( - "--all-databases", "restore data to all databases", - new BooleanParameter(&_options.allDatabases)) + options + ->addOption("--all-databases", "restore data to all databases", + new BooleanParameter(&_options.allDatabases)) .setIntroducedIn(30500); options->addOption("--input-directory", "input directory", @@ -1279,13 +1299,15 @@ void RestoreFeature::validateOptions(std::shared_ptr op if (_options.allDatabases) { if (options->processingResult().touched("server.database")) { LOG_TOPIC("94d22", FATAL, arangodb::Logger::RESTORE) - << "cannot use --server.database and --all-databases at the same time"; + << "cannot use --server.database and --all-databases at the same " + "time"; FATAL_ERROR_EXIT(); } if (_options.forceSameDatabase) { LOG_TOPIC("fd66a", FATAL, arangodb::Logger::RESTORE) - << "cannot use --force-same-database and --all-databases at the same time"; + << "cannot use --force-same-database and --all-databases at the same " + "time"; FATAL_ERROR_EXIT(); } } @@ -1409,16 +1431,19 @@ void RestoreFeature::start() { // and we have to process users last of all. otherwise we risk updating the // credentials for the user which users the current arangorestore connection, and // this will make subsequent arangorestore calls to the server fail with "unauthorized" - std::sort(databases.begin(), databases.end(), [](std::string const& lhs, std::string const& rhs) { - if (lhs == "_system" && rhs != "_system") { - return false; - } else if (rhs == "_system" && lhs != "_system") { - return true; - } - return lhs < rhs; - }); + std::sort(databases.begin(), databases.end(), + [](std::string const& lhs, std::string const& rhs) { + if (lhs == "_system" && rhs != "_system") { + return false; + } else if (rhs == "_system" && lhs != "_system") { + return true; + } + return lhs < rhs; + }); if (databases.empty()) { - LOG_TOPIC("b41d9", FATAL, Logger::RESTORE) << "Unable to find per-database subdirectories in input directory '" << _options.inputPath << "'. No data will be restored!"; + LOG_TOPIC("b41d9", FATAL, Logger::RESTORE) + << "Unable to find per-database subdirectories in input directory '" + << _options.inputPath << "'. 
@@ -1430,8 +1455,8 @@ void RestoreFeature::start() {
   // final result
   Result result;
 
-  result = _clientManager.getConnectedClient(httpClient, _options.force,
-                                             true, !_options.createDatabase, false);
+  result = _clientManager.getConnectedClient(httpClient, _options.force, true,
+                                             !_options.createDatabase, false);
   if (result.is(TRI_ERROR_SIMPLE_CLIENT_COULD_NOT_CONNECT)) {
     LOG_TOPIC("c23bf", FATAL, Logger::RESTORE)
         << "cannot create server connection, giving up!";
@@ -1447,7 +1472,9 @@ void RestoreFeature::start() {
       Result res = ::tryCreateDatabase(dbName);
       if (res.fail()) {
-        LOG_TOPIC("b19db", FATAL, Logger::RESTORE) << "Could not create database '" << dbName << "': " << httpClient->getErrorMessage();
+        LOG_TOPIC("b19db", FATAL, Logger::RESTORE)
+            << "Could not create database '" << dbName
+            << "': " << httpClient->getErrorMessage();
         FATAL_ERROR_EXIT();
       }
 
@@ -1455,9 +1482,12 @@ void RestoreFeature::start() {
       client->setDatabaseName(dbName);
 
       // re-check connection and version
-      result = _clientManager.getConnectedClient(httpClient, _options.force, true, true, false);
+      result = _clientManager.getConnectedClient(httpClient, _options.force,
+                                                 true, true, false);
     } else {
-      LOG_TOPIC("ad95b", WARN, Logger::RESTORE) << "Database '" << dbName << "' does not exist on target endpoint. In order to create this database along with the restore, please use the --create-database option";
+      LOG_TOPIC("ad95b", WARN, Logger::RESTORE)
+          << "Database '" << dbName
+          << "' does not exist on target endpoint. In order to create this "
+             "database along with the restore, please use the "
+             "--create-database option";
     }
   }
@@ -1479,7 +1509,10 @@ void RestoreFeature::start() {
   }
 
   if (role == "DBSERVER" || role == "PRIMARY") {
-    LOG_TOPIC("1fc99", WARN, arangodb::Logger::RESTORE) << "You connected to a DBServer node, but operations in a cluster should be carried out via a Coordinator. This is an unsupported operation!";
+    LOG_TOPIC("1fc99", WARN, arangodb::Logger::RESTORE)
+        << "You connected to a DBServer node, but operations in a cluster "
+           "should be carried out via a Coordinator. This is an unsupported "
+           "operation!";
   }
 
   std::tie(result, _options.indexesFirst) =
@@ -1500,10 +1533,14 @@ void RestoreFeature::start() {
 
   // set up threads and workers
   _clientTaskQueue.spawnWorkers(_clientManager, _options.threadCount);
 
-  LOG_TOPIC("6bb3c", DEBUG, Logger::RESTORE) << "Using " << _options.threadCount << " worker thread(s)";
+  LOG_TOPIC("6bb3c", DEBUG, Logger::RESTORE)
+      << "Using " << _options.threadCount << " worker thread(s)";
 
   if (_options.allDatabases) {
-    LOG_TOPIC("7c10a", INFO, Logger::RESTORE) << "About to restore databases '" << basics::StringUtils::join(databases, "', '") << "' from dump directory '" << _options.inputPath << "'...";
+    LOG_TOPIC("7c10a", INFO, Logger::RESTORE)
+        << "About to restore databases '"
+        << basics::StringUtils::join(databases, "', '")
+        << "' from dump directory '" << _options.inputPath << "'...";
   }
 
   for (auto const& db : databases) {
@@ -1513,10 +1550,11 @@ void RestoreFeature::start() {
       // inject current database
       client->setDatabaseName(db);
       LOG_TOPIC("36075", INFO, Logger::RESTORE) << "Restoring database '" << db << "'";
-      _directory = std::make_unique<ManagedDirectory>(basics::FileUtils::buildFilename(_options.inputPath, db), false, false);
+      _directory = std::make_unique<ManagedDirectory>(
+          basics::FileUtils::buildFilename(_options.inputPath, db), false, false);
 
-      result = _clientManager.getConnectedClient(httpClient, _options.force,
-                                                 false, !_options.createDatabase, false);
+      result = _clientManager.getConnectedClient(httpClient, _options.force, false,
+                                                 !_options.createDatabase, false);
 
       if (result.is(TRI_ERROR_SIMPLE_CLIENT_COULD_NOT_CONNECT)) {
         LOG_TOPIC("3e715", FATAL, Logger::RESTORE)
             << "cannot create server connection, giving up!";
@@ -1532,7 +1570,9 @@ void RestoreFeature::start() {
           result = ::tryCreateDatabase(db);
           if (result.fail()) {
-            LOG_TOPIC("7a35f", ERR, Logger::RESTORE) << "Could not create database '" << db << "': " << httpClient->getErrorMessage();
+            LOG_TOPIC("7a35f", ERR, Logger::RESTORE)
+                << "Could not create database '" << db
+                << "': " << httpClient->getErrorMessage();
             break;
           }
 
@@ -1540,14 +1580,18 @@ void RestoreFeature::start() {
           client->setDatabaseName(db);
 
           // re-check connection and version
-          result = _clientManager.getConnectedClient(httpClient, _options.force, false, true, false);
+          result = _clientManager.getConnectedClient(httpClient, _options.force,
+                                                     false, true, false);
         } else {
-          LOG_TOPIC("be594", WARN, Logger::RESTORE) << "Database '" << db << "' does not exist on target endpoint. In order to create this database along with the restore, please use the --create-database option";
+          LOG_TOPIC("be594", WARN, Logger::RESTORE)
+              << "Database '" << db
+              << "' does not exist on target endpoint. In order to create this "
+                 "database along with the restore, please use the "
+                 "--create-database option";
        }
      }
 
       if (result.fail()) {
-        result.reset(result.errorNumber(), std::string("cannot create server connection: ") + result.errorMessage());
+        result.reset(result.errorNumber(),
+                     std::string("cannot create server connection: ") + result.errorMessage());
 
         if (!_options.force) {
           break;
         }
@@ -1572,15 +1616,18 @@ void RestoreFeature::start() {
     // run the actual restore
     try {
       result = ::processInputDirectory(*httpClient, _clientTaskQueue, *this,
-                                        _options, *_directory, _stats);
+                                       _options, *_directory, _stats);
     } catch (basics::Exception const& ex) {
-      LOG_TOPIC("52b22", ERR, arangodb::Logger::RESTORE) << "caught exception: " << ex.what();
+      LOG_TOPIC("52b22", ERR, arangodb::Logger::RESTORE)
+          << "caught exception: " << ex.what();
       result = {ex.code(), ex.what()};
     } catch (std::exception const& ex) {
-      LOG_TOPIC("8f13f", ERR, arangodb::Logger::RESTORE) << "caught exception: " << ex.what();
+      LOG_TOPIC("8f13f", ERR, arangodb::Logger::RESTORE)
+          << "caught exception: " << ex.what();
      result = {TRI_ERROR_INTERNAL, ex.what()};
     } catch (...) {
-      LOG_TOPIC("a74e8", ERR, arangodb::Logger::RESTORE) << "caught unknown exception";
+      LOG_TOPIC("a74e8", ERR, arangodb::Logger::RESTORE)
+          << "caught unknown exception";
       result = {TRI_ERROR_INTERNAL};
     }
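The test-suite changes below exercise the new behavior end to end. To support dumping and restoring several databases in one run, the helper now places each database's dump in its own subdirectory beneath a common `dump` root; a hedged sketch of the resulting layout and the helper calls that produce it (directory names are the ones the suite uses):

```js
// Sketch of the per-database dump layout the multi-database tests rely on.
const fs = require('fs');

const rootDir = fs.getTempPath();  // stand-in for instanceInfo.rootDir
const dumpRoot = fs.join(rootDir, 'dump');
for (const database of ['_system', 'UnitTestsDumpSrc']) {
  const dir = fs.join(dumpRoot, database);
  if (!fs.exists(dir)) {
    fs.makeDirectoryRecursive(dir);  // the helper creates these lazily
  }
}
// dumpFrom('UnitTestsDumpSrc', true)
//   -> arangodump writes into dump/UnitTestsDumpSrc
// restoreTo('UnitTestsDumpDst', { separate: true, fromDir: 'UnitTestsDumpSrc' })
//   -> arangorestore reads from dump/UnitTestsDumpSrc
```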
diff --git a/js/client/modules/@arangodb/testsuites/dump.js b/js/client/modules/@arangodb/testsuites/dump.js
index 117044b56a..a9f1cae42f 100644
--- a/js/client/modules/@arangodb/testsuites/dump.js
+++ b/js/client/modules/@arangodb/testsuites/dump.js
@@ -113,6 +113,13 @@ class DumpRestoreHelper {
     print(CYAN + Date() + ': ' + this.which + ' and Restore - ' + s + RESET);
   }
 
+  adjustRestoreToDump()
+  {
+    this.restoreOptions = this.dumpOptions;
+    this.restoreConfig = pu.createBaseConfig('restore', this.dumpOptions, this.instanceInfo);
+    this.arangorestore = pu.run.arangoDumpRestoreWithConfig.bind(this, this.restoreConfig, this.restoreOptions, this.instanceInfo.rootDir, this.options.coreCheck);
+  }
+
   isAlive() {
     return pu.arangod.check.instanceAlive(this.instanceInfo, this.options);
   }
@@ -157,8 +164,14 @@ class DumpRestoreHelper {
     return this.validate(this.results.setup);
   }
 
-  dumpFrom(database) {
+  dumpFrom(database, separateDir = false) {
     this.print('dump');
+    if (separateDir) {
+      if (!fs.exists(fs.join(this.instanceInfo.rootDir, 'dump'))) {
+        fs.makeDirectory(fs.join(this.instanceInfo.rootDir, 'dump'));
+      }
+      this.dumpConfig.setOutputDirectory('dump' + fs.pathSeparator + database);
+    }
     if (!this.dumpConfig.haveSetAllDatabases()) {
       this.dumpConfig.setDatabase(database);
     }
@@ -166,8 +179,19 @@ class DumpRestoreHelper {
     return this.validate(this.results.dump);
   }
 
-  restoreTo(database) {
+  restoreTo(database, options = { separate: false, fromDir: '' }) {
     this.print('restore');
+
+    if (options.hasOwnProperty('separate') && options.separate === true) {
+      if (!options.hasOwnProperty('fromDir') || typeof options.fromDir !== 'string') {
+        options.fromDir = database;
+      }
+      if (!fs.exists(fs.join(this.instanceInfo.rootDir, 'dump'))) {
+        fs.makeDirectory(fs.join(this.instanceInfo.rootDir, 'dump'));
+      }
+      this.restoreConfig.setInputDirectory('dump' + fs.pathSeparator + options.fromDir, true);
+    }
+
     if (!this.restoreConfig.haveSetAllDatabases()) {
       this.restoreConfig.setDatabase(database);
     }
@@ -213,6 +237,7 @@ class DumpRestoreHelper {
   restoreFoxxComplete(database) {
     this.print('Foxx Apps with full restore');
     this.restoreConfig.setDatabase(database);
+    this.restoreConfig.setIncludeSystem(true);
     this.results.restoreFoxxComplete = this.arangorestore();
     return this.validate(this.results.restoreFoxxComplete);
   }
@@ -343,14 +368,28 @@ function dump_backend (options, serverAuthInfo, clientAuth, dumpOptions, restore
   const cleanupFile = tu.makePathUnix(fs.join(testPaths[which][0], tstFiles.dumpCleanup));
   const testFile = tu.makePathUnix(fs.join(testPaths[which][0], tstFiles.dumpAgain));
   const tearDownFile = tu.makePathUnix(fs.join(testPaths[which][0], tstFiles.dumpTearDown));
-  if (
-    !helper.runSetupSuite(setupFile) ||
-    !helper.dumpFrom('UnitTestsDumpSrc') ||
-    !helper.runCleanupSuite(cleanupFile) ||
-    !helper.restoreTo('UnitTestsDumpDst') ||
-    !helper.runTests(testFile,'UnitTestsDumpDst') ||
-    !helper.tearDown(tearDownFile)) {
-    return helper.extractResults();
+
+  if (options.hasOwnProperty("multipleDumps") && options.multipleDumps) {
+    if (!helper.runSetupSuite(setupFile) ||
+        !helper.dumpFrom('_system', true) ||
+        !helper.dumpFrom('UnitTestsDumpSrc', true) ||
+        !helper.runCleanupSuite(cleanupFile) ||
+        !helper.restoreTo('UnitTestsDumpDst', { separate: true, fromDir: 'UnitTestsDumpSrc'}) ||
+        !helper.restoreTo('_system', { separate: true }) ||
+        !helper.runTests(testFile,'UnitTestsDumpDst') ||
+        !helper.tearDown(tearDownFile)) {
+      return helper.extractResults();
+    }
+  }
+  else {
+    if (!helper.runSetupSuite(setupFile) ||
+        !helper.dumpFrom('UnitTestsDumpSrc') ||
+        !helper.runCleanupSuite(cleanupFile) ||
+        !helper.restoreTo('UnitTestsDumpDst') ||
+        !helper.runTests(testFile,'UnitTestsDumpDst') ||
+        !helper.tearDown(tearDownFile)) {
+      return helper.extractResults();
+    }
   }
 
   if (tstFiles.hasOwnProperty("dumpCheckGraph")) {
@@ -365,6 +404,10 @@ function dump_backend (options, serverAuthInfo, clientAuth, dumpOptions, restore
 
   if (tstFiles.hasOwnProperty("foxxTest")) {
     const foxxTestFile = tu.makePathUnix(fs.join(testPaths[which][0], tstFiles.foxxTest));
+    if (options.hasOwnProperty("multipleDumps") && options.multipleDumps) {
+      helper.adjustRestoreToDump();
+      helper.restoreConfig.setInputDirectory(fs.join('dump','UnitTestsDumpSrc'), true);
+    }
     if (!helper.restoreFoxxComplete('UnitTestsDumpFoxxComplete') ||
         !helper.testFoxxComplete(foxxTestFile, 'UnitTestsDumpFoxxComplete') ||
         !helper.restoreFoxxAppsBundle('UnitTestsDumpFoxxAppsBundle') ||
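The `dump_authentication` suite (next hunk) ties everything together: it dumps as `foobaruser`/`foobarpasswd`, the new cleanup suite then alters that password to `pinus`, so arangorestore must authenticate with the altered credentials; once `_users` has been restored, the originally dumped password must be valid again. The round-trip, reduced to the user-management calls involved:

```js
// Hedged arangosh sketch of the credential round-trip the test verifies.
const users = require('@arangodb/users');

users.save('foobaruser', 'foobarpasswd');   // state captured by the dump
users.update('foobaruser', 'pinus', true);  // what cleanup-alter-user.js does
// ... run arangorestore, authenticating as foobaruser/pinus ...
users.isValid('foobaruser', 'foobarpasswd'); // true again once _users is restored
```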
@@ -410,20 +453,6 @@ function dumpMultiple (options) {
 }
 
 function dumpAuthentication (options) {
-  if (options.cluster) {
-    if (options.extremeVerbosity) {
-      print(CYAN + 'Skipped because of cluster.' + RESET);
-    }
-
-    return {
-      'dump_authentication': {
-        'status': true,
-        'message': 'skipped because of cluster',
-        'skipped': true
-      }
-    };
-  }
-
   const clientAuth = {
     'server.authentication': 'true'
   };
@@ -438,16 +467,26 @@ function dumpAuthentication (options) {
     password: 'foobarpasswd'
   };
 
+  let restoreAuthOpts = {
+    username: 'foobaruser',
+    password: 'pinus'
+  };
+
   _.defaults(dumpAuthOpts, options);
+  _.defaults(restoreAuthOpts, options);
+
   let tstFiles = {
     dumpSetup: 'dump-authentication-setup.js',
-    dumpCleanup: 'cleanup-nothing.js',
+    dumpCleanup: 'cleanup-alter-user.js',
     dumpAgain: 'dump-authentication.js',
     dumpTearDown: 'dump-teardown.js',
     foxxTest: 'check-foxx.js'
   };
 
-  return dump_backend(options, serverAuthInfo, clientAuth, dumpAuthOpts, dumpAuthOpts, 'dump_authentication', tstFiles, function(){});
+  options.multipleDumps = true;
+  options['server.jwt-secret'] = 'haxxmann';
+
+  return dump_backend(options, serverAuthInfo, clientAuth, dumpAuthOpts, restoreAuthOpts, 'dump_authentication', tstFiles, function(){});
 }
 
 function dumpEncrypted (options) {
@@ -635,7 +674,7 @@ exports.setup = function (testFns, defaultFns, opts, fnDocs, optionsDoc, allTest
   defaultFns.push('dump_multiple');
 
   testFns['hot_backup'] = hotBackup;
-  defaultFns.push('hotBackup');
+  defaultFns.push('hot_backup');
 
   for (var attrname in functionsDocumentation) { fnDocs[attrname] = functionsDocumentation[attrname]; }
   for (var i = 0; i < optionsDocumentation.length; i++) { optionsDoc.push(optionsDocumentation[i]); }
diff --git a/tests/js/server/dump/cleanup-alter-user.js b/tests/js/server/dump/cleanup-alter-user.js
new file mode 100644
index 0000000000..3c542a9f99
--- /dev/null
+++ b/tests/js/server/dump/cleanup-alter-user.js
@@ -0,0 +1,41 @@
+/*jshint globalstrict:false, strict:false */
+/* global db */
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief cleanup for dump/restore tests - alters a user's password
+///
+/// @file
+///
+/// DISCLAIMER
+///
+/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
+///
+/// Licensed under the Apache License, Version 2.0 (the "License");
+/// you may not use this file except in compliance with the License.
+/// You may obtain a copy of the License at
+///
+///     http://www.apache.org/licenses/LICENSE-2.0
+///
+/// Unless required by applicable law or agreed to in writing, software
+/// distributed under the License is distributed on an "AS IS" BASIS,
+/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+/// See the License for the specific language governing permissions and
+/// limitations under the License.
+///
+/// Copyright holder is ArangoDB Inc, Cologne, Germany
+///
+/// @author Wilfried Goesgens
+/// @author Copyright 2019, ArangoDB Inc, Cologne, Germany
+////////////////////////////////////////////////////////////////////////////////
+
+(function () {
+  'use strict';
+
+  var users = require("@arangodb/users");
+  users.update("foobaruser", "pinus", true);
+})();
+
+return {
+  status: true
+};
+
diff --git a/tests/js/server/dump/dump-authentication.js b/tests/js/server/dump/dump-authentication.js
index 13d97eb17c..989a7264fd 100644
--- a/tests/js/server/dump/dump-authentication.js
+++ b/tests/js/server/dump/dump-authentication.js
@@ -163,6 +163,8 @@ function dumpTestSuite () {
       assertEqual(users.permission(uName, "_system"), 'rw');
       assertEqual(users.permission(uName, "UnitTestsDumpSrc"), 'rw');
       assertEqual(users.permission(uName, "UnitTestsDumpEmpty"), 'rw');
+
+      assertTrue(users.isValid("foobaruser", "foobarpasswd"));
     }
   };