mirror of https://gitee.com/bigwinds/arangodb
Bug fix 3.5/fix internal issue 4451 (#10538)
* Fix dump_authentication suite
* Fix typos
* Use the correct attribute name
* Properly reload user permissions after _users collection restore
* Fixed Foxx restore test
* CHANGELOG
* Changed the order of index creation during restore for the _users collection
This commit is contained in:
parent 76f84f2c6c
commit a7b64bd815
@@ -1,6 +1,8 @@
 v3.5.3 (XXXX-XX-XX)
 -------------------

+* The _users collection is now properly restored when using arangorestore.
+
 * Allow the optimizer to use indexes when a collection attribute is compared to
   an expansion followed by an attribute name, e.g.
   `doc.value IN something[*].name`.
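For context, the optimizer entry above refers to AQL filters of this shape. A minimal arangosh sketch, with made-up collection and attribute names (`docs`, `others`, `items`) and assuming an index on `value`:

// With the optimizer change, a filter of the form
//   doc.value IN <expansion>[*].<attribute>
// may be satisfied from an index on doc.value instead of a full scan.
const db = require('@arangodb').db;
db._query(`
  FOR other IN others
    FOR doc IN docs
      FILTER doc.value IN other.items[*].name
      RETURN doc
`);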
@@ -394,6 +394,12 @@ VPackBuilder auth::UserManager::allUsers() {
   return result;
 }

+void auth::UserManager::triggerCacheRevalidation() {
+  triggerLocalReload();
+  triggerGlobalReload();
+  loadFromDB();
+}
+
 /// Trigger eventual reload, user facing API call
 void auth::UserManager::triggerGlobalReload() {
   if (!ServerState::instance()->isCoordinator()) {
@@ -90,6 +90,9 @@ class UserManager {
   /// Trigger eventual reload on all other coordinators (and in TokenCache)
   void triggerGlobalReload();

+  /// Trigger cache revalidation after user restore
+  void triggerCacheRevalidation();
+
   /// Create the root user with a default password, will fail if the user
   /// already exists. Only ever call if you can guarantee to be in charge
   void createRootUser();
@@ -1455,7 +1455,7 @@ Result RestReplicationHandler::processRestoreUsersBatch(std::string const& colle
   VPackSlice allMarkersSlice = allMarkers.slice();

   std::string aql(
-      "FOR u IN @restored UPSERT {name: u.name} INSERT u REPLACE u "
+      "FOR u IN @restored UPSERT {user: u.user} INSERT u REPLACE u "
       "INTO @@collection OPTIONS {ignoreErrors: true, silent: true, "
       "waitForSync: false, isRestore: true}");

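Why this one-word change matters: documents in the _users system collection store the account name in their `user` attribute, not in `name`, so the old UPSERT never matched an existing account. A sketch of the corrected statement in arangosh terms (the sample document is invented for illustration; the internal OPTIONS from the C++ code are omitted, and writing to _users requires admin rights):

const db = require('@arangodb').db;
const restored = [{ user: 'foobaruser', source: 'LOCAL' }];  // illustrative marker
db._query(
  'FOR u IN @restored UPSERT {user: u.user} INSERT u REPLACE u INTO @@collection',
  { restored: restored, '@collection': '_users' }
);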
@@ -1508,8 +1508,7 @@ Result RestReplicationHandler::processRestoreUsersBatch(std::string const& colle
   AuthenticationFeature* af = AuthenticationFeature::instance();
   TRI_ASSERT(af->userManager() != nullptr);
   if (af->userManager() != nullptr) {
-    af->userManager()->triggerLocalReload();
-    af->userManager()->triggerGlobalReload();
+    af->userManager()->triggerCacheRevalidation();
   }

   return queryResult.result;
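The user-visible effect, exercised by the test changes at the bottom of this commit: immediately after arangorestore has loaded _users, the restored credentials must be honored without a server restart, because triggerCacheRevalidation() also reloads the user cache from the database. A hedged arangosh sketch reusing the account names from the tests below:

const users = require('@arangodb/users');
users.reload();                               // re-read the authentication caches
users.permission('foobaruser', '_system');    // expected 'rw' per the test below
users.isValid('foobaruser', 'foobarpasswd');  // expected true after the restore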
@@ -156,8 +156,8 @@ arangodb::Result checkHttpResponse(arangodb::httpclient::SimpleHttpClient& clien
   if (response == nullptr || !response->isComplete()) {
     return {TRI_ERROR_INTERNAL,
             "got invalid response from server: '" + client.getErrorMessage() +
-                "' while executing " + requestAction + (originalRequest.empty() ? "" : " with this payload: '" +
-                originalRequest + "'")};
+                "' while executing " + requestAction +
+                (originalRequest.empty() ? "" : " with this payload: '" + originalRequest + "'")};
   }
   if (response->wasHttpError()) {
     int errorNum = TRI_ERROR_INTERNAL;
@@ -168,10 +168,10 @@ arangodb::Result checkHttpResponse(arangodb::httpclient::SimpleHttpClient& clien
       errorNum = error.get(arangodb::StaticStrings::ErrorNum).getNumericValue<int>();
       errorMsg = error.get(arangodb::StaticStrings::ErrorMessage).copyString();
     }
-    return {errorNum, "got invalid response from server: HTTP " +
-                          itoa(response->getHttpReturnCode()) + ": '" +
-                          errorMsg + "' while executing '" + requestAction +
+    return {errorNum,
+            "got invalid response from server: HTTP " + itoa(response->getHttpReturnCode()) +
+                ": '" + errorMsg + "' while executing '" + requestAction +
                 (originalRequest.empty() ? "" : "' with this payload: '" + originalRequest + "'")};
   }
   return {TRI_ERROR_NO_ERROR};
 }
@@ -180,23 +180,21 @@ arangodb::Result checkHttpResponse(arangodb::httpclient::SimpleHttpClient& clien
 bool sortCollectionsForCreation(VPackBuilder const& l, VPackBuilder const& r) {
   VPackSlice const left = l.slice().get("parameters");
   VPackSlice const right = r.slice().get("parameters");

   std::string leftName =
       arangodb::basics::VelocyPackHelper::getStringValue(left, "name", "");
   std::string rightName =
       arangodb::basics::VelocyPackHelper::getStringValue(right, "name", "");

   // First we sort by shard distribution.
   // We first have to create the collections which have no dependencies.
   // NB: Dependency graph has depth at most 1, no need to manage complex DAG
   VPackSlice leftDist = left.get("distributeShardsLike");
   VPackSlice rightDist = right.get("distributeShardsLike");
-  if (leftDist.isNone() && rightDist.isString() &&
-      rightDist.copyString() == leftName) {
+  if (leftDist.isNone() && rightDist.isString() && rightDist.copyString() == leftName) {
     return true;
   }
-  if (rightDist.isNone() && leftDist.isString() &&
-      leftDist.copyString() == rightName) {
+  if (rightDist.isNone() && leftDist.isString() && leftDist.copyString() == rightName) {
     return false;
   }

@@ -212,12 +210,10 @@ bool sortCollectionsForCreation(VPackBuilder const& l, VPackBuilder const& r) {

   // Finally, sort by name so we have stable, reproducible results
   // Sort system collections first
-  if (!leftName.empty() && leftName[0] == '_' &&
-      !rightName.empty() && rightName[0] != '_') {
+  if (!leftName.empty() && leftName[0] == '_' && !rightName.empty() && rightName[0] != '_') {
     return true;
   }
-  if (!leftName.empty() && leftName[0] != '_' &&
-      !rightName.empty() && rightName[0] == '_') {
+  if (!leftName.empty() && leftName[0] != '_' && !rightName.empty() && rightName[0] == '_') {
     return false;
   }
   return strcasecmp(leftName.c_str(), rightName.c_str()) < 0;
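A JavaScript rendering of the comparator may help when reasoning about restore order. Note that the two hunks above elide part of the function, so this sketch (field names follow a dump's collection "parameters" object) covers only the rules visible here:

// Sketch only: distributeShardsLike prototypes first, then system
// collections (leading '_'), then case-insensitive name order.
function sortCollectionsForCreation (l, r) {
  const left = l.parameters, right = r.parameters;
  const leftName = left.name || '', rightName = right.name || '';
  if (left.distributeShardsLike === undefined && right.distributeShardsLike === leftName) {
    return true;   // right depends on left, so create left first
  }
  if (right.distributeShardsLike === undefined && left.distributeShardsLike === rightName) {
    return false;  // left depends on right, so create right first
  }
  // (further ordering criteria elided between the two hunks above)
  const leftSys = leftName[0] === '_', rightSys = rightName[0] === '_';
  if (leftSys !== rightSys) {
    return leftSys;  // system collections first
  }
  return leftName.toLowerCase() < rightName.toLowerCase();
}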
@@ -546,11 +542,11 @@ arangodb::Result recreateCollection(arangodb::httpclient::SimpleHttpClient& http
   // re-create collection
   if (jobData.options.progress) {
     if (jobData.options.overwrite) {
-      LOG_TOPIC("9b414", INFO, Logger::RESTORE) << "# Re-creating " << collectionType
-                                                << " collection '" << cname << "'...";
+      LOG_TOPIC("9b414", INFO, Logger::RESTORE)
+          << "# Re-creating " << collectionType << " collection '" << cname << "'...";
     } else {
-      LOG_TOPIC("a9123", INFO, Logger::RESTORE) << "# Creating " << collectionType
-                                                << " collection '" << cname << "'...";
+      LOG_TOPIC("a9123", INFO, Logger::RESTORE)
+          << "# Creating " << collectionType << " collection '" << cname << "'...";
     }
   }

@@ -634,19 +630,21 @@ arangodb::Result restoreData(arangodb::httpclient::SimpleHttpClient& httpClient,
       cname + "_" + arangodb::rest::SslInterface::sslMD5(cname) + ".data.json");
   if (!datafile || datafile->status().fail()) {
     datafile = jobData.directory.readableFile(
-        cname + "_" + arangodb::rest::SslInterface::sslMD5(cname) + ".data.json.gz");
+        cname + "_" + arangodb::rest::SslInterface::sslMD5(cname) +
+        ".data.json.gz");
   }
   if (!datafile || datafile->status().fail()) {
     datafile = jobData.directory.readableFile(cname + ".data.json.gz");
   }
   if (!datafile || datafile->status().fail()) {
     datafile = jobData.directory.readableFile(cname + ".data.json");
   }
   if (!datafile || datafile->status().fail()) {
-    result = {TRI_ERROR_CANNOT_READ_FILE, "could not open data file for collection '" + cname + "'"};
+    result = {TRI_ERROR_CANNOT_READ_FILE,
+              "could not open data file for collection '" + cname + "'"};
     return result;
   }

   int64_t const fileSize = TRI_SizeFile(datafile->path().c_str());

   if (jobData.options.progress) {
@@ -657,8 +655,9 @@ arangodb::Result restoreData(arangodb::httpclient::SimpleHttpClient& httpClient,

   int64_t numReadForThisCollection = 0;
   int64_t numReadSinceLastReport = 0;

-  bool const isGzip = (0 == datafile->path().substr(datafile->path().size() - 3).compare(".gz"));
+  bool const isGzip =
+      (0 == datafile->path().substr(datafile->path().size() - 3).compare(".gz"));

   buffer.clear();
   while (true) {
@@ -731,8 +730,9 @@ arangodb::Result restoreData(arangodb::httpclient::SimpleHttpClient& httpClient,
     } else {
       ofFilesize << " of " << fileSize;
       percentage << " ("
-                 << int(100. * double(numReadForThisCollection) / double(fileSize)) << " %)";
+                 << int(100. * double(numReadForThisCollection) / double(fileSize))
+                 << " %)";
     }  // else

     LOG_TOPIC("69a73", INFO, Logger::RESTORE)
         << "# Still loading data into " << collectionType << " collection '"
@@ -913,7 +913,7 @@ arangodb::Result processInputDirectory(
     VPackSlice const collection = b.slice();

     LOG_TOPIC("c601a", DEBUG, Logger::RESTORE)
         << "# Processing collection: " << collection.toJson();

     VPackSlice params = collection.get("parameters");
     VPackSlice name = VPackSlice::emptyStringSlice();
@@ -937,9 +937,9 @@ arangodb::Result processInputDirectory(
         return result;
       }
     }

     if (name.isString() && name.stringRef() == "_users") {
       // special treatment for _users collection - this must be the very last,
       // and run isolated from all previous data loading operations - the
       // reason is that loading into the users collection may change the
       // credentials for the current arangorestore connection!
@@ -970,7 +970,7 @@ arangodb::Result processInputDirectory(

     for (auto const& viewDefinition : views) {
       LOG_TOPIC("c608d", DEBUG, Logger::RESTORE)
           << "# Creating view: " << viewDefinition.toJson();

       auto res = ::restoreView(httpClient, options, viewDefinition.slice());

@@ -1036,11 +1036,12 @@ arangodb::Result processInputDirectory(
       Result res = ::triggerFoxxHeal(httpClient);
       if (res.fail()) {
         LOG_TOPIC("47cd7", WARN, Logger::RESTORE)
-            << "Reloading of Foxx services failed: " << res.errorMessage()
-            << "- in the cluster Foxx services will be available eventually, On single servers send "
+            << "Reloading of Foxx services failed: " << res.errorMessage() << "- in the cluster Foxx services will be available eventually, On single servers send "
             << "a POST to '/_api/foxx/_local/heal' on the current database, "
-            << "with an empty body. Please note that any of this is not necessary if the Foxx APIs "
-            << "have been turned off on the server using the option `--foxx.api false`.";
+            << "with an empty body. Please note that any of this is not "
+               "necessary if the Foxx APIs "
+            << "have been turned off on the server using the option "
+               "`--foxx.api false`.";
       }
     }

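The fallback named in this warning can be issued from arangosh, which exposes the server connection as `arango`; a sketch (empty request body, against the current database):

arango.POST('/_api/foxx/_local/heal', '');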
@@ -1075,24 +1076,42 @@ arangodb::Result processInputDirectory(
 arangodb::Result processJob(arangodb::httpclient::SimpleHttpClient& httpClient,
                             arangodb::RestoreFeature::JobData& jobData) {
   arangodb::Result result;
-  if (jobData.options.indexesFirst && jobData.options.importStructure) {
-    // restore indexes first if we are using rocksdb
-    result = ::restoreIndexes(httpClient, jobData);
-    if (result.fail()) {
-      return result;
-    }
-  }
-  if (jobData.options.importData) {
-    result = ::restoreData(httpClient, jobData);
-    if (result.fail()) {
-      return result;
-    }
-  }
-  if (!jobData.options.indexesFirst && jobData.options.importStructure) {
-    // restore indexes second if we are using mmfiles
-    result = ::restoreIndexes(httpClient, jobData);
-    if (result.fail()) {
-      return result;
-    }
+  VPackSlice const parameters = jobData.collection.get("parameters");
+  std::string const cname =
+      arangodb::basics::VelocyPackHelper::getStringValue(parameters, "name", "");
+
+  if (cname == "_users") {
+    // special case: never restore data in the _users collection first as it could
+    // potentially change user permissions. In that case index creation will fail.
+    result = ::restoreIndexes(httpClient, jobData);
+    if (result.fail()) {
+      return result;
+    }
+    if (jobData.options.importData) {
+      result = ::restoreData(httpClient, jobData);
+      if (result.fail()) {
+        return result;
+      }
+    }
+  } else {
+    if (jobData.options.indexesFirst && jobData.options.importStructure) {
+      // restore indexes first if we are using rocksdb
+      result = ::restoreIndexes(httpClient, jobData);
+      if (result.fail()) {
+        return result;
+      }
+    }
+    if (jobData.options.importData) {
+      result = ::restoreData(httpClient, jobData);
+      if (result.fail()) {
+        return result;
+      }
+    }
+    if (!jobData.options.indexesFirst && jobData.options.importStructure) {
+      // restore indexes second if we are using mmfiles
+      result = ::restoreIndexes(httpClient, jobData);
+      if (result.fail()) {
+        return result;
+      }
+    }
   }
 }
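In short: for _users, indexes are now always created before any data is loaded, bypassing the engine-dependent indexesFirst heuristic used for every other collection (indexes first on RocksDB, last on MMFiles). As the comment says, loading _users data can change the permissions of the very account arangorestore is using, after which a later index-creation request could be rejected.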
@@ -1106,8 +1125,9 @@ arangodb::Result processJob(arangodb::httpclient::SimpleHttpClient& httpClient,
     int type = arangodb::basics::VelocyPackHelper::getNumericValue<int>(parameters,
                                                                         "type", 2);
     std::string const collectionType(type == 2 ? "document" : "edge");
-    LOG_TOPIC("6ae09", INFO, arangodb::Logger::RESTORE) << "# Successfully restored " << collectionType
-                                                        << " collection '" << cname << "'";
+    LOG_TOPIC("6ae09", INFO, arangodb::Logger::RESTORE)
+        << "# Successfully restored " << collectionType << " collection '"
+        << cname << "'";
   }

   return result;
@@ -1186,9 +1206,9 @@ void RestoreFeature::collectOptions(std::shared_ptr<options::ProgramOptions> opt
       "force usage of the same database name as in the source dump.json file",
       new BooleanParameter(&_options.forceSameDatabase));

-  options->addOption(
-      "--all-databases", "restore data to all databases",
+  options
+      ->addOption("--all-databases", "restore data to all databases",
                   new BooleanParameter(&_options.allDatabases))
       .setIntroducedIn(30500);

   options->addOption("--input-directory", "input directory",
@@ -1279,13 +1299,15 @@ void RestoreFeature::validateOptions(std::shared_ptr<options::ProgramOptions> op
   if (_options.allDatabases) {
    if (options->processingResult().touched("server.database")) {
      LOG_TOPIC("94d22", FATAL, arangodb::Logger::RESTORE)
-          << "cannot use --server.database and --all-databases at the same time";
+          << "cannot use --server.database and --all-databases at the same "
+             "time";
      FATAL_ERROR_EXIT();
    }

    if (_options.forceSameDatabase) {
      LOG_TOPIC("fd66a", FATAL, arangodb::Logger::RESTORE)
-          << "cannot use --force-same-database and --all-databases at the same time";
+          << "cannot use --force-same-database and --all-databases at the same "
+             "time";
      FATAL_ERROR_EXIT();
    }
  }
@@ -1409,16 +1431,19 @@ void RestoreFeature::start() {
     // and we have to process users last of all. otherwise we risk updating the
     // credentials for the user which users the current arangorestore connection, and
     // this will make subsequent arangorestore calls to the server fail with "unauthorized"
-    std::sort(databases.begin(), databases.end(), [](std::string const& lhs, std::string const& rhs) {
-      if (lhs == "_system" && rhs != "_system") {
-        return false;
-      } else if (rhs == "_system" && lhs != "_system") {
-        return true;
-      }
-      return lhs < rhs;
-    });
+    std::sort(databases.begin(), databases.end(),
+              [](std::string const& lhs, std::string const& rhs) {
+                if (lhs == "_system" && rhs != "_system") {
+                  return false;
+                } else if (rhs == "_system" && lhs != "_system") {
+                  return true;
+                }
+                return lhs < rhs;
+              });
     if (databases.empty()) {
-      LOG_TOPIC("b41d9", FATAL, Logger::RESTORE) << "Unable to find per-database subdirectories in input directory '" << _options.inputPath << "'. No data will be restored!";
+      LOG_TOPIC("b41d9", FATAL, Logger::RESTORE)
+          << "Unable to find per-database subdirectories in input directory '"
+          << _options.inputPath << "'. No data will be restored!";
       FATAL_ERROR_EXIT();
     }
   } else {
@@ -1430,8 +1455,8 @@ void RestoreFeature::start() {
   // final result
   Result result;

-  result = _clientManager.getConnectedClient(httpClient, _options.force,
-                                             true, !_options.createDatabase, false);
+  result = _clientManager.getConnectedClient(httpClient, _options.force, true,
+                                             !_options.createDatabase, false);
   if (result.is(TRI_ERROR_SIMPLE_CLIENT_COULD_NOT_CONNECT)) {
     LOG_TOPIC("c23bf", FATAL, Logger::RESTORE)
         << "cannot create server connection, giving up!";
@@ -1447,7 +1472,9 @@ void RestoreFeature::start() {

       Result res = ::tryCreateDatabase(dbName);
       if (res.fail()) {
-        LOG_TOPIC("b19db", FATAL, Logger::RESTORE) << "Could not create database '" << dbName << "': " << httpClient->getErrorMessage();
+        LOG_TOPIC("b19db", FATAL, Logger::RESTORE)
+            << "Could not create database '" << dbName
+            << "': " << httpClient->getErrorMessage();
         FATAL_ERROR_EXIT();
       }

@@ -1455,9 +1482,12 @@ void RestoreFeature::start() {
       client->setDatabaseName(dbName);

       // re-check connection and version
-      result = _clientManager.getConnectedClient(httpClient, _options.force, true, true, false);
+      result = _clientManager.getConnectedClient(httpClient, _options.force,
+                                                 true, true, false);
     } else {
-      LOG_TOPIC("ad95b", WARN, Logger::RESTORE) << "Database '" << dbName << "' does not exist on target endpoint. In order to create this database along with the restore, please use the --create-database option";
+      LOG_TOPIC("ad95b", WARN, Logger::RESTORE)
+          << "Database '"
+          << dbName << "' does not exist on target endpoint. In order to create this database along with the restore, please use the --create-database option";
     }
   }

@@ -1479,7 +1509,10 @@ void RestoreFeature::start() {
   }

   if (role == "DBSERVER" || role == "PRIMARY") {
-    LOG_TOPIC("1fc99", WARN, arangodb::Logger::RESTORE) << "You connected to a DBServer node, but operations in a cluster should be carried out via a Coordinator. This is an unsupported operation!";
+    LOG_TOPIC("1fc99", WARN, arangodb::Logger::RESTORE)
+        << "You connected to a DBServer node, but operations in a cluster "
+           "should be carried out via a Coordinator. This is an unsupported "
+           "operation!";
   }

   std::tie(result, _options.indexesFirst) =
@@ -1500,10 +1533,14 @@ void RestoreFeature::start() {
   // set up threads and workers
   _clientTaskQueue.spawnWorkers(_clientManager, _options.threadCount);

-  LOG_TOPIC("6bb3c", DEBUG, Logger::RESTORE) << "Using " << _options.threadCount << " worker thread(s)";
+  LOG_TOPIC("6bb3c", DEBUG, Logger::RESTORE)
+      << "Using " << _options.threadCount << " worker thread(s)";

   if (_options.allDatabases) {
-    LOG_TOPIC("7c10a", INFO, Logger::RESTORE) << "About to restore databases '" << basics::StringUtils::join(databases, "', '") << "' from dump directory '" << _options.inputPath << "'...";
+    LOG_TOPIC("7c10a", INFO, Logger::RESTORE)
+        << "About to restore databases '"
+        << basics::StringUtils::join(databases, "', '")
+        << "' from dump directory '" << _options.inputPath << "'...";
   }

   for (auto const& db : databases) {
@@ -1513,10 +1550,11 @@ void RestoreFeature::start() {
     // inject current database
     client->setDatabaseName(db);
     LOG_TOPIC("36075", INFO, Logger::RESTORE) << "Restoring database '" << db << "'";
-    _directory = std::make_unique<ManagedDirectory>(basics::FileUtils::buildFilename(_options.inputPath, db), false, false);
+    _directory = std::make_unique<ManagedDirectory>(
+        basics::FileUtils::buildFilename(_options.inputPath, db), false, false);

-    result = _clientManager.getConnectedClient(httpClient, _options.force,
-                                               false, !_options.createDatabase, false);
+    result = _clientManager.getConnectedClient(httpClient, _options.force, false,
+                                               !_options.createDatabase, false);
     if (result.is(TRI_ERROR_SIMPLE_CLIENT_COULD_NOT_CONNECT)) {
       LOG_TOPIC("3e715", FATAL, Logger::RESTORE)
           << "cannot create server connection, giving up!";
@@ -1532,7 +1570,9 @@ void RestoreFeature::start() {

         result = ::tryCreateDatabase(db);
         if (result.fail()) {
-          LOG_TOPIC("7a35f", ERR, Logger::RESTORE) << "Could not create database '" << db << "': " << httpClient->getErrorMessage();
+          LOG_TOPIC("7a35f", ERR, Logger::RESTORE)
+              << "Could not create database '" << db
+              << "': " << httpClient->getErrorMessage();
           break;
         }

@@ -1540,14 +1580,18 @@ void RestoreFeature::start() {
         client->setDatabaseName(db);

         // re-check connection and version
-        result = _clientManager.getConnectedClient(httpClient, _options.force, false, true, false);
+        result = _clientManager.getConnectedClient(httpClient, _options.force,
+                                                   false, true, false);
       } else {
-        LOG_TOPIC("be594", WARN, Logger::RESTORE) << "Database '" << db << "' does not exist on target endpoint. In order to create this database along with the restore, please use the --create-database option";
+        LOG_TOPIC("be594", WARN, Logger::RESTORE)
+            << "Database '"
+            << db << "' does not exist on target endpoint. In order to create this database along with the restore, please use the --create-database option";
       }
     }

     if (result.fail()) {
-      result.reset(result.errorNumber(), std::string("cannot create server connection: ") + result.errorMessage());
+      result.reset(result.errorNumber(),
+                   std::string("cannot create server connection: ") + result.errorMessage());

       if (!_options.force) {
         break;
|
||||||
// run the actual restore
|
// run the actual restore
|
||||||
try {
|
try {
|
||||||
result = ::processInputDirectory(*httpClient, _clientTaskQueue, *this,
|
result = ::processInputDirectory(*httpClient, _clientTaskQueue, *this,
|
||||||
_options, *_directory, _stats);
|
_options, *_directory, _stats);
|
||||||
} catch (basics::Exception const& ex) {
|
} catch (basics::Exception const& ex) {
|
||||||
LOG_TOPIC("52b22", ERR, arangodb::Logger::RESTORE) << "caught exception: " << ex.what();
|
LOG_TOPIC("52b22", ERR, arangodb::Logger::RESTORE)
|
||||||
|
<< "caught exception: " << ex.what();
|
||||||
result = {ex.code(), ex.what()};
|
result = {ex.code(), ex.what()};
|
||||||
} catch (std::exception const& ex) {
|
} catch (std::exception const& ex) {
|
||||||
LOG_TOPIC("8f13f", ERR, arangodb::Logger::RESTORE) << "caught exception: " << ex.what();
|
LOG_TOPIC("8f13f", ERR, arangodb::Logger::RESTORE)
|
||||||
|
<< "caught exception: " << ex.what();
|
||||||
result = {TRI_ERROR_INTERNAL, ex.what()};
|
result = {TRI_ERROR_INTERNAL, ex.what()};
|
||||||
} catch (...) {
|
} catch (...) {
|
||||||
LOG_TOPIC("a74e8", ERR, arangodb::Logger::RESTORE) << "caught unknown exception";
|
LOG_TOPIC("a74e8", ERR, arangodb::Logger::RESTORE)
|
||||||
|
<< "caught unknown exception";
|
||||||
result = {TRI_ERROR_INTERNAL};
|
result = {TRI_ERROR_INTERNAL};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -113,6 +113,13 @@ class DumpRestoreHelper {
     print(CYAN + Date() + ': ' + this.which + ' and Restore - ' + s + RESET);
   }

+  adjustRestoreToDump()
+  {
+    this.restoreOptions = this.dumpOptions;
+    this.restoreConfig = pu.createBaseConfig('restore', this.dumpOptions, this.instanceInfo);
+    this.arangorestore = pu.run.arangoDumpRestoreWithConfig.bind(this, this.restoreConfig, this.restoreOptions, this.instanceInfo.rootDir, this.options.coreCheck);
+  }
+
   isAlive() {
     return pu.arangod.check.instanceAlive(this.instanceInfo, this.options);
   }
@@ -157,8 +164,14 @@ class DumpRestoreHelper {
     return this.validate(this.results.setup);
   }

-  dumpFrom(database) {
+  dumpFrom(database, separateDir = false) {
     this.print('dump');
+    if (separateDir) {
+      if (!fs.exists(fs.join(this.instanceInfo.rootDir, 'dump'))) {
+        fs.makeDirectory(fs.join(this.instanceInfo.rootDir, 'dump'));
+      }
+      this.dumpConfig.setOutputDirectory('dump' + fs.pathSeparator + database);
+    }
     if (!this.dumpConfig.haveSetAllDatabases()) {
       this.dumpConfig.setDatabase(database);
     }
@@ -166,8 +179,19 @@ class DumpRestoreHelper {
     return this.validate(this.results.dump);
   }

-  restoreTo(database) {
+  restoreTo(database, options = { separate: false, fromDir: '' }) {
     this.print('restore');
+
+    if (options.hasOwnProperty('separate') && options.separate === true) {
+      if (!options.hasOwnProperty('fromDir') || typeof options.fromDir !== 'string') {
+        options.fromDir = database;
+      }
+      if (!fs.exists(fs.join(this.instanceInfo.rootDir, 'dump'))) {
+        fs.makeDirectory(fs.join(this.instanceInfo.rootDir, 'dump'));
+      }
+      this.restoreConfig.setInputDirectory('dump' + fs.pathSeparator + options.fromDir, true);
+    }
+
     if (!this.restoreConfig.haveSetAllDatabases()) {
       this.restoreConfig.setDatabase(database);
     }
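With these two helpers a suite can keep several dumps side by side under rootDir/dump/<database>. A usage sketch matching the call sites further down in this commit:

helper.dumpFrom('_system', true);            // writes to dump/_system
helper.dumpFrom('UnitTestsDumpSrc', true);   // writes to dump/UnitTestsDumpSrc
helper.restoreTo('UnitTestsDumpDst', { separate: true, fromDir: 'UnitTestsDumpSrc' });
helper.restoreTo('_system', { separate: true });  // fromDir defaults to the database name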
@@ -213,6 +237,7 @@ class DumpRestoreHelper {
   restoreFoxxComplete(database) {
     this.print('Foxx Apps with full restore');
     this.restoreConfig.setDatabase(database);
+    this.restoreConfig.setIncludeSystem(true);
     this.results.restoreFoxxComplete = this.arangorestore();
     return this.validate(this.results.restoreFoxxComplete);
   }
@@ -343,14 +368,28 @@ function dump_backend (options, serverAuthInfo, clientAuth, dumpOptions, restore
   const cleanupFile = tu.makePathUnix(fs.join(testPaths[which][0], tstFiles.dumpCleanup));
   const testFile = tu.makePathUnix(fs.join(testPaths[which][0], tstFiles.dumpAgain));
   const tearDownFile = tu.makePathUnix(fs.join(testPaths[which][0], tstFiles.dumpTearDown));
-  if (
-      !helper.runSetupSuite(setupFile) ||
-      !helper.dumpFrom('UnitTestsDumpSrc') ||
-      !helper.runCleanupSuite(cleanupFile) ||
-      !helper.restoreTo('UnitTestsDumpDst') ||
-      !helper.runTests(testFile,'UnitTestsDumpDst') ||
-      !helper.tearDown(tearDownFile)) {
-    return helper.extractResults();
+  if (options.hasOwnProperty("multipleDumps") && options.multipleDumps) {
+    if (!helper.runSetupSuite(setupFile) ||
+        !helper.dumpFrom('_system', true) ||
+        !helper.dumpFrom('UnitTestsDumpSrc', true) ||
+        !helper.runCleanupSuite(cleanupFile) ||
+        !helper.restoreTo('UnitTestsDumpDst', { separate: true, fromDir: 'UnitTestsDumpSrc'}) ||
+        !helper.restoreTo('_system', { separate: true }) ||
+        !helper.runTests(testFile,'UnitTestsDumpDst') ||
+        !helper.tearDown(tearDownFile)) {
+      return helper.extractResults();
+    }
+  }
+  else {
+    if (!helper.runSetupSuite(setupFile) ||
+        !helper.dumpFrom('UnitTestsDumpSrc') ||
+        !helper.runCleanupSuite(cleanupFile) ||
+        !helper.restoreTo('UnitTestsDumpDst') ||
+        !helper.runTests(testFile,'UnitTestsDumpDst') ||
+        !helper.tearDown(tearDownFile)) {
+      return helper.extractResults();
+    }
   }

   if (tstFiles.hasOwnProperty("dumpCheckGraph")) {
@@ -365,6 +404,10 @@ function dump_backend (options, serverAuthInfo, clientAuth, dumpOptions, restore

   if (tstFiles.hasOwnProperty("foxxTest")) {
     const foxxTestFile = tu.makePathUnix(fs.join(testPaths[which][0], tstFiles.foxxTest));
+    if (options.hasOwnProperty("multipleDumps") && options.multipleDumps) {
+      helper.adjustRestoreToDump();
+      helper.restoreConfig.setInputDirectory(fs.join('dump','UnitTestsDumpSrc'), true);
+    }
     if (!helper.restoreFoxxComplete('UnitTestsDumpFoxxComplete') ||
         !helper.testFoxxComplete(foxxTestFile, 'UnitTestsDumpFoxxComplete') ||
         !helper.restoreFoxxAppsBundle('UnitTestsDumpFoxxAppsBundle') ||
@@ -410,20 +453,6 @@ function dumpMultiple (options) {
 }

 function dumpAuthentication (options) {
-  if (options.cluster) {
-    if (options.extremeVerbosity) {
-      print(CYAN + 'Skipped because of cluster.' + RESET);
-    }
-
-    return {
-      'dump_authentication': {
-        'status': true,
-        'message': 'skipped because of cluster',
-        'skipped': true
-      }
-    };
-  }
-
   const clientAuth = {
     'server.authentication': 'true'
   };
@@ -438,16 +467,26 @@ function dumpAuthentication (options) {
     password: 'foobarpasswd'
   };

+  let restoreAuthOpts = {
+    username: 'foobaruser',
+    password: 'pinus'
+  };
+
   _.defaults(dumpAuthOpts, options);
+  _.defaults(restoreAuthOpts, options);

   let tstFiles = {
     dumpSetup: 'dump-authentication-setup.js',
-    dumpCleanup: 'cleanup-nothing.js',
+    dumpCleanup: 'cleanup-alter-user.js',
     dumpAgain: 'dump-authentication.js',
     dumpTearDown: 'dump-teardown.js',
     foxxTest: 'check-foxx.js'
   };

-  return dump_backend(options, serverAuthInfo, clientAuth, dumpAuthOpts, dumpAuthOpts, 'dump_authentication', tstFiles, function(){});
+  options.multipleDumps = true;
+  options['server.jwt-secret'] = 'haxxmann';
+
+  return dump_backend(options, serverAuthInfo, clientAuth, dumpAuthOpts, restoreAuthOpts, 'dump_authentication', tstFiles, function(){});
 }

 function dumpEncrypted (options) {
|
||||||
defaultFns.push('dump_multiple');
|
defaultFns.push('dump_multiple');
|
||||||
|
|
||||||
testFns['hot_backup'] = hotBackup;
|
testFns['hot_backup'] = hotBackup;
|
||||||
defaultFns.push('hotBackup');
|
defaultFns.push('hot_backup');
|
||||||
|
|
||||||
for (var attrname in functionsDocumentation) { fnDocs[attrname] = functionsDocumentation[attrname]; }
|
for (var attrname in functionsDocumentation) { fnDocs[attrname] = functionsDocumentation[attrname]; }
|
||||||
for (var i = 0; i < optionsDocumentation.length; i++) { optionsDoc.push(optionsDocumentation[i]); }
|
for (var i = 0; i < optionsDocumentation.length; i++) { optionsDoc.push(optionsDocumentation[i]); }
|
||||||
|
|
|
@@ -0,0 +1,41 @@
+/*jshint globalstrict:false, strict:false */
+/* global db */
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief teardown for dump/reload tests
+///
+/// @file
+///
+/// DISCLAIMER
+///
+/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
+///
+/// Licensed under the Apache License, Version 2.0 (the "License");
+/// you may not use this file except in compliance with the License.
+/// You may obtain a copy of the License at
+///
+///     http://www.apache.org/licenses/LICENSE-2.0
+///
+/// Unless required by applicable law or agreed to in writing, software
+/// distributed under the License is distributed on an "AS IS" BASIS,
+/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+/// See the License for the specific language governing permissions and
+/// limitations under the License.
+///
+/// Copyright holder is ArangoDB Inc, Cologne, Germany
+///
+/// @author Wilfried Goesgens
+/// @author Copyright 2019, ArangoDB Inc, Cologne, Germany
+////////////////////////////////////////////////////////////////////////////////
+
+(function () {
+  'use strict';
+
+  var users = require("@arangodb/users");
+  users.update("foobaruser", "pinus", true);
+})();
+
+return {
+  status: true
+};
@@ -163,6 +163,8 @@ function dumpTestSuite () {
       assertEqual(users.permission(uName, "_system"), 'rw');
       assertEqual(users.permission(uName, "UnitTestsDumpSrc"), 'rw');
      assertEqual(users.permission(uName, "UnitTestsDumpEmpty"), 'rw');
+
+      assertTrue(users.isValid("foobaruser", "foobarpasswd"));
    }

  };