1
0
Fork 0

Replacing js upgrade logic (#4061)

This commit is contained in:
Simon 2018-03-08 13:57:30 +01:00 committed by Jan
parent d6246de85c
commit 272859c5fd
46 changed files with 1293 additions and 1757 deletions

View File

@ -490,6 +490,9 @@ SET(ARANGOD_SOURCES
VocBase/Methods/Databases.cpp
VocBase/Methods/Indexes.cpp
VocBase/Methods/Transactions.cpp
VocBase/Methods/Upgrade.cpp
VocBase/Methods/UpgradeTasks.cpp
VocBase/Methods/Version.cpp
VocBase/Graphs.cpp
VocBase/KeyGenerator.cpp
VocBase/LogicalCollection.cpp

View File

@ -2571,7 +2571,7 @@ int flushWalOnAllDBServers(bool waitForSync, bool waitForCollector, double maxWa
}
#ifndef USE_ENTERPRISE
std::unique_ptr<LogicalCollection> ClusterMethods::createCollectionOnCoordinator(
std::shared_ptr<LogicalCollection> ClusterMethods::createCollectionOnCoordinator(
TRI_col_type_e collectionType, TRI_vocbase_t* vocbase, VPackSlice parameters,
bool ignoreDistributeShardsLikeErrors, bool waitForSyncReplication,
bool enforceReplicationFactor) {
@ -2589,7 +2589,7 @@ std::unique_ptr<LogicalCollection> ClusterMethods::createCollectionOnCoordinator
/// @brief Persist collection in Agency and trigger shard creation process
////////////////////////////////////////////////////////////////////////////////
std::unique_ptr<LogicalCollection> ClusterMethods::persistCollectionInAgency(
std::shared_ptr<LogicalCollection> ClusterMethods::persistCollectionInAgency(
LogicalCollection* col, bool ignoreDistributeShardsLikeErrors,
bool waitForSyncReplication, bool enforceReplicationFactor,
VPackSlice) {
@ -2677,8 +2677,8 @@ std::unique_ptr<LogicalCollection> ClusterMethods::persistCollectionInAgency(
// We never get a nullptr here because an exception is thrown if the
// collection does not exist. Also, the create collection should have
// failed before.
TRI_ASSERT(c != nullptr);
return c->clone();
TRI_ASSERT(c.get() != nullptr);
return c;
}
/// @brief fetch edges from TraverserEngines

View File

@ -259,7 +259,7 @@ class ClusterMethods {
// Note that this returns a newly allocated object and ownership is
// shared with the caller, which is expressed by the returned shared_ptr.
static std::unique_ptr<LogicalCollection> createCollectionOnCoordinator(
static std::shared_ptr<LogicalCollection> createCollectionOnCoordinator(
TRI_col_type_e collectionType, TRI_vocbase_t* vocbase,
arangodb::velocypack::Slice parameters,
bool ignoreDistributeShardsLikeErrors,
@ -273,7 +273,7 @@ class ClusterMethods {
/// @brief Persist collection in Agency and trigger shard creation process
////////////////////////////////////////////////////////////////////////////////
static std::unique_ptr<LogicalCollection> persistCollectionInAgency(
static std::shared_ptr<LogicalCollection> persistCollectionInAgency(
LogicalCollection* col, bool ignoreDistributeShardsLikeErrors,
bool waitForSyncReplication, bool enforceReplicationFactor,
arangodb::velocypack::Slice parameters);

View File

@ -490,6 +490,7 @@ MMFilesCollection::MMFilesCollection(LogicalCollection* collection,
_isVolatile(arangodb::basics::VelocyPackHelper::readBooleanValue(
info, "isVolatile", false)),
_persistentIndexes(0),
_primaryIndex(nullptr),
_indexBuckets(Helper::readNumericValue<uint32_t>(
info, "indexBuckets", defaultIndexBuckets)),
_useSecondaryIndexes(true),
@ -530,6 +531,7 @@ MMFilesCollection::MMFilesCollection(LogicalCollection* logical,
_lastCompactionStamp = mmfiles._lastCompactionStamp;
_journalSize = mmfiles._journalSize;
_indexBuckets = mmfiles._indexBuckets;
_primaryIndex = mmfiles._primaryIndex;
_path = mmfiles._path;
_doCompact = mmfiles._doCompact;
_maxTick = mmfiles._maxTick;
@ -1556,30 +1558,6 @@ void MMFilesCollection::fillIndex(
uint32_t MMFilesCollection::indexBuckets() const { return _indexBuckets; }
// @brief return the primary index
// WARNING: Make sure that this LogicalCollection Instance
// is somehow protected. If it goes out of all scopes
// or it's indexes are freed the pointer returned will get invalidated.
/// @brief look up and return the collection's primary index.
/// The primary index is registered on the logical collection with iid 0,
/// so the lookup goes through LogicalCollection::lookupIndex(0).
/// The returned raw pointer is non-owning; it stays valid only as long as
/// the LogicalCollection (and its index vector) is alive.
arangodb::MMFilesPrimaryIndex* MMFilesCollection::primaryIndex() const {
// The primary index always has iid 0
auto primary = _logicalCollection->lookupIndex(0);
TRI_ASSERT(primary != nullptr);
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
// maintainer builds: if index 0 is not a primary index, dump all index
// pointers to the log before the assertion below fires
if (primary->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "got invalid indexes for collection '" << _logicalCollection->name()
<< "'";
for (auto const& it : _indexes) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "- " << it.get();
}
}
#endif
TRI_ASSERT(primary->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
// the primary index must be the index at position #0
// downcast is safe after the type assertion above; pointer ownership
// remains with the shared_ptr held by the logical collection
return static_cast<arangodb::MMFilesPrimaryIndex*>(primary.get());
}
int MMFilesCollection::fillAllIndexes(transaction::Methods* trx) {
return fillIndexes(trx, _indexes);
}
@ -2113,6 +2091,8 @@ void MMFilesCollection::prepareIndexes(VPackSlice indexesSlice) {
}
}
#endif
TRI_ASSERT(!_indexes.empty());
}
/// @brief creates the initial indexes for the collection
@ -2235,6 +2215,7 @@ int MMFilesCollection::saveIndex(transaction::Methods* trx,
try {
builder = idx->toVelocyPack(false, true);
} catch (arangodb::basics::Exception const& ex) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot save index definition: " << ex.what();
return ex.code();
} catch (std::exception const& ex) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot save index definition: " << ex.what();
@ -2264,7 +2245,11 @@ int MMFilesCollection::saveIndex(transaction::Methods* trx,
MMFilesLogfileManager::instance()->allocateAndWrite(marker, false);
res = slotInfo.errorCode;
} catch (arangodb::basics::Exception const& ex) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot save index definition: " << ex.what();
res = ex.code();
} catch (std::exception const& ex) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot save index definition: " << ex.what();
return TRI_ERROR_INTERNAL;
} catch (...) {
res = TRI_ERROR_INTERNAL;
}
@ -2284,6 +2269,10 @@ bool MMFilesCollection::addIndex(std::shared_ptr<arangodb::Index> idx) {
TRI_UpdateTickServer(static_cast<TRI_voc_tick_t>(id));
_indexes.emplace_back(idx);
if (idx->type() == Index::TRI_IDX_TYPE_PRIMARY_INDEX) {
TRI_ASSERT(idx->id() == 0);
_primaryIndex = static_cast<MMFilesPrimaryIndex*>(idx.get());
}
return true;
}

View File

@ -278,7 +278,10 @@ class MMFilesCollection final : public PhysicalCollection {
// WARNING: Make sure that this Collection Instance
// is somehow protected. If it goes out of all scopes
// or it's indexes are freed the pointer returned will get invalidated.
MMFilesPrimaryIndex* primaryIndex() const;
MMFilesPrimaryIndex* primaryIndex() const {
TRI_ASSERT(_primaryIndex != nullptr);
return _primaryIndex;
}
inline bool useSecondaryIndexes() const { return _useSecondaryIndexes; }
@ -576,6 +579,7 @@ class MMFilesCollection final : public PhysicalCollection {
// SECTION: Indexes
size_t _persistentIndexes;
MMFilesPrimaryIndex* _primaryIndex;
uint32_t _indexBuckets;
// whether or not secondary indexes should be filled

View File

@ -262,10 +262,6 @@ void RestCollectionHandler::handleCommandPost() {
collectionRepresentation(builder, coll->name(), /*showProperties*/ true,
/*showFigures*/ false, /*showCount*/ false,
/*aggregateCount*/ false);
if (!coll->isLocal()) { // FIXME: this is crappy design
delete coll;
}
});
if (res.ok()) {

View File

@ -34,8 +34,10 @@
#include "Rest/GeneralResponse.h"
#include "Rest/Version.h"
#include "RestServer/DatabaseFeature.h"
#include "VocBase/Methods/Upgrade.h"
#include "V8Server/V8DealerFeature.h"
using namespace arangodb;
using namespace arangodb::options;
@ -119,26 +121,20 @@ static void raceForClusterBootstrap() {
continue;
}
auto vocbase = DatabaseFeature::DATABASE->systemDatabase();
VPackBuilder builder;
V8DealerFeature::DEALER->loadJavaScriptFileInDefaultContext(
vocbase, "server/bootstrap/cluster-bootstrap.js", &builder);
VPackSlice jsresult = builder.slice();
if (!jsresult.isTrue()) {
TRI_vocbase_t* vocbase = DatabaseFeature::DATABASE->systemDatabase();
auto upgradeRes = methods::Upgrade::clusterBootstrap(vocbase);
if (upgradeRes.fail()) {
LOG_TOPIC(ERR, Logger::STARTUP) << "Problems with cluster bootstrap, "
<< "marking as not successful.";
if (!jsresult.isNone()) {
LOG_TOPIC(ERR, Logger::STARTUP) << "Returned value: "
<< jsresult.toJson();
} else {
LOG_TOPIC(ERR, Logger::STARTUP) << "Empty returned value.";
}
<< "marking as not successful.";
agency.removeValues(boostrapKey, false);
std::this_thread::sleep_for(std::chrono::seconds(1));
continue;
}
// become Foxxmaster, ignore result
LOG_TOPIC(DEBUG, Logger::STARTUP) << "Write Foxxmaster";
agency.setValue("Current/Foxxmaster", b.slice(), 0);
LOG_TOPIC(DEBUG, Logger::STARTUP) << "Creating the root user";
AuthenticationFeature::instance()->userManager()->createRootUser();
@ -204,9 +200,13 @@ void BootstrapFeature::start() {
}
}
} else if (ServerState::isDBServer(role)) {
LOG_TOPIC(DEBUG, Logger::STARTUP) << "Running server/bootstrap/db-server.js";
// only run the JavaScript in V8 context #0.
V8DealerFeature::DEALER->loadJavaScriptFileInDefaultContext(vocbase, "server/bootstrap/db-server.js", nullptr);
LOG_TOPIC(DEBUG, Logger::STARTUP) << "Running bootstrap";
auto upgradeRes = methods::Upgrade::clusterBootstrap(vocbase);
if (upgradeRes.fail()) {
LOG_TOPIC(ERR, Logger::STARTUP) << "Problem during startup";
}
} else {
TRI_ASSERT(false);
}

View File

@ -22,18 +22,15 @@
#include "CheckVersionFeature.h"
#include "Basics/exitcodes.h"
#include "Logger/Logger.h"
#include "Logger/LoggerFeature.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
#include "Replication/ReplicationFeature.h"
#include "RestServer/DatabaseFeature.h"
#include "V8Server/V8Context.h"
#include "V8Server/V8DealerFeature.h"
#include "V8Server/v8-query.h"
#include "V8Server/v8-vocbase.h"
#include "VocBase/Methods/Version.h"
#include "VocBase/vocbase.h"
#include "Basics/exitcodes.h"
using namespace arangodb;
using namespace arangodb::application_features;
@ -50,7 +47,7 @@ CheckVersionFeature::CheckVersionFeature(
setOptional(false);
requiresElevatedPrivileges(false);
startsAfter("Database");
startsAfter("V8Dealer");
startsAfter("EngineSelector");
}
void CheckVersionFeature::collectOptions(
@ -83,10 +80,6 @@ void CheckVersionFeature::validateOptions(
DatabaseFeature* databaseFeature =
ApplicationServer::getFeature<DatabaseFeature>("Database");
databaseFeature->enableCheckVersion();
V8DealerFeature* v8dealer =
ApplicationServer::getFeature<V8DealerFeature>("V8Dealer");
v8dealer->setMaximumContexts(1);
}
void CheckVersionFeature::start() {
@ -113,88 +106,59 @@ void CheckVersionFeature::checkVersion() {
// run version check
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "starting version check";
// can do this without a lock as this is the startup
DatabaseFeature* databaseFeature =
application_features::ApplicationServer::getFeature<DatabaseFeature>(
"Database");
auto* vocbase = DatabaseFeature::DATABASE->systemDatabase();
// iterate over all databases
for (auto& name : databaseFeature->getDatabaseNames()) {
TRI_vocbase_t* vocbase = databaseFeature->lookupDatabase(name);
methods::VersionResult res = methods::Version::check(vocbase);
TRI_ASSERT(vocbase != nullptr);
// enter context and isolate
{
V8Context* context = V8DealerFeature::DEALER->enterContext(vocbase, true, 0);
if (context == nullptr) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "could not enter context #0";
FATAL_ERROR_EXIT();
}
TRI_DEFER(V8DealerFeature::DEALER->exitContext(context));
{
v8::HandleScope scope(context->_isolate);
auto localContext =
v8::Local<v8::Context>::New(context->_isolate, context->_context);
localContext->Enter();
{
v8::Context::Scope contextScope(localContext);
// run version-check script
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "running database version check";
// can do this without a lock as this is the startup
DatabaseFeature* databaseFeature = application_features::ApplicationServer::getFeature<DatabaseFeature>("Database");
// iterate over all databases
for (auto& name : databaseFeature->getDatabaseNames()) {
TRI_vocbase_t* vocbase = databaseFeature->lookupDatabase(name);
TRI_ASSERT(vocbase != nullptr);
// special check script to be run just once in first thread (not in
// all) but for all databases
int status = TRI_CheckDatabaseVersion(vocbase, localContext);
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "version check return status " << status;
if (status < 0) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Database version check failed for '"
<< vocbase->name()
<< "'. Please inspect the logs for any errors";
FATAL_ERROR_EXIT_CODE(TRI_EXIT_VERSION_CHECK_FAILED);
} else if (status == 3) {
// this is safe to do even if further databases will be checked
// because we will never set the status back to success
*_result = 3;
LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "Database version check failed for '"
<< vocbase->name() << "': upgrade needed";
} else if (status == 2 && *_result == 1) {
// this is safe to do even if further databases will be checked
// because we will never set the status back to success
*_result = 2;
LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "Database version check failed for '"
<< vocbase->name() << "': downgrade needed";
}
}
}
// issue #391: when invoked with --database.auto-upgrade, the server will
// not always shut down
localContext->Exit();
LOG_TOPIC(DEBUG, Logger::STARTUP) << "version check return status "
<< res.status;
if (res.status < 0) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "Database version check failed for '" << vocbase->name()
<< "'. Please inspect the logs for any errors";
FATAL_ERROR_EXIT_CODE(TRI_EXIT_VERSION_CHECK_FAILED);
} else if (res.status == methods::VersionResult::DOWNGRADE_NEEDED) {
// this is safe to do even if further databases will be checked
// because we will never set the status back to success
*_result = 3;
LOG_TOPIC(WARN, arangodb::Logger::FIXME)
<< "Database version check failed for '" << vocbase->name()
<< "': upgrade needed";
} else if (res.status == methods::VersionResult::UPGRADE_NEEDED &&
*_result == 1) {
// this is safe to do even if further databases will be checked
// because we will never set the status back to success
*_result = 2;
LOG_TOPIC(WARN, arangodb::Logger::FIXME)
<< "Database version check failed for '" << vocbase->name()
<< "': downgrade needed";
}
}
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "final result of version check: " << *_result;
LOG_TOPIC(DEBUG, Logger::STARTUP) << "final result of version check: "
<< *_result;
if (*_result == 1) {
*_result = EXIT_SUCCESS;
} else if (*_result > 1) {
if (*_result == 2) {
// downgrade needed
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Database version check failed: downgrade needed";
LOG_TOPIC(FATAL, Logger::FIXME)
<< "Database version check failed: downgrade needed";
FATAL_ERROR_EXIT_CODE(TRI_EXIT_DOWNGRADE_REQUIRED);
} else if (*_result == 3) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Database version check failed: upgrade needed";
LOG_TOPIC(FATAL, Logger::FIXME)
<< "Database version check failed: upgrade needed";
FATAL_ERROR_EXIT_CODE(TRI_EXIT_UPGRADE_REQUIRED);
} else {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Database version check failed";
LOG_TOPIC(FATAL, Logger::FIXME) << "Database version check failed";
FATAL_ERROR_EXIT_CODE(TRI_EXIT_VERSION_CHECK_FAILED);
}
FATAL_ERROR_EXIT_CODE(*_result);

View File

@ -23,9 +23,13 @@
#ifndef APPLICATION_FEATURES_CHECK_VERSION_FEATURE_H
#define APPLICATION_FEATURES_CHECK_VERSION_FEATURE_H 1
#include <cstdint>
#include "ApplicationFeatures/ApplicationFeature.h"
struct TRI_vocbase_t;
namespace arangodb {
class CheckVersionFeature final
: public application_features::ApplicationFeature {
public:

View File

@ -138,6 +138,7 @@ void FlushFeature::executeCallbacks() {
// execute all callbacks. this will create as many transactions as
// there are callbacks
for (auto const& cb : _callbacks) {
// copy elision, std::move(..) not required
transactions.emplace_back(cb.second());
}

View File

@ -30,12 +30,8 @@
#include "Replication/ReplicationFeature.h"
#include "RestServer/DatabaseFeature.h"
#include "RestServer/InitDatabaseFeature.h"
#include "V8/v8-conv.h"
#include "V8/v8-globals.h"
#include "V8Server/V8Context.h"
#include "V8Server/V8DealerFeature.h"
#include "V8Server/v8-vocbase.h"
#include "VocBase/vocbase.h"
#include "VocBase/Methods/Upgrade.h"
using namespace arangodb;
using namespace arangodb::application_features;
@ -107,6 +103,11 @@ void UpgradeFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
cluster->forceDisable();
}
void UpgradeFeature::prepare() {
// need to register tasks before creating any database
methods::Upgrade::registerTasks();
}
void UpgradeFeature::start() {
auto init = ApplicationServer::getFeature<InitDatabaseFeature>("InitDatabase");
auth::UserManager* um = AuthenticationFeature::instance()->userManager();
@ -172,104 +173,36 @@ void UpgradeFeature::upgradeDatabase() {
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "starting database init/upgrade";
DatabaseFeature* databaseFeature = application_features::ApplicationServer::getFeature<DatabaseFeature>("Database");
auto* systemVocbase = DatabaseFeature::DATABASE->systemDatabase();
// enter context and isolate
{
V8Context* context = V8DealerFeature::DEALER->enterContext(systemVocbase, true, 0);
for (auto& name : databaseFeature->getDatabaseNames()) {
TRI_vocbase_t* vocbase = databaseFeature->lookupDatabase(name);
TRI_ASSERT(vocbase != nullptr);
if (context == nullptr) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "could not enter context #0";
FATAL_ERROR_EXIT();
}
TRI_DEFER(V8DealerFeature::DEALER->exitContext(context));
{
v8::HandleScope scope(context->_isolate);
auto localContext =
v8::Local<v8::Context>::New(context->_isolate, context->_context);
localContext->Enter();
{
v8::Context::Scope contextScope(localContext);
// run upgrade script
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "running database init/upgrade";
for (auto& name : databaseFeature->getDatabaseNames()) {
TRI_vocbase_t* vocbase = databaseFeature->lookupDatabase(name);
TRI_ASSERT(vocbase != nullptr);
// special check script to be run just once in first thread (not in
// all) but for all databases
v8::HandleScope scope(context->_isolate);
v8::Handle<v8::Object> args = v8::Object::New(context->_isolate);
args->Set(TRI_V8_ASCII_STRING(context->_isolate, "upgrade"),
v8::Boolean::New(context->_isolate, _upgrade));
localContext->Global()->Set(
TRI_V8_ASCII_STRING(context->_isolate, "UPGRADE_ARGS"), args);
bool ok = TRI_UpgradeDatabase(vocbase, localContext);
if (!ok) {
if (localContext->Global()->Has(TRI_V8_ASCII_STRING(
context->_isolate, "UPGRADE_STARTED"))) {
uint64_t upgradeType = TRI_ObjectToUInt64(localContext->Global()->Get(TRI_V8_ASCII_STRING(context->_isolate, "UPGRADE_TYPE")), false);
localContext->Exit();
// 0 = undecided
// 1 = same version
// 2 = downgrade
// 3 = upgrade
// 4 = requires upgrade
// 5 = no version found
char const* typeName = "initialization";
switch (upgradeType) {
case 0: // undecided
case 1: // same version
case 2: // downgrade
case 5: // no VERSION file found
// initialization
break;
case 3: // upgrade
typeName = "upgrade";
break;
case 4: // requires upgrade
typeName = "upgrade";
if (!_upgrade) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "Database '" << vocbase->name()
<< "' needs upgrade. Please start the server with the "
"--database.auto-upgrade option";
}
break;
}
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Database '" << vocbase->name()
<< "' " << typeName << " failed. Please inspect the logs from "
"the " << typeName << " procedure";
} else {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "JavaScript error during server start";
}
FATAL_ERROR_EXIT();
methods::UpgradeResult res = methods::Upgrade::startup(vocbase, _upgrade);
if (res.fail()) {
char const* typeName = "initialization";
switch (res.type) {
case methods::VersionResult::VERSION_MATCH:
case methods::VersionResult::DOWNGRADE_NEEDED:
case methods::VersionResult::NO_VERSION_FILE:
// initialization
break;
case methods::VersionResult::UPGRADE_NEEDED:
typeName = "upgrade";
if (!_upgrade) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "Database '" << vocbase->name()
<< "' needs upgrade. Please start the server with the "
"--database.auto-upgrade option";
}
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "database '" << vocbase->name()
<< "' init/upgrade done";
}
break;
default:
break;
}
// finally leave the context. otherwise v8 will crash with assertion
// failure when we delete the context locker below
localContext->Exit();
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Database '" << vocbase->name()
<< "' " << typeName << " failed (" << res.errorMessage() << "). "
"Please inspect the logs from the " << typeName << " procedure";
FATAL_ERROR_EXIT();
}
}

View File

@ -34,6 +34,7 @@ class UpgradeFeature final : public application_features::ApplicationFeature {
public:
void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;
void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
void prepare() override final;
void start() override final;
private:

View File

@ -423,6 +423,8 @@ void RocksDBCollection::prepareIndexes(
}
}
#endif
TRI_ASSERT(!_indexes.empty());
}
static std::shared_ptr<Index> findIndex(
@ -1322,15 +1324,6 @@ int RocksDBCollection::saveIndex(transaction::Methods* trx,
/// from this collection
arangodb::Result RocksDBCollection::fillIndexes(
transaction::Methods* trx, std::shared_ptr<arangodb::Index> added) {
// LOCKED from the outside, can't use lookupIndex
RocksDBPrimaryIndex* primIndex = nullptr;
for (std::shared_ptr<Index> idx : _indexes) {
if (idx->type() == Index::TRI_IDX_TYPE_PRIMARY_INDEX) {
primIndex = static_cast<RocksDBPrimaryIndex*>(idx.get());
break;
}
}
TRI_ASSERT(primIndex != nullptr);
// FIXME: assert for an exclusive lock on this collection
TRI_ASSERT(trx->state()->collection(_logicalCollection->cid(),
AccessMode::Type::EXCLUSIVE) != nullptr);

View File

@ -976,11 +976,6 @@ void StatisticsWorker::createCollection(std::string const& collection) const {
methods::Collections::create(
vocbase, collection, TRI_COL_TYPE_DOCUMENT, s.slice(), false, true,
[&](LogicalCollection* coll) {
// we must be sure to delete the just-created collection objects, otherwise we'll leak memory
std::unique_ptr<LogicalCollection> deleter;
if (ServerState::instance()->isCoordinator()) {
deleter.reset(coll);
}
VPackBuilder t;
t.openObject();
t.add("collection", VPackValue(collection));

View File

@ -388,7 +388,3 @@ bool CollectionNameResolver::visitCollections(
// emulate the original behaviour, assume 'cid' is for a regular collection and visit it as is
return visitor(cid);
}
// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------

View File

@ -606,30 +606,6 @@ void V8DealerFeature::loadJavaScriptFileInAllContexts(TRI_vocbase_t* vocbase,
}
}
/// @brief execute a JavaScript file in V8 context #0 (the "default" context).
/// @param vocbase database the script runs against
/// @param file    path of the JavaScript file to execute
/// @param builder optional output builder the script result is written into
/// @throws basics::Exception with TRI_ERROR_INTERNAL if context #0 cannot
///         be acquired; rethrows anything the script itself throws
void V8DealerFeature::loadJavaScriptFileInDefaultContext(TRI_vocbase_t* vocbase,
std::string const& file, VPackBuilder* builder) {
// find context with id 0
// alreadyLockedInThread guards against recursive locking; the TRI_DEFER
// restores it no matter how we leave this function
alreadyLockedInThread = true;
TRI_DEFER(alreadyLockedInThread = false);
// enter context #0
V8Context* context = enterContext(vocbase, true, 0);
if (context == nullptr) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "could not acquire default V8 context");
}
TRI_DEFER(exitContext(context));
try {
loadJavaScriptFileInternal(file, context, builder);
} catch (...) {
// log and rethrow: the caller decides how to handle script failures
LOG_TOPIC(WARN, Logger::V8) << "caught exception while executing JavaScript file '" << file << "' in context #" << context->id();
throw;
}
}
void V8DealerFeature::startGarbageCollection() {
TRI_ASSERT(_gcThread == nullptr);
_gcThread.reset(new V8GcThread(this));

View File

@ -83,8 +83,6 @@ class V8DealerFeature final : public application_features::ApplicationFeature {
// the builder is not cleared and thus should be empty before the call.
void loadJavaScriptFileInAllContexts(TRI_vocbase_t*, std::string const& file,
VPackBuilder* builder);
void loadJavaScriptFileInDefaultContext(TRI_vocbase_t*, std::string const& file,
VPackBuilder* builder);
void startGarbageCollection();
/// @brief forceContext == -1 means that any free context may be

View File

@ -245,7 +245,7 @@ static int V8ToVPackNoKeyRevId(v8::Isolate* isolate,
}
////////////////////////////////////////////////////////////////////////////////
/// @brief get all cluster collections
/// @brief get all cluster collections cloned, caller needs to clean up
////////////////////////////////////////////////////////////////////////////////
std::vector<LogicalCollection*> GetCollectionsCluster(

View File

@ -1131,6 +1131,32 @@ class KeySpace {
return false;
}
/// @brief set a numeric value for a key in this keyspace.
/// Overwrites the number in place if the key already exists, otherwise
/// inserts a new element. Returns true on success, false on OOM or a
/// failed hash insertion.
bool keySet(std::string const& key, double val) {
TRI_json_t* json = TRI_CreateNumberJson(val);
if (json == nullptr) {
// OOM
return false;
}
// element takes ownership of json; if we return without releasing,
// the unique_ptr destructor cleans both up
auto element = std::make_unique<KeySpaceElement>(key.c_str(), key.size(), json);
{
WRITE_LOCKER(writeLocker, _lock);
auto it = _hash.find(key);
if (it != _hash.end()) {
// key exists: mutate the stored number in place; the freshly built
// element (and its json) is discarded when it goes out of scope
it->second->json->_value._number = val;
return true;
} else {
auto it2 = _hash.emplace(key, element.get());
if (it2.second) {
element.release(); // _hash now has ownership
return true;
}
}
}
// insertion failed
return false;
}
int keyCas(v8::Isolate* isolate, std::string const& key,
v8::Handle<v8::Value> const& value,
v8::Handle<v8::Value> const& compare, bool& match) {
@ -2017,6 +2043,31 @@ static void JS_KeySet(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief calls global.KEY_SET('queue-control', 'databases-expire', 0);
////////////////////////////////////////////////////////////////////////////////
/// @brief calls global.KEY_SET('queue-control', 'databases-expire', 0);
/// i.e. resets the 'databases-expire' entry in the 'queue-control'
/// keyspace of the system database to 0.
/// @param vocbase must be the system database (asserted below)
/// @throws basics::Exception with TRI_ERROR_INTERNAL if the keyspace is
///         missing or the set fails
void TRI_ExpireFoxxQueueDatabaseCache(TRI_vocbase_t* vocbase) {
TRI_ASSERT(vocbase->isSystem());
std::string const name = "queue-control";
std::string const key = "databases-expire";
auto h = &(static_cast<UserStructures*>(vocbase->_userStructures)->hashes);
bool result;
{
// read lock protects the keyspace table; keySet takes the keyspace's
// own write lock internally
READ_LOCKER(readLocker, h->lock);
auto hash = GetKeySpace(vocbase, name);
if (hash == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
}
result = hash->keySet(key, 0);
}
if (!result) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
}
}
////////////////////////////////////////////////////////////////////////////////
/// @brief conditionally set the value for a key in the keyspace
////////////////////////////////////////////////////////////////////////////////

View File

@ -48,4 +48,9 @@ void TRI_FreeUserStructuresVocBase(TRI_vocbase_t*);
void TRI_InitV8UserStructures(v8::Isolate* isolate, v8::Handle<v8::Context>);
////////////////////////////////////////////////////////////////////////////////
/// @brief calls global.KEY_SET('queue-control', 'databases-expire', 0);
////////////////////////////////////////////////////////////////////////////////
void TRI_ExpireFoxxQueueDatabaseCache(TRI_vocbase_t* system);
#endif

View File

@ -1922,57 +1922,6 @@ static void JS_LdapEnabled(
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief run version check
////////////////////////////////////////////////////////////////////////////////
/// @brief run the JavaScript upgrade script ("server/upgrade-database.js")
/// against the given database.
/// Temporarily swaps the V8 globals' vocbase to @p vocbase for the duration
/// of the script, restoring the original afterwards.
/// @return true if the script returned a truthy value; on failure the
///         database state is set to FAILED_VERSION
bool TRI_UpgradeDatabase(TRI_vocbase_t* vocbase,
v8::Handle<v8::Context> context) {
auto isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
TRI_GET_GLOBALS();
// point the V8 globals at the database we are upgrading
TRI_vocbase_t* orig = v8g->_vocbase;
v8g->_vocbase = vocbase;
auto startupLoader = V8DealerFeature::DEALER->startupLoader();
v8::Handle<v8::Value> result = startupLoader->executeGlobalScript(
isolate, isolate->GetCurrentContext(), "server/upgrade-database.js");
bool ok = TRI_ObjectToBoolean(result);
if (!ok) {
// mark the database as failed so it is not used with a wrong version
vocbase->setState(TRI_vocbase_t::State::FAILED_VERSION);
}
// restore the previous vocbase on the V8 globals
v8g->_vocbase = orig;
return ok;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief run upgrade check
////////////////////////////////////////////////////////////////////////////////
/// @brief run the JavaScript version-check script ("server/check-version.js")
/// against the given database.
/// Temporarily swaps the V8 globals' vocbase to @p vocbase while the script
/// runs, restoring the original afterwards.
/// @return the integer status code returned by the script
int TRI_CheckDatabaseVersion(TRI_vocbase_t* vocbase,
v8::Handle<v8::Context> context) {
auto isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
TRI_GET_GLOBALS();
// point the V8 globals at the database being checked
TRI_vocbase_t* orig = v8g->_vocbase;
v8g->_vocbase = vocbase;
auto startupLoader = V8DealerFeature::DEALER->startupLoader();
v8::Handle<v8::Value> result = startupLoader->executeGlobalScript(
isolate, isolate->GetCurrentContext(), "server/check-version.js");
// the script's numeric return value is the version-check status
int code = (int)TRI_ObjectToInt64(result);
v8g->_vocbase = orig;
return code;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief check if we are in the enterprise edition
////////////////////////////////////////////////////////////////////////////////

View File

@ -38,19 +38,6 @@ class CollectionNameResolver;
class JSLoader;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief run version check
////////////////////////////////////////////////////////////////////////////////
bool TRI_UpgradeDatabase(TRI_vocbase_t*, v8::Handle<v8::Context>);
////////////////////////////////////////////////////////////////////////////////
/// @brief run upgrade check
////////////////////////////////////////////////////////////////////////////////
int TRI_CheckDatabaseVersion(TRI_vocbase_t* vocbase,
v8::Handle<v8::Context> context);
////////////////////////////////////////////////////////////////////////////////
/// @brief creates a TRI_vocbase_t global context
////////////////////////////////////////////////////////////////////////////////

View File

@ -260,8 +260,14 @@ static void CreateVocBase(v8::FunctionCallbackInfo<v8::Value> const& args,
propSlice,
createWaitsForSyncReplication,
enforceReplicationFactor,
[&isolate, &result](LogicalCollection* collection) {
result = WrapCollection(isolate, collection);
[&isolate, &result](LogicalCollection* coll) {
if (ServerState::instance()->isCoordinator()) {
std::unique_ptr<LogicalCollection> cc = coll->clone();
result = WrapCollection(isolate, cc.get());
cc.release();
} else {
result = WrapCollection(isolate, coll);
}
});
if (res.fail()) {
TRI_V8_THROW_EXCEPTION(res);

View File

@ -206,7 +206,6 @@ LogicalCollection::LogicalCollection(LogicalCollection const& other)
TRI_ASSERT(!_globallyUniqueId.empty());
}
// @brief Constructor used in coordinator case.
// The Slice contains the part of the plan that
// is relevant for this collection.
LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,

View File

@ -23,6 +23,8 @@
#include "Collections.h"
#include "Basics/Common.h"
#include "Aql/Query.h"
#include "Aql/QueryRegistry.h"
#include "Basics/LocalTaskQueue.h"
#include "Basics/ReadLocker.h"
#include "Basics/StringUtils.h"
@ -33,10 +35,12 @@
#include "Cluster/ServerState.h"
#include "GeneralServer/AuthenticationFeature.h"
#include "RestServer/DatabaseFeature.h"
#include "RestServer/QueryRegistryFeature.h"
#include "Scheduler/Scheduler.h"
#include "Scheduler/SchedulerFeature.h"
#include "StorageEngine/PhysicalCollection.h"
#include "Transaction/V8Context.h"
#include "Utils/OperationCursor.h"
#include "Utils/ExecContext.h"
#include "Utils/SingleCollectionTransaction.h"
#include "V8/v8-conv.h"
@ -178,7 +182,7 @@ Result Collections::create(TRI_vocbase_t* vocbase, std::string const& name,
ExecContext const* exe = ExecContext::CURRENT;
AuthenticationFeature* af = AuthenticationFeature::instance();
if (ServerState::instance()->isCoordinator()) {
std::unique_ptr<LogicalCollection> col =
std::shared_ptr<LogicalCollection> col =
ClusterMethods::createCollectionOnCoordinator(
collectionType, vocbase, infoSlice, false,
createWaitsForSyncReplication, enforceReplicationFactor);
@ -198,7 +202,7 @@ Result Collections::create(TRI_vocbase_t* vocbase, std::string const& name,
}
// reload otherwise collection might not be in yet
func(col.release());
func(col.get());
} else {
arangodb::LogicalCollection* col = vocbase->createCollection(infoSlice);
TRI_ASSERT(col != nullptr);
@ -546,3 +550,50 @@ Result Collections::revisionId(TRI_vocbase_t* vocbase,
rid = coll->revision(&trx);
return TRI_ERROR_NO_ERROR;
}
/// @brief Helper implementation similar to ArangoCollection.all() in v8.
/// Invokes @p cb once per document of collection @p cname. On a coordinator
/// this streams the documents through an AQL query; locally it does a full
/// index scan inside a read transaction.
/// @return the query / transaction result
Result Collections::all(TRI_vocbase_t* vocbase, std::string const& cname,
                        DocCallback cb) {
  // Implement it like this to stay close to the original
  if (ServerState::instance()->isCoordinator()) {
    // bind the collection name so it is properly escaped
    std::string q = "FOR r IN @@coll RETURN r";
    auto binds = std::make_shared<VPackBuilder>();
    binds->openObject();
    binds->add("@coll", VPackValue(cname));
    binds->close();
    arangodb::aql::Query query(false, vocbase, aql::QueryString(q), binds,
                               std::make_shared<VPackBuilder>(),
                               arangodb::aql::PART_MAIN);
    auto queryRegistry = QueryRegistryFeature::QUERY_REGISTRY;
    TRI_ASSERT(queryRegistry != nullptr);
    aql::QueryResult queryResult = query.execute(queryRegistry);
    Result res = queryResult.code;
    if (queryResult.code == TRI_ERROR_NO_ERROR) {
      VPackSlice array = queryResult.result->slice();
      for (VPackSlice doc : VPackArrayIterator(array)) {
        // resolve externals so the callback always sees inline data
        cb(doc.resolveExternal());
      }
    }
    return res;
  } else {
    auto ctx = transaction::V8Context::CreateWhenRequired(vocbase, true);
    SingleCollectionTransaction trx(ctx, cname, AccessMode::Type::READ);
    Result res = trx.begin();
    if (res.fail()) {
      return res;
    }
    // We directly read the entire cursor. so batchsize == limit
    std::unique_ptr<OperationCursor> opCursor =
        trx.indexScan(cname, transaction::Methods::CursorType::ALL, false);
    if (!opCursor->hasMore()) {
      // NOTE(review): presumably hasMore() == false right after indexScan
      // signals a failed cursor; confirm this is not hit for an empty
      // collection, which would then surface as OUT_OF_MEMORY
      return TRI_ERROR_OUT_OF_MEMORY;
    }
    opCursor->allDocuments([&](LocalDocumentId const& token, VPackSlice doc) {
      cb(doc.resolveExternal());
    }, 1000);
    return trx.finish(res);
  }
}

View File

@ -38,6 +38,7 @@ namespace methods {
/// Common code for collection REST handler and v8-collections
struct Collections {
typedef std::function<void(LogicalCollection*)> const& FuncCallback;
typedef std::function<void(velocypack::Slice const&)> const& DocCallback;
static void enumerate(TRI_vocbase_t* vocbase, FuncCallback);
@ -46,7 +47,7 @@ struct Collections {
FuncCallback);
/// Create collection, ownership of collection in callback is
/// transferred to callee
static Result create(TRI_vocbase_t* vocbase, std::string const& name,
static Result create(TRI_vocbase_t*, std::string const& name,
TRI_col_type_e collectionType,
velocypack::Slice const& properties,
bool createWaitsForSyncReplication,
@ -62,14 +63,17 @@ struct Collections {
static Result rename(LogicalCollection* coll, std::string const& newName,
bool doOverride);
static Result drop(TRI_vocbase_t* vocbase, LogicalCollection* coll,
static Result drop(TRI_vocbase_t*, LogicalCollection* coll,
bool allowDropSystem, double timeout);
static Result warmup(TRI_vocbase_t* vocbase,
LogicalCollection* coll);
static Result revisionId(TRI_vocbase_t* vocbase, LogicalCollection* coll,
static Result revisionId(TRI_vocbase_t*, LogicalCollection* coll,
TRI_voc_rid_t& rid);
/// @brief Helper implementation similar to ArangoCollection.all() in v8
static Result all(TRI_vocbase_t*, std::string const& cname, DocCallback);
};
#ifdef USE_ENTERPRISE
Result ULColCoordinatorEnterprise(std::string const& databaseName,

View File

@ -32,12 +32,13 @@
#include "Rest/HttpRequest.h"
#include "RestServer/DatabaseFeature.h"
#include "Utils/ExecContext.h"
#include "V8/v8-conv.h"
#include "V8/v8-utils.h"
#include "V8/v8-vpack.h"
#include "V8Server/V8Context.h"
#include "V8Server/V8DealerFeature.h"
#include "V8Server/v8-dispatcher.h"
#include "V8Server/v8-user-structures.h"
#include "VocBase/Methods/Upgrade.h"
#include "VocBase/vocbase.h"
#include <v8.h>
@ -80,7 +81,8 @@ std::vector<std::string> Databases::list(std::string const& user) {
AuthenticationFeature* af = AuthenticationFeature::instance();
std::vector<std::string> names;
std::vector<std::string> dbs = databaseFeature->getDatabaseNamesCoordinator();
std::vector<std::string> dbs =
databaseFeature->getDatabaseNamesCoordinator();
for (std::string const& db : dbs) {
if (!af->isActive() ||
af->userManager()->databaseAuthLevel(user, db) > auth::Level::NONE) {
@ -176,7 +178,7 @@ arangodb::Result Databases::create(std::string const& dbName,
} else if (user.hasKey("user")) {
name = user.get("user");
}
if (!name.isString()) { // empty names are silently ignored later
if (!name.isString()) { // empty names are silently ignored later
return Result(TRI_ERROR_HTTP_BAD_PARAMETER);
}
sanitizedUsers.add("username", name);
@ -211,6 +213,7 @@ arangodb::Result Databases::create(std::string const& dbName,
return Result(TRI_ERROR_INTERNAL);
}
UpgradeResult upgradeRes;
if (ServerState::instance()->isCoordinator()) {
if (!TRI_vocbase_t::IsAllowedName(false, dbName)) {
return Result(TRI_ERROR_ARANGO_DATABASE_NAME_INVALID);
@ -269,35 +272,12 @@ arangodb::Result Databases::create(std::string const& dbName,
});
}
V8Context* ctx = V8DealerFeature::DEALER->enterContext(vocbase, true);
if (ctx == nullptr) {
return Result(TRI_ERROR_INTERNAL, "could not acquire V8 context");
}
TRI_DEFER(V8DealerFeature::DEALER->exitContext(ctx));
v8::Isolate* isolate = ctx->_isolate;
v8::HandleScope scope(isolate);
TRI_GET_GLOBALS();
// now run upgrade and copy users into context
TRI_ASSERT(sanitizedUsers.slice().isArray());
v8::Handle<v8::Object> userVar = v8::Object::New(ctx->_isolate);
userVar->Set(TRI_V8_ASCII_STRING(isolate, "users"),
TRI_VPackToV8(isolate, sanitizedUsers.slice()));
isolate->GetCurrentContext()->Global()->Set(
TRI_V8_ASCII_STRING(isolate, "UPGRADE_ARGS"), userVar);
upgradeRes = methods::Upgrade::createDB(vocbase, sanitizedUsers.slice());
// initialize database
bool allowUseDatabase = v8g->_allowUseDatabase;
v8g->_allowUseDatabase = true;
// execute script
V8DealerFeature::DEALER->startupLoader()->executeGlobalScript(
isolate, isolate->GetCurrentContext(),
"server/bootstrap/coordinator-database.js");
v8g->_allowUseDatabase = allowUseDatabase;
} else {
// options for database (currently only allows setting "id" for testing
// purposes)
} else { // Single, DBServer, Agency
// options for database (currently only allows setting "id"
// for testing purposes)
TRI_voc_tick_t id = 0;
if (options.hasKey("id")) {
id = basics::VelocyPackHelper::stringUInt64(options, "id");
@ -308,9 +288,12 @@ arangodb::Result Databases::create(std::string const& dbName,
if (res != TRI_ERROR_NO_ERROR) {
return Result(res);
}
TRI_ASSERT(vocbase != nullptr);
TRI_ASSERT(!vocbase->isDangling());
TRI_DEFER(vocbase->release());
// we need to add the permissions before running the upgrade script
if (ServerState::instance()->isSingleServer() &&
ExecContext::CURRENT != nullptr) {
@ -323,58 +306,22 @@ arangodb::Result Databases::create(std::string const& dbName,
});
}
TRI_ASSERT(V8DealerFeature::DEALER != nullptr);
upgradeRes = methods::Upgrade::createDB(vocbase, sanitizedUsers.slice());
}
V8Context* ctx = V8DealerFeature::DEALER->enterContext(vocbase, true);
if (ctx == nullptr) {
return Result(TRI_ERROR_INTERNAL, "Could not get v8 context");
}
TRI_DEFER(V8DealerFeature::DEALER->exitContext(ctx));
v8::Isolate* isolate = ctx->_isolate;
v8::HandleScope scope(isolate);
if (upgradeRes.fail()) {
LOG_TOPIC(ERR, Logger::FIXME) << "Could not create database "
<< upgradeRes.errorMessage();
return upgradeRes;
}
// copy users into context
TRI_ASSERT(sanitizedUsers.slice().isArray());
v8::Handle<v8::Object> userVar = v8::Object::New(ctx->_isolate);
userVar->Set(TRI_V8_ASCII_STRING(isolate, "users"),
TRI_VPackToV8(isolate, sanitizedUsers.slice()));
isolate->GetCurrentContext()->Global()->Set(
TRI_V8_ASCII_STRING(isolate, "UPGRADE_ARGS"), userVar);
// switch databases
{
TRI_GET_GLOBALS();
TRI_vocbase_t* orig = v8g->_vocbase;
TRI_ASSERT(orig != nullptr);
v8g->_vocbase = vocbase;
// initialize database
try {
V8DealerFeature::DEALER->startupLoader()->executeGlobalScript(
isolate, isolate->GetCurrentContext(),
"server/bootstrap/local-database.js");
if (v8g->_vocbase == vocbase) {
// decrease the reference-counter only if we are coming back with the
// same database
vocbase->release();
}
// and switch back
v8g->_vocbase = orig;
} catch (...) {
if (v8g->_vocbase == vocbase) {
// decrease the reference-counter only if we are coming back with the
// same database
vocbase->release();
}
// and switch back
v8g->_vocbase = orig;
return Result(TRI_ERROR_INTERNAL,
"Could not execute local-database.js");
}
// Entirely Foxx related:
if (ServerState::instance()->isSingleServerOrCoordinator()) {
try {
TRI_ExpireFoxxQueueDatabaseCache(databaseFeature->systemDatabase());
} catch(...) {
// it is of no real importance if cache invalidation fails, because
// the cache entry has a ttl
}
}
@ -456,7 +403,8 @@ arangodb::Result Databases::drop(TRI_vocbase_t* systemVocbase,
TRI_ExecuteJavaScriptString(
isolate, isolate->GetCurrentContext(),
TRI_V8_ASCII_STRING(isolate, "require('internal').executeGlobalContextFunction('"
TRI_V8_ASCII_STRING(isolate,
"require('internal').executeGlobalContextFunction('"
"reloadRouting')"),
TRI_V8_ASCII_STRING(isolate, "reload routing"), false);
}
@ -471,5 +419,3 @@ arangodb::Result Databases::drop(TRI_vocbase_t* systemVocbase,
}
return res;
}

View File

@ -433,6 +433,26 @@ Result Indexes::ensureIndex(LogicalCollection* collection,
}
}
/// @brief convenience wrapper: builds a velocypack index definition from the
/// given type / fields / flags and delegates to ensureIndex (create = true).
arangodb::Result Indexes::createIndex(LogicalCollection* coll, Index::IndexType type,
                                      std::vector<std::string> const& fields,
                                      bool unique, bool sparse) {
  VPackBuilder definition;
  definition.openObject();
  definition.add("type", VPackValue(Index::oldtypeName(type)));
  definition.add("fields", VPackValue(VPackValueType::Array));
  for (auto const& attribute : fields) {
    definition.add(VPackValue(attribute));
  }
  definition.close();  // end of "fields" array
  definition.add("unique", VPackValue(unique));
  definition.add("sparse", VPackValue(sparse));
  definition.close();  // end of definition object

  VPackBuilder discarded;  // output of ensureIndex is not needed here
  return ensureIndex(coll, definition.slice(), true, discarded);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief checks if argument is an index identifier
////////////////////////////////////////////////////////////////////////////////

View File

@ -24,6 +24,7 @@
#define ARANGOD_VOC_BASE_API_INDEXES_H 1
#include "Basics/Result.h"
#include "Indexes/Index.h"
#include "VocBase/voc-types.h"
#include <velocypack/Builder.h>
@ -48,6 +49,10 @@ struct Indexes {
bool withFigures,
arangodb::velocypack::Builder&);
static arangodb::Result createIndex(LogicalCollection*, Index::IndexType,
std::vector<std::string> const&,
bool unique, bool sparse);
static arangodb::Result ensureIndex(
LogicalCollection* collection,
velocypack::Slice const& definition, bool create,

View File

@ -0,0 +1,309 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#include "Upgrade.h"
#include "Basics/Common.h"
#include "Agency/AgencyComm.h"
#include "Basics/StringUtils.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ServerState.h"
#include "Rest/Version.h"
#include "Utils/ExecContext.h"
#include "VocBase/Methods/UpgradeTasks.h"
#include "VocBase/Methods/Version.h"
#include "VocBase/vocbase.h"
#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>
using namespace arangodb;
using namespace arangodb::methods;
std::vector<Upgrade::Task> Upgrade::_tasks;
/// corresponding to cluster-bootstrap.js
/// @brief initialize the _system database during cluster bootstrap.
/// Uses a synthetic VERSION_MATCH result (no upgrade detection) and runs
/// only DATABASE_INIT tasks for the coordinator / DB-server flag.
UpgradeResult Upgrade::clusterBootstrap(TRI_vocbase_t* system) {
  // version numbers only matter for VERSION-file bookkeeping, which
  // runTasks performs for local flags only — hence "not actually used"
  uint32_t cc = Version::current();  // not actually used here
  VersionResult vinfo = {VersionResult::VERSION_MATCH, cc, cc, {}};
  uint32_t clusterFlag = Flags::CLUSTER_COORDINATOR_GLOBAL;
  if (ServerState::instance()->isDBServer()) {
    clusterFlag = Flags::CLUSTER_DB_SERVER_LOCAL;
  }
  TRI_ASSERT(ServerState::instance()->isRunningInCluster());
  // bootstrap tasks take no parameters (e.g. no user list)
  VPackSlice params = VPackSlice::emptyObjectSlice();
  return runTasks(system, vinfo, params, clusterFlag,
                  Upgrade::Flags::DATABASE_INIT);
}
/// corresponding to local-database.js
/// @brief run the DATABASE_INIT tasks for a freshly created database,
/// passing the sanitized user list through to the tasks
UpgradeResult Upgrade::createDB(TRI_vocbase_t* vocbase,
                                VPackSlice const& users) {
  TRI_ASSERT(users.isArray());

  // derive the cluster flag from this server's role
  ServerState::RoleEnum role = ServerState::instance()->getRole();
  uint32_t clusterFlag = 0;
  if (ServerState::isSingleServer(role)) {
    clusterFlag = Upgrade::Flags::CLUSTER_NONE;
  } else if (ServerState::isRunningInCluster(role)) {
    clusterFlag = ServerState::isDBServer(role)
                      ? Flags::CLUSTER_DB_SERVER_LOCAL
                      : Flags::CLUSTER_COORDINATOR_GLOBAL;
  } else {
    TRI_ASSERT(ServerState::isAgent(role));
    clusterFlag = Upgrade::Flags::CLUSTER_LOCAL;
  }

  // task parameters: the user list under the "users" key
  VPackBuilder params;
  params.openObject();
  params.add("users", users);
  params.close();

  // will write version file with this number
  uint32_t cc = Version::current();
  VersionResult vinfo = {VersionResult::VERSION_MATCH, cc, cc, {}};
  return runTasks(vocbase, vinfo, params.slice(), clusterFlag,
                  Upgrade::Flags::DATABASE_INIT);
}
/// @brief startup (or --database.auto-upgrade) check for one database:
/// compares the VERSION file against the server version and then runs the
/// matching tasks.
/// @param upgrade true when an actual upgrade was requested
UpgradeResult Upgrade::startup(TRI_vocbase_t* vocbase, bool upgrade) {
  uint32_t clusterFlag = Flags::CLUSTER_LOCAL;
  if (ServerState::instance()->isSingleServer()) {
    clusterFlag = Flags::CLUSTER_NONE;
  }
  uint32_t dbflag = upgrade ? Flags::DATABASE_UPGRADE : Flags::DATABASE_INIT;

  VersionResult vinfo = Version::check(vocbase);
  if (vinfo.status == VersionResult::DOWNGRADE_NEEDED) {
    // database directory written by a newer server: refuse to start
    LOG_TOPIC(ERR, Logger::STARTUP)
        << "Database directory version (" << vinfo.databaseVersion
        << ") is higher than current version (" << vinfo.serverVersion << ").";
    LOG_TOPIC(ERR, Logger::STARTUP)
        << "It seems like you are running ArangoDB on a database directory"
        << " that was created with a newer version of ArangoDB. Maybe this"
        << " is what you wanted but it is not supported by ArangoDB.";
    return UpgradeResult(TRI_ERROR_BAD_PARAMETER, vinfo.status);
  } else if (vinfo.status == VersionResult::UPGRADE_NEEDED && !upgrade) {
    // an upgrade is required but was not requested on the command line
    LOG_TOPIC(ERR, Logger::STARTUP)
        << "Database directory version (" << vinfo.databaseVersion
        << ") is lower than current version (" << vinfo.serverVersion << ").";
    LOG_TOPIC(ERR, Logger::STARTUP) << "---------------------------------------"
                                       "-------------------------------";
    LOG_TOPIC(ERR, Logger::STARTUP)
        << "It seems like you have upgraded the ArangoDB binary.";
    LOG_TOPIC(ERR, Logger::STARTUP)
        << "If this is what you wanted to do, please restart with the'";
    LOG_TOPIC(ERR, Logger::STARTUP) << " --database.auto-upgrade true'";
    LOG_TOPIC(ERR, Logger::STARTUP)
        << "option to upgrade the data in the database directory.'";
    LOG_TOPIC(ERR, Logger::STARTUP)
        << "Normally you can use the control script to upgrade your database'";
    LOG_TOPIC(ERR, Logger::STARTUP) << " /etc/init.d/arangodb stop'";
    LOG_TOPIC(ERR, Logger::STARTUP) << " /etc/init.d/arangodb upgrade'";
    LOG_TOPIC(ERR, Logger::STARTUP) << " /etc/init.d/arangodb start'";
    LOG_TOPIC(ERR, Logger::STARTUP) << "---------------------------------------"
                                       "-------------------------------'";
    return UpgradeResult(TRI_ERROR_BAD_PARAMETER, vinfo.status);
  } else {
    switch (vinfo.status) {
      case VersionResult::CANNOT_PARSE_VERSION_FILE:
      case VersionResult::CANNOT_READ_VERSION_FILE:
      case VersionResult::NO_SERVER_VERSION: {
        // a VERSION file exists but is unusable
        std::string msg =
            std::string("error during ") + (upgrade ? "upgrade" : "startup");
        return UpgradeResult(TRI_ERROR_INTERNAL, msg, vinfo.status);
      }
      case VersionResult::NO_VERSION_FILE:
        LOG_TOPIC(DEBUG, Logger::STARTUP) << "No VERSION file found";
        if (upgrade) {
          // VERSION file does not exist, we are running on a new database
          dbflag = DATABASE_INIT;
        }
        break;
      default:
        break;
    }
  }
  // should not do anything on VERSION_MATCH, and init the database
  // with all tasks if they were not executed yet. Tasks not listed
  // in the "tasks" attribute will be executed automatically
  VPackSlice const params = VPackSlice::emptyObjectSlice();
  return runTasks(vocbase, vinfo, params, clusterFlag, dbflag);
}
/// @brief register tasks, only run once on startup
/// NOTE: tasks run in registration order (runTasks iterates _tasks in
/// order), so keep dependencies — e.g. _graphs before collections that are
/// sharded like it — in mind when inserting new tasks.
void methods::Upgrade::registerTasks() {
  TRI_ASSERT(_tasks.empty());
  addTask("setupGraphs", "setup _graphs collection",
          /*system*/ Flags::DATABASE_ALL,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_INIT | DATABASE_UPGRADE | DATABASE_EXISTING,
          &UpgradeTasks::setupGraphs);
  addTask("setupUsers", "setup _users collection",
          /*system*/ Flags::DATABASE_SYSTEM,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_INIT | DATABASE_UPGRADE | DATABASE_EXISTING,
          &UpgradeTasks::setupUsers);
  addTask("createUsersIndex", "create index on 'user' attribute in _users",
          /*system*/ Flags::DATABASE_SYSTEM,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_INIT | DATABASE_UPGRADE,
          &UpgradeTasks::createUsersIndex);
  addTask("addDefaultUsers", "add default users for a new database",
          /*system*/ Flags::DATABASE_EXCEPT_SYSTEM,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_INIT, &UpgradeTasks::addDefaultUsers);
  addTask("updateUserModels",
          "convert documents in _users collection to new format",
          /*system*/ Flags::DATABASE_SYSTEM,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_UPGRADE, // DATABASE_EXISTING
          &UpgradeTasks::updateUserModels);
  addTask("createModules", "setup _modules collection",
          /*system*/ Flags::DATABASE_ALL,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_INIT | DATABASE_UPGRADE | DATABASE_EXISTING,
          &UpgradeTasks::createModules);
  addTask("createRouting", "setup _routing collection",
          /*system*/ Flags::DATABASE_ALL,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_INIT | DATABASE_UPGRADE | DATABASE_EXISTING,
          &UpgradeTasks::createRouting);
  addTask("insertRedirectionsAll", "insert default routes for admin interface",
          /*system*/ Flags::DATABASE_ALL,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_INIT | DATABASE_EXISTING,
          &UpgradeTasks::insertRedirections);
  addTask("setupAqlFunctions", "setup _aqlfunctions collection",
          /*system*/ Flags::DATABASE_ALL,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_INIT | DATABASE_UPGRADE | DATABASE_EXISTING,
          &UpgradeTasks::setupAqlFunctions);
  addTask("createFrontend", "setup _frontend collection",
          /*system*/ Flags::DATABASE_ALL,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_INIT | DATABASE_UPGRADE | DATABASE_EXISTING,
          &UpgradeTasks::createFrontend);
  addTask("setupQueues", "setup _queues collection",
          /*system*/ Flags::DATABASE_ALL,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_INIT | DATABASE_UPGRADE | DATABASE_EXISTING,
          &UpgradeTasks::setupQueues);
  addTask("setupJobs", "setup _jobs collection",
          /*system*/ Flags::DATABASE_ALL,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_INIT | DATABASE_UPGRADE | DATABASE_EXISTING,
          &UpgradeTasks::setupJobs);
  addTask("createJobsIndex", "create index on attributes in _jobs collection",
          /*system*/ Flags::DATABASE_ALL,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_INIT | DATABASE_UPGRADE | DATABASE_EXISTING,
          &UpgradeTasks::createJobsIndex);
  addTask("setupApps", "setup _apps collection",
          /*system*/ Flags::DATABASE_ALL,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_INIT | DATABASE_UPGRADE | DATABASE_EXISTING,
          &UpgradeTasks::setupApps);
  addTask("createAppsIndex", "create index on attributes in _apps collection",
          /*system*/ Flags::DATABASE_ALL,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_INIT | DATABASE_UPGRADE | DATABASE_EXISTING,
          &UpgradeTasks::createAppsIndex);
  addTask("setupAppBundles", "setup _appbundles collection",
          /*system*/ Flags::DATABASE_ALL,
          /*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
          /*database*/ DATABASE_INIT | DATABASE_UPGRADE | DATABASE_EXISTING,
          &UpgradeTasks::setupAppBundles);
}
/// @brief execute every registered task whose system / cluster / database
/// flags match the given run, recording each completed task in vinfo.tasks.
/// When running locally (CLUSTER_NONE or CLUSTER_LOCAL) the VERSION file is
/// rewritten after every task so a crash does not re-run completed tasks.
/// @return the first task error, or success with vinfo's status
UpgradeResult methods::Upgrade::runTasks(TRI_vocbase_t* vocbase,
                                         VersionResult& vinfo,
                                         VPackSlice const& params,
                                         uint32_t clusterFlag,
                                         uint32_t dbFlag) {
  TRI_ASSERT(vocbase != nullptr);
  TRI_ASSERT(clusterFlag != 0 && dbFlag != 0);
  TRI_ASSERT(!_tasks.empty());  // forgot to call registerTask!!
  // needs to run in superuser scope, otherwise we get errors
  ExecContextScope scope(ExecContext::superuser());
  // only local should actually write a VERSION file
  bool isLocal = clusterFlag == CLUSTER_NONE || clusterFlag == CLUSTER_LOCAL;
  // execute all tasks
  for (Task const& t : _tasks) {
    // check for system database
    if (t.systemFlag == DATABASE_SYSTEM && !vocbase->isSystem()) {
      continue;
    }
    if (t.systemFlag == DATABASE_EXCEPT_SYSTEM && vocbase->isSystem()) {
      continue;
    }
    // check that the cluster occurs in the cluster list
    if (!(t.clusterFlags & clusterFlag)) {
      continue;
    }
    // check that the database occurs in the database list
    if (!(t.databaseFlags & dbFlag)) {
      // special optimisation: for local server and new database,
      // an upgrade-only task can be viewed as executed.
      if (isLocal && dbFlag == DATABASE_INIT &&
          t.databaseFlags == DATABASE_UPGRADE) {
        vinfo.tasks.emplace(t.name, true);
      }
      continue;
    }
    // we need to execute this task
    try {
      t.action(vocbase, params);
      vinfo.tasks.emplace(t.name, true);
      if (isLocal) {  // save after every task for resilience
        methods::Version::write(vocbase, vinfo.tasks);
      }
    } catch (basics::Exception const& e) {
      // abort on the first failing task; already-completed tasks stay
      // recorded in the VERSION file (local case)
      LOG_TOPIC(ERR, Logger::STARTUP) << "Executing " << t.name << " ("
                                      << t.description << ") failed with "
                                      << e.message();
      return UpgradeResult(e.code(), e.what(), vinfo.status);
    }
  }
  if (isLocal) {  // no need to write this for cluster bootstrap
    methods::Version::write(vocbase, vinfo.tasks);
  }
  return UpgradeResult(TRI_ERROR_NO_ERROR, vinfo.status);
}

View File

@ -0,0 +1,128 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_VOC_BASE_API_UPGRADE_H
#define ARANGOD_VOC_BASE_API_UPGRADE_H 1
#include <cstdint>
#include <functional>
#include <string>
#include <utility>
#include <vector>

#include <velocypack/Builder.h>
#include <velocypack/Slice.h>

#include "Basics/Result.h"
#include "VocBase/Methods/Version.h"
struct TRI_vocbase_t;
namespace arangodb {
class UpgradeFeature;
namespace methods {
/// @brief result of an upgrade/initialization run: a plain Result plus the
/// VERSION-file status code that was observed when the tasks were executed
struct UpgradeResult : Result {
  UpgradeResult() : Result(), type(VersionResult::INVALID) {}
  UpgradeResult(int err, VersionResult::StatusCode s) : Result(err), type(s) {}
  UpgradeResult(int err, std::string const& msg, VersionResult::StatusCode s)
      : Result(err, msg), type(s) {}
  // status of the VERSION check this result belongs to
  VersionResult::StatusCode type;
};
/// Code to create and initialize databases
/// Replaces upgrade-database.js for good
struct Upgrade {
  friend class arangodb::UpgradeFeature;

  /// Bit flags describing where and when a task applies; a task runs only
  /// when its system, cluster and database masks all match (see runTasks)
  enum Flags : uint32_t {
    DATABASE_SYSTEM = (1u << 0),
    DATABASE_ALL = (1u << 1),
    DATABASE_EXCEPT_SYSTEM = (1u << 2),
    // =============
    DATABASE_INIT = (1u << 3),
    DATABASE_UPGRADE = (1u << 4),
    DATABASE_EXISTING = (1u << 5),
    // =============
    CLUSTER_NONE = (1u << 6),
    CLUSTER_LOCAL = (1u << 7),
    CLUSTER_COORDINATOR_GLOBAL = (1u << 8),
    CLUSTER_DB_SERVER_LOCAL = (1u << 9)
  };

  typedef std::function<void(TRI_vocbase_t*, velocypack::Slice const&)>
      TaskFunction;

  /// A single upgrade/initialization step
  struct Task {
    std::string name;
    std::string description;
    uint32_t systemFlag;
    uint32_t clusterFlags;
    uint32_t databaseFlags;
    TaskFunction action;
  };

 public:
  /// @brief initialize _system db in cluster
  /// corresponding to cluster-bootstrap.js
  static UpgradeResult clusterBootstrap(TRI_vocbase_t* system);

  /// @brief create a database
  /// corresponding to local-database.js
  static UpgradeResult createDB(TRI_vocbase_t*, velocypack::Slice const&);

  /// @brief executed on startup
  /// @param upgrade Perform an actual upgrade
  /// Corresponds to upgrade-database.js
  static UpgradeResult startup(TRI_vocbase_t* vocbase, bool upgrade);

 private:
  static std::vector<Task> _tasks;

  /// @brief append a task; parameters are taken by rvalue reference, so
  /// move them into the Task instead of copying (they are dead afterwards)
  static void addTask(std::string&& name, std::string&& desc,
                      uint32_t systemFlag, uint32_t clusterFlag,
                      uint32_t dbFlag, TaskFunction&& action) {
    _tasks.push_back(Task{std::move(name), std::move(desc), systemFlag,
                          clusterFlag, dbFlag, std::move(action)});
  }

  /// @brief register tasks, only run once on startup
  static void registerTasks();
  static UpgradeResult runTasks(TRI_vocbase_t*, VersionResult&,
                                velocypack::Slice const& params,
                                uint32_t clusterFlag, uint32_t dbFlag);
  /*
  /// @brief system database only
  constexpr int DATABASE_SYSTEM = 1000;
  /// @brief all databases
  constexpr int DATABASE_ALL = 1001;
  /// @brief all databases except system
  constexpr int DATABASE_EXCEPT_SYSTEM = 1002;
  /// @brief for stand-alone, no cluster
  constexpr int CLUSTER_NONE = 2000;
  /// @brief for cluster local part
  constexpr int CLUSTER_LOCAL = 2001;
  /// @brief for cluster global part (shared collections)
  constexpr int CLUSTER_COORDINATOR_GLOBAL = 2002;
  /// @brief db server global part (DB server local!)
  constexpr int CLUSTER_DB_SERVER_LOCAL = 2003;
  /// @brief for new databases
  constexpr int DATABASE_INIT = 3000;
  /// @brief for existing database, which must be upgraded
  constexpr int DATABASE_UPGRADE = 3001;
  /// @brief for existing database, which are already at the correct version
  constexpr int DATABASE_EXISTING = 3002;*/
};
}
}
#endif

View File

@ -0,0 +1,239 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#include "UpgradeTasks.h"
#include "Basics/Common.h"
#include "Agency/AgencyComm.h"
#include "Basics/StringUtils.h"
#include "Basics/VelocyPackHelper.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ServerState.h"
#include "GeneralServer/AuthenticationFeature.h"
#include "Transaction/StandaloneContext.h"
#include "Utils/OperationOptions.h"
#include "Utils/SingleCollectionTransaction.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/Methods/Collections.h"
#include "VocBase/Methods/Indexes.h"
#include "VocBase/vocbase.h"
#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>
using namespace arangodb;
using namespace arangodb::methods;
using application_features::ApplicationServer;
using basics::VelocyPackHelper;
// Note: this entire file should run with superuser rights
/// @brief create the named system collection unless it already exists;
/// throws on any lookup or creation failure
static void createSystemCollection(TRI_vocbase_t* vocbase,
                                   std::string const& name) {
  // probe for the collection; the callback is a no-op, we only
  // care whether the lookup reports "not found"
  Result result = methods::Collections::lookup(vocbase, name,
                                               [](LogicalCollection*) {});
  if (result.is(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND)) {
    auto cluster = ApplicationServer::getFeature<ClusterFeature>("Cluster");
    VPackBuilder props;
    props.openObject();
    props.add("isSystem", VPackSlice::trueSlice());
    props.add("waitForSync", VPackSlice::falseSlice());
    props.add("journalSize", VPackValue(1024 * 1024));
    props.add("replicationFactor",
              VPackValue(cluster->systemReplicationFactor()));
    if (name != "_graphs") {
      // every other system collection is sharded like _graphs
      props.add("distributeShardsLike", VPackValue("_graphs"));
    }
    props.close();
    result = Collections::create(vocbase, name, TRI_COL_TYPE_DOCUMENT,
                                 props.slice(),
                                 /*waitsForSyncReplication*/ true,
                                 /*enforceReplicationFactor*/ true,
                                 [](LogicalCollection* coll) { TRI_ASSERT(coll); });
  }
  if (result.fail()) {
    THROW_ARANGO_EXCEPTION(result);
  }
}
/// @brief create an index on the named collection; throws when either the
/// collection lookup or the index creation fails
static void createIndex(TRI_vocbase_t* vocbase, std::string const& name,
                        Index::IndexType type,
                        std::vector<std::string> const& fields, bool unique,
                        bool sparse) {
  // (removed unused local VPackBuilder)
  Result res1, res2;
  // res2 is only set when the lookup callback actually runs
  res1 =
      methods::Collections::lookup(vocbase, name, [&](LogicalCollection* coll) {
        res2 =
            methods::Indexes::createIndex(coll, type, fields, unique, sparse);
      });
  if (res1.fail() || res2.fail()) {
    // prefer the lookup error; otherwise report the index error
    THROW_ARANGO_EXCEPTION(res1.fail() ? res1 : res2);
  }
}
// Task: create the _graphs system collection (runs in every database)
void UpgradeTasks::setupGraphs(TRI_vocbase_t* vocbase, VPackSlice const&) {
  createSystemCollection(vocbase, "_graphs");
}
// Task: create the _users system collection (system database only,
// per the flags in Upgrade::registerTasks)
void UpgradeTasks::setupUsers(TRI_vocbase_t* vocbase, VPackSlice const&) {
  createSystemCollection(vocbase, "_users");
}
// Task: hash index on the "user" attribute of _users; system database only
void UpgradeTasks::createUsersIndex(TRI_vocbase_t* vocbase, VPackSlice const&) {
  TRI_ASSERT(vocbase->isSystem());
  // sparse + unique: documents without "user" are allowed, but values
  // that are present must be unique
  createIndex(vocbase, "_users", Index::TRI_IDX_TYPE_HASH_INDEX, {"user"},
              /*unique*/ true, /*sparse*/ true);
}
// Task: create the initial users of a freshly created (non-system) database.
// Expects params to contain a "users" array of objects with "username",
// "passwd", "active" and optional "extra" attributes.
void UpgradeTasks::addDefaultUsers(TRI_vocbase_t* vocbase,
                                   VPackSlice const& params) {
  TRI_ASSERT(!vocbase->isSystem());
  TRI_ASSERT(params.isObject());
  VPackSlice users = params.get("users");
  if (!users.isArray()) {
    return;  // no users requested, nothing to do
  }
  auth::UserManager* um = AuthenticationFeature::instance()->userManager();
  TRI_ASSERT(um != nullptr);
  for (VPackSlice slice : VPackArrayIterator(users)) {
    std::string user = VelocyPackHelper::getStringValue(slice, "username",
                                                        StaticStrings::Empty);
    if (user.empty()) {
      continue;  // entries without a username are silently skipped
    }
    std::string passwd = VelocyPackHelper::getStringValue(slice, "passwd", "");
    bool active = VelocyPackHelper::getBooleanValue(slice, "active", true);
    VPackSlice extra = slice.get("extra");
    // store the user first; the optional "extra" data is attached in a
    // second step via updateUser below
    Result res = um->storeUser(false, user, passwd, active, VPackSlice::noneSlice());
    if (res.fail() && !res.is(TRI_ERROR_USER_DUPLICATE)) {
      LOG_TOPIC(WARN, Logger::STARTUP) << "could not add database user "
                                       << user;
    } else if (extra.isObject() && !extra.isEmptyObject()) {
      // lambda parameter renamed to 'u' — previously it shadowed the
      // outer std::string 'user'
      um->updateUser(user, [&](auth::User& u) {
        u.setUserData(VPackBuilder(extra));
        return TRI_ERROR_NO_ERROR;
      });
    }
  }
}
// Task: convert _users documents to the current format; currently a no-op
// (registered with DATABASE_UPGRADE in registerTasks)
void UpgradeTasks::updateUserModels(TRI_vocbase_t* vocbase, VPackSlice const&) {
  TRI_ASSERT(vocbase->isSystem());
  // TODO isn't this done on the fly ?
}
// Task: create the _modules system collection. The params slice is unused,
// so the parameter name ('s') was dropped to avoid an unused-parameter
// warning and to match the sibling tasks.
void UpgradeTasks::createModules(TRI_vocbase_t* vocbase, VPackSlice const&) {
  createSystemCollection(vocbase, "_modules");
}
// Task: create the _routing system collection
void UpgradeTasks::createRouting(TRI_vocbase_t* vocbase, VPackSlice const&) {
  createSystemCollection(vocbase, "_routing");
}
// Task: replace legacy admin-UI routes in _routing. First collects (in a
// read pass) the keys of all routes whose destination points at the old
// "_admin/html" / "_admin/aardvark" paths, then removes them and inserts
// permanent redirects to the current aardvark frontend in a single write
// transaction.
void UpgradeTasks::insertRedirections(TRI_vocbase_t* vocbase,
                                      VPackSlice const&) {
  std::vector<std::string> toRemove;  // remove in a different trx
  // read-pass callback: remember the _key of every outdated redirect
  auto cb = [&toRemove] (VPackSlice const& doc) {
    TRI_ASSERT(doc.isObject());
    VPackSlice url = doc.get("url"), action = doc.get("action");
    if (url.isObject() && action.isObject() && action.get("options").isObject()) {
      VPackSlice v = action.get("options").get("destination");
      if (v.isString()) {
        std::string path = v.copyString();
        if (path.find("_admin/html") != std::string::npos ||
            path.find("_admin/aardvark") != std::string::npos) {
          toRemove.push_back(doc.get(StaticStrings::KeyString).copyString());
        }
      }
    }
  };
  Result res = methods::Collections::all(vocbase, "_routing", cb);
  if (res.fail()) {
    THROW_ARANGO_EXCEPTION(res);
  }
  auto ctx = transaction::StandaloneContext::Create(vocbase);
  SingleCollectionTransaction trx(ctx, "_routing", AccessMode::Type::WRITE);
  res = trx.begin();
  if (!res.ok()) {
    THROW_ARANGO_EXCEPTION(res);
  }
  OperationOptions opts;
  opts.waitForSync = true;
  for (std::string const& key : toRemove) {
    VPackBuilder b;
    // build {"_key": key} via the builder's call-operator shorthand
    b(VPackValue(VPackValueType::Object))(StaticStrings::KeyString, VPackValue(key))();
    // NOTE(review): the remove result is intentionally ignored here
    trx.remove("_routing", b.slice(), opts); // check results
  }
  std::vector<std::string> paths = {"/", "/_admin/html",
                                    "/_admin/html/index.html"};
  std::string dest = "/_db/" + vocbase->name() + "/_admin/aardvark/index.html";
  OperationResult opres;
  // insert a permanent redirect document for every legacy path
  for (std::string const& path : paths) {
    VPackBuilder bb;
    bb.openObject();
    bb.add("url", VPackValue(path));
    bb.add("action", VPackValue(VPackValueType::Object));
    bb.add("do", VPackValue("@arangodb/actions/redirectRequest"));
    bb.add("options", VPackValue(VPackValueType::Object));
    bb.add("permanently", VPackSlice::trueSlice());
    bb.add("destination", VPackValue(dest));
    bb.close();   // end of "options"
    bb.close();   // end of "action"
    bb.add("priority", VPackValue(-1000000));
    bb.close();   // end of document
    opres = trx.insert("_routing", bb.slice(), opts);
    if (opres.fail()) {
      THROW_ARANGO_EXCEPTION(opres.result);
    }
  }
  res = trx.finish(opres.result);
  if (!res.ok()) {
    THROW_ARANGO_EXCEPTION(res);
  }
}
/// @brief upgrade task: create the "_aqlfunctions" system collection
void UpgradeTasks::setupAqlFunctions(TRI_vocbase_t* vocbase,
                                     VPackSlice const&) {
  createSystemCollection(vocbase, "_aqlfunctions");
}
/// @brief upgrade task: create the "_frontend" system collection
void UpgradeTasks::createFrontend(TRI_vocbase_t* vocbase, VPackSlice const&) {
  createSystemCollection(vocbase, "_frontend");
}
/// @brief upgrade task: create the "_queues" system collection
void UpgradeTasks::setupQueues(TRI_vocbase_t* vocbase, VPackSlice const&) {
  createSystemCollection(vocbase, "_queues");
}
/// @brief upgrade task: create the "_jobs" system collection
void UpgradeTasks::setupJobs(TRI_vocbase_t* vocbase, VPackSlice const&) {
  createSystemCollection(vocbase, "_jobs");
}
/// @brief upgrade task: ensure the "_jobs" collection exists and create
/// two unique, sparse skiplist indexes over (queue, status, delayUntil)
/// and (status, queue, delayUntil)
void UpgradeTasks::createJobsIndex(TRI_vocbase_t* vocbase, VPackSlice const&) {
  createSystemCollection(vocbase, "_jobs");
  createIndex(vocbase, "_jobs", Index::TRI_IDX_TYPE_SKIPLIST_INDEX,
              {"queue", "status", "delayUntil"},
              /*unique*/ true, /*sparse*/ true);
  createIndex(vocbase, "_jobs", Index::TRI_IDX_TYPE_SKIPLIST_INDEX,
              {"status", "queue", "delayUntil"},
              /*unique*/ true, /*sparse*/ true);
}
/// @brief upgrade task: create the "_apps" system collection
void UpgradeTasks::setupApps(TRI_vocbase_t* vocbase, VPackSlice const&) {
  createSystemCollection(vocbase, "_apps");
}
/// @brief upgrade task: create a unique, sparse hash index on the "mount"
/// attribute of the "_apps" collection
void UpgradeTasks::createAppsIndex(TRI_vocbase_t* vocbase, VPackSlice const&) {
  createIndex(vocbase, "_apps", Index::TRI_IDX_TYPE_HASH_INDEX, {"mount"},
              /*unique*/ true, /*sparse*/ true);
}
/// @brief upgrade task: create the "_appbundles" system collection
void UpgradeTasks::setupAppBundles(TRI_vocbase_t* vocbase, VPackSlice const&) {
  createSystemCollection(vocbase, "_appbundles");
}

View File

@ -0,0 +1,56 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_VOC_BASE_API_UPGRADE_TASKS_H
#define ARANGOD_VOC_BASE_API_UPGRADE_TASKS_H 1
#include <velocypack/Slice.h>
struct TRI_vocbase_t;
namespace arangodb {
namespace methods {
/// Code to create and initialize databases
/// Replaces upgrade-database.js for good
struct UpgradeTasks {
  // Each task receives the database to operate on and the upgrade
  // parameters as a velocypack slice.
  // NOTE(review): presumably registered/invoked by Upgrade.cpp — confirm.
  static void setupGraphs(TRI_vocbase_t*, velocypack::Slice const&);     // create "_graphs"
  static void setupUsers(TRI_vocbase_t*, velocypack::Slice const&);      // create "_users"
  static void createUsersIndex(TRI_vocbase_t*, velocypack::Slice const&);  // hash index on "user"
  static void addDefaultUsers(TRI_vocbase_t*, velocypack::Slice const&);   // add users from params
  static void updateUserModels(TRI_vocbase_t*, velocypack::Slice const&);  // currently a no-op
  static void createModules(TRI_vocbase_t*, velocypack::Slice const&);   // create "_modules"
  static void createRouting(TRI_vocbase_t*, velocypack::Slice const&);   // create "_routing"
  static void insertRedirections(TRI_vocbase_t*, velocypack::Slice const&);  // refresh UI redirects
  static void setupAqlFunctions(TRI_vocbase_t*, velocypack::Slice const&);   // create "_aqlfunctions"
  static void createFrontend(TRI_vocbase_t*, velocypack::Slice const&);  // create "_frontend"
  static void setupQueues(TRI_vocbase_t*, velocypack::Slice const&);     // create "_queues"
  static void setupJobs(TRI_vocbase_t*, velocypack::Slice const&);       // create "_jobs"
  static void createJobsIndex(TRI_vocbase_t*, velocypack::Slice const&); // skiplist indexes on "_jobs"
  static void setupApps(TRI_vocbase_t*, velocypack::Slice const&);       // create "_apps"
  static void createAppsIndex(TRI_vocbase_t*, velocypack::Slice const&); // hash index on "mount"
  static void setupAppBundles(TRI_vocbase_t*, velocypack::Slice const&); // create "_appbundles"
};
}
}
#endif

View File

@ -0,0 +1,166 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#include "Version.h"
#include "Basics/Common.h"
#include "Basics/FileUtils.h"
#include "Basics/files.h"
#include "Logger/Logger.h"
#include "Rest/Version.h"
#include "RestServer/DatabaseFeature.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/Parser.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
using namespace arangodb;
using namespace arangodb::methods;
static uint64_t parseVersion(char const* str, size_t len) {
uint64_t result = 0;
uint64_t tmp = 0;
for (size_t i = 0; i < len; i++) {
char c = str[i];
if ('0' <= c && c <= '9') {
tmp = tmp * 10 + static_cast<size_t>(c - '0');
} else if (c == '.') {
result = result * 100 + tmp;
tmp = 0;
} else {
// stop at first other character (e.g. "3.4.devel")
while (result > 0 && result < 10000) {
// do we have 5 digits already? if we, then boost the version
// number accordingly. this can happen for version strings
// such as "3.4.devel" or "4.devel"
result *= 100;
}
break;
}
}
return result + tmp;
}
/// @brief "(((major * 100) + minor) * 100) + patch"
uint64_t Version::current() {
return parseVersion(ARANGODB_VERSION, strlen(ARANGODB_VERSION));
}
/// @brief reads the VERSION file of a database and compares the version
/// number stored in it against the running server's version.
/// Returns a VersionResult whose status indicates a match, a required
/// upgrade/downgrade, or an error reading/parsing the file; on success
/// it also carries the task map recorded by previous runs.
VersionResult Version::check(TRI_vocbase_t* vocbase) {
  StorageEngine* engine = EngineSelectorFeature::ENGINE;
  TRI_ASSERT(engine != nullptr);
  std::string versionFile = engine->versionFilename(vocbase->id());
  if (!basics::FileUtils::exists(versionFile)) {
    LOG_TOPIC(DEBUG, Logger::STARTUP) << "VERSION file not found: "
                                      << versionFile;
    return VersionResult{VersionResult::NO_VERSION_FILE, 0, 0, {}};
  }
  std::string versionInfo = basics::FileUtils::slurp(versionFile);
  LOG_TOPIC(DEBUG, Logger::STARTUP) << "Found VERSION file: " << versionInfo;
  if (versionInfo.empty()) {
    LOG_TOPIC(ERR, Logger::STARTUP) << "Empty VERSION file";
    return VersionResult{VersionResult::CANNOT_READ_VERSION_FILE, 0, 0, {}};
  }
  // sentinel; overwritten once the file has been parsed successfully
  uint64_t lastVersion = UINT64_MAX;
  // tasks already executed by previous startups
  std::map<std::string, bool> tasks;
  try {
    std::shared_ptr<VPackBuilder> parsed =
        velocypack::Parser::fromJson(versionInfo);
    VPackSlice versionVals = parsed->slice();
    if (!versionVals.isObject() || !versionVals.get("version").isNumber()) {
      LOG_TOPIC(ERR, Logger::STARTUP) << "Cannot parse VERSION file " << versionInfo;
      return VersionResult{VersionResult::CANNOT_PARSE_VERSION_FILE, 0, 0,
                           tasks};
    }
    lastVersion = versionVals.get("version").getUInt();
    VPackSlice run = versionVals.get("tasks");
    if (run.isNone() || !run.isObject()) {
      LOG_TOPIC(ERR, Logger::STARTUP) << "Invalid VERSION file " << versionInfo;
      return VersionResult{VersionResult::CANNOT_PARSE_VERSION_FILE, 0, 0,
                           tasks};
    }
    for (VPackObjectIterator::ObjectPair pair : VPackObjectIterator(run)) {
      tasks.emplace(pair.key.copyString(), pair.value.getBool());
    }
  } catch (velocypack::Exception const&) {
    // exception details unused; the offending file content is logged instead
    LOG_TOPIC(ERR, Logger::STARTUP) << "Cannot parse VERSION file "
                                    << versionInfo;
    return VersionResult{VersionResult::CANNOT_PARSE_VERSION_FILE, 0, 0, tasks};
  }
  // BUGFIX: lastVersion is 64 bit and initialized with UINT64_MAX; the
  // previous assert compared against UINT32_MAX and could never fire
  TRI_ASSERT(lastVersion != UINT64_MAX);
  // BUGFIX: do not narrow to uint32_t; Version::current() and
  // VersionResult::serverVersion are 64 bit
  uint64_t serverVersion = Version::current();
  VersionResult res = {VersionResult::NO_VERSION_FILE, serverVersion,
                       lastVersion, tasks};
  if (lastVersion / 100 == serverVersion / 100) {
    // major and minor match; a differing patch level needs no upgrade
    LOG_TOPIC(DEBUG, Logger::STARTUP) << "version match: last version "
                                      << lastVersion << ", current version "
                                      << serverVersion;
    res.status = VersionResult::VERSION_MATCH;
  } else if (lastVersion > serverVersion) {  // downgrade??
    LOG_TOPIC(DEBUG, Logger::STARTUP) << "downgrade: last version "
                                      << lastVersion << ", current version "
                                      << serverVersion;
    res.status = VersionResult::DOWNGRADE_NEEDED;
  } else if (lastVersion < serverVersion) {  // upgrade
    LOG_TOPIC(DEBUG, Logger::STARTUP) << "upgrade: last version " << lastVersion
                                      << ", current version " << serverVersion;
    res.status = VersionResult::UPGRADE_NEEDED;
  } else {
    // defensive only: equal versions always share major.minor, so this
    // branch should be unreachable
    LOG_TOPIC(ERR, Logger::STARTUP) << "should not happen: last version "
                                    << lastVersion << ", current version "
                                    << serverVersion;
  }
  return res;
}
/// @brief writes the VERSION file for a database, containing the current
/// server version number plus the map of upgrade tasks that have been run
void Version::write(TRI_vocbase_t* vocbase,
                    std::map<std::string, bool> tasks) {
  StorageEngine* engine = EngineSelectorFeature::ENGINE;
  TRI_ASSERT(engine != nullptr);
  std::string const filename = engine->versionFilename(vocbase->id());
  TRI_ASSERT(!filename.empty());

  // no index tables needed, the file is only ever parsed as a whole
  VPackOptions options;
  options.buildUnindexedObjects = true;

  // document shape: { "version": <number>, "tasks": { <name>: <bool>, ... } }
  VPackBuilder b(&options);
  b.openObject(true);
  b.add("version", VPackValue(Version::current()));
  b.add("tasks", VPackValue(VPackValueType::Object));
  for (auto const& entry : tasks) {
    b.add(entry.first, VPackValue(entry.second));
  }
  b.close();  // tasks
  b.close();  // outer object

  std::string const content = b.slice().toJson();
  basics::FileUtils::spit(filename, content.c_str(), content.length());
}

View File

@ -0,0 +1,74 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_VOC_BASE_API_VERSION_H
#define ARANGOD_VOC_BASE_API_VERSION_H 1
#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <cstdint>
#include "Basics/Result.h"
struct TRI_vocbase_t;
namespace arangodb {
namespace methods {
/// not based on Basics/Result because I did not
/// feel like adding these error codes globally.
/// Originally from js/server/database-version.js
struct VersionResult {
  /// @brief outcome of the VERSION file check; positive values are
  /// regular outcomes, negative values indicate errors
  enum StatusCode : int {
    INVALID = 0,                     // default / not yet determined
    VERSION_MATCH = 1,               // database major.minor matches server
    UPGRADE_NEEDED = 2,              // database is older than the server
    DOWNGRADE_NEEDED = 3,            // database is newer than the server
    CANNOT_PARSE_VERSION_FILE = -2,  // file content is not valid JSON
    CANNOT_READ_VERSION_FILE = -3,   // file exists but is empty
    NO_VERSION_FILE = -4,            // VERSION file does not exist
    NO_SERVER_VERSION = -5           // legacy code from database-version.js;
                                     // presumably unused here — confirm
  };
  /// @brief status code
  StatusCode status;
  /// @brief current server version ("(((major*100)+minor)*100)+patch")
  uint64_t serverVersion;
  /// @brief version in VERSION file on disk
  uint64_t databaseVersion;
  /// @brief tasks that were executed (task name -> success flag)
  std::map<std::string, bool> tasks;
};
/// Code to create and initialize databases
/// Replaces upgrade-database.js for good
struct Version {
  /// @brief returns the compiled-in server version encoded as
  /// "(((major * 100) + minor) * 100) + patch"
  static uint64_t current();
  /// @brief read the VERSION file for a database and compare its
  /// version against the running server's version
  static VersionResult check(TRI_vocbase_t*);
  /// @brief write a VERSION file including all tasks
  static void write(TRI_vocbase_t*, std::map<std::string, bool> tasks);
};
}
}
#endif

View File

@ -235,15 +235,16 @@ function performTests (options, testList, testname, runFn, serverOptions, startS
});
}
if (db._graphs.count() !== graphCount) {
let graphs = db._collection('_graphs');
if (graphs && graphs.count() !== graphCount) {
results[te] = {
status: false,
message: 'Cleanup of graphs missing - found graph definitions: [ ' +
JSON.stringify(db._graphs.toArray()) +
JSON.stringify(graphs.toArray()) +
' ] - Original test status: ' +
JSON.stringify(results[te])
};
graphCount = db._graphs.count();
graphCount = graphs.count();
}
if (startStopHandlers !== undefined && startStopHandlers.hasOwnProperty('alive')) {

View File

@ -82,7 +82,7 @@ let optionsDocumentation = [
' - `sanitizer`: if set the programs are run with enabled sanitizer',
' and need longer timeouts',
'',
' - `resilientsingle` starts resilient single server setup (active/passive)',
' - `singleresilient` starts resilient single server setup (active/passive)',
'',
' - `valgrind`: if set the programs are run with the valgrind',
' memory checker; should point to the valgrind executable',
@ -131,7 +131,7 @@ const optionsDefaults = {
'replication': false,
'rr': false,
'sanitizer': false,
'resilientsingle': false,
'singleresilient': false,
'skipLogAnalysis': true,
'skipMemoryIntense': false,
'skipNightly': true,

View File

@ -1,59 +0,0 @@
'use strict';
// //////////////////////////////////////////////////////////////////////////////
// / @brief run cluster bootstrap
// /
// / @file
// /
// / DISCLAIMER
// /
// / Copyright 2016 ArangoDB GmbH, Cologne, Germany
// /
// / Licensed under the Apache License, Version 2.0 (the "License")
// / you may not use this file except in compliance with the License.
// / You may obtain a copy of the License at
// /
// / http://www.apache.org/licenses/LICENSE-2.0
// /
// / Unless required by applicable law or agreed to in writing, software
// / distributed under the License is distributed on an "AS IS" BASIS,
// / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// / See the License for the specific language governing permissions and
// / limitations under the License.
// /
// / Copyright holder is ArangoDB GmbH, Cologne, Germany
// /
// / @author Max Neunhoeffer
// / @author Copyright 2016, ArangoDB GmbH, Cologne, Germany
// //////////////////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////////////////
// / @brief initialize a new database
// //////////////////////////////////////////////////////////////////////////////
(function () {
  const internal = require('internal');
  // arguments consumed by server/upgrade-database.js: a fresh
  // (non-relaunch) coordinator bootstrap, explicitly not an upgrade run
  global.UPGRADE_ARGS = {
    isCluster: true,
    isCoordinator: true,
    isRelaunch: false,
    upgrade: false
  };
  let result = internal.loadStartup('server/upgrade-database.js');
  if (result) {
    // only clear the upgrade type marker on success
    delete global.UPGRADE_TYPE;
  }
  // the script must also have signalled that it actually started
  result = global.UPGRADE_STARTED && result;
  delete global.UPGRADE_STARTED;
  delete global.UPGRADE_ARGS;
  if (!result) {
    console.error('upgrade-database.js for cluster script failed!');
  }
  // publish this server as the Foxx master in the agency
  global.ArangoAgency.set('Current/Foxxmaster', global.ArangoServerState.id());
  return result;
}());

View File

@ -1,51 +0,0 @@
'use strict';
// //////////////////////////////////////////////////////////////////////////////
// / @brief initialize a new database
// /
// / @file
// /
// / DISCLAIMER
// /
// / Copyright 2014 ArangoDB GmbH, Cologne, Germany
// /
// / Licensed under the Apache License, Version 2.0 (the "License")
// / you may not use this file except in compliance with the License.
// / You may obtain a copy of the License at
// /
// / http://www.apache.org/licenses/LICENSE-2.0
// /
// / Unless required by applicable law or agreed to in writing, software
// / distributed under the License is distributed on an "AS IS" BASIS,
// / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// / See the License for the specific language governing permissions and
// / limitations under the License.
// /
// / Copyright holder is ArangoDB GmbH, Cologne, Germany
// /
// / @author Dr. Frank Celler
// / @author Copyright 2014, ArangoDB GmbH, Cologne, Germany
// //////////////////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////////////////
// / @brief initialize a new database
// //////////////////////////////////////////////////////////////////////////////
(function () {
  const internal = require('internal');
  // do not overwrite arguments set from the calling C++ code
  if (!global.UPGRADE_ARGS) {
    global.UPGRADE_ARGS = {};
  }
  // merge in our arguments: coordinator-side, non-relaunch initialization
  global.UPGRADE_ARGS.isCluster = true;
  global.UPGRADE_ARGS.isCoordinator = true;
  global.UPGRADE_ARGS.isRelaunch = false;
  // run the local upgrade-database script
  const res = internal.loadStartup('server/bootstrap/local-database.js');
  return res;
}());

View File

@ -1,63 +0,0 @@
/* jshint -W051:true */
'use strict';
// //////////////////////////////////////////////////////////////////////////////
// / @brief initialize a new database
// /
// / @file
// /
// / DISCLAIMER
// /
// / Copyright 2014 ArangoDB GmbH, Cologne, Germany
// /
// / Licensed under the Apache License, Version 2.0 (the "License")
// / you may not use this file except in compliance with the License.
// / You may obtain a copy of the License at
// /
// / http://www.apache.org/licenses/LICENSE-2.0
// /
// / Unless required by applicable law or agreed to in writing, software
// / distributed under the License is distributed on an "AS IS" BASIS,
// / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// / See the License for the specific language governing permissions and
// / limitations under the License.
// /
// / Copyright holder is ArangoDB GmbH, Cologne, Germany
// /
// / @author Dr. Frank Celler
// / @author Copyright 2014, ArangoDB GmbH, Cologne, Germany
// //////////////////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////////////////
// / @brief initialize a new database
// //////////////////////////////////////////////////////////////////////////////
(function () {
  const internal = require('internal');
  // note: this script will only be called and executed in V8 context #0
  if (internal.threadNumber === 0) {
    // run the local upgrade-database script (global.UPGRADE_ARGS has been set)
    global.UPGRADE_ARGS = {
      isCluster: true,
      isDbServer: true,
      isRelaunch: false
    };
    let result = internal.loadStartup('server/upgrade-database.js');
    if (result) {
      // only clear the upgrade type marker on success
      delete global.UPGRADE_TYPE;
    }
    // the script must also have signalled that it actually started
    result = global.UPGRADE_STARTED && result;
    delete global.UPGRADE_STARTED;
    delete global.UPGRADE_ARGS;
    if (!result) {
      console.error('upgrade-database.js script failed!');
    }
    console.info('bootstrapped DB server %s', global.ArangoServerState.id());
  }
  // other V8 contexts do nothing and report success
  return true;
}());

View File

@ -1,56 +0,0 @@
/* jshint -W051:true */
'use strict';
////////////////////////////////////////////////////////////////////////////////
// @brief initialize a new database
//
// @file
//
// DISCLAIMER
//
// Copyright 2014 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License")
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// @author Dr. Frank Celler
// @author Copyright 2014, ArangoDB GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
(function () {
  var internal = require('internal');
  // NOTE(review): console/db appear unused below — confirm before removal
  var console = require('console');
  var db = internal.db;
  // run the upgrade-database script
  var result = internal.loadStartup('server/upgrade-database.js');
  if (result) {
    // only clear the upgrade type marker on success
    delete global.UPGRADE_TYPE;
  }
  // the script must also have signalled that it actually started
  result = global.UPGRADE_STARTED && result;
  delete global.UPGRADE_STARTED;
  delete global.UPGRADE_ARGS;
  // expire database cache
  try {
    global.KEY_SET('queue-control', 'databases-expire', 0);
  } catch (err) {
    // it is of no real importance if cache invalidation fails, because
    // the cache entry has a ttl
  }
  return result;
}());

View File

@ -1,44 +0,0 @@
'use strict';
////////////////////////////////////////////////////////////////////////////////
/// @brief check if the version of database is a match
///
/// @file
///
/// Version check at the start of the server, will optionally perform necessary
/// upgrades.
///
/// DISCLAIMER
///
/// Copyright 2014 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
/// @author Copyright 2014, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
(function() {
  try {
    console.debug("checking database version");
    // status code comes from @arangodb/database-version's exported constants
    return require("@arangodb/database-version").databaseVersion().result;
  } catch (err) {
    console.error("database version check failed: " + err);
    // falls through: returns undefined on error
  }
}());

View File

@ -30,28 +30,9 @@
// //////////////////////////////////////////////////////////////////////////////
var cluster = require('@arangodb/cluster');
var fs = require('fs');
var db = require('@arangodb').db;
var console = require('console');
// //////////////////////////////////////////////////////////////////////////////
// / @brief logger
// //////////////////////////////////////////////////////////////////////////////
var logger = {
  // informational message, prefixed with the current database name
  info: function (msg) {
    console.log("In database '%s': %s", db._name(), msg);
  },
  // error message, prefixed with the current database name
  error: function (msg) {
    console.error("In database '%s': %s", db._name(), msg);
  },
  // alias for info()
  log: function (msg) {
    this.info(msg);
  }
};
// //////////////////////////////////////////////////////////////////////////////
// / @brief CURRENT_VERSION
// //////////////////////////////////////////////////////////////////////////////
@ -65,145 +46,3 @@ exports.CURRENT_VERSION = (function () {
return (((major * 100) + minor) * 100) + patch;
}());
// status codes returned by databaseVersion(); positive values are regular
// outcomes, negative values indicate errors.
// NOTE(review): DOWNGRADE_NEEDED/UPGRADE_NEEDED values are swapped
// relative to the C++ VersionResult::StatusCode enum — confirm intended.
// database version matches the server version
exports.VERSION_MATCH = 1;
// the database on disk is newer than the server
exports.DOWNGRADE_NEEDED = 2;
// the database on disk is older than the server
exports.UPGRADE_NEEDED = 3;
// check skipped: running on a coordinator (no local VERSION file)
exports.IS_CLUSTER = -1;
// VERSION file exists but cannot be parsed
exports.CANNOT_PARSE_VERSION_FILE = -2;
// VERSION file exists but is empty
exports.CANNOT_READ_VERSION_FILE = -3;
// no VERSION file found
exports.NO_VERSION_FILE = -4;
// server version could not be determined
exports.NO_SERVER_VERSION = -5;
// //////////////////////////////////////////////////////////////////////////////
// / @brief checks the version of the database on disk against the server
// / version; returns an object with `result` set to one of the status
// / codes exported above (plus serverVersion/databaseVersion on a
// / conclusive comparison)
// //////////////////////////////////////////////////////////////////////////////
exports.databaseVersion = function () {
  if (cluster.isCoordinator()) {
    // coordinators have no local VERSION file to check
    console.debug('skip on corrdinator');
    return {
      result: exports.IS_CLUSTER
    };
  }
  // path to the VERSION file
  let versionFile = db._versionFilename();
  var lastVersion = null;
  // VERSION file exists, read its contents
  if (fs.exists(versionFile)) {
    var versionInfo = fs.read(versionFile);
    console.debug('found version file: ' + versionInfo);
    if (versionInfo !== '') {
      var versionValues = JSON.parse(versionInfo);
      if (versionValues && versionValues.version && !isNaN(versionValues.version)) {
        lastVersion = parseFloat(versionValues.version);
      } else {
        logger.error("Cannot parse VERSION file '" + versionFile + "': '" + versionInfo + "'");
        return {
          result: exports.CANNOT_PARSE_VERSION_FILE
        };
      }
    } else {
      logger.error("Cannot read VERSION file: '" + versionFile + "'");
      return {
        result: exports.CANNOT_READ_VERSION_FILE
      };
    }
  } else {
    console.debug('version file (' + versionFile + ') not found');
    return {
      result: exports.NO_VERSION_FILE
    };
  }
  // extract server version
  var currentVersion = exports.CURRENT_VERSION;
  // version match! (dividing by 100 drops the patch level, so only
  // major.minor are compared)
  if (Math.floor(lastVersion / 100) === Math.floor(currentVersion / 100)) {
    console.debug('version match: last version ' + lastVersion +
      ', current version ' + currentVersion);
    return {
      result: exports.VERSION_MATCH,
      serverVersion: currentVersion,
      databaseVersion: lastVersion
    };
  }
  // downgrade??
  if (lastVersion > currentVersion) {
    console.debug('downgrade: last version ' + lastVersion +
      ', current version ' + currentVersion);
    return {
      result: exports.DOWNGRADE_NEEDED,
      serverVersion: currentVersion,
      databaseVersion: lastVersion
    };
  }
  // upgrade
  if (lastVersion < currentVersion) {
    console.debug('upgrade: last version ' + lastVersion +
      ', current version ' + currentVersion);
    return {
      result: exports.UPGRADE_NEEDED,
      serverVersion: currentVersion,
      databaseVersion: lastVersion
    };
  }
  // defensive fallback; should be unreachable given the checks above
  console.error('should not happen: last version ' + lastVersion +
    ', current version ' + currentVersion);
  return {
    result: exports.NO_VERSION_FILE
  };
};

View File

@ -1,878 +0,0 @@
/* jshint -W051:true, -W069:true */
'use strict';
// //////////////////////////////////////////////////////////////////////////////
// / Version check at the start of the server, will optionally perform necessary
// / upgrades.
// /
// / If you add any task here, please update the database version in
// / @arangodb/database-version.js.
// /
// / DISCLAIMER
// /
// / Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
// / Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
// /
// / Licensed under the Apache License, Version 2.0 (the "License")
// / you may not use this file except in compliance with the License.
// / You may obtain a copy of the License at
// /
// / http://www.apache.org/licenses/LICENSE-2.0
// /
// / Unless required by applicable law or agreed to in writing, software
// / distributed under the License is distributed on an "AS IS" BASIS,
// / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// / See the License for the specific language governing permissions and
// / limitations under the License.
// /
// / Copyright holder is ArangoDB GmbH, Cologne, Germany
// /
// / @author Jan Steemann
// / @author Copyright 2014, triAGENS GmbH, Cologne, Germany
// //////////////////////////////////////////////////////////////////////////////
(function () {
var args = global.UPGRADE_ARGS;
delete global.UPGRADE_ARGS;
const internal = require('internal');
const fs = require('fs');
const console = require('console');
const userManager = require('@arangodb/users');
const currentVersion = require('@arangodb/database-version').CURRENT_VERSION;
const db = internal.db;
const shallowCopy = require('@arangodb/util').shallowCopy;
const errors = internal.errors;
function upgrade () {
// default replication factor for system collections
const DEFAULT_REPLICATION_FACTOR_SYSTEM = internal.DEFAULT_REPLICATION_FACTOR_SYSTEM;
// system database only
const DATABASE_SYSTEM = 1000;
// all databases
const DATABASE_ALL = 1001;
// all databases expect system
const DATABASE_EXCEPT_SYSTEM = 1002;
// for stand-alone, no cluster
const CLUSTER_NONE = 2000;
// for cluster local part
const CLUSTER_LOCAL = 2001;
// for cluster global part (shared collections)
const CLUSTER_COORDINATOR_GLOBAL = 2002;
// db server global part (DB server local!)
const CLUSTER_DB_SERVER_LOCAL = 2003;
// for new databases
const DATABASE_INIT = 3000;
// for existing database, which must be upgraded
const DATABASE_UPGRADE = 3001;
// for existing database, which are already at the correct version
const DATABASE_EXISTING = 3002;
// constant to name
const constant2name = {};
constant2name[DATABASE_SYSTEM] = 'system-database';
constant2name[DATABASE_ALL] = 'any-database';
constant2name[CLUSTER_NONE] = 'standalone';
constant2name[CLUSTER_LOCAL] = 'cluster-local';
constant2name[CLUSTER_COORDINATOR_GLOBAL] = 'coordinator-global';
constant2name[CLUSTER_DB_SERVER_LOCAL] = 'db-server-local';
constant2name[DATABASE_INIT] = 'init';
constant2name[DATABASE_UPGRADE] = 'upgrade';
constant2name[DATABASE_EXISTING] = 'existing';
// path to version file
let versionFile = internal.db._versionFilename();
// all defined tasks
const allTasks = [];
// tasks of the last run
let lastTasks = {};
// version of the database
let lastVersion = null;
// special logger that prefixes every message with the database name;
// note: info() deliberately logs at debug level
const logger = {
  info: function (msg) {
    console.debug('In database "%s": %s', db._name(), msg);
  },
  error: function (msg) {
    console.error('In database "%s": %s', db._name(), msg);
  },
  errorLines: function (msg) {
    console.errorLines('In database "%s": %s', db._name(), msg);
  },
  warn: function (msg) {
    console.warn('In database "%s": %s', db._name(), msg);
  },
  // alias for info()
  log: function (msg) {
    this.info(msg);
  }
};
// returns the collection with the given name from the current database
// (the original comment "runs the collection" was misleading)
function getCollection (name) {
  return db._collection(name);
}
// returns true iff a collection with exactly this name is present
// in the current database
function collectionExists (name) {
  const found = getCollection(name);
  if (found === undefined || found === null) {
    return false;
  }
  return found.name() === name;
}
// creates a system collection with the given attributes; returns true if the
// collection exists afterwards (it may already have existed beforehand)
function createSystemCollection (name, attributes) {
  if (collectionExists(name)) {
    // nothing to do, collection is already there
    return true;
  }
  const effectiveAttributes = attributes || {};
  effectiveAttributes.isSystem = true;
  if (db._create(name, effectiveAttributes)) {
    return true;
  }
  // creation did not report success; re-check in case of a race
  return collectionExists(name);
}
// //////////////////////////////////////////////////////////////////////////////
// adds a task; registration order defines execution order
// /
// / task has the following attributes:
// /
// / "name" is the name of the task
// / "description" is a textual description of the task that will be printed out on screen
// / "system": system or any database
// / "cluster": list of cluster states (standalone, local, global)
// / "database": init, upgrade, existing
// / "task" is the task function itself; it must return a truthy value on success
// //////////////////////////////////////////////////////////////////////////////
function addTask (task) {
  allTasks.push(task);
}
// selects from allTasks those tasks matching the given cluster state and
// database procedure, executes them in registration order, and persists
// progress into the VERSION file after each successful task (local servers
// only). Returns true on success, false if any task failed.
// NOTE: the "lastVersion" parameter shadows the outer "lastVersion" variable.
function runTasks (cluster, database, lastVersion) {
  const activeTasks = [];
  let i;
  let j;
  let task;
  // we have a local database on disk
  const isLocal = (cluster === CLUSTER_NONE || cluster === CLUSTER_LOCAL);
  // collect the tasks to execute
  for (i = 0; i < allTasks.length; ++i) {
    task = allTasks[i];
    // check for system database
    if (task.system === DATABASE_SYSTEM && db._name() !== '_system') {
      continue;
    }
    if (task.system === DATABASE_EXCEPT_SYSTEM && db._name() === '_system') {
      continue;
    }
    // check that the cluster occurs in the cluster list
    const clusterArray = task.cluster;
    let match = false;
    for (j = 0; j < clusterArray.length; ++j) {
      if (clusterArray[j] === cluster) {
        match = true;
      }
    }
    if (!match) {
      continue;
    }
    // check that the database occurs in the database list
    const databaseArray = task.database;
    match = false;
    for (j = 0; j < databaseArray.length; ++j) {
      if (databaseArray[j] === database) {
        match = true;
      }
    }
    // special optimisation: for local server and new database,
    // an upgrade-only task can be viewed as executed.
    if (!match) {
      if (isLocal && database === DATABASE_INIT && databaseArray.length === 1 &&
        databaseArray[0] === DATABASE_UPGRADE) {
        lastTasks[task.name] = true;
      }
      continue;
    }
    // we need to execute this task, unless a previous run already did
    if (!lastTasks[task.name]) {
      activeTasks.push(task);
    }
  }
  if (activeTasks.length > 0) {
    logger.info('Found ' + allTasks.length + ' defined task(s), ' +
      activeTasks.length + ' task(s) to run');
    logger.info('state ' + constant2name[cluster] + '/' +
      constant2name[database] + ', tasks ' + activeTasks.map(function (a) {
        return a.name;
      }).join(', '));
  } else {
    logger.info('Database is up-to-date (' + (lastVersion || '-') +
      '/' + constant2name[cluster] + '/' + constant2name[database] + ')');
  }
  // human-readable name of the procedure, for log messages only
  let procedure = 'unknown';
  if (database === DATABASE_INIT) {
    procedure = 'init';
  } else if (database === DATABASE_UPGRADE) {
    procedure = 'upgrade';
  } else if (database === DATABASE_EXISTING) {
    procedure = 'existing cleanup';
  }
  for (i = 0; i < activeTasks.length; ++i) {
    task = activeTasks[i];
    const taskName = 'task #' + (i + 1) + ' (' + task.name + ': ' + task.description + ')';
    // assume failure
    let result = false;
    // execute task (might have already been executed)
    try {
      if (lastTasks[task.name]) {
        result = true;
      } else {
        result = task.task();
      }
    } catch (err) {
      logger.errorLines('Executing ' + taskName + ' failed with exception: ' +
        String(err) + ' ' +
        String(err.stack || ''));
    }
    // success
    if (result) {
      lastTasks[task.name] = true;
      // save/update version info after every task so a crash does not
      // repeat already-finished tasks; keeps the old version number while
      // tasks are still in progress
      // NOTE(review): "true" is not a valid replacer/space argument for
      // standard JSON.stringify — presumably meant as a pretty-print flag;
      // verify against the embedded JS engine's behavior
      if (isLocal) {
        fs.write(
          versionFile,
          JSON.stringify({
            version: lastVersion,
            tasks: lastTasks
          }, true));
      }
    } else {
      logger.error('Executing ' + taskName + ' failed. Aborting ' + procedure + ' procedure.');
      logger.error('Please fix the problem and try starting the server again.');
      return false;
    }
  }
  // save file so version gets saved even if there are no tasks;
  // only now the (outer-scope) currentVersion is recorded
  if (isLocal) {
    fs.write(
      versionFile,
      JSON.stringify({
        version: currentVersion,
        tasks: lastTasks
      }, true));
  }
  if (activeTasks.length > 0) {
    logger.info(procedure + ' successfully finished');
  }
  // successfully finished
  return true;
}
// upgrade or initialize the database: determines the cluster role, reads the
// VERSION file (local servers only), decides between init / upgrade /
// existing procedures, sets global.UPGRADE_TYPE accordingly, and delegates to
// runTasks(). Returns true if the server may start, false otherwise.
function upgradeDatabase () {
  // determine the cluster state
  let cluster;
  if (global.ArangoAgency.prefix() === '' ||
    global.ArangoServerState.role() === 'SINGLE') {
    cluster = CLUSTER_NONE;
  } else {
    if (args.isCluster) {
      if (args.isDbServer) {
        cluster = CLUSTER_DB_SERVER_LOCAL;
      } else {
        cluster = CLUSTER_COORDINATOR_GLOBAL;
      }
    } else {
      cluster = CLUSTER_LOCAL;
    }
  }
  // CLUSTER_COORDINATOR_GLOBAL is special, init or upgrade are passed in from the dispatcher
  if (cluster === CLUSTER_DB_SERVER_LOCAL || cluster === CLUSTER_COORDINATOR_GLOBAL) {
    if (args.isRelaunch) {
      return runTasks(cluster, DATABASE_UPGRADE);
    }
    return runTasks(cluster, DATABASE_INIT);
  }
  // VERSION file exists, read its contents
  if (fs.exists(versionFile)) {
    // was: `var` — use `let` for consistency with the rest of the file
    let versionInfo = fs.read(versionFile);
    if (versionInfo === '') {
      logger.warn('VERSION file "' + versionFile + '" is empty. Creating new default VERSION file');
      versionInfo = '{"version":' + currentVersion + ',"tasks":[]}';
      // return false;
    }
    const versionValues = JSON.parse(versionInfo);
    if (versionValues && versionValues.hasOwnProperty('version') && !isNaN(versionValues.version)) {
      lastVersion = parseFloat(versionValues.version);
    } else {
      // version entry missing or not numeric: refuse to start
      return false;
    }
    if (versionValues && versionValues.tasks && typeof (versionValues.tasks) === 'object') {
      lastTasks = versionValues.tasks || {};
    } else {
      // tasks entry missing or malformed: refuse to start
      return false;
    }
    // same version? compare major/minor only (version encodes patch level
    // in the last two digits); 3.0.0 -> 3.0.1 also counts as "same"
    const lv = Math.floor(lastVersion / 100);
    const cv = Math.floor(currentVersion / 100);
    if (lv === cv || (lv === 300 && cv === 301)) {
      global.UPGRADE_TYPE = 1;
      return runTasks(cluster, DATABASE_EXISTING, lastVersion);
    }
    // downgrade??
    if (lastVersion > currentVersion) {
      global.UPGRADE_TYPE = 2;
      logger.error('Database directory version (' + lastVersion +
        ') is higher than current version (' + currentVersion + ').');
      logger.error('It seems like you are running ArangoDB on a database directory' +
        ' that was created with a newer version of ArangoDB. Maybe this' +
        ' is what you wanted but it is not supported by ArangoDB.');
      // still, allow the start
      return true;
    }
    // upgrade??
    if (lastVersion < currentVersion) {
      if (args && args.upgrade) {
        global.UPGRADE_TYPE = 3;
        return runTasks(cluster, DATABASE_UPGRADE, lastVersion);
      }
      global.UPGRADE_TYPE = 4;
      logger.error('Database directory version (' + lastVersion +
        ') is lower than current version (' + currentVersion + ').');
      logger.error('----------------------------------------------------------------------');
      logger.error('It seems like you have upgraded the ArangoDB binary.');
      logger.error('If this is what you wanted to do, please restart with the');
      logger.error('  --database.auto-upgrade true');
      logger.error('option to upgrade the data in the database directory.');
      logger.error('Normally you can use the control script to upgrade your database');
      logger.error('  /etc/init.d/arangodb stop');
      logger.error('  /etc/init.d/arangodb upgrade');
      logger.error('  /etc/init.d/arangodb start');
      logger.error('----------------------------------------------------------------------');
      // do not start unless started with --database.auto-upgrade
      return false;
    }
    // we should never get here
    return false;
  } else {
    // no VERSION file found
    global.UPGRADE_TYPE = 5;
  }
  // VERSION file does not exist, we are running on a new database
  logger.info('No version information file found in database directory.');
  return runTasks(cluster, DATABASE_INIT, currentVersion);
}
// register task: make sure the shared _graphs system collection exists
addTask({
  name: 'setupGraphs',
  description: 'setup _graphs collection',
  system: DATABASE_ALL,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT, DATABASE_UPGRADE, DATABASE_EXISTING],
  task: function () {
    const attributes = {
      waitForSync: false,
      journalSize: 1024 * 1024,
      replicationFactor: DEFAULT_REPLICATION_FACTOR_SYSTEM
    };
    return createSystemCollection('_graphs', attributes);
  }
});
// register task: make sure the _users system collection exists
// (system database only; sharded by "user", co-located with _graphs)
addTask({
  name: 'setupUsers',
  description: 'setup _users collection',
  system: DATABASE_SYSTEM,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT, DATABASE_UPGRADE, DATABASE_EXISTING],
  task: function () {
    const attributes = {
      waitForSync: false,
      shardKeys: ['user'],
      journalSize: 4 * 1024 * 1024,
      replicationFactor: DEFAULT_REPLICATION_FACTOR_SYSTEM,
      distributeShardsLike: '_graphs'
    };
    return createSystemCollection('_users', attributes);
  }
});
// register task: unique sparse hash index on "user" in _users
addTask({
  name: 'createUsersIndex',
  description: 'create index on "user" attribute in _users collection',
  system: DATABASE_SYSTEM,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT, DATABASE_UPGRADE],
  task: function () {
    const users = getCollection('_users');
    if (!users) {
      return false;
    }
    const indexDefinition = {
      type: 'hash',
      fields: ['user'],
      unique: true,
      sparse: true
    };
    users.ensureIndex(indexDefinition);
    return true;
  }
});
// add users defined for this database (non-system databases, init only);
// user accounts live in the _system database, so we temporarily switch there
addTask({
  name: 'addDefaultUserOther',
  description: 'add default users',
  system: DATABASE_EXCEPT_SYSTEM,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT],
  task: function () {
    const oldDbname = db._name();
    try {
      db._useDatabase('_system');
      if (args && args.users) {
        args.users.forEach(function (user) {
          // best-effort: a failure for one user must not abort the task
          try {
            if (!userManager.exists(user.username)) {
              userManager.save(user.username, user.passwd, user.active, user.extra || {});
            }
          } catch (err) {
            logger.warn('could not add database user "' + user.username + '": ' +
              String(err) + ' ' +
              String(err.stack || ''));
          }
          // grant access to the database the task was started for
          try {
            userManager.grantDatabase(user.username, oldDbname, 'rw');
            userManager.grantCollection(user.username, oldDbname, '*', 'rw');
          } catch (err) {
            logger.warn('could not grant access to database user "' + user.username + '": ' +
              String(err) + ' ' +
              String(err.stack || ''));
          }
        });
      }
      return true;
    } finally {
      // always switch back to the original database
      db._useDatabase(oldDbname);
    }
  }
});
// convert documents in the _users collection to the new format that carries a
// per-database permissions object ("databases"); documents already in the new
// format are left untouched. Uses const/let instead of the previous var
// declarations for consistency with the rest of the file.
addTask({
  name: 'updateUserModels',
  description: 'convert documents in _users collection to new format',
  system: DATABASE_SYSTEM,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_UPGRADE, DATABASE_EXISTING],
  task: function () {
    const users = getCollection('_users');
    if (!users) {
      return false;
    }
    const results = users.all().toArray().map(function (oldDoc) {
      if (!oldDoc.hasOwnProperty('databases') || oldDoc.databases === null) {
        // old-format document: attach the "databases" permissions object
        const data = shallowCopy(oldDoc);
        data.databases = {};
        if (oldDoc.user === 'root') {
          // root keeps access to all databases
          data.databases['*'] = 'rw';
        } else {
          data.databases['_system'] = 'rw';
        }
        const result = users.replace(oldDoc, data);
        return !result.errors;
      }
      // already in the new format
      return true;
    });
    // succeed only if every document was converted (or needed no conversion)
    return results.every(Boolean);
  }
});
// register task: make sure the _modules system collection exists
addTask({
  name: 'createModules',
  description: 'setup _modules collection',
  system: DATABASE_ALL,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT, DATABASE_UPGRADE, DATABASE_EXISTING],
  task: function () {
    const attributes = {
      journalSize: 1024 * 1024,
      replicationFactor: DEFAULT_REPLICATION_FACTOR_SYSTEM,
      distributeShardsLike: '_graphs'
    };
    return createSystemCollection('_modules', attributes);
  }
});
// register task: make sure the _iresearch_analyzers system collection exists
addTask({
  name: 'setupAnalyzers',
  description: 'setup _iresearch_analyzers collection',
  system: DATABASE_SYSTEM,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT, DATABASE_UPGRADE, DATABASE_EXISTING],
  task: function () {
    const attributes = {
      waitForSync: false,
      journalSize: 4 * 1024 * 1024,
      replicationFactor: DEFAULT_REPLICATION_FACTOR_SYSTEM,
      distributeShardsLike: '_graphs'
    };
    return createSystemCollection('_iresearch_analyzers', attributes);
  }
});
// register task: make sure the _routing system collection exists
addTask({
  name: 'createRouting',
  description: 'setup _routing collection',
  system: DATABASE_ALL,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT, DATABASE_UPGRADE, DATABASE_EXISTING],
  task: function () {
    // journal needs to be big enough for assets
    const attributes = {
      journalSize: 4 * 1024 * 1024,
      replicationFactor: DEFAULT_REPLICATION_FACTOR_SYSTEM,
      distributeShardsLike: '_graphs'
    };
    return createSystemCollection('_routing', attributes);
  }
});
// insert default routes for the admin interface: removes outdated redirects
// and installs redirects from "/", "/_admin/html" and "/_admin/html/index.html"
// to the aardvark web interface
addTask({
  name: 'insertRedirectionsAll',
  description: 'insert default routes for admin interface',
  system: DATABASE_ALL,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT, DATABASE_UPGRADE],
  task: function () {
    const routing = getCollection('_routing');
    if (!routing) {
      return false;
    }
    // first, check for "old" redirects
    routing.toArray().forEach(function (doc) {
      // check for specific redirects
      if (doc.url && doc.action && doc.action.options &&
        doc.action.options.destination) {
        // only touch routes that point at the old html/aardvark locations
        if (doc.url.match(/^\/(_admin\/(html|aardvark))?/) &&
          doc.action.options.destination.match(/_admin\/(html|aardvark)/)) {
          // remove old, non-working redirect
          routing.remove(doc);
        }
      }
    });
    // add redirections to new location
    ['/', '/_admin/html', '/_admin/html/index.html'].forEach(function (src) {
      try {
        routing.save({
          url: src,
          action: {
            'do': '@arangodb/actions/redirectRequest',
            options: {
              permanently: true,
              destination: '/_db/' + db._name() + '/_admin/aardvark/index.html'
            }
          },
          // low priority so user-defined routes take precedence
          priority: -1000000
        });
      } catch (err) {
        // ignore unique constraint violations here
        // (the redirect already exists, e.g. from a previous run)
        if (err.errorNum !== errors.ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED.code) {
          // rethrow all other types of errors
          throw err;
        }
      }
    });
    return true;
  }
});
// register task: make sure the _aqlfunctions system collection exists
addTask({
  name: 'setupAqlFunctions',
  description: 'setup _aqlfunctions collection',
  system: DATABASE_ALL,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT, DATABASE_UPGRADE, DATABASE_EXISTING],
  task: function () {
    const attributes = {
      journalSize: 1 * 1024 * 1024,
      replicationFactor: DEFAULT_REPLICATION_FACTOR_SYSTEM,
      distributeShardsLike: '_graphs'
    };
    return createSystemCollection('_aqlfunctions', attributes);
  }
});
// register task: make sure the _frontend system collection exists
addTask({
  name: 'createFrontend',
  description: 'setup _frontend collection',
  system: DATABASE_ALL,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT, DATABASE_UPGRADE, DATABASE_EXISTING],
  task: function () {
    const attributes = {
      waitForSync: false,
      journalSize: 1024 * 1024,
      replicationFactor: DEFAULT_REPLICATION_FACTOR_SYSTEM,
      distributeShardsLike: '_graphs'
    };
    return createSystemCollection('_frontend', attributes);
  }
});
// register task: make sure the _queues system collection exists
addTask({
  name: 'setupQueues',
  description: 'setup _queues collection',
  system: DATABASE_ALL,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT, DATABASE_UPGRADE, DATABASE_EXISTING],
  task: function () {
    const attributes = {
      journalSize: 1024 * 1024,
      replicationFactor: DEFAULT_REPLICATION_FACTOR_SYSTEM,
      distributeShardsLike: '_graphs'
    };
    return createSystemCollection('_queues', attributes);
  }
});
// register task: make sure the _jobs system collection exists
addTask({
  name: 'setupJobs',
  description: 'setup _jobs collection',
  system: DATABASE_ALL,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT, DATABASE_UPGRADE, DATABASE_EXISTING],
  task: function () {
    const attributes = {
      journalSize: 2 * 1024 * 1024,
      replicationFactor: DEFAULT_REPLICATION_FACTOR_SYSTEM,
      distributeShardsLike: '_graphs'
    };
    return createSystemCollection('_jobs', attributes);
  }
});
// register task: two unique skiplist indexes on _jobs, one per lookup order
addTask({
  name: 'createJobsIndex',
  description: 'create index on attributes in _jobs collection',
  system: DATABASE_ALL,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT, DATABASE_UPGRADE],
  task: function () {
    const jobs = getCollection('_jobs');
    if (!jobs) {
      return false;
    }
    const fieldCombinations = [
      ['queue', 'status', 'delayUntil'],
      ['status', 'queue', 'delayUntil']
    ];
    fieldCombinations.forEach(function (fields) {
      jobs.ensureIndex({
        type: 'skiplist',
        fields: fields,
        unique: true,
        sparse: false
      });
    });
    return true;
  }
});
// register task: make sure the _apps system collection exists
addTask({
  name: 'setupApps',
  description: 'setup _apps collection',
  system: DATABASE_ALL,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT, DATABASE_UPGRADE, DATABASE_EXISTING],
  task: function () {
    const attributes = {
      journalSize: 2 * 1024 * 1024,
      replicationFactor: DEFAULT_REPLICATION_FACTOR_SYSTEM,
      distributeShardsLike: '_graphs'
    };
    return createSystemCollection('_apps', attributes);
  }
});
// register task: unique hash index on "mount" in _apps
addTask({
  name: 'createAppsIndex',
  description: 'create index on attributes in _apps collection',
  system: DATABASE_ALL,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT, DATABASE_UPGRADE],
  task: function () {
    const apps = getCollection('_apps');
    if (!apps) {
      return false;
    }
    const indexDefinition = {
      type: 'hash',
      fields: ['mount'],
      unique: true,
      sparse: false
    };
    apps.ensureIndex(indexDefinition);
    return true;
  }
});
// register task: make sure the _appbundles system collection exists
addTask({
  name: 'setupAppBundles',
  description: 'setup _appbundles collection',
  system: DATABASE_ALL,
  cluster: [CLUSTER_NONE, CLUSTER_COORDINATOR_GLOBAL],
  database: [DATABASE_INIT, DATABASE_UPGRADE, DATABASE_EXISTING],
  task: function () {
    const attributes = {
      journalSize: 2 * 1024 * 1024,
      replicationFactor: DEFAULT_REPLICATION_FACTOR_SYSTEM,
      distributeShardsLike: '_graphs'
    };
    return createSystemCollection('_appbundles', attributes);
  }
});
return upgradeDatabase();
}
// set this global variable to inform the server we actually got until here...
global.UPGRADE_STARTED = true;
// 0 = undecided
// 1 = same version
// 2 = downgrade
// 3 = upgrade
// 4 = requires upgrade
// 5 = no version found
global.UPGRADE_TYPE = 0;
// and run the upgrade
return upgrade();
}());

View File

@ -30,7 +30,6 @@ if [ "x$@" == "x" ] ; then
./js/common/modules/jsunity.js \
./js/client/client.js \
./js/server/server.js \
./js/server/upgrade-database.js \
\
"
if [ -d ./enterprise ] ; then