diff --git a/3rdParty/velocypack/include/velocypack/Builder.h b/3rdParty/velocypack/include/velocypack/Builder.h index e46a36ec44..57cd92a967 100644 --- a/3rdParty/velocypack/include/velocypack/Builder.h +++ b/3rdParty/velocypack/include/velocypack/Builder.h @@ -106,9 +106,15 @@ class Builder { checkOverflow(_pos + len); #endif - _buffer->prealloc(len); - _start = _buffer->data(); - _size = _buffer->size(); + // copy builder pointer into local variable + // this avoids accessing the shared pointer repeatedly, which has + // a small but non-negligible cost + Buffer* buffer = _buffer.get(); + VELOCYPACK_ASSERT(buffer != nullptr); + + buffer->prealloc(len); + _start = buffer->data(); + _size = buffer->size(); } // Sort the indices by attribute name: diff --git a/3rdParty/velocypack/src/Parser.cpp b/3rdParty/velocypack/src/Parser.cpp index 1232f218aa..5d9fd76522 100644 --- a/3rdParty/velocypack/src/Parser.cpp +++ b/3rdParty/velocypack/src/Parser.cpp @@ -233,9 +233,15 @@ void Parser::parseString() { // insert 8 bytes for the length as soon as we reach 127 bytes // in the VPack representation. - ValueLength const base = _b->_pos; - _b->reserveSpace(1); - _b->_start[_b->_pos++] = 0x40; // correct this later + // copy builder pointer into local variable + // this avoids accessing the shared pointer repeatedly, which has + // a small but non-negligible cost + Builder* builder = _b.get(); + VELOCYPACK_ASSERT(builder != nullptr); + + ValueLength const base = builder->_pos; + builder->reserveSpace(1); + builder->_start[builder->_pos++] = 0x40; // correct this later bool large = false; // set to true when we reach 128 bytes uint32_t highSurrogate = 0; // non-zero if high-surrogate was seen @@ -243,42 +249,42 @@ void Parser::parseString() { while (true) { size_t remainder = _size - _pos; if (remainder >= 16) { - _b->reserveSpace(remainder); + builder->reserveSpace(remainder); size_t count; // Note that the SSE4.2 accelerated string copying functions might // peek up to 15 bytes over the given end, because they use 128bit // registers. Therefore, we have to subtract 15 from remainder // to be on the safe side. Further bytes will be processed below. 
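// (note: the `builder` local used below is the raw pointer cached from the
// std::shared_ptr `_b` at the top of parseString(); going through `_b` for
// every write would re-read the pointer out of the shared_ptr each time,
// which is the "small but non-negligible cost" the comment above refers to.
// The raw pointer stays valid because `_b` keeps owning the Builder for the
// whole call.)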
if (options->validateUtf8Strings) {
- count = JSONStringCopyCheckUtf8(_b->_start + _b->_pos, _start + _pos,
+ count = JSONStringCopyCheckUtf8(builder->_start + builder->_pos, _start + _pos,
remainder - 15);
} else {
- count = JSONStringCopy(_b->_start + _b->_pos, _start + _pos,
+ count = JSONStringCopy(builder->_start + builder->_pos, _start + _pos,
remainder - 15);
}
_pos += count;
- _b->_pos += count;
+ builder->_pos += count;
}
int i = getOneOrThrow("Unfinished string");
- if (!large && _b->_pos - (base + 1) > 126) {
+ if (!large && builder->_pos - (base + 1) > 126) {
large = true;
- _b->reserveSpace(8);
- ValueLength len = _b->_pos - (base + 1);
- memmove(_b->_start + base + 9, _b->_start + base + 1, checkOverflow(len));
- _b->_pos += 8;
+ builder->reserveSpace(8);
+ ValueLength len = builder->_pos - (base + 1);
+ memmove(builder->_start + base + 9, builder->_start + base + 1, checkOverflow(len));
+ builder->_pos += 8;
}
switch (i) {
case '"':
ValueLength len;
if (!large) {
- len = _b->_pos - (base + 1);
- _b->_start[base] = 0x40 + static_cast<uint8_t>(len);
+ len = builder->_pos - (base + 1);
+ builder->_start[base] = 0x40 + static_cast<uint8_t>(len);
// String is ready
} else {
- len = _b->_pos - (base + 9);
- _b->_start[base] = 0xbf;
+ len = builder->_pos - (base + 9);
+ builder->_start[base] = 0xbf;
for (ValueLength i = 1; i <= 8; i++) {
- _b->_start[base + i] = len & 0xff;
+ builder->_start[base + i] = len & 0xff;
len >>= 8;
}
}
@@ -293,33 +299,33 @@ void Parser::parseString() {
case '"':
case '/':
case '\\':
- _b->reserveSpace(1);
- _b->_start[_b->_pos++] = static_cast<uint8_t>(i);
+ builder->reserveSpace(1);
+ builder->_start[builder->_pos++] = static_cast<uint8_t>(i);
highSurrogate = 0;
break;
case 'b':
- _b->reserveSpace(1);
- _b->_start[_b->_pos++] = '\b';
+ builder->reserveSpace(1);
+ builder->_start[builder->_pos++] = '\b';
highSurrogate = 0;
break;
case 'f':
- _b->reserveSpace(1);
- _b->_start[_b->_pos++] = '\f';
+ builder->reserveSpace(1);
+ builder->_start[builder->_pos++] = '\f';
highSurrogate = 0;
break;
case 'n':
- _b->reserveSpace(1);
- _b->_start[_b->_pos++] = '\n';
+ builder->reserveSpace(1);
+ builder->_start[builder->_pos++] = '\n';
highSurrogate = 0;
break;
case 'r':
- _b->reserveSpace(1);
- _b->_start[_b->_pos++] = '\r';
+ builder->reserveSpace(1);
+ builder->_start[builder->_pos++] = '\r';
highSurrogate = 0;
break;
case 't':
- _b->reserveSpace(1);
- _b->_start[_b->_pos++] = '\t';
+ builder->reserveSpace(1);
+ builder->_start[builder->_pos++] = '\t';
highSurrogate = 0;
break;
case 'u': {
@@ -342,23 +348,23 @@ void Parser::parseString() {
}
}
if (v < 0x80) {
- _b->reserveSpace(1);
- _b->_start[_b->_pos++] = static_cast<uint8_t>(v);
+ builder->reserveSpace(1);
+ builder->_start[builder->_pos++] = static_cast<uint8_t>(v);
highSurrogate = 0;
} else if (v < 0x800) {
- _b->reserveSpace(2);
- _b->_start[_b->_pos++] = 0xc0 + (v >> 6);
- _b->_start[_b->_pos++] = 0x80 + (v & 0x3f);
+ builder->reserveSpace(2);
+ builder->_start[builder->_pos++] = 0xc0 + (v >> 6);
+ builder->_start[builder->_pos++] = 0x80 + (v & 0x3f);
highSurrogate = 0;
} else if (v >= 0xdc00 && v < 0xe000 && highSurrogate != 0) {
// Low surrogate, put the two together:
v = 0x10000 + ((highSurrogate - 0xd800) << 10) + v - 0xdc00;
- _b->_pos -= 3;
- _b->reserveSpace(4);
- _b->_start[_b->_pos++] = 0xf0 + (v >> 18);
- _b->_start[_b->_pos++] = 0x80 + ((v >> 12) & 0x3f);
- _b->_start[_b->_pos++] = 0x80 + ((v >> 6) & 0x3f);
- _b->_start[_b->_pos++] = 0x80 + (v & 0x3f);
+ builder->_pos -= 3;
+ builder->reserveSpace(4);
+ builder->_start[builder->_pos++] = 0xf0 + (v >> 18);
+ builder->_start[builder->_pos++] = 0x80 + ((v >> 12) & 0x3f);
+ builder->_start[builder->_pos++] = 0x80 + ((v >> 6) & 0x3f);
+ builder->_start[builder->_pos++] = 0x80 + (v & 0x3f);
highSurrogate = 0;
} else {
if (v >= 0xd800 && v < 0xdc00) {
@@ -367,10 +373,10 @@ void Parser::parseString() {
} else {
highSurrogate = 0;
}
- _b->reserveSpace(3);
- _b->_start[_b->_pos++] = 0xe0 + (v >> 12);
- _b->_start[_b->_pos++] = 0x80 + ((v >> 6) & 0x3f);
- _b->_start[_b->_pos++] = 0x80 + (v & 0x3f);
+ builder->reserveSpace(3);
+ builder->_start[builder->_pos++] = 0xe0 + (v >> 12);
+ builder->_start[builder->_pos++] = 0x80 + ((v >> 6) & 0x3f);
+ builder->_start[builder->_pos++] = 0x80 + (v & 0x3f);
}
break;
}
@@ -386,13 +392,13 @@ void Parser::parseString() {
throw Exception(Exception::UnexpectedControlCharacter);
}
highSurrogate = 0;
- _b->reserveSpace(1);
- _b->_start[_b->_pos++] = static_cast<uint8_t>(i);
+ builder->reserveSpace(1);
+ builder->_start[builder->_pos++] = static_cast<uint8_t>(i);
} else {
if (!options->validateUtf8Strings) {
highSurrogate = 0;
- _b->reserveSpace(1);
- _b->_start[_b->_pos++] = static_cast<uint8_t>(i);
+ builder->reserveSpace(1);
+ builder->_start[builder->_pos++] = static_cast<uint8_t>(i);
} else {
// multi-byte UTF-8 sequence!
int follow = 0;
@@ -412,14 +418,14 @@ void Parser::parseString() {
}
// validate follow up characters
- _b->reserveSpace(1 + follow);
- _b->_start[_b->_pos++] = static_cast<uint8_t>(i);
+ builder->reserveSpace(1 + follow);
+ builder->_start[builder->_pos++] = static_cast<uint8_t>(i);
for (int j = 0; j < follow; ++j) {
i = getOneOrThrow("scanString: truncated UTF-8 sequence");
if ((i & 0xc0) != 0x80) {
throw Exception(Exception::InvalidUtf8Sequence);
}
- _b->_start[_b->_pos++] = static_cast<uint8_t>(i);
+ builder->_start[builder->_pos++] = static_cast<uint8_t>(i);
}
highSurrogate = 0;
}
@@ -430,13 +436,19 @@ void Parser::parseString() {
}
void Parser::parseArray() {
- _b->addArray();
+ // copy builder pointer into local variable
+ // this avoids accessing the shared pointer repeatedly, which has
+ // a small but non-negligible cost
+ Builder* builder = _b.get();
+ VELOCYPACK_ASSERT(builder != nullptr);
+
+ builder->addArray();
int i = skipWhiteSpace("Expecting item or ']'");
if (i == ']') {
// empty array
++_pos; // the closing ']'
- _b->close();
+ builder->close();
return;
}
@@ -444,13 +456,13 @@ void Parser::parseArray() {
while (true) {
// parse array element itself
- _b->reportAdd();
+ builder->reportAdd();
parseJson();
i = skipWhiteSpace("Expecting ',' or ']'");
if (i == ']') {
// end of array
++_pos; // the closing ']'
- _b->close();
+ builder->close();
decreaseNesting();
return;
}
@@ -466,7 +478,13 @@ void Parser::parseArray() {
}
void Parser::parseObject() {
- _b->addObject();
+ // copy builder pointer into local variable
+ // this avoids accessing the shared pointer repeatedly, which has
+ // a small but non-negligible cost
+ Builder* builder = _b.get();
+ VELOCYPACK_ASSERT(builder != nullptr);
+
+ builder->addObject();
int i = skipWhiteSpace("Expecting item or '}'");
if (i == '}') {
@@ -475,7 +493,7 @@ void Parser::parseObject() {
if (_nesting != 0 || !options->keepTopLevelOpen) {
// only close if we've not been asked to keep top level open
- _b->close();
+ builder->close();
}
return;
}
@@ -490,22 +508,22 @@ void Parser::parseObject() {
// get past the initial '"'
++_pos;
- _b->reportAdd();
+ builder->reportAdd();
bool excludeAttribute = false;
- auto const lastPos = _b->_pos;
+ auto const lastPos = builder->_pos;
if (options->attributeExcludeHandler == nullptr) {
parseString();
} else {
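// (the attribute key is parsed first in all cases; only then is the
// exclude handler consulted with a Slice pointing at the key that was
// just written at lastPos. If the handler matches, the value is still
// parsed normally and the whole key/value pair is dropped again via
// removeLast() further below, so the builder never keeps a half pair.)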
parseString(); if (options->attributeExcludeHandler->shouldExclude( - Slice(_b->_start + lastPos), _nesting)) { + Slice(builder->_start + lastPos), _nesting)) { excludeAttribute = true; } } if (!excludeAttribute && options->attributeTranslator != nullptr) { // check if a translation for the attribute name exists - Slice key(_b->_start + lastPos); + Slice key(builder->_start + lastPos); if (key.isString()) { ValueLength keyLength; @@ -517,8 +535,8 @@ void Parser::parseObject() { // found translation... now reset position to old key position // and simply overwrite the existing key with the numeric translation // id - _b->_pos = lastPos; - _b->addUInt(Slice(translated).getUInt()); + builder->_pos = lastPos; + builder->addUInt(Slice(translated).getUInt()); } } } @@ -533,7 +551,7 @@ void Parser::parseObject() { parseJson(); if (excludeAttribute) { - _b->removeLast(); + builder->removeLast(); } i = skipWhiteSpace("Expecting ',' or '}'"); @@ -542,7 +560,7 @@ void Parser::parseObject() { ++_pos; // the closing '}' if (_nesting != 1 || !options->keepTopLevelOpen) { // only close if we've not been asked to keep top level open - _b->close(); + builder->close(); } decreaseNesting(); return; diff --git a/arangod/Agency/AgencyComm.cpp b/arangod/Agency/AgencyComm.cpp index 6e16adc47d..61b54c068a 100644 --- a/arangod/Agency/AgencyComm.cpp +++ b/arangod/Agency/AgencyComm.cpp @@ -224,7 +224,7 @@ void AgencyWriteTransaction::toVelocyPack(VPackBuilder& builder) const { VPackObjectBuilder guard3(&builder); } - builder.add(VPackValue(transactionId)); // Transactions + builder.add(VPackValue(clientId)); // Transactions } bool AgencyWriteTransaction::validate(AgencyCommResult const& result) const { @@ -283,7 +283,7 @@ void AgencyGeneralTransaction::toVelocyPack(VPackBuilder& builder) const { } else { std::get<0>(operation).toGeneralBuilder(builder); std::get<1>(operation).toGeneralBuilder(builder); - builder.add(VPackValue(transactionId)); + builder.add(VPackValue(clientId)); } } } @@ -328,18 +328,23 @@ AgencyCommResult::AgencyCommResult() _body(), _values(), _statusCode(0), - _connected(false) {} + _connected(false), + _clientId("") {} -AgencyCommResult::AgencyCommResult(int code, std::string const& message) +AgencyCommResult::AgencyCommResult( + int code, std::string const& message, std::string const& clientId) : _location(), _message(message), _body(), _values(), _statusCode(code), - _connected(false) {} + _connected(false), + _clientId(clientId) {} bool AgencyCommResult::connected() const { return _connected; } +std::string AgencyCommResult::clientId() const { return _clientId; } + int AgencyCommResult::httpCode() const { return _statusCode; } int AgencyCommResult::errorCode() const { @@ -1070,9 +1075,9 @@ AgencyCommResult AgencyComm::sendTransactionWithFailover( AgencyCommResult result = sendWithFailover( arangodb::rest::RequestType::POST, - (timeout == 0.0 ? AgencyCommManager::CONNECTION_OPTIONS._requestTimeout - : timeout), - url, builder.slice().toJson()); + (timeout == 0.0) ? 
+ AgencyCommManager::CONNECTION_OPTIONS._requestTimeout : timeout, + url, builder.slice().toJson(), transaction.getClientId()); if (!result.successful() && result.httpCode() != (int)arangodb::rest::ResponseCode::PRECONDITION_FAILED) { @@ -1285,7 +1290,8 @@ void AgencyComm::updateEndpoints(arangodb::velocypack::Slice const& current) { AgencyCommResult AgencyComm::sendWithFailover( arangodb::rest::RequestType method, double const timeout, - std::string const& initialUrl, std::string const& body) { + std::string const& initialUrl, std::string const& body, + std::string const& clientId) { std::string endpoint; std::unique_ptr connection = @@ -1326,7 +1332,7 @@ AgencyCommResult AgencyComm::sendWithFailover( ++tries; if (connection == nullptr) { - AgencyCommResult result(400, "No endpoints for agency found."); + AgencyCommResult result(400, "No endpoints for agency found.", clientId); LOG_TOPIC(ERR, Logger::AGENCYCOMM) << result._message; return result; } @@ -1343,7 +1349,7 @@ AgencyCommResult AgencyComm::sendWithFailover( // try to send; if we fail completely, do not retry try { - result = send(connection.get(), method, conTimeout, url, body); + result = send(connection.get(), method, conTimeout, url, body, clientId); } catch (...) { AgencyCommManager::MANAGER->failed(std::move(connection), endpoint); endpoint.clear(); @@ -1403,7 +1409,7 @@ AgencyCommResult AgencyComm::sendWithFailover( AgencyCommResult AgencyComm::send( arangodb::httpclient::GeneralClientConnection* connection, arangodb::rest::RequestType method, double timeout, std::string const& url, - std::string const& body) { + std::string const& body, std::string const& clientId) { TRI_ASSERT(connection != nullptr); if (method == arangodb::rest::RequestType::GET || @@ -1417,6 +1423,9 @@ AgencyCommResult AgencyComm::send( AgencyCommResult result; result._connected = false; result._statusCode = 0; + if (!clientId.empty()) { + result._clientId = clientId; + } LOG_TOPIC(TRACE, Logger::AGENCYCOMM) << "sending " << arangodb::HttpRequest::translateMethod(method) diff --git a/arangod/Agency/AgencyComm.h b/arangod/Agency/AgencyComm.h index 0ce795fa6e..7069a41f8b 100644 --- a/arangod/Agency/AgencyComm.h +++ b/arangod/Agency/AgencyComm.h @@ -218,7 +218,9 @@ class AgencyOperation { class AgencyCommResult { public: AgencyCommResult(); - AgencyCommResult(int code, std::string const& message); + AgencyCommResult(int code, std::string const& message, + std::string const& transactionId = std::string()); + ~AgencyCommResult() = default; public: @@ -230,6 +232,8 @@ class AgencyCommResult { int errorCode() const; + std::string clientId() const; + std::string errorMessage() const; std::string errorDetails() const; @@ -256,6 +260,9 @@ class AgencyCommResult { private: std::shared_ptr _vpack; + +public: + std::string _clientId; }; // ----------------------------------------------------------------------------- @@ -272,7 +279,8 @@ public: virtual std::string toJson() const = 0; virtual void toVelocyPack(arangodb::velocypack::Builder&) const = 0; virtual std::string const& path() const = 0; - + virtual std::string getClientId() const = 0; + virtual bool validate(AgencyCommResult const& result) const = 0; }; @@ -285,14 +293,14 @@ struct AgencyGeneralTransaction : public AgencyTransaction { explicit AgencyGeneralTransaction( std::pair const& operation) : - transactionId(to_string(boost::uuids::random_generator()())) { + clientId(to_string(boost::uuids::random_generator()())) { operations.push_back(operation); } explicit AgencyGeneralTransaction( std::vector> 
const& _opers) : operations(_opers), - transactionId(to_string(boost::uuids::random_generator()())) {} + clientId(to_string(boost::uuids::random_generator()())) {} AgencyGeneralTransaction() = default; @@ -305,12 +313,16 @@ struct AgencyGeneralTransaction : public AgencyTransaction { void push_back(std::pair const&); - inline std::string const& path() const override final { + inline virtual std::string const& path() const override final { return AgencyTransaction::TypeUrl[2]; } + inline virtual std::string getClientId() const override final { + return clientId; + } + virtual bool validate(AgencyCommResult const& result) const override final; - std::string transactionId; + std::string clientId; }; @@ -323,24 +335,24 @@ struct AgencyWriteTransaction : public AgencyTransaction { public: explicit AgencyWriteTransaction(AgencyOperation const& operation) : - transactionId(to_string(boost::uuids::random_generator()())) { + clientId(to_string(boost::uuids::random_generator()())) { operations.push_back(operation); } explicit AgencyWriteTransaction (std::vector const& _opers) : operations(_opers), - transactionId(to_string(boost::uuids::random_generator()())) {} + clientId(to_string(boost::uuids::random_generator()())) {} AgencyWriteTransaction(AgencyOperation const& operation, AgencyPrecondition const& precondition) : - transactionId(to_string(boost::uuids::random_generator()())) { + clientId(to_string(boost::uuids::random_generator()())) { operations.push_back(operation); preconditions.push_back(precondition); } AgencyWriteTransaction(std::vector const& _operations, AgencyPrecondition const& precondition) : - transactionId(to_string(boost::uuids::random_generator()())) { + clientId(to_string(boost::uuids::random_generator()())) { for (auto const& op : _operations) { operations.push_back(op); } @@ -349,7 +361,7 @@ public: AgencyWriteTransaction(std::vector const& opers, std::vector const& precs) : - transactionId(to_string(boost::uuids::random_generator()())) { + clientId(to_string(boost::uuids::random_generator()())) { for (auto const& op : opers) { operations.push_back(op); } @@ -365,15 +377,19 @@ public: std::string toJson() const override final; - inline std::string const& path() const override final { + inline virtual std::string const& path() const override final { return AgencyTransaction::TypeUrl[1]; } + inline virtual std::string getClientId() const override final { + return clientId; + } + virtual bool validate(AgencyCommResult const& result) const override final; std::vector preconditions; std::vector operations; - std::string transactionId; + std::string clientId; }; // ----------------------------------------------------------------------------- @@ -427,6 +443,10 @@ public: return AgencyTransaction::TypeUrl[3]; } + inline virtual std::string getClientId() const override final { + return std::string(); + } + virtual bool validate(AgencyCommResult const& result) const override final; std::vector preconditions; @@ -454,10 +474,14 @@ public: std::string toJson() const override final; - inline std::string const& path() const override final { + inline virtual std::string const& path() const override final { return AgencyTransaction::TypeUrl[0]; } + inline virtual std::string getClientId() const override final { + return std::string(); + } + virtual bool validate(AgencyCommResult const& result) const override final; std::vector keys; @@ -614,7 +638,8 @@ class AgencyComm { bool ensureStructureInitialized(); AgencyCommResult sendWithFailover(arangodb::rest::RequestType, double, - std::string 
const&, std::string const&);
+ std::string const&, std::string const&,
+ std::string const& clientId = std::string());
private:
bool lock(std::string const&, double, double,
@@ -623,7 +648,8 @@ class AgencyComm {
bool unlock(std::string const&, arangodb::velocypack::Slice const&, double);
AgencyCommResult send(httpclient::GeneralClientConnection*, rest::RequestType,
- double, std::string const&, std::string const&);
+ double, std::string const&, std::string const&,
+ std::string const& clientId = std::string());
bool tryInitializeStructure(std::string const& jwtSecret);
diff --git a/arangod/Agency/Agent.cpp b/arangod/Agency/Agent.cpp
index 504b305000..33b54ef88f 100644
--- a/arangod/Agency/Agent.cpp
+++ b/arangod/Agency/Agent.cpp
@@ -810,7 +810,6 @@ void Agent::run() {
} else {
_appendCV.wait(1000000);
- updateConfiguration();
}
}
@@ -910,9 +909,9 @@ void Agent::detectActiveAgentFailures() {
system_clock::now() - lastAcked.at(id)).count();
if (ds > 180.0) {
std::string repl = _config.nextAgentInLine();
- LOG_TOPIC(DEBUG, Logger::AGENCY) << "Active agent " << id << " has failed. << "
- << repl << " will be promoted to active agency membership";
- // Guarded in ::
+ LOG_TOPIC(DEBUG, Logger::AGENCY)
+ << "Active agent " << id << " has failed. " << repl
+ << " will be promoted to active agency membership";
_activator = std::make_unique(this, id, repl);
_activator->start();
return;
@@ -923,13 +922,6 @@ void Agent::detectActiveAgentFailures() {
}
-void Agent::updateConfiguration() {
-
- // First ask last know leader
-
-}
-
/// Orderly shutdown
void Agent::beginShutdown() {
Thread::beginShutdown();
diff --git a/arangod/Agency/Agent.h b/arangod/Agency/Agent.h
index 206a901428..f29b6a74a6 100644
--- a/arangod/Agency/Agent.h
+++ b/arangod/Agency/Agent.h
@@ -223,9 +223,6 @@ class Agent : public arangodb::Thread {
/// @brief persist agency configuration in RAFT
void persistConfiguration(term_t t);
- /// @brief Update my configuration as passive agent
- void updateConfiguration();
-
/// @brief Find out, if we've had acknowledged RPCs recent enough
bool challengeLeadership();
diff --git a/arangod/Agency/FailedFollower.cpp b/arangod/Agency/FailedFollower.cpp
index 64ca233541..2f7ed71078 100644
--- a/arangod/Agency/FailedFollower.cpp
+++ b/arangod/Agency/FailedFollower.cpp
@@ -252,8 +252,7 @@ JOB_STATUS FailedFollower::status() {
Node const& planned = _snapshot(planPath);
Node const& current = _snapshot(curPath);
- if (planned.slice() == current.slice()) {
-
+ if (compareServerLists(planned.slice(), current.slice())) {
// Remove shard from /arango/Target/FailedServers/ array
Builder del;
del.openArray();
diff --git a/arangod/Agency/Job.cpp b/arangod/Agency/Job.cpp
index c882619fba..24e56d14f6 100644
--- a/arangod/Agency/Job.cpp
+++ b/arangod/Agency/Job.cpp
@@ -25,6 +25,28 @@
using namespace arangodb::consensus;
+bool arangodb::consensus::compareServerLists(Slice plan, Slice current) {
+ if (!plan.isArray() || !current.isArray()) {
+ return false;
+ }
+ std::vector<std::string> planv, currv;
+ for (auto const& srv : VPackArrayIterator(plan)) {
+ if (srv.isString()) {
+ planv.push_back(srv.copyString());
+ }
+ }
+ for (auto const& srv : VPackArrayIterator(current)) {
+ if (srv.isString()) {
+ currv.push_back(srv.copyString());
+ }
+ }
+ bool equalLeader = !planv.empty() && !currv.empty() &&
+ planv.front() == currv.front();
+ std::sort(planv.begin(), planv.end());
+ std::sort(currv.begin(), currv.end());
+ return equalLeader && currv == planv;
+}
+
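compareServerLists treats a server list as an ordered leader plus an unordered set of followers: the first entries must match exactly, the rest only have to form the same set. A minimal stand-alone analogue over plain string vectors (the VelocyPack-free core of the helper above; the function name is illustrative):

#include <algorithm>
#include <string>
#include <vector>

// Leader (front element) must match exactly; the remaining servers only
// need to form the same set, in any order. Arguments are taken by value
// because they are sorted in place.
static bool compareServerVectors(std::vector<std::string> planv,
                                 std::vector<std::string> currv) {
  bool equalLeader = !planv.empty() && !currv.empty() &&
                     planv.front() == currv.front();
  std::sort(planv.begin(), planv.end());
  std::sort(currv.begin(), currv.end());
  return equalLeader && planv == currv;
}

// {"S1","S2","S3"} vs {"S1","S3","S2"} -> true  (same leader, same set)
// {"S1","S2","S3"} vs {"S2","S1","S3"} -> false (leader differs)
// {"S1","S2","S3"} vs {"S1","S2"}      -> false (follower sets differ)

This is why FailedFollower::status and MoveShard::status below can now tolerate Plan and Current reporting the followers in different orders.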
Job::Job(Node const& snapshot, Agent* agent, std::string const& jobId,
std::string const& creator, std::string const& agencyPrefix)
: _snapshot(snapshot),
diff --git a/arangod/Agency/Job.h b/arangod/Agency/Job.h
index d0f8b840ab..8826f90de1 100644
--- a/arangod/Agency/Job.h
+++ b/arangod/Agency/Job.h
@@ -37,6 +37,12 @@ namespace arangodb {
namespace consensus {
+// This is intended for lists of servers with the first being the leader
+// and all others followers. Both arguments must be arrays. Returns true
+// if the first items in both slices are equal and if both arrays contain
+// the same set of strings.
+bool compareServerLists(Slice plan, Slice current);
+
enum JOB_STATUS { TODO, PENDING, FINISHED, FAILED, NOTFOUND };
const std::vector<std::string> pos({"/Target/ToDo/", "/Target/Pending/",
"/Target/Finished/", "/Target/Failed/"});
diff --git a/arangod/Agency/MoveShard.cpp b/arangod/Agency/MoveShard.cpp
index 714524e66b..c38a08686d 100644
--- a/arangod/Agency/MoveShard.cpp
+++ b/arangod/Agency/MoveShard.cpp
@@ -190,7 +190,7 @@ bool MoveShard::start() {
}
}
- // Are we ditributeShardsLiked by others?
+ // Are we distributeShardsLiked by others?
// Invoke moveShard here with others
auto collections = _snapshot(planColPrefix + _database).children();
std::vector<std::string> colsLikeMe;
@@ -430,16 +430,7 @@ JOB_STATUS MoveShard::status() {
Slice current = _snapshot(curPath).slice();
Slice plan = _snapshot(planPath).slice();
- std::vector<std::string> planv, currv;
- for (auto const& srv : VPackArrayIterator(plan)) {
- planv.push_back(srv.copyString());
- }
- std::sort(planv.begin(), planv.end());
- for (auto const& srv : VPackArrayIterator(current)) {
- currv.push_back(srv.copyString());
- }
- std::sort(currv.begin(), currv.end());
- if (currv == planv) {
+ if (compareServerLists(plan, current)) {
if (current[0].copyString() == std::string("_") + _from) {
// Retired leader
diff --git a/arangod/Agency/Store.cpp b/arangod/Agency/Store.cpp
index 7905462ef1..fcd606999e 100644
--- a/arangod/Agency/Store.cpp
+++ b/arangod/Agency/Store.cpp
@@ -318,21 +318,40 @@ std::vector<bool> Store::apply(
body.add("index", VPackValue(lastCommitIndex));
auto ret = in.equal_range(url);
+ // mop: XXX not exactly sure what is supposed to happen here
+ // if there are multiple subobjects being updated at the same time
+ // e.g.
+ // /hans/wurst
+ // /hans/wurst/peter: 1
+ // /hans/wurst
+ // /hans/wurst/uschi: 2
+ // we are generating invalid json...not sure if this here is a
+ // valid fix...it is most likely broken :S
+ std::string currentKey;
for (auto it = ret.first; it != ret.second; ++it) {
- body.add(it->second->key, VPackValue(VPackValueType::Object));
+ if (currentKey != it->second->key) {
+ if (!currentKey.empty()) {
+ body.close();
+ }
+ body.add(it->second->key, VPackValue(VPackValueType::Object));
+ currentKey = it->second->key;
+ }
+ // mop: XXX maybe there are duplicates here as well?
+ // e.g. a key is set and deleted in the same transaction?
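// (note on the grouping above: equal_range(url) yields all callback
// entries for one URL in one run, so opening a new object only when
// it->second->key changes collapses runs of identical keys into a single
// JSON object; the if (!currentKey.empty()) close() after the loop
// balances the last object that was opened. Only adjacent duplicates are
// merged, which is exactly the limitation the comment above worries about.)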
body.add(it->second->modified, VPackValue(VPackValueType::Object)); body.add("op", VPackValue(it->second->oper)); body.close(); + } + if (!currentKey.empty()) { body.close(); } - body.close(); std::string endpoint, path; if (endpointPathFromUrl(url, endpoint, path)) { auto headerFields = std::make_unique>(); - + arangodb::ClusterComm::instance()->asyncRequest( "1", 1, endpoint, rest::RequestType::POST, path, std::make_shared(body.toString()), headerFields, diff --git a/arangod/Agency/Supervision.cpp b/arangod/Agency/Supervision.cpp index 8bc4834457..e9886331e9 100644 --- a/arangod/Agency/Supervision.cpp +++ b/arangod/Agency/Supervision.cpp @@ -103,13 +103,14 @@ void Supervision::upgradeAgency() { // Check all DB servers, guarded above doChecks std::vector Supervision::checkDBServers() { + std::vector ret; Node::Children const& machinesPlanned = _snapshot(planDBServersPrefix).children(); Node::Children const serversRegistered = _snapshot(currentServersRegisteredPrefix).children(); - bool reportPersistent; + bool reportPersistent = false; std::vector todelete; for (auto const& machine : _snapshot(healthPrefix).children()) { @@ -148,6 +149,8 @@ std::vector Supervision::checkDBServers() { good = true; } + reportPersistent = (heartbeatStatus != lastStatus); + query_t report = std::make_shared(); report->openArray(); report->openArray(); @@ -174,9 +177,6 @@ std::vector Supervision::checkDBServers() { if (good) { - if (lastStatus != Supervision::HEALTH_STATUS_GOOD) { - reportPersistent = true; - } report->add( "LastHeartbeatAcked", VPackValue(timepointToString(std::chrono::system_clock::now()))); @@ -210,7 +210,6 @@ std::vector Supervision::checkDBServers() { // for at least grace period if (t.count() > _gracePeriod && secondsSinceLeader > _gracePeriod) { if (lastStatus == "BAD") { - reportPersistent = true; report->add("Status", VPackValue("FAILED")); FailedServer fsj(_snapshot, _agent, std::to_string(_jobId++), "supervision", _agencyPrefix, serverID); @@ -257,6 +256,9 @@ std::vector Supervision::checkDBServers() { // Check all coordinators, guarded above doChecks std::vector Supervision::checkCoordinators() { + + bool reportPersistent = false; + std::vector ret; Node::Children const& machinesPlanned = _snapshot(planCoordinatorsPrefix).children(); @@ -305,6 +307,8 @@ std::vector Supervision::checkCoordinators() { good = true; } + reportPersistent = (heartbeatStatus != lastStatus); + query_t report = std::make_shared(); report->openArray(); report->openArray(); @@ -329,6 +333,7 @@ std::vector Supervision::checkCoordinators() { } if (good) { + if (goodServerId.empty()) { goodServerId = serverID; } @@ -359,6 +364,9 @@ std::vector Supervision::checkCoordinators() { report->close(); if (!this->isStopping()) { _agent->transient(report); + if (reportPersistent) { // STATUS changes should be persisted + _agent->write(report); + } } } diff --git a/arangod/Aql/Optimizer.h b/arangod/Aql/Optimizer.h index 44bb2ed5b0..0493863ecf 100644 --- a/arangod/Aql/Optimizer.h +++ b/arangod/Aql/Optimizer.h @@ -62,145 +62,148 @@ class Optimizer { // lower level values mean earlier rule execution // note that levels must be unique + initial = 100, // "Pass 1": moving nodes "up" (potentially outside loops): - pass1 = 100, + // ======================================================== // determine the "right" type of CollectNode and // add a sort node for each COLLECT (may be removed later) - specializeCollectRule_pass1 = 105, + specializeCollectRule_pass1, - inlineSubqueriesRule_pass1 = 106, + inlineSubqueriesRule_pass1, // 
split and-combined filters into multiple smaller filters - splitFiltersRule_pass1 = 110, + splitFiltersRule_pass1, // move calculations up the dependency chain (to pull them out of // inner loops etc.) - moveCalculationsUpRule_pass1 = 120, + moveCalculationsUpRule_pass1, // move filters up the dependency chain (to make result sets as small // as possible as early as possible) - moveFiltersUpRule_pass1 = 130, + moveFiltersUpRule_pass1, // remove calculations that are repeatedly used in a query - removeRedundantCalculationsRule_pass1 = 140, + removeRedundantCalculationsRule_pass1, - /// "Pass 2": try to remove redundant or unnecessary nodes - pass2 = 200, + // "Pass 2": try to remove redundant or unnecessary nodes + // ====================================================== + // remove filters from the query that are not necessary at all // filters that are always true will be removed entirely // filters that are always false will be replaced with a NoResults node - removeUnnecessaryFiltersRule_pass2 = 210, + removeUnnecessaryFiltersRule_pass2, // remove calculations that are never necessary - removeUnnecessaryCalculationsRule_pass2 = 220, + removeUnnecessaryCalculationsRule_pass2, // remove redundant sort blocks - removeRedundantSortsRule_pass2 = 230, + removeRedundantSortsRule_pass2, - /// "Pass 3": interchange EnumerateCollection nodes in all possible ways - /// this is level 500, please never let new plans from higher - /// levels go back to this or lower levels! - pass3 = 500, - interchangeAdjacentEnumerationsRule_pass3 = 510, + // "Pass 3": interchange EnumerateCollection nodes in all possible ways + // this is level 500, please never let new plans from higher + // levels go back to this or lower levels! + // ====================================================== + + interchangeAdjacentEnumerationsRule_pass3, // "Pass 4": moving nodes "up" (potentially outside loops) (second try): - pass4 = 600, + // ====================================================== + // move calculations up the dependency chain (to pull them out of // inner loops etc.) 
- moveCalculationsUpRule_pass4 = 610, + moveCalculationsUpRule_pass4, // move filters up the dependency chain (to make result sets as small // as possible as early as possible) - moveFiltersUpRule_pass4 = 620, + moveFiltersUpRule_pass4, /// "Pass 5": try to remove redundant or unnecessary nodes (second try) // remove filters from the query that are not necessary at all // filters that are always true will be removed entirely // filters that are always false will be replaced with a NoResults node - pass5 = 700, + // ====================================================== // remove redundant sort blocks - removeRedundantSortsRule_pass5 = 710, + removeRedundantSortsRule_pass5, // remove SORT RAND() if appropriate - removeSortRandRule_pass5 = 720, + removeSortRandRule_pass5, // remove INTO for COLLECT if appropriate - removeCollectVariablesRule_pass5 = 740, + removeCollectVariablesRule_pass5, // propagate constant attributes in FILTERs - propagateConstantAttributesRule_pass5 = 750, + propagateConstantAttributesRule_pass5, // remove unused out variables for data-modification queries - removeDataModificationOutVariablesRule_pass5 = 760, + removeDataModificationOutVariablesRule_pass5, /// "Pass 6": use indexes if possible for FILTER and/or SORT nodes - pass6 = 800, + // ====================================================== // replace simple OR conditions with IN - replaceOrWithInRule_pass6 = 810, + replaceOrWithInRule_pass6, // remove redundant OR conditions - removeRedundantOrRule_pass6 = 820, + removeRedundantOrRule_pass6, - applyGeoIndexRule = 825, + applyGeoIndexRule, - useIndexesRule_pass6 = 830, + useIndexesRule_pass6, // try to remove filters covered by index ranges - removeFiltersCoveredByIndexRule_pass6 = 840, + removeFiltersCoveredByIndexRule_pass6, - removeUnnecessaryFiltersRule_pass6 = 850, + removeUnnecessaryFiltersRule_pass6, // try to find sort blocks which are superseeded by indexes - useIndexForSortRule_pass6 = 860, + useIndexForSortRule_pass6, // sort values used in IN comparisons of remaining filters - sortInValuesRule_pass6 = 865, + sortInValuesRule_pass6, // remove calculations that are never necessary - removeUnnecessaryCalculationsRule_pass6 = 870, + removeUnnecessaryCalculationsRule_pass6, // merge filters into graph traversals - optimizeTraversalsRule_pass6 = 880, - prepareTraversalsRule_pass6 = 881, + optimizeTraversalsRule_pass6, + prepareTraversalsRule_pass6, /// Pass 9: push down calculations beyond FILTERs and LIMITs - moveCalculationsDownRule_pass9 = 900, + moveCalculationsDownRule_pass9, /// Pass 9: patch update statements - patchUpdateStatementsRule_pass9 = 902, + patchUpdateStatementsRule_pass9, /// "Pass 10": final transformations for the cluster // make operations on sharded collections use distribute - distributeInClusterRule_pass10 = 1000, + distributeInClusterRule_pass10, // make operations on sharded collections use scatter / gather / remote - scatterInClusterRule_pass10 = 1010, + scatterInClusterRule_pass10, // move FilterNodes & Calculation nodes in between // scatter(remote) <-> gather(remote) so they're // distributed to the cluster nodes. - distributeFilternCalcToClusterRule_pass10 = 1020, + distributeFilternCalcToClusterRule_pass10, // move SortNodes into the distribution. // adjust gathernode to also contain the sort criteria. 
- distributeSortToClusterRule_pass10 = 1030, + distributeSortToClusterRule_pass10, // try to get rid of a RemoteNode->ScatterNode combination which has // only a SingletonNode and possibly some CalculationNodes as dependencies - removeUnnecessaryRemoteScatterRule_pass10 = 1040, + removeUnnecessaryRemoteScatterRule_pass10, // remove any superflous satellite collection joins... // put it after Scatter rule because we would do // the work twice otherwise - removeSatelliteJoinsRule_pass10 = 1045, + removeSatelliteJoinsRule_pass10, // recognize that a RemoveNode can be moved to the shards - undistributeRemoveAfterEnumCollRule_pass10 = 1050 - + undistributeRemoveAfterEnumCollRule_pass10 }; public: diff --git a/arangod/Aql/OptimizerRules.cpp b/arangod/Aql/OptimizerRules.cpp index b614a18d0a..d3aca09ed0 100644 --- a/arangod/Aql/OptimizerRules.cpp +++ b/arangod/Aql/OptimizerRules.cpp @@ -3928,10 +3928,6 @@ void arangodb::aql::inlineSubqueriesRule(Optimizer* opt, /////////////////////////////////////////////////////////////////////////////// // GEO RULE /////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// -// -// Description of what this Rule tries to achieve: -// https://docs.google.com/document/d/1G57UP08ZFywUXKi5cLvEIKpZP-AUKGwG9oAnFOX8LLo -// struct GeoIndexInfo{ operator bool() const { return distanceNode && valid; } diff --git a/arangod/CMakeLists.txt b/arangod/CMakeLists.txt index 4d5bf91e45..94cb62846e 100644 --- a/arangod/CMakeLists.txt +++ b/arangod/CMakeLists.txt @@ -78,8 +78,6 @@ SET(ARANGOD_SOURCES Actions/actions.cpp Agency/ActivationCallback.cpp Agency/AddFollower.cpp - Agency/AgencyCallback.cpp - Agency/AgencyCallbackRegistry.cpp Agency/AgencyComm.cpp Agency/AgencyFeature.cpp Agency/Agent.cpp @@ -176,6 +174,8 @@ SET(ARANGOD_SOURCES Aql/VariableGenerator.cpp Aql/grammar.cpp Aql/tokens.cpp + Cluster/AgencyCallback.cpp + Cluster/AgencyCallbackRegistry.cpp Cluster/ClusterComm.cpp Cluster/ClusterEdgeCursor.cpp Cluster/ClusterFeature.cpp diff --git a/arangod/Agency/AgencyCallback.cpp b/arangod/Cluster/AgencyCallback.cpp similarity index 100% rename from arangod/Agency/AgencyCallback.cpp rename to arangod/Cluster/AgencyCallback.cpp diff --git a/arangod/Agency/AgencyCallback.h b/arangod/Cluster/AgencyCallback.h similarity index 100% rename from arangod/Agency/AgencyCallback.h rename to arangod/Cluster/AgencyCallback.h diff --git a/arangod/Agency/AgencyCallbackRegistry.cpp b/arangod/Cluster/AgencyCallbackRegistry.cpp similarity index 100% rename from arangod/Agency/AgencyCallbackRegistry.cpp rename to arangod/Cluster/AgencyCallbackRegistry.cpp diff --git a/arangod/Agency/AgencyCallbackRegistry.h b/arangod/Cluster/AgencyCallbackRegistry.h similarity index 98% rename from arangod/Agency/AgencyCallbackRegistry.h rename to arangod/Cluster/AgencyCallbackRegistry.h index db827963e5..7e3bc567c9 100644 --- a/arangod/Agency/AgencyCallbackRegistry.h +++ b/arangod/Cluster/AgencyCallbackRegistry.h @@ -24,7 +24,7 @@ #ifndef CLUSTER_AGENCYCALLACKREGISTRY_H #define CLUSTER_AGENCYCALLACKREGISTRY_H 1 -#include "Agency/AgencyCallback.h" +#include "Cluster/AgencyCallback.h" #include "Basics/ReadWriteLock.h" namespace arangodb { diff --git a/arangod/Cluster/ClusterInfo.cpp b/arangod/Cluster/ClusterInfo.cpp index 8d936e74b6..be0a63af0b 100644 --- a/arangod/Cluster/ClusterInfo.cpp +++ b/arangod/Cluster/ClusterInfo.cpp @@ -903,10 +903,11 @@ int ClusterInfo::createDatabaseCoordinator(std::string const& name, 
(int)arangodb::rest::ResponseCode::PRECONDITION_FAILED) { return setErrormsg(TRI_ERROR_ARANGO_DUPLICATE_NAME, errorMsg); } - errorMsg = std::string("Failed to create database in ") + __FILE__ + ":" + std::to_string(__LINE__); + errorMsg = std::string("Failed to create database with ") + + res._clientId + " at " + __FILE__ + ":" + std::to_string(__LINE__); return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_CREATE_DATABASE_IN_PLAN, errorMsg); - } + } // Now update our own cache of planned databases: loadPlan(); @@ -1171,6 +1172,7 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName, LOG_TOPIC(ERR, Logger::CLUSTER) << "Could not get agency dump!"; } } else { + errorMsg += std::string("\nClientId ") + res._clientId; errorMsg += std::string("\n") + __FILE__ + std::to_string(__LINE__); errorMsg += std::string("\n") + res.errorMessage(); errorMsg += std::string("\n") + res.errorDetails(); @@ -1295,9 +1297,9 @@ int ClusterInfo::dropCollectionCoordinator(std::string const& databaseName, res = ac.sendTransactionWithFailover(trans); if (!res.successful()) { - LOG(ERR) << "###################### WAS ERLAUBE? ####################"; AgencyCommResult ag = ac.getValues(""); if (ag.successful()) { + LOG_TOPIC(ERR, Logger::CLUSTER) << "ClientId: " << res._clientId; LOG_TOPIC(ERR, Logger::CLUSTER) << "Agency dump:\n" << ag.slice().toJson(); } else { @@ -1552,11 +1554,6 @@ int ClusterInfo::ensureIndexCoordinator( std::shared_ptr c = getCollection(databaseName, collectionID); - // Note that nobody is removing this collection in the plan, since - // we hold the write lock in the agency, therefore it does not matter - // that getCollection fetches the read lock and releases it before - // we get it again. - // READ_LOCKER(readLocker, _planProt.lock); if (c == nullptr) { @@ -1761,6 +1758,7 @@ int ClusterInfo::ensureIndexCoordinator( AgencyCommResult result = ac.sendTransactionWithFailover(trx, 0.0); if (!result.successful()) { + errorMsg += "ClientId: " + result._clientId; errorMsg += std::string(" ") + __FILE__ + ":" + std::to_string(__LINE__); resultBuilder = *resBuilder; return TRI_ERROR_CLUSTER_COULD_NOT_CREATE_INDEX_IN_PLAN; @@ -1981,6 +1979,7 @@ int ClusterInfo::dropIndexCoordinator(std::string const& databaseName, AgencyCommResult result = ac.sendTransactionWithFailover(trx, 0.0); if (!result.successful()) { + errorMsg += "ClientId: " + result._clientId; errorMsg += std::string(" ") + __FILE__ + ":" + std::to_string(__LINE__); events::DropIndex(collectionID, idString, TRI_ERROR_CLUSTER_COULD_NOT_DROP_INDEX_IN_PLAN); @@ -2050,20 +2049,20 @@ void ClusterInfo::loadServers() { result.slice()[0].get( std::vector( {AgencyCommManager::path(), "Current", "ServersRegistered"})); - + velocypack::Slice serversAliases = result.slice()[0].get( std::vector( {AgencyCommManager::path(), "Target", "MapUniqueToShortID"})); - - if (serversRegistered.isObject()) { + + if (serversRegistered.isObject()) { decltype(_servers) newServers; decltype(_serverAliases) newAliases; - + size_t i = 0; for (auto const& res : VPackObjectIterator(serversRegistered)) { velocypack::Slice slice = res.value; - + if (slice.isObject() && slice.hasKey("endpoint")) { std::string server = arangodb::basics::VelocyPackHelper::getStringValue( @@ -2080,7 +2079,7 @@ void ClusterInfo::loadServers() { newServers.emplace(std::make_pair(res.key.copyString(), server)); } } - + // Now set the new value: { WRITE_LOCKER(writeLocker, _serversProt.lock); @@ -2092,13 +2091,13 @@ void ClusterInfo::loadServers() { return; } } - + LOG_TOPIC(DEBUG, 
Logger::CLUSTER) - << "Error while loading " << prefixServers - << " httpCode: " << result.httpCode() - << " errorCode: " << result.errorCode() - << " errorMessage: " << result.errorMessage() - << " body: " << result.body(); + << "Error while loading " << prefixServers + << " httpCode: " << result.httpCode() + << " errorCode: " << result.errorCode() + << " errorMessage: " << result.errorMessage() + << " body: " << result.body(); } //////////////////////////////////////////////////////////////////////////////// diff --git a/arangod/Cluster/ClusterInfo.h b/arangod/Cluster/ClusterInfo.h index 68c4b536c4..a1c8ce2fc7 100644 --- a/arangod/Cluster/ClusterInfo.h +++ b/arangod/Cluster/ClusterInfo.h @@ -31,7 +31,7 @@ #include #include -#include "Agency/AgencyCallbackRegistry.h" +#include "Cluster/AgencyCallbackRegistry.h" #include "Agency/AgencyComm.h" #include "Basics/Mutex.h" #include "Basics/ReadWriteLock.h" diff --git a/arangod/Cluster/FollowerInfo.cpp b/arangod/Cluster/FollowerInfo.cpp index 163e1da3e0..38639b3653 100644 --- a/arangod/Cluster/FollowerInfo.cpp +++ b/arangod/Cluster/FollowerInfo.cpp @@ -100,6 +100,12 @@ static VPackBuilder newShardEntry(VPackSlice oldValue, ServerID const& sid, void FollowerInfo::add(ServerID const& sid) { MUTEX_LOCKER(locker, _mutex); + // First check if there is anything to do: + for (auto const& s : *_followers) { + if (s == sid) { + return; // Do nothing, if follower already there + } + } // Fully copy the vector: auto v = std::make_shared>(*_followers); v->push_back(sid); // add a single entry @@ -180,11 +186,25 @@ void FollowerInfo::add(ServerID const& sid) { void FollowerInfo::remove(ServerID const& sid) { MUTEX_LOCKER(locker, _mutex); + // First check if there is anything to do: + bool found = false; + for (auto const& s : *_followers) { + if (s == sid) { + found = true; + break; + } + } + if (!found) { + return; // nothing to do + } + auto v = std::make_shared>(); - v->reserve(_followers->size() - 1); - for (auto const& i : *_followers) { - if (i != sid) { - v->push_back(i); + if (_followers->size() > 0) { + v->reserve(_followers->size() - 1); + for (auto const& i : *_followers) { + if (i != sid) { + v->push_back(i); + } } } _followers = v; // will cast to std::vector const diff --git a/arangod/Cluster/FollowerInfo.h b/arangod/Cluster/FollowerInfo.h index fa12def019..b97b896712 100644 --- a/arangod/Cluster/FollowerInfo.h +++ b/arangod/Cluster/FollowerInfo.h @@ -37,11 +37,12 @@ class FollowerInfo { std::shared_ptr const> _followers; Mutex _mutex; arangodb::LogicalCollection* _docColl; + bool _isLeader; public: explicit FollowerInfo(arangodb::LogicalCollection* d) - : _followers(new std::vector()), _docColl(d) { } + : _followers(new std::vector()), _docColl(d), _isLeader(false) { } ////////////////////////////////////////////////////////////////////////////// /// @brief get information about current followers of a shard. 
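FollowerInfo::add and FollowerInfo::remove above use a copy-on-write scheme: readers keep a shared_ptr to an immutable follower vector, writers build a modified copy under the mutex and swap it in, and the new early-return checks make both operations idempotent. A condensed sketch of that pattern (simplified; the real methods additionally persist the change in the agency):

#include <algorithm>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

// Condensed copy-on-write follower list (simplified from FollowerInfo).
class Followers {
  std::shared_ptr<std::vector<std::string> const> _followers =
      std::make_shared<std::vector<std::string>>();
  std::mutex _mutex;

 public:
  void add(std::string const& sid) {
    std::lock_guard<std::mutex> guard(_mutex);
    // idempotent: nothing to do if the follower is already known
    if (std::find(_followers->begin(), _followers->end(), sid) !=
        _followers->end()) {
      return;
    }
    auto v = std::make_shared<std::vector<std::string>>(*_followers);
    v->push_back(sid);  // mutate the private copy...
    _followers = v;     // ...then publish it for readers
  }

  void remove(std::string const& sid) {
    std::lock_guard<std::mutex> guard(_mutex);
    if (std::find(_followers->begin(), _followers->end(), sid) ==
        _followers->end()) {
      return;  // idempotent: unknown follower, nothing to remove
    }
    auto v = std::make_shared<std::vector<std::string>>();
    v->reserve(_followers->size() - 1);
    for (auto const& s : *_followers) {
      if (s != sid) {
        v->push_back(s);
      }
    }
    _followers = v;
  }

  std::shared_ptr<std::vector<std::string> const> get() {
    std::lock_guard<std::mutex> guard(_mutex);
    return _followers;  // readers keep a stable snapshot
  }
};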
@@ -74,6 +75,22 @@ class FollowerInfo { void clear(); + ////////////////////////////////////////////////////////////////////////////// + /// @brief report if we are the leader + ////////////////////////////////////////////////////////////////////////////// + + bool isLeader() const { + return _isLeader; + } + + ////////////////////////////////////////////////////////////////////////////// + /// @brief set leadership + ////////////////////////////////////////////////////////////////////////////// + + void setLeader(bool b) { + _isLeader = b; + } + }; } // end namespace arangodb diff --git a/arangod/Cluster/RestAgencyCallbacksHandler.cpp b/arangod/Cluster/RestAgencyCallbacksHandler.cpp index df58813407..6cb2c639aa 100644 --- a/arangod/Cluster/RestAgencyCallbacksHandler.cpp +++ b/arangod/Cluster/RestAgencyCallbacksHandler.cpp @@ -23,7 +23,7 @@ #include "RestAgencyCallbacksHandler.h" -#include "Agency/AgencyCallbackRegistry.h" +#include "Cluster/AgencyCallbackRegistry.h" #include "Rest/HttpRequest.h" #include "Rest/HttpResponse.h" diff --git a/arangod/GeneralServer/GeneralServerFeature.cpp b/arangod/GeneralServer/GeneralServerFeature.cpp index dd1e643e0b..8b25f17011 100644 --- a/arangod/GeneralServer/GeneralServerFeature.cpp +++ b/arangod/GeneralServer/GeneralServerFeature.cpp @@ -24,12 +24,12 @@ #include -#include "Agency/AgencyCallbackRegistry.h" #include "Agency/AgencyFeature.h" #include "Agency/RestAgencyHandler.h" #include "Agency/RestAgencyPrivHandler.h" #include "Aql/RestAqlHandler.h" #include "Basics/StringUtils.h" +#include "Cluster/AgencyCallbackRegistry.h" #include "Cluster/ClusterComm.h" #include "Cluster/ClusterFeature.h" #include "Cluster/RestAgencyCallbacksHandler.h" diff --git a/arangod/RestHandler/RestDocumentHandler.cpp b/arangod/RestHandler/RestDocumentHandler.cpp index 5e2dcb8120..1e20b72880 100644 --- a/arangod/RestHandler/RestDocumentHandler.cpp +++ b/arangod/RestHandler/RestDocumentHandler.cpp @@ -115,10 +115,10 @@ bool RestDocumentHandler::createDocument() { VPackSlice body = parsedBody->slice(); arangodb::OperationOptions opOptions; - opOptions.isRestore = extractBooleanParameter("isRestore", false); - opOptions.waitForSync = extractBooleanParameter("waitForSync", false); - opOptions.returnNew = extractBooleanParameter("returnNew", false); - opOptions.silent = extractBooleanParameter("silent", false); + opOptions.isRestore = extractBooleanParameter(StaticStrings::IsRestoreString, false); + opOptions.waitForSync = extractBooleanParameter(StaticStrings::WaitForSyncString, false); + opOptions.returnNew = extractBooleanParameter(StaticStrings::ReturnNewString, false); + opOptions.silent = extractBooleanParameter(StaticStrings::SilentString, false); // find and load collection given by name or identifier auto transactionContext(StandaloneTransactionContext::Create(_vocbase)); @@ -380,12 +380,12 @@ bool RestDocumentHandler::modifyDocument(bool isPatch) { } OperationOptions opOptions; - opOptions.isRestore = extractBooleanParameter("isRestore", false); - opOptions.ignoreRevs = extractBooleanParameter("ignoreRevs", true); - opOptions.waitForSync = extractBooleanParameter("waitForSync", false); - opOptions.returnNew = extractBooleanParameter("returnNew", false); - opOptions.returnOld = extractBooleanParameter("returnOld", false); - opOptions.silent = extractBooleanParameter("silent", false); + opOptions.isRestore = extractBooleanParameter(StaticStrings::IsRestoreString, false); + opOptions.ignoreRevs = extractBooleanParameter(StaticStrings::IgnoreRevsString, true); + 
opOptions.waitForSync = extractBooleanParameter(StaticStrings::WaitForSyncString, false); + opOptions.returnNew = extractBooleanParameter(StaticStrings::ReturnNewString, false); + opOptions.returnOld = extractBooleanParameter(StaticStrings::ReturnOldString, false); + opOptions.silent = extractBooleanParameter(StaticStrings::SilentString, false); // extract the revision, if single document variant and header given: std::shared_ptr builder; @@ -442,8 +442,8 @@ bool RestDocumentHandler::modifyDocument(bool isPatch) { OperationResult result(TRI_ERROR_NO_ERROR); if (isPatch) { // patching an existing document - opOptions.keepNull = extractBooleanParameter("keepNull", true); - opOptions.mergeObjects = extractBooleanParameter("mergeObjects", true); + opOptions.keepNull = extractBooleanParameter(StaticStrings::KeepNullString, true); + opOptions.mergeObjects = extractBooleanParameter(StaticStrings::MergeObjectsString, true); result = trx.update(collectionName, body, opOptions); } else { result = trx.replace(collectionName, body, opOptions); @@ -507,10 +507,10 @@ bool RestDocumentHandler::deleteDocument() { } OperationOptions opOptions; - opOptions.returnOld = extractBooleanParameter("returnOld", false); - opOptions.ignoreRevs = extractBooleanParameter("ignoreRevs", true); - opOptions.waitForSync = extractBooleanParameter("waitForSync", false); - opOptions.silent = extractBooleanParameter("silent", false); + opOptions.returnOld = extractBooleanParameter(StaticStrings::ReturnOldString, false); + opOptions.ignoreRevs = extractBooleanParameter(StaticStrings::IgnoreRevsString, true); + opOptions.waitForSync = extractBooleanParameter(StaticStrings::WaitForSyncString, false); + opOptions.silent = extractBooleanParameter(StaticStrings::SilentString, false); auto transactionContext(StandaloneTransactionContext::Create(_vocbase)); @@ -600,7 +600,7 @@ bool RestDocumentHandler::readManyDocuments() { std::string const& collectionName = suffixes[0]; OperationOptions opOptions; - opOptions.ignoreRevs = extractBooleanParameter("ignoreRevs", true); + opOptions.ignoreRevs = extractBooleanParameter(StaticStrings::IgnoreRevsString, true); auto transactionContext(StandaloneTransactionContext::Create(_vocbase)); SingleCollectionTransaction trx(transactionContext, collectionName, diff --git a/arangod/RestHandler/RestReplicationHandler.cpp b/arangod/RestHandler/RestReplicationHandler.cpp index 8ed641c5b3..3c11eb64e8 100644 --- a/arangod/RestHandler/RestReplicationHandler.cpp +++ b/arangod/RestHandler/RestReplicationHandler.cpp @@ -3447,7 +3447,6 @@ void RestReplicationHandler::handleCommandRemoveFollower() { "did not find collection"); return; } - col->followers()->remove(followerId.copyString()); VPackBuilder b; diff --git a/arangod/RestHandler/RestVocbaseBaseHandler.cpp b/arangod/RestHandler/RestVocbaseBaseHandler.cpp index 6dd0829207..d99b80ff47 100644 --- a/arangod/RestHandler/RestVocbaseBaseHandler.cpp +++ b/arangod/RestHandler/RestVocbaseBaseHandler.cpp @@ -608,7 +608,7 @@ TRI_voc_rid_t RestVocbaseBaseHandler::extractRevision(char const* header, /// @brief extracts a boolean parameter value //////////////////////////////////////////////////////////////////////////////// -bool RestVocbaseBaseHandler::extractBooleanParameter(char const* name, +bool RestVocbaseBaseHandler::extractBooleanParameter(std::string const& name, bool def) const { bool found; std::string const& value = _request->value(name, found); diff --git a/arangod/RestHandler/RestVocbaseBaseHandler.h b/arangod/RestHandler/RestVocbaseBaseHandler.h index 
7081fdfd36..3e5a01bdff 100644 --- a/arangod/RestHandler/RestVocbaseBaseHandler.h +++ b/arangod/RestHandler/RestVocbaseBaseHandler.h @@ -264,7 +264,11 @@ class RestVocbaseBaseHandler : public RestBaseHandler { /// @brief extracts a boolean parameter value ////////////////////////////////////////////////////////////////////////////// - bool extractBooleanParameter(char const* name, bool def) const; + bool extractBooleanParameter(std::string const& name, bool def) const; + + bool extractBooleanParameter(char const* name, bool def) const { + return extractBooleanParameter(std::string(name), def); + } protected: ////////////////////////////////////////////////////////////////////////////// diff --git a/arangod/StorageEngine/MMFilesCollection.cpp b/arangod/StorageEngine/MMFilesCollection.cpp index 6c11e36c3e..50d3eddd59 100644 --- a/arangod/StorageEngine/MMFilesCollection.cpp +++ b/arangod/StorageEngine/MMFilesCollection.cpp @@ -1199,7 +1199,7 @@ void MMFilesCollection::removeRevision(TRI_voc_rid_t revisionId, bool updateStat TRI_ASSERT(revisionId != 0); if (updateStats) { MMFilesDocumentPosition const old = _revisionsCache.fetchAndRemove(revisionId); - if (old && !old.pointsToWal()) { + if (old && !old.pointsToWal() && old.fid() != 0) { TRI_ASSERT(old.dataptr() != nullptr); uint8_t const* vpack = static_cast(old.dataptr()); int64_t size = MMFilesDatafileHelper::AlignedSize(arangodb::MMFilesDatafileHelper::VPackOffset(TRI_DF_MARKER_VPACK_DOCUMENT) + VPackSlice(vpack).byteSize()); diff --git a/arangod/StorageEngine/MMFilesCollectorThread.cpp b/arangod/StorageEngine/MMFilesCollectorThread.cpp index f45dd0d23d..4f1655dff4 100644 --- a/arangod/StorageEngine/MMFilesCollectorThread.cpp +++ b/arangod/StorageEngine/MMFilesCollectorThread.cpp @@ -374,16 +374,11 @@ int MMFilesCollectorThread::collectLogfiles(bool& worked) { try { int res = collect(logfile); - // LOG_TOPIC(TRACE, Logger::COLLECTOR) << "collected logfile: " << // logfile->id() << ". result: " - // << res; + // LOG_TOPIC(TRACE, Logger::COLLECTOR) << "collected logfile: " << logfile->id() << ". 
result: " << res; if (res == TRI_ERROR_NO_ERROR) { // reset collector status - { - CONDITION_LOCKER(guard, _collectorResultCondition); - _collectorResult = TRI_ERROR_NO_ERROR; - _collectorResultCondition.broadcast(); - } + broadcastCollectorResult(res); RocksDBFeature::syncWal(); @@ -393,11 +388,7 @@ int MMFilesCollectorThread::collectLogfiles(bool& worked) { _logfileManager->forceStatus(logfile, wal::Logfile::StatusType::SEALED); // set error in collector - { - CONDITION_LOCKER(guard, _collectorResultCondition); - _collectorResult = res; - _collectorResultCondition.broadcast(); - } + broadcastCollectorResult(res); } return res; @@ -979,3 +970,9 @@ int MMFilesCollectorThread::updateDatafileStatistics( return TRI_ERROR_NO_ERROR; } + +void MMFilesCollectorThread::broadcastCollectorResult(int res) { + CONDITION_LOCKER(guard, _collectorResultCondition); + _collectorResult = res; + _collectorResultCondition.broadcast(); +} diff --git a/arangod/StorageEngine/MMFilesCollectorThread.h b/arangod/StorageEngine/MMFilesCollectorThread.h index 42b3119357..5fa02f403c 100644 --- a/arangod/StorageEngine/MMFilesCollectorThread.h +++ b/arangod/StorageEngine/MMFilesCollectorThread.h @@ -103,6 +103,8 @@ class MMFilesCollectorThread final : public Thread { /// @brief update a collection's datafile information int updateDatafileStatistics(LogicalCollection*, MMFilesCollectorCache*); + void broadcastCollectorResult(int res); + private: /// @brief the logfile manager wal::LogfileManager* _logfileManager; diff --git a/arangod/StorageEngine/MMFilesDocumentOperation.cpp b/arangod/StorageEngine/MMFilesDocumentOperation.cpp index 09777d84dd..a48456e1a6 100644 --- a/arangod/StorageEngine/MMFilesDocumentOperation.cpp +++ b/arangod/StorageEngine/MMFilesDocumentOperation.cpp @@ -39,26 +39,6 @@ MMFilesDocumentOperation::MMFilesDocumentOperation(LogicalCollection* collection } MMFilesDocumentOperation::~MMFilesDocumentOperation() { - TRI_ASSERT(_status != StatusType::INDEXED); - - if (_status == StatusType::HANDLED) { - try { - if (_type == TRI_VOC_DOCUMENT_OPERATION_UPDATE || - _type == TRI_VOC_DOCUMENT_OPERATION_REPLACE) { - // remove old, now unused revision - TRI_ASSERT(!_oldRevision.empty()); - TRI_ASSERT(!_newRevision.empty()); - _collection->removeRevision(_oldRevision._revisionId, true); - } else if (_type == TRI_VOC_DOCUMENT_OPERATION_REMOVE) { - // remove old, now unused revision - TRI_ASSERT(!_oldRevision.empty()); - TRI_ASSERT(_newRevision.empty()); - _collection->removeRevision(_oldRevision._revisionId, true); - } - } catch (...) 
{ - // never throw here because of destructor - } - } } MMFilesDocumentOperation* MMFilesDocumentOperation::swap() { @@ -108,16 +88,13 @@ void MMFilesDocumentOperation::setRevisions(DocumentDescriptor const& oldRevisio void MMFilesDocumentOperation::revert(arangodb::Transaction* trx) { TRI_ASSERT(trx != nullptr); - - if (_status == StatusType::CREATED || - _status == StatusType::SWAPPED || - _status == StatusType::REVERTED) { + + if (_status == StatusType::SWAPPED || _status == StatusType::REVERTED) { return; } - - TRI_ASSERT(_status == StatusType::INDEXED || _status == StatusType::HANDLED); - - // set to reverted now + + // fetch old status and set it to reverted now + StatusType status = _status; _status = StatusType::REVERTED; TRI_voc_rid_t oldRevisionId = 0; @@ -136,39 +113,82 @@ void MMFilesDocumentOperation::revert(arangodb::Transaction* trx) { newDoc = VPackSlice(_newRevision._vpack); } - try { - _collection->rollbackOperation(trx, _type, oldRevisionId, oldDoc, newRevisionId, newDoc); - } catch (...) { - // TODO: decide whether we should rethrow here + // clear caches so the following operations all use + if (oldRevisionId != 0) { + _collection->removeRevisionCacheEntry(oldRevisionId); + } + if (newRevisionId != 0) { + _collection->removeRevisionCacheEntry(newRevisionId); } if (_type == TRI_VOC_DOCUMENT_OPERATION_INSERT) { TRI_ASSERT(_oldRevision.empty()); TRI_ASSERT(!_newRevision.empty()); + + if (status != StatusType::CREATED) { + // remove revision from indexes + try { + _collection->rollbackOperation(trx, _type, oldRevisionId, oldDoc, newRevisionId, newDoc); + } catch (...) { + } + } + // remove now obsolete new revision try { _collection->removeRevision(newRevisionId, true); } catch (...) { // operation probably was never inserted - // TODO: decide whether we should rethrow here } } else if (_type == TRI_VOC_DOCUMENT_OPERATION_UPDATE || _type == TRI_VOC_DOCUMENT_OPERATION_REPLACE) { TRI_ASSERT(!_oldRevision.empty()); TRI_ASSERT(!_newRevision.empty()); + + try { + // re-insert the old revision + _collection->insertRevision(_oldRevision._revisionId, _oldRevision._vpack, 0, true); + } catch (...) { + } + + if (status != StatusType::CREATED) { + try { + // restore the old index state + _collection->rollbackOperation(trx, _type, oldRevisionId, oldDoc, newRevisionId, newDoc); + } catch (...) { + } + } + + // let the primary index entry point to the correct document SimpleIndexElement* element = _collection->primaryIndex()->lookupKeyRef(trx, Transaction::extractKeyFromDocument(newDoc)); if (element != nullptr && element->revisionId() != 0) { VPackSlice keySlice(Transaction::extractKeyFromDocument(oldDoc)); element->updateRevisionId(oldRevisionId, static_cast(keySlice.begin() - oldDoc.begin())); } + _collection->updateRevision(oldRevisionId, oldDoc.begin(), 0, false); // remove now obsolete new revision + if (oldRevisionId != newRevisionId) { + // we need to check for the same revision id here + try { + _collection->removeRevision(newRevisionId, true); + } catch (...) { + } + } + } else if (_type == TRI_VOC_DOCUMENT_OPERATION_REMOVE) { + TRI_ASSERT(!_oldRevision.empty()); + TRI_ASSERT(_newRevision.empty()); + try { - _collection->removeRevision(newRevisionId, true); + _collection->insertRevision(_oldRevision._revisionId, _oldRevision._vpack, 0, true); } catch (...) 
{ - // operation probably was never inserted - // TODO: decide whether we should rethrow here + } + + if (status != StatusType::CREATED) { + try { + // remove from indexes again + _collection->rollbackOperation(trx, _type, oldRevisionId, oldDoc, newRevisionId, newDoc); + } catch (...) { + } } } } - diff --git a/arangod/StorageEngine/MMFilesDocumentOperation.h b/arangod/StorageEngine/MMFilesDocumentOperation.h index b723c698ec..afe00f9912 100644 --- a/arangod/StorageEngine/MMFilesDocumentOperation.h +++ b/arangod/StorageEngine/MMFilesDocumentOperation.h @@ -21,8 +21,8 @@ /// @author Jan Steemann //////////////////////////////////////////////////////////////////////////////// -#ifndef ARANGOD_WAL_DOCUMENT_OPERATION_H -#define ARANGOD_WAL_DOCUMENT_OPERATION_H 1 +#ifndef ARANGOD_MMFILES_DOCUMENT_OPERATION_H +#define ARANGOD_MMFILES_DOCUMENT_OPERATION_H 1 #include "Basics/Common.h" #include "VocBase/voc-types.h" @@ -71,10 +71,6 @@ struct MMFilesDocumentOperation { _status = StatusType::HANDLED; } - void done() noexcept { - _status = StatusType::SWAPPED; - } - void revert(arangodb::Transaction*); private: diff --git a/arangod/V8Server/v8-collection.cpp b/arangod/V8Server/v8-collection.cpp index f7e5b9043f..a8b458d81c 100644 --- a/arangod/V8Server/v8-collection.cpp +++ b/arangod/V8Server/v8-collection.cpp @@ -945,7 +945,7 @@ static void JS_FiguresVocbaseCol( } //////////////////////////////////////////////////////////////////////////////// -/// @brief was docuBlock collectionLoad +/// @brief was docuBlock leaderResign //////////////////////////////////////////////////////////////////////////////// static void JS_LeaderResign(v8::FunctionCallbackInfo const& args) { @@ -981,13 +981,156 @@ static void JS_LeaderResign(v8::FunctionCallbackInfo const& args) { if (res != TRI_ERROR_NO_ERROR) { TRI_V8_THROW_EXCEPTION(res); } - trx.documentCollection()->followers()->clear(); + // do not reset followers at this time...we are still the only source of truth + // to trust... 
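+ // (the follower list is deliberately left intact on resignation; it is
+ // only cleared when leadership is assumed, see JS_AssumeLeadership below)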
+ //trx.documentCollection()->followers()->clear(); + trx.documentCollection()->followers()->setLeader(false); } TRI_V8_RETURN_UNDEFINED(); TRI_V8_TRY_CATCH_END } +//////////////////////////////////////////////////////////////////////////////// +/// @brief was docuBlock assumeLeadership +//////////////////////////////////////////////////////////////////////////////// + +static void JS_AssumeLeadership(v8::FunctionCallbackInfo const& args) { + TRI_V8_TRY_CATCH_BEGIN(isolate); + v8::HandleScope scope(isolate); + + TRI_vocbase_t* vocbase = GetContextVocBase(isolate); + + if (vocbase == nullptr) { + TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND); + } + + if (ServerState::instance()->isDBServer()) { + arangodb::LogicalCollection const* collection = + TRI_UnwrapClass(args.Holder(), + WRP_VOCBASE_COL_TYPE); + + if (collection == nullptr) { + TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection"); + } + + TRI_vocbase_t* vocbase = collection->vocbase(); + std::string collectionName = collection->name(); + if (vocbase == nullptr) { + TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND); + } + + auto transactionContext = std::make_shared(vocbase, true); + + SingleCollectionTransaction trx(transactionContext, collectionName, + TRI_TRANSACTION_READ); + int res = trx.begin(); + if (res != TRI_ERROR_NO_ERROR) { + TRI_V8_THROW_EXCEPTION(res); + } + trx.documentCollection()->followers()->clear(); + trx.documentCollection()->followers()->setLeader(true); + } + + TRI_V8_RETURN_UNDEFINED(); + TRI_V8_TRY_CATCH_END +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief was docuBlock isLeader +//////////////////////////////////////////////////////////////////////////////// + +static void JS_IsLeader(v8::FunctionCallbackInfo const& args) { + TRI_V8_TRY_CATCH_BEGIN(isolate); + v8::HandleScope scope(isolate); + + TRI_vocbase_t* vocbase = GetContextVocBase(isolate); + + if (vocbase == nullptr) { + TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND); + } + + bool b = false; + if (ServerState::instance()->isDBServer()) { + arangodb::LogicalCollection const* collection = + TRI_UnwrapClass(args.Holder(), + WRP_VOCBASE_COL_TYPE); + + if (collection == nullptr) { + TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection"); + } + + TRI_vocbase_t* vocbase = collection->vocbase(); + std::string collectionName = collection->name(); + if (vocbase == nullptr) { + TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND); + } + + auto realCollection = vocbase->lookupCollection(collectionName); + if (realCollection == nullptr) { + TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND); + } + b = realCollection->followers()->isLeader(); + } + + if (b) { + TRI_V8_RETURN_TRUE(); + } else { + TRI_V8_RETURN_FALSE(); + } + TRI_V8_TRY_CATCH_END +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief was docuBlock getFollowers +//////////////////////////////////////////////////////////////////////////////// + +static void JS_GetFollowers(v8::FunctionCallbackInfo const& args) { + TRI_V8_TRY_CATCH_BEGIN(isolate); + v8::HandleScope scope(isolate); + + TRI_vocbase_t* vocbase = GetContextVocBase(isolate); + + if (vocbase == nullptr) { + TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND); + } + + v8::Handle list = v8::Array::New(isolate); + if (ServerState::instance()->isDBServer()) { + arangodb::LogicalCollection const* collection = + TRI_UnwrapClass(args.Holder(), + WRP_VOCBASE_COL_TYPE); + + if 
(collection == nullptr) { + TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection"); + } + + TRI_vocbase_t* vocbase = collection->vocbase(); + std::string collectionName = collection->name(); + if (vocbase == nullptr) { + TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND); + } + + auto transactionContext = std::make_shared(vocbase, true); + + SingleCollectionTransaction trx(transactionContext, collectionName, + TRI_TRANSACTION_READ); + int res = trx.begin(); + if (res != TRI_ERROR_NO_ERROR) { + TRI_V8_THROW_EXCEPTION(res); + } + std::unique_ptr const& followerInfo = trx.documentCollection()->followers(); + std::shared_ptr const> followers = followerInfo->get(); + uint32_t i = 0; + for (auto const& n : *followers) { + list->Set(i++, TRI_V8_STD_STRING(n)); + } + } + + TRI_V8_RETURN(list); + TRI_V8_TRY_CATCH_END +} + //////////////////////////////////////////////////////////////////////////////// /// @brief was docuBlock collectionLoad //////////////////////////////////////////////////////////////////////////////// @@ -3213,6 +3356,12 @@ void TRI_InitV8Collection(v8::Handle context, JS_InsertVocbaseCol); TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("leaderResign"), JS_LeaderResign, true); + TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("assumeLeadership"), + JS_AssumeLeadership, true); + TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("isLeader"), + JS_IsLeader, true); + TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("getFollowers"), + JS_GetFollowers, true); TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("load"), JS_LoadVocbaseCol); TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("name"), diff --git a/arangod/VocBase/LogicalCollection.cpp b/arangod/VocBase/LogicalCollection.cpp index b0775790b7..16eceb3b02 100644 --- a/arangod/VocBase/LogicalCollection.cpp +++ b/arangod/VocBase/LogicalCollection.cpp @@ -2282,8 +2282,14 @@ int LogicalCollection::update(Transaction* trx, VPackSlice const newSlice, try { insertRevision(revisionId, marker->vpack(), 0, true); + operation.setRevisions(DocumentDescriptor(oldRevisionId, oldDoc.begin()), DocumentDescriptor(revisionId, newDoc.begin())); + + if (oldRevisionId == revisionId) { + // update with same revision id => can happen if isRestore = true + result.clear(0); + } res = updateDocument(trx, oldRevisionId, oldDoc, revisionId, newDoc, operation, marker, options.waitForSync); @@ -2439,8 +2445,14 @@ int LogicalCollection::replace(Transaction* trx, VPackSlice const newSlice, try { insertRevision(revisionId, marker->vpack(), 0, true); + operation.setRevisions(DocumentDescriptor(oldRevisionId, oldDoc.begin()), DocumentDescriptor(revisionId, newDoc.begin())); + + if (oldRevisionId == revisionId) { + // update with same revision id => can happen if isRestore = true + result.clear(0); + } res = updateDocument(trx, oldRevisionId, oldDoc, revisionId, newDoc, operation, marker, options.waitForSync); @@ -2455,6 +2467,10 @@ int LogicalCollection::replace(Transaction* trx, VPackSlice const newSlice, if (res != TRI_ERROR_NO_ERROR) { operation.revert(trx); } else { + if (oldRevisionId == revisionId) { + // update with same revision id => can happen if isRestore = true + result.clear(0); + } readRevision(trx, result, revisionId); if (options.waitForSync) { @@ -2590,6 +2606,11 @@ int LogicalCollection::remove(arangodb::Transaction* trx, TRI_IF_FAILURE("RemoveDocumentNoOperation") { THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG); } + + try { + removeRevision(oldRevisionId, true); + } catch (...) 
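+ // note: removal failures are deliberately swallowed here; at this point
+ // the revision may already have been cleaned up elsewhere, and remove()
+ // should not fail because of that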
{ + } TRI_IF_FAILURE("RemoveDocumentNoOperationExcept") { THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG); @@ -2683,6 +2704,11 @@ int LogicalCollection::remove(arangodb::Transaction* trx, } operation.indexed(); + + try { + removeRevision(oldRevisionId, true); + } catch (...) { + } TRI_IF_FAILURE("RemoveDocumentNoOperation") { THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG); @@ -2725,17 +2751,11 @@ int LogicalCollection::rollbackOperation(arangodb::Transaction* trx, TRI_ASSERT(newRevisionId != 0); TRI_ASSERT(!newDoc.isNone()); + removeRevisionCacheEntry(newRevisionId); + // ignore any errors we're getting from this deletePrimaryIndex(trx, newRevisionId, newDoc); deleteSecondaryIndexes(trx, newRevisionId, newDoc, true); - - // remove new revision - try { - removeRevision(newRevisionId, false); - } catch (...) { - // TODO: decide whether we should rethrow here - } - return TRI_ERROR_NO_ERROR; } @@ -2745,6 +2765,10 @@ int LogicalCollection::rollbackOperation(arangodb::Transaction* trx, TRI_ASSERT(!oldDoc.isNone()); TRI_ASSERT(newRevisionId != 0); TRI_ASSERT(!newDoc.isNone()); + + removeRevisionCacheEntry(oldRevisionId); + removeRevisionCacheEntry(newRevisionId); + // remove the current values from the indexes deleteSecondaryIndexes(trx, newRevisionId, newDoc, true); // re-insert old state @@ -2757,6 +2781,8 @@ int LogicalCollection::rollbackOperation(arangodb::Transaction* trx, TRI_ASSERT(!oldDoc.isNone()); TRI_ASSERT(newRevisionId == 0); TRI_ASSERT(newDoc.isNone()); + + removeRevisionCacheEntry(oldRevisionId); int res = insertPrimaryIndex(trx, oldRevisionId, oldDoc); @@ -3292,8 +3318,6 @@ int LogicalCollection::updateDocument( // rollback deleteSecondaryIndexes(trx, newRevisionId, newDoc, true); insertSecondaryIndexes(trx, oldRevisionId, oldDoc, true); - removeRevision(newRevisionId, false); - return res; } @@ -3308,6 +3332,16 @@ int LogicalCollection::updateDocument( } operation.indexed(); + + if (oldRevisionId != newRevisionId) { + try { + removeRevision(oldRevisionId, true); + } catch (...) 
{ + } + } else { + // clear readcache entry for the revision + removeRevisionCacheEntry(oldRevisionId); + } TRI_IF_FAILURE("UpdateDocumentNoOperation") { return TRI_ERROR_DEBUG; } @@ -3770,13 +3804,18 @@ bool LogicalCollection::updateRevisionConditional( void LogicalCollection::removeRevision(TRI_voc_rid_t revisionId, bool updateStats) { // clean up cache entry - TRI_ASSERT(_revisionsCache); - _revisionsCache->removeRevision(revisionId); + removeRevisionCacheEntry(revisionId); // and remove from storage engine getPhysical()->removeRevision(revisionId, updateStats); } +void LogicalCollection::removeRevisionCacheEntry(TRI_voc_rid_t revisionId) { + // clean up cache entry + TRI_ASSERT(_revisionsCache); + _revisionsCache->removeRevision(revisionId); +} + /// @brief a method to skip certain documents in AQL write operations, /// this is only used in the enterprise edition for smart graphs #ifndef USE_ENTERPRISE diff --git a/arangod/VocBase/LogicalCollection.h b/arangod/VocBase/LogicalCollection.h index baab01863b..587f44c267 100644 --- a/arangod/VocBase/LogicalCollection.h +++ b/arangod/VocBase/LogicalCollection.h @@ -390,6 +390,7 @@ class LogicalCollection { void updateRevision(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal); bool updateRevisionConditional(TRI_voc_rid_t revisionId, TRI_df_marker_t const* oldPosition, TRI_df_marker_t const* newPosition, TRI_voc_fid_t newFid, bool isInWal); void removeRevision(TRI_voc_rid_t revisionId, bool updateStats); + void removeRevisionCacheEntry(TRI_voc_rid_t revisionId); private: // SECTION: Index creation diff --git a/arangod/VocBase/transaction.cpp b/arangod/VocBase/transaction.cpp index 91e1d73276..25aa393bad 100644 --- a/arangod/VocBase/transaction.cpp +++ b/arangod/VocBase/transaction.cpp @@ -193,7 +193,6 @@ static void FreeOperations(arangodb::Transaction* activeTrx, TRI_transaction_t* try { op->revert(activeTrx); } catch (...) { - // TODO: decide whether we should rethrow here } delete op; } @@ -201,10 +200,7 @@ static void FreeOperations(arangodb::Transaction* activeTrx, TRI_transaction_t* // no rollback. simply delete all operations for (auto it = trxCollection->_operations->rbegin(); it != trxCollection->_operations->rend(); ++it) { - MMFilesDocumentOperation* op = (*it); - - //op->done(); // set to done so dtor of DocumentOperation won't fail - delete op; + delete (*it); } } diff --git a/arangod/VocBase/voc-types.h b/arangod/VocBase/voc-types.h index 851b1ab3e2..12eb029be0 100644 --- a/arangod/VocBase/voc-types.h +++ b/arangod/VocBase/voc-types.h @@ -121,14 +121,18 @@ struct DocumentDescriptor { DocumentDescriptor(TRI_voc_rid_t revisionId, uint8_t const* vpack) : _revisionId(revisionId), _vpack(vpack) {} bool empty() const { return _vpack == nullptr; } + void reset(DocumentDescriptor const& other) { _revisionId = other._revisionId; _vpack = other._vpack; } + +/* void reset(TRI_voc_rid_t revisionId, uint8_t const* vpack) { _revisionId = revisionId; _vpack = vpack; } +*/ void clear() { _revisionId = 0; _vpack = nullptr; diff --git a/arangod/Wal/LogfileManager.cpp b/arangod/Wal/LogfileManager.cpp index b3f911f46b..ea3c40d7dc 100644 --- a/arangod/Wal/LogfileManager.cpp +++ b/arangod/Wal/LogfileManager.cpp @@ -869,10 +869,11 @@ int LogfileManager::flush(bool waitForSync, bool waitForCollector, if (res == TRI_ERROR_NO_ERROR) { // we need to wait for the collector... 
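(A note on the wait logic changed below: waitForCollector previously counted
iterations of a fixed sleep period; the patch replaces this with a wall-clock
deadline and logs lock timeouts explicitly. Reduced to its core, the new wait
shape is roughly the following sketch, which substitutes std::condition_variable
for the CONDITION_LOCKER/waitForResult machinery; all names are illustrative.)

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // wait until done() holds or maxWaitTime seconds have elapsed; returns
    // false on timeout, which the caller maps to TRI_ERROR_LOCK_TIMEOUT
    template <typename Predicate>
    bool waitWithDeadline(std::mutex& mutex, std::condition_variable& cv,
                          double maxWaitTime, Predicate done) {
      if (maxWaitTime <= 0.0) {
        maxWaitTime = 24.0 * 3600.0;  // wait "forever", i.e. one day
      }
      auto const deadline = std::chrono::steady_clock::now() +
          std::chrono::duration_cast<std::chrono::steady_clock::duration>(
              std::chrono::duration<double>(maxWaitTime));
      std::unique_lock<std::mutex> guard(mutex);
      while (!done()) {
        // re-check the predicate after every wake-up (spurious or signaled)
        if (cv.wait_until(guard, deadline) == std::cv_status::timeout) {
          return done();
        }
      }
      return true;
    }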
- // LOG(TRACE) << "entering waitForCollector with lastOpenLogfileId " << // - // (unsigned long long) lastOpenLogfileId; + // LOG(TRACE) << "entering waitForCollector with lastOpenLogfileId " << lastOpenLogfileId; res = this->waitForCollector(lastOpenLogfileId, maxWaitTime); + if (res == TRI_ERROR_LOCK_TIMEOUT) { + LOG(ERR) << "got lock timeout when waiting for WAL flush. lastOpenLogfileId: " << lastOpenLogfileId; } } else if (res == TRI_ERROR_ARANGO_DATAFILE_EMPTY) { // current logfile is empty and cannot be collected @@ -881,6 +882,10 @@ int LogfileManager::flush(bool waitForSync, bool waitForCollector, if (lastSealedLogfileId > 0) { res = this->waitForCollector(lastSealedLogfileId, maxWaitTime); + + if (res == TRI_ERROR_LOCK_TIMEOUT) { + LOG(ERR) << "got lock timeout when waiting for WAL flush. lastSealedLogfileId: " << lastSealedLogfileId; + } } } } @@ -1305,10 +1310,6 @@ Logfile* LogfileManager::getLogfile(Logfile::IdType id, int LogfileManager::getWriteableLogfile(uint32_t size, Logfile::StatusType& status, Logfile*& result) { - static uint64_t const SleepTime = 10 * 1000; - double const end = TRI_microtime() + 15.0; - size_t iterations = 0; - // always initialize the result result = nullptr; @@ -1316,6 +1317,9 @@ int LogfileManager::getWriteableLogfile(uint32_t size, // intentionally don't return a logfile return TRI_ERROR_DEBUG; } + + size_t iterations = 0; + double const end = TRI_microtime() + 15.0; while (true) { { @@ -1363,7 +1367,7 @@ int LogfileManager::getWriteableLogfile(uint32_t size, _allocatorThread->signal(size); } - int res = _allocatorThread->waitForResult(SleepTime); + int res = _allocatorThread->waitForResult(15000); if (res != TRI_ERROR_LOCK_TIMEOUT && res != TRI_ERROR_NO_ERROR) { TRI_ASSERT(result == nullptr); @@ -1680,43 +1684,42 @@ void LogfileManager::waitForCollector() { // wait until a specific logfile has been collected int LogfileManager::waitForCollector(Logfile::IdType logfileId, double maxWaitTime) { - static int64_t const SingleWaitPeriod = 50 * 1000; - - int64_t maxIterations = INT64_MAX; // wait forever - if (maxWaitTime > 0.0) { - // if specified, wait for a shorter period of time - maxIterations = static_cast(maxWaitTime * 1000000.0 / - (double)SingleWaitPeriod); - LOG(TRACE) << "will wait for max. " << maxWaitTime - << " seconds for collector to finish"; + if (maxWaitTime <= 0.0) { + maxWaitTime = 24.0 * 3600.0; // wait "forever" } LOG(TRACE) << "waiting for collector thread to collect logfile " << logfileId; // wait for the collector thread to finish the collection - int64_t iterations = 0; + double const end = TRI_microtime() + maxWaitTime; - while (++iterations < maxIterations) { + while (true) { if (_lastCollectedId >= logfileId) { return TRI_ERROR_NO_ERROR; } - int res = _collectorThread->waitForResult(SingleWaitPeriod); + int res = _collectorThread->waitForResult(50 * 1000); // LOG(TRACE) << "still waiting for collector. logfileId: " << logfileId << - // " lastCollected: - // " << // _lastCollectedId << ", result: " << res; + // " lastCollected: " << _lastCollectedId << ", result: " << res; if (res != TRI_ERROR_LOCK_TIMEOUT && res != TRI_ERROR_NO_ERROR) { // some error occurred return res; } + double const now = TRI_microtime(); + + if (now > end) { + break; + } + + usleep(20000); // try again } // TODO: remove debug info here - LOG(ERR) << "going into lock timeout. having waited for logfile: " << logfileId << ", maxIterations: " << maxIterations << ", maxWaitTime: " << maxWaitTime; + LOG(ERR) << "going into lock timeout. 
having waited for logfile: " << logfileId << ", maxWaitTime: " << maxWaitTime; logStatus(); // waited for too long diff --git a/js/client/modules/@arangodb/testing.js b/js/client/modules/@arangodb/testing.js index 49f8f0eeb9..77b99d6950 100644 --- a/js/client/modules/@arangodb/testing.js +++ b/js/client/modules/@arangodb/testing.js @@ -34,6 +34,8 @@ const functionsDocumentation = { 'authentication_parameters': 'authentication parameters tests', 'boost': 'boost test suites', 'config': 'checks the config file parsing', + 'client_resilience': 'client resilience tests', + 'cluster_sync': 'cluster sync tests', 'dump': 'dump tests', 'dump_authentication': 'dump tests with authentication', 'dfdb': 'start test', @@ -48,7 +50,6 @@ const functionsDocumentation = { 'replication_static': 'replication static tests', 'replication_sync': 'replication sync tests', 'resilience': 'resilience tests', - 'client_resilience': 'client resilience tests', 'shell_client': 'shell client tests', 'shell_replication': 'shell replication tests', 'shell_server': 'shell server tests', @@ -158,7 +159,9 @@ const optionsDefaults = { 'loopEternal': false, 'loopSleepSec': 1, 'loopSleepWhen': 1, + 'minPort': 1024, 'maxPort': 32768, + 'mochaGrep': undefined, 'onlyNightly': false, 'password': '', 'replication': false, @@ -627,20 +630,32 @@ function cleanupDBDirectories (options) { // / @brief finds a free port // ////////////////////////////////////////////////////////////////////////////// -function findFreePort (maxPort) { +function findFreePort (minPort, maxPort, usedPorts) { if (typeof maxPort !== 'number') { maxPort = 32768; } - if (maxPort < 2048) { - maxPort = 2048; + + if (maxPort - minPort < 0) { + throw new Error('minPort ' + minPort + ' is smaller than maxPort ' + maxPort); } + + let tries = 0; while (true) { - const port = Math.floor(Math.random() * (maxPort - 1024)) + 1024; + const port = Math.floor(Math.random() * (maxPort - minPort)) + minPort; + tries++; + if (tries > 20) { + throw new Error('Couldn\'t find a port after ' + tries + ' tries. portrange of ' + minPort + ', ' + maxPort + ' too narrow?'); + } + if (Array.isArray(usedPorts) && usedPorts.indexOf(port) >= 0) { + continue; + } const free = testPort('tcp://0.0.0.0:' + port); if (free) { return port; } + + require('internal').wait(0.1); } } @@ -667,13 +682,13 @@ function makePathGeneric (path) { function runThere (options, instanceInfo, file) { try { let testCode; - + let mochaGrep = options.mochaGrep ? 
', ' + JSON.stringify(options.mochaGrep) : ''; if (file.indexOf('-spec') === -1) { testCode = 'const runTest = require("jsunity").runTest; ' + 'return runTest(' + JSON.stringify(file) + ', true);'; } else { testCode = 'const runTest = require("@arangodb/mocha-runner"); ' + - 'return runTest(' + JSON.stringify(file) + ', true);'; + 'return runTest(' + JSON.stringify(file) + ', true' + mochaGrep + ');'; } if (options.propagateInstanceInfo) { @@ -1323,10 +1338,13 @@ function startInstanceCluster (instanceInfo, protocol, options, options.agencyWaitForSync = false; startInstanceAgency(instanceInfo, protocol, options, ...makeArgs('agency', 'agency', {})); + let usedPorts = []; let agencyEndpoint = instanceInfo.endpoint; let i; for (i = 0; i < options.dbServers; i++) { - let endpoint = protocol + '://127.0.0.1:' + findFreePort(options.maxPort); + let port = findFreePort(options.minPort, options.maxPort, usedPorts); + usedPorts.push(port); + let endpoint = protocol + '://127.0.0.1:' + port; let primaryArgs = _.clone(options.extraArgs); primaryArgs['server.endpoint'] = endpoint; primaryArgs['cluster.my-address'] = endpoint; @@ -1338,7 +1356,9 @@ function startInstanceCluster (instanceInfo, protocol, options, } for (i=0;i 0); + return current; } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief return a shardId => server map -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function getShardMap (plannedCollections) { var shardMap = { }; @@ -235,9 +234,9 @@ function getShardMap (plannedCollections) { return shardMap; } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief return the indexes of a collection as a map -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function getIndexMap (shard) { var indexes = { }, i; @@ -254,24 +253,28 @@ function getIndexMap (shard) { return indexes; } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief return a hash with the local databases -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function getLocalDatabases () { - var result = { }; - var db = require('internal').db; - - db._databases().forEach(function (database) { - result[database] = { name: database }; - }); - + let result = { }; + let db = require('internal').db; + let curDb = db._name(); + try { + db._databases().forEach(function (database) { + db._useDatabase(database); + result[database] = { name: database, id: db._id() }; + }); + } finally { + db._useDatabase(curDb); + } return result; } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief return a hash with the local collections -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function getLocalCollections 
() { var result = { }; @@ -286,7 +289,8 @@ function getLocalCollections () { name: name, type: collection.type(), status: collection.status(), - planId: collection.planId() + planId: collection.planId(), + isLeader: collection.isLeader() }; // merge properties @@ -305,659 +309,31 @@ function getLocalCollections () { return result; } -// ////////////////////////////////////////////////////////////////////////////// -// / @brief create databases if they exist in the plan but not locally -// ////////////////////////////////////////////////////////////////////////////// - -function createLocalDatabases (plannedDatabases, currentDatabases) { - var ourselves = global.ArangoServerState.id(); - var createDatabaseAgency = function (payload) { - var envelope = {}; - envelope[curDatabases + payload.name + '/' + ourselves] = - { 'op': 'set', 'new': payload }; - envelope[curVersion] = {"op":"increment"}; - global.ArangoAgency.write([[envelope]]); - }; - - var db = require('internal').db; - db._useDatabase('_system'); - - var localDatabases = getLocalDatabases(); - var name; - - // check which databases need to be created locally - for (name in plannedDatabases) { - if (plannedDatabases.hasOwnProperty(name)) { - var payload = plannedDatabases[name]; - payload.error = false; - payload.errorNum = 0; - payload.errorMessage = 'no error'; - - if (!localDatabases.hasOwnProperty(name)) { - // must create database - - // TODO: handle options and user information - - console.debug("creating local database '%s'", payload.name); - - try { - db._createDatabase(payload.name); - payload.error = false; - payload.errorNum = 0; - payload.errorMessage = 'no error'; - } catch (err) { - payload.error = true; - payload.errorNum = err.errorNum; - payload.errorMessage = err.errorMessage; - } - createDatabaseAgency(payload); - } else if (typeof currentDatabases[name] !== 'object' || !currentDatabases[name].hasOwnProperty(ourselves)) { - // mop: ok during cluster startup we have this buggy situation where a dbserver - // has a database but has not yet announced it to the agency :S - createDatabaseAgency(payload); - } - } - } -} - -// ////////////////////////////////////////////////////////////////////////////// -// / @brief drop databases if they do exist locally but not in the plan -// ////////////////////////////////////////////////////////////////////////////// - -function dropLocalDatabases (plannedDatabases) { - var ourselves = global.ArangoServerState.id(); - - var dropDatabaseAgency = function (payload) { - try { - var envelope = {}; - envelope[curDatabases + payload.name + '/' + ourselves] = {'op':'delete'}; - envelope[curVersion] = {'op':'increment'}; - global.ArangoAgency.write([[envelope]]); - } catch (err) { - // ignore errors - } - }; - - var db = require('internal').db; - db._useDatabase('_system'); - - var localDatabases = getLocalDatabases(); - var name; - - // check which databases need to be deleted locally - for (name in localDatabases) { - if (localDatabases.hasOwnProperty(name)) { - if (!plannedDatabases.hasOwnProperty(name) && name.substr(0, 1) !== '_') { - // must drop database - - console.info("dropping local database '%s'", name); - - // Do we have to stop a replication applier first? 
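(The rewritten getLocalDatabases above has to switch into each database to
read its id, so it restores the previously selected database in a finally
block. The same save/switch/restore shape as a standalone sketch; `db` stands
for arangod's require('internal').db, and the helper name is hypothetical.)

    // run fn() with `name` as the current database, always switching back
    // afterwards, even if fn() throws
    function withDatabase (db, name, fn) {
      let previous = db._name();
      db._useDatabase(name);
      try {
        return fn();
      } finally {
        db._useDatabase(previous);
      }
    }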
- if (ArangoServerState.role() === 'SECONDARY') { - try { - db._useDatabase(name); - var rep = require('@arangodb/replication'); - var state = rep.applier.state(); - if (state.state.running === true) { - console.info('stopping replication applier first'); - rep.applier.stop(); - } - } - finally { - db._useDatabase('_system'); - } - } - db._dropDatabase(name); - - dropDatabaseAgency({name: name}); - } - } - } -} - -// ////////////////////////////////////////////////////////////////////////////// -// / @brief clean up what's in Current/Databases for ourselves -// ////////////////////////////////////////////////////////////////////////////// - -function cleanupCurrentDatabases (currentDatabases) { - var ourselves = global.ArangoServerState.id(); - - var dropDatabaseAgency = function (payload) { - try { - var envelope = {}; - envelope[curDatabases + payload.name + '/' + ourselves] = {'op':'delete'}; - envelope[curVersion] = {'op':'increment'}; - global.ArangoAgency.write([[envelope]]); - } catch (err) { - // ignore errors - } - }; - - var db = require('internal').db; - db._useDatabase('_system'); - - var localDatabases = getLocalDatabases(); - var name; - - for (name in currentDatabases) { - if (currentDatabases.hasOwnProperty(name) && name.substr(0, 1) !== '_') { - if (!localDatabases.hasOwnProperty(name)) { - // we found a database we don't have locally - - if (currentDatabases[name].hasOwnProperty(ourselves)) { - // we are entered for a database that we don't have locally - console.debug("cleaning up entry for unknown database '%s'", name); - - dropDatabaseAgency({name: name}); - } - } - } - } -} - -// ////////////////////////////////////////////////////////////////////////////// -// / @brief handle database changes -// ////////////////////////////////////////////////////////////////////////////// - -function handleDatabaseChanges (plan, current) { - var plannedDatabases = plan.Databases; - var currentDatabases = current.Databases; - - createLocalDatabases(plannedDatabases, currentDatabases); - dropLocalDatabases(plannedDatabases); - cleanupCurrentDatabases(currentDatabases); -} - -// ////////////////////////////////////////////////////////////////////////////// -// / @brief create collections if they exist in the plan but not locally -// ////////////////////////////////////////////////////////////////////////////// - -function createLocalCollections (plannedCollections, planVersion, - currentCollections, takeOverResponsibility) { - var ourselves = global.ArangoServerState.id(); - - var createCollectionAgency = function (database, shard, collInfo, error) { - - var payload = { - error: error.error, - errorNum: error.errorNum, - errorMessage: error.errorMessage, - satellite: collInfo.replicationFactor === 0, - indexes: collInfo.indexes, - servers: [ ourselves ], - planVersion: planVersion }; - - console.debug('creating Current/Collections/' + database + '/' + - collInfo.planId + '/' + shard); - - var envelope = {}; - envelope[curCollections + database + '/' + collInfo.planId + '/' + shard] = - { 'op': 'set', 'new': payload }; - envelope[curVersion] = {'op':'increment'}; - var ret = global.ArangoAgency.write([[envelope]]); - - console.debug('creating Current/Collections/' + database + '/' + - collInfo.planId + '/' + shard + ' done.'); - }; - - var takeOver = createCollectionAgency; - - var db = require('internal').db; - db._useDatabase('_system'); - - var migrate = writeLocked => { - var localDatabases = getLocalDatabases(); - var database; - var i; - - // iterate over all matching databases - 
for (database in plannedCollections) { - if (plannedCollections.hasOwnProperty(database)) { - if (localDatabases.hasOwnProperty(database)) { - // switch into other database - db._useDatabase(database); - - try { - // iterate over collections of database - var localCollections = getLocalCollections(); - - var collections = plannedCollections[database]; - - // diff the collections - Object.keys(collections).forEach(function (collection) { - var collInfo = collections[collection]; - var shards = collInfo.shards; - var shard; - - collInfo.planId = collInfo.id; - var save = [collInfo.id, collInfo.name]; - delete collInfo.id; // must not actually set it here - delete collInfo.name; // name is now shard - - for (shard in shards) { - if (shards.hasOwnProperty(shard)) { - var didWrite = false; - if (shards[shard].indexOf(ourselves) >= 0) { - var isLeader = shards[shard][0] === ourselves; - var wasLeader = isLeader; - try { - var currentServers = currentCollections[database][collection][shard].servers; - wasLeader = currentServers[0] === ourselves; - } catch(err) {} - - // found a shard we are responsible for - - var error = { error: false, errorNum: 0, - errorMessage: 'no error' }; - - if (!localCollections.hasOwnProperty(shard)) { - // must create this shard - console.debug("creating local shard '%s/%s' for central '%s/%s'", - database, - shard, - database, - collInfo.planId); - - try { - if (collInfo.type === ArangoCollection.TYPE_EDGE) { - db._createEdgeCollection(shard, collInfo); - } else { - db._create(shard, collInfo); - } - } catch (err2) { - error = { error: true, errorNum: err2.errorNum, - errorMessage: err2.errorMessage }; - console.error("creating local shard '%s/%s' for central '%s/%s' failed: %s", - database, - shard, - database, - collInfo.planId, - JSON.stringify(err2)); - } - - if (isLeader) { - createCollectionAgency(database, shard, collInfo, error); - didWrite = true; - } - } else { - if (!isLeader && wasLeader) { - db._collection(shard).leaderResign(); - } - - if (localCollections[shard].status !== collInfo.status) { - console.info("detected status change for local shard '%s/%s'", - database, - shard); - - if (collInfo.status === ArangoCollection.STATUS_UNLOADED) { - console.info("unloading local shard '%s/%s'", - database, - shard); - db._collection(shard).unload(); - } else if (collInfo.status === ArangoCollection.STATUS_LOADED) { - console.info("loading local shard '%s/%s'", - database, - shard); - db._collection(shard).load(); - } - if (isLeader) { - createCollectionAgency(database, shard, collInfo, error); - didWrite = true; - } - } - - // collection exists, now compare collection properties - var properties = { }; - var cmp = [ 'journalSize', 'waitForSync', 'doCompact', - 'indexBuckets' ]; - for (i = 0; i < cmp.length; ++i) { - var p = cmp[i]; - if (localCollections[shard][p] !== collInfo[p]) { - // property change - properties[p] = collInfo[p]; - } - } - - if (Object.keys(properties).length > 0) { - console.info("updating properties for local shard '%s/%s'", - database, - shard); - - try { - db._collection(shard).properties(properties); - } catch (err3) { - error = { error: true, errorNum: err3.errorNum, - errorMessage: err3.errorMessage }; - } - if (isLeader) { - createCollectionAgency(database, shard, collInfo, error); - didWrite = true; - } - } - } - - if (error.error) { - if (takeOverResponsibility && !didWrite) { - if (isLeader) { - takeOver(database, shard, collInfo, error); - } - } - continue; // No point to look for properties and - // indices, if the creation 
has not worked - } - - var indexes = getIndexMap(shard); - var idx; - var index; - - if (collInfo.hasOwnProperty('indexes')) { - for (i = 0; i < collInfo.indexes.length; ++i) { - index = collInfo.indexes[i]; - - var changed = false; - - if (index.type !== 'primary' && index.type !== 'edge' && - !indexes.hasOwnProperty(index.id)) { - console.debug("creating index '%s/%s': %s", - database, - shard, - JSON.stringify(index)); - - try { - arangodb.db._collection(shard).ensureIndex(index); - index.error = false; - index.errorNum = 0; - index.errorMessage = ''; - } catch (err5) { - index.error = true; - index.errorNum = err5.errorNum; - index.errorMessage = err5.errorMessage; - } - - changed = true; - } - if (changed && isLeader) { - createCollectionAgency(database, shard, collInfo, error); - didWrite = true; - } - } - - var changed2 = false; - for (idx in indexes) { - if (indexes.hasOwnProperty(idx)) { - // found an index in the index map, check if it must be deleted - - if (indexes[idx].type !== 'primary' && indexes[idx].type !== 'edge') { - var found = false; - for (i = 0; i < collInfo.indexes.length; ++i) { - if (collInfo.indexes[i].id === idx) { - found = true; - break; - } - } - - if (!found) { - // found an index to delete locally - changed2 = true; - index = indexes[idx]; - - console.info("dropping index '%s/%s': %s", - database, - shard, - JSON.stringify(index)); - - arangodb.db._collection(shard).dropIndex(index); - - delete indexes[idx]; - collInfo.indexes.splice(i, i); - } - } - } - } - if (changed2 && isLeader) { - createCollectionAgency(database, shard, collInfo, error); - didWrite = true; - } - } - - if ((takeOverResponsibility && !didWrite && isLeader) || - (!didWrite && isLeader && !wasLeader)) { - takeOver(database, shard, collInfo, error); - } - } - } - } - collInfo.id = save[0]; - collInfo.name = save[1]; - }); - } catch (err) { - // always return to previous database - db._useDatabase('_system'); - throw err; - } - - db._useDatabase('_system'); - } - } - } - }; - - if (takeOverResponsibility) { - // mop: if this is a complete takeover we need a global lock because - // otherwise the coordinator might fetch results which are only partly - // migrated - var fakeLock = (lockInfo, cb, args) => { - if (!lockInfo || lockInfo.part !== 'Current') { - throw new Error('Invalid lockInfo ' + JSON.stringify(lockInfo)); - } - return cb(...args); - }; - migrate(fakeLock); - //writeLocked({ part: 'Current' }, migrate, [fakeLock]); - } else { - migrate(); - } -} - -function leaderResign (database, collId, shardName, ourselves) { +function organiseLeaderResign (database, collId, shardName) { console.info("trying to withdraw as leader of shard '%s/%s' of '%s/%s'", database, shardName, database, collId); + // This starts a write transaction, just to wait for any ongoing + // write transaction on this shard to terminate. We will then later + // report to Current about this resignation. If a new write operation + // starts in the meantime (which is unlikely, since no coordinator that + // has seen the _ will start a new one), it is doomed, and we ignore the + // problem, since similar problems can arise in failover scenarios anyway. try { + // we know the shard exists locally! 
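+ // order matters: leaderResign() below flips the local leader flag first,
+ // then the empty write transaction acts as a barrier that drains all
+ // in-flight write transactions on the shard before we report to Current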
var db = require('internal').db; + db._collection(shardName).leaderResign(); db._executeTransaction( { 'collections': { 'write': [shardName] }, - 'action': function () { - var path = curCollections + database + '/' + collId + '/' + - shardName + '/servers'; - var servers = global.ArangoAgency.read([[path]])[0] - .arango.Current.Collections[database][collId][shardName].servers; - if (servers[0] === ourselves) { - servers[0] = '_' + ourselves; - - var envelope = {}; - envelope[path] = {'op':'set', 'new':servers}; - envelope[curVersion] = {'op':'increment'}; - global.ArangoAgency.write([[envelope]]); - } - } }); + 'action': function () { } + }); } catch (x) { console.error('exception thrown when resigning:', x); } } -// ////////////////////////////////////////////////////////////////////////////// -// / @brief drop collections if they exist locally but not in the plan -// ////////////////////////////////////////////////////////////////////////////// - -function dropLocalCollections (plannedCollections, currentCollections) { - var ourselves = global.ArangoServerState.id(); - - var dropCollectionAgency = function (database, shardID, id) { - try { - console.debug('dropping Current/Collections/' + database + '/' + - id + '/' + shardID); - var envelope = {}; - envelope[curCollections + database + '/' + id + '/' + shardID] = - {'op':'delete'}; - envelope[curVersion] = {'op':'increment'}; - global.ArangoAgency.write([[envelope]]); - console.debug('dropping Current/Collections/' + database + '/' + - id + '/' + shardID + ' done.'); - } catch (err) { - // ignore errors - } - }; - - var db = require('internal').db; - db._useDatabase('_system'); - var shardMap = getShardMap(plannedCollections); - - var localDatabases = getLocalDatabases(); - var database; - - // iterate over all databases - for (database in localDatabases) { - if (localDatabases.hasOwnProperty(database)) { - var removeAll = !plannedCollections.hasOwnProperty(database); - - // switch into other database - db._useDatabase(database); - - try { - // iterate over collections of database - var collections = getLocalCollections(); - var collection; - - for (collection in collections) { - if (collections.hasOwnProperty(collection)) { - // found a local collection - // check if it is in the plan and we are responsible for it - - var remove = removeAll || - (!shardMap.hasOwnProperty(collection)) || - (shardMap[collection].indexOf(ourselves) === -1); - - if (remove) { - var currentServers; - // May be we have been the leader and are asked to withdraw: - if (shardMap.hasOwnProperty(collection) && - shardMap[collection][0] === '_' + ourselves) { - try { - currentServers = currentCollections[database][collections[collection].planId][collection].servers; - } catch (err2) { - currentServers = []; - } - if (currentServers[0] === ourselves) { - leaderResign(database, collections[collection].planId, - collection, ourselves); - } - } else { - // Remove us from the follower list, this is a best effort, - // we might actually have been the leader ourselves, in which - // case we try to unfollow the new leader, no problem, we - // simply ignore any errors. 
If a proper error occurs, this - // is also no problem, since the leader will soon notice - // that the shard here is gone and will drop us automatically: - var servers = shardMap[collection]; - try { - currentServers = currentCollections[database][collections[collection].planId][collection].servers; - } catch (err2) { - currentServers = []; - } - if (servers !== undefined && - currentServers.indexOf(ourselves) >= 0) { - var endpoint = ArangoClusterInfo.getServerEndpoint(servers[0]); - try { - removeShardFollower(endpoint, database, collection); - } catch (err) {} - } - console.info("dropping local shard '%s/%s' of '%s/%s", - database, - collection, - database, - collections[collection].planId); - - db._drop(collection); - - if (removeAll || !shardMap.hasOwnProperty(collection)) { - console.debug('cleaning out Current entry for shard %s in', - 'agency for %s/%s', collection, database, - collections[collection].name); - dropCollectionAgency(database, collection, collections[collection].planId); - } - } - } - } - } - } catch (err) { - db._useDatabase('_system'); - throw err; - } - db._useDatabase('_system'); - } - } -} - -// ////////////////////////////////////////////////////////////////////////////// -// / @brief clean up what's in Current/Collections for ourselves -// ////////////////////////////////////////////////////////////////////////////// - -function cleanupCurrentCollections (plannedCollections, currentCollections) { - var dropCollectionAgency = function (database, collection, shardID) { - try { - console.debug('cleaning Current/Collections/' + database + '/' + - collection + '/' + shardID); - var envelope = {}; - envelope[curCollections + database + '/' + collection + '/' + shardID] = - {'op':'delete'}; - envelope[curVersion] = {'op':'increment'}; - global.ArangoAgency.write([[envelope]]); - console.debug('cleaning Current/Collections/' + database + '/' + - collection + '/' + shardID + ' done.'); - } catch (err) { - // ignore errors - } - }; - - var db = require('internal').db; - db._useDatabase('_system'); - - var shardMap = getShardMap(plannedCollections); - var database; - - for (database in currentCollections) { - if (currentCollections.hasOwnProperty(database)) { - var collections = currentCollections[database]; - var collection; - - for (collection in collections) { - if (collections.hasOwnProperty(collection)) { - var shards = collections[collection]; - var shard; - - for (shard in shards) { - if (shards.hasOwnProperty(shard)) { - if (!shardMap.hasOwnProperty(shard)) { - // found an entry in current of a shard that is no longer - // mentioned in the plan - console.info("cleaning up entry for shard '%s' of '%s/%s", - shard, - database, - collection); - - dropCollectionAgency(database, collection, shard); - } - } - } - } - } - } - } -} - -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief lock key space -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function lockSyncKeyspace () { while (!global.KEY_SET_CAS('shardSynchronization', 'lock', 1, null)) { @@ -965,17 +341,17 @@ function lockSyncKeyspace () { } } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief unlock key space -// 
////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function unlockSyncKeyspace () { global.KEY_SET('shardSynchronization', 'lock', null); } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief launch a scheduled job if needed -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function tryLaunchJob () { const registerTask = require('internal').registerTask; @@ -1033,9 +409,9 @@ function tryLaunchJob () { } } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief synchronize one shard, this is run as a V8 task -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function synchronizeOneShard (database, shard, planId, leader) { // synchronize this shard from the leader @@ -1183,9 +559,9 @@ function synchronizeOneShard (database, shard, planId, leader) { database, shard, database, planId); } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief schedule a shard synchronization -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function scheduleOneShardSynchronization (database, shard, planId, leader) { console.debug('scheduleOneShardSynchronization:', database, shard, planId, @@ -1222,115 +598,695 @@ function scheduleOneShardSynchronization (database, shard, planId, leader) { return true; } -// ////////////////////////////////////////////////////////////////////////////// -// / @brief synchronize collections for which we are followers (synchronously -// / replicated shards) -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// +// / @brief executePlanForCollections +// ///////////////////////////////////////////////////////////////////////////// -function synchronizeLocalFollowerCollections (plannedCollections, - currentCollections) { - if (typeof currentCollections !== 'object') { - throw new Error('Current.Collections is not an object!'); - } - var ourselves = global.ArangoServerState.id(); +function executePlanForCollections(plannedCollections) { + let ourselves = global.ArangoServerState.id(); + let localErrors = {}; - var db = require('internal').db; + let db = require('internal').db; db._useDatabase('_system'); - var localDatabases = getLocalDatabases(); - var database; - // iterate over all matching databases - for (database in plannedCollections) { - if (plannedCollections.hasOwnProperty(database)) { - if (localDatabases.hasOwnProperty(database)) { - // switch into other database - db._useDatabase(database); + let localDatabases = getLocalDatabases(); + // Create shards in Plan that are not there locally: + Object.keys(plannedCollections).forEach(database => { + if (localDatabases.hasOwnProperty(database)) { + // switch into other database + db._useDatabase(database); - try 
{ - // iterate over collections of database - var collections = plannedCollections[database]; - var collection; + try { + // iterate over collections of database + let localCollections = getLocalCollections(); + let collections = plannedCollections[database]; - // diff the collections - for (collection in collections) { - if (collections.hasOwnProperty(collection)) { - var collInfo = collections[collection]; - var shards = collInfo.shards; // this is the Plan - var shard; + // diff the collections + Object.keys(collections).forEach(function (collection) { + let collInfo = collections[collection]; + let shards = collInfo.shards; - collInfo.planId = collInfo.id; + collInfo.planId = collInfo.id; + Object.keys(shards).forEach(shard => { + if (shards[shard].indexOf(ourselves) >= 0) { + let shouldBeLeader = shards[shard][0] === ourselves; - for (shard in shards) { - if (shards.hasOwnProperty(shard)) { - var pos = shards[shard].indexOf(ourselves); - if (pos > 0) { // found and not in position 0 - // found a shard we have to replicate synchronously - // now see whether we are in sync by looking at the - // current entry in the agency: - var inCurrent = lookup4d(currentCollections, database, - collection, shard); - // If inCurrent is not in order in any way, we schedule - // a synchronization job: - if (inCurrent === undefined || - !inCurrent.hasOwnProperty('servers') || - typeof inCurrent.servers !== 'object' || - !Array.isArray(inCurrent.servers) || - inCurrent.servers.indexOf(ourselves) <= 0 || - inCurrent.servers[0].substr(0, 1) === '_' || - inCurrent.servers[0] !== shards[shard][0]) { - scheduleOneShardSynchronization( - database, shard, collInfo.planId, shards[shard][0]); + // found a shard we are responsible for + localErrors[shard] = { error: false, errorNum: 0, + errorMessage: 'no error', indexes: {} }; + + let error = localErrors[shard]; + let collectionStatus; + if (!localCollections.hasOwnProperty(shard)) { + // must create this shard + console.debug("creating local shard '%s/%s' for central '%s/%s'", + database, + shard, + database, + collInfo.planId); + + let save = {id: collInfo.id, name: collInfo.name}; + delete collInfo.id; // must not + delete collInfo.name; + try { + if (collInfo.type === ArangoCollection.TYPE_EDGE) { + db._createEdgeCollection(shard, collInfo); + } else { + db._create(shard, collInfo); + } + } catch (err2) { + error = { error: true, errorNum: err2.errorNum, + errorMessage: err2.errorMessage }; + console.error("creating local shard '%s/%s' for central '%s/%s' failed: %s", + database, + shard, + database, + collInfo.planId, + JSON.stringify(err2)); + } + collInfo.id = save.id; + collInfo.name = save.name; + if (shouldBeLeader) { + db._collection(shard).assumeLeadership(); + } + collectionStatus = ArangoCollection.STATUS_LOADED; + } else { + // We adjust local leadership, note that the planned resignation + // case is not handled here, since then ourselves does not appear + // in shards[shard] but only "_" + ourselves. + // We adjust local leadership, note that the planned + // resignation case is not handled here, since then + // ourselves does not appear in shards[shard] but only + // "_" + ourselves. See below under "Drop local shards" + // to see the proper handling of this case. Place is marked + // with *** in comments. 
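+ // in table form:
+ //   planned leader,   locally follower -> assumeLeadership()
+ //   planned follower, locally leader   -> leaderResign()
+ //   anything else                      -> leave leadership as is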
+ if (!shouldBeLeader && localCollections[shard].isLeader) { + db._collection(shard).leaderResign(); + } else if (shouldBeLeader && + !localCollections[shard].isLeader) { + db._collection(shard).assumeLeadership(); + } + + collectionStatus = localCollections[shard].status; + + // collection exists, now compare collection properties + let cmp = [ 'journalSize', 'waitForSync', 'doCompact', + 'indexBuckets' ]; + + let properties = cmp.reduce((obj, key) => { + if (localCollections[shard][key] !== collInfo[key]) { + // property change + obj[key] = collInfo[key]; + } + return obj; + }, {}); + + if (Object.keys(properties).length > 0) { + console.info("updating properties for local shard '%s/%s'", + database, + shard); + + try { + db._collection(shard).properties(properties); + } catch (err3) { + error = { error: true, errorNum: err3.errorNum, + errorMessage: err3.errorMessage }; + } + } + } + if (error.error) { + return; // No point to look for indices, if the + // creation has not worked + } + + // Now check whether the status is OK: + if (collectionStatus !== collInfo.status) { + console.info("detected status change for local shard '%s/%s'", + database, + shard); + + if (collInfo.status === ArangoCollection.STATUS_UNLOADED) { + console.info("unloading local shard '%s/%s'", + database, + shard); + db._collection(shard).unload(); + } else if (collInfo.status === ArangoCollection.STATUS_LOADED) { + console.info("loading local shard '%s/%s'", + database, + shard); + db._collection(shard).load(); + } + } + + let indexes = getIndexMap(shard); + let idx; + let index; + + if (collInfo.hasOwnProperty('indexes')) { + for (let i = 0; i < collInfo.indexes.length; ++i) { + index = collInfo.indexes[i]; + + if (index.type !== 'primary' && index.type !== 'edge' && + !indexes.hasOwnProperty(index.id)) { + console.debug("creating index '%s/%s': %s", + database, + shard, + JSON.stringify(index)); + try { + arangodb.db._collection(shard).ensureIndex(index); + + } catch (err5) { + error.indexes[index.id] = { + id: index.id, + error: true, + errorNum: err5.errorNum, + errorMessage: err5.errorMessage + }; + } + } + } + + for (idx in indexes) { + if (indexes.hasOwnProperty(idx)) { + // found an index in the index map, check if it must be deleted + + if (indexes[idx].type !== 'primary' && indexes[idx].type !== 'edge') { + let found = false; + for (let i = 0; i < collInfo.indexes.length; ++i) { + if (collInfo.indexes[i].id === idx) { + found = true; + break; + } + } + + if (!found) { + // found an index to delete locally + index = indexes[idx]; + + console.info("dropping index '%s/%s': %s", + database, + shard, + JSON.stringify(index)); + + arangodb.db._collection(shard).dropIndex(index); + + delete indexes[idx]; + } } } } } } - } - } catch (err) { - // always return to previous database - db._useDatabase('_system'); - throw err; - } - + }); + }); + } catch(e) { + console.debug("Got error executing plan", e, e.stack); + } finally { + // always return to previous database db._useDatabase('_system'); } } - } - return true; -} + }); + // Drop local shards that do no longer exist in Plan: + let shardMap = getShardMap(plannedCollections); -// ////////////////////////////////////////////////////////////////////////////// -// / @brief handle collection changes -// ////////////////////////////////////////////////////////////////////////////// + // iterate over all databases + Object.keys(localDatabases).forEach(database => { + let removeAll = !plannedCollections.hasOwnProperty(database); -function handleCollectionChanges 
(plan, current, takeOverResponsibility) { - var plannedCollections = plan.Collections; - var currentCollections = current.Collections; + // switch into other database + db._useDatabase(database); + try { + // iterate over collections of database + let collections = getLocalCollections(); - var ok = true; + Object.keys(collections).forEach(collection => { + // found a local collection + // check if it is in the plan and we are responsible for it + if (removeAll || + !shardMap.hasOwnProperty(collection) || + shardMap[collection].indexOf(ourselves) === -1) { - try { - createLocalCollections(plannedCollections, plan.Version, currentCollections, - takeOverResponsibility); - dropLocalCollections(plannedCollections, currentCollections); - cleanupCurrentCollections(plannedCollections, currentCollections); - if (!synchronizeLocalFollowerCollections(plannedCollections, - currentCollections)) { - // If not all needed jobs have been scheduled, then work is still - // ongoing, therefore we want to revisit this soon. - ok = false; + // May be we have been the leader and are asked to withdraw: *** + if (shardMap.hasOwnProperty(collection) && + shardMap[collection][0] === '_' + ourselves) { + if (collections[collection].isLeader) { + organiseLeaderResign(database, collections[collection].planId, + collection); + } + } else { + if (!collections[collection].isLeader) { + // Remove us from the follower list, this is a best + // effort: If an error occurs, this is no problem, since + // the leader will soon notice that the shard here is + // gone and will drop us automatically: + console.debug("removing local shard '%s/%s' of '%s/%s' from follower list", + database, collection, database, + collections[collection].planId); + let servers = shardMap[collection]; + if (servers !== undefined) { + let endpoint = ArangoClusterInfo.getServerEndpoint(servers[0]); + try { + removeShardFollower(endpoint, database, collection); + } catch (err) { + console.debug("caught exception during removal of local shard '%s/%s' of '%s/%s' from follower list", + database, collection, database, + collections[collection].planId, err); + } + } + } + console.info("dropping local shard '%s/%s' of '%s/%s", + database, + collection, + database, + collections[collection].planId); + + db._drop(collection); + } + } + }); + } finally { + db._useDatabase('_system'); } - } catch (err) { - console.error('Caught error in handleCollectionChanges: ' + - JSON.stringify(err), JSON.stringify(err.stack)); - ok = false; - } - return ok; + }); + + return localErrors; } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// +// / @brief updateCurrentForCollections +// ///////////////////////////////////////////////////////////////////////////// + +function updateCurrentForCollections(localErrors, current) { + let currentCollections = current.Collections; + let ourselves = global.ArangoServerState.id(); + + let db = require('internal').db; + db._useDatabase('_system'); + + let localDatabases = getLocalDatabases(); + let database; + + function assembleLocalCollectionInfo(info, error) { + let coll = db._collection(info.name); + let payload = { + error: error.error, + errorMessage: error.errorMessage, + errorNum: error.errorNum, + }; + payload.indexes = coll.getIndexes().map(index => { + let agencyIndex = {}; + Object.assign(agencyIndex, index); + // Fix up the IDs of the indexes: + let pos = index.id.indexOf("/"); + if (pos >= 0) { + agencyIndex.id = 
+      } else {
+        agencyIndex.id = index.id;
+      }
+
+      if (error.indexes[agencyIndex.id] !== undefined) {
+        Object.assign(agencyIndex, error.indexes[agencyIndex.id]);
+        delete error.indexes[agencyIndex.id];
+      }
+      return agencyIndex;
+    });
+    // add the remaining errors, which do not have a local id
+    Object.keys(error.indexes).forEach(indexId => {
+      payload.indexes.push(error.indexes[indexId]);
+    });
+
+    payload.servers = [ourselves].concat(coll.getFollowers());
+    return payload;
+  }
+
+  function makeDropCurrentEntryCollection(dbname, col, shard, trx) {
+    trx[0][curCollections + dbname + '/' + col + '/' + shard] =
+      {op: 'delete'};
+  }
+
+  let trx = [{}];
+
+  // Go through local databases and collections and add stuff to Current
+  // as needed:
+  Object.keys(localDatabases).forEach(database => {
+    // All local databases should be in Current by now. If one is not,
+    // we ignore it here; it will be added later.
+    try {
+      db._useDatabase(database);
+
+      // iterate over collections (i.e. shards) of database
+      let localCollections = getLocalCollections();
+      let shard;
+      for (shard in localCollections) {
+        if (localCollections.hasOwnProperty(shard)) {
+          let shardInfo = localCollections[shard];
+          if (shardInfo.isLeader) {
+            let localCollectionInfo = assembleLocalCollectionInfo(shardInfo, localErrors[shard]);
+
+            let currentCollectionInfo = fetchKey(current, 'Collections', database, shardInfo.planId, shard);
+            if (!_.isEqual(localCollectionInfo, currentCollectionInfo)) {
+              trx[0][curCollections + database + '/' + shardInfo.planId + '/' + shardInfo.name] = {
+                op: 'set',
+                new: localCollectionInfo,
+              };
+            }
+          } else {
+            let currentServers = fetchKey(current, 'Collections', database, shardInfo.planId, shard, 'servers');
+            // We were previously the leader and are done resigning:
+            // update Current and let the supervision handle the rest.
+            if (Array.isArray(currentServers) && currentServers[0] === ourselves) {
+              trx[0][curCollections + database + '/' + shardInfo.planId + '/' + shardInfo.name + '/servers'] = {
+                op: 'set',
+                new: ['_' + ourselves].concat(db._collection(shardInfo.name).getFollowers()),
+              };
+            }
+          }
+        }
+      }
+    } catch (e) {
+      console.error('Got error while trying to sync current collections:', e, e.stack);
+    } finally {
+      // always return to previous database
+      db._useDatabase('_system');
+    }
+  });
+
+  // Go through all current databases and collections and remove entries
+  // that are no longer present locally:
+  for (database in currentCollections) {
+    if (currentCollections.hasOwnProperty(database)) {
+      if (localDatabases.hasOwnProperty(database)) {
+        // If a database has vanished locally, it is not our job to
+        // remove it in Current; that is what `updateCurrentForDatabases`
+        // does. Therefore we only look at databases that still exist.
+        db._useDatabase(database);
+
+        try {
+          // iterate over collections (i.e. shards) of database in Current
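+          // entries in Current are only dropped here if the entry's
+          // first server (the registered leader) is this one and the
+          // shard no longer exists locally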
+          let localCollections = getLocalCollections();
+          let collection;
+          for (collection in currentCollections[database]) {
+            if (currentCollections[database].hasOwnProperty(collection)) {
+              let shard;
+              for (shard in currentCollections[database][collection]) {
+                if (currentCollections[database][collection].hasOwnProperty(shard)) {
+                  let cur = currentCollections[database][collection][shard];
+                  if (!localCollections.hasOwnProperty(shard) &&
+                      cur.servers[0] === ourselves) {
+                    makeDropCurrentEntryCollection(database, collection, shard,
+                                                   trx);
+                  }
+                }
+              }
+            }
+          }
+        } finally {
+          // always return to previous database
+          db._useDatabase('_system');
+        }
+      }
+    }
+  }
+  return trx;
+}
+
+// /////////////////////////////////////////////////////////////////////////////
+// / @brief syncReplicatedShardsWithLeaders
+// /////////////////////////////////////////////////////////////////////////////
+
+function syncReplicatedShardsWithLeaders(plan, current, localErrors) {
+  let plannedDatabases = plan.Collections;
+  let currentDatabases = current.Collections;
+  let ourselves = global.ArangoServerState.id();
+
+  let db = require('internal').db;
+  db._useDatabase('_system');
+
+  let localDatabases = getLocalDatabases();
+
+  // Schedule sync tasks for shards which exist locally and for which we
+  // should be a follower:
+  Object.keys(plannedDatabases).forEach(databaseName => {
+    if (localDatabases.hasOwnProperty(databaseName)
+        && currentDatabases.hasOwnProperty(databaseName)) {
+      // switch into other database
+      db._useDatabase(databaseName);
+
+      try {
+        // iterate over collections of database
+        let localCollections = getLocalCollections();
+
+        let plannedCollections = plannedDatabases[databaseName];
+        let currentCollections = currentDatabases[databaseName];
+
+        // find planned collections that need sync (not registered in current by the leader):
+        Object.keys(plannedCollections).forEach(collectionName => {
+          let plannedCollection = plannedCollections[collectionName];
+          let currentShards = currentCollections[collectionName];
+          if (currentShards !== undefined) {
+            let plannedShards = plannedCollection.shards;
+            Object.keys(plannedShards).forEach(shardName => {
+              // shard does not exist locally, so there is nothing we can
+              // do at this point
+              if (!localCollections.hasOwnProperty(shardName)) {
+                return;
+              }
+              // the entry in current is created by the leader; this code
+              // only brings followers into sync, so skip shards the
+              // leader has not registered yet
+              if (!currentShards.hasOwnProperty(shardName)) {
+                return;
+              }
+              let currentServers = currentShards[shardName].servers;
+              let plannedServers = plannedShards[shardName];
+              if (!plannedServers) {
+                console.error('Shard ' + shardName + ' does not have servers substructure in plan');
+                return;
+              }
+              if (!currentServers) {
+                console.error('Shard ' + shardName + ' does not have servers substructure in current');
+                return;
+              }
+
+              // position 0 is the leader, so an index <= 0 means we are
+              // not planned to be a follower
+              if (plannedServers.indexOf(ourselves) <= 0) {
+                return;
+              }
+              // if we are already listed in current, we are considered
+              // to be in sync and there is nothing to do
+              if (currentServers.indexOf(ourselves) > 0) {
+                return;
+              }
+
+              let leader = plannedServers[0];
+              scheduleOneShardSynchronization(databaseName, shardName, plannedCollection.id, leader);
+            });
+          }
+        });
+      } catch (e) {
+        console.debug('Got an error synchronizing with leader', e, e.stack);
+      } finally {
+        // always return to previous database
+        db._useDatabase('_system');
+      }
+    }
+  });
+}
+
+// /////////////////////////////////////////////////////////////////////////////
+// / @brief take care of collections on primary DBservers according to Plan
+// /////////////////////////////////////////////////////////////////////////////
+
+function migratePrimary(plan, current) {
+  // analyze the local state and then issue db._create(), db._drop() etc.
+  // to sync plan and local state for shards
+  let localErrors = executePlanForCollections(plan.Collections);
+
+  // diff current and local state and prepare an agency transaction to
+  // update Current; the errors produced locally are reported to the
+  // agency as well
+  let trx = updateCurrentForCollections(localErrors, current);
+  if (trx.length > 0 && Object.keys(trx[0]).length !== 0) {
+    trx[0][curVersion] = {op: 'increment'};
+    // TODO: reduce timeout when we can:
+    try {
+      let res = global.ArangoAgency.write([trx]);
+      if (typeof res !== 'object' || !res.hasOwnProperty("results") ||
+          typeof res.results !== 'object' || res.results.length !== 1 ||
+          res.results[0] === 0) {
+        console.error('migratePrimary: could not send transaction for Current to agency, result:', res);
+      }
+    } catch (err) {
+      console.error('migratePrimary: caught exception when sending transaction for Current to agency:', err);
+    }
+  }
+
+  // do a diff between plan and current to find the shards for which this
+  // db server is a planned follower. Background jobs are scheduled for
+  // this activity; they supervise any actions necessary to bring a shard
+  // into sync and ultimately register at the leader using addFollower.
+  // This step has to assume that the local state matches the plan, but it
+  // can NOT be sure that the plan was completely executed; it may react
+  // to the errors that were recorded above.
+  syncReplicatedShardsWithLeaders(plan, current, localErrors);
+}
+
+// /////////////////////////////////////////////////////////////////////////////
+// / @brief executePlanForDatabases
+// /////////////////////////////////////////////////////////////////////////////
+
+function executePlanForDatabases(plannedDatabases) {
+  let localErrors = {};
+
+  let db = require('internal').db;
+  db._useDatabase('_system');
+
+  let localDatabases = getLocalDatabases();
+  let name;
+
+  // check which databases need to be created locally:
+  Object.keys(plannedDatabases).forEach(name => {
+    if (!localDatabases.hasOwnProperty(name)) {
+      // must create database
+
+      // TODO: handle options and user information
+
+      console.debug("creating local database '%s'", name);
+
+      try {
+        db._createDatabase(name);
+      } catch (err) {
+        localErrors[name] = { error: true, errorNum: err.errorNum,
+                              errorMessage: err.errorMessage, name: name };
+      }
+    }
+  });
+
+  // check which databases need to be deleted locally
+  localDatabases = getLocalDatabases();
+
+  Object.keys(localDatabases).forEach(name => {
+    if (!plannedDatabases.hasOwnProperty(name) && name.substr(0, 1) !== '_') {
+      // must drop database
+
+      console.info("dropping local database '%s'", name);
+
+      // Do we have to stop a replication applier first?
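+      // (if we are a SECONDARY, a replication applier may still be
+      // running for this database, so it is stopped before the drop
+      // below)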
+ if (ArangoServerState.role() === 'SECONDARY') { + try { + db._useDatabase(name); + var rep = require('@arangodb/replication'); + var state = rep.applier.state(); + if (state.state.running === true) { + console.info('stopping replication applier first'); + rep.applier.stop(); + } + } + finally { + db._useDatabase('_system'); + } + } + db._dropDatabase(name); + } + }); + + return localErrors; +} + +// ///////////////////////////////////////////////////////////////////////////// +// / @brief updateCurrentForDatabases +// ///////////////////////////////////////////////////////////////////////////// + +function updateCurrentForDatabases(localErrors, current) { + let ourselves = global.ArangoServerState.id(); + + function makeAddDatabaseAgencyOperation(payload, trx) { + trx[0][curDatabases + payload.name + '/' + ourselves] = + {op: 'set', new: payload}; + }; + + function makeDropDatabaseAgencyOperation(name, trx) { + trx[0][curDatabases + name + '/' + ourselves] = {'op':'delete'}; + }; + + let db = require('internal').db; + db._useDatabase('_system'); + + let localDatabases = getLocalDatabases(); + let currentDatabases = current.Databases; + let name; + let trx = [{}]; // Here we collect all write operations + + // Add entries that we have but that are not in Current: + for (name in localDatabases) { + if (localDatabases.hasOwnProperty(name)) { + if (!currentDatabases.hasOwnProperty(name) || + !currentDatabases[name].hasOwnProperty(ourselves)) { + console.debug("adding entry in Current for database '%s'", name); + makeAddDatabaseAgencyOperation({error: false, errorNum: 0, name: name, + id: localDatabases[name].id, + errorMessage: ""}, trx); + } + } + } + + // Remove entries from current that no longer exist locally: + for (name in currentDatabases) { + if (currentDatabases.hasOwnProperty(name) && name.substr(0, 1) !== '_') { + if (!localDatabases.hasOwnProperty(name)) { + // we found a database we don't have locally + + if (currentDatabases[name].hasOwnProperty(ourselves)) { + // we are entered for a database that we don't have locally + console.debug("cleaning up entry for unknown database '%s'", name); + makeDropDatabaseAgencyOperation(name, trx); + } + } + } + } + + // Finally, report any errors that might have been produced earlier when + // we were trying to execute the Plan: + for (name in localErrors) { + if (localErrors.hasOwnProperty(name)) { + console.debug("reporting error to Current about database '%s'", name); + makeAddDatabaseAgencyOperation(localErrors[name], trx); + } + } + + return trx; +} + +// ///////////////////////////////////////////////////////////////////////////// +// / @brief take care of databases on any type of server according to Plan +// ///////////////////////////////////////////////////////////////////////////// + +function migrateAnyServer(plan, current) { + // will analyze local state and then issue db._createDatabase(), + // db._dropDatabase() etc. to sync plan and local state for databases + let localErrors = executePlanForDatabases(plan.Databases); + // diff current and local and prepare agency transactions or whatever + // to update current. 
will report the errors created locally to the agency + let trx = updateCurrentForDatabases(localErrors, current); + if (Object.keys(trx[0]).length !== 0) { + trx[0][curVersion] = {op: 'increment'}; + // TODO: reduce timeout when we can: + try { + let res = global.ArangoAgency.write([trx]); + if (typeof res !== 'object' || !res.hasOwnProperty("results") || + typeof res.results !== 'object' || res.results.length !== 1 || + res.results[0] === 0) { + console.error('migrateAnyServer: could not send transaction for Current to agency, result:', res); + } + } catch (err) { + console.error('migrateAnyServer: caught exception when sending transaction for Current to agency:', err); + } + } +} + +// ///////////////////////////////////////////////////////////////////////////// // / @brief make sure that replication is set up for all databases -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function setupReplication () { console.debug('Setting up replication...'); @@ -1370,9 +1326,9 @@ function setupReplication () { return ok; } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief role change from secondary to primary -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function secondaryToPrimary () { console.info('Switching role from secondary to primary...'); @@ -1402,49 +1358,49 @@ function secondaryToPrimary () { } } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief role change from primary to secondary -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function primaryToSecondary () { console.info('Switching role from primary to secondary...'); } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief change handling trampoline function -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function handleChanges (plan, current) { + // Note: This is never called with role === 'COORDINATOR' or on a single + // server. var changed = false; var role = ArangoServerState.role(); - if (role === 'PRIMARY' || role === 'SECONDARY') { - // Need to check role change for automatic failover: - var myId = ArangoServerState.id(); - if (role === 'PRIMARY') { - if (!plan.DBServers[myId]) { - // Ooops! We do not seem to be a primary any more! - changed = ArangoServerState.redetermineRole(); + // Need to check role change for automatic failover: + var myId = ArangoServerState.id(); + if (role === 'PRIMARY') { + if (!plan.DBServers[myId]) { + // Ooops! We do not seem to be a primary any more! + changed = ArangoServerState.redetermineRole(); + } + } else { // role === "SECONDARY" + if (plan.DBServers[myId]) { + changed = ArangoServerState.redetermineRole(); + if (!changed) { + // mop: oops...changing role has failed. retry next time. 
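+        // handleChanges() then reports failure to its caller, so the
+        // role change will be attempted again on the next plan change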
+ return false; } - } else { // role === "SECONDARY" - if (plan.DBServers[myId]) { + } else { + var found = null; + var p; + for (p in plan) { + if (plan.hasOwnProperty(p) && plan[p] === myId) { + found = p; + break; + } + } + if (found !== ArangoServerState.idOfPrimary()) { + // Note this includes the case that we are not found at all! changed = ArangoServerState.redetermineRole(); - if (!changed) { - // mop: oops...changing role has failed. retry next time. - return false; - } - } else { - var found = null; - var p; - for (p in plan) { - if (plan.hasOwnProperty(p) && plan[p] === myId) { - found = p; - break; - } - } - if (found !== ArangoServerState.idOfPrimary()) { - // Note this includes the case that we are not found at all! - changed = ArangoServerState.redetermineRole(); - } } } } @@ -1459,22 +1415,19 @@ function handleChanges (plan, current) { } } - handleDatabaseChanges(plan, current); - var success; - if (role === 'PRIMARY' || role === 'COORDINATOR') { - // Note: This is only ever called for DBservers (primary and secondary), - // we keep the coordinator case here just in case... - success = handleCollectionChanges(plan, current, changed); - } else { - success = setupReplication(); + migrateAnyServer(plan, current); + if (role === 'PRIMARY') { + migratePrimary(plan, current); + } else { // if (role == 'SECONDARY') { + setupReplication(); } - return success; + return true; } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief throw an ArangoError -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// var raiseError = function (code, msg) { var err = new ArangoError(); @@ -1484,9 +1437,9 @@ var raiseError = function (code, msg) { throw err; }; -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief retrieve a list of shards for a collection -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// var shardList = function (dbName, collectionName) { let ci = global.ArangoClusterInfo.getCollectionInfo(dbName, collectionName); @@ -1514,9 +1467,9 @@ var shardList = function (dbName, collectionName) { return shards; }; -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief wait for a distributed response -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// var waitForDistributedResponse = function (data, numberOfRequests) { var received = []; @@ -1565,9 +1518,9 @@ var waitForDistributedResponse = function (data, numberOfRequests) { return received; }; -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief whether or not clustering is enabled -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// var isCluster = function () { var role = global.ArangoServerState.role(); @@ 
-1575,17 +1528,17 @@ var isCluster = function () { return (role !== undefined && role !== 'SINGLE' && role !== 'AGENT'); }; -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief whether or not we are a coordinator -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// var isCoordinator = function () { return global.ArangoServerState.isCoordinator(); }; -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief role -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// var role = function () { var role = global.ArangoServerState.role(); @@ -1596,9 +1549,9 @@ var role = function () { return undefined; }; -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief status -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// var status = function () { if (!isCluster() || !global.ArangoServerState.initialized()) { @@ -1608,9 +1561,9 @@ var status = function () { return global.ArangoServerState.status(); }; -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief isCoordinatorRequest -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// var isCoordinatorRequest = function (req) { if (!req || !req.hasOwnProperty('headers')) { @@ -1620,11 +1573,13 @@ var isCoordinatorRequest = function (req) { return req.headers.hasOwnProperty('x-arango-coordinator'); }; -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief handlePlanChange -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// var handlePlanChange = function (plan, current) { + // This is never called on a coordinator, we still make sure that it + // is not executed on a single server or coordinator, just to be sure: if (!isCluster() || isCoordinator() || !global.ArangoServerState.initialized()) { return true; } @@ -1634,43 +1589,8 @@ var handlePlanChange = function (plan, current) { current: current.Version }; - // //////////////////////////////////////////////////////////////////////////// - // / @brief execute an action under a write-lock - // //////////////////////////////////////////////////////////////////////////// - - function writeLocked (lockInfo, cb, args) { - var timeout = lockInfo.timeout; - if (timeout === undefined) { - timeout = 60; - } - - var ttl = lockInfo.ttl; - if (ttl === undefined) { - ttl = 120; - } - if (require('internal').coverage || require('internal').valgrind) { - ttl *= 10; - timeout *= 10; - } - - global.ArangoAgency.lockWrite(lockInfo.part, ttl, timeout); - - try { - cb.apply(null, 
args); - global.ArangoAgency.increaseVersion(lockInfo.part + '/Version'); - - let version = global.ArangoAgency.get(lockInfo.part + '/Version'); - versions[lockInfo.part.toLowerCase()] = version.arango[lockInfo.part].Version; - - global.ArangoAgency.unlockWrite(lockInfo.part, timeout); - } catch (err) { - global.ArangoAgency.unlockWrite(lockInfo.part, timeout); - throw err; - } - } - try { - versions.success = handleChanges(plan, current, writeLocked); + versions.success = handleChanges(plan, current); console.debug('plan change handling successful'); } catch (err) { @@ -1682,9 +1602,9 @@ var handlePlanChange = function (plan, current) { return versions; }; -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief coordinatorId -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// var coordinatorId = function () { if (!isCoordinator()) { @@ -1693,9 +1613,9 @@ var coordinatorId = function () { return global.ArangoServerState.id(); }; -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief bootstrap db servers -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// var bootstrapDbServers = function (isRelaunch) { global.ArangoClusterInfo.reloadDBServers(); @@ -1743,9 +1663,9 @@ var bootstrapDbServers = function (isRelaunch) { return result; }; -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief shard distribution -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function format (x) { var r = {}; @@ -1783,9 +1703,9 @@ function shardDistribution () { }; } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief move shard -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function moveShard (info) { var isLeader; @@ -1827,9 +1747,9 @@ function moveShard (info) { return {error: false, id: id}; } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief rebalance shards -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function rebalanceShards () { var dbServers = global.ArangoClusterInfo.getDBServers(); @@ -1935,9 +1855,9 @@ function rebalanceShards () { return true; } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief supervision state -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function supervisionState 
() { try { @@ -1953,9 +1873,9 @@ function supervisionState () { } } -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // / @brief wait for synchronous replication to settle -// ////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// function waitForSyncReplOneCollection (dbName, collName) { console.debug('waitForSyncRepl:', dbName, collName); @@ -2016,3 +1936,6 @@ exports.rebalanceShards = rebalanceShards; exports.moveShard = moveShard; exports.supervisionState = supervisionState; exports.waitForSyncRepl = waitForSyncRepl; + +exports.executePlanForDatabases = executePlanForDatabases; +exports.executePlanForCollections = executePlanForCollections; diff --git a/js/server/modules/@arangodb/replication.js b/js/server/modules/@arangodb/replication.js index 489c18b907..ace0b12a4e 100644 --- a/js/server/modules/@arangodb/replication.js +++ b/js/server/modules/@arangodb/replication.js @@ -29,6 +29,7 @@ // ////////////////////////////////////////////////////////////////////////////// var internal = require('internal'); +var ERRORS = internal.errors; var endpointToURL = require('@arangodb/cluster').endpointToURL; var request; if (ArangoServerState.role() === 'PRIMARY') { @@ -272,7 +273,15 @@ function syncCollectionFinalize (database, collname, from, config) { coll.remove(entry.data._key); } catch (errx) { console.error('syncCollectionFinalize: remove', entry, JSON.stringify(errx)); - throw errx; + if (errx.errorNum !== ERRORS.ERROR_ARANGO_DOCUMENT_NOT_FOUND.code) { + throw errx; + } + // We swallow the NOT FOUND error here. It is possible that + // a follower tries to synchronize to a leader with whom it + // is already in sync. In that case there could have been a + // synchronously replicated removal operation that has happened + // whilst we were resynchronizing the shard. In this case, the + // removal would have happened already. } } else if (entry.type === mType.REPLICATION_TRANSACTION_START) { transactions[entry.tid] = []; diff --git a/js/server/tests/cluster-sync/cluster-sync-test-noncluster-spec.js b/js/server/tests/cluster-sync/cluster-sync-test-noncluster-spec.js new file mode 100644 index 0000000000..4cce429a98 --- /dev/null +++ b/js/server/tests/cluster-sync/cluster-sync-test-noncluster-spec.js @@ -0,0 +1,757 @@ +/* global describe, it, before, beforeEach, afterEach */ + +// ////////////////////////////////////////////////////////////////////////////// +// / @brief JavaScript cluster functionality +// / +// / @file +// / +// / DISCLAIMER +// / +// / Copyright 2017 ArangoDB GmbH, Cologne, Germany +// / +// / Licensed under the Apache License, Version 2.0 (the "License") +// / you may not use this file except in compliance with the License. +// / You may obtain a copy of the License at +// / +// / http://www.apache.org/licenses/LICENSE-2.0 +// / +// / Unless required by applicable law or agreed to in writing, software +// / distributed under the License is distributed on an "AS IS" BASIS, +// / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// / See the License for the specific language governing permissions and +// / limitations under the License. 
+// / +// / Copyright holder is ArangoDB GmbH, Cologne, Germany +// / +// / @author Andreas Streichardt +// ////////////////////////////////////////////////////////////////////////////// + +const db = require('internal').db; +const cluster = require('@arangodb/cluster'); +const expect = require('chai').expect; +const ArangoCollection = require('@arangodb/arango-collection').ArangoCollection; + +describe('Cluster sync', function() { + describe('Databaseplan to local', function() { + before(function() { + require('@arangodb/sync-replication-debug').setup(); + }); + + beforeEach(function() { + db._databases().forEach(database => { + if (database !== '_system') { + db._dropDatabase(database); + } + }); + }); + it('should create a planned database', function() { + let plan = { + "Databases": { + "test": { + "id": 1, + "name": "test" + } + } + }; + let errors = cluster.executePlanForDatabases(plan.Databases); + let databases = db._databases(); + expect(databases).to.have.lengthOf(2); + expect(databases).to.contain('test'); + expect(errors).to.be.empty; + }); + it('should leave everything in place if a planned database already exists', function() { + let plan = { + Databases: { + "test": { + "id": 1, + "name": "test" + } + } + }; + db._createDatabase('test'); + let errors = cluster.executePlanForDatabases(plan.Databases); + let databases = db._databases(); + expect(databases).to.have.lengthOf(2); + expect(databases).to.contain('test'); + expect(errors).to.be.empty; + }); + it('should delete a database if it is not used anymore', function() { + db._createDatabase('peng'); + let plan = { + Databases: { + } + }; + cluster.executePlanForDatabases(plan.Databases); + let databases = db._databases(); + expect(databases).to.have.lengthOf(1); + expect(databases).to.contain('_system'); + }); + }); + describe('Collection plan to local', function() { + let numSystemCollections; + before(function() { + require('@arangodb/sync-replication-debug').setup(); + }); + + beforeEach(function() { + db._databases().forEach(database => { + if (database !== '_system') { + db._dropDatabase(database); + } + }); + db._createDatabase('test'); + db._useDatabase('test'); + numSystemCollections = db._collections().length; + }); + afterEach(function() { + db._useDatabase('_system'); + }); + it('should create and load a collection if it does not exist', function() { + let plan = { + Collections: { + test: { + "100001": { + "deleted": false, + "doCompact": true, + "id": "100001", + "indexBuckets": 8, + "indexes": [ + { + "fields": [ + "_key" + ], + "id": "0", + "sparse": false, + "type": "primary", + "unique": true + } + ], + "isSystem": false, + "isVolatile": false, + "journalSize": 1048576, + "keyOptions": { + "allowUserKeys": true, + "type": "traditional" + }, + "name": "test", + "numberOfShards": 1, + "replicationFactor": 2, + "shardKeys": [ + "_key" + ], + "shards": { + "s100001": [ + "", + ] + }, + "status": 3, + "type": 2, + "waitForSync": false + } + } + } + }; + cluster.executePlanForCollections(plan.Collections); + db._useDatabase('test'); + let collections = db._collections(); + expect(collections.map(collection => collection.name())).to.contain('s100001'); + expect(db._collection('s100001').status()).to.equal(ArangoCollection.STATUS_LOADED); + }); + it('should create a collection if it does not exist (unloaded case)', function() { + let plan = { + Collections: { + test: { + "100001": { + "deleted": false, + "doCompact": true, + "id": "100001", + "indexBuckets": 8, + "indexes": [ + { + "fields": [ + "_key" + ], + "id": 
"0", + "sparse": false, + "type": "primary", + "unique": true + } + ], + "isSystem": false, + "isVolatile": false, + "journalSize": 1048576, + "keyOptions": { + "allowUserKeys": true, + "type": "traditional" + }, + "name": "test", + "numberOfShards": 1, + "replicationFactor": 2, + "shardKeys": [ + "_key" + ], + "shards": { + "s100001": [ + "", + ] + }, + "status": 2, + "type": 2, + "waitForSync": false + } + } + } + }; + cluster.executePlanForCollections(plan.Collections); + db._useDatabase('test'); + let collections = db._collections(); + expect(collections.map(collection => collection.name())).to.contain('s100001'); + expect(db._collection('s100001').status()).to.equal(ArangoCollection.STATUS_UNLOADED); + }); + it('should unload an existing collection', function() { + db._create('s100001'); + expect(db._collection('s100001').status()).to.equal(ArangoCollection.STATUS_LOADED); + let plan = { + test: { + "100001": { + "deleted": false, + "doCompact": true, + "id": "100001", + "indexBuckets": 8, + "indexes": [ + { + "fields": [ + "_key" + ], + "id": "0", + "sparse": false, + "type": "primary", + "unique": true + } + ], + "isSystem": false, + "isVolatile": false, + "journalSize": 1048576, + "keyOptions": { + "allowUserKeys": true, + "type": "traditional" + }, + "name": "test", + "numberOfShards": 1, + "replicationFactor": 2, + "shardKeys": [ + "_key" + ], + "shards": { + "s100001": [ + "", + ] + }, + "status": 2, + "type": 2, + "waitForSync": false + } + } + }; + cluster.executePlanForCollections(plan); + db._useDatabase('test'); + expect(db._collection('s100001').status()).to.equal(ArangoCollection.STATUS_UNLOADED); + }); + it('should delete a stale collection', function() { + db._create('s100001'); + let plan = { + Collections: { + test: { + } + } + }; + cluster.executePlanForCollections(plan.Collections); + db._useDatabase('test'); + let collections = db._collections(); + expect(collections).to.have.lengthOf(numSystemCollections); + }); + it('should ignore a collection for which it is not responsible', function() { + let plan = { + Collections: { + test: { + "100001": { + "deleted": false, + "doCompact": true, + "id": "100001", + "indexBuckets": 8, + "indexes": [ + { + "fields": [ + "_key" + ], + "id": "0", + "sparse": false, + "type": "primary", + "unique": true + } + ], + "isSystem": false, + "isVolatile": false, + "journalSize": 1048576, + "keyOptions": { + "allowUserKeys": true, + "type": "traditional" + }, + "name": "test", + "numberOfShards": 1, + "replicationFactor": 2, + "shardKeys": [ + "_key" + ], + "shards": { + "s100001": [ + "swag", + ] + }, + "status": 3, + "type": 2, + "waitForSync": false + } + } + } + }; + cluster.executePlanForCollections(plan.Collections); + db._useDatabase('test'); + let collections = db._collections(); + expect(collections).to.have.lengthOf(numSystemCollections); + }); + it('should delete a collection for which it lost responsibility', function() { + db._create('s100001'); + let plan = { + Collections: { + test: { + "100001": { + "deleted": false, + "doCompact": true, + "id": "100001", + "indexBuckets": 8, + "indexes": [ + { + "fields": [ + "_key" + ], + "id": "0", + "sparse": false, + "type": "primary", + "unique": true + } + ], + "isSystem": false, + "isVolatile": false, + "journalSize": 1048576, + "keyOptions": { + "allowUserKeys": true, + "type": "traditional" + }, + "name": "test", + "numberOfShards": 1, + "replicationFactor": 2, + "shardKeys": [ + "_key" + ], + "shards": { + "s100001": [ + "debug-follower", // this is a different server than we 
are + ] + }, + "status": 2, + "type": 2, + "waitForSync": false + } + } + } + }; + cluster.executePlanForCollections(plan.Collections); + db._useDatabase('test'); + let collections = db._collections(); + expect(collections).to.have.lengthOf(numSystemCollections); + }); + it('should create an additional index if instructed to do so', function() { + db._create('s100001'); + let plan = { + Collections: { + test: { + "100001": { + "deleted": false, + "doCompact": true, + "id": "100001", + "indexBuckets": 8, + "indexes": [ + { + "fields": [ + "_key" + ], + "id": "0", + "sparse": false, + "type": "primary", + "unique": true + }, + { + "error": false, + "errorMessage": "", + "errorNum": 0, + "fields": [ + "user" + ], + "id": "100005", + "sparse": true, + "type": "hash", + "unique": true + } + ], + "isSystem": false, + "isVolatile": false, + "journalSize": 1048576, + "keyOptions": { + "allowUserKeys": true, + "type": "traditional" + }, + "name": "test", + "numberOfShards": 1, + "replicationFactor": 2, + "shardKeys": [ + "_key" + ], + "shards": { + "s100001": [ + "" + ] + }, + "status": 2, + "type": 2, + "waitForSync": false + } + } + } + }; + cluster.executePlanForCollections(plan.Collections); + db._useDatabase('test'); + let indexes = db._collection('s100001').getIndexes(); + expect(indexes).to.have.lengthOf(2); + }); + it('should remove an additional index if instructed to do so', function() { + db._create('s100001'); + db._collection('s100001').ensureIndex({ type: "hash", fields: [ "name" ] }); + let plan = { + Databases: { + "_system": { + "id": 1, + "name": "_system" + }, + "test": { + "id": 2, + "name": "test" + } + }, + Collections: { + test: { + "100001": { + "deleted": false, + "doCompact": true, + "id": "100001", + "indexBuckets": 8, + "indexes": [ + { + "fields": [ + "_key" + ], + "id": "0", + "sparse": false, + "type": "primary", + "unique": true + } + ], + "isSystem": false, + "isVolatile": false, + "journalSize": 1048576, + "keyOptions": { + "allowUserKeys": true, + "type": "traditional" + }, + "name": "test", + "numberOfShards": 1, + "replicationFactor": 2, + "shardKeys": [ + "_key" + ], + "shards": { + "s100001": [ + "", + ] + }, + "status": 2, + "type": 2, + "waitForSync": false + } + } + } + }; + cluster.executePlanForCollections(plan.Collections); + db._useDatabase('test'); + let indexes = db._collection('s100001').getIndexes(); + expect(indexes).to.have.lengthOf(1); + }); + it('should report an error when collection creation failed', function() { + let plan = { + Collections: { + test: { + "100001": { + "deleted": false, + "doCompact": true, + "id": "100001", + "indexBuckets": 8, + "indexes": [ + { + "fields": [ + "_key" + ], + "id": "0", + "sparse": false, + "type": "primary", + "unique": true + } + ], + "isSystem": false, + "isVolatile": false, + "journalSize": 1048576, + "keyOptions": { + "allowUserKeys": true, + "type": "traditional" + }, + "name": "test", + "numberOfShards": 1, + "replicationFactor": 2, + "shardKeys": [ + "_key" + ], + "shards": { + "Möter": [ + "", + ] + }, + "status": 2, + "type": 2, + "waitForSync": false + } + } + } + }; + let errors = cluster.executePlanForCollections(plan.Collections); + expect(errors).to.be.an('object'); + expect(errors).to.have.property('Möter'); + }); + it('should be leading a collection when ordered to be leader', function() { + let plan = { + test: { + "100001": { + "deleted": false, + "doCompact": true, + "id": "100001", + "indexBuckets": 8, + "indexes": [ + { + "fields": [ + "_key" + ], + "id": "0", + "sparse": false, + 
"type": "primary", + "unique": true + } + ], + "isSystem": false, + "isVolatile": false, + "journalSize": 1048576, + "keyOptions": { + "allowUserKeys": true, + "type": "traditional" + }, + "name": "test", + "numberOfShards": 1, + "replicationFactor": 2, + "shardKeys": [ + "_key" + ], + "shards": { + "s100001": [ + "", + ] + }, + "status": 3, + "type": 2, + "waitForSync": false + } + } + }; + let errors = cluster.executePlanForCollections(plan); + db._useDatabase('test'); + expect(db._collection('s100001').isLeader()).to.equal(true); + }); + it('should be following a leader when ordered to be follower', function() { + let plan = { + test: { + "100001": { + "deleted": false, + "doCompact": true, + "id": "100001", + "indexBuckets": 8, + "indexes": [ + { + "fields": [ + "_key" + ], + "id": "0", + "sparse": false, + "type": "primary", + "unique": true + } + ], + "isSystem": false, + "isVolatile": false, + "journalSize": 1048576, + "keyOptions": { + "allowUserKeys": true, + "type": "traditional" + }, + "name": "test", + "numberOfShards": 1, + "replicationFactor": 2, + "shardKeys": [ + "_key" + ], + "shards": { + "s100001": [ + "the leader-leader", + "", + ] + }, + "status": 2, + "type": 2, + "waitForSync": false + } + } + }; + let errors = cluster.executePlanForCollections(plan); + db._useDatabase('test'); + expect(db._collection('s100001').isLeader()).to.equal(false); + }); + it('should be able to switch from leader to follower', function() { + let plan = { + test: { + "100001": { + "deleted": false, + "doCompact": true, + "id": "100001", + "indexBuckets": 8, + "indexes": [ + { + "fields": [ + "_key" + ], + "id": "0", + "sparse": false, + "type": "primary", + "unique": true + } + ], + "isSystem": false, + "isVolatile": false, + "journalSize": 1048576, + "keyOptions": { + "allowUserKeys": true, + "type": "traditional" + }, + "name": "test", + "numberOfShards": 1, + "replicationFactor": 2, + "shardKeys": [ + "_key" + ], + "shards": { + "s100001": [ + "", + ] + }, + "status": 2, + "type": 2, + "waitForSync": false + } + } + }; + let errors = cluster.executePlanForCollections(plan); + plan.test['100001'].shards['s100001'].unshift('der-hund'); + cluster.executePlanForCollections(plan); + db._useDatabase('test'); + expect(db._collection('s100001').isLeader()).to.equal(false); + }); + it('should be able to switch from follower to leader', function() { + let plan = { + test: { + "100001": { + "deleted": false, + "doCompact": true, + "id": "100001", + "indexBuckets": 8, + "indexes": [ + { + "fields": [ + "_key" + ], + "id": "0", + "sparse": false, + "type": "primary", + "unique": true + } + ], + "isSystem": false, + "isVolatile": false, + "journalSize": 1048576, + "keyOptions": { + "allowUserKeys": true, + "type": "traditional" + }, + "name": "test", + "numberOfShards": 1, + "replicationFactor": 2, + "shardKeys": [ + "_key" + ], + "shards": { + "s100001": [ + "old-leader", + "", + ] + }, + "status": 2, + "type": 2, + "waitForSync": false + } + } + }; + let errors = cluster.executePlanForCollections(plan); + plan.test['100001'].shards['s100001'] = [""]; + cluster.executePlanForCollections(plan); + db._useDatabase('test'); + expect(db._collection('s100001').isLeader()).to.equal(true); + }); + }); + describe('Update current', function() { + beforeEach(function() { + db._databases().forEach(database => { + if (database !== '_system') { + db._dropDatabase(database); + } + }); + }); + it('should report a new database', function() { + let Current = { + Databases: {}, + }; + }); + }); +}); diff --git 
a/js/server/tests/resilience/moving-shards-cluster.js b/js/server/tests/resilience/moving-shards-cluster.js index 087bd87960..7f7c104df8 100644 --- a/js/server/tests/resilience/moving-shards-cluster.js +++ b/js/server/tests/resilience/moving-shards-cluster.js @@ -157,7 +157,6 @@ function MovingShardsSuite () { wait(1.0); global.ArangoClusterInfo.flush(); var servers = findCollectionServers("_system", c[i].name()); - console.info("Seeing servers:", i, c[i].name(), servers); if (servers.indexOf(id) === -1) { // Now check current as well: var collInfo = diff --git a/js/server/tests/shell/shell-transactions-noncluster.js b/js/server/tests/shell/shell-transactions-noncluster.js index edfb37e140..a18265929b 100644 --- a/js/server/tests/shell/shell-transactions-noncluster.js +++ b/js/server/tests/shell/shell-transactions-noncluster.js @@ -64,6 +64,237 @@ var sortedKeys = function (col) { return keys; }; +function transactionRevisionsSuite () { + 'use strict'; + var cn = "UnitTestsTransaction"; + var c = null; + + return { + + setUp : function () { + internal.debugClearFailAt(); + db._drop(cn); + c = db._create(cn); + }, + + tearDown : function () { + internal.debugClearFailAt(); + + if (c !== null) { + c.drop(); + } + + c = null; + internal.wait(0); + }, + + testInsertUniqueFailing : function () { + var doc = c.insert({ _key: "test", value: 1 }); + try { + db._executeTransaction({ + collections: { write: c.name() }, + action: function() { + c.insert({ _key: "test", value: 2 }); + } + }); + fail(); + } catch (err) { + } + + assertEqual(1, c.figures().revisions.count); + assertEqual(1, c.count()); + assertEqual(1, c.toArray().length); + assertEqual(1, c.document("test").value); + }, + + + + testInsertUniqueSingleFailing : function () { + var doc = c.insert({ _key: "test", value: 1 }); + try { + c.insert({ _key: "test", value: 2 }); + fail(); + } catch (err) { + } + + assertEqual(1, c.figures().revisions.count); + assertEqual(1, c.count()); + assertEqual(1, c.toArray().length); + assertEqual(1, c.document("test").value); + }, + + testInsertTransactionFailing : function () { + var doc = c.insert({ _key: "test", value: 1 }); + try { + db._executeTransaction({ + collections: { write: c.name() }, + action: function() { + c.insert({ _key: "test2", value: 2 }); + throw "foo"; + } + }); + fail(); + } catch (err) { + } + + assertEqual(1, c.toArray().length); + assertEqual(1, c.figures().revisions.count); + assertEqual(1, c.document("test").value); + }, + + testRemoveTransactionFailing : function () { + var doc = c.insert({ _key: "test", value: 1 }); + try { + db._executeTransaction({ + collections: { write: c.name() }, + action: function() { + c.remove("test"); + throw "foo"; + } + }); + fail(); + } catch (err) { + } + + assertEqual(1, c.toArray().length); + assertEqual(1, c.figures().revisions.count); + assertEqual(1, c.document("test").value); + }, + + testRemoveInsertWithSameRev : function () { + var doc = c.insert({ _key: "test", value: 1 }); + db._executeTransaction({ + collections: { write: c.name() }, + action: function() { + c.remove("test"); + c.insert({ _key: "test", _rev: doc._rev, value: 2 }, { isRestore: true }); + } + }); + + assertEqual(1, c.toArray().length); + assertEqual(1, c.figures().revisions.count); + assertEqual(2, c.document("test").value); + }, + + testUpdateWithSameRev : function () { + var doc = c.insert({ _key: "test", value: 1 }); + c.update("test", { _key: "test", _rev: doc._rev, value: 2 }, { isRestore: true }); + + assertEqual(1, c.toArray().length); + assertEqual(1, 
c.figures().revisions.count); + assertEqual(2, c.document("test").value); + }, + + testUpdateWithSameRevTransaction : function () { + var doc = c.insert({ _key: "test", value: 1 }); + db._executeTransaction({ + collections: { write: c.name() }, + action: function() { + c.update("test", { _key: "test", _rev: doc._rev, value: 2 }, { isRestore: true }); + } + }); + + assertEqual(1, c.toArray().length); + assertEqual(1, c.figures().revisions.count); + assertEqual(2, c.document("test").value); + }, + + testUpdateFailingWithSameRev : function () { + var doc = c.insert({ _key: "test", value: 1 }); + try { + db._executeTransaction({ + collections: { write: c.name() }, + action: function() { + c.update("test", { _key: "test", _rev: doc._rev, value: 2 }, { isRestore: true }); + throw "foo"; + } + }); + fail(); + } catch (err) { + } + + assertEqual(1, c.toArray().length); + assertEqual(1, c.figures().revisions.count); + assertEqual(1, c.document("test").value); + }, + + testUpdateFailing : function () { + var doc = c.insert({ _key: "test", value: 1 }); + try { + db._executeTransaction({ + collections: { write: c.name() }, + action: function() { + c.update({ _key: "test", value: 2 }); + throw "foo"; + } + }); + fail(); + } catch (err) { + } + + assertEqual(1, c.toArray().length); + assertEqual(1, c.figures().revisions.count); + assertEqual(1, c.document("test").value); + }, + + testUpdateAndInsertFailing : function () { + var doc = c.insert({ _key: "test", value: 1 }); + try { + db._executeTransaction({ + collections: { write: c.name() }, + action: function() { + c.update({ _key: "test", value: 2 }); + c.insert({ _key: "test", value: 3 }); + throw "foo"; + } + }); + fail(); + } catch (err) { + } + + assertEqual(1, c.toArray().length); + assertEqual(1, c.figures().revisions.count); + assertEqual(1, c.document("test").value); + }, + + testRemoveAndInsert : function () { + var doc = c.insert({ _key: "test", value: 1 }); + db._executeTransaction({ + collections: { write: c.name() }, + action: function() { + c.remove("test"); + c.insert({ _key: "test", value: 2 }); + } + }); + + assertEqual(1, c.toArray().length); + assertEqual(1, c.figures().revisions.count); + assertEqual(2, c.document("test").value); + }, + + testRemoveAndInsertFailing : function () { + var doc = c.insert({ _key: "test", value: 1 }); + try { + db._executeTransaction({ + collections: { write: c.name() }, + action: function() { + c.remove("test"); + c.insert({ _key: "test", value: 3 }); + throw "foo"; + } + }); + fail(); + } catch (err) { + } + + assertEqual(1, c.toArray().length); + assertEqual(1, c.figures().revisions.count); + assertEqual(1, c.document("test").value); + } + + }; +} + //////////////////////////////////////////////////////////////////////////////// /// @brief test suite //////////////////////////////////////////////////////////////////////////////// @@ -5157,6 +5388,9 @@ function transactionServerFailuresSuite () { /// @brief executes the test suites //////////////////////////////////////////////////////////////////////////////// +jsunity.run(transactionRevisionsSuite); +jsunity.run(transactionRollbackSuite); + // only run this test suite if server-side failures are enabled if (internal.debugCanUseFailAt()) { jsunity.run(transactionServerFailuresSuite); @@ -5167,7 +5401,6 @@ jsunity.run(transactionCollectionsSuite); jsunity.run(transactionOperationsSuite); jsunity.run(transactionBarriersSuite); jsunity.run(transactionGraphSuite); -jsunity.run(transactionRollbackSuite); jsunity.run(transactionCountSuite); 
jsunity.run(transactionCrossCollectionSuite); jsunity.run(transactionConstraintsSuite); diff --git a/lib/Basics/StaticStrings.cpp b/lib/Basics/StaticStrings.cpp index de26d7d8a5..c0a921785d 100644 --- a/lib/Basics/StaticStrings.cpp +++ b/lib/Basics/StaticStrings.cpp @@ -45,6 +45,16 @@ std::string const StaticStrings::RevString("_rev"); std::string const StaticStrings::FromString("_from"); std::string const StaticStrings::ToString("_to"); +// URL parameter names +std::string const StaticStrings::IgnoreRevsString("ignoreRevs"); +std::string const StaticStrings::IsRestoreString("isRestore"); +std::string const StaticStrings::KeepNullString("keepNull"); +std::string const StaticStrings::MergeObjectsString("mergeObjects"); +std::string const StaticStrings::ReturnNewString("returnNew"); +std::string const StaticStrings::ReturnOldString("returnOld"); +std::string const StaticStrings::SilentString("silent"); +std::string const StaticStrings::WaitForSyncString("waitForSync"); + // database and collection names std::string const StaticStrings::SystemDatabase("_system"); diff --git a/lib/Basics/StaticStrings.h b/lib/Basics/StaticStrings.h index 14e228e651..1b67c7537d 100644 --- a/lib/Basics/StaticStrings.h +++ b/lib/Basics/StaticStrings.h @@ -50,6 +50,16 @@ class StaticStrings { static std::string const RevString; static std::string const FromString; static std::string const ToString; + + // URL parameter names + static std::string const IgnoreRevsString; + static std::string const IsRestoreString; + static std::string const KeepNullString; + static std::string const MergeObjectsString; + static std::string const ReturnNewString; + static std::string const ReturnOldString; + static std::string const SilentString; + static std::string const WaitForSyncString; // database and collection names static std::string const SystemDatabase; diff --git a/lib/Basics/errors.dat b/lib/Basics/errors.dat index 8f25b67019..c3659c3fa3 100755 --- a/lib/Basics/errors.dat +++ b/lib/Basics/errors.dat @@ -350,7 +350,7 @@ SIMPLE_CLIENT_COULD_NOT_READ,2003,"could not read from server","Will be raised w ## Communicator errors ################################################################################ -COMMUNICATOR_REQUEST_ABORTED,2100,"Request aborted", "Request was aborted." +COMMUNICATOR_REQUEST_ABORTED,2100,"Request aborted","Request was aborted." 
################################################################################ ## Foxx management errors diff --git a/scripts/startLocalCluster.sh b/scripts/startLocalCluster.sh index 99590dddea..80f3b92920 100755 --- a/scripts/startLocalCluster.sh +++ b/scripts/startLocalCluster.sh @@ -33,8 +33,12 @@ TRANSPORT="tcp" LOG_LEVEL="INFO" LOG_LEVEL_AGENCY="" LOG_LEVEL_CLUSTER="" -XTERM="x-terminal-emulator" -XTERMOPTIONS="--geometry=80x43" +if [ -z "$XTERM" ] ; then + XTERM="x-terminal-emulator" +fi +if [ -z "$XTERMOPTIONS" ] ; then + XTERMOPTIONS="--geometry=80x43" +fi SECONDARIES=0 BUILD="build" JWT_SECRET="" @@ -227,7 +231,7 @@ start() { --server.endpoint $TRANSPORT://0.0.0.0:$PORT \ --cluster.my-role $ROLE \ --log.file cluster/$PORT.log \ - --log.level info \ + --log.level $LOG_LEVEL \ --server.statistics true \ --server.threads 5 \ --javascript.startup-directory ./js \ @@ -250,7 +254,7 @@ startTerminal() { PORT=$2 mkdir cluster/data$PORT echo Starting $TYPE on port $PORT - $XTERM $XTERMOPTIONS -e ${BUILD}/bin/arangod \ + $XTERM $XTERMOPTIONS -e "${BUILD}/bin/arangod \ -c none \ --database.directory cluster/data$PORT \ --cluster.agency-endpoint $TRANSPORT://127.0.0.1:$BASE \ @@ -258,7 +262,7 @@ startTerminal() { --server.endpoint $TRANSPORT://0.0.0.0:$PORT \ --cluster.my-role $ROLE \ --log.file cluster/$PORT.log \ - --log.level info \ + --log.level $LOG_LEVEL \ --server.statistics true \ --server.threads 5 \ --javascript.startup-directory ./js \ @@ -266,7 +270,7 @@ startTerminal() { --javascript.app-path ./js/apps \ $AUTHENTICATION \ $SSLKEYFILE \ - --console & + --console" & } startDebugger() { @@ -287,7 +291,7 @@ startDebugger() { --server.endpoint $TRANSPORT://0.0.0.0:$PORT \ --cluster.my-role $ROLE \ --log.file cluster/$PORT.log \ - --log.level info \ + --log.level $LOG_LEVEL \ --server.statistics false \ --server.threads 5 \ --javascript.startup-directory ./js \ @@ -295,7 +299,7 @@ startDebugger() { --javascript.app-path ./js/apps \ $SSLKEYFILE \ $AUTHENTICATION & - $XTERM $XTERMOPTIONS -e gdb ${BUILD}/bin/arangod -p $! & + $XTERM $XTERMOPTIONS -e "gdb ${BUILD}/bin/arangod -p $!" & } startRR() { @@ -308,7 +312,7 @@ startRR() { PORT=$2 mkdir cluster/data$PORT echo Starting $TYPE on port $PORT with rr tracer - $XTERM $XTERMOPTIONS -e rr ${BUILD}/bin/arangod \ + $XTERM $XTERMOPTIONS -e "rr ${BUILD}/bin/arangod \ -c none \ --database.directory cluster/data$PORT \ --cluster.agency-endpoint $TRANSPORT://127.0.0.1:$BASE \ @@ -316,7 +320,7 @@ startRR() { --server.endpoint $TRANSPORT://0.0.0.0:$PORT \ --cluster.my-role $ROLE \ --log.file cluster/$PORT.log \ - --log.level info \ + --log.level $LOG_LEVEL \ --server.statistics true \ --server.threads 5 \ --javascript.startup-directory ./js \ @@ -324,7 +328,7 @@ startRR() { --javascript.app-path ./js/apps \ $AUTHENTICATION \ $SSLKEYFILE \ - --console & + --console" & } PORTTOPDB=`expr 8629 + $NRDBSERVERS - 1`
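For reference, the helpers exported above (executePlanForDatabases, executePlanForCollections) can be exercised against a single local server, which is how the new noncluster spec drives them. A minimal sketch along the same lines, assuming the debug setup used in the tests; the plan payload is illustrative and mirrors the test fixtures:

const cluster = require('@arangodb/cluster');
const db = require('internal').db;

// install the debug stubs, as the tests do, so that the server state
// functions behave sensibly outside of a real cluster
require('@arangodb/sync-replication-debug').setup();

// apply a database plan locally; the result maps database names to
// error descriptions and is empty on success
let errors = cluster.executePlanForDatabases({
  test: { id: 1, name: 'test' }
});

if (Object.keys(errors).length === 0) {
  // every planned database now exists locally, i.e. db._databases()
  // contains '_system' and 'test'; a collections plan could be applied
  // next via cluster.executePlanForCollections(...)
}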