mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of https://github.com/arangodb/arangodb into devel
This commit is contained in: commit d71ebf7c1d
@@ -106,9 +106,15 @@ class Builder {
checkOverflow(_pos + len);
#endif

_buffer->prealloc(len);
_start = _buffer->data();
_size = _buffer->size();
// copy builder pointer into local variable
// this avoids accessing the shared pointer repeatedly, which has
// a small but non-negligible cost
Buffer<uint8_t>* buffer = _buffer.get();
VELOCYPACK_ASSERT(buffer != nullptr);

buffer->prealloc(len);
_start = buffer->data();
_size = buffer->size();
}

// Sort the indices by attribute name:
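Editor's note: the hunk above replaces repeated accesses through the shared pointer _buffer with a locally cached raw pointer. A minimal sketch of that pattern, with illustrative names only (not part of the commit):

#include <cassert>
#include <memory>
#include <vector>

struct Buffer { std::vector<unsigned char> bytes; };

void fillBuffer(std::shared_ptr<Buffer> const& shared, size_t n) {
  Buffer* raw = shared.get();   // dereference the shared_ptr once, up front
  assert(raw != nullptr);
  for (size_t i = 0; i < n; ++i) {
    raw->bytes.push_back(0);    // the hot loop touches only the raw pointer
  }
}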
@@ -233,9 +233,15 @@ void Parser::parseString() {
// insert 8 bytes for the length as soon as we reach 127 bytes
// in the VPack representation.

ValueLength const base = _b->_pos;
_b->reserveSpace(1);
_b->_start[_b->_pos++] = 0x40; // correct this later
// copy builder pointer into local variable
// this avoids accessing the shared pointer repeatedly, which has
// a small but non-negligible cost
Builder* builder = _b.get();
VELOCYPACK_ASSERT(builder != nullptr);

ValueLength const base = builder->_pos;
builder->reserveSpace(1);
builder->_start[builder->_pos++] = 0x40; // correct this later

bool large = false; // set to true when we reach 128 bytes
uint32_t highSurrogate = 0; // non-zero if high-surrogate was seen
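Editor's note: the 0x40 byte written above is the short-string header that gets patched later. A hedged sketch of the header scheme this hunk relies on, assuming nothing beyond what the diff itself shows (strings up to 126 bytes use 0x40 + length, longer strings use 0xbf followed by an 8-byte little-endian length):

#include <cstdint>
#include <vector>

void appendStringHeader(std::vector<uint8_t>& out, uint64_t len) {
  if (len <= 126) {
    out.push_back(static_cast<uint8_t>(0x40 + len));   // one-byte header
  } else {
    out.push_back(0xbf);                               // long-string marker
    for (int i = 0; i < 8; ++i) {
      out.push_back(static_cast<uint8_t>(len & 0xff)); // little-endian length
      len >>= 8;
    }
  }
}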
@@ -243,42 +249,42 @@ void Parser::parseString() {
while (true) {
size_t remainder = _size - _pos;
if (remainder >= 16) {
_b->reserveSpace(remainder);
builder->reserveSpace(remainder);
size_t count;
// Note that the SSE4.2 accelerated string copying functions might
// peek up to 15 bytes over the given end, because they use 128bit
// registers. Therefore, we have to subtract 15 from remainder
// to be on the safe side. Further bytes will be processed below.
if (options->validateUtf8Strings) {
count = JSONStringCopyCheckUtf8(_b->_start + _b->_pos, _start + _pos,
count = JSONStringCopyCheckUtf8(builder->_start + builder->_pos, _start + _pos,
remainder - 15);
} else {
count = JSONStringCopy(_b->_start + _b->_pos, _start + _pos,
count = JSONStringCopy(builder->_start + builder->_pos, _start + _pos,
remainder - 15);
}
_pos += count;
_b->_pos += count;
builder->_pos += count;
}
int i = getOneOrThrow("Unfinished string");
if (!large && _b->_pos - (base + 1) > 126) {
if (!large && builder->_pos - (base + 1) > 126) {
large = true;
_b->reserveSpace(8);
ValueLength len = _b->_pos - (base + 1);
memmove(_b->_start + base + 9, _b->_start + base + 1, checkOverflow(len));
_b->_pos += 8;
builder->reserveSpace(8);
ValueLength len = builder->_pos - (base + 1);
memmove(builder->_start + base + 9, builder->_start + base + 1, checkOverflow(len));
builder->_pos += 8;
}
switch (i) {
case '"':
ValueLength len;
if (!large) {
len = _b->_pos - (base + 1);
_b->_start[base] = 0x40 + static_cast<uint8_t>(len);
len = builder->_pos - (base + 1);
builder->_start[base] = 0x40 + static_cast<uint8_t>(len);
// String is ready
} else {
len = _b->_pos - (base + 9);
_b->_start[base] = 0xbf;
len = builder->_pos - (base + 9);
builder->_start[base] = 0xbf;
for (ValueLength i = 1; i <= 8; i++) {
_b->_start[base + i] = len & 0xff;
builder->_start[base + i] = len & 0xff;
len >>= 8;
}
}
@@ -293,33 +299,33 @@ void Parser::parseString() {
case '"':
case '/':
case '\\':
_b->reserveSpace(1);
_b->_start[_b->_pos++] = static_cast<uint8_t>(i);
builder->reserveSpace(1);
builder->_start[builder->_pos++] = static_cast<uint8_t>(i);
highSurrogate = 0;
break;
case 'b':
_b->reserveSpace(1);
_b->_start[_b->_pos++] = '\b';
builder->reserveSpace(1);
builder->_start[builder->_pos++] = '\b';
highSurrogate = 0;
break;
case 'f':
_b->reserveSpace(1);
_b->_start[_b->_pos++] = '\f';
builder->reserveSpace(1);
builder->_start[builder->_pos++] = '\f';
highSurrogate = 0;
break;
case 'n':
_b->reserveSpace(1);
_b->_start[_b->_pos++] = '\n';
builder->reserveSpace(1);
builder->_start[builder->_pos++] = '\n';
highSurrogate = 0;
break;
case 'r':
_b->reserveSpace(1);
_b->_start[_b->_pos++] = '\r';
builder->reserveSpace(1);
builder->_start[builder->_pos++] = '\r';
highSurrogate = 0;
break;
case 't':
_b->reserveSpace(1);
_b->_start[_b->_pos++] = '\t';
builder->reserveSpace(1);
builder->_start[builder->_pos++] = '\t';
highSurrogate = 0;
break;
case 'u': {
@@ -342,23 +348,23 @@ void Parser::parseString() {
}
}
if (v < 0x80) {
_b->reserveSpace(1);
_b->_start[_b->_pos++] = static_cast<uint8_t>(v);
builder->reserveSpace(1);
builder->_start[builder->_pos++] = static_cast<uint8_t>(v);
highSurrogate = 0;
} else if (v < 0x800) {
_b->reserveSpace(2);
_b->_start[_b->_pos++] = 0xc0 + (v >> 6);
_b->_start[_b->_pos++] = 0x80 + (v & 0x3f);
builder->reserveSpace(2);
builder->_start[builder->_pos++] = 0xc0 + (v >> 6);
builder->_start[builder->_pos++] = 0x80 + (v & 0x3f);
highSurrogate = 0;
} else if (v >= 0xdc00 && v < 0xe000 && highSurrogate != 0) {
// Low surrogate, put the two together:
v = 0x10000 + ((highSurrogate - 0xd800) << 10) + v - 0xdc00;
_b->_pos -= 3;
_b->reserveSpace(4);
_b->_start[_b->_pos++] = 0xf0 + (v >> 18);
_b->_start[_b->_pos++] = 0x80 + ((v >> 12) & 0x3f);
_b->_start[_b->_pos++] = 0x80 + ((v >> 6) & 0x3f);
_b->_start[_b->_pos++] = 0x80 + (v & 0x3f);
builder->_pos -= 3;
builder->reserveSpace(4);
builder->_start[builder->_pos++] = 0xf0 + (v >> 18);
builder->_start[builder->_pos++] = 0x80 + ((v >> 12) & 0x3f);
builder->_start[builder->_pos++] = 0x80 + ((v >> 6) & 0x3f);
builder->_start[builder->_pos++] = 0x80 + (v & 0x3f);
highSurrogate = 0;
} else {
if (v >= 0xd800 && v < 0xdc00) {
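Editor's note: a sketch of the surrogate-pair handling visible above, pulled into a standalone helper for clarity. It combines a high and a low \u escape into one code point and emits the 4-byte UTF-8 sequence; the free-standing form and names are illustrative only:

#include <cstdint>
#include <vector>

void appendSurrogatePair(std::vector<uint8_t>& out, uint32_t high, uint32_t low) {
  // expects high in [0xd800, 0xdc00) and low in [0xdc00, 0xe000)
  uint32_t v = 0x10000 + ((high - 0xd800) << 10) + (low - 0xdc00);
  out.push_back(static_cast<uint8_t>(0xf0 + (v >> 18)));
  out.push_back(static_cast<uint8_t>(0x80 + ((v >> 12) & 0x3f)));
  out.push_back(static_cast<uint8_t>(0x80 + ((v >> 6) & 0x3f)));
  out.push_back(static_cast<uint8_t>(0x80 + (v & 0x3f)));
}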
@@ -367,10 +373,10 @@ void Parser::parseString() {
} else {
highSurrogate = 0;
}
_b->reserveSpace(3);
_b->_start[_b->_pos++] = 0xe0 + (v >> 12);
_b->_start[_b->_pos++] = 0x80 + ((v >> 6) & 0x3f);
_b->_start[_b->_pos++] = 0x80 + (v & 0x3f);
builder->reserveSpace(3);
builder->_start[builder->_pos++] = 0xe0 + (v >> 12);
builder->_start[builder->_pos++] = 0x80 + ((v >> 6) & 0x3f);
builder->_start[builder->_pos++] = 0x80 + (v & 0x3f);
}
break;
}
@@ -386,13 +392,13 @@ void Parser::parseString() {
throw Exception(Exception::UnexpectedControlCharacter);
}
highSurrogate = 0;
_b->reserveSpace(1);
_b->_start[_b->_pos++] = static_cast<uint8_t>(i);
builder->reserveSpace(1);
builder->_start[builder->_pos++] = static_cast<uint8_t>(i);
} else {
if (!options->validateUtf8Strings) {
highSurrogate = 0;
_b->reserveSpace(1);
_b->_start[_b->_pos++] = static_cast<uint8_t>(i);
builder->reserveSpace(1);
builder->_start[builder->_pos++] = static_cast<uint8_t>(i);
} else {
// multi-byte UTF-8 sequence!
int follow = 0;
@@ -412,14 +418,14 @@ void Parser::parseString() {
}

// validate follow up characters
_b->reserveSpace(1 + follow);
_b->_start[_b->_pos++] = static_cast<uint8_t>(i);
builder->reserveSpace(1 + follow);
builder->_start[builder->_pos++] = static_cast<uint8_t>(i);
for (int j = 0; j < follow; ++j) {
i = getOneOrThrow("scanString: truncated UTF-8 sequence");
if ((i & 0xc0) != 0x80) {
throw Exception(Exception::InvalidUtf8Sequence);
}
_b->_start[_b->_pos++] = static_cast<uint8_t>(i);
builder->_start[builder->_pos++] = static_cast<uint8_t>(i);
}
highSurrogate = 0;
}
@@ -430,13 +436,19 @@ void Parser::parseString() {
}

void Parser::parseArray() {
_b->addArray();
// copy builder pointer into local variable
// this avoids accessing the shared pointer repeatedly, which has
// a small but non-negligible cost
Builder* builder = _b.get();
VELOCYPACK_ASSERT(builder != nullptr);

builder->addArray();

int i = skipWhiteSpace("Expecting item or ']'");
if (i == ']') {
// empty array
++_pos; // the closing ']'
_b->close();
builder->close();
return;
}
@@ -444,13 +456,13 @@ void Parser::parseArray() {

while (true) {
// parse array element itself
_b->reportAdd();
builder->reportAdd();
parseJson();
i = skipWhiteSpace("Expecting ',' or ']'");
if (i == ']') {
// end of array
++_pos; // the closing ']'
_b->close();
builder->close();
decreaseNesting();
return;
}
@@ -466,7 +478,13 @@ void Parser::parseArray() {
}

void Parser::parseObject() {
_b->addObject();
// copy builder pointer into local variable
// this avoids accessing the shared pointer repeatedly, which has
// a small but non-negligible cost
Builder* builder = _b.get();
VELOCYPACK_ASSERT(builder != nullptr);

builder->addObject();

int i = skipWhiteSpace("Expecting item or '}'");
if (i == '}') {
@@ -475,7 +493,7 @@ void Parser::parseObject() {

if (_nesting != 0 || !options->keepTopLevelOpen) {
// only close if we've not been asked to keep top level open
_b->close();
builder->close();
}
return;
}
@@ -490,22 +508,22 @@ void Parser::parseObject() {
// get past the initial '"'
++_pos;

_b->reportAdd();
builder->reportAdd();
bool excludeAttribute = false;
auto const lastPos = _b->_pos;
auto const lastPos = builder->_pos;
if (options->attributeExcludeHandler == nullptr) {
parseString();
} else {
parseString();
if (options->attributeExcludeHandler->shouldExclude(
Slice(_b->_start + lastPos), _nesting)) {
Slice(builder->_start + lastPos), _nesting)) {
excludeAttribute = true;
}
}

if (!excludeAttribute && options->attributeTranslator != nullptr) {
// check if a translation for the attribute name exists
Slice key(_b->_start + lastPos);
Slice key(builder->_start + lastPos);

if (key.isString()) {
ValueLength keyLength;
@@ -517,8 +535,8 @@ void Parser::parseObject() {
// found translation... now reset position to old key position
// and simply overwrite the existing key with the numeric translation
// id
_b->_pos = lastPos;
_b->addUInt(Slice(translated).getUInt());
builder->_pos = lastPos;
builder->addUInt(Slice(translated).getUInt());
}
}
}
@@ -533,7 +551,7 @@ void Parser::parseObject() {
parseJson();

if (excludeAttribute) {
_b->removeLast();
builder->removeLast();
}

i = skipWhiteSpace("Expecting ',' or '}'");
@@ -542,7 +560,7 @@ void Parser::parseObject() {
++_pos; // the closing '}'
if (_nesting != 1 || !options->keepTopLevelOpen) {
// only close if we've not been asked to keep top level open
_b->close();
builder->close();
}
decreaseNesting();
return;
@@ -224,7 +224,7 @@ void AgencyWriteTransaction::toVelocyPack(VPackBuilder& builder) const {
VPackObjectBuilder guard3(&builder);
}

builder.add(VPackValue(transactionId)); // Transactions
builder.add(VPackValue(clientId)); // Transactions
}

bool AgencyWriteTransaction::validate(AgencyCommResult const& result) const {
@@ -283,7 +283,7 @@ void AgencyGeneralTransaction::toVelocyPack(VPackBuilder& builder) const {
} else {
std::get<0>(operation).toGeneralBuilder(builder);
std::get<1>(operation).toGeneralBuilder(builder);
builder.add(VPackValue(transactionId));
builder.add(VPackValue(clientId));
}
}
}
@@ -328,18 +328,23 @@ AgencyCommResult::AgencyCommResult()
_body(),
_values(),
_statusCode(0),
_connected(false) {}
_connected(false),
_clientId("") {}

AgencyCommResult::AgencyCommResult(int code, std::string const& message)
AgencyCommResult::AgencyCommResult(
int code, std::string const& message, std::string const& clientId)
: _location(),
_message(message),
_body(),
_values(),
_statusCode(code),
_connected(false) {}
_connected(false),
_clientId(clientId) {}

bool AgencyCommResult::connected() const { return _connected; }

std::string AgencyCommResult::clientId() const { return _clientId; }

int AgencyCommResult::httpCode() const { return _statusCode; }

int AgencyCommResult::errorCode() const {
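Editor's note: a hedged usage sketch for the extended constructor (the helper name is hypothetical and the AgencyCommResult type comes from this header): because the third parameter is defaulted, existing two-argument call sites keep compiling while new ones can tag an error result with the originating client id.

AgencyCommResult makeNoEndpointsError(std::string const& clientId) {
  // third argument is optional, so old two-argument calls remain valid
  return AgencyCommResult(400, "No endpoints for agency found.", clientId);
}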
@@ -1070,9 +1075,9 @@ AgencyCommResult AgencyComm::sendTransactionWithFailover(

AgencyCommResult result = sendWithFailover(
arangodb::rest::RequestType::POST,
(timeout == 0.0 ? AgencyCommManager::CONNECTION_OPTIONS._requestTimeout
: timeout),
url, builder.slice().toJson());
(timeout == 0.0) ?
AgencyCommManager::CONNECTION_OPTIONS._requestTimeout : timeout,
url, builder.slice().toJson(), transaction.getClientId());

if (!result.successful() && result.httpCode() !=
(int)arangodb::rest::ResponseCode::PRECONDITION_FAILED) {
@@ -1285,7 +1290,8 @@ void AgencyComm::updateEndpoints(arangodb::velocypack::Slice const& current) {

AgencyCommResult AgencyComm::sendWithFailover(
arangodb::rest::RequestType method, double const timeout,
std::string const& initialUrl, std::string const& body) {
std::string const& initialUrl, std::string const& body,
std::string const& clientId) {

std::string endpoint;
std::unique_ptr<GeneralClientConnection> connection =
@@ -1326,7 +1332,7 @@ AgencyCommResult AgencyComm::sendWithFailover(
++tries;

if (connection == nullptr) {
AgencyCommResult result(400, "No endpoints for agency found.");
AgencyCommResult result(400, "No endpoints for agency found.", clientId);
LOG_TOPIC(ERR, Logger::AGENCYCOMM) << result._message;
return result;
}
@@ -1343,7 +1349,7 @@ AgencyCommResult AgencyComm::sendWithFailover(

// try to send; if we fail completely, do not retry
try {
result = send(connection.get(), method, conTimeout, url, body);
result = send(connection.get(), method, conTimeout, url, body, clientId);
} catch (...) {
AgencyCommManager::MANAGER->failed(std::move(connection), endpoint);
endpoint.clear();
@@ -1403,7 +1409,7 @@ AgencyCommResult AgencyComm::send(
AgencyCommResult AgencyComm::send(
arangodb::httpclient::GeneralClientConnection* connection,
arangodb::rest::RequestType method, double timeout, std::string const& url,
std::string const& body) {
std::string const& body, std::string const& clientId) {
TRI_ASSERT(connection != nullptr);

if (method == arangodb::rest::RequestType::GET ||
@@ -1417,6 +1423,9 @@ AgencyCommResult AgencyComm::send(
AgencyCommResult result;
result._connected = false;
result._statusCode = 0;
if (!clientId.empty()) {
result._clientId = clientId;
}

LOG_TOPIC(TRACE, Logger::AGENCYCOMM)
<< "sending " << arangodb::HttpRequest::translateMethod(method)
@@ -218,7 +218,9 @@ class AgencyOperation {
class AgencyCommResult {
public:
AgencyCommResult();
AgencyCommResult(int code, std::string const& message);
AgencyCommResult(int code, std::string const& message,
std::string const& transactionId = std::string());

~AgencyCommResult() = default;

public:
@@ -230,6 +232,8 @@ class AgencyCommResult {

int errorCode() const;

std::string clientId() const;

std::string errorMessage() const;

std::string errorDetails() const;
@@ -256,6 +260,9 @@ class AgencyCommResult {

private:
std::shared_ptr<velocypack::Builder> _vpack;

public:
std::string _clientId;
};

// -----------------------------------------------------------------------------
@@ -272,7 +279,8 @@ public:
virtual std::string toJson() const = 0;
virtual void toVelocyPack(arangodb::velocypack::Builder&) const = 0;
virtual std::string const& path() const = 0;

virtual std::string getClientId() const = 0;

virtual bool validate(AgencyCommResult const& result) const = 0;

};
@@ -285,14 +293,14 @@ struct AgencyGeneralTransaction : public AgencyTransaction {

explicit AgencyGeneralTransaction(
std::pair<AgencyOperation,AgencyPrecondition> const& operation) :
transactionId(to_string(boost::uuids::random_generator()())) {
clientId(to_string(boost::uuids::random_generator()())) {
operations.push_back(operation);
}

explicit AgencyGeneralTransaction(
std::vector<std::pair<AgencyOperation,AgencyPrecondition>> const& _opers) :
operations(_opers),
transactionId(to_string(boost::uuids::random_generator()())) {}
clientId(to_string(boost::uuids::random_generator()())) {}

AgencyGeneralTransaction() = default;
@@ -305,12 +313,16 @@ struct AgencyGeneralTransaction : public AgencyTransaction {

void push_back(std::pair<AgencyOperation,AgencyPrecondition> const&);

inline std::string const& path() const override final {
inline virtual std::string const& path() const override final {
return AgencyTransaction::TypeUrl[2];
}

inline virtual std::string getClientId() const override final {
return clientId;
}

virtual bool validate(AgencyCommResult const& result) const override final;
std::string transactionId;
std::string clientId;

};
@@ -323,24 +335,24 @@ struct AgencyWriteTransaction : public AgencyTransaction {
public:

explicit AgencyWriteTransaction(AgencyOperation const& operation) :
transactionId(to_string(boost::uuids::random_generator()())) {
clientId(to_string(boost::uuids::random_generator()())) {
operations.push_back(operation);
}

explicit AgencyWriteTransaction (std::vector<AgencyOperation> const& _opers) :
operations(_opers),
transactionId(to_string(boost::uuids::random_generator()())) {}
clientId(to_string(boost::uuids::random_generator()())) {}

AgencyWriteTransaction(AgencyOperation const& operation,
AgencyPrecondition const& precondition) :
transactionId(to_string(boost::uuids::random_generator()())) {
clientId(to_string(boost::uuids::random_generator()())) {
operations.push_back(operation);
preconditions.push_back(precondition);
}

AgencyWriteTransaction(std::vector<AgencyOperation> const& _operations,
AgencyPrecondition const& precondition) :
transactionId(to_string(boost::uuids::random_generator()())) {
clientId(to_string(boost::uuids::random_generator()())) {
for (auto const& op : _operations) {
operations.push_back(op);
}
@@ -349,7 +361,7 @@ public:

AgencyWriteTransaction(std::vector<AgencyOperation> const& opers,
std::vector<AgencyPrecondition> const& precs) :
transactionId(to_string(boost::uuids::random_generator()())) {
clientId(to_string(boost::uuids::random_generator()())) {
for (auto const& op : opers) {
operations.push_back(op);
}
@@ -365,15 +377,19 @@ public:

std::string toJson() const override final;

inline std::string const& path() const override final {
inline virtual std::string const& path() const override final {
return AgencyTransaction::TypeUrl[1];
}

inline virtual std::string getClientId() const override final {
return clientId;
}

virtual bool validate(AgencyCommResult const& result) const override final;

std::vector<AgencyPrecondition> preconditions;
std::vector<AgencyOperation> operations;
std::string transactionId;
std::string clientId;
};

// -----------------------------------------------------------------------------
@@ -427,6 +443,10 @@ public:
return AgencyTransaction::TypeUrl[3];
}

inline virtual std::string getClientId() const override final {
return std::string();
}

virtual bool validate(AgencyCommResult const& result) const override final;

std::vector<AgencyPrecondition> preconditions;
@@ -454,10 +474,14 @@ public:

std::string toJson() const override final;

inline std::string const& path() const override final {
inline virtual std::string const& path() const override final {
return AgencyTransaction::TypeUrl[0];
}

inline virtual std::string getClientId() const override final {
return std::string();
}

virtual bool validate(AgencyCommResult const& result) const override final;

std::vector<std::string> keys;
@@ -614,7 +638,8 @@ class AgencyComm {
bool ensureStructureInitialized();

AgencyCommResult sendWithFailover(arangodb::rest::RequestType, double,
std::string const&, std::string const&);
std::string const&, std::string const&,
std::string const& clientId = std::string());

private:
bool lock(std::string const&, double, double,
@@ -623,7 +648,8 @@ class AgencyComm {
bool unlock(std::string const&, arangodb::velocypack::Slice const&, double);

AgencyCommResult send(httpclient::GeneralClientConnection*, rest::RequestType,
double, std::string const&, std::string const&);
double, std::string const&, std::string const&,
std::string const& clientId = std::string());

bool tryInitializeStructure(std::string const& jwtSecret);
@@ -810,7 +810,6 @@ void Agent::run() {

} else {
_appendCV.wait(1000000);
updateConfiguration();
}

}
@@ -910,9 +909,9 @@ void Agent::detectActiveAgentFailures() {
system_clock::now() - lastAcked.at(id)).count();
if (ds > 180.0) {
std::string repl = _config.nextAgentInLine();
LOG_TOPIC(DEBUG, Logger::AGENCY) << "Active agent " << id << " has failed. << "
<< repl << " will be promoted to active agency membership";
// Guarded in ::
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Active agent " << id << " has failed. << " << repl
<< " will be promoted to active agency membership";
_activator = std::make_unique<AgentActivator>(this, id, repl);
_activator->start();
return;
@@ -923,13 +922,6 @@ void Agent::detectActiveAgentFailures() {
}


void Agent::updateConfiguration() {

// First ask last know leader

}


/// Orderly shutdown
void Agent::beginShutdown() {
Thread::beginShutdown();
@@ -223,9 +223,6 @@ class Agent : public arangodb::Thread {
/// @brief persist agency configuration in RAFT
void persistConfiguration(term_t t);

/// @brief Update my configuration as passive agent
void updateConfiguration();

/// @brief Find out, if we've had acknowledged RPCs recent enough
bool challengeLeadership();
@@ -252,8 +252,7 @@ JOB_STATUS FailedFollower::status() {
Node const& planned = _snapshot(planPath);
Node const& current = _snapshot(curPath);

if (planned.slice() == current.slice()) {

if (compareServerLists(planned.slice(), current.slice())) {
// Remove shard from /arango/Target/FailedServers/<server> array
Builder del;
del.openArray();
@@ -25,6 +25,28 @@

using namespace arangodb::consensus;

bool arangodb::consensus::compareServerLists(Slice plan, Slice current) {
if (!plan.isArray() || !current.isArray()) {
return false;
}
std::vector<std::string> planv, currv;
for (auto const& srv : VPackArrayIterator(plan)) {
if (srv.isString()) {
planv.push_back(srv.copyString());
}
}
for (auto const& srv : VPackArrayIterator(current)) {
if (srv.isString()) {
currv.push_back(srv.copyString());
}
}
bool equalLeader = !planv.empty() && !currv.empty() &&
planv.front() == currv.front();
std::sort(planv.begin(), planv.end());
std::sort(currv.begin(), currv.end());
return equalLeader && currv == planv;
}

Job::Job(Node const& snapshot, Agent* agent, std::string const& jobId,
std::string const& creator, std::string const& agencyPrefix) :
_snapshot(snapshot),
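Editor's note: a hedged sketch of the comparison semantics introduced by compareServerLists, using plain string vectors instead of VelocyPack slices: the first entry (the leader) must match exactly, the remaining entries (the followers) only as a set.

#include <algorithm>
#include <string>
#include <vector>

bool sameLeaderAndFollowerSet(std::vector<std::string> plan,
                              std::vector<std::string> current) {
  bool equalLeader = !plan.empty() && !current.empty() &&
                     plan.front() == current.front();
  std::sort(plan.begin(), plan.end());
  std::sort(current.begin(), current.end());
  return equalLeader && plan == current;
}

// sameLeaderAndFollowerSet({"A", "B", "C"}, {"A", "C", "B"}) == true
// sameLeaderAndFollowerSet({"A", "B", "C"}, {"B", "A", "C"}) == false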
@@ -37,6 +37,12 @@
namespace arangodb {
namespace consensus {

// This is intended for lists of servers with the first being the leader
// and all others followers. Both arguments must be arrays. Returns true,
// if the first items in both slice are equal and if both arrays contain
// the same set of strings.
bool compareServerLists(Slice plan, Slice current);

enum JOB_STATUS { TODO, PENDING, FINISHED, FAILED, NOTFOUND };
const std::vector<std::string> pos({"/Target/ToDo/", "/Target/Pending/",
"/Target/Finished/", "/Target/Failed/"});
@@ -190,7 +190,7 @@ bool MoveShard::start() {
}
}

// Are we ditributeShardsLiked by others?
// Are we distributeShardsLiked by others?
// Invoke moveShard here with others
auto collections = _snapshot(planColPrefix + _database).children();
std::vector<std::string> colsLikeMe;
@@ -430,16 +430,7 @@ JOB_STATUS MoveShard::status() {
Slice current = _snapshot(curPath).slice();
Slice plan = _snapshot(planPath).slice();

std::vector<std::string> planv, currv;
for (auto const& srv : VPackArrayIterator(plan)) {
planv.push_back(srv.copyString());
}
std::sort(planv.begin(), planv.end());
for (auto const& srv : VPackArrayIterator(current)) {
currv.push_back(srv.copyString());
}
std::sort(currv.begin(), currv.end());
if (currv == planv) {
if (compareServerLists(plan, current)) {
if (current[0].copyString() ==
std::string("_") + _from) { // Retired leader
@@ -318,21 +318,40 @@ std::vector<bool> Store::apply(
body.add("index", VPackValue(lastCommitIndex));
auto ret = in.equal_range(url);

// mop: XXX not exactly sure what is supposed to happen here
// if there are multiple subobjects being updates at the same time
// e.g.
// /hans/wurst
// /hans/wurst/peter: 1
// /hans/wurst
// /hans/wurst/uschi: 2
// we are generating invalid json...not sure if this here is a
// valid fix...it is most likely broken :S
std::string currentKey;
for (auto it = ret.first; it != ret.second; ++it) {
body.add(it->second->key, VPackValue(VPackValueType::Object));
if (currentKey != it->second->key) {
if (!currentKey.empty()) {
body.close();
}
body.add(it->second->key, VPackValue(VPackValueType::Object));
currentKey = it->second->key;
}
// mop: XXX maybe there are duplicates here as well?
// e.g. a key is set and deleted in the same transaction?
body.add(it->second->modified, VPackValue(VPackValueType::Object));
body.add("op", VPackValue(it->second->oper));
body.close();
}
if (!currentKey.empty()) {
body.close();
}

body.close();

std::string endpoint, path;
if (endpointPathFromUrl(url, endpoint, path)) {
auto headerFields =
std::make_unique<std::unordered_map<std::string, std::string>>();

arangodb::ClusterComm::instance()->asyncRequest(
"1", 1, endpoint, rest::RequestType::POST, path,
std::make_shared<std::string>(body.toString()), headerFields,
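Editor's note: a hedged, VelocyPack-free sketch of the grouping fix above: when a key-sorted range contains the same key several times, the key's object is opened once, all entries are emitted under it, and it is closed only when the key changes, so no duplicate keys are produced.

#include <iostream>
#include <map>
#include <string>

void emitGrouped(std::multimap<std::string, std::string> const& entries) {
  std::string currentKey;
  for (auto const& [key, op] : entries) {   // a multimap keeps equal keys adjacent
    if (currentKey != key) {
      if (!currentKey.empty()) {
        std::cout << "}\n";                 // close the previous key's object
      }
      std::cout << key << ": {\n";          // open the new key's object once
      currentKey = key;
    }
    std::cout << "  " << op << "\n";
  }
  if (!currentKey.empty()) {
    std::cout << "}\n";                     // close the last open object
  }
}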
@@ -103,13 +103,14 @@ void Supervision::upgradeAgency() {

// Check all DB servers, guarded above doChecks
std::vector<check_t> Supervision::checkDBServers() {

std::vector<check_t> ret;
Node::Children const& machinesPlanned =
_snapshot(planDBServersPrefix).children();
Node::Children const serversRegistered =
_snapshot(currentServersRegisteredPrefix).children();

bool reportPersistent;
bool reportPersistent = false;

std::vector<std::string> todelete;
for (auto const& machine : _snapshot(healthPrefix).children()) {
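Editor's note: the one-character change above matters because reading an uninitialized local bool is undefined behaviour in C++; a trivial illustration of the now well-defined flow, with made-up names:

#include <cstdio>

void check(bool statusChanged) {
  bool reportPersistent = false;       // defined start value, unlike before
  if (statusChanged) {
    reportPersistent = true;
  }
  if (reportPersistent) {
    std::printf("persist the status change\n");
  }
}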
@@ -148,6 +149,8 @@ std::vector<check_t> Supervision::checkDBServers() {
good = true;
}

reportPersistent = (heartbeatStatus != lastStatus);

query_t report = std::make_shared<Builder>();
report->openArray();
report->openArray();
@@ -174,9 +177,6 @@ std::vector<check_t> Supervision::checkDBServers() {

if (good) {

if (lastStatus != Supervision::HEALTH_STATUS_GOOD) {
reportPersistent = true;
}
report->add(
"LastHeartbeatAcked",
VPackValue(timepointToString(std::chrono::system_clock::now())));
@@ -210,7 +210,6 @@ std::vector<check_t> Supervision::checkDBServers() {
// for at least grace period
if (t.count() > _gracePeriod && secondsSinceLeader > _gracePeriod) {
if (lastStatus == "BAD") {
reportPersistent = true;
report->add("Status", VPackValue("FAILED"));
FailedServer fsj(_snapshot, _agent, std::to_string(_jobId++),
"supervision", _agencyPrefix, serverID);
@@ -257,6 +256,9 @@ std::vector<check_t> Supervision::checkDBServers() {

// Check all coordinators, guarded above doChecks
std::vector<check_t> Supervision::checkCoordinators() {

bool reportPersistent = false;

std::vector<check_t> ret;
Node::Children const& machinesPlanned =
_snapshot(planCoordinatorsPrefix).children();
@@ -305,6 +307,8 @@ std::vector<check_t> Supervision::checkCoordinators() {
good = true;
}

reportPersistent = (heartbeatStatus != lastStatus);

query_t report = std::make_shared<Builder>();
report->openArray();
report->openArray();
@@ -329,6 +333,7 @@ std::vector<check_t> Supervision::checkCoordinators() {
}

if (good) {

if (goodServerId.empty()) {
goodServerId = serverID;
}
@@ -359,6 +364,9 @@ std::vector<check_t> Supervision::checkCoordinators() {
report->close();
if (!this->isStopping()) {
_agent->transient(report);
if (reportPersistent) { // STATUS changes should be persisted
_agent->write(report);
}
}
}
@@ -62,145 +62,148 @@ class Optimizer {
// lower level values mean earlier rule execution

// note that levels must be unique
initial = 100,

// "Pass 1": moving nodes "up" (potentially outside loops):
pass1 = 100,
// ========================================================

// determine the "right" type of CollectNode and
// add a sort node for each COLLECT (may be removed later)
specializeCollectRule_pass1 = 105,
specializeCollectRule_pass1,

inlineSubqueriesRule_pass1 = 106,
inlineSubqueriesRule_pass1,

// split and-combined filters into multiple smaller filters
splitFiltersRule_pass1 = 110,
splitFiltersRule_pass1,

// move calculations up the dependency chain (to pull them out of
// inner loops etc.)
moveCalculationsUpRule_pass1 = 120,
moveCalculationsUpRule_pass1,

// move filters up the dependency chain (to make result sets as small
// as possible as early as possible)
moveFiltersUpRule_pass1 = 130,
moveFiltersUpRule_pass1,

// remove calculations that are repeatedly used in a query
removeRedundantCalculationsRule_pass1 = 140,
removeRedundantCalculationsRule_pass1,

/// "Pass 2": try to remove redundant or unnecessary nodes
pass2 = 200,
// "Pass 2": try to remove redundant or unnecessary nodes
// ======================================================

// remove filters from the query that are not necessary at all
// filters that are always true will be removed entirely
// filters that are always false will be replaced with a NoResults node
removeUnnecessaryFiltersRule_pass2 = 210,
removeUnnecessaryFiltersRule_pass2,

// remove calculations that are never necessary
removeUnnecessaryCalculationsRule_pass2 = 220,
removeUnnecessaryCalculationsRule_pass2,

// remove redundant sort blocks
removeRedundantSortsRule_pass2 = 230,
removeRedundantSortsRule_pass2,

/// "Pass 3": interchange EnumerateCollection nodes in all possible ways
/// this is level 500, please never let new plans from higher
/// levels go back to this or lower levels!
pass3 = 500,
interchangeAdjacentEnumerationsRule_pass3 = 510,
// "Pass 3": interchange EnumerateCollection nodes in all possible ways
// this is level 500, please never let new plans from higher
// levels go back to this or lower levels!
// ======================================================

interchangeAdjacentEnumerationsRule_pass3,

// "Pass 4": moving nodes "up" (potentially outside loops) (second try):
pass4 = 600,
// ======================================================

// move calculations up the dependency chain (to pull them out of
// inner loops etc.)
moveCalculationsUpRule_pass4 = 610,
moveCalculationsUpRule_pass4,

// move filters up the dependency chain (to make result sets as small
// as possible as early as possible)
moveFiltersUpRule_pass4 = 620,
moveFiltersUpRule_pass4,

/// "Pass 5": try to remove redundant or unnecessary nodes (second try)
// remove filters from the query that are not necessary at all
// filters that are always true will be removed entirely
// filters that are always false will be replaced with a NoResults node
pass5 = 700,
// ======================================================

// remove redundant sort blocks
removeRedundantSortsRule_pass5 = 710,
removeRedundantSortsRule_pass5,

// remove SORT RAND() if appropriate
removeSortRandRule_pass5 = 720,
removeSortRandRule_pass5,

// remove INTO for COLLECT if appropriate
removeCollectVariablesRule_pass5 = 740,
removeCollectVariablesRule_pass5,

// propagate constant attributes in FILTERs
propagateConstantAttributesRule_pass5 = 750,
propagateConstantAttributesRule_pass5,

// remove unused out variables for data-modification queries
removeDataModificationOutVariablesRule_pass5 = 760,
removeDataModificationOutVariablesRule_pass5,

/// "Pass 6": use indexes if possible for FILTER and/or SORT nodes
pass6 = 800,
// ======================================================

// replace simple OR conditions with IN
replaceOrWithInRule_pass6 = 810,
replaceOrWithInRule_pass6,

// remove redundant OR conditions
removeRedundantOrRule_pass6 = 820,
removeRedundantOrRule_pass6,

applyGeoIndexRule = 825,
applyGeoIndexRule,

useIndexesRule_pass6 = 830,
useIndexesRule_pass6,

// try to remove filters covered by index ranges
removeFiltersCoveredByIndexRule_pass6 = 840,
removeFiltersCoveredByIndexRule_pass6,

removeUnnecessaryFiltersRule_pass6 = 850,
removeUnnecessaryFiltersRule_pass6,

// try to find sort blocks which are superseeded by indexes
useIndexForSortRule_pass6 = 860,
useIndexForSortRule_pass6,

// sort values used in IN comparisons of remaining filters
sortInValuesRule_pass6 = 865,
sortInValuesRule_pass6,

// remove calculations that are never necessary
removeUnnecessaryCalculationsRule_pass6 = 870,
removeUnnecessaryCalculationsRule_pass6,

// merge filters into graph traversals
optimizeTraversalsRule_pass6 = 880,
prepareTraversalsRule_pass6 = 881,
optimizeTraversalsRule_pass6,
prepareTraversalsRule_pass6,

/// Pass 9: push down calculations beyond FILTERs and LIMITs
moveCalculationsDownRule_pass9 = 900,
moveCalculationsDownRule_pass9,

/// Pass 9: patch update statements
patchUpdateStatementsRule_pass9 = 902,
patchUpdateStatementsRule_pass9,

/// "Pass 10": final transformations for the cluster
// make operations on sharded collections use distribute
distributeInClusterRule_pass10 = 1000,
distributeInClusterRule_pass10,

// make operations on sharded collections use scatter / gather / remote
scatterInClusterRule_pass10 = 1010,
scatterInClusterRule_pass10,

// move FilterNodes & Calculation nodes in between
// scatter(remote) <-> gather(remote) so they're
// distributed to the cluster nodes.
distributeFilternCalcToClusterRule_pass10 = 1020,
distributeFilternCalcToClusterRule_pass10,

// move SortNodes into the distribution.
// adjust gathernode to also contain the sort criteria.
distributeSortToClusterRule_pass10 = 1030,
distributeSortToClusterRule_pass10,

// try to get rid of a RemoteNode->ScatterNode combination which has
// only a SingletonNode and possibly some CalculationNodes as dependencies
removeUnnecessaryRemoteScatterRule_pass10 = 1040,
removeUnnecessaryRemoteScatterRule_pass10,

// remove any superflous satellite collection joins...
// put it after Scatter rule because we would do
// the work twice otherwise
removeSatelliteJoinsRule_pass10 = 1045,
removeSatelliteJoinsRule_pass10,

// recognize that a RemoveNode can be moved to the shards
undistributeRemoveAfterEnumCollRule_pass10 = 1050

undistributeRemoveAfterEnumCollRule_pass10
};

public:
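Editor's note: the renumbering above works because C++ gives an enumerator without an initializer the previous enumerator's value plus one, so after this change only the explicit anchor values (such as initial = 100) pin the numbering and new rules can be inserted without renumbering everything that follows. A minimal illustration with made-up names:

#include <iostream>

enum RuleLevel {
  pass1 = 100,
  ruleA,        // 101
  ruleB,        // 102
  pass2 = 200,
  ruleC         // 201
};

int main() {
  std::cout << ruleA << " " << ruleB << " " << ruleC << "\n";  // prints 101 102 201
  return 0;
}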
@@ -3928,10 +3928,6 @@ void arangodb::aql::inlineSubqueriesRule(Optimizer* opt,
///////////////////////////////////////////////////////////////////////////////
// GEO RULE ///////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
//
// Description of what this Rule tries to achieve:
// https://docs.google.com/document/d/1G57UP08ZFywUXKi5cLvEIKpZP-AUKGwG9oAnFOX8LLo
//

struct GeoIndexInfo{
operator bool() const { return distanceNode && valid; }
@@ -78,8 +78,6 @@ SET(ARANGOD_SOURCES
Actions/actions.cpp
Agency/ActivationCallback.cpp
Agency/AddFollower.cpp
Agency/AgencyCallback.cpp
Agency/AgencyCallbackRegistry.cpp
Agency/AgencyComm.cpp
Agency/AgencyFeature.cpp
Agency/Agent.cpp
@@ -176,6 +174,8 @@ SET(ARANGOD_SOURCES
Aql/VariableGenerator.cpp
Aql/grammar.cpp
Aql/tokens.cpp
Cluster/AgencyCallback.cpp
Cluster/AgencyCallbackRegistry.cpp
Cluster/ClusterComm.cpp
Cluster/ClusterEdgeCursor.cpp
Cluster/ClusterFeature.cpp
@@ -24,7 +24,7 @@
#ifndef CLUSTER_AGENCYCALLACKREGISTRY_H
#define CLUSTER_AGENCYCALLACKREGISTRY_H 1

#include "Agency/AgencyCallback.h"
#include "Cluster/AgencyCallback.h"
#include "Basics/ReadWriteLock.h"

namespace arangodb {
@@ -903,10 +903,11 @@ int ClusterInfo::createDatabaseCoordinator(std::string const& name,
(int)arangodb::rest::ResponseCode::PRECONDITION_FAILED) {
return setErrormsg(TRI_ERROR_ARANGO_DUPLICATE_NAME, errorMsg);
}
errorMsg = std::string("Failed to create database in ") + __FILE__ + ":" + std::to_string(__LINE__);
errorMsg = std::string("Failed to create database with ") +
res._clientId + " at " + __FILE__ + ":" + std::to_string(__LINE__);
return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_CREATE_DATABASE_IN_PLAN,
errorMsg);
}
}

// Now update our own cache of planned databases:
loadPlan();
@@ -1171,6 +1172,7 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName,
LOG_TOPIC(ERR, Logger::CLUSTER) << "Could not get agency dump!";
}
} else {
errorMsg += std::string("\nClientId ") + res._clientId;
errorMsg += std::string("\n") + __FILE__ + std::to_string(__LINE__);
errorMsg += std::string("\n") + res.errorMessage();
errorMsg += std::string("\n") + res.errorDetails();
@@ -1295,9 +1297,9 @@ int ClusterInfo::dropCollectionCoordinator(std::string const& databaseName,
res = ac.sendTransactionWithFailover(trans);

if (!res.successful()) {
LOG(ERR) << "###################### WAS ERLAUBE? ####################";
AgencyCommResult ag = ac.getValues("");
if (ag.successful()) {
LOG_TOPIC(ERR, Logger::CLUSTER) << "ClientId: " << res._clientId;
LOG_TOPIC(ERR, Logger::CLUSTER) << "Agency dump:\n"
<< ag.slice().toJson();
} else {
@@ -1552,11 +1554,6 @@ int ClusterInfo::ensureIndexCoordinator(
std::shared_ptr<LogicalCollection> c =
getCollection(databaseName, collectionID);

// Note that nobody is removing this collection in the plan, since
// we hold the write lock in the agency, therefore it does not matter
// that getCollection fetches the read lock and releases it before
// we get it again.
//
READ_LOCKER(readLocker, _planProt.lock);

if (c == nullptr) {
@@ -1761,6 +1758,7 @@ int ClusterInfo::ensureIndexCoordinator(
AgencyCommResult result = ac.sendTransactionWithFailover(trx, 0.0);

if (!result.successful()) {
errorMsg += "ClientId: " + result._clientId;
errorMsg += std::string(" ") + __FILE__ + ":" + std::to_string(__LINE__);
resultBuilder = *resBuilder;
return TRI_ERROR_CLUSTER_COULD_NOT_CREATE_INDEX_IN_PLAN;
@@ -1981,6 +1979,7 @@ int ClusterInfo::dropIndexCoordinator(std::string const& databaseName,
AgencyCommResult result = ac.sendTransactionWithFailover(trx, 0.0);

if (!result.successful()) {
errorMsg += "ClientId: " + result._clientId;
errorMsg += std::string(" ") + __FILE__ + ":" + std::to_string(__LINE__);
events::DropIndex(collectionID, idString,
TRI_ERROR_CLUSTER_COULD_NOT_DROP_INDEX_IN_PLAN);
@@ -2050,20 +2049,20 @@ void ClusterInfo::loadServers() {
result.slice()[0].get(
std::vector<std::string>(
{AgencyCommManager::path(), "Current", "ServersRegistered"}));

velocypack::Slice serversAliases =
result.slice()[0].get(
std::vector<std::string>(
{AgencyCommManager::path(), "Target", "MapUniqueToShortID"}));

if (serversRegistered.isObject()) {

if (serversRegistered.isObject()) {
decltype(_servers) newServers;
decltype(_serverAliases) newAliases;

size_t i = 0;
for (auto const& res : VPackObjectIterator(serversRegistered)) {
velocypack::Slice slice = res.value;

if (slice.isObject() && slice.hasKey("endpoint")) {
std::string server =
arangodb::basics::VelocyPackHelper::getStringValue(
@@ -2080,7 +2079,7 @@ void ClusterInfo::loadServers() {
newServers.emplace(std::make_pair(res.key.copyString(), server));
}
}

// Now set the new value:
{
WRITE_LOCKER(writeLocker, _serversProt.lock);
@@ -2092,13 +2091,13 @@ void ClusterInfo::loadServers() {
return;
}
}

LOG_TOPIC(DEBUG, Logger::CLUSTER)
<< "Error while loading " << prefixServers
<< " httpCode: " << result.httpCode()
<< " errorCode: " << result.errorCode()
<< " errorMessage: " << result.errorMessage()
<< " body: " << result.body();
<< "Error while loading " << prefixServers
<< " httpCode: " << result.httpCode()
<< " errorCode: " << result.errorCode()
<< " errorMessage: " << result.errorMessage()
<< " body: " << result.body();
}

////////////////////////////////////////////////////////////////////////////////
@@ -31,7 +31,7 @@
#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>

#include "Agency/AgencyCallbackRegistry.h"
#include "Cluster/AgencyCallbackRegistry.h"
#include "Agency/AgencyComm.h"
#include "Basics/Mutex.h"
#include "Basics/ReadWriteLock.h"
@@ -100,6 +100,12 @@ static VPackBuilder newShardEntry(VPackSlice oldValue, ServerID const& sid,
void FollowerInfo::add(ServerID const& sid) {
MUTEX_LOCKER(locker, _mutex);

// First check if there is anything to do:
for (auto const& s : *_followers) {
if (s == sid) {
return; // Do nothing, if follower already there
}
}
// Fully copy the vector:
auto v = std::make_shared<std::vector<ServerID>>(*_followers);
v->push_back(sid); // add a single entry
@@ -180,11 +186,25 @@ void FollowerInfo::add(ServerID const& sid) {
void FollowerInfo::remove(ServerID const& sid) {
MUTEX_LOCKER(locker, _mutex);

// First check if there is anything to do:
bool found = false;
for (auto const& s : *_followers) {
if (s == sid) {
found = true;
break;
}
}
if (!found) {
return; // nothing to do
}

auto v = std::make_shared<std::vector<ServerID>>();
v->reserve(_followers->size() - 1);
for (auto const& i : *_followers) {
if (i != sid) {
v->push_back(i);
if (_followers->size() > 0) {
v->reserve(_followers->size() - 1);
for (auto const& i : *_followers) {
if (i != sid) {
v->push_back(i);
}
}
}
_followers = v; // will cast to std::vector<ServerID> const
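Editor's note: a hedged illustration of the guard added above. Vector sizes are unsigned, so size() - 1 on an empty vector wraps around to a huge value and the reserve call would attempt an enormous allocation; checking for emptiness (or, as in the hunk, returning early when the id is not present) avoids that. Names below are illustrative:

#include <string>
#include <vector>

void copyWithout(std::vector<std::string> const& in, std::string const& drop,
                 std::vector<std::string>& out) {
  if (!in.empty()) {
    out.reserve(in.size() - 1);   // safe: size() - 1 cannot underflow here
    for (auto const& s : in) {
      if (s != drop) {
        out.push_back(s);
      }
    }
  }
}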
@@ -37,11 +37,12 @@ class FollowerInfo {
std::shared_ptr<std::vector<ServerID> const> _followers;
Mutex _mutex;
arangodb::LogicalCollection* _docColl;
bool _isLeader;

public:

explicit FollowerInfo(arangodb::LogicalCollection* d)
: _followers(new std::vector<ServerID>()), _docColl(d) { }
: _followers(new std::vector<ServerID>()), _docColl(d), _isLeader(false) { }

//////////////////////////////////////////////////////////////////////////////
/// @brief get information about current followers of a shard.
@@ -74,6 +75,22 @@ class FollowerInfo {

void clear();

//////////////////////////////////////////////////////////////////////////////
/// @brief report if we are the leader
//////////////////////////////////////////////////////////////////////////////

bool isLeader() const {
return _isLeader;
}

//////////////////////////////////////////////////////////////////////////////
/// @brief set leadership
//////////////////////////////////////////////////////////////////////////////

void setLeader(bool b) {
_isLeader = b;
}

};
} // end namespace arangodb
@@ -23,7 +23,7 @@

#include "RestAgencyCallbacksHandler.h"

#include "Agency/AgencyCallbackRegistry.h"
#include "Cluster/AgencyCallbackRegistry.h"
#include "Rest/HttpRequest.h"
#include "Rest/HttpResponse.h"
@@ -24,12 +24,12 @@

#include <stdexcept>

#include "Agency/AgencyCallbackRegistry.h"
#include "Agency/AgencyFeature.h"
#include "Agency/RestAgencyHandler.h"
#include "Agency/RestAgencyPrivHandler.h"
#include "Aql/RestAqlHandler.h"
#include "Basics/StringUtils.h"
#include "Cluster/AgencyCallbackRegistry.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/RestAgencyCallbacksHandler.h"
@@ -115,10 +115,10 @@ bool RestDocumentHandler::createDocument() {
VPackSlice body = parsedBody->slice();

arangodb::OperationOptions opOptions;
opOptions.isRestore = extractBooleanParameter("isRestore", false);
opOptions.waitForSync = extractBooleanParameter("waitForSync", false);
opOptions.returnNew = extractBooleanParameter("returnNew", false);
opOptions.silent = extractBooleanParameter("silent", false);
opOptions.isRestore = extractBooleanParameter(StaticStrings::IsRestoreString, false);
opOptions.waitForSync = extractBooleanParameter(StaticStrings::WaitForSyncString, false);
opOptions.returnNew = extractBooleanParameter(StaticStrings::ReturnNewString, false);
opOptions.silent = extractBooleanParameter(StaticStrings::SilentString, false);

// find and load collection given by name or identifier
auto transactionContext(StandaloneTransactionContext::Create(_vocbase));
@@ -380,12 +380,12 @@ bool RestDocumentHandler::modifyDocument(bool isPatch) {
}

OperationOptions opOptions;
opOptions.isRestore = extractBooleanParameter("isRestore", false);
opOptions.ignoreRevs = extractBooleanParameter("ignoreRevs", true);
opOptions.waitForSync = extractBooleanParameter("waitForSync", false);
opOptions.returnNew = extractBooleanParameter("returnNew", false);
opOptions.returnOld = extractBooleanParameter("returnOld", false);
opOptions.silent = extractBooleanParameter("silent", false);
opOptions.isRestore = extractBooleanParameter(StaticStrings::IsRestoreString, false);
opOptions.ignoreRevs = extractBooleanParameter(StaticStrings::IgnoreRevsString, true);
opOptions.waitForSync = extractBooleanParameter(StaticStrings::WaitForSyncString, false);
opOptions.returnNew = extractBooleanParameter(StaticStrings::ReturnNewString, false);
opOptions.returnOld = extractBooleanParameter(StaticStrings::ReturnOldString, false);
opOptions.silent = extractBooleanParameter(StaticStrings::SilentString, false);

// extract the revision, if single document variant and header given:
std::shared_ptr<VPackBuilder> builder;
@@ -442,8 +442,8 @@ bool RestDocumentHandler::modifyDocument(bool isPatch) {
OperationResult result(TRI_ERROR_NO_ERROR);
if (isPatch) {
// patching an existing document
opOptions.keepNull = extractBooleanParameter("keepNull", true);
opOptions.mergeObjects = extractBooleanParameter("mergeObjects", true);
opOptions.keepNull = extractBooleanParameter(StaticStrings::KeepNullString, true);
opOptions.mergeObjects = extractBooleanParameter(StaticStrings::MergeObjectsString, true);
result = trx.update(collectionName, body, opOptions);
} else {
result = trx.replace(collectionName, body, opOptions);
@@ -507,10 +507,10 @@ bool RestDocumentHandler::deleteDocument() {
}

OperationOptions opOptions;
opOptions.returnOld = extractBooleanParameter("returnOld", false);
opOptions.ignoreRevs = extractBooleanParameter("ignoreRevs", true);
opOptions.waitForSync = extractBooleanParameter("waitForSync", false);
opOptions.silent = extractBooleanParameter("silent", false);
opOptions.returnOld = extractBooleanParameter(StaticStrings::ReturnOldString, false);
opOptions.ignoreRevs = extractBooleanParameter(StaticStrings::IgnoreRevsString, true);
opOptions.waitForSync = extractBooleanParameter(StaticStrings::WaitForSyncString, false);
opOptions.silent = extractBooleanParameter(StaticStrings::SilentString, false);

auto transactionContext(StandaloneTransactionContext::Create(_vocbase));
@@ -600,7 +600,7 @@ bool RestDocumentHandler::readManyDocuments() {
std::string const& collectionName = suffixes[0];

OperationOptions opOptions;
opOptions.ignoreRevs = extractBooleanParameter("ignoreRevs", true);
opOptions.ignoreRevs = extractBooleanParameter(StaticStrings::IgnoreRevsString, true);

auto transactionContext(StandaloneTransactionContext::Create(_vocbase));
SingleCollectionTransaction trx(transactionContext, collectionName,
@@ -3447,7 +3447,6 @@ void RestReplicationHandler::handleCommandRemoveFollower() {
"did not find collection");
return;
}

col->followers()->remove(followerId.copyString());

VPackBuilder b;
@@ -608,7 +608,7 @@ TRI_voc_rid_t RestVocbaseBaseHandler::extractRevision(char const* header,
/// @brief extracts a boolean parameter value
////////////////////////////////////////////////////////////////////////////////

bool RestVocbaseBaseHandler::extractBooleanParameter(char const* name,
bool RestVocbaseBaseHandler::extractBooleanParameter(std::string const& name,
bool def) const {
bool found;
std::string const& value = _request->value(name, found);
@@ -264,7 +264,11 @@ class RestVocbaseBaseHandler : public RestBaseHandler {
/// @brief extracts a boolean parameter value
//////////////////////////////////////////////////////////////////////////////

bool extractBooleanParameter(char const* name, bool def) const;
bool extractBooleanParameter(std::string const& name, bool def) const;

bool extractBooleanParameter(char const* name, bool def) const {
return extractBooleanParameter(std::string(name), def);
}

protected:
//////////////////////////////////////////////////////////////////////////////
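Editor's note: a hedged sketch of the overload pattern above, outside the ArangoDB class: the std::string overload does the real work (so callers can pass prebuilt constants such as the StaticStrings members), while a thin char const* overload keeps old call sites with string literals compiling by forwarding. Names below are illustrative:

#include <map>
#include <string>

static std::map<std::string, bool> params = {{"waitForSync", true}};

bool lookupFlag(std::string const& name, bool def) {
  auto it = params.find(name);
  return it == params.end() ? def : it->second;  // primary overload does the lookup
}

inline bool lookupFlag(char const* name, bool def) {
  return lookupFlag(std::string(name), def);     // thin forwarding overload
}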
@@ -1199,7 +1199,7 @@ void MMFilesCollection::removeRevision(TRI_voc_rid_t revisionId, bool updateStat
TRI_ASSERT(revisionId != 0);
if (updateStats) {
MMFilesDocumentPosition const old = _revisionsCache.fetchAndRemove(revisionId);
if (old && !old.pointsToWal()) {
if (old && !old.pointsToWal() && old.fid() != 0) {
TRI_ASSERT(old.dataptr() != nullptr);
uint8_t const* vpack = static_cast<uint8_t const*>(old.dataptr());
int64_t size = MMFilesDatafileHelper::AlignedSize<int64_t>(arangodb::MMFilesDatafileHelper::VPackOffset(TRI_DF_MARKER_VPACK_DOCUMENT) + VPackSlice(vpack).byteSize());
@@ -374,16 +374,11 @@ int MMFilesCollectorThread::collectLogfiles(bool& worked) {

    try {
      int res = collect(logfile);
      // LOG_TOPIC(TRACE, Logger::COLLECTOR) << "collected logfile: " << // logfile->id() << ". result: "
      // << res;
      // LOG_TOPIC(TRACE, Logger::COLLECTOR) << "collected logfile: " << logfile->id() << ". result: " << res;

      if (res == TRI_ERROR_NO_ERROR) {
        // reset collector status
        {
          CONDITION_LOCKER(guard, _collectorResultCondition);
          _collectorResult = TRI_ERROR_NO_ERROR;
          _collectorResultCondition.broadcast();
        }
        broadcastCollectorResult(res);

        RocksDBFeature::syncWal();

@@ -393,11 +388,7 @@ int MMFilesCollectorThread::collectLogfiles(bool& worked) {
      _logfileManager->forceStatus(logfile, wal::Logfile::StatusType::SEALED);

      // set error in collector
      {
        CONDITION_LOCKER(guard, _collectorResultCondition);
        _collectorResult = res;
        _collectorResultCondition.broadcast();
      }
      broadcastCollectorResult(res);
    }

    return res;
@@ -979,3 +970,9 @@ int MMFilesCollectorThread::updateDatafileStatistics(

  return TRI_ERROR_NO_ERROR;
}

void MMFilesCollectorThread::broadcastCollectorResult(int res) {
  CONDITION_LOCKER(guard, _collectorResultCondition);
  _collectorResult = res;
  _collectorResultCondition.broadcast();
}
@@ -103,6 +103,8 @@ class MMFilesCollectorThread final : public Thread {
  /// @brief update a collection's datafile information
  int updateDatafileStatistics(LogicalCollection*, MMFilesCollectorCache*);

  void broadcastCollectorResult(int res);

 private:
  /// @brief the logfile manager
  wal::LogfileManager* _logfileManager;
@@ -39,26 +39,6 @@ MMFilesDocumentOperation::MMFilesDocumentOperation(LogicalCollection* collection
}

MMFilesDocumentOperation::~MMFilesDocumentOperation() {
  TRI_ASSERT(_status != StatusType::INDEXED);

  if (_status == StatusType::HANDLED) {
    try {
      if (_type == TRI_VOC_DOCUMENT_OPERATION_UPDATE ||
          _type == TRI_VOC_DOCUMENT_OPERATION_REPLACE) {
        // remove old, now unused revision
        TRI_ASSERT(!_oldRevision.empty());
        TRI_ASSERT(!_newRevision.empty());
        _collection->removeRevision(_oldRevision._revisionId, true);
      } else if (_type == TRI_VOC_DOCUMENT_OPERATION_REMOVE) {
        // remove old, now unused revision
        TRI_ASSERT(!_oldRevision.empty());
        TRI_ASSERT(_newRevision.empty());
        _collection->removeRevision(_oldRevision._revisionId, true);
      }
    } catch (...) {
      // never throw here because of destructor
    }
  }
}

MMFilesDocumentOperation* MMFilesDocumentOperation::swap() {
@@ -108,16 +88,13 @@ void MMFilesDocumentOperation::setRevisions(DocumentDescriptor const& oldRevisio

void MMFilesDocumentOperation::revert(arangodb::Transaction* trx) {
  TRI_ASSERT(trx != nullptr);

  if (_status == StatusType::CREATED ||
      _status == StatusType::SWAPPED ||
      _status == StatusType::REVERTED) {

  if (_status == StatusType::SWAPPED || _status == StatusType::REVERTED) {
    return;
  }

  TRI_ASSERT(_status == StatusType::INDEXED || _status == StatusType::HANDLED);

  // set to reverted now

  // fetch old status and set it to reverted now
  StatusType status = _status;
  _status = StatusType::REVERTED;

  TRI_voc_rid_t oldRevisionId = 0;
@ -136,39 +113,82 @@ void MMFilesDocumentOperation::revert(arangodb::Transaction* trx) {
|
|||
newDoc = VPackSlice(_newRevision._vpack);
|
||||
}
|
||||
|
||||
try {
|
||||
_collection->rollbackOperation(trx, _type, oldRevisionId, oldDoc, newRevisionId, newDoc);
|
||||
} catch (...) {
|
||||
// TODO: decide whether we should rethrow here
|
||||
// clear caches so the following operations all use
|
||||
if (oldRevisionId != 0) {
|
||||
_collection->removeRevisionCacheEntry(oldRevisionId);
|
||||
}
|
||||
if (newRevisionId != 0) {
|
||||
_collection->removeRevisionCacheEntry(newRevisionId);
|
||||
}
|
||||
|
||||
if (_type == TRI_VOC_DOCUMENT_OPERATION_INSERT) {
|
||||
TRI_ASSERT(_oldRevision.empty());
|
||||
TRI_ASSERT(!_newRevision.empty());
|
||||
|
||||
if (status != StatusType::CREATED) {
|
||||
// remove revision from indexes
|
||||
try {
|
||||
_collection->rollbackOperation(trx, _type, oldRevisionId, oldDoc, newRevisionId, newDoc);
|
||||
} catch (...) {
|
||||
}
|
||||
}
|
||||
|
||||
// remove now obsolete new revision
|
||||
try {
|
||||
_collection->removeRevision(newRevisionId, true);
|
||||
} catch (...) {
|
||||
// operation probably was never inserted
|
||||
// TODO: decide whether we should rethrow here
|
||||
}
|
||||
} else if (_type == TRI_VOC_DOCUMENT_OPERATION_UPDATE ||
|
||||
_type == TRI_VOC_DOCUMENT_OPERATION_REPLACE) {
|
||||
TRI_ASSERT(!_oldRevision.empty());
|
||||
TRI_ASSERT(!_newRevision.empty());
|
||||
|
||||
try {
|
||||
// re-insert the old revision
|
||||
_collection->insertRevision(_oldRevision._revisionId, _oldRevision._vpack, 0, true);
|
||||
} catch (...) {
|
||||
}
|
||||
|
||||
if (status != StatusType::CREATED) {
|
||||
try {
|
||||
// restore the old index state
|
||||
_collection->rollbackOperation(trx, _type, oldRevisionId, oldDoc, newRevisionId, newDoc);
|
||||
} catch (...) {
|
||||
}
|
||||
}
|
||||
|
||||
// let the primary index entry point to the correct document
|
||||
SimpleIndexElement* element = _collection->primaryIndex()->lookupKeyRef(trx, Transaction::extractKeyFromDocument(newDoc));
|
||||
if (element != nullptr && element->revisionId() != 0) {
|
||||
VPackSlice keySlice(Transaction::extractKeyFromDocument(oldDoc));
|
||||
element->updateRevisionId(oldRevisionId, static_cast<uint32_t>(keySlice.begin() - oldDoc.begin()));
|
||||
}
|
||||
_collection->updateRevision(oldRevisionId, oldDoc.begin(), 0, false);
|
||||
|
||||
// remove now obsolete new revision
|
||||
if (oldRevisionId != newRevisionId) {
|
||||
// we need to check for the same revision id here
|
||||
try {
|
||||
_collection->removeRevision(newRevisionId, true);
|
||||
} catch (...) {
|
||||
}
|
||||
}
|
||||
} else if (_type == TRI_VOC_DOCUMENT_OPERATION_REMOVE) {
|
||||
TRI_ASSERT(!_oldRevision.empty());
|
||||
TRI_ASSERT(_newRevision.empty());
|
||||
|
||||
try {
|
||||
_collection->removeRevision(newRevisionId, true);
|
||||
_collection->insertRevision(_oldRevision._revisionId, _oldRevision._vpack, 0, true);
|
||||
} catch (...) {
|
||||
// operation probably was never inserted
|
||||
// TODO: decide whether we should rethrow here
|
||||
}
|
||||
|
||||
if (status != StatusType::CREATED) {
|
||||
try {
|
||||
// remove from indexes again
|
||||
_collection->rollbackOperation(trx, _type, oldRevisionId, oldDoc, newRevisionId, newDoc);
|
||||
} catch (...) {
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -21,8 +21,8 @@
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGOD_WAL_DOCUMENT_OPERATION_H
#define ARANGOD_WAL_DOCUMENT_OPERATION_H 1
#ifndef ARANGOD_MMFILES_DOCUMENT_OPERATION_H
#define ARANGOD_MMFILES_DOCUMENT_OPERATION_H 1

#include "Basics/Common.h"
#include "VocBase/voc-types.h"
@@ -71,10 +71,6 @@ struct MMFilesDocumentOperation {
    _status = StatusType::HANDLED;
  }

  void done() noexcept {
    _status = StatusType::SWAPPED;
  }

  void revert(arangodb::Transaction*);

 private:
@@ -945,7 +945,7 @@ static void JS_FiguresVocbaseCol(
}

////////////////////////////////////////////////////////////////////////////////
/// @brief was docuBlock collectionLoad
/// @brief was docuBlock leaderResign
////////////////////////////////////////////////////////////////////////////////

static void JS_LeaderResign(v8::FunctionCallbackInfo<v8::Value> const& args) {
@ -981,13 +981,156 @@ static void JS_LeaderResign(v8::FunctionCallbackInfo<v8::Value> const& args) {
|
|||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
TRI_V8_THROW_EXCEPTION(res);
|
||||
}
|
||||
trx.documentCollection()->followers()->clear();
|
||||
// do not reset followers at this time...we are still the only source of truth
|
||||
// to trust...
|
||||
//trx.documentCollection()->followers()->clear();
|
||||
trx.documentCollection()->followers()->setLeader(false);
|
||||
}
|
||||
|
||||
TRI_V8_RETURN_UNDEFINED();
|
||||
TRI_V8_TRY_CATCH_END
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief was docuBlock assumeLeadership
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
static void JS_AssumeLeadership(v8::FunctionCallbackInfo<v8::Value> const& args) {
|
||||
TRI_V8_TRY_CATCH_BEGIN(isolate);
|
||||
v8::HandleScope scope(isolate);
|
||||
|
||||
TRI_vocbase_t* vocbase = GetContextVocBase(isolate);
|
||||
|
||||
if (vocbase == nullptr) {
|
||||
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
|
||||
}
|
||||
|
||||
if (ServerState::instance()->isDBServer()) {
|
||||
arangodb::LogicalCollection const* collection =
|
||||
TRI_UnwrapClass<arangodb::LogicalCollection>(args.Holder(),
|
||||
WRP_VOCBASE_COL_TYPE);
|
||||
|
||||
if (collection == nullptr) {
|
||||
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
|
||||
}
|
||||
|
||||
TRI_vocbase_t* vocbase = collection->vocbase();
|
||||
std::string collectionName = collection->name();
|
||||
if (vocbase == nullptr) {
|
||||
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
|
||||
}
|
||||
|
||||
auto transactionContext = std::make_shared<V8TransactionContext>(vocbase, true);
|
||||
|
||||
SingleCollectionTransaction trx(transactionContext, collectionName,
|
||||
TRI_TRANSACTION_READ);
|
||||
int res = trx.begin();
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
TRI_V8_THROW_EXCEPTION(res);
|
||||
}
|
||||
trx.documentCollection()->followers()->clear();
|
||||
trx.documentCollection()->followers()->setLeader(true);
|
||||
}
|
||||
|
||||
TRI_V8_RETURN_UNDEFINED();
|
||||
TRI_V8_TRY_CATCH_END
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief was docuBlock isLeader
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
static void JS_IsLeader(v8::FunctionCallbackInfo<v8::Value> const& args) {
|
||||
TRI_V8_TRY_CATCH_BEGIN(isolate);
|
||||
v8::HandleScope scope(isolate);
|
||||
|
||||
TRI_vocbase_t* vocbase = GetContextVocBase(isolate);
|
||||
|
||||
if (vocbase == nullptr) {
|
||||
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
|
||||
}
|
||||
|
||||
bool b = false;
|
||||
if (ServerState::instance()->isDBServer()) {
|
||||
arangodb::LogicalCollection const* collection =
|
||||
TRI_UnwrapClass<arangodb::LogicalCollection>(args.Holder(),
|
||||
WRP_VOCBASE_COL_TYPE);
|
||||
|
||||
if (collection == nullptr) {
|
||||
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
|
||||
}
|
||||
|
||||
TRI_vocbase_t* vocbase = collection->vocbase();
|
||||
std::string collectionName = collection->name();
|
||||
if (vocbase == nullptr) {
|
||||
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
|
||||
}
|
||||
|
||||
auto realCollection = vocbase->lookupCollection(collectionName);
|
||||
if (realCollection == nullptr) {
|
||||
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND);
|
||||
}
|
||||
b = realCollection->followers()->isLeader();
|
||||
}
|
||||
|
||||
if (b) {
|
||||
TRI_V8_RETURN_TRUE();
|
||||
} else {
|
||||
TRI_V8_RETURN_FALSE();
|
||||
}
|
||||
TRI_V8_TRY_CATCH_END
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief was docuBlock getFollowers
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
static void JS_GetFollowers(v8::FunctionCallbackInfo<v8::Value> const& args) {
|
||||
TRI_V8_TRY_CATCH_BEGIN(isolate);
|
||||
v8::HandleScope scope(isolate);
|
||||
|
||||
TRI_vocbase_t* vocbase = GetContextVocBase(isolate);
|
||||
|
||||
if (vocbase == nullptr) {
|
||||
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
|
||||
}
|
||||
|
||||
v8::Handle<v8::Array> list = v8::Array::New(isolate);
|
||||
if (ServerState::instance()->isDBServer()) {
|
||||
arangodb::LogicalCollection const* collection =
|
||||
TRI_UnwrapClass<arangodb::LogicalCollection>(args.Holder(),
|
||||
WRP_VOCBASE_COL_TYPE);
|
||||
|
||||
if (collection == nullptr) {
|
||||
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
|
||||
}
|
||||
|
||||
TRI_vocbase_t* vocbase = collection->vocbase();
|
||||
std::string collectionName = collection->name();
|
||||
if (vocbase == nullptr) {
|
||||
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
|
||||
}
|
||||
|
||||
auto transactionContext = std::make_shared<V8TransactionContext>(vocbase, true);
|
||||
|
||||
SingleCollectionTransaction trx(transactionContext, collectionName,
|
||||
TRI_TRANSACTION_READ);
|
||||
int res = trx.begin();
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
TRI_V8_THROW_EXCEPTION(res);
|
||||
}
|
||||
std::unique_ptr<arangodb::FollowerInfo> const& followerInfo = trx.documentCollection()->followers();
|
||||
std::shared_ptr<std::vector<ServerID> const> followers = followerInfo->get();
|
||||
uint32_t i = 0;
|
||||
for (auto const& n : *followers) {
|
||||
list->Set(i++, TRI_V8_STD_STRING(n));
|
||||
}
|
||||
}
|
||||
|
||||
TRI_V8_RETURN(list);
|
||||
TRI_V8_TRY_CATCH_END
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief was docuBlock collectionLoad
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -3213,6 +3356,12 @@ void TRI_InitV8Collection(v8::Handle<v8::Context> context,
|
|||
JS_InsertVocbaseCol);
|
||||
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("leaderResign"),
|
||||
JS_LeaderResign, true);
|
||||
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("assumeLeadership"),
|
||||
JS_AssumeLeadership, true);
|
||||
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("isLeader"),
|
||||
JS_IsLeader, true);
|
||||
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("getFollowers"),
|
||||
JS_GetFollowers, true);
|
||||
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("load"),
|
||||
JS_LoadVocbaseCol);
|
||||
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("name"),
|
||||
|
|
|
@ -2282,8 +2282,14 @@ int LogicalCollection::update(Transaction* trx, VPackSlice const newSlice,
|
|||
|
||||
try {
|
||||
insertRevision(revisionId, marker->vpack(), 0, true);
|
||||
|
||||
operation.setRevisions(DocumentDescriptor(oldRevisionId, oldDoc.begin()),
|
||||
DocumentDescriptor(revisionId, newDoc.begin()));
|
||||
|
||||
if (oldRevisionId == revisionId) {
|
||||
// update with same revision id => can happen if isRestore = true
|
||||
result.clear(0);
|
||||
}
|
||||
|
||||
res = updateDocument(trx, oldRevisionId, oldDoc, revisionId, newDoc,
|
||||
operation, marker, options.waitForSync);
|
||||
|
@ -2439,8 +2445,14 @@ int LogicalCollection::replace(Transaction* trx, VPackSlice const newSlice,
|
|||
|
||||
try {
|
||||
insertRevision(revisionId, marker->vpack(), 0, true);
|
||||
|
||||
operation.setRevisions(DocumentDescriptor(oldRevisionId, oldDoc.begin()),
|
||||
DocumentDescriptor(revisionId, newDoc.begin()));
|
||||
|
||||
if (oldRevisionId == revisionId) {
|
||||
// update with same revision id => can happen if isRestore = true
|
||||
result.clear(0);
|
||||
}
|
||||
|
||||
res = updateDocument(trx, oldRevisionId, oldDoc, revisionId, newDoc,
|
||||
operation, marker, options.waitForSync);
|
||||
|
@ -2455,6 +2467,10 @@ int LogicalCollection::replace(Transaction* trx, VPackSlice const newSlice,
|
|||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
operation.revert(trx);
|
||||
} else {
|
||||
if (oldRevisionId == revisionId) {
|
||||
// update with same revision id => can happen if isRestore = true
|
||||
result.clear(0);
|
||||
}
|
||||
readRevision(trx, result, revisionId);
|
||||
|
||||
if (options.waitForSync) {
|
||||
|
@ -2590,6 +2606,11 @@ int LogicalCollection::remove(arangodb::Transaction* trx,
|
|||
TRI_IF_FAILURE("RemoveDocumentNoOperation") {
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
|
||||
}
|
||||
|
||||
try {
|
||||
removeRevision(oldRevisionId, true);
|
||||
} catch (...) {
|
||||
}
|
||||
|
||||
TRI_IF_FAILURE("RemoveDocumentNoOperationExcept") {
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
|
||||
|
@ -2683,6 +2704,11 @@ int LogicalCollection::remove(arangodb::Transaction* trx,
|
|||
}
|
||||
|
||||
operation.indexed();
|
||||
|
||||
try {
|
||||
removeRevision(oldRevisionId, true);
|
||||
} catch (...) {
|
||||
}
|
||||
|
||||
TRI_IF_FAILURE("RemoveDocumentNoOperation") {
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
|
||||
|
@ -2725,17 +2751,11 @@ int LogicalCollection::rollbackOperation(arangodb::Transaction* trx,
|
|||
TRI_ASSERT(newRevisionId != 0);
|
||||
TRI_ASSERT(!newDoc.isNone());
|
||||
|
||||
removeRevisionCacheEntry(newRevisionId);
|
||||
|
||||
// ignore any errors we're getting from this
|
||||
deletePrimaryIndex(trx, newRevisionId, newDoc);
|
||||
deleteSecondaryIndexes(trx, newRevisionId, newDoc, true);
|
||||
|
||||
// remove new revision
|
||||
try {
|
||||
removeRevision(newRevisionId, false);
|
||||
} catch (...) {
|
||||
// TODO: decide whether we should rethrow here
|
||||
}
|
||||
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
|
@ -2745,6 +2765,10 @@ int LogicalCollection::rollbackOperation(arangodb::Transaction* trx,
|
|||
TRI_ASSERT(!oldDoc.isNone());
|
||||
TRI_ASSERT(newRevisionId != 0);
|
||||
TRI_ASSERT(!newDoc.isNone());
|
||||
|
||||
removeRevisionCacheEntry(oldRevisionId);
|
||||
removeRevisionCacheEntry(newRevisionId);
|
||||
|
||||
// remove the current values from the indexes
|
||||
deleteSecondaryIndexes(trx, newRevisionId, newDoc, true);
|
||||
// re-insert old state
|
||||
|
@ -2757,6 +2781,8 @@ int LogicalCollection::rollbackOperation(arangodb::Transaction* trx,
|
|||
TRI_ASSERT(!oldDoc.isNone());
|
||||
TRI_ASSERT(newRevisionId == 0);
|
||||
TRI_ASSERT(newDoc.isNone());
|
||||
|
||||
removeRevisionCacheEntry(oldRevisionId);
|
||||
|
||||
int res = insertPrimaryIndex(trx, oldRevisionId, oldDoc);
|
||||
|
||||
|
@ -3292,8 +3318,6 @@ int LogicalCollection::updateDocument(
|
|||
// rollback
|
||||
deleteSecondaryIndexes(trx, newRevisionId, newDoc, true);
|
||||
insertSecondaryIndexes(trx, oldRevisionId, oldDoc, true);
|
||||
removeRevision(newRevisionId, false);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -3308,6 +3332,16 @@ int LogicalCollection::updateDocument(
|
|||
}
|
||||
|
||||
operation.indexed();
|
||||
|
||||
if (oldRevisionId != newRevisionId) {
|
||||
try {
|
||||
removeRevision(oldRevisionId, true);
|
||||
} catch (...) {
|
||||
}
|
||||
} else {
|
||||
// clear readcache entry for the revision
|
||||
removeRevisionCacheEntry(oldRevisionId);
|
||||
}
|
||||
|
||||
TRI_IF_FAILURE("UpdateDocumentNoOperation") { return TRI_ERROR_DEBUG; }
|
||||
|
||||
|
@ -3770,13 +3804,18 @@ bool LogicalCollection::updateRevisionConditional(
|
|||
void LogicalCollection::removeRevision(TRI_voc_rid_t revisionId,
|
||||
bool updateStats) {
|
||||
// clean up cache entry
|
||||
TRI_ASSERT(_revisionsCache);
|
||||
_revisionsCache->removeRevision(revisionId);
|
||||
removeRevisionCacheEntry(revisionId);
|
||||
|
||||
// and remove from storage engine
|
||||
getPhysical()->removeRevision(revisionId, updateStats);
|
||||
}
|
||||
|
||||
void LogicalCollection::removeRevisionCacheEntry(TRI_voc_rid_t revisionId) {
|
||||
// clean up cache entry
|
||||
TRI_ASSERT(_revisionsCache);
|
||||
_revisionsCache->removeRevision(revisionId);
|
||||
}
|
||||
|
||||
/// @brief a method to skip certain documents in AQL write operations,
|
||||
/// this is only used in the enterprise edition for smart graphs
|
||||
#ifndef USE_ENTERPRISE
|
||||
|
|
|
@ -390,6 +390,7 @@ class LogicalCollection {
|
|||
void updateRevision(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal);
|
||||
bool updateRevisionConditional(TRI_voc_rid_t revisionId, TRI_df_marker_t const* oldPosition, TRI_df_marker_t const* newPosition, TRI_voc_fid_t newFid, bool isInWal);
|
||||
void removeRevision(TRI_voc_rid_t revisionId, bool updateStats);
|
||||
void removeRevisionCacheEntry(TRI_voc_rid_t revisionId);
|
||||
|
||||
private:
|
||||
// SECTION: Index creation
|
||||
|
|
|
@ -193,7 +193,6 @@ static void FreeOperations(arangodb::Transaction* activeTrx, TRI_transaction_t*
|
|||
try {
|
||||
op->revert(activeTrx);
|
||||
} catch (...) {
|
||||
// TODO: decide whether we should rethrow here
|
||||
}
|
||||
delete op;
|
||||
}
|
||||
|
@ -201,10 +200,7 @@ static void FreeOperations(arangodb::Transaction* activeTrx, TRI_transaction_t*
|
|||
// no rollback. simply delete all operations
|
||||
for (auto it = trxCollection->_operations->rbegin();
|
||||
it != trxCollection->_operations->rend(); ++it) {
|
||||
MMFilesDocumentOperation* op = (*it);
|
||||
|
||||
//op->done(); // set to done so dtor of DocumentOperation won't fail
|
||||
delete op;
|
||||
delete (*it);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -121,14 +121,18 @@ struct DocumentDescriptor {
|
|||
DocumentDescriptor(TRI_voc_rid_t revisionId, uint8_t const* vpack) : _revisionId(revisionId), _vpack(vpack) {}
|
||||
|
||||
bool empty() const { return _vpack == nullptr; }
|
||||
|
||||
void reset(DocumentDescriptor const& other) {
|
||||
_revisionId = other._revisionId;
|
||||
_vpack = other._vpack;
|
||||
}
|
||||
|
||||
/*
|
||||
void reset(TRI_voc_rid_t revisionId, uint8_t const* vpack) {
|
||||
_revisionId = revisionId;
|
||||
_vpack = vpack;
|
||||
}
|
||||
*/
|
||||
void clear() {
|
||||
_revisionId = 0;
|
||||
_vpack = nullptr;
|
||||
|
|
|
@ -869,10 +869,11 @@ int LogfileManager::flush(bool waitForSync, bool waitForCollector,
|
|||
|
||||
if (res == TRI_ERROR_NO_ERROR) {
|
||||
// we need to wait for the collector...
|
||||
// LOG(TRACE) << "entering waitForCollector with lastOpenLogfileId " << //
|
||||
// (unsigned long long) lastOpenLogfileId;
|
||||
// LOG(TRACE) << "entering waitForCollector with lastOpenLogfileId " << lastOpenLogfileId;
|
||||
res = this->waitForCollector(lastOpenLogfileId, maxWaitTime);
|
||||
|
||||
if (res == TRI_ERROR_LOCK_TIMEOUT) {
|
||||
LOG(ERR) << "got lock timeout when waiting for WAL flush. lastOpenLogfileId: " << lastOpenLogfileId;
|
||||
}
|
||||
} else if (res == TRI_ERROR_ARANGO_DATAFILE_EMPTY) {
|
||||
// current logfile is empty and cannot be collected
|
||||
|
@ -881,6 +882,10 @@ int LogfileManager::flush(bool waitForSync, bool waitForCollector,
|
|||
|
||||
if (lastSealedLogfileId > 0) {
|
||||
res = this->waitForCollector(lastSealedLogfileId, maxWaitTime);
|
||||
|
||||
if (res == TRI_ERROR_LOCK_TIMEOUT) {
|
||||
LOG(ERR) << "got lock timeout when waiting for WAL flush. lastSealedLogfileId: " << lastSealedLogfileId;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1305,10 +1310,6 @@ Logfile* LogfileManager::getLogfile(Logfile::IdType id,
|
|||
int LogfileManager::getWriteableLogfile(uint32_t size,
|
||||
Logfile::StatusType& status,
|
||||
Logfile*& result) {
|
||||
static uint64_t const SleepTime = 10 * 1000;
|
||||
double const end = TRI_microtime() + 15.0;
|
||||
size_t iterations = 0;
|
||||
|
||||
// always initialize the result
|
||||
result = nullptr;
|
||||
|
||||
|
@ -1316,6 +1317,9 @@ int LogfileManager::getWriteableLogfile(uint32_t size,
|
|||
// intentionally don't return a logfile
|
||||
return TRI_ERROR_DEBUG;
|
||||
}
|
||||
|
||||
size_t iterations = 0;
|
||||
double const end = TRI_microtime() + 15.0;
|
||||
|
||||
while (true) {
|
||||
{
|
||||
|
@ -1363,7 +1367,7 @@ int LogfileManager::getWriteableLogfile(uint32_t size,
|
|||
_allocatorThread->signal(size);
|
||||
}
|
||||
|
||||
int res = _allocatorThread->waitForResult(SleepTime);
|
||||
int res = _allocatorThread->waitForResult(15000);
|
||||
|
||||
if (res != TRI_ERROR_LOCK_TIMEOUT && res != TRI_ERROR_NO_ERROR) {
|
||||
TRI_ASSERT(result == nullptr);
|
||||
|
@ -1680,43 +1684,42 @@ void LogfileManager::waitForCollector() {
|
|||
// wait until a specific logfile has been collected
|
||||
int LogfileManager::waitForCollector(Logfile::IdType logfileId,
|
||||
double maxWaitTime) {
|
||||
static int64_t const SingleWaitPeriod = 50 * 1000;
|
||||
|
||||
int64_t maxIterations = INT64_MAX; // wait forever
|
||||
if (maxWaitTime > 0.0) {
|
||||
// if specified, wait for a shorter period of time
|
||||
maxIterations = static_cast<int64_t>(maxWaitTime * 1000000.0 /
|
||||
(double)SingleWaitPeriod);
|
||||
LOG(TRACE) << "will wait for max. " << maxWaitTime
|
||||
<< " seconds for collector to finish";
|
||||
if (maxWaitTime <= 0.0) {
|
||||
maxWaitTime = 24.0 * 3600.0; // wait "forever"
|
||||
}
|
||||
|
||||
LOG(TRACE) << "waiting for collector thread to collect logfile " << logfileId;
|
||||
|
||||
// wait for the collector thread to finish the collection
|
||||
int64_t iterations = 0;
|
||||
double const end = TRI_microtime() + maxWaitTime;
|
||||
|
||||
while (++iterations < maxIterations) {
|
||||
while (true) {
|
||||
if (_lastCollectedId >= logfileId) {
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
int res = _collectorThread->waitForResult(SingleWaitPeriod);
|
||||
int res = _collectorThread->waitForResult(50 * 1000);
|
||||
|
||||
// LOG(TRACE) << "still waiting for collector. logfileId: " << logfileId <<
|
||||
// " lastCollected:
|
||||
// " << // _lastCollectedId << ", result: " << res;
|
||||
// " lastCollected: " << _lastCollectedId << ", result: " << res;
|
||||
|
||||
if (res != TRI_ERROR_LOCK_TIMEOUT && res != TRI_ERROR_NO_ERROR) {
|
||||
// some error occurred
|
||||
return res;
|
||||
}
|
||||
|
||||
double const now = TRI_microtime();
|
||||
|
||||
if (now > end) {
|
||||
break;
|
||||
}
|
||||
|
||||
usleep(20000);
|
||||
// try again
|
||||
}
|
||||
|
||||
// TODO: remove debug info here
|
||||
LOG(ERR) << "going into lock timeout. having waited for logfile: " << logfileId << ", maxIterations: " << maxIterations << ", maxWaitTime: " << maxWaitTime;
|
||||
LOG(ERR) << "going into lock timeout. having waited for logfile: " << logfileId << ", maxWaitTime: " << maxWaitTime;
|
||||
logStatus();
|
||||
|
||||
// waited for too long
|
||||
|
|
|
@@ -34,6 +34,8 @@ const functionsDocumentation = {
  'authentication_parameters': 'authentication parameters tests',
  'boost': 'boost test suites',
  'config': 'checks the config file parsing',
  'client_resilience': 'client resilience tests',
  'cluster_sync': 'cluster sync tests',
  'dump': 'dump tests',
  'dump_authentication': 'dump tests with authentication',
  'dfdb': 'start test',
@@ -48,7 +50,6 @@ const functionsDocumentation = {
  'replication_static': 'replication static tests',
  'replication_sync': 'replication sync tests',
  'resilience': 'resilience tests',
  'client_resilience': 'client resilience tests',
  'shell_client': 'shell client tests',
  'shell_replication': 'shell replication tests',
  'shell_server': 'shell server tests',
@@ -158,7 +159,9 @@ const optionsDefaults = {
  'loopEternal': false,
  'loopSleepSec': 1,
  'loopSleepWhen': 1,
  'minPort': 1024,
  'maxPort': 32768,
  'mochaGrep': undefined,
  'onlyNightly': false,
  'password': '',
  'replication': false,
@@ -627,20 +630,32 @@ function cleanupDBDirectories (options) {
// / @brief finds a free port
// //////////////////////////////////////////////////////////////////////////////

function findFreePort (maxPort) {
function findFreePort (minPort, maxPort, usedPorts) {
  if (typeof maxPort !== 'number') {
    maxPort = 32768;
  }
  if (maxPort < 2048) {
    maxPort = 2048;

  if (maxPort - minPort < 0) {
    throw new Error('minPort ' + minPort + ' is smaller than maxPort ' + maxPort);
  }

  let tries = 0;
  while (true) {
    const port = Math.floor(Math.random() * (maxPort - 1024)) + 1024;
    const port = Math.floor(Math.random() * (maxPort - minPort)) + minPort;
    tries++;
    if (tries > 20) {
      throw new Error('Couldn\'t find a port after ' + tries + ' tries. portrange of ' + minPort + ', ' + maxPort + ' too narrow?');
    }
    if (Array.isArray(usedPorts) && usedPorts.indexOf(port) >= 0) {
      continue;
    }
    const free = testPort('tcp://0.0.0.0:' + port);

    if (free) {
      return port;
    }

    require('internal').wait(0.1);
  }
}
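For orientation, a minimal sketch of how the reworked findFreePort could be driven by a caller; the port range values and variable names below are invented for illustration and are not taken from the commit:

  // hypothetical caller: reserve two distinct ports from a narrow range
  let usedPorts = [];
  let first = findFreePort(20000, 20100, usedPorts);   // free port in [20000, 20100)
  usedPorts.push(first);                                // remember it so it cannot be handed out twice
  let second = findFreePort(20000, 20100, usedPorts);   // skips everything already in usedPorts
  usedPorts.push(second);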
@@ -667,13 +682,13 @@ function makePathGeneric (path) {
function runThere (options, instanceInfo, file) {
  try {
    let testCode;

    let mochaGrep = options.mochaGrep ? ', ' + JSON.stringify(options.mochaGrep) : '';
    if (file.indexOf('-spec') === -1) {
      testCode = 'const runTest = require("jsunity").runTest; ' +
        'return runTest(' + JSON.stringify(file) + ', true);';
    } else {
      testCode = 'const runTest = require("@arangodb/mocha-runner"); ' +
        'return runTest(' + JSON.stringify(file) + ', true);';
        'return runTest(' + JSON.stringify(file) + ', true' + mochaGrep + ');';
    }

    if (options.propagateInstanceInfo) {
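To make the concatenation concrete, a hedged worked example of the testCode string produced for a -spec file when a grep pattern is set; the file name and pattern are invented:

  // assuming file = 'js/client/tests/foo-spec.js' and options.mochaGrep = 'collection',
  // the generated code sent to the server evaluates to:
  // const runTest = require("@arangodb/mocha-runner"); return runTest("js/client/tests/foo-spec.js", true, "collection");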
@@ -1323,10 +1338,13 @@ function startInstanceCluster (instanceInfo, protocol, options,
  options.agencyWaitForSync = false;
  startInstanceAgency(instanceInfo, protocol, options, ...makeArgs('agency', 'agency', {}));

  let usedPorts = [];
  let agencyEndpoint = instanceInfo.endpoint;
  let i;
  for (i = 0; i < options.dbServers; i++) {
    let endpoint = protocol + '://127.0.0.1:' + findFreePort(options.maxPort);
    let port = findFreePort(options.minPort, options.maxPort, usedPorts);
    usedPorts.push(port);
    let endpoint = protocol + '://127.0.0.1:' + port;
    let primaryArgs = _.clone(options.extraArgs);
    primaryArgs['server.endpoint'] = endpoint;
    primaryArgs['cluster.my-address'] = endpoint;
@@ -1338,7 +1356,9 @@ function startInstanceCluster (instanceInfo, protocol, options,
  }

  for (i=0;i<options.coordinators;i++) {
    let endpoint = protocol + '://127.0.0.1:' + findFreePort(options.maxPort);
    let port = findFreePort(options.minPort, options.maxPort, usedPorts);
    usedPorts.push(port);
    let endpoint = protocol + '://127.0.0.1:' + port;
    let coordinatorArgs = _.clone(options.extraArgs);
    coordinatorArgs['server.endpoint'] = endpoint;
    coordinatorArgs['cluster.my-address'] = endpoint;
@@ -1392,7 +1412,7 @@ function startArango (protocol, options, addArgs, rootDir, role) {
  let port;

  if (!addArgs['server.endpoint']) {
    port = findFreePort(options.maxPort);
    port = findFreePort(options.minPort, options.maxPort);
    endpoint = protocol + '://127.0.0.1:' + port;
  } else {
    endpoint = addArgs['server.endpoint'];
@@ -1454,6 +1474,7 @@ function startInstanceAgency (instanceInfo, protocol, options, addArgs, rootDir)
  }
  const wfs = options.agencyWaitForSync;

  let usedPorts = [];
  for (let i = 0; i < N; i++) {
    let instanceArgs = _.clone(addArgs);
    instanceArgs['log.file'] = fs.join(rootDir, 'log' + String(i));
@@ -1463,7 +1484,8 @@ function startInstanceAgency (instanceInfo, protocol, options, addArgs, rootDir)
    instanceArgs['agency.wait-for-sync'] = String(wfs);
    instanceArgs['agency.supervision'] = String(S);
    instanceArgs['database.directory'] = dataDir + String(i);
    const port = findFreePort(options.maxPort);
    const port = findFreePort(options.minPort, options.maxPort, usedPorts);
    usedPorts.push(port);
    instanceArgs['server.endpoint'] = protocol + '://127.0.0.1:' + port;
    instanceArgs['agency.my-address'] = protocol + '://127.0.0.1:' + port;
    instanceArgs['agency.supervision-grace-period'] = '5';
@@ -1807,6 +1829,7 @@ function findTests () {
  testsCases.resilience = doOnePath('js/server/tests/resilience');

  testsCases.client_resilience = doOnePath('js/client/tests/resilience');
  testsCases.cluster_sync = doOnePath('js/server/tests/cluster-sync');

  testsCases.server = testsCases.common.concat(testsCases.server_only);
  testsCases.client = testsCases.common.concat(testsCases.client_only);
@@ -3552,6 +3575,28 @@ testFuncs.shell_server = function (options) {
  return performTests(options, testsCases.server, 'shell_server', runThere);
};

// //////////////////////////////////////////////////////////////////////////////
// / @brief TEST: cluster_sync
// //////////////////////////////////////////////////////////////////////////////

testFuncs.cluster_sync = function (options) {
  if (options.cluster) {
    // may sound strange but these are actually pure logic tests
    // and should not be executed on the cluster
    return {
      'cluster_sync': {
        'status': true,
        'message': 'skipped because of cluster',
        'skipped': true
      }
    };
  }
  findTests();
  options.propagateInstanceInfo = true;

  return performTests(options, testsCases.cluster_sync, 'cluster_sync', runThere);
};

// //////////////////////////////////////////////////////////////////////////////
// / @brief TEST: shell_server_aql
// //////////////////////////////////////////////////////////////////////////////
@@ -3610,10 +3655,10 @@ testFuncs.endpoints = function(options) {

  let endpoints = {
    'tcpv4': function() {
      return 'tcp://127.0.0.1:' + findFreePort(options.maxPort);
      return 'tcp://127.0.0.1:' + findFreePort(options.minPort, options.maxPort);
    },
    'tcpv6': function() {
      return 'tcp://[::1]:' + findFreePort(options.maxPort);
      return 'tcp://[::1]:' + findFreePort(options.minPort, options.maxPort);
    },
    'unix': function() {
      if (platform.substr(0, 3) === 'win') {
@@ -3820,7 +3865,7 @@ testFuncs.upgrade = function (options) {
  fs.makeDirectoryRecursive(tmpDataDir);

  const appDir = fs.join(tmpDataDir, 'app');
  const port = findFreePort(options.maxPort);
  const port = findFreePort(options.minPort, options.maxPort);

  let args = makeArgsArangod(options, appDir);
  args['server.endpoint'] = 'tcp://127.0.0.1:' + port;
@@ -34,8 +34,8 @@ const colors = require('internal').COLORS;

const $_MODULE_CONTEXT = Symbol.for('@arangodb/module.context');

module.exports = function (files, returnJson) {
  const results = runTests(run, files, 'suite');
module.exports = function (files, returnJson, grep) {
  const results = runTests(run, files, 'suite', grep);
  print();
  logSuite(results);
  logStats(results.stats);
@@ -53,7 +53,7 @@ exports.reporters = {
  default: DefaultReporter
};

exports.run = function runMochaTests (run, files, reporterName) {
exports.run = function runMochaTests (run, files, reporterName, grep) {
  if (!Array.isArray(files)) {
    files = [files];
  }
@@ -85,6 +85,9 @@ exports.run = function runMochaTests (run, files, reporterName) {
      return this;
    }
  };
  if (grep) {
    mocha.grep(grep);
  }

  // Clean up after chai.should(), etc
  var globals = Object.getOwnPropertyNames(global);
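As a hedged sketch of how the new grep argument could be exercised end to end; the file path and pattern below are invented for illustration:

  // hypothetical direct call into the mocha runner with a filter pattern
  const runTest = require('@arangodb/mocha-runner');
  // only suites/tests whose titles match 'cluster sync' are executed
  runTest('js/common/tests/shell/example-spec.js', true, 'cluster sync');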
File diff suppressed because it is too large
@@ -29,6 +29,7 @@
// //////////////////////////////////////////////////////////////////////////////

var internal = require('internal');
var ERRORS = internal.errors;
var endpointToURL = require('@arangodb/cluster').endpointToURL;
var request;
if (ArangoServerState.role() === 'PRIMARY') {
@@ -272,7 +273,15 @@ function syncCollectionFinalize (database, collname, from, config) {
          coll.remove(entry.data._key);
        } catch (errx) {
          console.error('syncCollectionFinalize: remove', entry, JSON.stringify(errx));
          throw errx;
          if (errx.errorNum !== ERRORS.ERROR_ARANGO_DOCUMENT_NOT_FOUND.code) {
            throw errx;
          }
          // We swallow the NOT FOUND error here. It is possible that
          // a follower tries to synchronize to a leader with whom it
          // is already in sync. In that case there could have been a
          // synchronously replicated removal operation that has happened
          // whilst we were resynchronizing the shard. In this case, the
          // removal would have happened already.
        }
      } else if (entry.type === mType.REPLICATION_TRANSACTION_START) {
        transactions[entry.tid] = [];
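The same swallow-only-NOT-FOUND pattern in isolation, as a hedged sketch; the helper name and arguments are placeholders and not part of the commit:

  // hypothetical helper following the pattern above: tolerate only "document not found"
  function removeIgnoringNotFound (coll, key) {
    try {
      coll.remove(key);
    } catch (err) {
      if (err.errorNum !== ERRORS.ERROR_ARANGO_DOCUMENT_NOT_FOUND.code) {
        throw err;  // any other error is still fatal
      }
      // the removal already happened via synchronous replication; nothing left to do
    }
  }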
@ -0,0 +1,757 @@
|
|||
/* global describe, it, before, beforeEach, afterEach */
|
||||
|
||||
// //////////////////////////////////////////////////////////////////////////////
|
||||
// / @brief JavaScript cluster functionality
|
||||
// /
|
||||
// / @file
|
||||
// /
|
||||
// / DISCLAIMER
|
||||
// /
|
||||
// / Copyright 2017 ArangoDB GmbH, Cologne, Germany
|
||||
// /
|
||||
// / Licensed under the Apache License, Version 2.0 (the "License")
|
||||
// / you may not use this file except in compliance with the License.
|
||||
// / You may obtain a copy of the License at
|
||||
// /
|
||||
// / http://www.apache.org/licenses/LICENSE-2.0
|
||||
// /
|
||||
// / Unless required by applicable law or agreed to in writing, software
|
||||
// / distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// / See the License for the specific language governing permissions and
|
||||
// / limitations under the License.
|
||||
// /
|
||||
// / Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
// /
|
||||
// / @author Andreas Streichardt
|
||||
// //////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
const db = require('internal').db;
|
||||
const cluster = require('@arangodb/cluster');
|
||||
const expect = require('chai').expect;
|
||||
const ArangoCollection = require('@arangodb/arango-collection').ArangoCollection;
|
||||
|
||||
describe('Cluster sync', function() {
|
||||
describe('Databaseplan to local', function() {
|
||||
before(function() {
|
||||
require('@arangodb/sync-replication-debug').setup();
|
||||
});
|
||||
|
||||
beforeEach(function() {
|
||||
db._databases().forEach(database => {
|
||||
if (database !== '_system') {
|
||||
db._dropDatabase(database);
|
||||
}
|
||||
});
|
||||
});
|
||||
it('should create a planned database', function() {
|
||||
let plan = {
|
||||
"Databases": {
|
||||
"test": {
|
||||
"id": 1,
|
||||
"name": "test"
|
||||
}
|
||||
}
|
||||
};
|
||||
let errors = cluster.executePlanForDatabases(plan.Databases);
|
||||
let databases = db._databases();
|
||||
expect(databases).to.have.lengthOf(2);
|
||||
expect(databases).to.contain('test');
|
||||
expect(errors).to.be.empty;
|
||||
});
|
||||
it('should leave everything in place if a planned database already exists', function() {
|
||||
let plan = {
|
||||
Databases: {
|
||||
"test": {
|
||||
"id": 1,
|
||||
"name": "test"
|
||||
}
|
||||
}
|
||||
};
|
||||
db._createDatabase('test');
|
||||
let errors = cluster.executePlanForDatabases(plan.Databases);
|
||||
let databases = db._databases();
|
||||
expect(databases).to.have.lengthOf(2);
|
||||
expect(databases).to.contain('test');
|
||||
expect(errors).to.be.empty;
|
||||
});
|
||||
it('should delete a database if it is not used anymore', function() {
|
||||
db._createDatabase('peng');
|
||||
let plan = {
|
||||
Databases: {
|
||||
}
|
||||
};
|
||||
cluster.executePlanForDatabases(plan.Databases);
|
||||
let databases = db._databases();
|
||||
expect(databases).to.have.lengthOf(1);
|
||||
expect(databases).to.contain('_system');
|
||||
});
|
||||
});
|
||||
describe('Collection plan to local', function() {
|
||||
let numSystemCollections;
|
||||
before(function() {
|
||||
require('@arangodb/sync-replication-debug').setup();
|
||||
});
|
||||
|
||||
beforeEach(function() {
|
||||
db._databases().forEach(database => {
|
||||
if (database !== '_system') {
|
||||
db._dropDatabase(database);
|
||||
}
|
||||
});
|
||||
db._createDatabase('test');
|
||||
db._useDatabase('test');
|
||||
numSystemCollections = db._collections().length;
|
||||
});
|
||||
afterEach(function() {
|
||||
db._useDatabase('_system');
|
||||
});
|
||||
it('should create and load a collection if it does not exist', function() {
|
||||
let plan = {
|
||||
Collections: {
|
||||
test: {
|
||||
"100001": {
|
||||
"deleted": false,
|
||||
"doCompact": true,
|
||||
"id": "100001",
|
||||
"indexBuckets": 8,
|
||||
"indexes": [
|
||||
{
|
||||
"fields": [
|
||||
"_key"
|
||||
],
|
||||
"id": "0",
|
||||
"sparse": false,
|
||||
"type": "primary",
|
||||
"unique": true
|
||||
}
|
||||
],
|
||||
"isSystem": false,
|
||||
"isVolatile": false,
|
||||
"journalSize": 1048576,
|
||||
"keyOptions": {
|
||||
"allowUserKeys": true,
|
||||
"type": "traditional"
|
||||
},
|
||||
"name": "test",
|
||||
"numberOfShards": 1,
|
||||
"replicationFactor": 2,
|
||||
"shardKeys": [
|
||||
"_key"
|
||||
],
|
||||
"shards": {
|
||||
"s100001": [
|
||||
"",
|
||||
]
|
||||
},
|
||||
"status": 3,
|
||||
"type": 2,
|
||||
"waitForSync": false
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
cluster.executePlanForCollections(plan.Collections);
|
||||
db._useDatabase('test');
|
||||
let collections = db._collections();
|
||||
expect(collections.map(collection => collection.name())).to.contain('s100001');
|
||||
expect(db._collection('s100001').status()).to.equal(ArangoCollection.STATUS_LOADED);
|
||||
});
|
||||
it('should create a collection if it does not exist (unloaded case)', function() {
|
||||
let plan = {
|
||||
Collections: {
|
||||
test: {
|
||||
"100001": {
|
||||
"deleted": false,
|
||||
"doCompact": true,
|
||||
"id": "100001",
|
||||
"indexBuckets": 8,
|
||||
"indexes": [
|
||||
{
|
||||
"fields": [
|
||||
"_key"
|
||||
],
|
||||
"id": "0",
|
||||
"sparse": false,
|
||||
"type": "primary",
|
||||
"unique": true
|
||||
}
|
||||
],
|
||||
"isSystem": false,
|
||||
"isVolatile": false,
|
||||
"journalSize": 1048576,
|
||||
"keyOptions": {
|
||||
"allowUserKeys": true,
|
||||
"type": "traditional"
|
||||
},
|
||||
"name": "test",
|
||||
"numberOfShards": 1,
|
||||
"replicationFactor": 2,
|
||||
"shardKeys": [
|
||||
"_key"
|
||||
],
|
||||
"shards": {
|
||||
"s100001": [
|
||||
"",
|
||||
]
|
||||
},
|
||||
"status": 2,
|
||||
"type": 2,
|
||||
"waitForSync": false
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
cluster.executePlanForCollections(plan.Collections);
|
||||
db._useDatabase('test');
|
||||
let collections = db._collections();
|
||||
expect(collections.map(collection => collection.name())).to.contain('s100001');
|
||||
expect(db._collection('s100001').status()).to.equal(ArangoCollection.STATUS_UNLOADED);
|
||||
});
|
||||
it('should unload an existing collection', function() {
|
||||
db._create('s100001');
|
||||
expect(db._collection('s100001').status()).to.equal(ArangoCollection.STATUS_LOADED);
|
||||
let plan = {
|
||||
test: {
|
||||
"100001": {
|
||||
"deleted": false,
|
||||
"doCompact": true,
|
||||
"id": "100001",
|
||||
"indexBuckets": 8,
|
||||
"indexes": [
|
||||
{
|
||||
"fields": [
|
||||
"_key"
|
||||
],
|
||||
"id": "0",
|
||||
"sparse": false,
|
||||
"type": "primary",
|
||||
"unique": true
|
||||
}
|
||||
],
|
||||
"isSystem": false,
|
||||
"isVolatile": false,
|
||||
"journalSize": 1048576,
|
||||
"keyOptions": {
|
||||
"allowUserKeys": true,
|
||||
"type": "traditional"
|
||||
},
|
||||
"name": "test",
|
||||
"numberOfShards": 1,
|
||||
"replicationFactor": 2,
|
||||
"shardKeys": [
|
||||
"_key"
|
||||
],
|
||||
"shards": {
|
||||
"s100001": [
|
||||
"",
|
||||
]
|
||||
},
|
||||
"status": 2,
|
||||
"type": 2,
|
||||
"waitForSync": false
|
||||
}
|
||||
}
|
||||
};
|
||||
cluster.executePlanForCollections(plan);
|
||||
db._useDatabase('test');
|
||||
expect(db._collection('s100001').status()).to.equal(ArangoCollection.STATUS_UNLOADED);
|
||||
});
|
||||
it('should delete a stale collection', function() {
|
||||
db._create('s100001');
|
||||
let plan = {
|
||||
Collections: {
|
||||
test: {
|
||||
}
|
||||
}
|
||||
};
|
||||
cluster.executePlanForCollections(plan.Collections);
|
||||
db._useDatabase('test');
|
||||
let collections = db._collections();
|
||||
expect(collections).to.have.lengthOf(numSystemCollections);
|
||||
});
|
||||
it('should ignore a collection for which it is not responsible', function() {
|
||||
let plan = {
|
||||
Collections: {
|
||||
test: {
|
||||
"100001": {
|
||||
"deleted": false,
|
||||
"doCompact": true,
|
||||
"id": "100001",
|
||||
"indexBuckets": 8,
|
||||
"indexes": [
|
||||
{
|
||||
"fields": [
|
||||
"_key"
|
||||
],
|
||||
"id": "0",
|
||||
"sparse": false,
|
||||
"type": "primary",
|
||||
"unique": true
|
||||
}
|
||||
],
|
||||
"isSystem": false,
|
||||
"isVolatile": false,
|
||||
"journalSize": 1048576,
|
||||
"keyOptions": {
|
||||
"allowUserKeys": true,
|
||||
"type": "traditional"
|
||||
},
|
||||
"name": "test",
|
||||
"numberOfShards": 1,
|
||||
"replicationFactor": 2,
|
||||
"shardKeys": [
|
||||
"_key"
|
||||
],
|
||||
"shards": {
|
||||
"s100001": [
|
||||
"swag",
|
||||
]
|
||||
},
|
||||
"status": 3,
|
||||
"type": 2,
|
||||
"waitForSync": false
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
cluster.executePlanForCollections(plan.Collections);
|
||||
db._useDatabase('test');
|
||||
let collections = db._collections();
|
||||
expect(collections).to.have.lengthOf(numSystemCollections);
|
||||
});
|
||||
it('should delete a collection for which it lost responsibility', function() {
|
||||
db._create('s100001');
|
||||
let plan = {
|
||||
Collections: {
|
||||
test: {
|
||||
"100001": {
|
||||
"deleted": false,
|
||||
"doCompact": true,
|
||||
"id": "100001",
|
||||
"indexBuckets": 8,
|
||||
"indexes": [
|
||||
{
|
||||
"fields": [
|
||||
"_key"
|
||||
],
|
||||
"id": "0",
|
||||
"sparse": false,
|
||||
"type": "primary",
|
||||
"unique": true
|
||||
}
|
||||
],
|
||||
"isSystem": false,
|
||||
"isVolatile": false,
|
||||
"journalSize": 1048576,
|
||||
"keyOptions": {
|
||||
"allowUserKeys": true,
|
||||
"type": "traditional"
|
||||
},
|
||||
"name": "test",
|
||||
"numberOfShards": 1,
|
||||
"replicationFactor": 2,
|
||||
"shardKeys": [
|
||||
"_key"
|
||||
],
|
||||
"shards": {
|
||||
"s100001": [
|
||||
"debug-follower", // this is a different server than we are
|
||||
]
|
||||
},
|
||||
"status": 2,
|
||||
"type": 2,
|
||||
"waitForSync": false
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
cluster.executePlanForCollections(plan.Collections);
|
||||
db._useDatabase('test');
|
||||
let collections = db._collections();
|
||||
expect(collections).to.have.lengthOf(numSystemCollections);
|
||||
});
|
||||
it('should create an additional index if instructed to do so', function() {
|
||||
db._create('s100001');
|
||||
let plan = {
|
||||
Collections: {
|
||||
test: {
|
||||
"100001": {
|
||||
"deleted": false,
|
||||
"doCompact": true,
|
||||
"id": "100001",
|
||||
"indexBuckets": 8,
|
||||
"indexes": [
|
||||
{
|
||||
"fields": [
|
||||
"_key"
|
||||
],
|
||||
"id": "0",
|
||||
"sparse": false,
|
||||
"type": "primary",
|
||||
"unique": true
|
||||
},
|
||||
{
|
||||
"error": false,
|
||||
"errorMessage": "",
|
||||
"errorNum": 0,
|
||||
"fields": [
|
||||
"user"
|
||||
],
|
||||
"id": "100005",
|
||||
"sparse": true,
|
||||
"type": "hash",
|
||||
"unique": true
|
||||
}
|
||||
],
|
||||
"isSystem": false,
|
||||
"isVolatile": false,
|
||||
"journalSize": 1048576,
|
||||
"keyOptions": {
|
||||
"allowUserKeys": true,
|
||||
"type": "traditional"
|
||||
},
|
||||
"name": "test",
|
||||
"numberOfShards": 1,
|
||||
"replicationFactor": 2,
|
||||
"shardKeys": [
|
||||
"_key"
|
||||
],
|
||||
"shards": {
|
||||
"s100001": [
|
||||
""
|
||||
]
|
||||
},
|
||||
"status": 2,
|
||||
"type": 2,
|
||||
"waitForSync": false
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
cluster.executePlanForCollections(plan.Collections);
|
||||
db._useDatabase('test');
|
||||
let indexes = db._collection('s100001').getIndexes();
|
||||
expect(indexes).to.have.lengthOf(2);
|
||||
});
|
||||
it('should remove an additional index if instructed to do so', function() {
|
||||
db._create('s100001');
|
||||
db._collection('s100001').ensureIndex({ type: "hash", fields: [ "name" ] });
|
||||
let plan = {
|
||||
Databases: {
|
||||
"_system": {
|
||||
"id": 1,
|
||||
"name": "_system"
|
||||
},
|
||||
"test": {
|
||||
"id": 2,
|
||||
"name": "test"
|
||||
}
|
||||
},
|
||||
Collections: {
|
||||
test: {
|
||||
"100001": {
|
||||
"deleted": false,
|
||||
"doCompact": true,
|
||||
"id": "100001",
|
||||
"indexBuckets": 8,
|
||||
"indexes": [
|
||||
{
|
||||
"fields": [
|
||||
"_key"
|
||||
],
|
||||
"id": "0",
|
||||
"sparse": false,
|
||||
"type": "primary",
|
||||
"unique": true
|
||||
}
|
||||
],
|
||||
"isSystem": false,
|
||||
"isVolatile": false,
|
||||
"journalSize": 1048576,
|
||||
"keyOptions": {
|
||||
"allowUserKeys": true,
|
||||
"type": "traditional"
|
||||
},
|
||||
"name": "test",
|
||||
"numberOfShards": 1,
|
||||
"replicationFactor": 2,
|
||||
"shardKeys": [
|
||||
"_key"
|
||||
],
|
||||
"shards": {
|
||||
"s100001": [
|
||||
"",
|
||||
]
|
||||
},
|
||||
"status": 2,
|
||||
"type": 2,
|
||||
"waitForSync": false
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
cluster.executePlanForCollections(plan.Collections);
|
||||
db._useDatabase('test');
|
||||
let indexes = db._collection('s100001').getIndexes();
|
||||
expect(indexes).to.have.lengthOf(1);
|
||||
});
|
||||
it('should report an error when collection creation failed', function() {
|
||||
let plan = {
|
||||
Collections: {
|
||||
test: {
|
||||
"100001": {
|
||||
"deleted": false,
|
||||
"doCompact": true,
|
||||
"id": "100001",
|
||||
"indexBuckets": 8,
|
||||
"indexes": [
|
||||
{
|
||||
"fields": [
|
||||
"_key"
|
||||
],
|
||||
"id": "0",
|
||||
"sparse": false,
|
||||
"type": "primary",
|
||||
"unique": true
|
||||
}
|
||||
],
|
||||
"isSystem": false,
|
||||
"isVolatile": false,
|
||||
"journalSize": 1048576,
|
||||
"keyOptions": {
|
||||
"allowUserKeys": true,
|
||||
"type": "traditional"
|
||||
},
|
||||
"name": "test",
|
||||
"numberOfShards": 1,
|
||||
"replicationFactor": 2,
|
||||
"shardKeys": [
|
||||
"_key"
|
||||
],
|
||||
"shards": {
|
||||
"Möter": [
|
||||
"",
|
||||
]
|
||||
},
|
||||
"status": 2,
|
||||
"type": 2,
|
||||
"waitForSync": false
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
let errors = cluster.executePlanForCollections(plan.Collections);
|
||||
expect(errors).to.be.an('object');
|
||||
expect(errors).to.have.property('Möter');
|
||||
});
|
||||
it('should be leading a collection when ordered to be leader', function() {
|
||||
let plan = {
|
||||
test: {
|
||||
"100001": {
|
||||
"deleted": false,
|
||||
"doCompact": true,
|
||||
"id": "100001",
|
||||
"indexBuckets": 8,
|
||||
"indexes": [
|
||||
{
|
||||
"fields": [
|
||||
"_key"
|
||||
],
|
||||
"id": "0",
|
||||
"sparse": false,
|
||||
"type": "primary",
|
||||
"unique": true
|
||||
}
|
||||
],
|
||||
"isSystem": false,
|
||||
"isVolatile": false,
|
||||
"journalSize": 1048576,
|
||||
"keyOptions": {
|
||||
"allowUserKeys": true,
|
||||
"type": "traditional"
|
||||
},
|
||||
"name": "test",
|
||||
"numberOfShards": 1,
|
||||
"replicationFactor": 2,
|
||||
"shardKeys": [
|
||||
"_key"
|
||||
],
|
||||
"shards": {
|
||||
"s100001": [
|
||||
"",
|
||||
]
|
||||
},
|
||||
"status": 3,
|
||||
"type": 2,
|
||||
"waitForSync": false
|
||||
}
|
||||
}
|
||||
};
|
||||
let errors = cluster.executePlanForCollections(plan);
|
||||
db._useDatabase('test');
|
||||
expect(db._collection('s100001').isLeader()).to.equal(true);
|
||||
});
|
||||
it('should be following a leader when ordered to be follower', function() {
|
||||
let plan = {
|
||||
test: {
|
||||
"100001": {
|
||||
"deleted": false,
|
||||
"doCompact": true,
|
||||
"id": "100001",
|
||||
"indexBuckets": 8,
|
||||
"indexes": [
|
||||
{
|
||||
"fields": [
|
||||
"_key"
|
||||
],
|
||||
"id": "0",
|
||||
"sparse": false,
|
||||
"type": "primary",
|
||||
"unique": true
|
||||
}
|
||||
],
|
||||
"isSystem": false,
|
||||
"isVolatile": false,
|
||||
"journalSize": 1048576,
|
||||
"keyOptions": {
|
||||
"allowUserKeys": true,
|
||||
"type": "traditional"
|
||||
},
|
||||
"name": "test",
|
||||
"numberOfShards": 1,
|
||||
"replicationFactor": 2,
|
||||
"shardKeys": [
|
||||
"_key"
|
||||
],
|
||||
"shards": {
|
||||
"s100001": [
|
||||
"the leader-leader",
|
||||
"",
|
||||
]
|
||||
},
|
||||
"status": 2,
|
||||
"type": 2,
|
||||
"waitForSync": false
|
||||
}
|
||||
}
|
||||
};
|
||||
let errors = cluster.executePlanForCollections(plan);
|
||||
db._useDatabase('test');
|
||||
expect(db._collection('s100001').isLeader()).to.equal(false);
|
||||
});
|
||||
it('should be able to switch from leader to follower', function() {
|
||||
let plan = {
|
||||
test: {
|
||||
"100001": {
|
||||
"deleted": false,
|
||||
"doCompact": true,
|
||||
"id": "100001",
|
||||
"indexBuckets": 8,
|
||||
"indexes": [
|
||||
{
|
||||
"fields": [
|
||||
"_key"
|
||||
],
|
||||
"id": "0",
|
||||
"sparse": false,
|
||||
"type": "primary",
|
||||
"unique": true
|
||||
}
|
||||
],
|
||||
"isSystem": false,
|
||||
"isVolatile": false,
|
||||
"journalSize": 1048576,
|
||||
"keyOptions": {
|
||||
"allowUserKeys": true,
|
||||
"type": "traditional"
|
||||
},
|
||||
"name": "test",
|
||||
"numberOfShards": 1,
|
||||
"replicationFactor": 2,
|
||||
"shardKeys": [
|
||||
"_key"
|
||||
],
|
||||
"shards": {
|
||||
"s100001": [
|
||||
"",
|
||||
]
|
||||
},
|
||||
"status": 2,
|
||||
"type": 2,
|
||||
"waitForSync": false
|
||||
}
|
||||
}
|
||||
};
|
||||
let errors = cluster.executePlanForCollections(plan);
|
||||
plan.test['100001'].shards['s100001'].unshift('der-hund');
|
||||
cluster.executePlanForCollections(plan);
|
||||
db._useDatabase('test');
|
||||
expect(db._collection('s100001').isLeader()).to.equal(false);
|
||||
});
|
||||
it('should be able to switch from follower to leader', function() {
|
||||
let plan = {
|
||||
test: {
|
||||
"100001": {
|
||||
"deleted": false,
|
||||
"doCompact": true,
|
||||
"id": "100001",
|
||||
"indexBuckets": 8,
|
||||
"indexes": [
|
||||
{
|
||||
"fields": [
|
||||
"_key"
|
||||
],
|
||||
"id": "0",
|
||||
"sparse": false,
|
||||
"type": "primary",
|
||||
"unique": true
|
||||
}
|
||||
],
|
||||
"isSystem": false,
|
||||
"isVolatile": false,
|
||||
"journalSize": 1048576,
|
||||
"keyOptions": {
|
||||
"allowUserKeys": true,
|
||||
"type": "traditional"
|
||||
},
|
||||
"name": "test",
|
||||
"numberOfShards": 1,
|
||||
"replicationFactor": 2,
|
||||
"shardKeys": [
|
||||
"_key"
|
||||
],
|
||||
"shards": {
|
||||
"s100001": [
|
||||
"old-leader",
|
||||
"",
|
||||
]
|
||||
},
|
||||
"status": 2,
|
||||
"type": 2,
|
||||
"waitForSync": false
|
||||
}
|
||||
}
|
||||
};
|
||||
let errors = cluster.executePlanForCollections(plan);
|
||||
plan.test['100001'].shards['s100001'] = [""];
|
||||
cluster.executePlanForCollections(plan);
|
||||
db._useDatabase('test');
|
||||
expect(db._collection('s100001').isLeader()).to.equal(true);
|
||||
});
|
||||
});
|
||||
describe('Update current', function() {
  beforeEach(function() {
    db._databases().forEach(database => {
      if (database !== '_system') {
        db._dropDatabase(database);
      }
    });
  });

  it('should report a new database', function() {
    let Current = {
      Databases: {},
    };
  });
});
});

@ -157,7 +157,6 @@ function MovingShardsSuite () {
      wait(1.0);
      global.ArangoClusterInfo.flush();
      var servers = findCollectionServers("_system", c[i].name());
      console.info("Seeing servers:", i, c[i].name(), servers);
      if (servers.indexOf(id) === -1) {
        // Now check current as well:
        var collInfo =

@ -64,6 +64,237 @@ var sortedKeys = function (col) {
  return keys;
};

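// this suite checks that the revisions count reported by figures() stays
// consistent with the stored documents when operations fail or transactions
// are rolled back, and when documents are written with a caller-supplied _rev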
function transactionRevisionsSuite () {
  'use strict';
  var cn = "UnitTestsTransaction";
  var c = null;

  return {

    setUp : function () {
      internal.debugClearFailAt();
      db._drop(cn);
      c = db._create(cn);
    },

    tearDown : function () {
      internal.debugClearFailAt();

      if (c !== null) {
        c.drop();
      }

      c = null;
      internal.wait(0);
    },

    testInsertUniqueFailing : function () {
      var doc = c.insert({ _key: "test", value: 1 });
      try {
        db._executeTransaction({
          collections: { write: c.name() },
          action: function() {
            c.insert({ _key: "test", value: 2 });
          }
        });
        fail();
      } catch (err) {
      }

      assertEqual(1, c.figures().revisions.count);
      assertEqual(1, c.count());
      assertEqual(1, c.toArray().length);
      assertEqual(1, c.document("test").value);
    },

    testInsertUniqueSingleFailing : function () {
      var doc = c.insert({ _key: "test", value: 1 });
      try {
        c.insert({ _key: "test", value: 2 });
        fail();
      } catch (err) {
      }

      assertEqual(1, c.figures().revisions.count);
      assertEqual(1, c.count());
      assertEqual(1, c.toArray().length);
      assertEqual(1, c.document("test").value);
    },

    testInsertTransactionFailing : function () {
      var doc = c.insert({ _key: "test", value: 1 });
      try {
        db._executeTransaction({
          collections: { write: c.name() },
          action: function() {
            c.insert({ _key: "test2", value: 2 });
            throw "foo";
          }
        });
        fail();
      } catch (err) {
      }

      assertEqual(1, c.toArray().length);
      assertEqual(1, c.figures().revisions.count);
      assertEqual(1, c.document("test").value);
    },

    testRemoveTransactionFailing : function () {
      var doc = c.insert({ _key: "test", value: 1 });
      try {
        db._executeTransaction({
          collections: { write: c.name() },
          action: function() {
            c.remove("test");
            throw "foo";
          }
        });
        fail();
      } catch (err) {
      }

      assertEqual(1, c.toArray().length);
      assertEqual(1, c.figures().revisions.count);
      assertEqual(1, c.document("test").value);
    },

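    // the following tests pass the isRestore option, which presumably (as in a
    // restore/replication scenario) makes the server keep the supplied _rev
    // instead of generating a new revision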
    testRemoveInsertWithSameRev : function () {
      var doc = c.insert({ _key: "test", value: 1 });
      db._executeTransaction({
        collections: { write: c.name() },
        action: function() {
          c.remove("test");
          c.insert({ _key: "test", _rev: doc._rev, value: 2 }, { isRestore: true });
        }
      });

      assertEqual(1, c.toArray().length);
      assertEqual(1, c.figures().revisions.count);
      assertEqual(2, c.document("test").value);
    },

    testUpdateWithSameRev : function () {
      var doc = c.insert({ _key: "test", value: 1 });
      c.update("test", { _key: "test", _rev: doc._rev, value: 2 }, { isRestore: true });

      assertEqual(1, c.toArray().length);
      assertEqual(1, c.figures().revisions.count);
      assertEqual(2, c.document("test").value);
    },

    testUpdateWithSameRevTransaction : function () {
      var doc = c.insert({ _key: "test", value: 1 });
      db._executeTransaction({
        collections: { write: c.name() },
        action: function() {
          c.update("test", { _key: "test", _rev: doc._rev, value: 2 }, { isRestore: true });
        }
      });

      assertEqual(1, c.toArray().length);
      assertEqual(1, c.figures().revisions.count);
      assertEqual(2, c.document("test").value);
    },

    testUpdateFailingWithSameRev : function () {
      var doc = c.insert({ _key: "test", value: 1 });
      try {
        db._executeTransaction({
          collections: { write: c.name() },
          action: function() {
            c.update("test", { _key: "test", _rev: doc._rev, value: 2 }, { isRestore: true });
            throw "foo";
          }
        });
        fail();
      } catch (err) {
      }

      assertEqual(1, c.toArray().length);
      assertEqual(1, c.figures().revisions.count);
      assertEqual(1, c.document("test").value);
    },

    testUpdateFailing : function () {
      var doc = c.insert({ _key: "test", value: 1 });
      try {
        db._executeTransaction({
          collections: { write: c.name() },
          action: function() {
            c.update({ _key: "test", value: 2 });
            throw "foo";
          }
        });
        fail();
      } catch (err) {
      }

      assertEqual(1, c.toArray().length);
      assertEqual(1, c.figures().revisions.count);
      assertEqual(1, c.document("test").value);
    },

    testUpdateAndInsertFailing : function () {
      var doc = c.insert({ _key: "test", value: 1 });
      try {
        db._executeTransaction({
          collections: { write: c.name() },
          action: function() {
            c.update({ _key: "test", value: 2 });
            c.insert({ _key: "test", value: 3 });
            throw "foo";
          }
        });
        fail();
      } catch (err) {
      }

      assertEqual(1, c.toArray().length);
      assertEqual(1, c.figures().revisions.count);
      assertEqual(1, c.document("test").value);
    },

    testRemoveAndInsert : function () {
      var doc = c.insert({ _key: "test", value: 1 });
      db._executeTransaction({
        collections: { write: c.name() },
        action: function() {
          c.remove("test");
          c.insert({ _key: "test", value: 2 });
        }
      });

      assertEqual(1, c.toArray().length);
      assertEqual(1, c.figures().revisions.count);
      assertEqual(2, c.document("test").value);
    },

    testRemoveAndInsertFailing : function () {
      var doc = c.insert({ _key: "test", value: 1 });
      try {
        db._executeTransaction({
          collections: { write: c.name() },
          action: function() {
            c.remove("test");
            c.insert({ _key: "test", value: 3 });
            throw "foo";
          }
        });
        fail();
      } catch (err) {
      }

      assertEqual(1, c.toArray().length);
      assertEqual(1, c.figures().revisions.count);
      assertEqual(1, c.document("test").value);
    }

  };
}

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////

@ -5157,6 +5388,9 @@ function transactionServerFailuresSuite () {
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////

jsunity.run(transactionRevisionsSuite);
jsunity.run(transactionRollbackSuite);

// only run this test suite if server-side failures are enabled
if (internal.debugCanUseFailAt()) {
  jsunity.run(transactionServerFailuresSuite);

@ -5167,7 +5401,6 @@ jsunity.run(transactionCollectionsSuite);
jsunity.run(transactionOperationsSuite);
jsunity.run(transactionBarriersSuite);
jsunity.run(transactionGraphSuite);
jsunity.run(transactionRollbackSuite);
jsunity.run(transactionCountSuite);
jsunity.run(transactionCrossCollectionSuite);
jsunity.run(transactionConstraintsSuite);

@ -45,6 +45,16 @@ std::string const StaticStrings::RevString("_rev");
std::string const StaticStrings::FromString("_from");
std::string const StaticStrings::ToString("_to");

// URL parameter names
std::string const StaticStrings::IgnoreRevsString("ignoreRevs");
std::string const StaticStrings::IsRestoreString("isRestore");
std::string const StaticStrings::KeepNullString("keepNull");
std::string const StaticStrings::MergeObjectsString("mergeObjects");
std::string const StaticStrings::ReturnNewString("returnNew");
std::string const StaticStrings::ReturnOldString("returnOld");
std::string const StaticStrings::SilentString("silent");
std::string const StaticStrings::WaitForSyncString("waitForSync");

// database and collection names
std::string const StaticStrings::SystemDatabase("_system");

@ -50,6 +50,16 @@ class StaticStrings {
  static std::string const RevString;
  static std::string const FromString;
  static std::string const ToString;

  // URL parameter names
  static std::string const IgnoreRevsString;
  static std::string const IsRestoreString;
  static std::string const KeepNullString;
  static std::string const MergeObjectsString;
  static std::string const ReturnNewString;
  static std::string const ReturnOldString;
  static std::string const SilentString;
  static std::string const WaitForSyncString;

  // database and collection names
  static std::string const SystemDatabase;

@ -350,7 +350,7 @@ SIMPLE_CLIENT_COULD_NOT_READ,2003,"could not read from server","Will be raised w
## Communicator errors
################################################################################

COMMUNICATOR_REQUEST_ABORTED,2100,"Request aborted", "Request was aborted."
COMMUNICATOR_REQUEST_ABORTED,2100,"Request aborted","Request was aborted."

################################################################################
## Foxx management errors

@ -33,8 +33,12 @@ TRANSPORT="tcp"
LOG_LEVEL="INFO"
LOG_LEVEL_AGENCY=""
LOG_LEVEL_CLUSTER=""
XTERM="x-terminal-emulator"
XTERMOPTIONS="--geometry=80x43"
if [ -z "$XTERM" ] ; then
    XTERM="x-terminal-emulator"
fi
if [ -z "$XTERMOPTIONS" ] ; then
    XTERMOPTIONS="--geometry=80x43"
fi
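# the -z checks keep the previous values as defaults, but allow XTERM and
# XTERMOPTIONS to be overridden from the environment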
SECONDARIES=0
BUILD="build"
JWT_SECRET=""

@ -227,7 +231,7 @@ start() {
        --server.endpoint $TRANSPORT://0.0.0.0:$PORT \
        --cluster.my-role $ROLE \
        --log.file cluster/$PORT.log \
        --log.level info \
        --log.level $LOG_LEVEL \
        --server.statistics true \
        --server.threads 5 \
        --javascript.startup-directory ./js \

@ -250,7 +254,7 @@ startTerminal() {
    PORT=$2
    mkdir cluster/data$PORT
    echo Starting $TYPE on port $PORT
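    # the arangod invocation below is now quoted so that the terminal emulator
    # receives the whole command as a single -e argument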
    $XTERM $XTERMOPTIONS -e ${BUILD}/bin/arangod \
    $XTERM $XTERMOPTIONS -e "${BUILD}/bin/arangod \
        -c none \
        --database.directory cluster/data$PORT \
        --cluster.agency-endpoint $TRANSPORT://127.0.0.1:$BASE \

@ -258,7 +262,7 @@ startTerminal() {
        --server.endpoint $TRANSPORT://0.0.0.0:$PORT \
        --cluster.my-role $ROLE \
        --log.file cluster/$PORT.log \
        --log.level info \
        --log.level $LOG_LEVEL \
        --server.statistics true \
        --server.threads 5 \
        --javascript.startup-directory ./js \

@ -266,7 +270,7 @@ startTerminal() {
        --javascript.app-path ./js/apps \
        $AUTHENTICATION \
        $SSLKEYFILE \
        --console &
        --console" &
}

startDebugger() {

@ -287,7 +291,7 @@ startDebugger() {
        --server.endpoint $TRANSPORT://0.0.0.0:$PORT \
        --cluster.my-role $ROLE \
        --log.file cluster/$PORT.log \
        --log.level info \
        --log.level $LOG_LEVEL \
        --server.statistics false \
        --server.threads 5 \
        --javascript.startup-directory ./js \

@ -295,7 +299,7 @@ startDebugger() {
        --javascript.app-path ./js/apps \
        $SSLKEYFILE \
        $AUTHENTICATION &
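    # same quoting fix here: gdb and its arguments are handed to the terminal
    # emulator as one -e argument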
    $XTERM $XTERMOPTIONS -e gdb ${BUILD}/bin/arangod -p $! &
    $XTERM $XTERMOPTIONS -e "gdb ${BUILD}/bin/arangod -p $!" &
}

startRR() {

@ -308,7 +312,7 @@ startRR() {
    PORT=$2
    mkdir cluster/data$PORT
    echo Starting $TYPE on port $PORT with rr tracer
    $XTERM $XTERMOPTIONS -e rr ${BUILD}/bin/arangod \
    $XTERM $XTERMOPTIONS -e "rr ${BUILD}/bin/arangod \
        -c none \
        --database.directory cluster/data$PORT \
        --cluster.agency-endpoint $TRANSPORT://127.0.0.1:$BASE \

@ -316,7 +320,7 @@ startRR() {
        --server.endpoint $TRANSPORT://0.0.0.0:$PORT \
        --cluster.my-role $ROLE \
        --log.file cluster/$PORT.log \
        --log.level info \
        --log.level $LOG_LEVEL \
        --server.statistics true \
        --server.threads 5 \
        --javascript.startup-directory ./js \

@ -324,7 +328,7 @@ startRR() {
        --javascript.app-path ./js/apps \
        $AUTHENTICATION \
        $SSLKEYFILE \
        --console &
        --console" &
}

PORTTOPDB=`expr 8629 + $NRDBSERVERS - 1`