
Merge branch 'devel' of https://github.com/arangodb/arangodb into json_agency_comm

Kaveh Vahedipour 2016-04-26 10:55:13 +02:00
commit 4e2d86dc67
15 changed files with 67 additions and 44 deletions


@@ -158,7 +158,7 @@ class ObjectIterator {
     }
     if (slice.head() == 0x14 && slice.length() > 0) {
-      _current = slice.keyAt(0).start();
+      _current = slice.keyAt(0, false).start();
     }
   }
@@ -227,14 +227,14 @@ class ObjectIterator {
   inline bool valid() const throw() { return (_position < _size); }
-  inline Slice key() const {
+  inline Slice key(bool translate = true) const {
     if (_position >= _size) {
       throw Exception(Exception::IndexOutOfBounds);
     }
     if (_current != nullptr) {
       return Slice(_current);
     }
-    return _slice.keyAt(_position);
+    return _slice.keyAt(_position, translate);
   }
   inline Slice value() const {


@@ -321,12 +321,12 @@ class Slice {
   //               attribute name
   // - 0x12      : object with 8-byte index table entries, not sorted by
   //               attribute name
-  Slice keyAt(ValueLength index) const {
+  Slice keyAt(ValueLength index, bool translate = true) const {
     if (!isObject()) {
       throw Exception(Exception::InvalidValueType, "Expecting type Object");
     }
-    return getNthKey(index, true);
+    return getNthKey(index, translate);
   }
   Slice valueAt(ValueLength index) const {
@@ -427,7 +427,7 @@ class Slice {
   // translates an integer key into a string
   Slice translate() const;
   // return the value for an Int object
   int64_t getInt() const;
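
For context on the new translate parameter: VelocyPack objects may store attribute names as small integer keys that are resolved to the actual strings through an attribute translator. Passing translate = false returns the raw key, so the caller decides when (or whether) to pay for that lookup; the final hunk of this commit (ObjectVPackObject) uses exactly this pattern. A minimal usage sketch, assuming only the VelocyPack API that already appears in this diff (key(bool), isString(), translate(), copyString()):

// Sketch: iterate an object without forcing key translation, translating
// only the keys that are stored as integers.
#include <string>
#include <vector>
#include <velocypack/Iterator.h>
#include <velocypack/Slice.h>

std::vector<std::string> collectKeys(arangodb::velocypack::Slice object) {
  std::vector<std::string> result;
  arangodb::velocypack::ObjectIterator it(object);
  while (it.valid()) {
    arangodb::velocypack::Slice k = it.key(false);  // raw, untranslated key
    if (!k.isString()) {
      k = k.translate();  // integer key: resolve it to the attribute name
    }
    result.push_back(k.copyString());
    it.next();
  }
  return result;
}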


@@ -141,7 +141,7 @@ HttpHandler::status_t RestAgencyHandler::handleWrite () {
   // Wait for commit of highest except if it is 0?
   if (!ret.indices.empty() && call_mode == "waitForCommitted") {
-    index_t max_index =
+    arangodb::consensus::index_t max_index =
       *std::max_element(ret.indices.begin(), ret.indices.end());
     if (max_index > 0) {
       _agent->waitFor(max_index);


@@ -83,7 +83,7 @@ HttpHandler::status_t RestAgencyPrivHandler::execute() {
   } else {
     term_t term, prevLogTerm;
     arangodb::consensus::id_t id; // leaderId for appendEntries, cadidateId for requestVote
-    index_t prevLogIndex, leaderCommit;
+    arangodb::consensus::index_t prevLogIndex, leaderCommit;
     if (_request->suffix()[0] == "appendEntries") {  // appendEntries
       if (_request->requestType() != GeneralRequest::RequestType::POST) {
         return reportMethodNotAllowed();


@@ -57,13 +57,13 @@ State::State(std::string const& endpoint)
     VPackSlice value = arangodb::basics::VelocyPackHelper::EmptyObjectValue();
     buf->append(value.startAs<char const>(), value.byteSize());
     if (!_log.size()) {
-      _log.push_back(log_t(index_t(0), term_t(0), arangodb::consensus::id_t(0), buf));
+      _log.push_back(log_t(arangodb::consensus::index_t(0), term_t(0), arangodb::consensus::id_t(0), buf));
     }
   }
 State::~State() {}
-bool State::persist(index_t index, term_t term, arangodb::consensus::id_t lid,
+bool State::persist(arangodb::consensus::index_t index, term_t term, arangodb::consensus::id_t lid,
                     arangodb::velocypack::Slice const& entry) {
   Builder body;
   body.add(VPackValue(VPackValueType::Object));
@@ -94,10 +94,10 @@ bool State::persist(index_t index, term_t term, arangodb::consensus::id_t lid,
 }
 //Leader
-std::vector<index_t> State::log (
+std::vector<arangodb::consensus::index_t> State::log (
   query_t const& query, std::vector<bool> const& appl, term_t term, arangodb::consensus::id_t lid) {
-  std::vector<index_t> idx(appl.size());
+  std::vector<arangodb::consensus::index_t> idx(appl.size());
   std::vector<bool> good = appl;
   size_t j = 0;
@@ -118,7 +118,7 @@ std::vector<index_t> State::log (
 // Follower
 bool State::log(query_t const& queries, term_t term, arangodb::consensus::id_t lid,
-                index_t prevLogIndex, term_t prevLogTerm) { // TODO: Throw exc
+                arangodb::consensus::index_t prevLogIndex, term_t prevLogTerm) { // TODO: Throw exc
   if (queries->slice().type() != VPackValueType::Array) {
     return false;
   }
@@ -140,7 +140,7 @@ bool State::log(query_t const& queries, term_t term, arangodb::consensus::id_t l
 }
 // Get log entries from indices "start" to "end"
-std::vector<log_t> State::get(index_t start, index_t end) const {
+std::vector<log_t> State::get(arangodb::consensus::index_t start, arangodb::consensus::index_t end) const {
   std::vector<log_t> entries;
   MUTEX_LOCKER(mutexLocker, _logLock);
   if (end == (std::numeric_limits<uint64_t>::max)()) end = _log.size() - 1;
@@ -150,7 +150,7 @@ std::vector<log_t> State::get(index_t start, index_t end) const {
   return entries;
 }
-std::vector<VPackSlice> State::slices(index_t start, index_t end) const {
+std::vector<VPackSlice> State::slices(arangodb::consensus::index_t start, arangodb::consensus::index_t end) const {
   std::vector<VPackSlice> slices;
   MUTEX_LOCKER(mutexLocker, _logLock);
   if (end == (std::numeric_limits<uint64_t>::max)()) end = _log.size() - 1;
@@ -160,7 +160,7 @@ std::vector<VPackSlice> State::slices(index_t start, index_t end) const {
   return slices;
 }
-log_t const& State::operator[](index_t index) const {
+log_t const& State::operator[](arangodb::consensus::index_t index) const {
   MUTEX_LOCKER(mutexLocker, _logLock);
   return _log[index];
 }
@@ -270,7 +270,7 @@ bool State::loadCollection(std::string const& name) {
   return false;
 }
-bool State::find (index_t prevIndex, term_t prevTerm) {
+bool State::find (arangodb::consensus::index_t prevIndex, term_t prevTerm) {
   MUTEX_LOCKER(mutexLocker, _logLock);
   if (prevIndex > _log.size()) {
     return false;


@@ -86,8 +86,11 @@ struct AqlValue final {
  public:
   // construct an empty AqlValue
   // note: this is the default constructor and should be as cheap as possible
   AqlValue() {
-    initFromSlice(arangodb::velocypack::Slice());
+    // construct a slice of type None
+    _data.internal[0] = '\x00';
+    setType(AqlValueType::VPACK_INLINE);
   }
   // construct from document
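
The new default constructor writes the VelocyPack None type byte directly into the inline buffer instead of routing through initFromSlice(). It relies on the assumption (true for VelocyPack) that a default-constructed slice is a one-byte None value whose head byte is 0x00, which is exactly what the old code produced via arangodb::velocypack::Slice(). A small check of that assumption:

#include <cassert>
#include <velocypack/Slice.h>

void noneEncodingCheck() {
  arangodb::velocypack::Slice none;  // default-constructed slice refers to a None value
  assert(none.isNone());
  assert(none.head() == 0x00);       // the same type byte the constructor now stores inline
}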


@@ -192,7 +192,7 @@ void HeartbeatThread::runDBServer() {
       CONDITION_LOCKER(locker, _condition);
       wasNotified = _wasNotified;
       if (!wasNotified) {
-        locker.wait(static_cast<uint64_t>(remain) * 1000000);
+        locker.wait(static_cast<uint64_t>(remain * 1000000.0));
         wasNotified = _wasNotified;
         _wasNotified = false;
       }
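
The wait fix is an order-of-operations issue: remain holds a fractional number of seconds (an assumption suggested by the scaling factor), so casting to uint64_t before multiplying truncates any sub-second remainder to zero and the thread barely waited at all. Multiplying first and casting afterwards preserves the microseconds:

#include <cstdint>
#include <iostream>

int main() {
  double remain = 0.5;  // e.g. half a second until the next heartbeat is due
  uint64_t before = static_cast<uint64_t>(remain) * 1000000;    // truncates to 0
  uint64_t after = static_cast<uint64_t>(remain * 1000000.0);   // keeps 500000
  std::cout << before << " vs " << after << std::endl;          // prints "0 vs 500000"
}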


@@ -673,8 +673,7 @@ void Index::expandInSearchValues(VPackSlice const base,
     // all of them are now unique so we simply have to multiply
     size_t level = n - 1;
-    std::vector<size_t> positions;
-    positions.resize(n);
+    std::vector<size_t> positions(n, 0);
     bool done = false;
     while (!done) {
       TRI_IF_FAILURE("Index::permutationIN") {


@@ -106,6 +106,7 @@ void CollectionKeys::create(TRI_voc_tick_t maxTick) {
   TRI_ASSERT(_markers == nullptr);
   _markers = new std::vector<TRI_df_marker_t const*>();
+  _markers->reserve(16384);
   // copy all datafile markers into the result under the read-lock
   {


@@ -592,7 +592,8 @@ std::unique_ptr<ArangoDBPathFinder::Path> TRI_RunShortestPathSearch(
   auto edgeFilterClosure = [&opts](VPackSlice edge)
       -> bool { return opts.matchesEdge(edge); };
-  auto vertexFilterClosure = [&opts](VPackSlice const& vertex) -> bool {
+  VPackBuilder tmpBuilder;
+  auto vertexFilterClosure = [&opts, &tmpBuilder](VPackSlice const& vertex) -> bool {
     std::string v = vertex.copyString();
     size_t pos = v.find('/');
@@ -607,12 +608,12 @@ std::unique_ptr<ArangoDBPathFinder::Path> TRI_RunShortestPathSearch(
     std::string col = v.substr(0, pos);
     std::string key = v.substr(pos + 1);
-    VPackBuilder tmp;
-    tmp.openObject();
-    tmp.add(Transaction::KeyString, VPackValue(key));
-    tmp.close();
+    tmpBuilder.clear();
+    tmpBuilder.openObject();
+    tmpBuilder.add(Transaction::KeyString, VPackValue(key));
+    tmpBuilder.close();
     OperationOptions opOpts;
-    OperationResult opRes = opts.trx()->document(col, tmp.slice(), opOpts);
+    OperationResult opRes = opts.trx()->document(col, tmpBuilder.slice(), opOpts);
     if (opRes.failed()) {
       return false;
     }
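
Hoisting tmpBuilder out of vertexFilterClosure means the Builder and its heap buffer are allocated once and reused via clear() for every vertex lookup, instead of being constructed and destroyed on each call. A stand-alone sketch of the same pattern; the "_key" attribute name and the surrounding function are illustrative only:

#include <string>
#include <vector>
#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <velocypack/Value.h>

void buildManyKeyObjects(std::vector<std::string> const& keys) {
  arangodb::velocypack::Builder tmpBuilder;  // lives outside the lambda, allocated once
  auto makeKeyObject = [&tmpBuilder](std::string const& key) {
    tmpBuilder.clear();  // reuse the existing buffer
    tmpBuilder.openObject();
    tmpBuilder.add("_key", arangodb::velocypack::Value(key));
    tmpBuilder.close();
    return tmpBuilder.slice();  // only valid until the next clear()
  };
  for (auto const& k : keys) {
    arangodb::velocypack::Slice s = makeKeyObject(k);
    (void)s;  // a real caller would look up the document here
  }
}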


@@ -1151,10 +1151,20 @@ int TRI_AddOperationTransaction(TRI_transaction_t* trx,
       trx, document->_info.id(), TRI_TRANSACTION_WRITE);
   if (trxCollection->_operations == nullptr) {
     trxCollection->_operations = new std::vector<arangodb::wal::DocumentOperation*>;
+    trxCollection->_operations->reserve(4);
     trx->_hasOperations = true;
+  } else {
+    // reserve space for one more element
+    trxCollection->_operations->reserve(trxCollection->_operations->size() + 1);
   }
   arangodb::wal::DocumentOperation* copy = operation.swap();
+  TRI_IF_FAILURE("TransactionOperationPushBack") {
+    // test what happens if push_back fails
+    THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
+  }
   trxCollection->_operations->push_back(copy);
   copy->handle();
 }
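
The reserve calls are as much about exception safety as about speed: operation.swap() hands over a raw owning pointer, and if the subsequent push_back had to grow the vector and threw, that pointer would leak; the new TransactionOperationPushBack failure point (exercised by the updated transaction tests) simulates exactly that situation. Growing the capacity up front, while a failure is still harmless, makes the later push_back a no-throw step. The same pattern in isolation:

#include <memory>
#include <vector>

struct Operation {};

void append(std::vector<Operation*>& ops, std::unique_ptr<Operation> op) {
  // reserve() may throw std::bad_alloc, but op is still owned by the unique_ptr, so nothing leaks
  ops.reserve(ops.size() + 1);
  // capacity is now sufficient: push_back of a raw pointer cannot throw
  ops.push_back(op.release());
}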


@@ -30,8 +30,8 @@ using namespace arangodb::wal;
 Marker::Marker(TRI_df_marker_t const* existing, TRI_voc_fid_t fid)
     : _buffer(reinterpret_cast<char*>(const_cast<TRI_df_marker_t*>(existing))),
       _size(existing->getSize()),
-      _mustFree(false),
-      _fid(fid) {}
+      _fid(fid),
+      _mustFree(false) {}
 /// @brief create marker from a VPackSlice
 Marker::Marker(TRI_df_marker_type_t type, VPackSlice const& properties)
@@ -44,18 +44,11 @@ Marker::Marker(TRI_df_marker_type_t type, VPackSlice const& properties)
 Marker::Marker(TRI_df_marker_type_t type, size_t size)
     : _buffer(new char[size]),
       _size(static_cast<uint32_t>(size)),
-      _mustFree(true),
-      _fid(0) {
+      _fid(0),
+      _mustFree(true) {
   DatafileHelper::InitMarker(reinterpret_cast<TRI_df_marker_t*>(begin()), type, _size);
 }
-/// @brief destroy marker
-Marker::~Marker() {
-  if (_buffer != nullptr && _mustFree) {
-    delete[] _buffer;
-  }
-}
 /// @brief store a vpack slice
 void Marker::storeSlice(size_t offset, arangodb::velocypack::Slice const& slice) {
   char* p = static_cast<char*>(begin()) + offset;


@@ -71,7 +71,7 @@ class Marker {
   Marker(TRI_df_marker_type_t, arangodb::velocypack::Slice const&);
  public:
-  virtual ~Marker();
+  virtual ~Marker() { freeBuffer(); }
   inline void freeBuffer() {
     if (_buffer != nullptr && _mustFree) {
@@ -106,11 +106,11 @@ class Marker {
   /// @brief size of marker data
   uint32_t const _size;
-  /// @brief whether or not the destructor must free the memory
-  bool _mustFree;
   /// @brief id of the logfile the marker is stored in
   TRI_voc_fid_t _fid;
+  /// @brief whether or not the destructor must free the memory
+  bool _mustFree;
 };
 class MarkerEnvelope : public Marker {
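
Two notes on the Marker changes: the out-of-line destructor becomes an inline one that simply delegates to freeBuffer(), and _fid/_mustFree swap places in both the member declarations and the constructors' initializer lists, so the two stay in sync. Keeping them in sync matters because C++ always initializes members in declaration order, regardless of how the initializer list is written; a mismatch triggers -Wreorder because it can hide bugs like this one:

// Illustrative only, not taken from the commit: the initializer list order is
// ignored, so a(b + 1) runs before b(x) and reads an uninitialized b.
struct Example {
  int a;
  int b;
  explicit Example(int x) : b(x), a(b + 1) {}  // compilers warn with -Wreorder
};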


@@ -4153,6 +4153,7 @@ function transactionServerFailuresSuite () {
         "TransactionOperationNoSlot",
         "TransactionOperationNoSlotExcept",
         "TransactionOperationAfterAdjust",
+        "TransactionOperationPushBack",
         "TransactionOperationAtEnd" ];
       failures.forEach (function (f) {
@@ -4188,6 +4189,7 @@ function transactionServerFailuresSuite () {
         "TransactionOperationNoSlot",
         "TransactionOperationNoSlotExcept",
         "TransactionOperationAfterAdjust",
+        "TransactionOperationPushBack",
         "TransactionOperationAtEnd" ];
       failures.forEach (function (f) {
@@ -4260,6 +4262,7 @@ function transactionServerFailuresSuite () {
         "TransactionOperationNoSlot",
         "TransactionOperationNoSlotExcept",
         "TransactionOperationAfterAdjust",
+        "TransactionOperationPushBack",
         "TransactionOperationAtEnd" ];
       failures.forEach (function (f) {
@@ -4329,6 +4332,7 @@ function transactionServerFailuresSuite () {
         "TransactionOperationNoSlot",
         "TransactionOperationNoSlotExcept",
         "TransactionOperationAfterAdjust",
+        "TransactionOperationPushBack",
         "TransactionOperationAtEnd" ];
       failures.forEach (function (f) {
@@ -4367,6 +4371,7 @@ function transactionServerFailuresSuite () {
         "TransactionOperationNoSlot",
         "TransactionOperationNoSlotExcept",
         "TransactionOperationAfterAdjust",
+        "TransactionOperationPushBack",
         "TransactionOperationAtEnd" ];
       failures.forEach (function (f) {
@@ -4418,6 +4423,7 @@ function transactionServerFailuresSuite () {
         "TransactionOperationNoSlot",
         "TransactionOperationNoSlotExcept",
         "TransactionOperationAfterAdjust",
+        "TransactionOperationPushBack",
         "TransactionOperationAtEnd" ];
       failures.forEach (function (f) {
@@ -4458,6 +4464,7 @@ function transactionServerFailuresSuite () {
         "TransactionOperationNoSlot",
         "TransactionOperationNoSlotExcept",
         "TransactionOperationAfterAdjust",
+        "TransactionOperationPushBack",
         "TransactionOperationAtEnd" ];
       failures.forEach (function (f) {
@@ -4510,6 +4517,7 @@ function transactionServerFailuresSuite () {
         "TransactionOperationNoSlot",
         "TransactionOperationNoSlotExcept",
         "TransactionOperationAfterAdjust",
+        "TransactionOperationPushBack",
         "TransactionOperationAtEnd" ];
       failures.forEach (function (f) {
@@ -4615,6 +4623,7 @@ function transactionServerFailuresSuite () {
         "TransactionOperationNoSlot",
         "TransactionOperationNoSlotExcept",
         "TransactionOperationAfterAdjust",
+        "TransactionOperationPushBack",
         "TransactionOperationAtEnd" ];
       failures.forEach (function (f) {


@@ -68,8 +68,15 @@ static v8::Handle<v8::Value> ObjectVPackObject(v8::Isolate* isolate,
     v8::Handle<v8::Value> val =
         TRI_VPackToV8(isolate, it.value(), options, &slice);
     if (!val.IsEmpty()) {
-      auto k = ObjectVPackString(isolate, it.key());
-      object->ForceSet(k, val);
+      arangodb::velocypack::ValueLength l;
+      VPackSlice k = it.key(false);
+      if (k.isString()) {
+        char const* p = k.getString(l);
+        object->ForceSet(TRI_V8_PAIR_STRING(p, l), val);
+      } else {
+        char const* p = k.translate().getString(l);
+        object->ForceSet(TRI_V8_ASCII_PAIR_STRING(p, l), val);
+      }
     }
     it.next();
   }