mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of https://github.com/arangodb/arangodb into feature/add-distinct-aggregator
commit 69dec8ba17
@@ -297,7 +297,7 @@ class Builder {
   }

   // Return a Slice of the result:
-  inline Slice slice() const noexcept {
+  inline Slice slice() const {
     if (isEmpty()) {
       return Slice();
     }

@@ -67,12 +67,12 @@ namespace velocypack {

 class SliceScope;

-class SliceStaticData {
-  friend class Slice;
+struct SliceStaticData {
   static uint8_t const FixedTypeLengths[256];
   static ValueType const TypeMap[256];
   static unsigned int const WidthMap[32];
   static unsigned int const FirstSubMap[32];
+  static uint64_t const PrecalculatedHashesForDefaultSeed[256];
 };

 class Slice {

@@ -87,6 +87,7 @@ class Slice {
   uint8_t const* _start;

  public:
+  static constexpr uint64_t defaultSeed = 0xdeadbeef;

   // constructor for an empty Value of type None
   constexpr Slice() noexcept : Slice("\x00") {}

@@ -171,18 +172,24 @@ class Slice {
   inline uint8_t head() const noexcept { return *_start; }

   // hashes the binary representation of a value
-  inline uint64_t hash(uint64_t seed = 0xdeadbeef) const {
-    return VELOCYPACK_HASH(start(), checkOverflow(byteSize()), seed);
+  inline uint64_t hash(uint64_t seed = defaultSeed) const {
+    size_t const size = checkOverflow(byteSize());
+    if (seed == defaultSeed && size == 1) {
+      uint64_t h = SliceStaticData::PrecalculatedHashesForDefaultSeed[head()];
+      VELOCYPACK_ASSERT(h != 0);
+      return h;
+    }
+    return VELOCYPACK_HASH(start(), size, seed);
   }

   // hashes the value, normalizing different representations of
   // arrays, objects and numbers. this function may produce different
   // hash values than the binary hash() function
-  uint64_t normalizedHash(uint64_t seed = 0xdeadbeef) const;
+  uint64_t normalizedHash(uint64_t seed = defaultSeed) const;

   // hashes the binary representation of a String slice. No check
   // is done if the Slice value is actually of type String
-  inline uint64_t hashString(uint64_t seed = 0xdeadbeef) const noexcept {
+  inline uint64_t hashString(uint64_t seed = defaultSeed) const noexcept {
     return VELOCYPACK_HASH(start(), static_cast<size_t>(stringSliceLength()), seed);
   }

@@ -97,7 +97,7 @@ bool assemblerFunctionsDisabled();
 std::size_t checkOverflow(ValueLength);
 #else
 // on a 64 bit platform, the following function is probably a no-op
-static inline constexpr std::size_t checkOverflow(ValueLength length) {
+static inline constexpr std::size_t checkOverflow(ValueLength length) noexcept {
   return static_cast<std::size_t>(length);
 }
 #endif

@@ -114,7 +114,7 @@ static inline ValueLength getVariableValueLength(ValueLength value) noexcept {

 // read a variable length integer in unsigned LEB128 format
 template <bool reverse>
-static inline ValueLength readVariableValueLength(uint8_t const* source) {
+static inline ValueLength readVariableValueLength(uint8_t const* source) noexcept {
   ValueLength len = 0;
   uint8_t v;
   ValueLength p = 0;

@@ -133,7 +133,7 @@ static inline ValueLength readVariableValueLength(uint8_t const* source) {

 // store a variable length integer in unsigned LEB128 format
 template <bool reverse>
-static inline void storeVariableValueLength(uint8_t* dst, ValueLength value) {
+static inline void storeVariableValueLength(uint8_t* dst, ValueLength value) noexcept {
   VELOCYPACK_ASSERT(value > 0);

   if (reverse) {

@@ -3,10 +3,10 @@
 #ifndef VELOCYPACK_VERSION_NUMBER_H
 #define VELOCYPACK_VERSION_NUMBER_H 1

-#define VELOCYPACK_VERSION "0.1.30"
+#define VELOCYPACK_VERSION "0.1.31"

 #define VELOCYPACK_VERSION_MAJOR 0
 #define VELOCYPACK_VERSION_MINOR 1
-#define VELOCYPACK_VERSION_PATCH 30
+#define VELOCYPACK_VERSION_PATCH 31

 #endif

@@ -345,6 +345,269 @@ unsigned int const SliceStaticData::FirstSubMap[32] = {
     0, // 0x14, compact object, no index table - note: the offset is dynamic!
     0};

+uint64_t const SliceStaticData::PrecalculatedHashesForDefaultSeed[256] = {
+#ifdef VELOCYPACK_XXHASH
+    /* 0x00 */ 0xe2b56fa571b3a544, /* 0x01 */ 0xda3cfd1dc58389d8,
+    /* 0x02 */ 0x0000000000000000, /* 0x03 */ 0x0000000000000000,
+    /* 0x04 */ 0x0000000000000000, /* 0x05 */ 0x0000000000000000,
+    /* 0x06 */ 0x0000000000000000, /* 0x07 */ 0x0000000000000000,
+    /* 0x08 */ 0x0000000000000000, /* 0x09 */ 0x0000000000000000,
+    /* 0x0a */ 0xd296bf393ef8d5f5, /* 0x0b */ 0x0000000000000000,
+    /* 0x0c */ 0x0000000000000000, /* 0x0d */ 0x0000000000000000,
+    /* 0x0e */ 0x0000000000000000, /* 0x0f */ 0x0000000000000000,
+    /* 0x10 */ 0x0000000000000000, /* 0x11 */ 0x0000000000000000,
+    /* 0x12 */ 0x0000000000000000, /* 0x13 */ 0x0000000000000000,
+    /* 0x14 */ 0x0000000000000000, /* 0x15 */ 0x0000000000000000,
+    /* 0x16 */ 0x0000000000000000, /* 0x17 */ 0x24db76da0ebbd8bb,
+    /* 0x18 */ 0x1a6a668fb2aa030a, /* 0x19 */ 0xed545328fb397fed,
+    /* 0x1a */ 0xfd783491fceeb46b, /* 0x1b */ 0x0000000000000000,
+    /* 0x1c */ 0x0000000000000000, /* 0x1d */ 0x0000000000000000,
+    /* 0x1e */ 0x505ec293ddfeec5e, /* 0x1f */ 0x9fab26ba108e2fbf,
+    /* 0x20 */ 0x0000000000000000, /* 0x21 */ 0x0000000000000000,
+    /* 0x22 */ 0x0000000000000000, /* 0x23 */ 0x0000000000000000,
+    /* 0x24 */ 0x0000000000000000, /* 0x25 */ 0x0000000000000000,
+    /* 0x26 */ 0x0000000000000000, /* 0x27 */ 0x0000000000000000,
+    /* 0x28 */ 0x0000000000000000, /* 0x29 */ 0x0000000000000000,
+    /* 0x2a */ 0x0000000000000000, /* 0x2b */ 0x0000000000000000,
+    /* 0x2c */ 0x0000000000000000, /* 0x2d */ 0x0000000000000000,
+    /* 0x2e */ 0x0000000000000000, /* 0x2f */ 0x0000000000000000,
+    /* 0x30 */ 0xcdf3868203041650, /* 0x31 */ 0x7c9133bfa6f828a6,
+    /* 0x32 */ 0x62e05b34c4ed7ce4, /* 0x33 */ 0xb79b6530d263533d,
+    /* 0x34 */ 0xf05f611558114f31, /* 0x35 */ 0xc941f9afc86cdcd5,
+    /* 0x36 */ 0x79873ccd694a9f90, /* 0x37 */ 0xd360268d66bf8c1f,
+    /* 0x38 */ 0x19e870f3e36185fe, /* 0x39 */ 0xd154aeb6ba9114e5,
+    /* 0x3a */ 0xa4d6434557b5b885, /* 0x3b */ 0x91584221ca9eda5b,
+    /* 0x3c */ 0xb39b55e7252de481, /* 0x3d */ 0xe6bf494f0a40618e,
+    /* 0x3e */ 0xd2ce603b7dceb6ea, /* 0x3f */ 0xec71f69fe56368f1,
+    /* 0x40 */ 0x142191d3f9a23bce, /* 0x41 */ 0x0000000000000000,
+    /* 0x42 */ 0x0000000000000000, /* 0x43 */ 0x0000000000000000,
+    /* 0x44 */ 0x0000000000000000, /* 0x45 */ 0x0000000000000000,
+    /* 0x46 */ 0x0000000000000000, /* 0x47 */ 0x0000000000000000,
+    /* 0x48 */ 0x0000000000000000, /* 0x49 */ 0x0000000000000000,
+    /* 0x4a */ 0x0000000000000000, /* 0x4b */ 0x0000000000000000,
+    /* 0x4c */ 0x0000000000000000, /* 0x4d */ 0x0000000000000000,
+    /* 0x4e */ 0x0000000000000000, /* 0x4f */ 0x0000000000000000,
+    /* 0x50 */ 0x0000000000000000, /* 0x51 */ 0x0000000000000000,
+    /* 0x52 */ 0x0000000000000000, /* 0x53 */ 0x0000000000000000,
+    /* 0x54 */ 0x0000000000000000, /* 0x55 */ 0x0000000000000000,
+    /* 0x56 */ 0x0000000000000000, /* 0x57 */ 0x0000000000000000,
+    /* 0x58 */ 0x0000000000000000, /* 0x59 */ 0x0000000000000000,
+    /* 0x5a */ 0x0000000000000000, /* 0x5b */ 0x0000000000000000,
+    /* 0x5c */ 0x0000000000000000, /* 0x5d */ 0x0000000000000000,
+    /* 0x5e */ 0x0000000000000000, /* 0x5f */ 0x0000000000000000,
+    /* 0x60 */ 0x0000000000000000, /* 0x61 */ 0x0000000000000000,
+    /* 0x62 */ 0x0000000000000000, /* 0x63 */ 0x0000000000000000,
+    /* 0x64 */ 0x0000000000000000, /* 0x65 */ 0x0000000000000000,
+    /* 0x66 */ 0x0000000000000000, /* 0x67 */ 0x0000000000000000,
+    /* 0x68 */ 0x0000000000000000, /* 0x69 */ 0x0000000000000000,
+    /* 0x6a */ 0x0000000000000000, /* 0x6b */ 0x0000000000000000,
+    /* 0x6c */ 0x0000000000000000, /* 0x6d */ 0x0000000000000000,
+    /* 0x6e */ 0x0000000000000000, /* 0x6f */ 0x0000000000000000,
+    /* 0x70 */ 0x0000000000000000, /* 0x71 */ 0x0000000000000000,
+    /* 0x72 */ 0x0000000000000000, /* 0x73 */ 0x0000000000000000,
+    /* 0x74 */ 0x0000000000000000, /* 0x75 */ 0x0000000000000000,
+    /* 0x76 */ 0x0000000000000000, /* 0x77 */ 0x0000000000000000,
+    /* 0x78 */ 0x0000000000000000, /* 0x79 */ 0x0000000000000000,
+    /* 0x7a */ 0x0000000000000000, /* 0x7b */ 0x0000000000000000,
+    /* 0x7c */ 0x0000000000000000, /* 0x7d */ 0x0000000000000000,
+    /* 0x7e */ 0x0000000000000000, /* 0x7f */ 0x0000000000000000,
+    /* 0x80 */ 0x0000000000000000, /* 0x81 */ 0x0000000000000000,
+    /* 0x82 */ 0x0000000000000000, /* 0x83 */ 0x0000000000000000,
+    /* 0x84 */ 0x0000000000000000, /* 0x85 */ 0x0000000000000000,
+    /* 0x86 */ 0x0000000000000000, /* 0x87 */ 0x0000000000000000,
+    /* 0x88 */ 0x0000000000000000, /* 0x89 */ 0x0000000000000000,
+    /* 0x8a */ 0x0000000000000000, /* 0x8b */ 0x0000000000000000,
+    /* 0x8c */ 0x0000000000000000, /* 0x8d */ 0x0000000000000000,
+    /* 0x8e */ 0x0000000000000000, /* 0x8f */ 0x0000000000000000,
+    /* 0x90 */ 0x0000000000000000, /* 0x91 */ 0x0000000000000000,
+    /* 0x92 */ 0x0000000000000000, /* 0x93 */ 0x0000000000000000,
+    /* 0x94 */ 0x0000000000000000, /* 0x95 */ 0x0000000000000000,
+    /* 0x96 */ 0x0000000000000000, /* 0x97 */ 0x0000000000000000,
+    /* 0x98 */ 0x0000000000000000, /* 0x99 */ 0x0000000000000000,
+    /* 0x9a */ 0x0000000000000000, /* 0x9b */ 0x0000000000000000,
+    /* 0x9c */ 0x0000000000000000, /* 0x9d */ 0x0000000000000000,
+    /* 0x9e */ 0x0000000000000000, /* 0x9f */ 0x0000000000000000,
+    /* 0xa0 */ 0x0000000000000000, /* 0xa1 */ 0x0000000000000000,
+    /* 0xa2 */ 0x0000000000000000, /* 0xa3 */ 0x0000000000000000,
+    /* 0xa4 */ 0x0000000000000000, /* 0xa5 */ 0x0000000000000000,
+    /* 0xa6 */ 0x0000000000000000, /* 0xa7 */ 0x0000000000000000,
+    /* 0xa8 */ 0x0000000000000000, /* 0xa9 */ 0x0000000000000000,
+    /* 0xaa */ 0x0000000000000000, /* 0xab */ 0x0000000000000000,
+    /* 0xac */ 0x0000000000000000, /* 0xad */ 0x0000000000000000,
+    /* 0xae */ 0x0000000000000000, /* 0xaf */ 0x0000000000000000,
+    /* 0xb0 */ 0x0000000000000000, /* 0xb1 */ 0x0000000000000000,
+    /* 0xb2 */ 0x0000000000000000, /* 0xb3 */ 0x0000000000000000,
+    /* 0xb4 */ 0x0000000000000000, /* 0xb5 */ 0x0000000000000000,
+    /* 0xb6 */ 0x0000000000000000, /* 0xb7 */ 0x0000000000000000,
+    /* 0xb8 */ 0x0000000000000000, /* 0xb9 */ 0x0000000000000000,
+    /* 0xba */ 0x0000000000000000, /* 0xbb */ 0x0000000000000000,
+    /* 0xbc */ 0x0000000000000000, /* 0xbd */ 0x0000000000000000,
+    /* 0xbe */ 0x0000000000000000, /* 0xbf */ 0x0000000000000000,
+    /* 0xc0 */ 0x0000000000000000, /* 0xc1 */ 0x0000000000000000,
+    /* 0xc2 */ 0x0000000000000000, /* 0xc3 */ 0x0000000000000000,
+    /* 0xc4 */ 0x0000000000000000, /* 0xc5 */ 0x0000000000000000,
+    /* 0xc6 */ 0x0000000000000000, /* 0xc7 */ 0x0000000000000000,
+    /* 0xc8 */ 0x0000000000000000, /* 0xc9 */ 0x0000000000000000,
+    /* 0xca */ 0x0000000000000000, /* 0xcb */ 0x0000000000000000,
+    /* 0xcc */ 0x0000000000000000, /* 0xcd */ 0x0000000000000000,
+    /* 0xce */ 0x0000000000000000, /* 0xcf */ 0x0000000000000000,
+    /* 0xd0 */ 0x0000000000000000, /* 0xd1 */ 0x0000000000000000,
+    /* 0xd2 */ 0x0000000000000000, /* 0xd3 */ 0x0000000000000000,
+    /* 0xd4 */ 0x0000000000000000, /* 0xd5 */ 0x0000000000000000,
+    /* 0xd6 */ 0x0000000000000000, /* 0xd7 */ 0x0000000000000000,
+    /* 0xd8 */ 0x0000000000000000, /* 0xd9 */ 0x0000000000000000,
+    /* 0xda */ 0x0000000000000000, /* 0xdb */ 0x0000000000000000,
+    /* 0xdc */ 0x0000000000000000, /* 0xdd */ 0x0000000000000000,
+    /* 0xde */ 0x0000000000000000, /* 0xdf */ 0x0000000000000000,
+    /* 0xe0 */ 0x0000000000000000, /* 0xe1 */ 0x0000000000000000,
+    /* 0xe2 */ 0x0000000000000000, /* 0xe3 */ 0x0000000000000000,
+    /* 0xe4 */ 0x0000000000000000, /* 0xe5 */ 0x0000000000000000,
+    /* 0xe6 */ 0x0000000000000000, /* 0xe7 */ 0x0000000000000000,
+    /* 0xe8 */ 0x0000000000000000, /* 0xe9 */ 0x0000000000000000,
+    /* 0xea */ 0x0000000000000000, /* 0xeb */ 0x0000000000000000,
+    /* 0xec */ 0x0000000000000000, /* 0xed */ 0x0000000000000000,
+    /* 0xee */ 0x0000000000000000, /* 0xef */ 0x0000000000000000,
+    /* 0xf0 */ 0x0000000000000000, /* 0xf1 */ 0x0000000000000000,
+    /* 0xf2 */ 0x0000000000000000, /* 0xf3 */ 0x0000000000000000,
+    /* 0xf4 */ 0x0000000000000000, /* 0xf5 */ 0x0000000000000000,
+    /* 0xf6 */ 0x0000000000000000, /* 0xf7 */ 0x0000000000000000,
+    /* 0xf8 */ 0x0000000000000000, /* 0xf9 */ 0x0000000000000000,
+    /* 0xfa */ 0x0000000000000000, /* 0xfb */ 0x0000000000000000,
+    /* 0xfc */ 0x0000000000000000, /* 0xfd */ 0x0000000000000000,
+    /* 0xfe */ 0x0000000000000000, /* 0xff */ 0x0000000000000000
+#endif
+#ifdef VELOCYPACK_FASTHASH
+    /* 0x00 */ 0xf747d9afd5fc13cd, /* 0x01 */ 0x9dd59a0795d72dae,
+    /* 0x02 */ 0x0000000000000000, /* 0x03 */ 0x0000000000000000,
+    /* 0x04 */ 0x0000000000000000, /* 0x05 */ 0x0000000000000000,
+    /* 0x06 */ 0x0000000000000000, /* 0x07 */ 0x0000000000000000,
+    /* 0x08 */ 0x0000000000000000, /* 0x09 */ 0x0000000000000000,
+    /* 0x0a */ 0x651f231e0822a1f2, /* 0x0b */ 0x0000000000000000,
+    /* 0x0c */ 0x0000000000000000, /* 0x0d */ 0x0000000000000000,
+    /* 0x0e */ 0x0000000000000000, /* 0x0f */ 0x0000000000000000,
+    /* 0x10 */ 0x0000000000000000, /* 0x11 */ 0x0000000000000000,
+    /* 0x12 */ 0x0000000000000000, /* 0x13 */ 0x0000000000000000,
+    /* 0x14 */ 0x0000000000000000, /* 0x15 */ 0x0000000000000000,
+    /* 0x16 */ 0x0000000000000000, /* 0x17 */ 0x423211fec79af09b,
+    /* 0x18 */ 0xd43a065f33b14e52, /* 0x19 */ 0xf1adc756c139e443,
+    /* 0x1a */ 0x6bf229fb02c686b, /* 0x1b */ 0x0000000000000000,
+    /* 0x1c */ 0x0000000000000000, /* 0x1d */ 0x0000000000000000,
+    /* 0x1e */ 0xc36f498e29ef2aba, /* 0x1f */ 0x1ad28762083cdc7d,
+    /* 0x20 */ 0x0000000000000000, /* 0x21 */ 0x0000000000000000,
+    /* 0x22 */ 0x0000000000000000, /* 0x23 */ 0x0000000000000000,
+    /* 0x24 */ 0x0000000000000000, /* 0x25 */ 0x0000000000000000,
+    /* 0x26 */ 0x0000000000000000, /* 0x27 */ 0x0000000000000000,
+    /* 0x28 */ 0x0000000000000000, /* 0x29 */ 0x0000000000000000,
+    /* 0x2a */ 0x0000000000000000, /* 0x2b */ 0x0000000000000000,
+    /* 0x2c */ 0x0000000000000000, /* 0x2d */ 0x0000000000000000,
+    /* 0x2e */ 0x0000000000000000, /* 0x2f */ 0x0000000000000000,
+    /* 0x30 */ 0xbb1c99a88abf76d2, /* 0x31 */ 0x8d3e0efdb932c4b8,
+    /* 0x32 */ 0xcd41672e3cd8f76e, /* 0x33 */ 0x3f831eadbd6628f8,
+    /* 0x34 */ 0x9e4ea5d4abe810ae, /* 0x35 */ 0x836489847293c2d6,
+    /* 0x36 */ 0x270883ef450bf1c8, /* 0x37 */ 0x4abf5dea3bb7fb98,
+    /* 0x38 */ 0x5a6892806deadcb2, /* 0x39 */ 0xdeaa9c7264ffdad0,
+    /* 0x3a */ 0xabd49df61b8b4756, /* 0x3b */ 0xc068ab004a6dc8de,
+    /* 0x3c */ 0xc2a9f41025e2711b, /* 0x3d */ 0x47cb9c887443ad40,
+    /* 0x3e */ 0xa57497643e705316, /* 0x3f */ 0xea4688cdf868a142,
+    /* 0x40 */ 0x49e51044202c2999, /* 0x41 */ 0x0000000000000000,
+    /* 0x42 */ 0x0000000000000000, /* 0x43 */ 0x0000000000000000,
+    /* 0x44 */ 0x0000000000000000, /* 0x45 */ 0x0000000000000000,
+    /* 0x46 */ 0x0000000000000000, /* 0x47 */ 0x0000000000000000,
+    /* 0x48 */ 0x0000000000000000, /* 0x49 */ 0x0000000000000000,
+    /* 0x4a */ 0x0000000000000000, /* 0x4b */ 0x0000000000000000,
+    /* 0x4c */ 0x0000000000000000, /* 0x4d */ 0x0000000000000000,
+    /* 0x4e */ 0x0000000000000000, /* 0x4f */ 0x0000000000000000,
+    /* 0x50 */ 0x0000000000000000, /* 0x51 */ 0x0000000000000000,
+    /* 0x52 */ 0x0000000000000000, /* 0x53 */ 0x0000000000000000,
+    /* 0x54 */ 0x0000000000000000, /* 0x55 */ 0x0000000000000000,
+    /* 0x56 */ 0x0000000000000000, /* 0x57 */ 0x0000000000000000,
+    /* 0x58 */ 0x0000000000000000, /* 0x59 */ 0x0000000000000000,
+    /* 0x5a */ 0x0000000000000000, /* 0x5b */ 0x0000000000000000,
+    /* 0x5c */ 0x0000000000000000, /* 0x5d */ 0x0000000000000000,
+    /* 0x5e */ 0x0000000000000000, /* 0x5f */ 0x0000000000000000,
+    /* 0x60 */ 0x0000000000000000, /* 0x61 */ 0x0000000000000000,
+    /* 0x62 */ 0x0000000000000000, /* 0x63 */ 0x0000000000000000,
+    /* 0x64 */ 0x0000000000000000, /* 0x65 */ 0x0000000000000000,
+    /* 0x66 */ 0x0000000000000000, /* 0x67 */ 0x0000000000000000,
+    /* 0x68 */ 0x0000000000000000, /* 0x69 */ 0x0000000000000000,
+    /* 0x6a */ 0x0000000000000000, /* 0x6b */ 0x0000000000000000,
+    /* 0x6c */ 0x0000000000000000, /* 0x6d */ 0x0000000000000000,
+    /* 0x6e */ 0x0000000000000000, /* 0x6f */ 0x0000000000000000,
+    /* 0x70 */ 0x0000000000000000, /* 0x71 */ 0x0000000000000000,
+    /* 0x72 */ 0x0000000000000000, /* 0x73 */ 0x0000000000000000,
+    /* 0x74 */ 0x0000000000000000, /* 0x75 */ 0x0000000000000000,
+    /* 0x76 */ 0x0000000000000000, /* 0x77 */ 0x0000000000000000,
+    /* 0x78 */ 0x0000000000000000, /* 0x79 */ 0x0000000000000000,
+    /* 0x7a */ 0x0000000000000000, /* 0x7b */ 0x0000000000000000,
+    /* 0x7c */ 0x0000000000000000, /* 0x7d */ 0x0000000000000000,
+    /* 0x7e */ 0x0000000000000000, /* 0x7f */ 0x0000000000000000,
+    /* 0x80 */ 0x0000000000000000, /* 0x81 */ 0x0000000000000000,
+    /* 0x82 */ 0x0000000000000000, /* 0x83 */ 0x0000000000000000,
+    /* 0x84 */ 0x0000000000000000, /* 0x85 */ 0x0000000000000000,
+    /* 0x86 */ 0x0000000000000000, /* 0x87 */ 0x0000000000000000,
+    /* 0x88 */ 0x0000000000000000, /* 0x89 */ 0x0000000000000000,
+    /* 0x8a */ 0x0000000000000000, /* 0x8b */ 0x0000000000000000,
+    /* 0x8c */ 0x0000000000000000, /* 0x8d */ 0x0000000000000000,
+    /* 0x8e */ 0x0000000000000000, /* 0x8f */ 0x0000000000000000,
+    /* 0x90 */ 0x0000000000000000, /* 0x91 */ 0x0000000000000000,
+    /* 0x92 */ 0x0000000000000000, /* 0x93 */ 0x0000000000000000,
+    /* 0x94 */ 0x0000000000000000, /* 0x95 */ 0x0000000000000000,
+    /* 0x96 */ 0x0000000000000000, /* 0x97 */ 0x0000000000000000,
+    /* 0x98 */ 0x0000000000000000, /* 0x99 */ 0x0000000000000000,
+    /* 0x9a */ 0x0000000000000000, /* 0x9b */ 0x0000000000000000,
+    /* 0x9c */ 0x0000000000000000, /* 0x9d */ 0x0000000000000000,
+    /* 0x9e */ 0x0000000000000000, /* 0x9f */ 0x0000000000000000,
+    /* 0xa0 */ 0x0000000000000000, /* 0xa1 */ 0x0000000000000000,
+    /* 0xa2 */ 0x0000000000000000, /* 0xa3 */ 0x0000000000000000,
+    /* 0xa4 */ 0x0000000000000000, /* 0xa5 */ 0x0000000000000000,
+    /* 0xa6 */ 0x0000000000000000, /* 0xa7 */ 0x0000000000000000,
+    /* 0xa8 */ 0x0000000000000000, /* 0xa9 */ 0x0000000000000000,
+    /* 0xaa */ 0x0000000000000000, /* 0xab */ 0x0000000000000000,
+    /* 0xac */ 0x0000000000000000, /* 0xad */ 0x0000000000000000,
+    /* 0xae */ 0x0000000000000000, /* 0xaf */ 0x0000000000000000,
+    /* 0xb0 */ 0x0000000000000000, /* 0xb1 */ 0x0000000000000000,
+    /* 0xb2 */ 0x0000000000000000, /* 0xb3 */ 0x0000000000000000,
+    /* 0xb4 */ 0x0000000000000000, /* 0xb5 */ 0x0000000000000000,
+    /* 0xb6 */ 0x0000000000000000, /* 0xb7 */ 0x0000000000000000,
+    /* 0xb8 */ 0x0000000000000000, /* 0xb9 */ 0x0000000000000000,
+    /* 0xba */ 0x0000000000000000, /* 0xbb */ 0x0000000000000000,
+    /* 0xbc */ 0x0000000000000000, /* 0xbd */ 0x0000000000000000,
+    /* 0xbe */ 0x0000000000000000, /* 0xbf */ 0x0000000000000000,
+    /* 0xc0 */ 0x0000000000000000, /* 0xc1 */ 0x0000000000000000,
+    /* 0xc2 */ 0x0000000000000000, /* 0xc3 */ 0x0000000000000000,
+    /* 0xc4 */ 0x0000000000000000, /* 0xc5 */ 0x0000000000000000,
+    /* 0xc6 */ 0x0000000000000000, /* 0xc7 */ 0x0000000000000000,
+    /* 0xc8 */ 0x0000000000000000, /* 0xc9 */ 0x0000000000000000,
+    /* 0xca */ 0x0000000000000000, /* 0xcb */ 0x0000000000000000,
+    /* 0xcc */ 0x0000000000000000, /* 0xcd */ 0x0000000000000000,
+    /* 0xce */ 0x0000000000000000, /* 0xcf */ 0x0000000000000000,
+    /* 0xd0 */ 0x0000000000000000, /* 0xd1 */ 0x0000000000000000,
+    /* 0xd2 */ 0x0000000000000000, /* 0xd3 */ 0x0000000000000000,
+    /* 0xd4 */ 0x0000000000000000, /* 0xd5 */ 0x0000000000000000,
+    /* 0xd6 */ 0x0000000000000000, /* 0xd7 */ 0x0000000000000000,
+    /* 0xd8 */ 0x0000000000000000, /* 0xd9 */ 0x0000000000000000,
+    /* 0xda */ 0x0000000000000000, /* 0xdb */ 0x0000000000000000,
+    /* 0xdc */ 0x0000000000000000, /* 0xdd */ 0x0000000000000000,
+    /* 0xde */ 0x0000000000000000, /* 0xdf */ 0x0000000000000000,
+    /* 0xe0 */ 0x0000000000000000, /* 0xe1 */ 0x0000000000000000,
+    /* 0xe2 */ 0x0000000000000000, /* 0xe3 */ 0x0000000000000000,
+    /* 0xe4 */ 0x0000000000000000, /* 0xe5 */ 0x0000000000000000,
+    /* 0xe6 */ 0x0000000000000000, /* 0xe7 */ 0x0000000000000000,
+    /* 0xe8 */ 0x0000000000000000, /* 0xe9 */ 0x0000000000000000,
+    /* 0xea */ 0x0000000000000000, /* 0xeb */ 0x0000000000000000,
+    /* 0xec */ 0x0000000000000000, /* 0xed */ 0x0000000000000000,
+    /* 0xee */ 0x0000000000000000, /* 0xef */ 0x0000000000000000,
+    /* 0xf0 */ 0x0000000000000000, /* 0xf1 */ 0x0000000000000000,
+    /* 0xf2 */ 0x0000000000000000, /* 0xf3 */ 0x0000000000000000,
+    /* 0xf4 */ 0x0000000000000000, /* 0xf5 */ 0x0000000000000000,
+    /* 0xf6 */ 0x0000000000000000, /* 0xf7 */ 0x0000000000000000,
+    /* 0xf8 */ 0x0000000000000000, /* 0xf9 */ 0x0000000000000000,
+    /* 0xfa */ 0x0000000000000000, /* 0xfb */ 0x0000000000000000,
+    /* 0xfc */ 0x0000000000000000, /* 0xfd */ 0x0000000000000000,
+    /* 0xfe */ 0x0000000000000000, /* 0xff */ 0x0000000000000000
+#endif
+};
+
 // creates a Slice from Json and adds it to a scope
 Slice Slice::fromJson(SliceScope& scope, std::string const& json,
                       Options const* options) {

@@ -452,21 +715,26 @@ uint64_t Slice::normalizedHash(uint64_t seed) const {
   } else if (isArray()) {
     // normalize arrays by hashing array length and iterating
     // over all array members
-    uint64_t const n = length() ^ 0xba5bedf00d;
+    ArrayIterator it(*this);
+    uint64_t const n = it.size() ^ 0xba5bedf00d;
     value = VELOCYPACK_HASH(&n, sizeof(n), seed);
-    for (auto const& it : ArrayIterator(*this)) {
-      value ^= it.normalizedHash(value);
+    while (it.valid()) {
+      value ^= it.value().normalizedHash(value);
+      it.next();
     }
   } else if (isObject()) {
     // normalize objects by hashing object length and iterating
     // over all object members
-    uint64_t const n = length() ^ 0xf00ba44ba5;
+    ObjectIterator it(*this, true);
+    uint64_t const n = it.size() ^ 0xf00ba44ba5;
     uint64_t seed2 = VELOCYPACK_HASH(&n, sizeof(n), seed);
     value = seed2;
-    for (auto const& it : ObjectIterator(*this, true)) {
-      uint64_t seed3 = it.key.makeKey().normalizedHash(seed2);
+    while (it.valid()) {
+      auto current = (*it);
+      uint64_t seed3 = current.key.normalizedHash(seed2);
       value ^= seed3;
-      value ^= it.value.normalizedHash(seed3);
+      value ^= current.value.normalizedHash(seed3);
+      it.next();
     }
   } else {
     // fall back to regular hash function

@@ -12,7 +12,7 @@ There are two slightly different syntaxes for traversals in AQL, one for
 ### Working with named graphs

 ```
-[WITH collection1[, collection2[, ...collectionN]]]
+[WITH vertexCollection1[, vertexCollection2[, ...vertexCollectionN]]]
 FOR vertex[, edge[, path]]
   IN [min[..max]]
   OUTBOUND|INBOUND|ANY startVertex

@@ -21,7 +21,7 @@ FOR vertex[, edge[, path]]
 ```
 - `WITH`: optional for single server instances, but required for
   [graph traversals in a cluster](#graph-traversals-in-a-cluster).
-- **collections** (collection, *repeatable*): list of collections that will
+- **collections** (collection, *repeatable*): list of vertex collections that will
   be involved in the traversal
 - `FOR`: emits up to three variables:
   - **vertex** (object): the current vertex in a traversal

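As an illustrative aside (not part of the diff): a minimal named-graph traversal using this syntax could look as follows, where the vertex collection `places`, the start vertex `places/start`, and the graph name `myGraph` are all hypothetical:

```
WITH places
FOR vertex, edge, path
  IN 1..2
  OUTBOUND 'places/start'
  GRAPH 'myGraph'
  RETURN vertex
```
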
@@ -74,7 +74,7 @@ FOR vertex[, edge[, path]]
 ### Working with collection sets

 ```
-[WITH collection1[, collection2[, ...collectionN]]]
+[WITH vertexCollection1[, vertexCollection2[, ...vertexCollectionN]]]
 FOR vertex[, edge[, path]]
   IN [min[..max]]
   OUTBOUND|INBOUND|ANY startVertex

@@ -0,0 +1,19 @@
<!-- don't edit here, it's from https://@github.com/arangodb/kube-arangodb.git / docs/Manual/ -->
# Authentication

The ArangoDB Kubernetes Operator will by default create ArangoDB deployments
that require authentication to access the database.

It uses a single JWT secret (stored in a Kubernetes secret)
to provide *super-user* access between all servers of the deployment
as well as access from the ArangoDB Operator to the deployment.

To disable authentication, set `spec.auth.jwtSecretName` to `None`.
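As a sketch (not from the upstream page): instead of letting the operator generate the JWT secret, you can create one up front and reference it from `spec.auth.jwtSecretName`. The secret name `arangodb-jwt` is hypothetical, and the `token` data key is an assumption about kube-arangodb's secret layout:

```bash
# Hypothetical secret name; assumes the operator reads the JWT token
# from a data field named "token".
kubectl create secret generic arangodb-jwt \
  --from-literal=token="$(openssl rand -hex 32)"
```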
|
Initially the deployment is accessible through the web user-interface and
APIs, using the user `root` with an empty password.
Make sure to change this password immediately after starting the deployment!

## See also

- [Secure connections (TLS)](./Tls.md)

@@ -0,0 +1,208 @@
<!-- don't edit here, it's from https://@github.com/arangodb/kube-arangodb.git / docs/Manual/ -->
# ArangoDeploymentReplication Custom Resource

The ArangoDB Replication Operator creates and maintains ArangoDB
`arangosync` configurations in a Kubernetes cluster, given a replication specification.
This replication specification is a `CustomResource` following
a `CustomResourceDefinition` created by the operator.

Example minimal replication definition for two ArangoDB clusters with sync in the same Kubernetes cluster:

```yaml
apiVersion: "replication.database.arangodb.com/v1alpha"
kind: "ArangoDeploymentReplication"
metadata:
  name: "replication-from-a-to-b"
spec:
  source:
    deploymentName: cluster-a
    auth:
      keyfileSecretName: cluster-a-sync-auth
  destination:
    deploymentName: cluster-b
```

This definition results in:

- the arangosync `SyncMaster` in deployment `cluster-b` is called to configure a synchronization
  from the syncmasters in `cluster-a` to the syncmasters in `cluster-b`,
  using the client authentication certificate stored in `Secret` `cluster-a-sync-auth`.
  To access `cluster-a`, the JWT secret found in the deployment of `cluster-a` is used.
  To access `cluster-b`, the JWT secret found in the deployment of `cluster-b` is used.

Example replication definition for replicating from a source that is outside the current Kubernetes cluster
to a destination that is in the same Kubernetes cluster:

```yaml
apiVersion: "replication.database.arangodb.com/v1alpha"
kind: "ArangoDeploymentReplication"
metadata:
  name: "replication-from-a-to-b"
spec:
  source:
    masterEndpoint: ["https://163.172.149.229:31888", "https://51.15.225.110:31888", "https://51.15.229.133:31888"]
    auth:
      keyfileSecretName: cluster-a-sync-auth
    tls:
      caSecretName: cluster-a-sync-ca
  destination:
    deploymentName: cluster-b
```

This definition results in:

- the arangosync `SyncMaster` in deployment `cluster-b` is called to configure a synchronization
  from the syncmasters located at the given list of endpoint URLs to the syncmasters in `cluster-b`,
  using the client authentication certificate stored in `Secret` `cluster-a-sync-auth`.
  To access `cluster-a`, the keyfile (containing a client authentication certificate) is used.
  To access `cluster-b`, the JWT secret found in the deployment of `cluster-b` is used.

## Specification reference

Below you'll find all settings of the `ArangoDeploymentReplication` custom resource.

### `spec.source.deploymentName: string`

This setting specifies the name of an `ArangoDeployment` resource that runs a cluster
with sync enabled.

This cluster is configured as the replication source.

### `spec.source.masterEndpoint: []string`

This setting specifies zero or more master endpoint URLs of the source cluster.

Use this setting if the source cluster is not running inside a Kubernetes cluster
that is reachable from the Kubernetes cluster the `ArangoDeploymentReplication` resource is deployed in.

Specifying this setting and `spec.source.deploymentName` at the same time is not allowed.

### `spec.source.auth.keyfileSecretName: string`

This setting specifies the name of a `Secret` containing a client authentication certificate called `tls.keyfile` used to authenticate
with the SyncMaster at the specified source.

If `spec.source.auth.userSecretName` has not been set,
the client authentication certificate found in the secret with this name is also used to configure
the synchronization and fetch the synchronization status.

This setting is required.

### `spec.source.auth.userSecretName: string`

This setting specifies the name of a `Secret` containing a `username` & `password` used to authenticate
with the SyncMaster at the specified source in order to configure synchronization and fetch synchronization status.

The user identified by the username must have write access in the `_system` database of the source ArangoDB cluster.
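For illustration (the secret name and credentials are placeholders): since such a `Secret` must carry `username` and `password` fields, it can be created with plain `kubectl`:

```bash
kubectl create secret generic cluster-a-sync-user \
  --from-literal=username=replication-user \
  --from-literal=password='replace-with-a-strong-password'
```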
|
### `spec.source.tls.caSecretName: string`

This setting specifies the name of a `Secret` containing a TLS CA certificate `ca.crt` used to verify
the TLS connection created by the SyncMaster at the specified source.

This setting is required, unless `spec.source.deploymentName` has been set.

### `spec.destination.deploymentName: string`

This setting specifies the name of an `ArangoDeployment` resource that runs a cluster
with sync enabled.

This cluster is configured as the replication destination.

### `spec.destination.masterEndpoint: []string`

This setting specifies zero or more master endpoint URLs of the destination cluster.

Use this setting if the destination cluster is not running inside a Kubernetes cluster
that is reachable from the Kubernetes cluster the `ArangoDeploymentReplication` resource is deployed in.

Specifying this setting and `spec.destination.deploymentName` at the same time is not allowed.

### `spec.destination.auth.keyfileSecretName: string`

This setting specifies the name of a `Secret` containing a client authentication certificate called `tls.keyfile` used to authenticate
with the SyncMaster at the specified destination.

If `spec.destination.auth.userSecretName` has not been set,
the client authentication certificate found in the secret with this name is also used to configure
the synchronization and fetch the synchronization status.

This setting is required, unless `spec.destination.deploymentName` or `spec.destination.auth.userSecretName` has been set.

Specifying this setting and `spec.destination.auth.userSecretName` at the same time is not allowed.

### `spec.destination.auth.userSecretName: string`

This setting specifies the name of a `Secret` containing a `username` & `password` used to authenticate
with the SyncMaster at the specified destination in order to configure synchronization and fetch synchronization status.

The user identified by the username must have write access in the `_system` database of the destination ArangoDB cluster.

Specifying this setting and `spec.destination.auth.keyfileSecretName` at the same time is not allowed.

### `spec.destination.tls.caSecretName: string`

This setting specifies the name of a `Secret` containing a TLS CA certificate `ca.crt` used to verify
the TLS connection created by the SyncMaster at the specified destination.

This setting is required, unless `spec.destination.deploymentName` has been set.

## Authentication details

The authentication settings in an `ArangoDeploymentReplication` resource are used for two distinct purposes.

The first use is the authentication of the syncmasters at the destination with the syncmasters at the source.
This is always done using a client authentication certificate which is found in a `tls.keyfile` field
in a secret identified by `spec.source.auth.keyfileSecretName`.

The second use is the authentication of the ArangoDB Replication operator with the syncmasters at the source
or destination. These connections are made to configure synchronization, stop configuration and fetch the status
of the configuration.
The method used for this authentication is derived as follows (where `X` is either `source` or `destination`):

- If `spec.X.userSecretName` is set, the username + password found in the `Secret` identified by this name is used.
- If `spec.X.keyfileSecretName` is set, the client authentication certificate (keyfile) found in the `Secret` identified by this name is used.
- If `spec.X.deploymentName` is set, the JWT secret found in the deployment is used.

## Creating client authentication certificate keyfiles

The client authentication certificates needed for the `Secrets` identified by `spec.source.auth.keyfileSecretName` & `spec.destination.auth.keyfileSecretName`
are normal ArangoDB keyfiles that can be created by the `arangosync create client-auth keyfile` command.
In order to do so, you must have access to the client authentication CA of the source/destination.

If the client authentication CA at the source/destination also contains a private key (`ca.key`), the ArangoDeployment operator
can be used to create such a keyfile for you, without the need to have `arangosync` installed locally.
Read the following paragraphs for instructions on how to do that.

## Creating and using access packages

An access package is a YAML file that contains:

- A client authentication certificate, wrapped in a `Secret` in a `tls.keyfile` data field.
- A TLS certificate authority public key, wrapped in a `Secret` in a `ca.crt` data field.

The format of the access package is such that it can be inserted into a Kubernetes cluster using the standard `kubectl` tool.

To create an access package that can be used to authenticate with the ArangoDB SyncMasters of an `ArangoDeployment`,
add a name of a non-existing `Secret` to the `spec.sync.externalAccess.accessPackageSecretNames` field of the `ArangoDeployment`.
In response, a `Secret` is created in that Kubernetes cluster, with the given name, that contains an `accessPackage.yaml` data field
that contains a Kubernetes resource specification that can be inserted into the other Kubernetes cluster.

The process for creating and using an access package for authentication at the source cluster is as follows:

- Edit the `ArangoDeployment` resource of the source cluster, set `spec.sync.externalAccess.accessPackageSecretNames` to `["my-access-package"]`
- Wait for the `ArangoDeployment` operator to create a `Secret` named `my-access-package`.
- Extract the access package from the Kubernetes source cluster using:

  ```bash
  kubectl get secret my-access-package --template='{{index .data "accessPackage.yaml"}}' | base64 -D > accessPackage.yaml
  ```

- Insert the secrets found in the access package in the Kubernetes destination cluster using:

  ```bash
  kubectl apply -f accessPackage.yaml
  ```

As a result, the destination Kubernetes cluster will have 2 additional `Secrets`. One contains a client authentication certificate
formatted as a keyfile. Another contains the public key of the TLS CA certificate of the source cluster.
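To make the shape of an access package concrete, here is a sketch of what the resources inside `accessPackage.yaml` could look like, based on the description above; the secret names are placeholders and the exact layout produced by the operator may differ:

```yaml
# Sketch only: one Secret with the client authentication keyfile,
# one Secret with the TLS CA public key.
apiVersion: v1
kind: Secret
metadata:
  name: cluster-a-sync-auth   # placeholder
data:
  tls.keyfile: <base64-encoded-keyfile>
---
apiVersion: v1
kind: Secret
metadata:
  name: cluster-a-sync-ca     # placeholder
data:
  ca.crt: <base64-encoded-CA-certificate>
```
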
@@ -108,6 +108,23 @@ Possible values are:

 This setting cannot be changed after the cluster has been created.

+### `spec.downtimeAllowed: bool`
+
+This setting is used to allow automatic reconciliation actions that yield
+some downtime of the ArangoDB deployment.
+When this setting is set to `false` (the default), no automatic action that
+may result in downtime is allowed.
+If the need for such an action is detected, an event is added to the `ArangoDeployment`.
+
+Once this setting is set to `true`, the automatic action is executed.
+
+Operations that may result in downtime are:
+
+- Rotating TLS CA certificate
+
+Note: It is still possible that there is some downtime when the Kubernetes
+cluster is down, or in a bad state, irrespective of the value of this setting.
+
 ### `spec.rocksdb.encryption.keySecretName`

 This setting specifies the name of a kubernetes `Secret` that contains
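An aside on the `spec.downtimeAllowed` setting added above (a sketch; the deployment name is hypothetical): once you have reviewed the pending event, the flag can be flipped with a merge patch:

```bash
kubectl patch ArangoDeployment my-deployment --type=merge \
  -p '{"spec":{"downtimeAllowed":true}}'
```
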
@@ -206,17 +223,6 @@ replication in the cluster. When enabled, the cluster will contain
 a number of `syncmaster` & `syncworker` servers.
 The default value is `false`.

-### `spec.sync.image: string`
-
-This setting specifies the docker image to use for all ArangoSync servers.
-When not specified, the `spec.image` value is used.
-
-### `spec.sync.imagePullPolicy: string`
-
-This setting specifies the pull policy for the docker image to use for all ArangoSync servers.
-For possible values, see `spec.imagePullPolicy`.
-When not specified, the `spec.imagePullPolicy` value is used.
-
 ### `spec.sync.externalAccess.type: string`

 This setting specifies the type of `Service` that will be created to provide

@@ -253,6 +259,19 @@ If not set, this setting defaults to:
 - If `spec.sync.externalAccess.loadBalancerIP` is set, it defaults to `https://<load-balancer-ip>:<8629>`.
 - Otherwise it defaults to `https://<sync-service-dns-name>:<8629>`.

+### `spec.sync.externalAccess.accessPackageSecretNames: []string`
+
+This setting specifies the names of zero or more `Secrets` that will be created by the deployment
+operator containing "access packages". An access package contains those `Secrets` that are needed
+to access the SyncMasters of this `ArangoDeployment`.
+
+By removing a name from this setting, the corresponding `Secret` is also deleted.
+Note that to remove all access packages, leave an empty array in place (`[]`).
+Completely removing the setting results in not modifying the list.
+
+See [the `ArangoDeploymentReplication` specification](./DeploymentReplicationResource.md) for more information
+on access packages.
+
 ### `spec.sync.auth.jwtSecretName: string`

 This setting specifies the name of a kubernetes `Secret` that contains
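An aside on the `spec.sync.externalAccess.accessPackageSecretNames` setting added above (a sketch with hypothetical names): requesting a single access package from a sync-enabled deployment looks like this:

```yaml
apiVersion: "database.arangodb.com/v1alpha"
kind: "ArangoDeployment"
metadata:
  name: "cluster-a"
spec:
  mode: Cluster
  sync:
    enabled: true
    externalAccess:
      accessPackageSecretNames:
      - my-access-package
```
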
@@ -347,6 +366,14 @@ The default value is `8Gi`.
 This setting is not available for group `coordinators`, `syncmasters` & `syncworkers`
 because servers in these groups do not need persistent storage.

+### `spec.<group>.serviceAccountName: string`
+
+This setting specifies the `serviceAccountName` for the `Pods` created
+for each server of this group.
+
+Using an alternative `ServiceAccount` is a typical way to separate access rights.
+The ArangoDB deployments do not require any special rights.
+
 ### `spec.<group>.storageClassName: string`

 This setting specifies the `storageClass` for the `PersistentVolume`s created

@@ -0,0 +1,129 @@
<!-- don't edit here, it's from https://@github.com/arangodb/kube-arangodb.git / docs/Manual/ -->
# Configuring your driver for ArangoDB access

In this chapter you'll learn how to configure a driver for accessing
an ArangoDB deployment in Kubernetes.

The exact methods to configure a driver are specific to that driver.

## Database endpoint(s)

The endpoint(s) (or URLs) to communicate with is the most important
parameter you need to configure in your driver.

Finding the right endpoints depends on whether your client application is running in
the same Kubernetes cluster as the ArangoDB deployment or not.

### Client application in same Kubernetes cluster

If your client application is running in the same Kubernetes cluster as
the ArangoDB deployment, you should configure your driver to use the
following endpoint:

```text
https://<deployment-name>.<namespace>.svc:8529
```

Only if your deployment has set `spec.tls.caSecretName` to `None`, should
you use `http` instead of `https`.
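As a quick sanity check (a sketch; deployment and namespace names are hypothetical), you can probe this endpoint from any pod in the cluster; `/_api/version` is ArangoDB's version endpoint:

```bash
# -k skips certificate verification; see the TLS settings section below
# for verifying against the deployment's CA instead.
curl -k https://my-deployment.default.svc:8529/_api/version
```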
|
### Client application outside Kubernetes cluster

If your client application is running outside the Kubernetes cluster in which
the ArangoDB deployment is running, your driver endpoint depends on the
external-access configuration of your ArangoDB deployment.

If the external-access of the ArangoDB deployment is of type `LoadBalancer`,
then use the IP address of that `LoadBalancer` like this:

```text
https://<load-balancer-ip>:8529
```

If the external-access of the ArangoDB deployment is of type `NodePort`,
then use the IP address(es) of the `Nodes` of the Kubernetes cluster,
combined with the `NodePort` that is used by the external-access service.

For example:

```text
https://<kubernetes-node-1-ip>:30123
```

You can find the type of external-access by inspecting the external-access `Service`.
To do so, run the following command:

```bash
kubectl get service -n <namespace-of-deployment> <deployment-name>-ea
```

The output looks like this:

```bash
NAME                        TYPE           CLUSTER-IP      EXTERNAL-IP      PORT(S)          AGE   SELECTOR
example-simple-cluster-ea   LoadBalancer   10.106.175.38   192.168.10.208   8529:31890/TCP   1s    app=arangodb,arango_deployment=example-simple-cluster,role=coordinator
```

In this case the external-access is of type `LoadBalancer` with a load-balancer IP address
of `192.168.10.208`.
This results in an endpoint of `https://192.168.10.208:8529`.

## TLS settings

As mentioned before, the ArangoDB deployment managed by the ArangoDB operator
will use a secure (TLS) connection unless you set `spec.tls.caSecretName` to `None`
in your `ArangoDeployment`.

When using a secure connection, you can choose to verify the server certificates
provided by the ArangoDB servers or not.

If you want to verify these certificates, configure your driver with the CA certificate
found in a Kubernetes `Secret` found in the same namespace as the `ArangoDeployment`.

The name of this `Secret` is stored in the `spec.tls.caSecretName` setting of
the `ArangoDeployment`. If you don't set this setting explicitly, it will be
set automatically.

Then fetch the CA secret using the following command (or use a Kubernetes client library to fetch it):

```bash
kubectl get secret -n <namespace> <secret-name> --template='{{index .data "ca.crt"}}' | base64 -D > ca.crt
```

This results in a file called `ca.crt` containing a PEM encoded, x509 CA certificate.
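You can then inspect that certificate, or point a client at it; a sketch using standard tooling (the endpoint is hypothetical):

```bash
# Show the certificate's subject and validity window.
openssl x509 -in ca.crt -noout -subject -dates
# Verify the server against this CA instead of disabling verification.
curl --cacert ca.crt https://my-deployment.default.svc:8529/_api/version
```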
|
## Query requests

For most client requests made by a driver, it does not matter if there is any kind
of load-balancer between your client application and the ArangoDB deployment.

{% hint 'info' %}
Note that even a simple `Service` of type `ClusterIP` already behaves as a load-balancer.
{% endhint %}

The exception to this is cursor-related requests made to an ArangoDB `Cluster` deployment.
The coordinator that handles an initial query request (that results in a `Cursor`)
will save some in-memory state in that coordinator, if the result of the query
is too big to be transferred back in the response of the initial request.

Follow-up requests have to be made to fetch the remaining data.
These follow-up requests must be handled by the same coordinator to which the initial
request was made.
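To make this concrete, a sketch using ArangoDB's HTTP cursor API directly (the endpoint and collection name are hypothetical): `POST /_api/cursor` creates the cursor, and the follow-up `PUT /_api/cursor/<cursor-id>` that fetches the next batch must reach the same coordinator:

```bash
# Initial request: may leave cursor state behind on one coordinator.
curl -X POST https://<endpoint>:8529/_db/_system/_api/cursor \
  -d '{"query": "FOR doc IN collection RETURN doc", "batchSize": 100}'
# Follow-up request: must be answered by that same coordinator.
curl -X PUT https://<endpoint>:8529/_db/_system/_api/cursor/<cursor-id>
```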
|
As soon as there is a load-balancer between your client application and the ArangoDB cluster,
it is uncertain which coordinator will actually handle the follow-up request.

To resolve this uncertainty, make sure to run your client application in the same
Kubernetes cluster and synchronize your endpoints before making the
initial query request.
This will result in the use (by the driver) of internal DNS names of all coordinators.
A follow-up request can then be sent to exactly the same coordinator.

If your client application is running outside the Kubernetes cluster, this is much harder
to solve.
The easiest way to work around it is by making sure that the query results are small
enough.
When that is not feasible, it is also possible to resolve this
when the internal DNS names of your Kubernetes cluster are exposed to your client application
and the resulting IP addresses are routable from your client application.
To expose internal DNS names of your Kubernetes cluster, you can use [CoreDNS](https://coredns.io).

@@ -1,7 +1,22 @@
 <!-- don't edit here, it's from https://@github.com/arangodb/kube-arangodb.git / docs/Manual/ -->
 # ArangoDB Kubernetes Operator

-The ArangoDB Kubernetes Operator (`kube-arangodb`) is a set of two operators
-that you deploy in your Kubernetes cluster to manage deployments of the
-ArangoDB database and provide `PersistentVolumes` on local storage of your
-nodes for optimal storage performance.
+The ArangoDB Kubernetes Operator (`kube-arangodb`) is a set of operators
+that you deploy in your Kubernetes cluster to:
+
+- Manage deployments of the ArangoDB database
+- Provide `PersistentVolumes` on local storage of your nodes for optimal storage performance.
+- Configure ArangoDB Datacenter to Datacenter replication
+
+Each of these uses involves a different custom resource.
+
+- Use an [`ArangoDeployment` resource](./DeploymentResource.md) to
+  create an ArangoDB database deployment.
+- Use an [`ArangoLocalStorage` resource](./StorageResource.md) to
+  provide local `PersistentVolumes` for optimal I/O performance.
+- Use an [`ArangoDeploymentReplication` resource](./DeploymentReplicationResource.md) to
+  configure ArangoDB Datacenter to Datacenter replication.
+
+Continue with [Using the ArangoDB Kubernetes Operator](./Usage.md)
+to learn how to install the ArangoDB Kubernetes operator and create
+your first deployment.

@@ -40,18 +40,18 @@ If you want to create external access services manually, follow the instructions
 ### Single server

 For a single server deployment, the operator creates a single
-`Service` named `<cluster-name>`. This service has a normal cluster IP
+`Service` named `<deployment-name>`. This service has a normal cluster IP
 address.

 ### Full cluster

 For a full cluster deployment, the operator creates two `Services`.

-- `<cluster-name>_servers` a headless `Service` intended to provide
+- `<deployment-name>-int` a headless `Service` intended to provide
   DNS names for all pods created by the operator.
   It selects all ArangoDB & ArangoSync servers in the cluster.

-- `<cluster-name>` a normal `Service` that selects only the coordinators
+- `<deployment-name>` a normal `Service` that selects only the coordinators
   of the cluster. This `Service` is configured with `ClientIP` session
   affinity. This is needed for cursor requests, since they are bound to
   a specific coordinator.

@@ -59,7 +59,7 @@ For a full cluster deployment, the operator creates two `Services`.
 When the coordinators are asked to provide endpoints of the cluster
 (e.g. when calling `client.SynchronizeEndpoints()` in the go driver)
 the DNS names of the individual `Pods` will be returned
-(`<pod>.<cluster-name>_servers.<namespace>.svc`)
+(`<pod>.<deployment-name>-int.<namespace>.svc`)

 ### Full cluster with DC2DC

@@ -67,23 +67,26 @@ For a full cluster with datacenter replication deployment,
 the same `Services` are created as for a Full cluster, with the following
 additions:

-- `<cluster-name>_sync` a normal `Service` that selects only the syncmasters
+- `<deployment-name>-sync` a normal `Service` that selects only the syncmasters
   of the cluster.

 ## Load balancer

-To reach the ArangoDB servers from outside the Kubernetes cluster, you
-have to deploy additional services.
+If you want full control of the `Services` needed to access the ArangoDB deployment
+from outside your Kubernetes cluster, set `spec.externalAccess.Type` of the `ArangoDeployment` to `None`
+and create a `Service` as specified below.

-You can use `LoadBalancer` or `NodePort` services, depending on your
+Create a `Service` of type `LoadBalancer` or `NodePort`, depending on your
 Kubernetes deployment.

 This service should select:

-- `arangodb_cluster_name: <cluster-name>`
+- `arango_deployment: <deployment-name>`
 - `role: coordinator`

-For example:
+The following example yields a service of type `LoadBalancer` with a specific
+load balancer IP address.
+With this service, the ArangoDB cluster can now be reached on `https://1.2.3.4:8529`.

 ```yaml
 kind: Service

@@ -92,7 +95,27 @@ metadata:
   name: arangodb-cluster-exposed
 spec:
   selector:
-    arangodb_cluster_name: arangodb-cluster
+    arango_deployment: arangodb-cluster
     role: coordinator
   type: LoadBalancer
+  loadBalancerIP: 1.2.3.4
   ports:
+  - protocol: TCP
+    port: 8529
+    targetPort: 8529
+```
+
+The following example yields a service of type `NodePort` with the ArangoDB
+cluster exposed on port 30529 of all nodes of the Kubernetes cluster.
+
+```yaml
+kind: Service
+apiVersion: v1
+metadata:
+  name: arangodb-cluster-exposed
+spec:
+  selector:
+    arango_deployment: arangodb-cluster
+    role: coordinator
+  type: NodePort
+  ports:

@@ -11,6 +11,22 @@ In the `ArangoDeployment` resource, one can specify the type of storage
 used by groups of servers using the `spec.<group>.storageClassName`
 setting.

+This is an example of a `Cluster` deployment that stores its agent & dbserver
+data on `PersistentVolumes` that use the `my-local-ssd` `StorageClass`:
+
+```yaml
+apiVersion: "database.arangodb.com/v1alpha"
+kind: "ArangoDeployment"
+metadata:
+  name: "cluster-using-local-ssh"
+spec:
+  mode: Cluster
+  agents:
+    storageClassName: my-local-ssd
+  dbservers:
+    storageClassName: my-local-ssd
+```
+
 The amount of storage needed is configured using the
 `spec.<group>.resources.requests.storage` setting.

@@ -18,6 +34,22 @@ Note that configuring storage is done per group of servers.
 It is not possible to configure storage per individual
 server.

+This is an example of a `Cluster` deployment that requests volumes of 80GB
+for every dbserver, resulting in a total storage capacity of 240GB (with 3 dbservers).
+
+```yaml
+apiVersion: "database.arangodb.com/v1alpha"
+kind: "ArangoDeployment"
+metadata:
+  name: "cluster-using-local-ssh"
+spec:
+  mode: Cluster
+  dbservers:
+    resources:
+      requests:
+        storage: 80Gi
+```
+
 ## Local storage

 For optimal performance, ArangoDB should be configured with locally attached

@@ -27,6 +59,28 @@ The easiest way to accomplish this is to deploy an
 [`ArangoLocalStorage` resource](./StorageResource.md).
 The ArangoDB Storage Operator will use it to provide `PersistentVolumes` for you.

+This is an example of an `ArangoLocalStorage` resource that will result in
+`PersistentVolumes` created on any node of the Kubernetes cluster
+under the directory `/mnt/big-ssd-disk`.
+
+```yaml
+apiVersion: "storage.arangodb.com/v1alpha"
+kind: "ArangoLocalStorage"
+metadata:
+  name: "example-arangodb-storage"
+spec:
+  storageClass:
+    name: my-local-ssd
+  localPath:
+  - /mnt/big-ssd-disk
+```
+
+Note that using local storage requires `VolumeScheduling` to be enabled in your
+Kubernetes cluster. On Kubernetes 1.10 this is enabled by default, on version
+1.9 you have to enable it with a `--feature-gate` setting.
+
 ### Manually creating `PersistentVolumes`

 The alternative is to create `PersistentVolumes` manually, for all servers that
 need persistent storage (single, agents & dbservers).
 E.g. for a `Cluster` with 3 agents and 5 dbservers, you must create 8 volumes.

@@ -55,14 +109,14 @@ metadata:
     ]}
   }'
spec:
  capacity:
    storage: 100Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-ssd
  local:
    path: /mnt/disks/ssd1
```

For Kubernetes 1.9 and up, you should create a `StorageClass` which is configured

@@ -1,5 +1,5 @@
 <!-- don't edit here, it's from https://@github.com/arangodb/kube-arangodb.git / docs/Manual/ -->
-# TLS
+# Secure connections (TLS)

 The ArangoDB Kubernetes Operator will by default create ArangoDB deployments
 that use secure TLS connections.

@@ -24,7 +24,8 @@ kubectl get secret <deploy-name>-ca --template='{{index .data "ca.crt"}}' | base

 ### Windows

-TODO
+To install a CA certificate in Windows, follow the
+[procedure described here](http://wiki.cacert.org/HowTo/InstallCAcertRoots).

 ### MacOS

@@ -42,4 +43,13 @@ sudo /usr/bin/security remove-trusted-cert -d ca.crt

 ### Linux

-TODO
+To install a CA certificate in Linux, on Ubuntu, run:
+
+```bash
+sudo cp ca.crt /usr/local/share/ca-certificates/<some-name>.crt
+sudo update-ca-certificates
+```
+
+## See also
+
+- [Authentication](./Authentication.md)


@ -0,0 +1,116 @@
<!-- don't edit here, it's from https://@github.com/arangodb/kube-arangodb.git / docs/Manual/ -->
# Troubleshooting

While Kubernetes and the ArangoDB Kubernetes operator will automatically
resolve a lot of issues, there are always cases where human attention
is needed.

This chapter gives you tips & tricks to help you troubleshoot deployments.

## Where to look

In Kubernetes, all resources can be inspected with `kubectl`, using either
the `get` or the `describe` command.

To get all details of a resource (both specification & status),
run the following command:

```bash
kubectl get <resource-type> <resource-name> -n <namespace> -o yaml
```

For example, to get the entire specification and status
of an `ArangoDeployment` resource named `my-arango` in the `default` namespace,
run:

```bash
kubectl get ArangoDeployment my-arango -n default -o yaml
# or shorter
kubectl get arango my-arango -o yaml
```

Several types of resources (including all ArangoDB custom resources) support
events. These events show what happened to the resource over time.

To show the events (and most important resource data) of a resource,
run the following command:

```bash
kubectl describe <resource-type> <resource-name> -n <namespace>
```

## Getting logs

The logs of the containers run in Kubernetes are another invaluable
source of information.
These logs are accessible through the `Pods` that group these containers.

To fetch the logs of the default container running in a `Pod`, run:

```bash
kubectl logs <pod-name> -n <namespace>
# or with follow option to keep inspecting logs while they are written
kubectl logs <pod-name> -n <namespace> -f
```

To inspect the logs of a specific container in a `Pod`, add `-c <container-name>`.
You can find the names of the containers in the `Pod` using `kubectl describe pod ...`.
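
For instance (names are placeholders):

```bash
kubectl logs <pod-name> -n <namespace> -c <container-name>
```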

{% hint 'info' %}
Note that the ArangoDB operators are themselves deployed as a Kubernetes `Deployment`
with 2 replicas. This means that you will have to fetch the logs of the 2 `Pods` running
those replicas.
{% endhint %}

## What if

### The `Pods` of a deployment stay in `Pending` state

There are two common causes for this.

1) The `Pods` cannot be scheduled because there are not enough nodes available.
   This is usually only the case with a `spec.environment` setting that has a value of `Production`.

   Solution:
   Add more nodes.

1) There are no `PersistentVolumes` available to be bound to the `PersistentVolumeClaims`
   created by the operator.

   Solution:
   Use `kubectl get persistentvolumes` to inspect the available `PersistentVolumes`
   and if needed, use the [`ArangoLocalStorage` operator](./StorageResource.md) to provision `PersistentVolumes`.

### When restarting a `Node`, the `Pods` scheduled on that node remain in `Terminating` state

When a `Node` no longer makes regular calls to the Kubernetes API server, it is
marked as not available. Depending on specific settings in your `Pods`, Kubernetes
will at some point decide to terminate the `Pod`. As long as the `Node` is not
completely removed from the Kubernetes API server, Kubernetes will try to use
the `Node` itself to terminate the `Pod`.

The `ArangoDeployment` operator recognizes this condition and will try to replace those
`Pods` with `Pods` on different nodes. The exact behavior differs per type of server.

### What happens when a `Node` with local data is broken

When a `Node` that hosts `PersistentVolumes` is broken and
cannot be repaired, the data in those `PersistentVolumes` is lost.

If an `ArangoDeployment` of type `Single` was using one of those `PersistentVolumes`,
the database is lost and must be restored from a backup.

If an `ArangoDeployment` of type `ActiveFailover` or `Cluster` was using one of
those `PersistentVolumes`, it depends on the type of server that was using the volume.

- If an `Agent` was using the volume, it can be repaired as long as 2 other agents are still healthy.
- If a `DBServer` was using the volume, and the replication factor of all database
  collections is 2 or higher, and the remaining dbservers are still healthy,
  the cluster will duplicate the remaining replicas to
  bring the number of replicas back to the original number.
- If a `DBServer` was using the volume, and the replication factor of a database
  collection is 1 and happens to be stored on that dbserver, the data is lost.
- If a single server of an `ActiveFailover` deployment was using the volume, and the
  other single server is still healthy, the other single server will become leader.
  After replacing the failed single server, the new follower will synchronize with
  the leader.

@ -4,6 +4,8 @@
The ArangoDB Kubernetes Operator supports upgrading an ArangoDB deployment from
one version to the next.

## Upgrade an ArangoDB deployment

To upgrade a cluster, change the version by changing
the `spec.image` setting and then apply the updated
custom resource using:

@ -12,6 +14,21 @@ custom resource using:
kubectl apply -f yourCustomResourceFile.yaml
```
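
In the custom resource, this amounts to bumping the image tag; a minimal sketch (the deployment name and versions are illustrative):

```yaml
apiVersion: "database.arangodb.com/v1alpha"
kind: "ArangoDeployment"
metadata:
  name: "my-arangodb"
spec:
  mode: Cluster
  image: "arangodb/arangodb:3.3.10"  # was 3.3.9; bumping the tag triggers the upgrade
```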

The ArangoDB operator will perform a sequential upgrade
of all servers in your deployment. Only one server is upgraded
at a time.

For patch level upgrades (e.g. 3.3.9 to 3.3.10) each server
is stopped and restarted with the new version.

For minor level upgrades (e.g. 3.3.9 to 3.4.0) each server
is stopped, then the new version is started with `--database.auto-upgrade`
and once that has finished, the new version is started with the normal arguments.

The process for major level upgrades depends on the specific version.

## Upgrade the operator itself

To update the ArangoDB Kubernetes Operator itself to a new version,
update the image version of the deployment resource
and apply it using:

@ -19,3 +36,7 @@ and apply it using:
```bash
kubectl apply -f examples/yourUpdatedDeployment.yaml
```

## See also

- [Scaling](./Scaling.md)

@ -9,23 +9,31 @@ cluster first.
To do so, run (replace `<version>` with the version of the operator that you want to install):

```bash
kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/<version>/manifests/crd.yaml
kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/<version>/manifests/arango-deployment.yaml
export URLPREFIX=https://raw.githubusercontent.com/arangodb/kube-arangodb/<version>/manifests
kubectl apply -f $URLPREFIX/crd.yaml
kubectl apply -f $URLPREFIX/arango-deployment.yaml
```

To use `ArangoLocalStorage`, also run:
To use `ArangoLocalStorage` resources, also run:

```bash
kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/<version>/manifests/arango-storage.yaml
kubectl apply -f $URLPREFIX/arango-storage.yaml
```

To use `ArangoDeploymentReplication` resources, also run:

```bash
kubectl apply -f $URLPREFIX/arango-deployment-replication.yaml
```

You can find the latest release of the ArangoDB Kubernetes Operator
[in the kube-arangodb repository](https://github.com/arangodb/kube-arangodb/releases/latest).

## Cluster creation
## ArangoDB deployment creation

Once the operator is running, you can create your ArangoDB cluster
by creating a custom resource and deploying it.
Once the operator is running, you can create your ArangoDB database deployment
by creating an `ArangoDeployment` custom resource and deploying it into your
Kubernetes cluster.

For example (all examples can be found [in the kube-arangodb repository](https://github.com/arangodb/kube-arangodb/tree/master/examples)):


@ -33,9 +41,9 @@ For example (all examples can be found [in the kube-arangodb repository](https:/
kubectl apply -f examples/simple-cluster.yaml
```

## Cluster removal
## Deployment removal

To remove an existing cluster, delete the custom
To remove an existing ArangoDB deployment, delete the custom
resource. The operator will then delete all created resources.

For example:

@ -44,6 +52,10 @@ For example:
kubectl delete -f examples/simple-cluster.yaml
```

**Note that this will also delete all data in your ArangoDB deployment!**

If you want to keep your data, make sure to create a backup before removing the deployment.
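
One way to take such a backup, sketched here with a placeholder endpoint and output path, is to run `arangodump` against the deployment:

```bash
# endpoint, credentials and output path are placeholders; adjust to your deployment
arangodump \
  --server.endpoint ssl://my-arangodb.example.com:8529 \
  --server.username root \
  --output-directory /backups/my-arangodb
```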

## Operator removal

To remove the entire ArangoDB Kubernetes Operator, remove all

@ -51,6 +63,14 @@ clusters first and then remove the operator by running:

```bash
kubectl delete deployment arango-deployment-operator
# If `ArangoLocalStorage` is installed
# If `ArangoLocalStorage` operator is installed
kubectl delete deployment -n kube-system arango-storage-operator
# If `ArangoDeploymentReplication` operator is installed
kubectl delete deployment arango-deployment-replication-operator
```

## See also

- [Driver configuration](./DriverConfiguration.md)
- [Scaling](./Scaling.md)
- [Upgrading](./Upgrading.md)

@ -5,10 +5,8 @@ ArangoDB features a [Google S2](http://s2geometry.io/) based geospatial index.
We support indexing on a subset of the [**GeoJSON**](#geojson) standard
(as well as simple latitude longitude pairs).

AQL's geospatial utility functions are described in [Geo
functions](../../AQL/Functions/Geo.html). Helper functions to easily create
GeoJSON objects are described in [GeoJSON
Constructors](../../AQL/Functions/GeoConstructors.html).
AQL's geospatial functions and GeoJSON constructors are described in
[Geo functions](../../AQL/Functions/Geo.html).

Using a Geo-Spatial Index
-------------------------

@ -7,5 +7,5 @@ It supports parallel querying and batch requests.

Related blog posts:

- [Measuring ArangoDB insert performance](https://www.arangodb.com/2012/10/gain-factor-of-5-using-batch-updates/)
- [Gain factor of 5 using batch requests](https://www.arangodb.com/2013/11/measuring-arangodb-insert-performance/)
- [Measuring ArangoDB insert performance](https://www.arangodb.com/2013/11/measuring-arangodb-insert-performance/)
- [Gain factor of 5 using batch requests](https://www.arangodb.com/2012/10/gain-factor-of-5-using-batch-updates/)

@ -0,0 +1,116 @@
Arangoinspect Examples
======================

If you are asked by ArangoDB support to provide an inspector output, run
the _arangoinspect_ binary to generate a file in the current working folder.

The resulting JSON file is a collection of metadata acquired from all
involved instances. The data includes relevant operating system parameters,
ArangoDB process parameters, local database information etc.

{% hint 'warning' %}
Please open the file locally and check if it contains anything that you are
not allowed or not willing to share, and obfuscate it before sharing (user names,
file paths etc.).
{% endhint %}

Invoking Arangoinspect
----------------------

Point the tool to an ArangoDB endpoint. In case of a single server, there
is only one. You can connect to any node in case of a cluster (_DBServer_,
_Coordinator_, _Agent_).

    arangoinspect --server.endpoint tcp://127.0.0.1:8529

This will start the tool with a prompt for the JWT secret and try to connect
to the specified ArangoDB server. You have to type the same secret as is used for
the `arangod` option `--server.jwt-secret`. For non-cluster deployments,
you may authenticate with a user name and password instead:

    arangoinspect --server.ask-jwt-secret false --server.username "root" --server.password "foobar"

The password can be omitted and entered interactively.

Example outputs
---------------

If _arangoinspect_ succeeds in authenticating, it starts to gather information
and writes the result to `arangodb-inspector.json`, then exits:

```
arangoinspect --server.endpoint tcp://127.0.0.1:8629

Please specify the JWT secret:
Connected to ArangoDB 'http+tcp://127.0.0.1:8629' version: 3.4.devel [server], database: '_system', username: 'root'

 _ ___ _
/ \ _ __ __ _ _ __ __ _ ___ |_ _|_ __ ___ _ __ ___ ___| |_ ___ _ __
/ _ \ | '__/ _` | '_ \ / _` |/ _ \ | || '_ \/ __| '_ \ / _ \/ __| __/ _ \| '__|
/ ___ \| | | (_| | | | | (_| | (_) | | || | | \__ \ |_) | __/ (__| || (_) | |
/_/ \_\_| \__,_|_| |_|\__, |\___/ |___|_| |_|___/ .__/ \___|\___|\__\___/|_|
|___/ |_|

2018-06-05T19:40:10Z [19858] INFO Connected to ArangoDB 'http+tcp://[::1]:4001', version 3.4.devel [server], database '_system', username: 'root'
2018-06-05T19:40:10Z [19858] INFO Connected to ArangoDB 'http+tcp://[::1]:4001', version 3.4.devel [server], database '_system', username: 'root'
INFO changing endpoint for AGNT-01e83a4b-8a51-4919-9f50-ff640accb9fa from http+tcp://[::1]:4001 to tcp://[::1]:4001
INFO changing endpoint for PRMR-9f5b337e-c1de-4b7d-986a-d6ad2eb8f857 from tcp://127.0.0.1:8629 to tcp://[::1]:8629
INFO Analysing agency dump ...
INFO Plan (version 22)
INFO Databases
INFO _system
INFO Collections
INFO _system
INFO _graphs
INFO _users
INFO _modules
INFO _iresearch_analyzers
INFO _routing
INFO _aqlfunctions
INFO _frontend
INFO _queues
INFO _jobs
INFO _apps
INFO _appbundles
INFO _statisticsRaw
INFO _statistics
INFO _statistics15
INFO Server health
INFO DB Servers
INFO PRMR-9f5b337e-c1de-4b7d-986a-d6ad2eb8f857(DBServer0001)
INFO PRMR-90ff8c20-b0f3-49c5-a5dd-7b186bb7db33(DBServer0002)
INFO Coordinators
INFO CRDN-0dbf16ec-8a06-4203-9359-447d97757b4e(Coordinator0001)
INFO Supervision activity
INFO Jobs: undefined(To do: 0, Pending: 0, Finished: 0, Failed: 0)
INFO Summary
INFO 1 databases
INFO 14 collections
INFO 14 shards
INFO ... agency analysis finished.
INFO Collecting diagnostics from all servers ...
2018-06-05T19:40:10Z [19858] INFO Connected to ArangoDB 'http+tcp://[::1]:8629', version 3.4.devel [server], database '_system', username: 'root'
2018-06-05T19:40:11Z [19858] INFO Connected to ArangoDB 'http+tcp://[::1]:4001', version 3.4.devel [server], database '_system', username: 'root'
2018-06-05T19:40:11Z [19858] INFO Connected to ArangoDB 'http+tcp://[::1]:8630', version 3.4.devel [server], database '_system', username: 'root'
2018-06-05T19:40:11Z [19858] INFO Connected to ArangoDB 'http+tcp://[::1]:8530', version 3.4.devel [server], database '_system', username: 'root'
2018-06-05T19:40:11Z [19858] INFO Connected to ArangoDB 'http+tcp://[::1]:4001', version 3.4.devel [server], database '_system', username: 'root'
INFO ... dignostics collected.
INFO Report written to arango-inspector.json.
```

If _arangoinspect_ cannot connect or authentication/authorization fails, then a fatal error
will be raised and the tool shuts down:

```
Could not connect to endpoint 'http+tcp://127.0.0.1:8529', database: '_system', username: 'root'
Error message: '401: Unauthorized'

 _ ___ _
/ \ _ __ __ _ _ __ __ _ ___ |_ _|_ __ ___ _ __ ___ ___| |_ ___ _ __
/ _ \ | '__/ _` | '_ \ / _` |/ _ \ | || '_ \/ __| '_ \ / _ \/ __| __/ _ \| '__|
/ ___ \| | | (_| | | | | (_| | (_) | | || | | \__ \ |_) | __/ (__| || (_) | |
/_/ \_\_| \__,_|_| |_|\__, |\___/ |___|_| |_|___/ .__/ \___|\___|\__\___/|_|
|___/ |_|

FATAL cannot connect to server 'http+tcp://127.0.0.1:8529': 401: Unauthorized
```

@ -0,0 +1,6 @@
Arangoinspect Options
=====================

Usage: `arangoinspect [<options>]`

@startDocuBlock program_options_arangoinspect

@ -0,0 +1,7 @@
Arangoinspect
=============

_Arangoinspect_ is a command-line client tool that collects information about any
ArangoDB server setup to facilitate troubleshooting for the ArangoDB support.

The tool is available starting from ArangoDB v3.3.11.

@ -14,6 +14,7 @@ The full ArangoDB package ships with the following programs and tools:
| `arangoexport` | [Bulk exporter](Arangoexport/README.md) for the ArangoDB server. It supports JSON, CSV and XML.
| `arango-dfdb` | [Datafile debugger](Arango-dfdb/README.md) for ArangoDB (MMFiles storage engine only).
| `arangobench` | [Benchmark and test tool](Arangobench/README.md). It can be used for performance and server function testing.
| `arangoinspect` | [Inspection tool](Arangoinspect/README.md) that gathers server setup information.
| `arangovpack` | Utility to convert [VelocyPack](https://github.com/arangodb/velocypack) data to JSON.

The client package comes with a subset of programs and tools:

@ -24,6 +25,7 @@ The client package comes with a subset of programs and tools:
- arangodump
- arangorestore
- arangobench
- arangoinspect
- arangovpack

Additional tools which are available separately:

@ -21,6 +21,7 @@
* [Datacenter to datacenter Replication](Tutorials/DC2DC/README.md)
# https://@github.com/arangodb/kube-arangodb.git;kube-arangodb;docs/Manual;;/
* [Kubernetes](Tutorials/Kubernetes/README.md)
* [Datacenter to datacenter Replication on Kubernetes](Tutorials/Kubernetes/DC2DC.md)
* [Programs & Tools](Programs/README.md)
* [ArangoDB Server](Programs/Arangod/README.md)
* [Options](Programs/Arangod/Options.md)

@ -59,6 +60,9 @@
* [Arangobench](Programs/Arangobench/README.md)
* [Examples](Programs/Arangobench/Examples.md)
* [Options](Programs/Arangobench/Options.md)
* [Arangoinspect](Programs/Arangoinspect/README.md)
* [Examples](Programs/Arangoinspect/Examples.md)
* [Options](Programs/Arangoinspect/Options.md)
* [Datafile Debugger](Programs/Arango-dfdb/README.md)
* [Examples](Programs/Arango-dfdb/Examples.md)
# https://@github.com/arangodb/foxx-cli.git;foxx-cli;docs/Manual;;/

@ -223,15 +227,19 @@
# https://@github.com/arangodb/kube-arangodb.git;kube-arangodb;docs/Manual;;/
* [Kubernetes](Deployment/Kubernetes/README.md)
* [Using the Operator](Deployment/Kubernetes/Usage.md)
* [Deployment Resource](Deployment/Kubernetes/DeploymentResource.md)
* [Deployment Resource Reference](Deployment/Kubernetes/DeploymentResource.md)
* [Driver Configuration](Deployment/Kubernetes/DriverConfiguration.md)
* [Authentication](Deployment/Kubernetes/Authentication.md)
* [Scaling](Deployment/Kubernetes/Scaling.md)
* [Upgrading](Deployment/Kubernetes/Upgrading.md)
* [ArangoDB Configuration & Secrets](Deployment/Kubernetes/ConfigAndSecrets.md)
* [Metrics](Deployment/Kubernetes/Metrics.md)
* [Scaling](Deployment/Kubernetes/Scaling.md)
* [Services & Load balancer](Deployment/Kubernetes/ServicesAndLoadBalancer.md)
* [Deployment Replication Resource Reference](Deployment/Kubernetes/DeploymentReplicationResource.md)
* [Storage](Deployment/Kubernetes/Storage.md)
* [Storage Resource](Deployment/Kubernetes/StorageResource.md)
* [TLS](Deployment/Kubernetes/Tls.md)
* [Upgrading](Deployment/Kubernetes/Upgrading.md)
* [Troubleshooting](Deployment/Kubernetes/Troubleshooting.md)
* [Administration](Administration/README.md)
* [Backup & Restore](Administration/BackupRestore.md)
* [Import & Export](Administration/ImportExport.md)

@ -0,0 +1,138 @@
<!-- don't edit here, it's from https://@github.com/arangodb/kube-arangodb.git / docs/Manual/ -->
# Start ArangoDB Cluster to Cluster Synchronization on Kubernetes

This tutorial guides you through the steps needed to configure
an ArangoDB datacenter to datacenter replication between two ArangoDB
clusters running in Kubernetes.

## Requirements

1. This tutorial assumes that you have 2 ArangoDB clusters running in 2 different Kubernetes clusters.
1. Both Kubernetes clusters are equipped with support for `Services` of type `LoadBalancer`.
1. You can create (global) DNS names for configured `Services` with low propagation times. E.g. use Cloudflare.
1. You have 4 DNS names available:
   - One for the database in the source ArangoDB cluster. E.g. `src-db.mycompany.com`
   - One for the ArangoDB syncmasters in the source ArangoDB cluster. E.g. `src-sync.mycompany.com`
   - One for the database in the destination ArangoDB cluster. E.g. `dst-db.mycompany.com`
   - One for the ArangoDB syncmasters in the destination ArangoDB cluster. E.g. `dst-sync.mycompany.com`

## Step 1: Enable Datacenter Replication Support on source ArangoDB cluster

Set your current Kubernetes context to the Kubernetes source cluster.

Edit the `ArangoDeployment` of the source ArangoDB cluster.

Set (see the sketch after this list):

- `spec.tls.altNames` to `["src-db.mycompany.com"]` (can include more names / IP addresses)
- `spec.sync.enabled` to `true`
- `spec.sync.externalAccess.masterEndpoint` to `["https://src-sync.mycompany.com:8629"]`
- `spec.sync.externalAccess.accessPackageSecretNames` to `["src-accesspackage"]`
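
Put together, the relevant fragment of the source deployment's spec would look roughly like this (placeholder names as above):

```yaml
spec:
  tls:
    altNames: ["src-db.mycompany.com"]
  sync:
    enabled: true
    externalAccess:
      masterEndpoint: ["https://src-sync.mycompany.com:8629"]
      accessPackageSecretNames: ["src-accesspackage"]
```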

## Step 2: Extract access-package from source ArangoDB cluster

Run:

```bash
kubectl get secret src-accesspackage --template='{{index .data "accessPackage.yaml"}}' | \
  base64 -D > accessPackage.yaml
```

## Step 3: Configure source DNS names

Run:

```bash
kubectl get service
```

Find the IP address contained in the `LoadBalancer` column for the following `Services`:

- `<deployment-name>-ea` Use this IP address for the `src-db.mycompany.com` DNS name.
- `<deployment-name>-sync` Use this IP address for the `src-sync.mycompany.com` DNS name.

The process for configuring DNS names is specific to each DNS provider.

## Step 4: Enable Datacenter Replication Support on destination ArangoDB cluster

Set your current Kubernetes context to the Kubernetes destination cluster.

Edit the `ArangoDeployment` of the destination ArangoDB cluster.

Set (see the sketch after this list):

- `spec.tls.altNames` to `["dst-db.mycompany.com"]` (can include more names / IP addresses)
- `spec.sync.enabled` to `true`
- `spec.sync.externalAccess.masterEndpoint` to `["https://dst-sync.mycompany.com:8629"]`
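
Analogously, the destination deployment's spec fragment (placeholders as above; there is no access package secret on this side):

```yaml
spec:
  tls:
    altNames: ["dst-db.mycompany.com"]
  sync:
    enabled: true
    externalAccess:
      masterEndpoint: ["https://dst-sync.mycompany.com:8629"]
```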

## Step 5: Import access package in destination cluster

Run:

```bash
kubectl apply -f accessPackage.yaml
```

Note: This imports two `Secrets`, containing TLS information about the source cluster,
into the destination cluster.

## Step 6: Configure destination DNS names

Run:

```bash
kubectl get service
```

Find the IP address contained in the `LoadBalancer` column for the following `Services`:

- `<deployment-name>-ea` Use this IP address for the `dst-db.mycompany.com` DNS name.
- `<deployment-name>-sync` Use this IP address for the `dst-sync.mycompany.com` DNS name.

The process for configuring DNS names is specific to each DNS provider.

## Step 7: Create an `ArangoDeploymentReplication` resource

Create a yaml file (e.g. called `src-to-dst-repl.yaml`) with the following content:

```yaml
apiVersion: "replication.database.arangodb.com/v1alpha"
kind: "ArangoDeploymentReplication"
metadata:
  name: "replication-src-to-dst"
spec:
  source:
    masterEndpoint: ["https://src-sync.mycompany.com:8629"]
    auth:
      keyfileSecretName: src-accesspackage-auth
    tls:
      caSecretName: src-accesspackage-ca
  destination:
    deploymentName: <dst-deployment-name>
```

## Step 8: Wait for DNS names to propagate

Wait until the DNS names configured in steps 3 and 6 resolve to their configured
IP addresses.

Depending on your DNS provider, this can take a few minutes up to 24 hours.

## Step 9: Activate replication

Run:

```bash
kubectl apply -f src-to-dst-repl.yaml
```

Replication from the source cluster to the destination cluster will now be configured.

Check the status of the replication by inspecting the status of the `ArangoDeploymentReplication` resource using:

```bash
kubectl describe ArangoDeploymentReplication replication-src-to-dst
```

As soon as the replication is configured, the `Add collection` button in the `Collections`
page of the web UI (of the destination cluster) will be grayed out.

@ -0,0 +1,423 @@
{
  "check-configuration" : {
    "default" : false,
    "description" : "check the configuration and exit",
    "hidden" : true,
    "section" : "",
    "type" : "boolean"
  },
  "config" : {
    "default" : "",
    "description" : "the configuration file or 'none'",
    "hidden" : true,
    "section" : "",
    "type" : "string"
  },
  "configuration" : {
    "default" : "",
    "description" : "the configuration file or 'none'",
    "hidden" : false,
    "section" : "",
    "type" : "string"
  },
  "console.audit-file" : {
    "default" : "",
    "description" : "audit log file to save commands and results",
    "hidden" : false,
    "section" : "console",
    "type" : "string"
  },
  "console.auto-complete" : {
    "default" : true,
    "description" : "enable auto completion",
    "hidden" : false,
    "section" : "console",
    "type" : "boolean"
  },
  "console.colors" : {
    "default" : true,
    "description" : "enable color support",
    "hidden" : false,
    "section" : "console",
    "type" : "boolean"
  },
  "console.pager" : {
    "default" : false,
    "description" : "enable paging",
    "hidden" : false,
    "section" : "console",
    "type" : "boolean"
  },
  "console.pager-command" : {
    "default" : "less -X -R -F -L",
    "description" : "pager command",
    "hidden" : true,
    "section" : "console",
    "type" : "string"
  },
  "console.pretty-print" : {
    "default" : true,
    "description" : "enable pretty printing",
    "hidden" : false,
    "section" : "console",
    "type" : "boolean"
  },
  "console.prompt" : {
    "default" : "%E@%d> ",
    "description" : "prompt used in REPL. prompt components are: '%t': current time as timestamp, '%p': duration of last command in seconds, '%d': name of current database, '%e': current endpoint, '%E': current endpoint without protocol, '%u': current user",
    "hidden" : false,
    "section" : "console",
    "type" : "string"
  },
  "default-language" : {
    "default" : "",
    "description" : "ISO-639 language code",
    "hidden" : true,
    "section" : "",
    "type" : "string"
  },
  "define" : {
    "default" : [
    ],
    "description" : "define key=value for a @key@ entry in config file",
    "hidden" : true,
    "section" : "",
    "type" : "string..."
  },
  "dump-dependencies" : {
    "default" : false,
    "description" : "dump dependency graph",
    "hidden" : true,
    "section" : "",
    "type" : "boolean"
  },
  "dump-options" : {
    "default" : true,
    "description" : "dump configuration options in JSON format",
    "hidden" : true,
    "section" : "",
    "type" : "boolean"
  },
  "javascript.check-syntax" : {
    "default" : [
    ],
    "description" : "syntax check code Javascript code from file",
    "hidden" : false,
    "section" : "javascript",
    "type" : "string..."
  },
  "javascript.client-module" : {
    "default" : "inspector.js",
    "description" : "client module to use at startup",
    "hidden" : true,
    "section" : "javascript",
    "type" : "string"
  },
  "javascript.current-module-directory" : {
    "default" : true,
    "description" : "add current directory to module path",
    "hidden" : false,
    "section" : "javascript",
    "type" : "boolean"
  },
  "javascript.execute" : {
    "default" : [
    ],
    "description" : "execute Javascript code from file",
    "hidden" : false,
    "section" : "javascript",
    "type" : "string..."
  },
  "javascript.execute-string" : {
    "default" : [
    ],
    "description" : "execute Javascript code from string",
    "hidden" : false,
    "section" : "javascript",
    "type" : "string..."
  },
  "javascript.gc-interval" : {
    "default" : 50,
    "description" : "request-based garbage collection interval (each n.th commands)",
    "hidden" : false,
    "section" : "javascript",
    "type" : "uint64"
  },
  "javascript.module-directory" : {
    "default" : [
    ],
    "description" : "additional paths containing JavaScript modules",
    "hidden" : true,
    "section" : "javascript",
    "type" : "string..."
  },
  "javascript.startup-directory" : {
    "default" : "./js",
    "description" : "startup paths containing the Javascript files",
    "hidden" : true,
    "section" : "javascript",
    "type" : "string"
  },
  "javascript.unit-tests" : {
    "default" : [
    ],
    "description" : "do not start as shell, run unit tests instead",
    "hidden" : false,
    "section" : "javascript",
    "type" : "string..."
  },
  "javascript.v8-max-heap" : {
    "default" : 3072,
    "description" : "maximal heap size (in MB)",
    "hidden" : false,
    "section" : "javascript",
    "type" : "uint64"
  },
  "javascript.v8-options" : {
    "default" : [
    ],
    "description" : "options to pass to v8",
    "hidden" : true,
    "section" : "javascript",
    "type" : "string..."
  },
  "jslint" : {
    "default" : [
    ],
    "description" : "do not start as shell, run jslint instead",
    "hidden" : false,
    "section" : "",
    "type" : "string..."
  },
  "log" : {
    "default" : [
      "info"
    ],
    "description" : "the global or topic-specific log level",
    "hidden" : true,
    "section" : "",
    "type" : "string..."
  },
  "log.color" : {
    "default" : true,
    "description" : "use colors for TTY logging",
    "hidden" : false,
    "section" : "log",
    "type" : "boolean"
  },
  "log.escape" : {
    "default" : true,
    "description" : "escape characters when logging",
    "hidden" : false,
    "section" : "log",
    "type" : "boolean"
  },
  "log.file" : {
    "default" : "-",
    "description" : "shortcut for '--log.output file://<filename>'",
    "hidden" : true,
    "section" : "log",
    "type" : "string"
  },
  "log.force-direct" : {
    "default" : false,
    "description" : "do not start a seperate thread for logging",
    "hidden" : true,
    "section" : "log",
    "type" : "boolean"
  },
  "log.foreground-tty" : {
    "default" : false,
    "description" : "also log to tty if backgrounded",
    "hidden" : true,
    "section" : "log",
    "type" : "boolean"
  },
  "log.keep-logrotate" : {
    "default" : false,
    "description" : "keep the old log file after receiving a sighup",
    "hidden" : true,
    "section" : "log",
    "type" : "boolean"
  },
  "log.level" : {
    "default" : [
      "info"
    ],
    "description" : "the global or topic-specific log level",
    "hidden" : false,
    "section" : "log",
    "type" : "string..."
  },
  "log.line-number" : {
    "default" : false,
    "description" : "append line number and file name",
    "hidden" : true,
    "section" : "log",
    "type" : "boolean"
  },
  "log.output" : {
    "default" : [
    ],
    "description" : "log destination(s)",
    "hidden" : false,
    "section" : "log",
    "type" : "string..."
  },
  "log.performance" : {
    "default" : false,
    "description" : "shortcut for '--log.level performance=trace'",
    "hidden" : true,
    "section" : "log",
    "type" : "boolean"
  },
  "log.prefix" : {
    "default" : "",
    "description" : "prefix log message with this string",
    "hidden" : true,
    "section" : "log",
    "type" : "string"
  },
  "log.role" : {
    "default" : false,
    "description" : "log server role",
    "hidden" : false,
    "section" : "log",
    "type" : "boolean"
  },
  "log.shorten-filenames" : {
    "default" : true,
    "description" : "shorten filenames in log output (use with --log.line-number)",
    "hidden" : true,
    "section" : "log",
    "type" : "boolean"
  },
  "log.thread" : {
    "default" : false,
    "description" : "show thread identifier in log message",
    "hidden" : true,
    "section" : "log",
    "type" : "boolean"
  },
  "log.thread-name" : {
    "default" : false,
    "description" : "show thread name in log message",
    "hidden" : true,
    "section" : "log",
    "type" : "boolean"
  },
  "log.use-local-time" : {
    "default" : false,
    "description" : "use local timezone instead of UTC",
    "hidden" : false,
    "section" : "log",
    "type" : "boolean"
  },
  "log.use-microtime" : {
    "default" : false,
    "description" : "use microtime instead",
    "hidden" : false,
    "section" : "log",
    "type" : "boolean"
  },
  "quiet" : {
    "default" : false,
    "description" : "silent startup",
    "hidden" : false,
    "section" : "",
    "type" : "boolean"
  },
  "random.generator" : {
    "default" : 1,
    "description" : "random number generator to use (1 = MERSENNE, 2 = RANDOM, 3 = URANDOM, 4 = COMBINED (not for Windows), 5 = WinCrypt (Windows only)",
    "hidden" : true,
    "section" : "random",
    "type" : "uint32",
    "values" : "Possible values: 1, 2, 3, 4"
  },
  "server.ask-jwt-secret" : {
    "default" : true,
    "description" : "if this option is specified, the user will be prompted for a JWT secret. This option is not compatible with --server.username or --server.password. If specified, it will be used for all connections - even when a new connection to another server is created",
    "hidden" : true,
    "section" : "server",
    "type" : "boolean"
  },
  "server.authentication" : {
    "default" : false,
    "description" : "require authentication credentials when connecting (does not affect the server-side authentication settings)",
    "hidden" : false,
    "section" : "server",
    "type" : "boolean"
  },
  "server.connection-timeout" : {
    "default" : 5,
    "description" : "connection timeout in seconds",
    "hidden" : false,
    "section" : "server",
    "type" : "double"
  },
  "server.database" : {
    "default" : "_system",
    "description" : "database name to use when connecting",
    "hidden" : false,
    "section" : "server",
    "type" : "string"
  },
  "server.endpoint" : {
    "default" : "http+tcp://127.0.0.1:8529",
    "description" : "endpoint to connect to, use 'none' to start without a server",
    "hidden" : false,
    "section" : "server",
    "type" : "string"
  },
  "server.max-packet-size" : {
    "default" : 134217728,
    "description" : "maximum packet size (in bytes) for client/server communication",
    "hidden" : true,
    "section" : "server",
    "type" : "uint64"
  },
  "server.password" : {
    "default" : "",
    "description" : "password to use when connecting. If not specified and authentication is required, the user will be prompted for a password",
    "hidden" : false,
    "section" : "server",
    "type" : "string"
  },
  "server.request-timeout" : {
    "default" : 1200,
    "description" : "request timeout in seconds",
    "hidden" : false,
    "section" : "server",
    "type" : "double"
  },
  "server.username" : {
    "default" : "root",
    "description" : "username to use when connecting",
    "hidden" : false,
    "section" : "server",
    "type" : "string"
  },
  "ssl.protocol" : {
    "default" : 5,
    "description" : "ssl protocol (1 = SSLv2, 2 = SSLv2 or SSLv3 (negotiated), 3 = SSLv3, 4 = TLSv1, 5 = TLSV1.2)",
    "hidden" : false,
    "section" : "ssl",
    "type" : "uint64",
    "values" : "Possible values: 1, 2, 3, 4, 5"
  },
  "temp.path" : {
    "default" : "",
    "description" : "path for temporary files",
    "hidden" : false,
    "section" : "temp",
    "type" : "string"
  },
  "version" : {
    "default" : false,
    "description" : "reports the version and exits",
    "hidden" : false,
    "section" : "",
    "type" : "boolean"
  }
}

@ -34,6 +34,8 @@
#include "VocBase/AccessMode.h"

#include <functional>
#include <iterator>
#include <vector>

namespace arangodb {
namespace velocypack {

@ -274,7 +276,12 @@ class Ast {
  AstNode* createNodeAttributeAccess(AstNode const*, std::vector<std::string> const&);
  AstNode* createNodeAttributeAccess(AstNode const* node, std::vector<basics::AttributeName> const& attrs) {
    std::vector<std::string> vec;  // change to std::string_view once available
    std::transform(attrs.begin(), attrs.end(), std::back_inserter(vec), +[](basics::AttributeName const& a) { return a.name; });
    std::transform(attrs.begin(),
                   attrs.end(),
                   std::back_inserter(vec),
                   [](basics::AttributeName const& a) {
                     return a.name;
                   });
    return createNodeAttributeAccess(node, vec);
  }

@ -3421,8 +3421,11 @@ void arangodb::aql::collectInClusterRule(Optimizer* opt,
  SmallVector<ExecutionNode*>::allocator_type::arena_type a;
  SmallVector<ExecutionNode*> nodes{a};
  plan->findNodesOfType(nodes, EN::COLLECT, true);

  std::unordered_set<Variable const*> allUsed;

  for (auto& node : nodes) {
    allUsed.clear();
    auto used = node->getVariablesUsedHere();

    // found a node we need to replace in the plan

@ -3437,6 +3440,16 @@ void arangodb::aql::collectInClusterRule(Optimizer* opt,

    while (current != nullptr) {
      bool eligible = true;

      // check if any of the nodes we pass use a variable that will not be
      // available after we insert a new COLLECT on top of it (note: COLLECT
      // will eliminate all variables from the scope but its own)
      for (auto const& it : current->getVariablesUsedHere()) {
        if (current->getType() != EN::GATHER) {
          // Gather nodes are taken care of separately below
          allUsed.emplace(it);
        }
      }

      for (auto const& it : current->getVariablesSetHere()) {
        if (std::find(used.begin(), used.end(), it) != used.end()) {

@ -3464,6 +3477,18 @@ void arangodb::aql::collectInClusterRule(Optimizer* opt,
      }

      if (previous != nullptr) {
        for (auto const& otherVariable : allUsed) {
          auto const setHere = collectNode->getVariablesSetHere();
          if (std::find(setHere.begin(), setHere.end(), otherVariable) == setHere.end()) {
            eligible = false;
            break;
          }
        }

        if (!eligible) {
          break;
        }

        bool removeGatherNodeSort = false;

        if (collectNode->aggregationMethod() == CollectOptions::CollectMethod::COUNT) {

@ -416,6 +416,7 @@ void Query::prepare(QueryRegistry* registry, uint64_t queryHash) {
  enterState(QueryExecutionState::ValueType::EXECUTION);

  TRI_ASSERT(_engine == nullptr);
  TRI_ASSERT(_trx != nullptr);
  // note that the engine returned here may already be present in our
  // own _engine attribute (the instantiation procedure may modify us
  // by calling our engine(ExecutionEngine*) function)

@ -1297,6 +1298,9 @@ void Query::cleanupPlanAndEngine(int errorCode, VPackBuilder* statsBuilder) {

/// @brief create a transaction::Context
std::shared_ptr<transaction::Context> Query::createTransactionContext() {
  if (_transactionContext) {
    return _transactionContext;
  }
  if (_contextOwnedByExterior) {
    // we must use v8
    return transaction::V8Context::Create(_vocbase, true);

@ -111,6 +111,11 @@ class Query {
    _trx = trx;
    init();
  }

  /// @brief inject a transaction context to use
  void setTransactionContext(std::shared_ptr<transaction::Context> const& ctx) {
    _transactionContext = ctx;
  }

  QueryProfile* profile() const {
    return _profile.get();

@ -324,6 +329,9 @@ class Query {

  /// @brief pointer to vocbase the query runs in
  TRI_vocbase_t& _vocbase;

  /// @brief transaction context to use for this query
  std::shared_ptr<transaction::Context> _transactionContext;

  /// @brief the currently used V8 context
  V8Context* _context;

@ -43,7 +43,7 @@
#include "Scheduler/JobGuard.h"
#include "Scheduler/JobQueue.h"
#include "Scheduler/SchedulerFeature.h"
#include "Transaction/Context.h"
#include "Transaction/SmartContext.h"
#include "Transaction/Methods.h"
#include "VocBase/ticks.h"


@ -224,12 +224,15 @@ void RestAqlHandler::setupClusterQuery() {
  if (found) {
    ttl = arangodb::basics::StringUtils::doubleDecimal(ttlstring);
  }

  // creates a StandaloneContext or a leasing context
  auto ctx = transaction::SmartContext::Create(_vocbase);

  VPackBuilder answerBuilder;
  answerBuilder.openObject();
  bool needToLock = true;
  bool res = registerSnippets(snippetsSlice, collectionBuilder.slice(), variablesSlice,
                              options, ttl, needToLock, answerBuilder);
                              options, ctx, ttl, needToLock, answerBuilder);
  if (!res) {
    // TODO we need to trigger cleanup here??
    // Registering the snippets failed.

@ -237,8 +240,7 @@ void RestAqlHandler::setupClusterQuery() {
  }

  if (!traverserSlice.isNone()) {

    res = registerTraverserEngines(traverserSlice, needToLock, ttl, answerBuilder);
    res = registerTraverserEngines(traverserSlice, ctx, ttl, needToLock, answerBuilder);

    if (!res) {
      // TODO we need to trigger cleanup here??

@ -257,6 +259,7 @@ bool RestAqlHandler::registerSnippets(
    VPackSlice const collectionSlice,
    VPackSlice const variablesSlice,
    std::shared_ptr<VPackBuilder> options,
    std::shared_ptr<transaction::Context> const& ctx,
    double const ttl,
    bool& needToLock,
    VPackBuilder& answerBuilder

@ -291,6 +294,9 @@ bool RestAqlHandler::registerSnippets(
      options,
      (needToLock ? PART_MAIN : PART_DEPENDENT)
    );

    // enables the query to get the correct transaction
    query->setTransactionContext(ctx);

    bool prepared = false;
    try {

@ -362,7 +368,9 @@ bool RestAqlHandler::registerSnippets(
  return true;
}

bool RestAqlHandler::registerTraverserEngines(VPackSlice const traverserEngines, bool& needToLock, double ttl, VPackBuilder& answerBuilder) {
bool RestAqlHandler::registerTraverserEngines(VPackSlice const traverserEngines,
                                              std::shared_ptr<transaction::Context> const& ctx,
                                              double ttl, bool& needToLock, VPackBuilder& answerBuilder) {
  TRI_ASSERT(traverserEngines.isArray());

  TRI_ASSERT(answerBuilder.isOpenObject());

@ -371,7 +379,7 @@ bool RestAqlHandler::registerTraverserEngines(VPackSlice const traverserEngines,

  for (auto const& te : VPackArrayIterator(traverserEngines)) {
    try {
      auto id = _traverserRegistry->createNew(_vocbase, te, needToLock, ttl);
      auto id = _traverserRegistry->createNew(_vocbase, ctx, te, ttl, needToLock);

      needToLock = false;
      TRI_ASSERT(id != 0);


@ -110,13 +110,15 @@ class RestAqlHandler : public RestVocbaseBaseHandler {
      arangodb::velocypack::Slice const collections,
      arangodb::velocypack::Slice const variables,
      std::shared_ptr<arangodb::velocypack::Builder> options,
      std::shared_ptr<transaction::Context> const& ctx,
      double const ttl,
      bool& needToLock,
      arangodb::velocypack::Builder& answer);

  bool registerTraverserEngines(arangodb::velocypack::Slice const traversers,
                                bool& needToLock,
                                std::shared_ptr<transaction::Context> const& ctx,
                                double const ttl,
                                bool& needToLock,
                                arangodb::velocypack::Builder& answer);

  // Send slice as result with the given response type.

@ -654,9 +654,9 @@ Result auth::UserManager::removeAllUsers() {

bool auth::UserManager::checkPassword(std::string const& username,
                                      std::string const& password) {
  // AuthResult result(username);
  if (username.empty() || IsRole(username)) {
    return false;
  if (username.empty() || IsRole(username) ||
      ServerState::serverMode() == ServerState::Mode::MAINTENANCE) {
    return false;  // we cannot authenticate during bootstrap
  }

  loadFromDB();

@ -465,6 +465,7 @@ SET(ARANGOD_SOURCES
  Transaction/Helpers.cpp
  Transaction/Methods.cpp
  Transaction/Options.cpp
  Transaction/SmartContext.cpp
  Transaction/StandaloneContext.cpp
  Transaction/Status.cpp
  Transaction/V8Context.cpp
@ -311,16 +311,27 @@ static void extractErrorCodes(ClusterCommResult const& res,
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
static int distributeBabyOnShards(
|
||||
std::unordered_map<ShardID, std::vector<VPackValueLength>>& shardMap,
|
||||
std::unordered_map<ShardID, std::vector<VPackSlice>>& shardMap,
|
||||
ClusterInfo* ci, std::string const& collid,
|
||||
std::shared_ptr<LogicalCollection> collinfo,
|
||||
std::shared_ptr<LogicalCollection> const& collinfo,
|
||||
std::vector<std::pair<ShardID, VPackValueLength>>& reverseMapping,
|
||||
VPackSlice const node, VPackValueLength const index) {
|
||||
VPackSlice const& value) {
|
||||
// Now find the responsible shard:
|
||||
bool usesDefaultShardingAttributes;
|
||||
ShardID shardID;
|
||||
int error = ci->getResponsibleShard(collinfo.get(), node, false, shardID,
|
||||
usesDefaultShardingAttributes);
|
||||
int error;
|
||||
if (value.isString()) {
|
||||
VPackBuilder temp;
|
||||
temp.openObject();
|
||||
temp.add(StaticStrings::KeyString, value);
|
||||
temp.close();
|
||||
|
||||
error = ci->getResponsibleShard(collinfo.get(), temp.slice(), false, shardID,
|
||||
usesDefaultShardingAttributes);
|
||||
} else {
|
||||
error = ci->getResponsibleShard(collinfo.get(), value, false, shardID,
|
||||
usesDefaultShardingAttributes);
|
||||
}
|
||||
if (error == TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND) {
|
||||
return TRI_ERROR_CLUSTER_SHARD_GONE;
|
||||
}
|
||||
|
@ -332,11 +343,10 @@ static int distributeBabyOnShards(
|
|||
// We found the responsible shard. Add it to the list.
|
||||
auto it = shardMap.find(shardID);
|
||||
if (it == shardMap.end()) {
|
||||
std::vector<VPackValueLength> counter({index});
|
||||
shardMap.emplace(shardID, counter);
|
||||
shardMap.emplace(shardID, std::vector<VPackSlice>{value});
|
||||
reverseMapping.emplace_back(shardID, 0);
|
||||
} else {
|
||||
it->second.emplace_back(index);
|
||||
it->second.emplace_back(value);
|
||||
reverseMapping.emplace_back(shardID, it->second.size() - 1);
|
||||
}
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
|
@ -350,18 +360,16 @@ static int distributeBabyOnShards(
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
static int distributeBabyOnShards(
|
||||
std::unordered_map<ShardID,
|
||||
std::vector<std::pair<VPackValueLength, std::string>>>&
|
||||
shardMap,
|
||||
std::unordered_map<ShardID, std::vector<std::pair<VPackSlice, std::string>>>& shardMap,
|
||||
ClusterInfo* ci, std::string const& collid,
|
||||
std::shared_ptr<LogicalCollection> collinfo,
|
||||
std::shared_ptr<LogicalCollection> const& collinfo,
|
||||
std::vector<std::pair<ShardID, VPackValueLength>>& reverseMapping,
|
||||
VPackSlice const node, VPackValueLength const index, bool isRestore) {
|
||||
VPackSlice const value, bool isRestore) {
|
||||
ShardID shardID;
|
||||
bool userSpecifiedKey = false;
|
||||
std::string _key = "";
|
||||
|
||||
if (!node.isObject()) {
|
||||
if (!value.isObject()) {
|
||||
// We have invalid input at this point.
|
||||
// However we can work with the other babies.
|
||||
// This is for compatibility with single server
|
||||
|
@ -379,7 +387,7 @@ static int distributeBabyOnShards(
|
|||
// attributes a bit further down the line when we have determined
|
||||
// the responsible shard.
|
||||
|
||||
VPackSlice keySlice = node.get(StaticStrings::KeyString);
|
||||
VPackSlice keySlice = value.get(StaticStrings::KeyString);
|
||||
if (keySlice.isNone()) {
|
||||
// The user did not specify a key, let's create one:
|
||||
_key = collinfo->keyGenerator()->generate();
|
||||
|
@ -391,10 +399,10 @@ static int distributeBabyOnShards(
|
|||
bool usesDefaultShardingAttributes;
|
||||
int error = TRI_ERROR_NO_ERROR;
|
||||
if (userSpecifiedKey) {
|
||||
error = ci->getResponsibleShard(collinfo.get(), node, true, shardID,
|
||||
error = ci->getResponsibleShard(collinfo.get(), value, true, shardID,
|
||||
usesDefaultShardingAttributes);
|
||||
} else {
|
||||
error = ci->getResponsibleShard(collinfo.get(), node, true, shardID,
|
||||
error = ci->getResponsibleShard(collinfo.get(), value, true, shardID,
|
||||
usesDefaultShardingAttributes, _key);
|
||||
}
|
||||
if (error == TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND) {
|
||||
|
@ -412,12 +420,10 @@ static int distributeBabyOnShards(
|
|||
// We found the responsible shard. Add it to the list.
|
||||
auto it = shardMap.find(shardID);
|
||||
if (it == shardMap.end()) {
|
||||
std::vector<std::pair<VPackValueLength, std::string>> counter(
|
||||
{{index, _key}});
|
||||
shardMap.emplace(shardID, counter);
|
||||
shardMap.emplace(shardID, std::vector<std::pair<VPackSlice, std::string>>{{value, _key}});
|
||||
reverseMapping.emplace_back(shardID, 0);
|
||||
} else {
|
||||
it->second.emplace_back(index, _key);
|
||||
it->second.emplace_back(value, _key);
|
||||
reverseMapping.emplace_back(shardID, it->second.size() - 1);
|
||||
}
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
|
@ -884,7 +890,7 @@ int figuresOnCoordinator(std::string const& dbname, std::string const& collname,
|
|||
/// @brief counts number of documents in a coordinator, by shard
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
int countOnCoordinator(std::string const& dbname, std::string const& collname,
|
||||
int countOnCoordinator(std::string const& dbname, std::string const& cname,
|
||||
std::vector<std::pair<std::string, uint64_t>>& result,
|
||||
bool sendNoLockHeader) {
|
||||
// Set a few variables needed for our work:
|
||||
|
@ -900,7 +906,7 @@ int countOnCoordinator(std::string const& dbname, std::string const& collname,
|
|||
// First determine the collection ID from the name:
|
||||
std::shared_ptr<LogicalCollection> collinfo;
|
||||
try {
|
||||
collinfo = ci->getCollection(dbname, collname);
|
||||
collinfo = ci->getCollection(dbname, cname);
|
||||
} catch (...) {
|
||||
return TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND;
|
||||
}
|
||||
|
@ -1111,19 +1117,16 @@ Result createDocumentOnCoordinator(
|
|||
|
||||
// create vars used in this function
|
||||
bool const useMultiple = slice.isArray(); // insert more than one document
|
||||
std::unordered_map< ShardID
|
||||
, std::vector<std::pair<VPackValueLength, std::string>>
|
||||
> shardMap;
|
||||
std::unordered_map<ShardID, std::vector<std::pair<VPackSlice, std::string>>> shardMap;
|
||||
std::vector<std::pair<ShardID, VPackValueLength>> reverseMapping;
|
||||
|
||||
{
|
||||
// create shard map
|
||||
int res = TRI_ERROR_NO_ERROR;
|
||||
if (useMultiple) {
|
||||
VPackValueLength length = slice.length();
|
||||
for (VPackValueLength idx = 0; idx < length; ++idx) {
|
||||
for (VPackSlice value : VPackArrayIterator(slice)) {
|
||||
res = distributeBabyOnShards(shardMap, ci, collid, collinfo,
|
||||
reverseMapping, slice.at(idx), idx,
|
||||
reverseMapping, value,
|
||||
options.isRestore);
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
return res;
|
||||
|
@ -1131,7 +1134,7 @@ Result createDocumentOnCoordinator(
|
|||
}
|
||||
} else {
|
||||
res = distributeBabyOnShards(shardMap, ci, collid, collinfo, reverseMapping,
|
||||
slice, 0, options.isRestore);
|
||||
slice, options.isRestore);
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
return res;
|
||||
}
|
||||
|
@ -1173,11 +1176,11 @@ Result createDocumentOnCoordinator(
reqBuilder.openArray();
for (auto const& idx : it.second) {
if (idx.second.empty()) {
reqBuilder.add(slice.at(idx.first));
reqBuilder.add(idx.first);
} else {
reqBuilder.openObject();
reqBuilder.add(StaticStrings::KeyString, VPackValue(idx.second));
TRI_SanitizeObject(slice.at(idx.first), reqBuilder);
TRI_SanitizeObject(idx.first, reqBuilder);
reqBuilder.close();
}
}

@ -1214,7 +1217,7 @@ Result createDocumentOnCoordinator(

std::unordered_map<ShardID, std::shared_ptr<VPackBuilder>> resultMap;

collectResultsFromAllShards<std::pair<VPackValueLength, std::string>>(
collectResultsFromAllShards<std::pair<VPackSlice, std::string>>(
shardMap, requests, errorCounter, resultMap, responseCode);

responseCode =

@ -1274,13 +1277,13 @@ int deleteDocumentOnCoordinator(
// Send the correct documents to the correct shards
// Merge the results with static merge helper

std::unordered_map<ShardID, std::vector<VPackValueLength>> shardMap;
std::unordered_map<ShardID, std::vector<VPackSlice>> shardMap;
std::vector<std::pair<ShardID, VPackValueLength>> reverseMapping;
auto workOnOneNode = [&shardMap, &ci, &collid, &collinfo, &reverseMapping](
VPackSlice const node, VPackValueLength const index) -> int {
VPackSlice const value) -> int {
// Sort out the _key attribute and identify the shard responsible for it.

StringRef _key(transaction::helpers::extractKeyPart(node));
StringRef _key(transaction::helpers::extractKeyPart(value));
ShardID shardID;
if (_key.empty()) {
// We have invalid input at this point.

@ -1305,26 +1308,25 @@ int deleteDocumentOnCoordinator(
// We found the responsible shard. Add it to the list.
auto it = shardMap.find(shardID);
if (it == shardMap.end()) {
std::vector<VPackValueLength> counter({index});
shardMap.emplace(shardID, counter);
shardMap.emplace(shardID, std::vector<VPackSlice>{value});
reverseMapping.emplace_back(shardID, 0);
} else {
it->second.emplace_back(index);
it->second.emplace_back(value);
reverseMapping.emplace_back(shardID, it->second.size() - 1);
}
return TRI_ERROR_NO_ERROR;
};

if (useMultiple) {
for (VPackValueLength idx = 0; idx < slice.length(); ++idx) {
int res = workOnOneNode(slice.at(idx), idx);
if (useMultiple) { // slice is array of document values
for (VPackSlice value : VPackArrayIterator(slice)) {
int res = workOnOneNode(value);
if (res != TRI_ERROR_NO_ERROR) {
// Is early abortion correct?
return res;
}
}
} else {
int res = workOnOneNode(slice, 0);
int res = workOnOneNode(slice);
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
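Note: the shard maps above now hold `VPackSlice` values instead of array offsets. A `VPackSlice` is a pointer-sized, non-owning view, so this is safe exactly as long as the buffer behind the request body outlives the map, which it does for the duration of the coordinator operation. A minimal sketch of the idea (the `ShardID` alias and shard name are stand-ins, not the real typedefs):

#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
#include <string>
#include <unordered_map>
#include <vector>

using ShardID = std::string;  // stand-in for the real ShardID typedef

int main() {
  VPackBuilder body;  // pretend this is the parsed request body
  body.openArray();
  body.add(VPackValue(1));
  body.add(VPackValue(2));
  body.close();

  std::unordered_map<ShardID, std::vector<VPackSlice>> shardMap;
  for (VPackSlice value : VPackArrayIterator(body.slice())) {
    // copies only the view (a pointer), not the underlying data
    shardMap["s100"].emplace_back(value);
  }
  return shardMap["s100"].size() == 2 ? 0 : 1;
}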
@ -1342,8 +1344,8 @@ int deleteDocumentOnCoordinator(
} else {
reqBuilder.clear();
reqBuilder.openArray();
for (auto const& idx : it.second) {
reqBuilder.add(slice.at(idx));
for (auto const& value : it.second) {
reqBuilder.add(value);
}
reqBuilder.close();
body = std::make_shared<std::string>(reqBuilder.slice().toJson());

@ -1377,7 +1379,7 @@ int deleteDocumentOnCoordinator(
}

std::unordered_map<ShardID, std::shared_ptr<VPackBuilder>> resultMap;
collectResultsFromAllShards<VPackValueLength>(
collectResultsFromAllShards<VPackSlice>(
shardMap, requests, errorCounter, resultMap, responseCode);
mergeResults(reverseMapping, resultMap, resultBody);
return TRI_ERROR_NO_ERROR; // the cluster operation was OK, however,

@ -1585,7 +1587,7 @@ int rotateActiveJournalOnAllDBServers(std::string const& dbname,

int getDocumentOnCoordinator(
std::string const& dbname, std::string const& collname,
VPackSlice const slice, OperationOptions const& options,
VPackSlice slice, OperationOptions const& options,
std::unique_ptr<std::unordered_map<std::string, std::string>> headers,
arangodb::rest::ResponseCode& responseCode,
std::unordered_map<int, size_t>& errorCounter,

@ -1617,17 +1619,16 @@ int getDocumentOnCoordinator(

ShardID shardID;

std::unordered_map<ShardID, std::vector<VPackValueLength>> shardMap;
std::unordered_map<ShardID, std::vector<VPackSlice>> shardMap;
std::vector<std::pair<ShardID, VPackValueLength>> reverseMapping;
bool useMultiple = slice.isArray();

int res = TRI_ERROR_NO_ERROR;
bool canUseFastPath = true;
if (useMultiple) {
VPackValueLength length = slice.length();
for (VPackValueLength idx = 0; idx < length; ++idx) {
for (VPackSlice value : VPackArrayIterator(slice)) {
res = distributeBabyOnShards(shardMap, ci, collid, collinfo,
reverseMapping, slice.at(idx), idx);
reverseMapping, value);
if (res != TRI_ERROR_NO_ERROR) {
canUseFastPath = false;
shardMap.clear();

@ -1637,7 +1638,7 @@ int getDocumentOnCoordinator(
}
} else {
res = distributeBabyOnShards(shardMap, ci, collid, collinfo, reverseMapping,
slice, 0);
slice);
if (res != TRI_ERROR_NO_ERROR) {
canUseFastPath = false;
}

@ -1698,8 +1699,8 @@ int getDocumentOnCoordinator(
} else {
reqBuilder.clear();
reqBuilder.openArray();
for (auto const& idx : it.second) {
reqBuilder.add(slice.at(idx));
for (auto const& value : it.second) {
reqBuilder.add(value);
}
reqBuilder.close();
body = std::make_shared<std::string>(reqBuilder.slice().toJson());

@ -1734,7 +1735,7 @@ int getDocumentOnCoordinator(
}

std::unordered_map<ShardID, std::shared_ptr<VPackBuilder>> resultMap;
collectResultsFromAllShards<VPackValueLength>(
collectResultsFromAllShards<VPackSlice>(
shardMap, requests, errorCounter, resultMap, responseCode);

mergeResults(reverseMapping, resultMap, resultBody);

@ -2315,17 +2316,16 @@ int modifyDocumentOnCoordinator(

ShardID shardID;

std::unordered_map<ShardID, std::vector<VPackValueLength>> shardMap;
std::unordered_map<ShardID, std::vector<VPackSlice>> shardMap;
std::vector<std::pair<ShardID, VPackValueLength>> reverseMapping;
bool useMultiple = slice.isArray();

int res = TRI_ERROR_NO_ERROR;
bool canUseFastPath = true;
if (useMultiple) {
VPackValueLength length = slice.length();
for (VPackValueLength idx = 0; idx < length; ++idx) {
for (VPackSlice value : VPackArrayIterator(slice)) {
res = distributeBabyOnShards(shardMap, ci, collid, collinfo,
reverseMapping, slice.at(idx), idx);
reverseMapping, value);
if (res != TRI_ERROR_NO_ERROR) {
if (!isPatch) {
return res;

@ -2337,8 +2337,7 @@ int modifyDocumentOnCoordinator(
}
}
} else {
res = distributeBabyOnShards(shardMap, ci, collid, collinfo, reverseMapping,
slice, 0);
res = distributeBabyOnShards(shardMap, ci, collid, collinfo, reverseMapping, slice);
if (res != TRI_ERROR_NO_ERROR) {
if (!isPatch) {
return res;

@ -2405,8 +2404,8 @@ int modifyDocumentOnCoordinator(
} else {
reqBuilder.clear();
reqBuilder.openArray();
for (auto const& idx : it.second) {
reqBuilder.add(slice.at(idx));
for (auto const& value : it.second) {
reqBuilder.add(value);
}
reqBuilder.close();
body = std::make_shared<std::string>(reqBuilder.slice().toJson());

@ -2439,7 +2438,7 @@ int modifyDocumentOnCoordinator(
}

std::unordered_map<ShardID, std::shared_ptr<VPackBuilder>> resultMap;
collectResultsFromAllShards<VPackValueLength>(
collectResultsFromAllShards<VPackSlice>(
shardMap, requests, errorCounter, resultMap, responseCode);

mergeResults(reverseMapping, resultMap, resultBody);

@ -125,7 +125,7 @@ int deleteDocumentOnCoordinator(

int getDocumentOnCoordinator(
std::string const& dbname, std::string const& collname,
VPackSlice const slice, OperationOptions const& options,
VPackSlice slice, OperationOptions const& options,
std::unique_ptr<std::unordered_map<std::string, std::string>> headers,
arangodb::rest::ResponseCode& responseCode,
std::unordered_map<int, size_t>& errorCounter,

@ -34,7 +34,7 @@ using namespace arangodb;
/// @brief get information about current followers of a shard.
////////////////////////////////////////////////////////////////////////////////

std::shared_ptr<std::vector<ServerID> const> FollowerInfo::get() {
std::shared_ptr<std::vector<ServerID> const> FollowerInfo::get() const {
MUTEX_LOCKER(locker, _mutex);
return _followers;
}

@ -35,7 +35,7 @@ namespace arangodb {

class FollowerInfo {
std::shared_ptr<std::vector<ServerID> const> _followers;
Mutex _mutex;
mutable Mutex _mutex;
arangodb::LogicalCollection* _docColl;
std::string _theLeader;
// if the latter is empty, then we are leading

@ -49,7 +49,7 @@ class FollowerInfo {
/// @brief get information about current followers of a shard.
//////////////////////////////////////////////////////////////////////////////

std::shared_ptr<std::vector<ServerID> const> get();
std::shared_ptr<std::vector<ServerID> const> get() const;

//////////////////////////////////////////////////////////////////////////////
/// @brief add a follower to a shard, this is only done by the server side

@ -89,7 +89,7 @@ class FollowerInfo {
/// @brief get the leader
//////////////////////////////////////////////////////////////////////////////

std::string getLeader() {
std::string getLeader() const {
MUTEX_LOCKER(locker, _mutex);
return _theLeader;
}
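Note: the FollowerInfo hunks mark `_mutex` as `mutable` because a logically-const getter still has to lock, and locking mutates the mutex object. A minimal sketch of the pattern, with `std::mutex` and `std::lock_guard` standing in for arangodb's `Mutex` and `MUTEX_LOCKER`:

#include <memory>
#include <mutex>
#include <string>
#include <vector>

class FollowerInfoSketch {
  std::shared_ptr<std::vector<std::string> const> _followers;
  mutable std::mutex _mutex;  // mutable: lockable from const member functions
  std::string _theLeader;

 public:
  std::shared_ptr<std::vector<std::string> const> get() const {
    std::lock_guard<std::mutex> locker(_mutex);  // OK because _mutex is mutable
    return _followers;
  }

  std::string getLeader() const {
    std::lock_guard<std::mutex> locker(_mutex);
    return _theLeader;
  }
};

int main() {
  FollowerInfoSketch info;
  return info.getLeader().empty() ? 0 : 1;  // empty leader: we are leading
}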
@ -55,6 +55,7 @@ static const std::string VERTICES = "vertices";
#ifndef USE_ENTERPRISE
/*static*/ std::unique_ptr<BaseEngine> BaseEngine::BuildEngine(
TRI_vocbase_t& vocbase,
std::shared_ptr<transaction::Context> const& ctx,
VPackSlice info,
bool needToLock
) {

@ -67,9 +68,9 @@ static const std::string VERTICES = "vertices";
}

if (type.isEqualString("traversal")) {
return std::make_unique<TraverserEngine>(vocbase, info, needToLock);
return std::make_unique<TraverserEngine>(vocbase, ctx, info, needToLock);
} else if (type.isEqualString("shortestPath")) {
return std::make_unique<ShortestPathEngine>(vocbase, info, needToLock);
return std::make_unique<ShortestPathEngine>(vocbase, ctx, info, needToLock);
}
THROW_ARANGO_EXCEPTION_MESSAGE(
TRI_ERROR_BAD_PARAMETER,

@ -77,7 +78,9 @@ static const std::string VERTICES = "vertices";
}
#endif

BaseEngine::BaseEngine(TRI_vocbase_t& vocbase, VPackSlice info, bool needToLock)
BaseEngine::BaseEngine(TRI_vocbase_t& vocbase,
std::shared_ptr<transaction::Context> const& ctx,
VPackSlice info, bool needToLock)
: _query(nullptr), _trx(nullptr), _collections(&vocbase) {
VPackSlice shardsSlice = info.get(SHARDS);

@ -127,8 +130,7 @@ BaseEngine::BaseEngine(TRI_vocbase_t& vocbase, VPackSlice info, bool needToLock)
// FIXME: in the future this needs to be replaced with
// the new cluster wide transactions
transaction::Options trxOpts;
auto ctx = arangodb::transaction::StandaloneContext::Create(vocbase);

#ifdef USE_ENTERPRISE
VPackSlice inaccessSlice = shardsSlice.get(INACCESSIBLE);
if (inaccessSlice.isArray()) {

@ -272,10 +274,11 @@ void BaseEngine::getVertexData(VPackSlice vertex, VPackBuilder& builder) {

BaseTraverserEngine::BaseTraverserEngine(
TRI_vocbase_t& vocbase,
std::shared_ptr<transaction::Context> const& ctx,
VPackSlice info,
bool needToLock
)
: BaseEngine(vocbase, info, needToLock), _opts(nullptr) {}
: BaseEngine(vocbase, ctx, info, needToLock), _opts(nullptr) {}

BaseTraverserEngine::~BaseTraverserEngine() {}

@ -407,10 +410,11 @@ void BaseTraverserEngine::getVertexData(VPackSlice vertex, size_t depth,

ShortestPathEngine::ShortestPathEngine(
TRI_vocbase_t& vocbase,
std::shared_ptr<transaction::Context> const& ctx,
arangodb::velocypack::Slice info,
bool needToLock
)
: BaseEngine(vocbase, info, needToLock) {
: BaseEngine(vocbase, ctx, info, needToLock) {
VPackSlice optsSlice = info.get(OPTIONS);
if (optsSlice.isNone() || !optsSlice.isObject()) {
THROW_ARANGO_EXCEPTION_MESSAGE(

@ -507,10 +511,10 @@ void ShortestPathEngine::getEdges(VPackSlice vertex, bool backward,

TraverserEngine::TraverserEngine(
TRI_vocbase_t& vocbase,
std::shared_ptr<transaction::Context> const& ctx,
arangodb::velocypack::Slice info,
bool needToLock
)
: BaseTraverserEngine(vocbase, info, needToLock) {
) : BaseTraverserEngine(vocbase, ctx, info, needToLock) {

VPackSlice optsSlice = info.get(OPTIONS);
if (!optsSlice.isObject()) {

@ -71,12 +71,14 @@ class BaseEngine {

static std::unique_ptr<BaseEngine> BuildEngine(
TRI_vocbase_t& vocbase,
std::shared_ptr<transaction::Context> const& ctx,
arangodb::velocypack::Slice info,
bool needToLock
);

BaseEngine(
TRI_vocbase_t& vocbase,
std::shared_ptr<transaction::Context> const& ctx,
arangodb::velocypack::Slice info,
bool needToLock
);

@ -115,6 +117,7 @@ class BaseTraverserEngine : public BaseEngine {

BaseTraverserEngine(
TRI_vocbase_t& vocbase,
std::shared_ptr<transaction::Context> const& ctx,
arangodb::velocypack::Slice info,
bool needToLock
);

@ -150,6 +153,7 @@ class ShortestPathEngine : public BaseEngine {

ShortestPathEngine(
TRI_vocbase_t& vocbase,
std::shared_ptr<transaction::Context> const& ctx,
arangodb::velocypack::Slice info,
bool needToLock
);

@ -176,6 +180,7 @@ class TraverserEngine : public BaseTraverserEngine {

TraverserEngine(
TRI_vocbase_t& vocbase,
std::shared_ptr<transaction::Context> const& ctx,
arangodb::velocypack::Slice info,
bool needToLock
);
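Note: the engine constructors above stop creating their own `StandaloneContext` and instead accept one shared transaction context from the caller, so all engines built for a request can share transaction state. A minimal sketch of threading a shared context through a class hierarchy (`Context`, `Base`, and `Derived` are illustrative names, not the real classes):

#include <memory>

namespace transaction { class Context {}; }

class Base {
 public:
  explicit Base(std::shared_ptr<transaction::Context> const& ctx) : _ctx(ctx) {}
  virtual ~Base() = default;

 private:
  std::shared_ptr<transaction::Context> _ctx;  // shared, not owned exclusively
};

class Derived : public Base {
 public:
  // derived classes just forward the context up the chain
  explicit Derived(std::shared_ptr<transaction::Context> const& ctx) : Base(ctx) {}
};

int main() {
  auto ctx = std::make_shared<transaction::Context>();
  Derived engine(ctx);  // the caller decides the context's lifetime and sharing
  return 0;
}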
@ -192,4 +197,4 @@ class TraverserEngine : public BaseTraverserEngine {
} // namespace traverser
} // namespace arangodb

#endif
#endif

@ -29,21 +29,23 @@
#include "Basics/WriteLocker.h"
#include "Cluster/TraverserEngine.h"
#include "Logger/Logger.h"
#include "Transaction/Context.h"
#include "VocBase/ticks.h"

#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>

using namespace arangodb;
using namespace arangodb::traverser;

TraverserEngineRegistry::EngineInfo::EngineInfo(
TRI_vocbase_t& vocbase,
std::shared_ptr<transaction::Context> const& ctx,
arangodb::velocypack::Slice info,
bool needToLock
)
bool needToLock)
: _isInUse(false),
_toBeDeleted(false),
_engine(BaseEngine::BuildEngine(vocbase, info, needToLock)),
_engine(BaseEngine::BuildEngine(vocbase, ctx, info, needToLock)),
_timeToLive(0),
_expires(0) {}

@ -59,14 +61,15 @@ TraverserEngineRegistry::~TraverserEngineRegistry() {
/// @brief Create a new Engine and return its id
TraverserEngineID TraverserEngineRegistry::createNew(
TRI_vocbase_t& vocbase,
std::shared_ptr<transaction::Context> const& ctx,
arangodb::velocypack::Slice engineInfo,
bool needToLock,
double ttl /*= 600.0*/
double ttl, /*= 600.0*/
bool needToLock
) {
TraverserEngineID id = TRI_NewTickServer();
LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Register TraverserEngine with id " << id;
TRI_ASSERT(id != 0);
auto info = std::make_unique<EngineInfo>(vocbase, engineInfo, needToLock);
auto info = std::make_unique<EngineInfo>(vocbase, ctx, engineInfo, needToLock);
info->_timeToLive = ttl;
info->_expires = TRI_microtime() + ttl;

@ -31,6 +31,10 @@
struct TRI_vocbase_t;

namespace arangodb {
namespace transaction {
class Context;
}

namespace traverser {

class BaseEngine;

@ -51,9 +55,10 @@ class TraverserEngineRegistry {
/// internally went wrong.
TEST_VIRTUAL TraverserEngineID createNew(
TRI_vocbase_t& vocbase,
std::shared_ptr<transaction::Context> const& ctx,
arangodb::velocypack::Slice engineInfo,
bool needToLock,
double ttl = 600.0
double ttl,
bool needToLock
);

/// @brief Get the engine with the given ID.

@ -95,6 +100,7 @@ class TraverserEngineRegistry {

EngineInfo(
TRI_vocbase_t& vocbase,
std::shared_ptr<transaction::Context> const& ctx,
arangodb::velocypack::Slice info,
bool needToLock
);
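Note: in the `createNew()` hunks the TTL parameter loses its default value and moves in front of `needToLock`. One plausible motive (an assumption, not stated in the commit): with a defaulted trailing parameter, a reordered or partially updated call site can still compile via implicit conversions (a `double` converts to `bool` and vice versa), so dropping the default forces every caller to be revisited. Toy stand-ins below, not the real API:

#include <cassert>
#include <cstdint>

static uint64_t nextId = 0;

uint64_t createNewOld(bool /*needToLock*/, double /*ttl*/ = 600.0) {
  return ++nextId;  // old shape: ttl could be omitted entirely
}

uint64_t createNewNew(double ttl, bool /*needToLock*/) {
  assert(ttl > 0.0);  // ttl must now be spelled out by the caller
  return ++nextId;
}

int main() {
  createNewOld(true);         // ttl silently defaulted to 600.0
  createNewNew(600.0, true);  // ttl explicit, as a later hunk's handler passes
  return 0;
}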
@ -132,11 +132,11 @@ std::unique_ptr<transaction::ContextData> ClusterEngine::createTransactionContex
}

std::unique_ptr<TransactionState> ClusterEngine::createTransactionState(
CollectionNameResolver const& resolver,
TRI_vocbase_t& vocbase,
transaction::Options const& options
) {
return std::make_unique<ClusterTransactionState>(
resolver, TRI_NewTickServer(), options
vocbase, TRI_NewTickServer(), options
);
}

@ -83,7 +83,7 @@ class ClusterEngine final : public StorageEngine {
std::unique_ptr<TransactionManager> createTransactionManager() override;
std::unique_ptr<transaction::ContextData> createTransactionContextData() override;
std::unique_ptr<TransactionState> createTransactionState(
CollectionNameResolver const& resolver,
TRI_vocbase_t& vocbase,
transaction::Options const& options
) override;
std::unique_ptr<TransactionCollection> createTransactionCollection(

@ -372,4 +372,4 @@ class ClusterEngine final : public StorageEngine {

} // namespace arangodb

#endif
#endif

@ -23,6 +23,7 @@
#include "ClusterTransactionCollection.h"
#include "Basics/Exceptions.h"
#include "Cluster/CollectionLockState.h"
#include "Cluster/ClusterInfo.h"
#include "Logger/Logger.h"
#include "StorageEngine/TransactionState.h"
#include "Transaction/Hints.h"

@ -165,24 +166,24 @@ int ClusterTransactionCollection::use(int nestingLevel) {

if (_collection == nullptr) {
// open the collection
if (!_transaction->hasHint(transaction::Hints::Hint::LOCK_NEVER) &&
!_transaction->hasHint(transaction::Hints::Hint::NO_USAGE_LOCK)) {
// use and usage-lock
TRI_vocbase_col_status_e status;
LOG_TRX(_transaction, nestingLevel) << "using collection " << _cid;
_collection = _transaction->vocbase().useCollection(_cid, status);
if (_collection != nullptr) {
_usageLocked = true;
}
} else {
// use without usage-lock (lock already set externally)
_collection = _transaction->vocbase().lookupCollection(_cid).get();

if (_collection == nullptr) {
return TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND;
}
ClusterInfo* ci = ClusterInfo::instance();
if (ci == nullptr) {
return TRI_ERROR_SHUTTING_DOWN;
}

try {
_sharedCollection = ci->getCollection(_transaction->vocbase().name(), std::to_string(_cid));
if (_sharedCollection) {
_collection = _sharedCollection.get();
if (!_transaction->hasHint(transaction::Hints::Hint::LOCK_NEVER) &&
!_transaction->hasHint(transaction::Hints::Hint::NO_USAGE_LOCK)) {
// use and usage-lock
LOG_TRX(_transaction, nestingLevel) << "using collection " << _cid;
_usageLocked = true;
}
}
} catch(...) {}

if (_collection == nullptr) {
int res = TRI_errno();
if (res == TRI_ERROR_ARANGO_COLLECTION_NOT_LOADED) {

@ -88,7 +88,9 @@ class ClusterTransactionCollection final : public TransactionCollection {
private:
AccessMode::Type _lockType; // collection lock type, used for exclusive locks
int _nestingLevel; // the transaction level that added this collection
bool _usageLocked;
bool _usageLocked; // is this already locked
/// @brief shared ptr to the collection so we can safely use _collection
std::shared_ptr<LogicalCollection> _sharedCollection;
};
}

@ -39,10 +39,10 @@ struct ClusterTransactionData final : public TransactionData {};

/// @brief transaction type
ClusterTransactionState::ClusterTransactionState(
CollectionNameResolver const& resolver,
TRI_vocbase_t& vocbase,
TRI_voc_tid_t tid,
transaction::Options const& options)
: TransactionState(resolver, tid, options) {}
: TransactionState(vocbase, tid, options) {}

/// @brief free a transaction container
ClusterTransactionState::~ClusterTransactionState() {

@ -51,7 +51,7 @@ class ClusterTransactionState final : public TransactionState {

public:
ClusterTransactionState(
CollectionNameResolver const& resolver,
TRI_vocbase_t& vocbase,
TRI_voc_tid_t tid,
transaction::Options const& options
);

@ -74,4 +74,4 @@ class ClusterTransactionState final : public TransactionState {

} // namespace arangodb

#endif
#endif

@ -164,22 +164,23 @@ GeneralCommTask::RequestFlow GeneralCommTask::prepareExecution(
// now check the authentication will determine if the user can access
// this path checks db permissions and contains exceptions for the
// users API to allow logins
const rest::ResponseCode ok = GeneralCommTask::canAccessPath(req);
if (ok == rest::ResponseCode::UNAUTHORIZED) {
const rest::ResponseCode code = GeneralCommTask::canAccessPath(req);
if (code == rest::ResponseCode::UNAUTHORIZED) {
addErrorResponse(rest::ResponseCode::UNAUTHORIZED,
req.contentTypeResponse(), req.messageId(),
TRI_ERROR_FORBIDDEN,
"not authorized to execute this request");
return RequestFlow::Abort;
}
TRI_ASSERT(ok == rest::ResponseCode::OK); // nothing else allowed

// check for an HLC time stamp, only after authentication
std::string const& timeStamp = req.header(StaticStrings::HLCHeader, found);
if (found) {
uint64_t parsed = basics::HybridLogicalClock::decodeTimeStamp(timeStamp);
if (parsed != 0 && parsed != UINT64_MAX) {
TRI_HybridLogicalClock(parsed);

if (code == rest::ResponseCode::OK && req.authenticated()) {
// check for an HLC time stamp only with auth
std::string const& timeStamp = req.header(StaticStrings::HLCHeader, found);
if (found) {
uint64_t parsed = basics::HybridLogicalClock::decodeTimeStamp(timeStamp);
if (parsed != 0 && parsed != UINT64_MAX) {
TRI_HybridLogicalClock(parsed);
}
}
}
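Note: the `prepareExecution()` hunk stops applying a client-supplied HLC timestamp before the request is authenticated, since an anonymous caller could otherwise push the server's hybrid logical clock around. A minimal sketch of the gating pattern (the types and the toy decoder are stand-ins, not the real classes):

#include <cstdint>
#include <string>

struct Request {
  bool authenticated = false;
  std::string hlcHeader;  // raw header value, may be empty
};

uint64_t decodeTimeStamp(std::string const& s) {
  return s.empty() ? 0 : std::stoull(s);  // toy decoder
}

void maybeAdjustClock(Request const& req, uint64_t& clock) {
  if (!req.authenticated) {
    return;  // never trust clock hints from unauthenticated requests
  }
  uint64_t parsed = decodeTimeStamp(req.hlcHeader);
  if (parsed != 0 && parsed != UINT64_MAX) {
    clock = parsed > clock ? parsed : clock;
  }
}

int main() {
  uint64_t clock = 1;
  Request req{true, "42"};
  maybeAdjustClock(req, clock);
  return clock == 42 ? 0 : 1;
}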
@ -473,6 +474,8 @@ rest::ResponseCode GeneralCommTask::canAccessPath(
if (!_auth->isActive()) {
// no authentication required at all
return rest::ResponseCode::OK;
} else if (ServerState::serverMode() == ServerState::Mode::MAINTENANCE) {
return rest::ResponseCode::SERVICE_UNAVAILABLE;
}

std::string const& path = request.requestPath();

@ -762,16 +762,13 @@ void HttpCommTask::resetState() {

ResponseCode HttpCommTask::handleAuthHeader(HttpRequest* request) const {
bool found;
std::string const& authStr =
request->header(StaticStrings::Authorization, found);

std::string const& authStr = request->header(StaticStrings::Authorization, found);
if (!found) {
events::CredentialsMissing(request);
return rest::ResponseCode::UNAUTHORIZED;
}

size_t methodPos = authStr.find_first_of(' ');

if (methodPos != std::string::npos) {
// skip over authentication method
char const* auth = authStr.c_str() + methodPos;

@ -247,9 +247,9 @@ void registerViewFactory() {
}

template<typename Impl>
arangodb::Result transactionStateRegistrationCallback(
arangodb::Result transactionDataSourceRegistrationCallback(
arangodb::LogicalDataSource& dataSource,
arangodb::TransactionState& state
arangodb::transaction::Methods& trx
) {
if (arangodb::iresearch::DATA_SOURCE_TYPE != dataSource.type()) {
return arangodb::Result(); // not an IResearchView (noop)

@ -264,7 +264,7 @@ arangodb::Result transactionStateRegistrationCallback(

if (!view) {
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
<< "failure to get LogicalView while processing a TransactionState by IResearchFeature for tid '" << state.id() << "' name '" << dataSource.name() << "'";
<< "failure to get LogicalView while processing a TransactionState by IResearchFeature for name '" << dataSource.name() << "'";

return arangodb::Result(TRI_ERROR_INTERNAL);
}

@ -278,26 +278,26 @@ arangodb::Result transactionStateRegistrationCallback(

if (!impl) {
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
<< "failure to get IResearchView while processing a TransactionState by IResearchFeature for tid '" << state.id() << "' cid '" << dataSource.name() << "'";
<< "failure to get IResearchView while processing a TransactionState by IResearchFeature for cid '" << dataSource.name() << "'";

return arangodb::Result(TRI_ERROR_INTERNAL);
}

impl->apply(state);

return arangodb::Result();
return arangodb::Result(
impl->apply(trx) ? TRI_ERROR_NO_ERROR : TRI_ERROR_INTERNAL
);
}

void registerTransactionStateCallback() {
void registerTransactionDataSourceRegistrationCallback() {
if (arangodb::ServerState::instance()->isCoordinator()) {
// NOOP
} else if(arangodb::ServerState::instance()->isDBServer()) {
arangodb::transaction::Methods::addStateRegistrationCallback(
transactionStateRegistrationCallback<arangodb::iresearch::IResearchViewDBServer>
arangodb::transaction::Methods::addDataSourceRegistrationCallback(
transactionDataSourceRegistrationCallback<arangodb::iresearch::IResearchViewDBServer>
);
} else {
arangodb::transaction::Methods::addStateRegistrationCallback(
transactionStateRegistrationCallback<arangodb::iresearch::IResearchView>
arangodb::transaction::Methods::addDataSourceRegistrationCallback(
transactionDataSourceRegistrationCallback<arangodb::iresearch::IResearchView>
);
}
}
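Note: the IResearchFeature hunks rename the hook and change its contract: the view's `apply()` now returns `bool`, and the callback maps that onto a `Result` so a failed registration can abort transaction setup. A toy sketch of such a callback registry (all names are illustrative, not the real ArangoDB API):

#include <functional>
#include <utility>
#include <vector>

struct Result {
  int code = 0;
  bool ok() const { return code == 0; }
};

struct Transaction {};

using RegistrationCallback = std::function<Result(Transaction&)>;

static std::vector<RegistrationCallback> callbacks;

void addDataSourceRegistrationCallback(RegistrationCallback cb) {
  callbacks.emplace_back(std::move(cb));
}

Result runCallbacks(Transaction& trx) {
  for (auto& cb : callbacks) {
    Result res = cb(trx);
    if (!res.ok()) {
      return res;  // first failure wins, like TRI_ERROR_INTERNAL above
    }
  }
  return Result{};
}

int main() {
  addDataSourceRegistrationCallback([](Transaction&) {
    bool applied = true;  // stand-in for view->apply(trx)
    return Result{applied ? 0 : 1};
  });
  Transaction trx;
  return runCallbacks(trx).ok() ? 0 : 1;
}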
@ -361,8 +361,8 @@ void IResearchFeature::prepare() {
// register 'arangosearch' view
registerViewFactory();

// register 'arangosearch' TransactionState state-change callback factory
registerTransactionStateCallback();
// register 'arangosearch' Transaction DataSource registration callback
registerTransactionDataSourceRegistrationCallback();

registerRecoveryHelper();
}

@ -32,8 +32,9 @@
#include "Basics/StaticStrings.h"
#include "Logger/Logger.h"
#include "Logger/LogMacros.h"
#include "Transaction/Methods.h"
#include "Transaction/StandaloneContext.h"
#include "Transaction/UserTransaction.h"
#include "Utils/CollectionNameResolver.h"
#include "velocypack/Iterator.h"
#include "VocBase/LogicalCollection.h"

@ -224,7 +225,7 @@ namespace iresearch {
}

static std::vector<std::string> const EMPTY;
arangodb::transaction::UserTransaction trx(
arangodb::transaction::Methods trx(
arangodb::transaction::StandaloneContext::Create(vocbase),
EMPTY, // readCollections
EMPTY, // writeCollections

@ -421,4 +422,4 @@ namespace iresearch {

// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------

@ -205,7 +205,7 @@ void ensureLink(
static std::vector<std::string> const EMPTY;
arangodb::SingleCollectionTransaction trx(
arangodb::transaction::StandaloneContext::Create(*vocbase),
col->id(),
col.get(),
arangodb::AccessMode::Type::EXCLUSIVE
);

@ -357,7 +357,7 @@ void IResearchRocksDBRecoveryHelper::PutCF(uint32_t column_family_id,
auto doc = RocksDBValue::data(value);
SingleCollectionTransaction trx(
transaction::StandaloneContext::Create(coll->vocbase()),
coll->id(),
coll.get(),
arangodb::AccessMode::Type::WRITE
);

@ -399,7 +399,7 @@ void IResearchRocksDBRecoveryHelper::DeleteCF(uint32_t column_family_id,
auto docId = RocksDBKey::documentId(RocksDBEntryType::Document, key);
SingleCollectionTransaction trx(
transaction::StandaloneContext::Create(coll->vocbase()),
coll->id(),
coll.get(),
arangodb::AccessMode::Type::WRITE
);

@ -463,4 +463,4 @@ void IResearchRocksDBRecoveryHelper::LogData(const rocksdb::Slice& blob) {

// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------

@ -50,13 +50,14 @@
#include "Basics/files.h"
#include "Logger/LogMacros.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/TransactionState.h"
#include "StorageEngine/StorageEngine.h"
#include "RestServer/DatabaseFeature.h"
#include "RestServer/DatabasePathFeature.h"
#include "RestServer/FlushFeature.h"
#include "Transaction/Methods.h"
#include "Transaction/StandaloneContext.h"
#include "Utils/CollectionNameResolver.h"
#include "Transaction/UserTransaction.h"
#include "velocypack/Builder.h"
#include "velocypack/Iterator.h"
#include "VocBase/LocalDocumentId.h"

@ -968,8 +969,16 @@ IResearchView::MemoryStore& IResearchView::activeMemoryStore() const {
return _memoryNode->_store;
}

void IResearchView::apply(arangodb::TransactionState& state) {
state.addStatusChangeCallback(_trxReadCallback);
bool IResearchView::apply(arangodb::transaction::Methods& trx) {
auto* state = trx.state();

if (!state) {
return false;
}

state->addStatusChangeCallback(_trxReadCallback);

return true;
}

int IResearchView::drop(TRI_voc_cid_t cid) {

@ -1253,7 +1262,7 @@ void IResearchView::getPropertiesVPack(
options.allowImplicitCollections = false;

try {
arangodb::transaction::UserTransaction trx(
arangodb::transaction::Methods trx(
transaction::StandaloneContext::Create(vocbase()),
collections, // readCollections
EMPTY, // writeCollections

@ -1984,8 +1993,8 @@ void IResearchView::verifyKnownCollections() {
{
static const arangodb::transaction::Options defaults;
struct State final: public arangodb::TransactionState {
State(arangodb::CollectionNameResolver const& resolver)
: arangodb::TransactionState(resolver, 0, defaults) {}
State(TRI_vocbase_t& vocbase)
: arangodb::TransactionState(vocbase, 0, defaults) {}
virtual arangodb::Result abortTransaction(
arangodb::transaction::Methods*
) override { return TRI_ERROR_NOT_IMPLEMENTED; }

@ -1998,8 +2007,7 @@ void IResearchView::verifyKnownCollections() {
virtual bool hasFailedOperations() const override { return false; }
};

arangodb::CollectionNameResolver resolver(vocbase());
State state(resolver);
State state(vocbase());

if (!appendKnownCollections(cids, *snapshot(state, true))) {
LOG_TOPIC(ERR, arangodb::iresearch::TOPIC)

@ -130,9 +130,10 @@ class IResearchView final: public arangodb::DBServerLogicalView,
using arangodb::LogicalView::name;

///////////////////////////////////////////////////////////////////////////////
/// @brief apply any changes to 'state' required by this view
/// @brief apply any changes to 'trx' required by this view
/// @return success
///////////////////////////////////////////////////////////////////////////////
void apply(arangodb::TransactionState& state);
bool apply(arangodb::transaction::Methods& trx);

////////////////////////////////////////////////////////////////////////////////
/// @brief persist the specified WAL file into permanent storage

@ -33,6 +33,7 @@
#include "RestServer/DatabasePathFeature.h"
#include "RestServer/ViewTypesFeature.h"
#include "StorageEngine/TransactionState.h"
#include "Transaction/Methods.h"
#include "VocBase/vocbase.h"

NS_LOCAL

@ -306,8 +307,16 @@ arangodb::Result IResearchViewDBServer::appendVelocyPack(
return arangodb::Result();
}

void IResearchViewDBServer::apply(arangodb::TransactionState& state) {
state.addStatusChangeCallback(_trxReadCallback);
bool IResearchViewDBServer::apply(arangodb::transaction::Methods& trx) {
auto* state = trx.state();

if (!state) {
return false;
}

state->addStatusChangeCallback(_trxReadCallback);

return true;
}

arangodb::Result IResearchViewDBServer::drop() {

@ -30,15 +30,21 @@
#include "velocypack/Builder.h"
#include "VocBase/LogicalView.h"

NS_BEGIN(arangodb)
namespace arangodb {

class DatabasePathFeature; // forward declaration
class TransactionState; // forward declaration

NS_END // arangodb
namespace transaction {

NS_BEGIN(arangodb)
NS_BEGIN(iresearch)
class Methods; // forward declaration

} // transaction

} // arangodb

namespace arangodb {
namespace iresearch {

class PrimaryKeyIndexReader; // forward declaration
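Note: the header hunk above drops the `NS_BEGIN`/`NS_END` macros in favor of plain namespace syntax and forward-declares `transaction::Methods` so the header does not need to include Transaction/Methods.h. A minimal sketch of forward-declaring a class that lives in a nested namespace (`ViewSketch` is an illustrative name):

namespace arangodb {
namespace transaction {

class Methods;  // forward declaration: enough for references and pointers

}  // namespace transaction
}  // namespace arangodb

namespace arangodb {
namespace iresearch {

class ViewSketch {
 public:
  // only a reference appears here, so the forward declaration suffices
  bool apply(arangodb::transaction::Methods& trx);
};

}  // namespace iresearch
}  // namespace arangodb

int main() { return 0; }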
@ -48,8 +54,9 @@ class IResearchViewDBServer final: public arangodb::LogicalView {

///////////////////////////////////////////////////////////////////////////////
/// @brief apply any changes to 'state' required by this view
/// @return success
///////////////////////////////////////////////////////////////////////////////
void apply(arangodb::TransactionState& state);
bool apply(arangodb::transaction::Methods& trx);

virtual arangodb::Result drop() override;

@ -126,7 +133,7 @@ class IResearchViewDBServer final: public arangodb::LogicalView {
);
};

NS_END // iresearch
NS_END // arangodb
} // iresearch
} // arangodb

#endif

@ -28,6 +28,7 @@
#include "Cluster/TraverserEngine.h"
#include "Cluster/TraverserEngineRegistry.h"
#include "RestServer/TraverserEngineRegistryFeature.h"
#include "Transaction/StandaloneContext.h"

using namespace arangodb;
using namespace arangodb::traverser;

@ -97,7 +98,8 @@ void InternalRestTraverserHandler::createEngine() {
return;
}

auto id = _registry->createNew(_vocbase, body, true);
auto ctx = transaction::StandaloneContext::Create(_vocbase);
auto id = _registry->createNew(_vocbase, std::move(ctx), body, 600.0, true);

TRI_ASSERT(id != 0);
VPackBuilder resultBuilder;

@ -1770,7 +1770,7 @@ void MMFilesCollection::open(bool ignoreErrors) {

arangodb::SingleCollectionTransaction trx(
arangodb::transaction::StandaloneContext::Create(vocbase),
cid,
_logicalCollection,
AccessMode::Type::READ
);

@ -99,9 +99,7 @@ void MMFilesCollectionKeys::create(TRI_voc_tick_t maxTick) {
// copy all document tokens into the result under the read-lock
{
auto ctx = transaction::StandaloneContext::Create(_collection->vocbase());
SingleCollectionTransaction trx(
ctx, _collection->id(), AccessMode::Type::READ
);
SingleCollectionTransaction trx(ctx, _collection, AccessMode::Type::READ);

// already locked by _guard
trx.addHint(transaction::Hints::Hint::NO_USAGE_LOCK);
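Note: the recurring `SingleCollectionTransaction` change in the remaining hunks passes the `LogicalCollection` object instead of its numeric id, which avoids an id-to-object lookup inside the transaction and makes the caller responsible for keeping the collection alive. A toy sketch of the shape of the change (stand-in types, not the real classes):

#include <cstdint>
#include <string>

struct LogicalCollection {
  uint64_t id;
  std::string name;
};

class SingleCollectionTransactionSketch {
 public:
  // new style: the caller already resolved the collection
  explicit SingleCollectionTransactionSketch(LogicalCollection* coll)
      : _collection(coll) {}

  LogicalCollection* collection() const { return _collection; }

 private:
  LogicalCollection* _collection;  // no id -> object lookup needed anymore
};

int main() {
  LogicalCollection coll{42, "docs"};
  SingleCollectionTransactionSketch trx(&coll);  // was: trx(ctx, coll.id, ...)
  return trx.collection()->id == 42 ? 0 : 1;
}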
@ -755,7 +755,7 @@ int MMFilesCollectorThread::processCollectionOperations(MMFilesCollectorCache* c

arangodb::SingleCollectionTransaction trx(
arangodb::transaction::StandaloneContext::Create(collection->vocbase()),
collection->id(),
collection,
AccessMode::Type::WRITE
);

@ -437,7 +437,7 @@ void MMFilesCompactorThread::compactDatafiles(LogicalCollection* collection,

arangodb::SingleCollectionTransaction trx(
arangodb::transaction::StandaloneContext::Create(collection->vocbase()),
collection->id(),
collection,
AccessMode::Type::WRITE
);

@ -1017,7 +1017,7 @@ void MMFilesCompactorThread::run() {
uint64_t MMFilesCompactorThread::getNumberOfDocuments(LogicalCollection* collection) {
SingleCollectionTransaction trx(
transaction::StandaloneContext::Create(_vocbase),
collection->id(),
collection,
AccessMode::Type::READ
);

@ -290,11 +290,11 @@ std::unique_ptr<transaction::ContextData> MMFilesEngine::createTransactionContex
}

std::unique_ptr<TransactionState> MMFilesEngine::createTransactionState(
CollectionNameResolver const& resolver,
TRI_vocbase_t& vocbase,
transaction::Options const& options
) {
return std::unique_ptr<TransactionState>(
new MMFilesTransactionState(resolver, TRI_NewTickServer(), options)
new MMFilesTransactionState(vocbase, TRI_NewTickServer(), options)
);
}

@ -3660,4 +3660,4 @@ WalAccess const* MMFilesEngine::walAccess() const {

// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------

@ -133,7 +133,7 @@ class MMFilesEngine final : public StorageEngine {
std::unique_ptr<TransactionManager> createTransactionManager() override;
std::unique_ptr<transaction::ContextData> createTransactionContextData() override;
std::unique_ptr<TransactionState> createTransactionState(
CollectionNameResolver const& resolver,
TRI_vocbase_t& vocbase,
transaction::Options const& options
) override;
std::unique_ptr<TransactionCollection> createTransactionCollection(

@ -622,4 +622,4 @@ class MMFilesEngine final : public StorageEngine {

}

#endif
#endif

@ -127,7 +127,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
{
SingleCollectionTransaction trx(
transaction::StandaloneContext::Create(syncer.vocbase()),
coll->id(),
coll,
AccessMode::Type::READ
);
Result res = trx.begin();

@ -154,7 +154,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
{
SingleCollectionTransaction trx(
transaction::StandaloneContext::Create(syncer.vocbase()),
coll->id(),
coll,
AccessMode::Type::READ
);
Result res = trx.begin();

@ -288,7 +288,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
// first chunk
SingleCollectionTransaction trx(
transaction::StandaloneContext::Create(syncer.vocbase()),
coll->id(),
coll,
AccessMode::Type::WRITE
);

@ -365,7 +365,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,

SingleCollectionTransaction trx(
transaction::StandaloneContext::Create(syncer.vocbase()),
coll->id(),
coll,
AccessMode::Type::WRITE
);

@ -746,4 +746,4 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
}
}

#endif
#endif

@ -51,10 +51,10 @@ static inline MMFilesLogfileManager* GetMMFilesLogfileManager() {

/// @brief transaction type
MMFilesTransactionState::MMFilesTransactionState(
CollectionNameResolver const& resolver,
TRI_vocbase_t& vocbase,
TRI_voc_tid_t tid,
transaction::Options const& options
): TransactionState(resolver, tid, options),
): TransactionState(vocbase, tid, options),
_rocksTransaction(nullptr),
_beginWritten(false),
_hasOperations(false) {}

@ -271,7 +271,7 @@ int MMFilesTransactionState::addOperation(LocalDocumentId const& documentId,
bool const wakeUpSynchronizer = isSingleOperationTransaction;

auto slotInfo = MMFilesLogfileManager::instance()->allocateAndWrite(
_resolver.vocbase().id(),
_vocbase.id(),
collection->id(),
marker,
wakeUpSynchronizer,

@ -330,7 +330,7 @@ int MMFilesTransactionState::addOperation(LocalDocumentId const& documentId,
operation.handled();

arangodb::aql::QueryCache::instance()->invalidate(
&(_resolver.vocbase()), collection->name()
&_vocbase, collection->name()
);

physical->increaseUncollectedLogfileEntries(1);

@ -357,7 +357,7 @@ int MMFilesTransactionState::addOperation(LocalDocumentId const& documentId,
_hasOperations = true;

arangodb::aql::QueryCache::instance()->invalidate(
&(_resolver.vocbase()), collection->name()
&_vocbase, collection->name()
);
}

@ -397,7 +397,7 @@ int MMFilesTransactionState::writeBeginMarker() {

try {
MMFilesTransactionMarker marker(
TRI_DF_MARKER_VPACK_BEGIN_TRANSACTION, _resolver.vocbase().id(), _id
TRI_DF_MARKER_VPACK_BEGIN_TRANSACTION, _vocbase.id(), _id
);

res = GetMMFilesLogfileManager()->allocateAndWrite(marker, false).errorCode;

@ -443,7 +443,7 @@ int MMFilesTransactionState::writeAbortMarker() {

try {
MMFilesTransactionMarker marker(
TRI_DF_MARKER_VPACK_ABORT_TRANSACTION, _resolver.vocbase().id(), _id
TRI_DF_MARKER_VPACK_ABORT_TRANSACTION, _vocbase.id(), _id
);

res = GetMMFilesLogfileManager()->allocateAndWrite(marker, false).errorCode;

@ -483,7 +483,7 @@ int MMFilesTransactionState::writeCommitMarker() {

try {
MMFilesTransactionMarker marker(
TRI_DF_MARKER_VPACK_COMMIT_TRANSACTION, _resolver.vocbase().id(), _id
TRI_DF_MARKER_VPACK_COMMIT_TRANSACTION, _vocbase.id(), _id
);

res = GetMMFilesLogfileManager()->allocateAndWrite(marker, _options.waitForSync).errorCode;

@ -61,7 +61,7 @@ class TransactionCollection;
class MMFilesTransactionState final : public TransactionState {
public:
MMFilesTransactionState(
CollectionNameResolver const& resolver,
TRI_vocbase_t& vocbase,
TRI_voc_tid_t tid,
transaction::Options const& options
);

@ -125,4 +125,4 @@ class MMFilesTransactionState final : public TransactionState {

}

#endif
#endif

@ -63,7 +63,7 @@ static void JS_RotateVocbaseCol(

SingleCollectionTransaction trx(
transaction::V8Context::Create(collection->vocbase(), true),
collection->id(),
collection,
AccessMode::Type::WRITE
);
Result res = trx.begin();

@ -302,8 +302,7 @@ int MMFilesWalRecoverState::executeSingleOperation(

try {
auto ctx = transaction::StandaloneContext::Create(*vocbase);
SingleCollectionTransaction trx(ctx, collectionId,
AccessMode::Type::WRITE);
SingleCollectionTransaction trx(ctx, collection, AccessMode::Type::WRITE);

trx.addHint(transaction::Hints::Hint::SINGLE_OPERATION);
trx.addHint(transaction::Hints::Hint::NO_BEGIN_MARKER);

@ -998,8 +997,7 @@ bool MMFilesWalRecoverState::ReplayMarker(MMFilesMarker const* marker,
return state->canContinue();
} else {
auto ctx = transaction::StandaloneContext::Create(*vocbase);
arangodb::SingleCollectionTransaction trx(ctx, collectionId,
AccessMode::Type::WRITE);
arangodb::SingleCollectionTransaction trx(ctx, col.get(), AccessMode::Type::WRITE);
std::shared_ptr<arangodb::Index> unused;
int res = physical->restoreIndex(&trx, payloadSlice, unused);

@ -1611,7 +1609,7 @@ int MMFilesWalRecoverState::fillIndexes() {

auto ctx = transaction::StandaloneContext::Create(collection->vocbase());
arangodb::SingleCollectionTransaction trx(
ctx, collection->id(), AccessMode::Type::WRITE
ctx, collection, AccessMode::Type::WRITE
);
int res = physical->fillAllIndexes(&trx);

@ -32,9 +32,9 @@
#include "Pregel/WorkerConfig.h"
#include "Scheduler/Scheduler.h"
#include "Scheduler/SchedulerFeature.h"
#include "Transaction/Context.h"
#include "Transaction/Methods.h"
#include "Transaction/StandaloneContext.h"
#include "Transaction/UserTransaction.h"
#include "Utils/CollectionNameResolver.h"
#include "Utils/OperationCursor.h"
#include "Utils/OperationOptions.h"

@ -333,13 +333,13 @@ RangeIterator<Edge<E>> GraphStore<V, E>::edgeIterator(

template <typename V, typename E>
std::unique_ptr<transaction::Methods> GraphStore<V, E>::_createTransaction() {
transaction::Options transactionOptions;
transactionOptions.waitForSync = false;
transactionOptions.allowImplicitCollections = true;
transaction::Options trxOpts;
trxOpts.waitForSync = false;
trxOpts.allowImplicitCollections = true;
auto ctx =
transaction::StandaloneContext::Create(_vocbaseGuard.database());
std::unique_ptr<transaction::Methods> trx(
new transaction::UserTransaction(ctx, {}, {}, {}, transactionOptions));
auto trx = std::unique_ptr<transaction::Methods>(
new transaction::Methods(ctx, {}, {}, {}, trxOpts));
Result res = trx->begin();

if (!res.ok()) {
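Note: several hunks replace `transaction::UserTransaction` with plain `transaction::Methods`; the subclass apparently added nothing over the base, so the commit constructs the base type directly. A sketch of the same simplification in miniature (all names are stand-ins for the real classes):

#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Options { bool waitForSync = false; };

class Methods {
 public:
  Methods(std::vector<std::string> read, std::vector<std::string> write,
          Options opts)
      : _read(std::move(read)), _write(std::move(write)), _opts(opts) {}
  bool begin() { return true; }  // toy begin()

 private:
  std::vector<std::string> _read;
  std::vector<std::string> _write;
  Options _opts;
};

// before: a do-nothing subclass that only forwarded its constructor
// class UserTransaction : public Methods { using Methods::Methods; };

int main() {
  Options opts;
  auto trx = std::make_unique<Methods>(std::vector<std::string>{},
                                       std::vector<std::string>{}, opts);
  return trx->begin() ? 0 : 1;
}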
@ -496,7 +496,7 @@ template <typename V, typename E>
|
|||
void GraphStore<V, E>::_storeVertices(std::vector<ShardID> const& globalShards,
|
||||
RangeIterator<VertexEntry>& it) {
|
||||
// transaction on one shard
|
||||
std::unique_ptr<transaction::UserTransaction> trx;
|
||||
std::unique_ptr<transaction::Methods> trx;
|
||||
PregelShard currentShard = (PregelShard)-1;
|
||||
Result res = TRI_ERROR_NO_ERROR;
|
||||
|
||||
|
@ -520,7 +520,7 @@ void GraphStore<V, E>::_storeVertices(std::vector<ShardID> const& globalShards,
|
|||
|
||||
transactionOptions.waitForSync = false;
|
||||
transactionOptions.allowImplicitCollections = false;
|
||||
trx.reset(new transaction::UserTransaction(
|
||||
trx.reset(new transaction::Methods(
|
||||
transaction::StandaloneContext::Create(_vocbaseGuard.database()),
|
||||
{},
|
||||
{shard},
|
||||
|
|
|
@ -554,7 +554,7 @@ Result DatabaseInitialSyncer::fetchCollectionDump(arangodb::LogicalCollection* c
|
|||
|
||||
SingleCollectionTransaction trx(
|
||||
transaction::StandaloneContext::Create(vocbase()),
|
||||
coll->id(),
|
||||
coll,
|
||||
AccessMode::Type::EXCLUSIVE
|
||||
);
|
||||
|
||||
|
@ -761,7 +761,7 @@ Result DatabaseInitialSyncer::fetchCollectionSync(arangodb::LogicalCollection* c
|
|||
// remote collection has no documents. now truncate our local collection
|
||||
SingleCollectionTransaction trx(
|
||||
transaction::StandaloneContext::Create(vocbase()),
|
||||
coll->id(),
|
||||
coll,
|
||||
AccessMode::Type::EXCLUSIVE
|
||||
);
|
||||
Result res = trx.begin();
|
||||
|
@ -818,7 +818,7 @@ Result DatabaseInitialSyncer::changeCollection(arangodb::LogicalCollection* col,
|
|||
int64_t DatabaseInitialSyncer::getSize(arangodb::LogicalCollection* col) {
|
||||
SingleCollectionTransaction trx(
|
||||
transaction::StandaloneContext::Create(vocbase()),
|
||||
col->id(),
|
||||
col,
|
||||
AccessMode::Type::READ
|
||||
);
|
||||
Result res = trx.begin();
|
||||
|
@ -938,7 +938,7 @@ Result DatabaseInitialSyncer::handleCollection(VPackSlice const& parameters,
|
|||
|
||||
SingleCollectionTransaction trx(
|
||||
transaction::StandaloneContext::Create(vocbase()),
|
||||
col->id(),
|
||||
col,
|
||||
AccessMode::Type::EXCLUSIVE
|
||||
);
|
||||
Result res = trx.begin();
|
||||
|
@ -1062,7 +1062,7 @@ Result DatabaseInitialSyncer::handleCollection(VPackSlice const& parameters,
|
|||
try {
|
||||
SingleCollectionTransaction trx(
|
||||
transaction::StandaloneContext::Create(vocbase()),
|
||||
col->id(),
|
||||
col,
|
||||
AccessMode::Type::EXCLUSIVE
|
||||
);
|
||||
|
||||
|
|
|
@ -39,7 +39,6 @@
|
|||
#include "StorageEngine/StorageEngine.h"
|
||||
#include "Transaction/Hints.h"
|
||||
#include "Utils/CollectionGuard.h"
|
||||
#include "Utils/SingleCollectionTransaction.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/voc-types.h"
|
||||
#include "VocBase/vocbase.h"
|
||||
|
|
|
@ -44,7 +44,6 @@
|
|||
#include "Transaction/StandaloneContext.h"
|
||||
#include "Utils/CollectionGuard.h"
|
||||
#include "Utils/OperationOptions.h"
|
||||
#include "Utils/SingleCollectionTransaction.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
#include "VocBase/voc-types.h"
|
||||
|
|
|
@ -593,7 +593,7 @@ Result Syncer::createCollection(
|
|||
TRI_ASSERT(!simulate32Client() || col->guid() == col->name());
|
||||
SingleCollectionTransaction trx(
|
||||
transaction::StandaloneContext::Create(vocbase),
|
||||
col->id(),
|
||||
col,
|
||||
AccessMode::Type::WRITE
|
||||
);
|
||||
Result res = trx.begin();
|
||||
|
@ -696,7 +696,6 @@ Result Syncer::createIndex(VPackSlice const& slice) {
|
|||
}
|
||||
|
||||
auto col = resolveCollection(*vocbase, slice);
|
||||
|
||||
if (col == nullptr) {
|
||||
return Result(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND,
|
||||
"did not find collection for index");
|
||||
|
@ -712,7 +711,7 @@ Result Syncer::createIndex(VPackSlice const& slice) {
|
|||
try {
|
||||
SingleCollectionTransaction trx(
|
||||
transaction::StandaloneContext::Create(*vocbase),
|
||||
col->id(),
|
||||
col.get(),
|
||||
AccessMode::Type::WRITE
|
||||
);
|
||||
Result res = trx.begin();
|
||||
|
|
|
@ -375,7 +375,7 @@ Result TailingSyncer::processDocument(TRI_replication_operation_e type,
|
|||
// update the apply tick for all standalone operations
|
||||
SingleCollectionTransaction trx(
|
||||
transaction::StandaloneContext::Create(*vocbase),
|
||||
coll->id(),
|
||||
coll,
|
||||
AccessMode::Type::EXCLUSIVE
|
||||
);
|
||||
|
||||
|
|
|
@ -345,7 +345,7 @@ void RestCollectionHandler::handleCommandPut() {
|
|||
|
||||
auto ctx = transaction::StandaloneContext::Create(_vocbase);
|
||||
SingleCollectionTransaction trx(
|
||||
ctx, coll->id(), AccessMode::Type::EXCLUSIVE
|
||||
ctx, coll, AccessMode::Type::EXCLUSIVE
|
||||
);
|
||||
|
||||
res = trx.begin();
|
||||
|
@ -396,7 +396,7 @@ void RestCollectionHandler::handleCommandPut() {
|
|||
} else if (sub == "rotate") {
|
||||
auto ctx = transaction::StandaloneContext::Create(_vocbase);
|
||||
SingleCollectionTransaction trx(
|
||||
ctx, coll->id(), AccessMode::Type::WRITE
|
||||
ctx, coll, AccessMode::Type::WRITE
|
||||
);
|
||||
|
||||
res = trx.begin();
|
||||
|
@ -520,7 +520,7 @@ void RestCollectionHandler::collectionRepresentation(
|
|||
|
||||
if (showCount) {
|
||||
auto ctx = transaction::StandaloneContext::Create(_vocbase);
|
||||
SingleCollectionTransaction trx(ctx, coll->id(), AccessMode::Type::READ);
|
||||
SingleCollectionTransaction trx(ctx, coll, AccessMode::Type::READ);
|
||||
Result res = trx.begin();
|
||||
|
||||
if (res.fail()) {
|
||||
|
|
|
@ -923,7 +923,7 @@ Result RestReplicationHandler::processRestoreCollection(
|
|||
// instead, truncate them
|
||||
auto ctx = transaction::StandaloneContext::Create(_vocbase);
|
||||
SingleCollectionTransaction trx(
|
||||
ctx, col->id(), AccessMode::Type::EXCLUSIVE
|
||||
ctx, col, AccessMode::Type::EXCLUSIVE
|
||||
);
|
||||
|
||||
// to turn off waitForSync!
|
||||
|
@ -2072,7 +2072,7 @@ void RestReplicationHandler::handleCommandAddFollower() {
|
|||
// Short cut for the case that the collection is empty
|
||||
auto ctx = transaction::StandaloneContext::Create(_vocbase);
|
||||
SingleCollectionTransaction trx(
|
||||
ctx, col->id(), AccessMode::Type::EXCLUSIVE
|
||||
ctx, col.get(), AccessMode::Type::EXCLUSIVE
|
||||
);
|
||||
auto res = trx.begin();
|
||||
|
||||
|
@ -2284,8 +2284,7 @@ void RestReplicationHandler::handleCommandHoldReadLockCollection() {
|
|||
|
||||
auto ctx = transaction::StandaloneContext::Create(_vocbase);
|
||||
auto trx =
|
||||
std::make_shared<SingleCollectionTransaction>(ctx, col->id(), access);
|
||||
|
||||
std::make_shared<SingleCollectionTransaction>(ctx, col.get(), access);
|
||||
trx->addHint(transaction::Hints::Hint::LOCK_ENTIRELY);
|
||||
|
||||
Result res = trx->begin();
|
||||
|
|
|
@ -1660,7 +1660,7 @@ uint64_t RocksDBCollection::recalculateCounts() {
|
|||
auto ctx =
|
||||
transaction::StandaloneContext::Create(_logicalCollection->vocbase());
|
||||
SingleCollectionTransaction trx(
|
||||
ctx, _logicalCollection->id(), AccessMode::Type::EXCLUSIVE
|
||||
ctx, _logicalCollection, AccessMode::Type::EXCLUSIVE
|
||||
);
|
||||
auto res = trx.begin();
|
||||
|
||||
|
@ -1795,7 +1795,7 @@ void RocksDBCollection::recalculateIndexEstimates(
|
|||
auto ctx =
|
||||
transaction::StandaloneContext::Create(_logicalCollection->vocbase());
|
||||
arangodb::SingleCollectionTransaction trx(
|
||||
ctx, _logicalCollection->id(), AccessMode::Type::EXCLUSIVE
|
||||
ctx, _logicalCollection, AccessMode::Type::EXCLUSIVE
|
||||
);
|
||||
auto res = trx.begin();
|
||||
|
||||
|
|
|
@ -658,11 +658,11 @@ std::unique_ptr<transaction::ContextData> RocksDBEngine::createTransactionContex
|
|||
}
|
||||
|
||||
std::unique_ptr<TransactionState> RocksDBEngine::createTransactionState(
|
||||
CollectionNameResolver const& resolver,
|
||||
TRI_vocbase_t& vocbase,
|
||||
transaction::Options const& options
|
||||
) {
|
||||
return std::unique_ptr<TransactionState>(
|
||||
new RocksDBTransactionState(resolver, TRI_NewTickServer(), options)
|
||||
new RocksDBTransactionState(vocbase, TRI_NewTickServer(), options)
|
||||
);
|
||||
}
|
||||
|
||||
|
|
|
@ -105,7 +105,7 @@ class RocksDBEngine final : public StorageEngine {
|
|||
std::unique_ptr<TransactionManager> createTransactionManager() override;
|
||||
std::unique_ptr<transaction::ContextData> createTransactionContextData() override;
|
||||
std::unique_ptr<TransactionState> createTransactionState(
|
||||
CollectionNameResolver const& resolver,
|
||||
TRI_vocbase_t& vocbase,
|
||||
transaction::Options const& options
|
||||
) override;
|
||||
std::unique_ptr<TransactionCollection> createTransactionCollection(
|
||||
|
@ -472,4 +472,4 @@ class RocksDBEngine final : public StorageEngine {
|
|||
|
||||
} // namespace arangodb
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@@ -422,7 +422,7 @@ Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
     // first chunk
     SingleCollectionTransaction trx(
       transaction::StandaloneContext::Create(syncer.vocbase()),
-      col->id(),
+      col,
       AccessMode::Type::EXCLUSIVE
     );
@@ -480,7 +480,7 @@ Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,

     SingleCollectionTransaction trx(
       transaction::StandaloneContext::Create(syncer.vocbase()),
-      col->id(),
+      col,
       AccessMode::Type::EXCLUSIVE
     );
@@ -37,8 +37,8 @@
 #include "RocksDBEngine/RocksDBMethods.h"
 #include "RocksDBEngine/RocksDBTransactionState.h"
 #include "Transaction/Helpers.h"
+#include "Transaction/Methods.h"
 #include "Transaction/StandaloneContext.h"
-#include "Transaction/UserTransaction.h"
 #include "Utils/DatabaseGuard.h"
 #include "Utils/ExecContext.h"
 #include "VocBase/ticks.h"
@@ -146,7 +146,7 @@ void RocksDBReplicationContext::internalBind(
   auto ctx = transaction::StandaloneContext::Create(vocbase);

   _trx.reset(
-      new transaction::UserTransaction(ctx, {}, {}, {}, transactionOptions));
+      new transaction::Methods(ctx, {}, {}, {}, transactionOptions));

   auto state = RocksDBTransactionState::toState(_trx.get());
@@ -57,10 +57,10 @@ struct RocksDBTransactionData final : public TransactionData {};

 /// @brief transaction type
 RocksDBTransactionState::RocksDBTransactionState(
-    CollectionNameResolver const& resolver,
+    TRI_vocbase_t& vocbase,
     TRI_voc_tid_t tid,
     transaction::Options const& options
-): TransactionState(resolver, tid, options),
+): TransactionState(vocbase, tid, options),
    _rocksTransaction(nullptr),
    _snapshot(nullptr),
    _rocksWriteOptions(),
@@ -192,7 +192,7 @@ void RocksDBTransactionState::createTransaction() {
   // add transaction begin marker
   if (!hasHint(transaction::Hints::Hint::SINGLE_OPERATION)) {
     auto header =
-        RocksDBLogValue::BeginTransaction(_resolver.vocbase().id(), _id);
+        RocksDBLogValue::BeginTransaction(_vocbase.id(), _id);

     _rocksTransaction->PutLogData(header.slice());
 #ifdef ARANGODB_ENABLE_MAINTAINER_MODE
@@ -235,7 +235,7 @@ arangodb::Result RocksDBTransactionState::internalCommit() {
   if (!hasHint(transaction::Hints::Hint::SINGLE_OPERATION)) {
     // add custom commit marker to increase WAL tailing reliability
     auto logValue =
-        RocksDBLogValue::CommitTransaction(_resolver.vocbase().id(), id());
+        RocksDBLogValue::CommitTransaction(_vocbase.id(), id());

     _rocksTransaction->PutLogData(logValue.slice());
 #ifdef ARANGODB_ENABLE_MAINTAINER_MODE
@@ -394,7 +394,7 @@ void RocksDBTransactionState::prepareOperation(TRI_voc_cid_t cid, TRI_voc_rid_t
     case TRI_VOC_DOCUMENT_OPERATION_UPDATE:
     case TRI_VOC_DOCUMENT_OPERATION_REPLACE: {
       auto logValue =
-          RocksDBLogValue::SinglePut(_resolver.vocbase().id(), cid);
+          RocksDBLogValue::SinglePut(_vocbase.id(), cid);

       _rocksTransaction->PutLogData(logValue.slice());
 #ifdef ARANGODB_ENABLE_MAINTAINER_MODE
@@ -407,7 +407,7 @@ void RocksDBTransactionState::prepareOperation(TRI_voc_cid_t cid, TRI_voc_rid_t
       TRI_ASSERT(rid != 0);

       auto logValue =
-          RocksDBLogValue::SingleRemoveV2(_resolver.vocbase().id(), cid, rid);
+          RocksDBLogValue::SingleRemoveV2(_vocbase.id(), cid, rid);

       _rocksTransaction->PutLogData(logValue.slice());
 #ifdef ARANGODB_ENABLE_MAINTAINER_MODE
@@ -457,7 +457,7 @@ Result RocksDBTransactionState::addOperation(
   // clear the query cache for this collection
   if (arangodb::aql::QueryCache::instance()->mayBeActive()) {
     arangodb::aql::QueryCache::instance()->invalidate(
-      &(_resolver.vocbase()), collection->collectionName()
+      &_vocbase, collection->collectionName()
     );
   }
@@ -77,7 +77,7 @@ class RocksDBTransactionState final : public TransactionState {

  public:
   RocksDBTransactionState(
-    CollectionNameResolver const& resolver,
+    TRI_vocbase_t& vocbase,
     TRI_voc_tid_t tid,
     transaction::Options const& options
   );
@@ -221,4 +221,4 @@ class RocksDBKeyLeaser {

 }  // namespace arangodb

-#endif
+#endif
@@ -96,7 +96,7 @@ class StorageEngine : public application_features::ApplicationFeature {
   virtual std::unique_ptr<TransactionManager> createTransactionManager() = 0;
   virtual std::unique_ptr<transaction::ContextData> createTransactionContextData() = 0;
   virtual std::unique_ptr<TransactionState> createTransactionState(
-      CollectionNameResolver const& resolver,
+      TRI_vocbase_t& vocbase,
       transaction::Options const& options
   ) = 0;
   virtual std::unique_ptr<TransactionCollection> createTransactionCollection(
@@ -539,4 +539,4 @@ class StorageEngine : public application_features::ApplicationFeature {

 }

-#endif
+#endif
@@ -37,17 +37,17 @@ using namespace arangodb;

 /// @brief transaction type
 TransactionState::TransactionState(
-    CollectionNameResolver const& resolver,
+    TRI_vocbase_t& vocbase,
     TRI_voc_tid_t tid,
     transaction::Options const& options
 ):
+    _vocbase(vocbase),
     _id(tid),
     _type(AccessMode::Type::READ),
     _status(transaction::Status::CREATED),
     _arena(),
     _collections{_arena}, // assign arena to vector
     _serverRole(ServerState::instance()->getRole()),
-    _resolver(resolver),
     _hints(),
     _nestingLevel(0),
     _options(options) {}
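
Since the state now stores the vocbase reference itself, derived states reach database properties without a resolver round-trip. A sketch of the new construction path, mirroring RocksDBEngine::createTransactionState() earlier in this diff; it assumes a vocbase reference is in scope:

// Sketch only: constructing the RocksDB state with the new argument order.
transaction::Options options;
auto state = std::make_unique<RocksDBTransactionState>(
    vocbase, TRI_NewTickServer(), options);

// The database id and name now come straight from the stored reference,
// e.g. for WAL markers and permission checks:
TRI_voc_tick_t databaseId = state->vocbase().id();
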
@@ -119,6 +119,7 @@ TransactionState::Cookie::ptr TransactionState::cookie(

 /// @brief add a collection to a transaction
 int TransactionState::addCollection(TRI_voc_cid_t cid,
+                                    std::string const& cname,
                                     AccessMode::Type accessType,
                                     int nestingLevel, bool force) {
   LOG_TRX(this, nestingLevel) << "adding collection " << cid;
@@ -148,7 +149,7 @@ int TransactionState::addCollection(TRI_voc_cid_t cid,
                 "AccessMode::Type total order fail");
     // we may need to recheck permissions here
     if (trxCollection->accessType() < accessType) {
-      int res = checkCollectionPermission(cid, accessType);
+      int res = checkCollectionPermission(cid, cname, accessType);
       if (res != TRI_ERROR_NO_ERROR) {
         return res;
       }
@@ -170,7 +171,7 @@ int TransactionState::addCollection(TRI_voc_cid_t cid,
   }

   // now check the permissions
-  int res = checkCollectionPermission(cid, accessType);
+  int res = checkCollectionPermission(cid, cname, accessType);
   if (res != TRI_ERROR_NO_ERROR) {
     return res;
   }
@@ -324,6 +325,7 @@ bool TransactionState::isExclusiveTransactionOnSingleCollection() const {
 }

 int TransactionState::checkCollectionPermission(TRI_voc_cid_t cid,
+                                                std::string const& cname,
                                                 AccessMode::Type accessType) const {
   ExecContext const* exec = ExecContext::CURRENT;
@@ -336,9 +338,7 @@ int TransactionState::checkCollectionPermission(TRI_voc_cid_t cid,
     return TRI_ERROR_ARANGO_READ_ONLY;
   }

-  std::string const colName = _resolver.getCollectionNameCluster(cid);
-  auto level = exec->collectionAuthLevel(_resolver.vocbase().name(), colName);
-
+  auto level = exec->collectionAuthLevel(_vocbase.name(), cname);
   TRI_ASSERT(level != auth::Level::UNDEFINED); // not allowed here

   if (level == auth::Level::NONE) {
@@ -352,7 +352,7 @@ int TransactionState::checkCollectionPermission(TRI_voc_cid_t cid,

   if (level == auth::Level::RO && collectionWillWrite) {
     LOG_TOPIC(TRACE, Logger::AUTHORIZATION) << "User " << exec->user()
-      << " has no write right for collection " << colName;
+      << " has no write right for collection " << cname;

     return TRI_ERROR_ARANGO_READ_ONLY;
   }
@@ -394,14 +394,12 @@ void TransactionState::clearQueryCache() {
     }

     if (!collections.empty()) {
-      arangodb::aql::QueryCache::instance()->invalidate(
-        &(_resolver.vocbase()), collections
-      );
+      arangodb::aql::QueryCache::instance()->invalidate(&_vocbase, collections);
     }
   } catch (...) {
     // in case something goes wrong, we have to remove all queries from the
     // cache
-    arangodb::aql::QueryCache::instance()->invalidate(&(_resolver.vocbase()));
+    arangodb::aql::QueryCache::instance()->invalidate(&_vocbase);
   }
 }
@@ -80,7 +80,7 @@ class TransactionState {
   TransactionState& operator=(TransactionState const&) = delete;

   TransactionState(
-    CollectionNameResolver const& resolver,
+    TRI_vocbase_t& vocbase,
     TRI_voc_tid_t tid,
     transaction::Options const& options
   );
@@ -102,12 +102,12 @@ class TransactionState {
   bool isDBServer() const { return ServerState::isDBServer(_serverRole); }
   bool isCoordinator() const { return ServerState::isCoordinator(_serverRole); }

-  transaction::Options& options() { return _options; }
-  transaction::Options const& options() const { return _options; }
-  TRI_vocbase_t& vocbase() const { return _resolver.vocbase(); }
-  TRI_voc_tid_t id() const { return _id; }
-  transaction::Status status() const { return _status; }
-  bool isRunning() const { return _status == transaction::Status::RUNNING; }
+  inline transaction::Options& options() { return _options; }
+  inline transaction::Options const& options() const { return _options; }
+  inline TRI_vocbase_t& vocbase() const { return _vocbase; }
+  inline TRI_voc_tid_t id() const { return _id; }
+  inline transaction::Status status() const { return _status; }
+  inline bool isRunning() const { return _status == transaction::Status::RUNNING; }

   int increaseNesting() { return ++_nestingLevel; }
   int decreaseNesting() {
@@ -142,8 +142,8 @@ class TransactionState {
                                   AccessMode::Type accessType);

   /// @brief add a collection to a transaction
-  int addCollection(TRI_voc_cid_t cid, AccessMode::Type accessType,
-                    int nestingLevel, bool force);
+  int addCollection(TRI_voc_cid_t cid, std::string const& cname,
+                    AccessMode::Type accessType, int nestingLevel, bool force);

   /// @brief make sure all declared collections are used & locked
   Result ensureCollections(int nestingLevel = 0);
@@ -160,6 +160,7 @@ class TransactionState {
   /// @brief release collection locks for a transaction
   int unuseCollections(int nestingLevel);

+  /// FIXME delete, server-based locking should take care of this
   int lockCollections();

   /// @brief whether or not a transaction consists of a single operation
@@ -201,7 +202,10 @@ class TransactionState {
   /// @brief whether or not a transaction is an exclusive transaction on a single collection
   bool isExclusiveTransactionOnSingleCollection() const;

-  int checkCollectionPermission(TRI_voc_cid_t cid, AccessMode::Type) const;
+  /// @brief check if current user can access this collection
+  int checkCollectionPermission(TRI_voc_cid_t cid,
+                                std::string const& cname,
+                                AccessMode::Type) const;

   /// @brief release collection locks for a transaction
   int releaseCollections();
@@ -209,6 +213,9 @@ class TransactionState {
   /// @brief clear the query cache for all collections that were modified by
   /// the transaction
   void clearQueryCache();

+  /// @brief vocbase for this transaction
+  TRI_vocbase_t& _vocbase;
+
   /// @brief local trx id
   TRI_voc_tid_t const _id;
@@ -224,18 +231,18 @@ class TransactionState {

   ServerState::RoleEnum const _serverRole;  // role of the server

-  CollectionNameResolver const& _resolver;
-
   transaction::Hints _hints;  // hints;
   int _nestingLevel;

   transaction::Options _options;

  private:
-  std::map<void const*, Cookie::ptr> _cookies; // a collection of stored cookies
-  std::vector<StatusChangeCallback const*> _statusChangeCallbacks; // functrs to call for status change (pointer to allow for use of std::vector)
+  /// a collection of stored cookies
+  std::map<void const*, Cookie::ptr> _cookies;
+  /// functrs to call for status change (pointer to allow for use of std::vector)
+  std::vector<StatusChangeCallback const*> _statusChangeCallbacks;
 };

 }

-#endif
+#endif
@@ -74,22 +74,53 @@ using namespace arangodb::transaction::helpers;
 namespace {

 // wrap vector inside a static function to ensure proper initialization order
-std::vector<arangodb::transaction::Methods::StateRegistrationCallback>& getStateRegistrationCallbacks() {
-  static std::vector<arangodb::transaction::Methods::StateRegistrationCallback> callbacks;
+std::vector<arangodb::transaction::Methods::DataSourceRegistrationCallback>& getDataSourceRegistrationCallbacks() {
+  static std::vector<arangodb::transaction::Methods::DataSourceRegistrationCallback> callbacks;

   return callbacks;
 }

+/// @return the status change callbacks stored in state
+///         or nullptr if none and !create
+std::vector<arangodb::transaction::Methods::StatusChangeCallback>* getStatusChangeCallbacks(
+    arangodb::TransactionState& state,
+    bool create = false
+) {
+  struct CookieType: public arangodb::TransactionState::Cookie {
+    std::vector<arangodb::transaction::Methods::StatusChangeCallback> _callbacks;
+  };
+
+  static const int key = 0; // arbitrary location in memory, common for all
+
+  // TODO FIXME find a better way to look up a ViewState
+  #ifdef ARANGODB_ENABLE_MAINTAINER_MODE
+    auto* cookie = dynamic_cast<CookieType*>(state.cookie(&key));
+  #else
+    auto* cookie = static_cast<CookieType*>(state.cookie(&key));
+  #endif
+
+  if (!cookie && create) {
+    auto ptr = std::make_unique<CookieType>();
+
+    cookie = ptr.get();
+    state.cookie(&key, std::move(ptr));
+  }
+
+  return cookie ? &(cookie->_callbacks) : nullptr;
+}
+
 /// @brief notify callbacks of association of 'cid' with this TransactionState
 /// @note done separately from addCollection() to avoid creating a
 ///       TransactionCollection instance for virtual entities, e.g. View
-arangodb::Result applyStateRegistrationCallbacks(
+arangodb::Result applyDataSourceRegistrationCallbacks(
     LogicalDataSource& dataSource,
-    arangodb::TransactionState& state
+    arangodb::transaction::Methods& trx
 ) {
-  for (auto& callback: getStateRegistrationCallbacks()) {
+  for (auto& callback: getDataSourceRegistrationCallbacks()) {
+    TRI_ASSERT(callback); // addDataSourceRegistrationCallback(...) ensures valid
+
     try {
-      auto res = callback(dataSource, state);
+      auto res = callback(dataSource, trx);

       if (!res.ok()) {
         return res;
@@ -102,6 +133,50 @@ arangodb::Result applyStateRegistrationCallbacks(
   return arangodb::Result();
 }

+/// @brief notify callbacks of association of 'cid' with this TransactionState
+/// @note done separately from addCollection() to avoid creating a
+///       TransactionCollection instance for virtual entities, e.g. View
+void applyStatusChangeCallbacks(
+    arangodb::transaction::Methods& trx,
+    arangodb::transaction::Status status
+) noexcept {
+  TRI_ASSERT(
+    arangodb::transaction::Status::ABORTED == status
+    || arangodb::transaction::Status::COMMITTED == status
+    || arangodb::transaction::Status::RUNNING == status
+  );
+  TRI_ASSERT(
+    !trx.state() // for embeded transactions status is not always updated
+    || (trx.state()->isTopLevelTransaction() && trx.state()->status() == status)
+    || (!trx.state()->isTopLevelTransaction()
+        && arangodb::transaction::Status::RUNNING == trx.state()->status()
+       )
+  );
+
+  auto* state = trx.state();
+
+  if (!state) {
+    return; // nothing to apply
+  }
+
+  auto* callbacks = getStatusChangeCallbacks(*state);
+
+  if (!callbacks) {
+    return; // no callbacks to apply
+  }
+
+  // no need to lock since transactions are single-threaded
+  for (auto& callback: *callbacks) {
+    TRI_ASSERT(callback); // addStatusChangeCallback(...) ensures valid
+
+    try {
+      callback(trx, status);
+    } catch (...) {
+      // we must not propagate exceptions from here
+    }
+  }
+}
+
 static void throwCollectionNotFound(char const* name) {
   if (name == nullptr) {
     THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND);
@@ -162,14 +237,37 @@ static OperationResult emptyResult(OperationOptions const& options) {
 }
 } // namespace

-/*static*/ void transaction::Methods::addStateRegistrationCallback(
-    StateRegistrationCallback callback
+/*static*/ void transaction::Methods::addDataSourceRegistrationCallback(
+    DataSourceRegistrationCallback const& callback
 ) {
-  getStateRegistrationCallbacks().emplace_back(callback);
+  if (callback) {
+    getDataSourceRegistrationCallbacks().emplace_back(callback);
+  }
 }

-/*static*/ void transaction::Methods::clearStateRegistrationCallbacks() {
-  getStateRegistrationCallbacks().clear();
+bool transaction::Methods::addStatusChangeCallback(
+    StatusChangeCallback const& callback
+) {
+  if (!callback) {
+    return true; // nothing to call back
+  }
+
+  if (!_state) {
+    return false; // nothing to add to
+  }
+
+  auto* statusChangeCallbacks = getStatusChangeCallbacks(*_state, true);
+
+  TRI_ASSERT(nullptr != statusChangeCallbacks); // 'create' was specified
+
+  // no need to lock since transactions are single-threaded
+  statusChangeCallbacks->emplace_back(callback);
+
+  return true;
+}
+
+/*static*/ void transaction::Methods::clearDataSourceRegistrationCallbacks() {
+  getDataSourceRegistrationCallbacks().clear();
 }

 /// @brief Get the field names of the used index
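
The status-change callbacks are not a member of TransactionState: they live in the state's generic cookie map, keyed by the address of a function-local static (see getStatusChangeCallbacks above). The idiom generalizes to attaching any per-transaction auxiliary data without extending the state class. A self-contained illustration with simplified stand-in types, not the actual ArangoDB classes:

#include <map>
#include <memory>
#include <vector>

// Simplified stand-ins for TransactionState::Cookie and the cookie map.
struct Cookie { virtual ~Cookie() = default; };

struct State {
  std::map<void const*, std::unique_ptr<Cookie>> cookies;
};

// Find or create a callback vector attached to 'state'. The address of the
// local static 'key' is unique per function, so different subsystems can
// attach their own cookies to the same state without colliding.
std::vector<void (*)()>* getCallbacks(State& state, bool create) {
  struct CookieType : public Cookie {
    std::vector<void (*)()> callbacks;
  };
  static const int key = 0;  // arbitrary location in memory, common for all

  auto it = state.cookies.find(&key);
  if (it == state.cookies.end()) {
    if (!create) {
      return nullptr;
    }
    it = state.cookies.emplace(&key, std::make_unique<CookieType>()).first;
  }
  return &static_cast<CookieType*>(it->second.get())->callbacks;
}
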
@@ -633,10 +731,11 @@ transaction::Methods::Methods(
     StorageEngine* engine = EngineSelectorFeature::ENGINE;

     _state = engine->createTransactionState(
-      _transactionContextPtr->resolver(), options
+      _transactionContextPtr->vocbase(), options
     ).release();
     TRI_ASSERT(_state != nullptr);

+    TRI_ASSERT(_state->isTopLevelTransaction());

     // register the transaction in the context
     _transactionContextPtr->registerTransaction(_state);
   }
@@ -644,6 +743,26 @@ transaction::Methods::Methods(
   TRI_ASSERT(_state != nullptr);
 }

+/// @brief create the transaction, used to be UserTransaction
+transaction::Methods::Methods(std::shared_ptr<transaction::Context> const& ctx,
+                              std::vector<std::string> const& readCollections,
+                              std::vector<std::string> const& writeCollections,
+                              std::vector<std::string> const& exclusiveCollections,
+                              transaction::Options const& options)
+    : transaction::Methods(ctx, options) {
+  addHint(transaction::Hints::Hint::LOCK_ENTIRELY);
+
+  for (auto const& it : exclusiveCollections) {
+    addCollection(it, AccessMode::Type::EXCLUSIVE);
+  }
+  for (auto const& it : writeCollections) {
+    addCollection(it, AccessMode::Type::WRITE);
+  }
+  for (auto const& it : readCollections) {
+    addCollection(it, AccessMode::Type::READ);
+  }
+}
+
 /// @brief destroy the transaction
 transaction::Methods::~Methods() {
   if (_state->isEmbeddedTransaction()) {
@@ -778,10 +897,17 @@ Result transaction::Methods::begin() {
     if (_state->isTopLevelTransaction()) {
       _state->updateStatus(transaction::Status::RUNNING);
     }
-    return Result();
+  } else {
+    auto res = _state->beginTransaction(_localHints);
+
+    if (!res.ok()) {
+      return res;
+    }
   }

-  return _state->beginTransaction(_localHints);
+  applyStatusChangeCallbacks(*this, Status::RUNNING);
+
+  return Result();
 }

 /// @brief commit / finish the transaction
@@ -807,10 +933,17 @@ Result transaction::Methods::commit() {
     if (_state->isTopLevelTransaction()) {
       _state->updateStatus(transaction::Status::COMMITTED);
     }
-    return Result();
+  } else {
+    auto res = _state->commitTransaction(this);
+
+    if (!res.ok()) {
+      return res;
+    }
   }

-  return _state->commitTransaction(this);
+  applyStatusChangeCallbacks(*this, Status::COMMITTED);
+
+  return Result();
 }

 /// @brief abort the transaction
@@ -824,11 +957,17 @@ Result transaction::Methods::abort() {
     if (_state->isTopLevelTransaction()) {
      _state->updateStatus(transaction::Status::ABORTED);
     }
-
-    return Result();
+  } else {
+    auto res = _state->abortTransaction(this);
+
+    if (!res.ok()) {
+      return res;
+    }
   }

-  return _state->abortTransaction(this);
+  applyStatusChangeCallbacks(*this, Status::ABORTED);
+
+  return Result();
 }

 /// @brief finish a transaction (commit or abort), based on the previous state
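
Taken together, the three methods above now share one shape: an embedded transaction only mirrors the status, a top-level transaction delegates to the state object, and on success the registered observers fire exactly once with the new status. A usage sketch, assuming a context ctx and a collection named "example" (both illustrative, not from the diff):

// Sketch only: observing the full lifecycle of a top-level transaction.
transaction::Methods trx(ctx, {}, {"example"}, {}, transaction::Options());

trx.addStatusChangeCallback(
    [](transaction::Methods& t, transaction::Status& status) {
      // called with RUNNING after begin(), then once more with
      // COMMITTED or ABORTED depending on how the transaction ends
    });

Result res = trx.begin();   // fires the callback with Status::RUNNING
if (res.ok()) {
  res = trx.commit();       // fires the callback with Status::COMMITTED
} else {
  trx.abort();              // fires the callback with Status::ABORTED
}
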
@@ -920,18 +1059,18 @@ OperationResult transaction::Methods::anyLocal(
 }

 TRI_voc_cid_t transaction::Methods::addCollectionAtRuntime(
-    TRI_voc_cid_t cid, std::string const& collectionName,
+    TRI_voc_cid_t cid, std::string const& cname,
     AccessMode::Type type) {
   auto collection = trxCollection(cid);

   if (collection == nullptr) {
-    int res = _state->addCollection(cid, type, _state->nestingLevel(), true);
+    int res = _state->addCollection(cid, cname, type, _state->nestingLevel(), true);

     if (res != TRI_ERROR_NO_ERROR) {
       if (res == TRI_ERROR_TRANSACTION_UNREGISTERED_COLLECTION) {
         // special error message to indicate which collection was undeclared
         THROW_ARANGO_EXCEPTION_MESSAGE(
-            res, std::string(TRI_errno_string(res)) + ": " + collectionName +
+            res, std::string(TRI_errno_string(res)) + ": " + cname +
                      " [" + AccessMode::typeString(type) + "]");
       }
       THROW_ARANGO_EXCEPTION(res);
@@ -943,7 +1082,7 @@ TRI_voc_cid_t transaction::Methods::addCollectionAtRuntime(
       THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND);
     }

-    auto result = applyStateRegistrationCallbacks(*dataSource, *_state);
+    auto result = applyDataSourceRegistrationCallbacks(*dataSource, *this);

     if (!result.ok()) {
       THROW_ARANGO_EXCEPTION(result.errorNumber());
@@ -953,7 +1092,7 @@ TRI_voc_cid_t transaction::Methods::addCollectionAtRuntime(
     collection = trxCollection(cid);

     if (collection == nullptr) {
-      throwCollectionNotFound(collectionName.c_str());
+      throwCollectionNotFound(cname.c_str());
     }
   }
@@ -1845,7 +1984,7 @@ OperationResult transaction::Methods::modifyLocal(
         return OperationResult(TRI_ERROR_CLUSTER_SHARD_FOLLOWER_REFUSES_OPERATION);
       }
     }
-  }
+  } // isDBServer - early block

   if (options.returnOld || options.returnNew) {
     pinData(cid); // will throw when it fails
@@ -2152,7 +2291,7 @@ OperationResult transaction::Methods::removeLocal(
         return OperationResult(TRI_ERROR_CLUSTER_SHARD_FOLLOWER_REFUSES_OPERATION);
       }
     }
-  }
+  } // isDBServer - early block

   if (options.returnOld) {
     pinData(cid); // will throw when it fails
@@ -2474,7 +2613,7 @@ OperationResult transaction::Methods::truncateLocal(
         return OperationResult(TRI_ERROR_CLUSTER_SHARD_FOLLOWER_REFUSES_OPERATION);
       }
     }
-  }
+  } // isDBServer - early block

   pinData(cid); // will throw when it fails
@@ -2952,7 +3091,7 @@ arangodb::LogicalCollection* transaction::Methods::documentCollection(
 }

 /// @brief add a collection by id, with the name supplied
-Result transaction::Methods::addCollection(TRI_voc_cid_t cid, char const* name,
+Result transaction::Methods::addCollection(TRI_voc_cid_t cid, std::string const& cname,
                                            AccessMode::Type type) {
   if (_state == nullptr) {
     THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
@@ -2969,51 +3108,60 @@ Result transaction::Methods::addCollection(TRI_voc_cid_t cid, char const* name,
         "cannot add collection to committed or aborted transaction");
   }

+  if (_state->isTopLevelTransaction()
+      && status != transaction::Status::CREATED) {
+    // transaction already started?
+    THROW_ARANGO_EXCEPTION_MESSAGE(
+      TRI_ERROR_TRANSACTION_INTERNAL,
+      "cannot add collection to a previously started top-level transaction"
+    );
+  }
+
   if (cid == 0) {
     // invalid cid
-    throwCollectionNotFound(name);
+    throwCollectionNotFound(cname.c_str());
   }

+  auto addCollection = [this, &cname, type](TRI_voc_cid_t cid)->void {
+    auto res =
+      _state->addCollection(cid, cname, type, _state->nestingLevel(), false);
+
+    if (TRI_ERROR_NO_ERROR == res) {
+      return;
+    }
+
+    if (TRI_ERROR_TRANSACTION_UNREGISTERED_COLLECTION == res) {
+      // special error message to indicate which collection was undeclared
+      THROW_ARANGO_EXCEPTION_MESSAGE(
+        res,
+        std::string(TRI_errno_string(res)) + ": " + cname
+        + " [" + AccessMode::typeString(type) + "]"
+      );
+    }
+
+    if (TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND == res) {
+      throwCollectionNotFound(cname.c_str());
+    }
+
+    THROW_ARANGO_EXCEPTION(res);
+  };
+
   Result res;
   bool visited = false;
-  auto visitor = _state->isEmbeddedTransaction()
-    ? std::function<bool(LogicalCollection&)>(
-        [this, name, type, &res, cid, &visited](LogicalCollection& col)->bool {
-          res = addCollectionEmbedded(col.id(), name, type);
-
-          if (!res.ok()) {
-            return false; // break on error
-          }
-
-          res = applyStateRegistrationCallbacks(col, *_state);
-          visited |= cid == col.id();
-
-          return res.ok(); // add the remaining collections (or break on error)
-        }
-      )
-    : std::function<bool(LogicalCollection&)>(
-        [this, name, type, &res, cid, &visited](LogicalCollection& col)->bool {
-          res = addCollectionToplevel(col.id(), name, type);
-
-          if (!res.ok()) {
-            return false; // break on error
-          }
-
-          res = applyStateRegistrationCallbacks(col, *_state);
-          visited |= cid == col.id();
-
-          return res.ok(); // add the remaining collections (or break on error)
-        }
-      )
-  ;
+  std::function<bool(LogicalCollection&)> visitor(
+    [this, &addCollection, &res, cid, &visited](LogicalCollection& col)->bool {
+      addCollection(col.id()); // will throw on error
+      res = applyDataSourceRegistrationCallbacks(col, *this);
+      visited |= cid == col.id();
+
+      return res.ok(); // add the remaining collections (or break on error)
+    }
+  );

   if (!resolver()->visitCollections(visitor, cid) || !res.ok()) {
     // trigger exception as per the original behaviour (tests depend on this)
     if (res.ok() && !visited) {
-      res = _state->isEmbeddedTransaction()
-        ? addCollectionEmbedded(cid, name, type)
-        : addCollectionToplevel(cid, name, type)
-      ;
+      addCollection(cid); // will throw on error
     }

     return res.ok() ? Result(TRI_ERROR_INTERNAL) : res; // return first error
@@ -3027,28 +3175,15 @@ Result transaction::Methods::addCollection(TRI_voc_cid_t cid, char const* name,
   auto dataSource = resolver()->getDataSource(cid);

   return dataSource
-    ? applyStateRegistrationCallbacks(*dataSource, *_state)
+    ? applyDataSourceRegistrationCallbacks(*dataSource, *this)
     : Result(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND)
   ;
 }

-/// @brief add a collection by id, with the name supplied
-Result transaction::Methods::addCollection(TRI_voc_cid_t cid,
-                                           std::string const& name,
-                                           AccessMode::Type type) {
-  return addCollection(cid, name.c_str(), type);
-}
-
-/// @brief add a collection by id
-Result transaction::Methods::addCollection(TRI_voc_cid_t cid,
-                                           AccessMode::Type type) {
-  return addCollection(cid, nullptr, type);
-}
-
 /// @brief add a collection by name
 Result transaction::Methods::addCollection(std::string const& name,
                                            AccessMode::Type type) {
-  return addCollection(resolver()->getCollectionId(name), name.c_str(), type);
+  return addCollection(resolver()->getCollectionId(name), name, type);
 }

 /// @brief test if a collection is already locked
@@ -3207,61 +3342,6 @@ transaction::Methods::IndexHandle transaction::Methods::getIndexByIdentifier(
   return IndexHandle(idx);
 }

-/// @brief add a collection to an embedded transaction
-Result transaction::Methods::addCollectionEmbedded(TRI_voc_cid_t cid,
-                                                   char const* name,
-                                                   AccessMode::Type type) {
-  TRI_ASSERT(_state != nullptr);
-
-  int res = _state->addCollection(cid, type, _state->nestingLevel(), false);
-
-  if (res != TRI_ERROR_NO_ERROR) {
-    if (res == TRI_ERROR_TRANSACTION_UNREGISTERED_COLLECTION) {
-      // special error message to indicate which collection was undeclared
-      THROW_ARANGO_EXCEPTION_MESSAGE(
-          res, std::string(TRI_errno_string(res)) + ": " +
-                   resolver()->getCollectionNameCluster(cid) + " [" +
-                   AccessMode::typeString(type) + "]");
-    } else if (res == TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND) {
-      throwCollectionNotFound(name);
-    }
-    THROW_ARANGO_EXCEPTION(res);
-  }
-
-  return res;
-}
-
-/// @brief add a collection to a top-level transaction
-Result transaction::Methods::addCollectionToplevel(TRI_voc_cid_t cid,
-                                                   char const* name,
-                                                   AccessMode::Type type) {
-  TRI_ASSERT(_state != nullptr);
-
-  int res;
-
-  if (_state->status() != transaction::Status::CREATED) {
-    // transaction already started?
-    res = TRI_ERROR_TRANSACTION_INTERNAL;
-  } else {
-    res = _state->addCollection(cid, type, _state->nestingLevel(), false);
-  }
-
-  if (res != TRI_ERROR_NO_ERROR) {
-    if (res == TRI_ERROR_TRANSACTION_UNREGISTERED_COLLECTION) {
-      // special error message to indicate which collection was undeclared
-      THROW_ARANGO_EXCEPTION_MESSAGE(
-          res, std::string(TRI_errno_string(res)) + ": " +
-                   resolver()->getCollectionNameCluster(cid) + " [" +
-                   AccessMode::typeString(type) + "]");
-    } else if (res == TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND) {
-      throwCollectionNotFound(name);
-    }
-    THROW_ARANGO_EXCEPTION(res);
-  }
-
-  return res;
-}
-
 Result transaction::Methods::resolveId(char const* handle, size_t length,
                                        TRI_voc_cid_t& cid, char const*& key,
                                        size_t& outLength) {
@@ -144,21 +144,39 @@ class Methods {

  public:

+  /// @brief create the transaction, used to be UserTransaction
+  Methods(std::shared_ptr<transaction::Context> const& ctx,
+          std::vector<std::string> const& readCollections,
+          std::vector<std::string> const& writeCollections,
+          std::vector<std::string> const& exclusiveCollections,
+          transaction::Options const& options);
+
   /// @brief destroy the transaction
   virtual ~Methods();

-  typedef Result(*StateRegistrationCallback)(LogicalDataSource& dataSource, TransactionState& state);
+  typedef Result(*DataSourceRegistrationCallback)(LogicalDataSource& dataSource, Methods& trx);

-  /// @brief add a callback to be called for state instance association events
-  /// e.g. addCollection(...)
+  /// @brief definition from TransactionState::StatusChangeCallback
+  /// @param status the new status of the transaction
+  ///        will match trx.state()->status() for top-level transactions
+  ///        may not match trx.state()->status() for embeded transactions
+  ///        since their staus is not updated from RUNNING
+  typedef std::function<void(transaction::Methods& trx, transaction::Status& status)> StatusChangeCallback;
+
+  /// @brief add a callback to be called for LogicalDataSource instance
+  ///        association events, e.g. addCollection(...)
   /// @note not thread-safe on the assumption of static factory registration
-  static void addStateRegistrationCallback(StateRegistrationCallback callback);
+  static void addDataSourceRegistrationCallback(DataSourceRegistrationCallback const& callback);

-  /// @brief clear all called for state instance association events
+  /// @brief add a callback to be called for state change events
+  /// @return success
+  bool addStatusChangeCallback(StatusChangeCallback const& callback);
+
+  /// @brief clear all called for LogicalDataSource instance association events
   /// @note not thread-safe on the assumption of static factory registration
   /// @note FOR USE IN TESTS ONLY to reset test state
-  static void clearStateRegistrationCallbacks();
+  /// FIXME TODO StateRegistrationCallback logic should be moved into its own feature
+  static void clearDataSourceRegistrationCallbacks();

   /// @brief default batch size for index and other operations
   static constexpr uint64_t defaultBatchSize() { return 1000; }
@@ -507,13 +525,7 @@ class Methods {
                                  TransactionCollection const*) const;

   /// @brief add a collection by id, with the name supplied
-  ENTERPRISE_VIRT Result addCollection(TRI_voc_cid_t, char const*, AccessMode::Type);
-
-  /// @brief add a collection by id, with the name supplied
-  Result addCollection(TRI_voc_cid_t, std::string const&, AccessMode::Type);
-
-  /// @brief add a collection by id
-  Result addCollection(TRI_voc_cid_t, AccessMode::Type);
+  ENTERPRISE_VIRT Result addCollection(TRI_voc_cid_t, std::string const&, AccessMode::Type);

   /// @brief add a collection by name
   Result addCollection(std::string const&, AccessMode::Type);
@@ -584,12 +596,6 @@ class Methods {
   std::vector<std::shared_ptr<arangodb::Index>> indexesForCollectionCoordinator(
       std::string const&) const;

-  /// @brief add a collection to an embedded transaction
-  Result addCollectionEmbedded(TRI_voc_cid_t, char const* name, AccessMode::Type);
-
-  /// @brief add a collection to a top-level transaction
-  Result addCollectionToplevel(TRI_voc_cid_t, char const* name, AccessMode::Type);
-
  protected:
   /// @brief the state
   TransactionState* _state;
@@ -0,0 +1,88 @@
+////////////////////////////////////////////////////////////////////////////////
+/// DISCLAIMER
+///
+/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
+///
+/// Licensed under the Apache License, Version 2.0 (the "License");
+/// you may not use this file except in compliance with the License.
+/// You may obtain a copy of the License at
+///
+///     http://www.apache.org/licenses/LICENSE-2.0
+///
+/// Unless required by applicable law or agreed to in writing, software
+/// distributed under the License is distributed on an "AS IS" BASIS,
+/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+/// See the License for the specific language governing permissions and
+/// limitations under the License.
+///
+/// Copyright holder is ArangoDB GmbH, Cologne, Germany
+///
+/// @author Simon Grätzer
+////////////////////////////////////////////////////////////////////////////////
+
+#include "SmartContext.h"
+#include "StorageEngine/TransactionState.h"
+#include "StorageEngine/TransactionManager.h"
+#include "Utils/CollectionNameResolver.h"
+
+struct TRI_vocbase_t;
+
+namespace arangodb {
+
+//static thread_local TRI_voc_tid_t CURRENT_TRX_ID;
+
+/// @brief create the context
+transaction::SmartContext::SmartContext(TRI_vocbase_t& vocbase)
+    : Context(vocbase), _state(nullptr) {}
+
+/// @brief order a custom type handler for the collection
+std::shared_ptr<arangodb::velocypack::CustomTypeHandler> transaction::SmartContext::orderCustomTypeHandler() {
+  if (_customTypeHandler == nullptr) {
+    _customTypeHandler.reset(
+      transaction::Context::createCustomTypeHandler(&_vocbase, &resolver())
+    );
+    _options.customTypeHandler = _customTypeHandler.get();
+    _dumpOptions.customTypeHandler = _customTypeHandler.get();
+  }
+
+  TRI_ASSERT(_customTypeHandler != nullptr);
+
+  return _customTypeHandler;
+}
+
+/// @brief return the resolver
+CollectionNameResolver const& transaction::SmartContext::resolver() {
+  if (_resolver == nullptr) {
+    createResolver();
+  }
+
+  TRI_ASSERT(_resolver != nullptr);
+
+  return *_resolver;
+}
+
+/// @brief get parent transaction (if any) and increase nesting
+TransactionState* transaction::SmartContext::getParentTransaction() const {
+  return _state;
+}
+
+/// @brief register the transaction, so other Method instances can get it
+void transaction::SmartContext::registerTransaction(TransactionState* state) {
+  TRI_ASSERT(_state == nullptr);
+  _state = state;
+}
+
+/// @brief unregister the transaction
+void transaction::SmartContext::unregisterTransaction() noexcept {
+  _state = nullptr;
+}
+
+/// @brief whether or not the transaction is embeddable
+bool transaction::SmartContext::isEmbeddable() const {
+  return true;
+}
+
+std::shared_ptr<transaction::Context> transaction::SmartContext::Create(TRI_vocbase_t& vocbase) {
+  return std::make_shared<transaction::SmartContext>(vocbase);
+}
+} // arangodb
@@ -0,0 +1,86 @@
+////////////////////////////////////////////////////////////////////////////////
+/// DISCLAIMER
+///
+/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
+///
+/// Licensed under the Apache License, Version 2.0 (the "License");
+/// you may not use this file except in compliance with the License.
+/// You may obtain a copy of the License at
+///
+///     http://www.apache.org/licenses/LICENSE-2.0
+///
+/// Unless required by applicable law or agreed to in writing, software
+/// distributed under the License is distributed on an "AS IS" BASIS,
+/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+/// See the License for the specific language governing permissions and
+/// limitations under the License.
+///
+/// Copyright holder is ArangoDB GmbH, Cologne, Germany
+///
+/// @author Simon Grätzer
+////////////////////////////////////////////////////////////////////////////////
+
+#ifndef ARANGOD_TRANSACTION_GLOBAL_CONTEXT_H
+#define ARANGOD_TRANSACTION_GLOBAL_CONTEXT_H 1
+
+#include "Context.h"
+#include "Basics/Common.h"
+#include "VocBase/vocbase.h"
+
+struct TRI_vocbase_t;
+
+namespace arangodb {
+
+class TransactionState;
+
+namespace transaction {
+
+/// transaction context that will manage the creation or acquisition of a TransactionState
+/// for transction::Methods instances for cluster wide transactions. Cluster wide transactions
+/// essentially just mean that all operations will use a consistent transaction ID and
+/// on the same server the same TransactionState instance will be used.
+/// The class supports three different use-cases
+/// (1) Constructor with TID and Type::Default can be used to share a TransactionState between
+///     multiple transaction::Methods instances
+/// (2) Constructor with TID and Type::Global will try to lease an already existing TransactionState
+///     from the TransactionManager. This supports global transaction with explicit begin / end requests
+/// (3) Construcor with TransactionState* is used to manage a global transaction
+class SmartContext final : public Context {
+ public:
+
+  /// @brief create the context, with given TID
+  explicit SmartContext(TRI_vocbase_t& vocbase);
+
+  /// @brief destroy the context
+  ~SmartContext() = default;
+
+  /// @brief order a custom type handler
+  std::shared_ptr<arangodb::velocypack::CustomTypeHandler>
+  orderCustomTypeHandler() override final;
+
+  /// @brief return the resolver
+  CollectionNameResolver const& resolver() override final;
+
+  /// @brief get parent transaction (if any) and increase nesting
+  TransactionState* getParentTransaction() const override;
+
+  /// @brief register the transaction,
+  void registerTransaction(TransactionState*) override;
+
+  /// @brief unregister the transaction
+  void unregisterTransaction() noexcept override;
+
+  /// @brief whether or not the transaction is embeddable
+  bool isEmbeddable() const override;
+
+  static std::shared_ptr<Context> Create(TRI_vocbase_t&);
+
+ private:
+  /// @brief managed TransactionState
+  TransactionState *_state;
+};
+
+}
+}
+
+#endif
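
The new SmartContext differs from StandaloneContext in one key respect: isEmbeddable() returns true, and the first transaction constructed on it registers its state with the context, so later Methods instances on the same context embed into the same transaction instead of opening a second one. A sketch of that sharing, with an illustrative vocbase and collection names:

// Sketch only: two transaction::Methods instances sharing one state.
auto ctx = transaction::SmartContext::Create(vocbase);

// The first transaction creates a TransactionState and registers it with
// the context (registerTransaction() in the constructor path).
transaction::Methods trx1(ctx, {"c1"}, {}, {}, transaction::Options());

// The second finds the registered state via getParentTransaction() and
// becomes an embedded transaction on the same state.
transaction::Methods trx2(ctx, {"c2"}, {}, {}, transaction::Options());
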
@@ -61,7 +61,7 @@ CollectionNameResolver const& transaction::StandaloneContext::resolver() {
 }

 /// @brief create a context, returned in a shared ptr
-/*static*/ std::shared_ptr<transaction::StandaloneContext> transaction::StandaloneContext::Create(
+/*static*/ std::shared_ptr<transaction::Context> transaction::StandaloneContext::Create(
     TRI_vocbase_t& vocbase
 ) {
   return std::make_shared<transaction::StandaloneContext>(vocbase);
@@ -65,7 +65,7 @@ class StandaloneContext final : public Context {
   bool isEmbeddable() const override { return false; }

   /// @brief create a context, returned in a shared ptr
-  static std::shared_ptr<transaction::StandaloneContext> Create(
+  static std::shared_ptr<transaction::Context> Create(
       TRI_vocbase_t& vocbase
   );
 };
@@ -73,4 +73,4 @@ class StandaloneContext final : public Context {
 }
 }

-#endif
+#endif
@@ -1,66 +0,0 @@
-////////////////////////////////////////////////////////////////////////////////
-/// DISCLAIMER
-///
-/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
-/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
-///
-/// Licensed under the Apache License, Version 2.0 (the "License");
-/// you may not use this file except in compliance with the License.
-/// You may obtain a copy of the License at
-///
-///     http://www.apache.org/licenses/LICENSE-2.0
-///
-/// Unless required by applicable law or agreed to in writing, software
-/// distributed under the License is distributed on an "AS IS" BASIS,
-/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-/// See the License for the specific language governing permissions and
-/// limitations under the License.
-///
-/// Copyright holder is ArangoDB GmbH, Cologne, Germany
-///
-/// @author Jan Steemann
-////////////////////////////////////////////////////////////////////////////////
-
-#ifndef ARANGOD_TRANSACTION_USER_TRANSACTION_H
-#define ARANGOD_TRANSACTION_USER_TRANSACTION_H 1
-
-#include "Basics/Common.h"
-
-#include "StorageEngine/TransactionState.h"
-#include "Transaction/Context.h"
-#include "Transaction/Methods.h"
-#include "Transaction/Options.h"
-
-namespace arangodb {
-namespace transaction {
-
-class UserTransaction final : public transaction::Methods {
- public:
-  /// @brief create the transaction
-  UserTransaction(std::shared_ptr<transaction::Context> transactionContext,
-                  std::vector<std::string> const& readCollections,
-                  std::vector<std::string> const& writeCollections,
-                  std::vector<std::string> const& exclusiveCollections,
-                  transaction::Options const& options)
-      : transaction::Methods(transactionContext, options) {
-    addHint(transaction::Hints::Hint::LOCK_ENTIRELY);
-
-    for (auto const& it : exclusiveCollections) {
-      addCollection(it, AccessMode::Type::EXCLUSIVE);
-    }
-
-    for (auto const& it : writeCollections) {
-      addCollection(it, AccessMode::Type::WRITE);
-    }
-
-    for (auto const& it : readCollections) {
-      addCollection(it, AccessMode::Type::READ);
-    }
-  }
-};
-
-}
-}
-
-#endif
@@ -29,21 +29,38 @@
 #include "Transaction/Methods.h"
 #include "Transaction/Context.h"
 #include "VocBase/LogicalCollection.h"
+#include "VocBase/LogicalView.h"

 using namespace arangodb;

-/// @brief create the transaction, using a collection id
+/// @brief create the transaction, using a collection
 SingleCollectionTransaction::SingleCollectionTransaction(
-    std::shared_ptr<transaction::Context> const& transactionContext, TRI_voc_cid_t cid,
-    AccessMode::Type accessType)
+    std::shared_ptr<transaction::Context> const& transactionContext,
+    LogicalCollection const* col, AccessMode::Type accessType)
     : transaction::Methods(transactionContext),
-      _cid(cid),
+      _cid(col->id()),
       _trxCollection(nullptr),
       _documentCollection(nullptr),
       _accessType(accessType) {
   // add the (sole) collection
-  addCollection(cid, _accessType);
+  addCollection(col->id(), col->name(), _accessType);
   addHint(transaction::Hints::Hint::NO_DLD);
 }

+/// @brief create the transaction, using a collection id
+SingleCollectionTransaction::SingleCollectionTransaction(
+    std::shared_ptr<transaction::Context> const& transactionContext,
+    LogicalView const& view, AccessMode::Type accessType)
+    : transaction::Methods(transactionContext),
+      _cid(view.id()),
+      _trxCollection(nullptr),
+      _documentCollection(nullptr),
+      _accessType(accessType) {
+  // add the (sole) view
+  addCollection(view.id(), view.name(), _accessType);
+  addHint(transaction::Hints::Hint::NO_DLD);
+}
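
Call sites throughout this diff change accordingly: instead of a numeric collection id they hand the LogicalCollection (or LogicalView) object to the constructor, which registers the data source under both its id and its name, so permission checks no longer need a resolver lookup. A sketch of the new calling convention; ctx and collection are illustrative locals:

// Sketch only: the constructor now takes the collection object itself.
// Old form: SingleCollectionTransaction trx(ctx, collection->id(), ...);
SingleCollectionTransaction trx(ctx, collection, AccessMode::Type::READ);

Result res = trx.begin();
if (res.ok()) {
  // ... perform the read operations via trx ...
  res = trx.commit();
}
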
@@ -38,11 +38,18 @@ class SingleCollectionTransaction final : public transaction::Methods {

  public:
   //////////////////////////////////////////////////////////////////////////////
-  /// @brief create the transaction, using a collection id
+  /// @brief create the transaction, using a collection
   //////////////////////////////////////////////////////////////////////////////

   SingleCollectionTransaction(std::shared_ptr<transaction::Context> const&,
-                              TRI_voc_cid_t, AccessMode::Type);
+                              LogicalCollection const*, AccessMode::Type);
+
+  //////////////////////////////////////////////////////////////////////////////
+  /// @brief create the transaction, using a view
+  //////////////////////////////////////////////////////////////////////////////
+
+  SingleCollectionTransaction(std::shared_ptr<transaction::Context> const&,
+                              LogicalView const&, AccessMode::Type);

   //////////////////////////////////////////////////////////////////////////////
   /// @brief create the transaction, using a collection name
@@ -431,7 +431,7 @@ static v8::Handle<v8::Object> RequestCppToV8(v8::Isolate* isolate,
     }
     std::string const& body = httpreq->body();
     req->ForceSet(RequestBodyKey, TRI_V8_STD_STRING(isolate, body));
-    headers["content-length"] = StringUtils::itoa(request->contentLength());
+    headers[StaticStrings::ContentLength] = StringUtils::itoa(request->contentLength());
   } else if (rest::ContentType::VPACK == request->contentType()) {
     // the VPACK is passed as it is to to Javascript
     // FIXME not every VPack can be converted to JSON
@@ -443,8 +443,8 @@ static v8::Handle<v8::Object> RequestCppToV8(v8::Isolate* isolate,
         << jsonString;

     req->ForceSet(RequestBodyKey, TRI_V8_STD_STRING(isolate, jsonString));
-    headers["content-length"] = StringUtils::itoa(jsonString.size());
-    headers["content-type"] = StaticStrings::MimeTypeJson;
+    headers[StaticStrings::ContentLength] = StringUtils::itoa(jsonString.size());
+    headers[StaticStrings::ContentTypeHeader] = StaticStrings::MimeTypeJson;
   } else {
     throw std::logic_error("unhandled request type");
   }
@@ -991,7 +991,7 @@ static void JS_FiguresVocbaseCol(

   SingleCollectionTransaction trx(
     transaction::V8Context::Create(collection->vocbase(), true),
-    collection->id(),
+    collection,
     AccessMode::Type::READ
   );
   Result res = trx.begin();
@@ -2288,7 +2288,7 @@ static void InsertVocbaseCol(v8::Isolate* isolate,
   auto transactionContext =
       std::make_shared<transaction::V8Context>(collection->vocbase(), true);
   SingleCollectionTransaction trx(
-    transactionContext, collection->id(), AccessMode::Type::WRITE
+    transactionContext, collection, AccessMode::Type::WRITE
   );

   if (!payloadIsArray && !options.overwrite) {
@@ -2438,7 +2438,7 @@ static void JS_TruncateVocbaseCol(

   auto ctx = transaction::V8Context::Create(collection->vocbase(), true);
   SingleCollectionTransaction trx(
-    ctx, collection->id(), AccessMode::Type::EXCLUSIVE
+    ctx, collection, AccessMode::Type::EXCLUSIVE
   );
   Result res = trx.begin();
@@ -208,7 +208,7 @@ static void JS_AllQuery(v8::FunctionCallbackInfo<v8::Value> const& args) {
   std::shared_ptr<transaction::V8Context> transactionContext =
       transaction::V8Context::Create(collection->vocbase(), true);
   SingleCollectionTransaction trx(
-    transactionContext, collection->id(), AccessMode::Type::READ
+    transactionContext, collection, AccessMode::Type::READ
   );
   Result res = trx.begin();
@@ -299,7 +299,7 @@ static void JS_AnyQuery(v8::FunctionCallbackInfo<v8::Value> const& args) {
   std::shared_ptr<transaction::V8Context> transactionContext =
       transaction::V8Context::Create(col->vocbase(), true);
   SingleCollectionTransaction trx(
-    transactionContext, col->id(), AccessMode::Type::READ
+    transactionContext, col, AccessMode::Type::READ
   );
   Result res = trx.begin();
@@ -62,7 +62,6 @@
 #include "Statistics/StatisticsFeature.h"
 #include "StorageEngine/EngineSelectorFeature.h"
 #include "StorageEngine/StorageEngine.h"
-#include "Transaction/UserTransaction.h"
 #include "Transaction/V8Context.h"
 #include "Utils/ExecContext.h"
 #include "V8/JSLoader.h"