mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of github.com:arangodb/arangodb into devel
commit c47505d514
@ -34,9 +34,11 @@ endif()
if ("${CMAKE_TARGET_ARCHITECTURES}" STREQUAL "armv7")
  set(V8_PROC_ARCH "arm")
  set(QEMU_ARCH "qemu-arm")
  list(APPEND V8_GYP_ARGS -Darm_version=7 -Darm_fpu=default -Darm_float_abi=default)
elseif("${CMAKE_TARGET_ARCHITECTURES}" STREQUAL "aarch64")
  set(V8_PROC_ARCH "arm64")
  set(QEMU_ARCH "qemu-aarch64")
  list(APPEND V8_GYP_ARGS -Darm_fpu=default -Darm_float_abi=default)
else ()
  if ("${BITS}" STREQUAL "64")
@ -64,7 +66,21 @@ list(APPEND V8_GYP_ARGS
if (CROSS_COMPILING)
  list(APPEND V8_GYP_ARGS
    -Dhost_arch=${V8_PROC_ARCH}
    -DGYP_CROSSCOMPILE=1)
    -DGYP_CROSSCOMPILE=1
    -DEXECUTABLE_PREFIX=${V8_PROC_ARCH}.
  )
  configure_file (
    "${CMAKE_SOURCE_DIR}/lib/V8/v8-mkpeephole.in"
    "${V8_TARGET_DIR}/${V8_PROC_ARCH}.release/${V8_PROC_ARCH}.mkpeephole"
    NEWLINE_STYLE UNIX
    @ONLY
  )
  configure_file (
    "${CMAKE_SOURCE_DIR}/lib/V8/v8-mksnapshot.in"
    "${V8_TARGET_DIR}/${V8_PROC_ARCH}.release/${V8_PROC_ARCH}.mksnapshot"
    NEWLINE_STYLE UNIX
    @ONLY
  )
endif()

################################################################################
@ -17,6 +17,8 @@ if (USE_JEMALLOC)
    set(JEMALLOC_LIB ${JEMALLOC_HOME}/lib/libjemalloc.a CACHE PATH "library file")
    set(THIRDPARTY_LIBS ${JEMALLOC_LIB})
  endif ()
else ()
  set(WITH_JEMALLOC OFF CACHE BOOL "enable jemalloc")
endif ()

# snappy settings
@ -96,7 +96,11 @@ else()
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -W -Wextra -Wall")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wsign-compare -Wshadow -Wno-unused-parameter -Wno-unused-variable -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer -momit-leaf-frame-pointer")
  if (CROSS_COMPILING)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer")
  else()
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer -momit-leaf-frame-pointer")
  endif()
endif()

option(FAIL_ON_WARNINGS "Treat compile warnings as errors" ON)
@ -182,9 +182,12 @@ class ObjectIterator {

  ObjectIterator() = delete;

  explicit ObjectIterator(Slice const& slice, bool allowRandomIteration = false)
  // The useSequentialIteration flag indicates whether or not the iteration
  // simply jumps from key/value pair to key/value pair without using the
  // index. The default `false` is to use the index if it is there.
  explicit ObjectIterator(Slice const& slice, bool useSequentialIteration = false)
      : _slice(slice), _size(_slice.length()), _position(0), _current(nullptr),
        _allowRandomIteration(allowRandomIteration) {
        _useSequentialIteration(useSequentialIteration) {
    if (!slice.isObject()) {
      throw Exception(Exception::InvalidValueType, "Expecting Object slice");
    }

@ -193,7 +196,7 @@ class ObjectIterator {
    auto h = slice.head();
    if (h == 0x14) {
      _current = slice.keyAt(0, false).start();
    } else if (allowRandomIteration) {
    } else if (useSequentialIteration) {
      _current = slice.begin() + slice.findDataOffset(h);
    }
  }

@ -204,7 +207,7 @@ class ObjectIterator {
        _size(other._size),
        _position(other._position),
        _current(other._current),
        _allowRandomIteration(other._allowRandomIteration) {}
        _useSequentialIteration(other._useSequentialIteration) {}

  ObjectIterator& operator=(ObjectIterator const& other) = delete;

@ -306,7 +309,7 @@ class ObjectIterator {
  ValueLength const _size;
  ValueLength _position;
  uint8_t const* _current;
  bool const _allowRandomIteration;
  bool const _useSequentialIteration;
};

}  // namespace arangodb::velocypack
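As a quick illustration of the renamed flag, here is a minimal usage sketch. It assumes velocypack's public `Parser`, `Builder` and the iterator's usual `valid()`/`next()`/`key()`/`value()` accessors; the JSON input and printing are illustrative, not part of the commit:

```
#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/Parser.h>
#include <velocypack/Slice.h>

#include <iostream>
#include <memory>

using namespace arangodb::velocypack;

int main() {
  // build a small object Slice to iterate over
  std::shared_ptr<Builder> builder = Parser::fromJson("{\"a\":1,\"b\":2,\"c\":3}");
  Slice slice = builder->slice();

  // true => walk from key/value pair to key/value pair instead of using the index
  ObjectIterator it(slice, /*useSequentialIteration*/ true);
  for (; it.valid(); it.next()) {
    std::cout << it.key().copyString() << " = " << it.value().getInt() << "\n";
  }
  return 0;
}
```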
CHANGELOG
@ -1,11 +1,22 @@
devel
-----

* fixed issue #2450
* fixed issue #2459: compile success but can not run with rocksdb

* fixed issue #2448
* `--server.maximal-queue-size` is now an absolute maximum. If the queue is
  full, then 503 is returned. Setting it to 0 means "no limit".

* fixed issue #2442
* (Enterprise only) added authentication against an LDAP server


v3.2.alpha4 (2017-04-25)
------------------------

* fixed issue #2450: Bad optimization plan on simple query

* fixed issue #2448: ArangoDB Web UI takes no action when Delete button is clicked

* fixed issue #2442: Frontend shows already deleted databases during login

* added 'x-content-type-options: nosniff' to avoid MSIE bug
@ -76,6 +87,10 @@ devel
  Enabling this option activated some proprietary timers for only selected
  events in arangod. Better use `perf` instead to gather timings.


v3.2.alpha3 (2017-03-22)
------------------------

* increase default collection lock timeout from 30 to 900 seconds

* added function `db._engine()` for retrieval of storage engine information at
@ -113,7 +128,7 @@ devel
  when unused.

  Waiting for an unused V8 context will now also abort if no V8 context can be
  acquired/created after 60 seconds.
  acquired/created after 120 seconds.

* improved diagnostic messages written to logfiles by supervisor process
@ -230,7 +245,7 @@ v3.2.alpha1 (2017-02-05)
* fix potential port number over-/underruns

* added startup option `--log.shorten-filenames` for controlling whether filenames
  in log message should be shortened to just the filename with the absolute path
  in log messages should be shortened to just the filename with the absolute path

* removed IndexThreadFeature, made `--database.index-threads` option obsolete
@ -240,18 +255,27 @@ v3.2.alpha1 (2017-02-05)

* generated Foxx services now use swagger tags


v3.1.19 (XXXX-XX-XX)
--------------------


* Fixed a StackOverflow issue in Traversal and ShortestPath. Occurred if many (>1000) input
  values in a row do not return any result. Fixes issue #2445.

v3.1.19 (XXXX-XX-XX)
--------------------
* fixed issue #2448

* fixed issue #2442

* added 'x-content-type-options: nosniff' to avoid MSIE bug

* fixed issue #2441

* fixed issue #2440

* Fixed a StackOverflow issue in Traversal and ShortestPath. Occurred if many (>1000) input
  values in a row do not return any result. Fixes issue #2445.

* fix occasional hanging shutdowns on OS X


v3.1.18 (2017-04-18)
@ -255,6 +255,11 @@ else ()
  )
endif ()

if (CROSS_COMPILING)
  # currently off; needs additional parameters to configure, like --host=<triple> <params>
  SET(USE_JEMALLOC OFF)
endif()

################################################################################
## EXTERNAL PROGRAMS
################################################################################
@ -0,0 +1,101 @@
LDAP
====

__This feature is available in the Enterprise Edition.__

The basic options are `--ldap.enabled`, `--ldap.tls`, `--ldap.port`, `--ldap.server` and `--ldap.permissions-attribute-name`.

`--ldap.server` and `--ldap.port` can be replaced by `--ldap.url`.

The default for `--ldap.port` is *389*.

`--ldap.permissions-attribute-name` has the format *database-name=(*|rw|none)[,database-name=(*|rw|none)]*.

There are two modes of operation: *simple auth* and *bind+search*.

### simple auth

ArangoDB connects to the LDAP server and authenticates with the username and password provided by
the API authentication request, then searches for the database permissions using the attribute name
provided by `--ldap.permissions-attribute-name`.

Example:

    --ldap.enabled true --ldap.server ldap.company.com \
    --ldap.permissions-attribute-name arangodbPermissions \
    --ldap.prefix uid= --ldap.suffix ,dc=company,dc=com

`--ldap.prefix` and `--ldap.suffix` build the distinguished name (DN). ArangoDB tries to authenticate
with *prefix* + *ArangoDB username* + *suffix* against the LDAP server and searches for the database permissions.

### bind+search

Example with anonymous auth:

    --ldap.enabled true --ldap.server ldap.company.com \
    --ldap.basedn dc=company,dc=com \
    --ldap.permissions-attribute-name arangodbPermissions

With this configuration ArangoDB binds anonymously to the LDAP server and searches for the user.
If the user is found, authentication is performed with the user's DN and password, and the database permissions are then fetched.

Example with DN and password:

    --ldap.enabled true --ldap.server ldap.company.com \
    --ldap.basedn dc=company,dc=com \
    --ldap.permissions-attribute-name arangodbPermissions \
    --ldap.binddn cn=admin,dc=company,dc=com --ldap.bindpasswd admin

With this configuration ArangoDB binds with `--ldap.binddn` and `--ldap.bindpasswd` to the LDAP server and searches for the user.
If the user is found, authentication is performed with the user's DN and password, and the database permissions are then fetched.

#### additional options

    --ldap.search-filter "objectClass=*"

Restricts the search to specific object classes. The default is `objectClass=*`.

    --ldap.search-attribute "uid"

`--ldap.search-attribute` specifies which attribute to compare with the *username*. The default is `uid`.

    --ldap.search-scope sub

`--ldap.search-scope` specifies in which scope to search for a user. Valid values are *base*, *one* and *sub*. The default is *sub*.

### ldap url

    --ldap.url ldap://ldap.server.com:1234/dc=example,dc=com?uid?sub

The LDAP URL consists of the LDAP *server* and *port*, a *basedn*, a *search attribute* and a *scope*, which can be one of *base*, *one* or *sub*.

### TLS options

An encrypted connection can be established with `--ldap.tls true` on UNIX and GNU/Linux platforms.

All of the following options are not available under Windows.

    --ldap.tls

The default is `false`. With `true` a TLS connection is established.

    --ldap.tls-version

The default is `1.2`. Available versions are `1.0`, `1.1` and `1.2`.

    --ldap.tls-cert-check-strategy

The default is `hard`. Available strategies are `never`, `hard`, `demand`, `allow` and `try`.

    --ldap.tls-cacert-file

A file path to one or more (concatenated) certificate authority certificates in PEM format.
By default no file path is configured.

The following option has no effect / does not work under macOS.

    --ldap.tls-cacert-dir

A directory path to certificate authority certificates in [c_rehash](https://www.openssl.org/docs/man1.0.2/apps/c_rehash.html) format.
By default no directory path is configured.
@ -5,60 +5,320 @@ The following list shows in detail which features have been added or improved in
ArangoDB 3.2. ArangoDB 3.2 also contains several bugfixes that are not listed
here.

SmartGraphs
-----------

Storage engines
---------------

ArangoDB 3.2 offers two storage engines:

* the always-existing memory-mapped files storage engine
* a new storage engine based on [RocksDB](https://www.github.com/facebook/rocksdb/)

### Memory-mapped files storage engine (MMFiles)

The former storage engine (named MMFiles engine henceforth) persists data in memory-mapped
files.

Any data changes are done first in the engine's write-ahead log (WAL). The WAL
is replayed after a crash so the engine offers durability and crash-safety. Data
from the WAL is eventually moved to collection-specific datafiles. The files are
always written in an append-only fashion, so data in files is never overwritten.
Obsolete data in files will eventually be purged by background compaction threads.

Most of this engine's indexes are built in RAM. When a collection is loaded, this requires
rebuilding the indexes in RAM from the data stored on disk. The MMFiles engine has
collection-level locking.

This storage engine is a good choice when data (including the indexes) can fit in the
server's available RAM. If the size of data plus the in-memory indexes exceeds the size
of the available RAM, then this engine may try to allocate more memory than available.
This will either make the operating system swap out parts of the data (and cause disk I/O)
or, when no swap space is configured, invoke the operating system's out-of-memory process
killer.

The locking strategy allows parallel reads and is often good enough in read-mostly
workloads. Writes need exclusive locks on the collections, so they can block other
operations in the same collection. The locking strategy also provides transactional consistency
and isolation.

### RocksDB storage engine

The RocksDB storage engine is new in ArangoDB 3.2. It is designed to store datasets
that are bigger than the server's available RAM. It persists all data (including the
indexes) in a RocksDB instance.

That means any document read or write operations will be answered by RocksDB under the
hood. RocksDB will serve the data from its own in-RAM caches or from disk.
The RocksDB engine has a write-ahead log (WAL) and uses background threads for compaction.
It supports data compression.

The RocksDB storage engine has document-level locking. Read operations do not block and
are never blocked by other operations. Write operations only block writes on the same
documents/index values. Because multiple writers can operate in parallel on the same
collection, there is the possibility of write-write conflicts. If such a write conflict
is detected, one of the write operations is aborted with error 1200 ("conflict").
Client applications can then either abort the operation or retry, based on the required
consistency semantics.

### Storage engine selection

The storage engine to use in an ArangoDB cluster or a single-server instance must be
selected initially. The default storage engine in ArangoDB 3.2 is the MMFiles engine if
no storage engine is selected explicitly. This ensures that all users upgrading from earlier
versions can continue with the well-known MMFiles engine.

To select the storage engine, there is the configuration option `--server.storage-engine`.
It can be set to either `mmfiles`, `rocksdb` or `auto`. While the first two values will
explicitly select a storage engine, the `auto` option will automatically choose the
storage engine based on which storage engine was previously selected. If no engine was
selected previously, `auto` will select the MMFiles engine. If an engine was previously
selected, the selection will be written to a file `ENGINE` in the server's database
directory and will be read from there at any subsequent server starts.
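For example, a fresh single-server instance can be pinned to the new engine explicitly at first start (a minimal sketch; the value is one of the three listed above):

    --server.storage-engine rocksdb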
Once the storage engine has been selected, the selection cannot be changed by adjusting
`--server.storage-engine`. In order to switch to another storage engine, it is required
to restart the server with another (empty) database directory. In order to use data
created with the other storage engine, it is required to dump the data first with the
old engine and restore it using the new storage engine. This can be achieved by
invoking arangodump and arangorestore, as sketched below.
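A minimal sketch of such a migration (the dump directory name is a placeholder, and default server endpoints are assumed):

    arangodump --output-directory engine-migration
    # restart arangod with an empty database directory and the new --server.storage-engine value
    arangorestore --input-directory engine-migration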
Unlike in MySQL, the storage engine selection in ArangoDB is for an entire cluster or
an entire single-server instance. All databases and collections will use the same storage
engine.
### RocksDB storage engine: known issues

The RocksDB storage engine in this release has a few known issues and missing features.
These will be resolved in the following releases:

* index selectivity estimates are missing. All indexes will report their selectivity
  estimate as `0.2`. This may lead to a non-optimal index being used in a query.

* geo and fulltext indexes are not yet implemented

* the number of documents reported for collections (`db.<collection>.count()`) may be
  slightly wrong during transactions

* the engine may produce spurious `unique constraint violation` errors for auto-generated
  `_key` values

* transactions are de facto limited in size, but no size restriction is currently
  enforced. These restrictions will be implemented in a future release.

* the engine is not yet performance-optimized and well configured

* replication as well as cluster dump/restore are not fully working

The existing indexes in the RocksDB engine are all persistent. The following indexes are
supported there:

* primary: automatically created, indexes `_id` / `_key`

* edge: automatically created for edge collections, indexes `_from` and `_to`

* hash, skiplist, persistent: user-defined index; technically it is neither a hash
  nor a skiplist index. All these index types map to the same RocksDB-based
  sorted index implementation. The names "hash", "skiplist" and "persistent" are
  only used for compatibility with the MMFiles engine.


Data format
-----------
Memory management
-----------------
* added startup options `--vm.resident-limit` and `--vm.path` for file-backed
  memory mapping after reaching a configurable maximum RAM size

  This prevents ArangoDB from using all available RAM when using large datasets.
  This will also lower the chances of the arangod process being killed by the
  operating system's OOM killer.

  Note: these options are not available in all builds and environments.
* make arangod start with fewer V8 JavaScript contexts

  This speeds up the server start and makes arangod use less memory at start.
  Whenever a V8 context is needed by a Foxx action or some other JavaScript operation
  and there is no usable V8 context, a new context will be created dynamically now.

  Up to `--javascript.v8-contexts` V8 contexts will be created, so this option
  will change its meaning. Previously as many V8 contexts as specified by this
  option were created at server start, and the number of V8 contexts did not
  change at runtime. Now up to this number of V8 contexts will be in use at the
  same time, but the actual number of V8 contexts is dynamic.

  The garbage collector thread will automatically delete unused V8 contexts after
  a while. The number of spare contexts will go down to as few as configured in
  the new option `--javascript.v8-contexts-minimum`. Actually that many V8 contexts
  are also created at server start.

  The first few requests in new V8 contexts may take longer than in contexts
  that have been there already. Performance may therefore suffer a bit for the
  initial requests sent to ArangoDB or when there are only few but performance-
  critical situations in which new V8 contexts need to be created. If this is a
  concern, it can easily be fixed by setting `--javascript.v8-contexts-minimum`
  and `--javascript.v8-contexts` to a relatively high value, which will guarantee
  that this many V8 contexts are created at startup and kept around even
  when unused.

  Waiting for an unused V8 context will now also abort and write a log message
  in case no V8 context can be acquired/created after 60 seconds.
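  A sketch of how the two options interact on the command line (the values are placeholders, not recommendations):

      --javascript.v8-contexts 16 --javascript.v8-contexts-minimum 4

  The server then starts with 4 contexts, grows the pool on demand up to 16, and the garbage collector shrinks it back towards 4 when contexts sit unused.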
* the number of pending operations in arangod can now be limited to a configurable
  number. If this number is exceeded, the server will now respond with HTTP 503
  (service unavailable). The maximum number of pending operations is controlled via
  the startup option `--server.maximal-queue-size`. Setting it to 0 means "no limit".
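  For example (the limit is a placeholder value):

      --server.maximal-queue-size 4096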
* the in-memory document revisions cache was removed entirely because it did not
  provide the expected benefits. The 3.1 implementation shadowed document data in
  RAM, which increased the server's RAM usage but did not speed up document lookups
  much.

  This also obsoletes the startup options `--database.revision-cache-chunk-size` and
  `--database.revision-cache-target-size`.

  The MMFiles engine now does not use a document revisions cache but has in-memory
  indexes and maps documents to RAM automatically via mmap when documents are
  accessed. The RocksDB engine has its own mechanism for caching accessed documents.
Communication Layer
-------------------

* HTTP responses returned by arangod will now include the extra HTTP header
  `x-content-type-options: nosniff` to work around a cross-site scripting bug
  in MSIE

Cluster
-------
* the default value for `--ssl.protocol` was changed from TLSv1 to TLSv1.2.
  When not explicitly set, arangod and all client tools will now use TLSv1.2.

* the JSON data in all incoming HTTP requests is now validated for duplicate
  attribute names.

  Incoming JSON data with duplicate attribute names will now be rejected as
  invalid. Previous versions of ArangoDB only validated the uniqueness of
  attribute names inside incoming JSON for some API endpoints, but not
  consistently for all APIs.

* Internal JavaScript REST actions will now hide their stack traces from the client
  in HTTP responses. Instead, the stack traces will always be logged to the logfile.
Document revisions cache
------------------------
JavaScript
----------

* updated V8 version to 5.7.0.0

* change undocumented behaviour in case of invalid revision ids in
  `If-Match` and `If-None-Match` headers from 400 (BAD) to 412 (PRECONDITION
  FAILED).

* change default string truncation length from 80 characters to 256 characters for
  `print`/`printShell` functions in ArangoShell and arangod. This will emit longer
  prefixes of string values before truncating them with `...`, which is helpful
  for debugging. This change is mostly useful when using the ArangoShell (arangosh).
Pregel
------


AQL
---

### Functions added


### Optimizer improvements

* Geo indexes are now implicitly and automatically used when using appropriate SORT/FILTER
  statements in AQL, without the need to use the somewhat limited special-purpose geo AQL
  functions `NEAR` or `WITHIN`.

  Compared to using the special-purpose AQL functions this approach has the
  advantage that it is more composable, and will also honor any `LIMIT` values
  used in the AQL query.

  The special-purpose `NEAR` AQL function can now be substituted with the
  following AQL (provided there is a geo index present on the `doc.latitude`
  and `doc.longitude` attributes):

      FOR doc in geoSort
        SORT DISTANCE(doc.latitude, doc.longitude, 0, 0)
        LIMIT 5
        RETURN doc

  `WITHIN` can be substituted with the following AQL:

      FOR doc in geoFilter
        FILTER DISTANCE(doc.latitude, doc.longitude, 0, 0) < 2000
        RETURN doc

  Note that this will work in the MMFiles engine only.
### Miscellaneous improvements

* the slow query list now contains the values of bind variables used in the
  slow queries. Bind variables are also provided for the currently running
  queries. This helps debugging slow or blocking queries that use dynamic
  collection names via bind parameters.

Audit Log
---------
* AQL breaking change in cluster:
  The SHORTEST_PATH statement using edge collection names instead
  of a graph name now requires the vertex collection names to be stated explicitly
  within the AQL query in the cluster. This can be done by adding `WITH <name>`
  at the beginning of the query.

  Example:
  ```
  FOR v,e IN OUTBOUND SHORTEST_PATH @start TO @target edges [...]
  ```

  now has to be:

  ```
  WITH vertices
  FOR v,e IN OUTBOUND SHORTEST_PATH @start TO @target edges [...]
  ```

  This change is necessary to avoid deadlock situations in the cluster case.
  An error stating the above is included.
Client tools
------------

Added the tool _arangoexport_ to export collections to json and jsonl. It can also export graphs or collections to xgmml.
* added data export tool, arangoexport.

Web Admin Interface
-------------------
  arangoexport can be used to export collections to json, jsonl or xml
  and export a graph or collections to xgmml.

* added "jsonl" as input file type for arangoimp

* added `--translate` option for arangoimp to translate attribute names from
  the input files to attribute names expected by ArangoDB

  The `--translate` option can be specified multiple times (once per translation
  to be executed). The following example renames the "id" column from the input
  file to "_key", and the "from" column to "_from", and the "to" column to "_to":

      arangoimp --type csv --file data.csv --translate "id=_key" --translate "from=_from" --translate "to=_to"

  `--translate` works for CSV and TSV inputs only.

* changed default value for client tools option `--server.max-packet-size` from 128 MB
  to 256 MB. This allows transferring bigger result sets from the server without the
  client tools rejecting them as invalid.


Authentication
--------------

* added [LDAP](../Administration/Configuration/Ldap.md) authentication (Enterprise only)


Foxx
----

The [cookie session transport](../Foxx/Sessions/Transports/Cookie.md) now supports all options supported by the [cookie method of the response object](../Foxx/Router/Response.md#cookie).
* the [cookie session transport](../Foxx/Sessions/Transports/Cookie.md) now supports all options supported by the [cookie method of the response object](../Foxx/Router/Response.md#cookie).

It's now possible to provide your own version of the `graphql-sync` module when using the [GraphQL extensions for Foxx](../Foxx/GraphQL.md) by passing a copy of the module using the new _graphql_ option.
* it's now possible to provide your own version of the `graphql-sync` module when using the [GraphQL extensions for Foxx](../Foxx/GraphQL.md) by passing a copy of the module using the new _graphql_ option.

Endpoints can now be tagged using the [tag method](../Foxx/Router/Endpoints.md#tag) to generate a cleaner Swagger documentation.
* custom API endpoints can now be tagged using the [tag method](../Foxx/Router/Endpoints.md#tag) to generate a cleaner Swagger documentation.
@ -4,11 +4,80 @@ Incompatible changes in ArangoDB 3.2
It is recommended to check the following list of incompatible changes **before**
upgrading to ArangoDB 3.2, and adjust any client programs if necessary.

AQL
---

* AQL breaking change in cluster:
  The SHORTEST_PATH statement using edge-collection names instead
  of a graph name now requires the vertex-collection names to be stated explicitly
  within the AQL query in the cluster. This can be done by adding `WITH <name>`
  at the beginning of the query.

  Example:
  ```
  FOR v,e IN OUTBOUND SHORTEST_PATH @start TO @target edges [...]
  ```

  now has to be:

  ```
  WITH vertices
  FOR v,e IN OUTBOUND SHORTEST_PATH @start TO @target edges [...]
  ```

  This change is necessary to avoid deadlock situations in the clustered case.
  An error stating the above is included.


REST API
--------

* Removed undocumented internal HTTP API:
  * PUT _api/edges

  The documented GET _api/edges and the undocumented POST _api/edges remain unmodified.

* change undocumented behaviour in case of invalid revision ids in
  `If-Match` and `If-None-Match` headers from returning HTTP status code 400 (bad request)
  to returning HTTP status code 412 (precondition failed).


JavaScript API
--------------

* change undocumented behaviour in case of invalid revision ids in
  JavaScript document operations from returning error code 1239 ("illegal document revision")
  to returning error code 1200 ("conflict").


Foxx
----

JWT token issued by the built-in [JWT session storage](../Foxx/Sessions/Storages/JWT.md) now correctly specify the `iat` and `exp` values in seconds rather than milliseconds as specified in the JSON Web Token standard.
* JWT tokens issued by the built-in [JWT session storage](../Foxx/Sessions/Storages/JWT.md) now correctly specify the `iat` and `exp` values in seconds rather than milliseconds as specified in the JSON Web Token standard.

  This may result in previously expired tokens using milliseconds being incorrectly accepted. For this reason it is recommended to replace the signing `secret` or set the new `maxExp` option to a reasonable value that is smaller than the oldest issued expiration timestamp.

  For example, setting `maxExp` to `10**12` would invalidate all incorrectly issued tokens before 9 September 2001 without impairing new tokens until the year 33658 (at which point these tokens are hopefully no longer relevant).


Command-line options changed
----------------------------

* `--server.maximal-queue-size` is now an absolute maximum. If the queue is
  full, then 503 is returned. Setting it to 0 means "no limit". The default
  value for this option is now `0`.

* the default value for `--ssl.protocol` has been changed from `4` (TLSv1) to `5` (TLSv1.2).

* the startup options `--database.revision-cache-chunk-size` and
  `--database.revision-cache-target-size` are now obsolete and do nothing

* the startup option `--database.index-threads` is now obsolete

* the option `--javascript.v8-contexts` is now an absolute maximum. The server
  may start fewer V8 contexts for JavaScript execution at startup. If at some
  point the server needs more V8 contexts, it may start them dynamically, until
  the number of V8 contexts reaches the value of `--javascript.v8-contexts`.

  The minimum number of V8 contexts to create at startup can be configured via
  the new startup option `--javascript.v8-contexts-minimum`.
@ -153,6 +153,7 @@
* [Server Configuration](Administration/Configuration/README.md)
  * [Managing Endpoints](Administration/Configuration/Endpoint.md)
  * [SSL Configuration](Administration/Configuration/SSL.md)
  * [LDAP Options](Administration/Configuration/Ldap.md)
  * [Logging Options](Administration/Configuration/Logging.md)
  * [General Options](Administration/Configuration/GeneralArangod.md)
  * [Write-Ahead Log Options](Administration/Configuration/Wal.md)
LES-TODOS
@ -1,101 +0,0 @@
done
----
- create new branch
- factor out transactions from LogfileManager
- concept "collection locks"
- 3 states: READ / WRITE / EXCLUSIVE for locks
- index implementations => moved & renamed to StorageEngine
- move engine files into MMFiles directory
- split IndexElement
- rename *IndexElement to MMFiles*
- move fulltext & geoindex & skiplist helper code into MMFiles
- rename "RocksDBIndex" to "PersistentIndex"
- Delete OperationCursor->getMore. Replace by getMoreTokens
- remove ReadCache
- Index API
- Indexes always return std::vector<TOKEN>
- Rename OperationCursor->getMoreMptr => getMoreTokens, "returns" std::vector<TOKEN>&
- GeoIndex hands out TRI_revision instead of Tokens
- FulltextIndex hands out TRI_revision instead of Tokens
- trx::InvokeOnAllElements: uses SimpleIndexLookupElements => Has to use Tokens instead?
- remove TransactionState include from Transaction.h
MMFiles reference removals from files:
* arangod/V8Server/v8-query.cpp
- StorageEngine specific IndexFactory:
- create indexes from VPack
- enhance/validate given VPack index definitions (set default values, reject illegal ones)
- Implement new IndexIterator API next(callback, limit)
- Primary
- Hash
- Skiplist
- Persistent
- Geo
- Fulltext
- index API
- StorageEngine specific AQL functions
- Register for specific function names => branches to StorageEngine impl
- Storage Engine can implement these functions with specific code and internals
- e.g.: Geo, Fulltext
- Replace usage of new callback-based IndexIterator
- move engine-specific parts of transaction.cpp into engine
- Logical => Physical
- keyGenerator
- dropIndex(p) => phys->dropIndex(p, true)
- DML API
- DDL API
- StorageEngineAPI readDocument requires 2 functions:
- void readDocument(TOKEN id, VPackBuilder& result) => Collects the document and inserts it as-is into result. Does NOT clear result.
- void readDocument(TOKEN id, std::vector<std::string> const& attributePath, VPackBuilder& result) => Collects the document and writes only the value at the given attributePath (e.g. `a.b.c`) into result. Does NOT clear result.

in progress
-----------
- check for illegal includes
- fix includes during API conversion

to do
-----
- rename TRI_df_marker_* to something storage-engine specific
- applyForTickRange has to be moved to MMFilesCollection (used in replication-dump)
- add new serialization RW lock to LogicalCollection. All DML ops must acquire it in read mode, the explicit lock command must acquire it in write mode.
- AqlValue needs a (lazily evaluated) type TOKEN that handles collection ID and TOKEN in place.
- slice() => looks up the value in the Database
- We need to keep in mind the cluster. If a DBServer creates this token-type it has to be translated BEFORE the register is teleported to the coordinator
- Remove temporary wrapper LogCol::readDocument()
- InitialSyncer.cpp knows details of StorageEngine MMFiles

MMFiles are known to the following files:
* arangod/Replication/InitialSyncer.cpp
* arangod/RestHandler/RestExportHandler.cpp
* arangod/RestHandler/RestWalHandler.cpp
* arangod/RestHandler/RestReplicationHandler.cpp
* arangod/RestServer/arangod.cpp
* arangod/StorageEngine/EngineSelectorFeature.cpp
* arangod/Utils/CollectionExport.cpp
* arangod/Utils/CollectionKeys.cpp
* arangod/V8Server/v8-replication.cpp
* arangod/V8Server/v8-collection.cpp
* arangod/V8Server/v8-vocbase.cpp
* arangod/VocBase/replication-dump.cpp
* arangod/VocBase/vocbase.cpp

- IndexFactory needs a function to stringifyIndexes and rename idxFoo => IdxZirkusBar

- Implement new IndexIterator API nextExtra(callback, limit)
- Geo

- OperationOptions
- recoveryMarker
- re-enable RocksDB storage engine (e.g. arangod.cpp)
- implement RocksDB storage engine

Questions
---------
* For GeoIndex `ignoreNull: true` and `constraint: false` are only set in Cluster mode. Is that art or can it go away?

OpenIssues Hacki
----------------
* HashIndex lookup into a still-local buffer could be replaced by a callback as well.
* SingleServerTraverser API does NOT take over responsibility for slice data. getMore() expects slices not to go away.
* This API can be improved if we make better use of those callbacks.
* ShortestPathBlock does assume that slices do not walk away.
* EdgeCollectionInfos in ShortestPath could share one conditionBuilder.
@ -252,7 +252,7 @@ function main(argv) {
  }

  // creates yaml like dump at the end
  UnitTest.unitTestPrettyPrintResults(r, testOutputDirectory);
  UnitTest.unitTestPrettyPrintResults(r, testOutputDirectory, options);

  return r.status;
}
@ -1223,7 +1223,7 @@ bool AgencyComm::lock(std::string const& key, double ttl, double timeout,
      return true;
    }

    usleep(sleepTime);
    usleep((TRI_usleep_t) sleepTime);

    if (sleepTime < MAX_SLEEP_TIME) {
      sleepTime += INITIAL_SLEEP_TIME;

@ -1264,7 +1264,7 @@ bool AgencyComm::unlock(std::string const& key, VPackSlice const& slice,
      return true;
    }

    usleep(sleepTime);
    usleep((TRI_usleep_t)sleepTime);

    if (sleepTime < MAX_SLEEP_TIME) {
      sleepTime += INITIAL_SLEEP_TIME;
@ -208,6 +208,15 @@ void AgencyFeature::start() {
    return;
  }

  // Find the agency prefix:
  auto feature = ApplicationServer::getFeature<ClusterFeature>("Cluster");
  if (!feature->agencyPrefix().empty()) {
    arangodb::consensus::Supervision::setAgencyPrefix(
      std::string("/") + feature->agencyPrefix());
    arangodb::consensus::Job::agencyPrefix
      = std::string("/") + feature->agencyPrefix();
  }

  // TODO: Port this to new options handling
  std::string endpoint;
@ -22,9 +22,10 @@
////////////////////////////////////////////////////////////////////////////////

#include "Job.h"

#include "Random/RandomGenerator.h"

#include <numeric>

static std::string const DBServer = "DBServer";

using namespace arangodb::consensus;
@ -236,6 +237,38 @@ std::vector<std::string> Job::availableServers(Node const& snapshot) {

}

template<typename T> std::vector<size_t> idxsort (const std::vector<T> &v) {

  std::vector<size_t> idx(v.size());

  std::iota(idx.begin(), idx.end(), 0);
  std::sort(idx.begin(), idx.end(),
            [&v](size_t i, size_t j) { return v[i] < v[j]; });

  return idx;

}

std::vector<std::string> sortedShardList(Node const& shards) {

  std::vector<size_t> sids;
  auto const& shardMap = shards.children();
  for (const auto& shard : shardMap) {
    sids.push_back(std::stoul(shard.first.substr(1)));
  }

  std::vector<size_t> idx(idxsort(sids));
  std::vector<std::string> sorted;
  for (const auto& i : idx) {
    auto x = shardMap.begin();
    std::advance(x, i);
    sorted.push_back(x->first);
  }

  return sorted;

}

std::vector<Job::shard_t> Job::clones(
    Node const& snapshot, std::string const& database,
    std::string const& collection, std::string const& shard) {
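The new `idxsort`/`sortedShardList` pair implements a numeric argsort over shard names such as `s1`, `s2`, `s10`, whose plain string order would be wrong. A self-contained sketch of the same idiom (the `main` body and sample names are illustrative, not from the commit):

```
#include <algorithm>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>

// argsort: return the element indices of v in ascending order of value
template <typename T>
std::vector<size_t> idxsort(std::vector<T> const& v) {
  std::vector<size_t> idx(v.size());
  std::iota(idx.begin(), idx.end(), 0);
  std::sort(idx.begin(), idx.end(),
            [&v](size_t i, size_t j) { return v[i] < v[j]; });
  return idx;
}

int main() {
  // shard names carry numeric suffixes, so "s10" must sort after "s9";
  // sorting the parsed numbers and applying the permutation fixes that
  std::vector<std::string> shards = {"s10", "s2", "s1"};
  std::vector<size_t> sids;
  for (auto const& s : shards) sids.push_back(std::stoul(s.substr(1)));

  for (size_t i : idxsort(sids)) std::cout << shards[i] << "\n";
  // prints: s1 s2 s10
}
```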
@ -244,34 +277,24 @@ std::vector<Job::shard_t> Job::clones(
  ret.emplace_back(collection, shard);  // add (collection, shard) as first item
  //typedef std::unordered_map<std::string, std::shared_ptr<Node>> UChildren;

  try {
    std::string databasePath = planColPrefix + database,
      planPath = databasePath + "/" + collection + "/shards";

    auto myshards = snapshot(planPath).children();
    auto steps = std::distance(myshards.begin(), myshards.find(shard));

    for (const auto& colptr : snapshot(databasePath).children()) {  // collections

      auto const col = *colptr.second;
      auto const otherCollection = colptr.first;

      try {
        std::string const& prototype =
          col("distributeShardsLike").slice().copyString();
        if (otherCollection != collection && prototype == collection) {
          auto othershards = col("shards").children();
          auto opos = othershards.begin();
          std::advance(opos, steps);
          auto const& otherShard = opos->first;
          ret.emplace_back(otherCollection, otherShard);
        }
      } catch(...) {}

    std::string databasePath = planColPrefix + database,
      planPath = databasePath + "/" + collection + "/shards";

    auto myshards = sortedShardList(snapshot(planPath));
    auto steps = std::distance(
      myshards.begin(), std::find(myshards.begin(), myshards.end(), shard));

    for (const auto& colptr : snapshot(databasePath).children()) {  // collections

      auto const col = *colptr.second;
      auto const otherCollection = colptr.first;

      if (otherCollection != collection &&
          col.has("distributeShardsLike") &&
          col("distributeShardsLike").slice().copyString() == collection) {
        ret.emplace_back(otherCollection, sortedShardList(col("shards"))[steps]);
      }
  } catch (...) {
    ret.clear();
    ret.emplace_back(collection, shard);
  }

  return ret;
@ -566,7 +566,7 @@ std::string Supervision::serverHealth(std::string const& serverName) {
    return status;
  } catch (...) {
    LOG_TOPIC(WARN, Logger::SUPERVISION)
        << "Couldn't read server health status for server " << serverName;
    return "";
  }
}
@ -119,7 +119,9 @@ class Supervision : public arangodb::Thread {
  }

  static void setAgencyPrefix(std::string prefix) {
    LOG_TOPIC(WARN, Logger::SUPERVISION) << "WTF? " << _agencyPrefix;
    _agencyPrefix = prefix;
    LOG_TOPIC(WARN, Logger::SUPERVISION) << "WTF? " << _agencyPrefix;
  }

 private:
@ -1241,9 +1241,9 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
    // optionally restrict query to certain shards
    inst->includedShards(query->includedShards());

    plan->root()->walk(inst.get());

    try {
      plan->root()->walk(inst.get());  // if this throws, we need to
                                       // clean up as well
      engine = inst.get()->buildEngines();
      root = engine->root();
      // Now find all shards that take part:
@ -27,10 +27,12 @@
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
#include "Cluster/ClusterComm.h"
#include "Graph/AttributeWeightShortestPathFinder.h"
#include "Graph/ConstantWeightShortestPathFinder.h"
#include "Graph/ShortestPathFinder.h"
#include "Graph/ShortestPathResult.h"
#include "Transaction/Methods.h"
#include "Utils/OperationCursor.h"
#include "VocBase/EdgeCollectionInfo.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/ManagedDocumentResult.h"
#include "VocBase/ticks.h"

@ -48,7 +50,7 @@ ShortestPathBlock::ShortestPathBlock(ExecutionEngine* engine,
      _vertexReg(ExecutionNode::MaxRegisterId),
      _edgeVar(nullptr),
      _edgeReg(ExecutionNode::MaxRegisterId),
      _opts(nullptr),
      _opts(static_cast<ShortestPathOptions*>(ep->options())),
      _posInPath(0),
      _pathLength(0),
      _path(nullptr),

@ -58,8 +60,7 @@ ShortestPathBlock::ShortestPathBlock(ExecutionEngine* engine,
      _useTargetRegister(false),
      _usedConstant(false),
      _engines(nullptr) {
  _opts = static_cast<ShortestPathOptions*>(ep->options());
  _mmdr.reset(new ManagedDocumentResult);
  TRI_ASSERT(_opts != nullptr);

  if (!ep->usesStartInVariable()) {
    _startVertexId = ep->getStartVertex();
@ -36,17 +36,11 @@ class ShortestPathFinder;
class ShortestPathResult;
}

namespace traverser {
class EdgeCollectionInfo;
}

namespace aql {

class ShortestPathNode;

class ShortestPathBlock : public ExecutionBlock {
  friend struct EdgeWeightExpanderLocal;
  friend struct EdgeWeightExpanderCluster;

 public:
  ShortestPathBlock(ExecutionEngine* engine, ShortestPathNode const* ep);

@ -96,8 +90,6 @@ class ShortestPathBlock : public ExecutionBlock {
  /// @brief Register for the edge output
  RegisterId _edgeReg;

  std::unique_ptr<ManagedDocumentResult> _mmdr;

  /// @brief options to compute the shortest path
  graph::ShortestPathOptions* _opts;
@ -207,6 +207,7 @@ SET(ARANGOD_SOURCES
  Cluster/v8-cluster.cpp
  GeneralServer/AsyncJobManager.cpp
  GeneralServer/AuthenticationFeature.cpp
  GeneralServer/AuthenticationHandler.cpp
  GeneralServer/GeneralCommTask.cpp
  GeneralServer/GeneralListenTask.cpp
  GeneralServer/GeneralServer.cpp

@ -406,6 +407,19 @@ target_link_libraries(arangoserver
  ${SYSTEM_LIBRARIES}
)

if (USE_ENTERPRISE)
  if (MSVC)
    target_link_libraries(arangoserver
      Wldap32.lib
    )
  else()
    target_link_libraries(arangoserver
      ldap
      lber
    )
  endif()
endif()

add_executable(${BIN_ARANGOD}
  RestServer/arangod.cpp
)
@ -577,7 +577,7 @@ bool ClusterComm::match(ClientTransactionID const& clientTransactionID,
/// from deleting `result` and `answer`.
////////////////////////////////////////////////////////////////////////////////

ClusterCommResult const ClusterComm::enquire(Ticket const ticketId) {
ClusterCommResult const ClusterComm::enquire(communicator::Ticket const ticketId) {
  ResponseIterator i;
  AsyncResponse response;


@ -614,7 +614,7 @@ ClusterCommResult const ClusterComm::enquire(Ticket const ticketId) {

ClusterCommResult const ClusterComm::wait(
    ClientTransactionID const& clientTransactionID,
    CoordTransactionID const coordTransactionID, Ticket const ticketId,
    CoordTransactionID const coordTransactionID, communicator::Ticket const ticketId,
    ShardID const& shardID, ClusterCommTimeout timeout) {

  ResponseIterator i;
@ -1123,6 +1123,19 @@ void ClusterComm::addAuthorization(std::unordered_map<std::string, std::string>*
  }
}

std::vector<communicator::Ticket> ClusterComm::activeServerTickets(std::vector<std::string> const& servers) {
  std::vector<communicator::Ticket> tickets;
  CONDITION_LOCKER(locker, somethingReceived);
  for (auto const& it: responses) {
    for (auto const& server: servers) {
      if (it.second.result && it.second.result->serverID == server) {
        tickets.push_back(it.first);
      }
    }
  }
  return tickets;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief ClusterComm main loop
////////////////////////////////////////////////////////////////////////////////
@ -1130,18 +1143,10 @@ void ClusterComm::addAuthorization(std::unordered_map<std::string, std::string>*
void ClusterCommThread::abortRequestsToFailedServers() {
  ClusterInfo* ci = ClusterInfo::instance();
  auto failedServers = ci->getFailedServers();
  std::vector<std::string> failedServerEndpoints;
  failedServerEndpoints.reserve(failedServers.size());

  for (auto const& failedServer: failedServers) {
    failedServerEndpoints.push_back(_cc->createCommunicatorDestination(ci->getServerEndpoint(failedServer), "/").url());
  }

  for (auto const& request: _cc->communicator()->requestsInProgress()) {
    for (auto const& failedServerEndpoint: failedServerEndpoints) {
      if (request->_destination.url().substr(0, failedServerEndpoint.length()) == failedServerEndpoint) {
        _cc->communicator()->abortRequest(request->_ticketId);
      }
  if (failedServers.size() > 0) {
    auto ticketIds = _cc->activeServerTickets(failedServers);
    for (auto const& ticketId: ticketIds) {
      _cc->communicator()->abortRequest(ticketId);
    }
  }
}
@ -616,6 +616,12 @@ class ClusterComm {

  void cleanupAllQueues();

  //////////////////////////////////////////////////////////////////////////////
  /// @brief activeServerTickets for a list of servers
  //////////////////////////////////////////////////////////////////////////////

  std::vector<communicator::Ticket> activeServerTickets(std::vector<std::string> const& servers);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief our background communications thread
  //////////////////////////////////////////////////////////////////////////////
@ -95,7 +95,7 @@ void ClusterFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
      "agency endpoint to connect to",
      new VectorParameter<StringParameter>(&_agencyEndpoints));

  options->addOption("--cluster.agency-prefix", "agency prefix",
  options->addHiddenOption("--cluster.agency-prefix", "agency prefix",
      new StringParameter(&_agencyPrefix));

  options->addOption("--cluster.my-local-info", "this server's local info",
@ -134,7 +134,11 @@ void ClusterFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
      "replication factor for system collections",
      new UInt32Parameter(&_systemReplicationFactor));

  options->addOption("--cluster.create-waits-for-sync-replication",
      "active coordinator will wait for all replicas to create collection",
      new BooleanParameter(&_createWaitsForSyncReplication));

  options->addOption("--cluster.create-waits-for-sync-replication",
      "active coordinator will wait for all replicas to create collection",
      new BooleanParameter(&_createWaitsForSyncReplication));
}
@ -144,7 +148,7 @@ void ClusterFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
|
|||
if (options->processingResult().touched("cluster.disable-dispatcher-kickstarter") ||
|
||||
options->processingResult().touched("cluster.disable-dispatcher-frontend")) {
|
||||
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
|
||||
<< "The dispatcher feature isn't available anymore. Use ArangoDBStarter for this now! See https://github.com/arangodb-helper/ArangoDBStarter/ for more details.";
|
||||
<< "The dispatcher feature isn't available anymore. Use ArangoDBStarter for this now! See https://github.com/arangodb-helper/ArangoDBStarter/ for more details.";
|
||||
FATAL_ERROR_EXIT();
|
||||
}
|
||||
|
||||
|
@@ -158,7 +162,7 @@ void ClusterFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {

// validate --cluster.agency-endpoint (currently a noop)
if (_agencyEndpoints.empty()) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
LOG_TOPIC(FATAL, Logger::CLUSTER)
<< "must at least specify one endpoint in --cluster.agency-endpoint";
FATAL_ERROR_EXIT();
}

@@ -173,13 +177,13 @@ void ClusterFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789/");

if (found != std::string::npos || _agencyPrefix.empty()) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value specified for --cluster.agency-prefix";
LOG_TOPIC(FATAL, arangodb::Logger::CLUSTER) << "invalid value specified for --cluster.agency-prefix";
FATAL_ERROR_EXIT();
}

// validate system-replication-factor
if (_systemReplicationFactor == 0) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "system replication factor must be greater 0";
LOG_TOPIC(FATAL, arangodb::Logger::CLUSTER) << "system replication factor must be greater 0";
FATAL_ERROR_EXIT();
}

@@ -190,7 +194,7 @@ void ClusterFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
_requestedRole == ServerState::ROLE_AGENT ||
_requestedRole == ServerState::ROLE_UNDEFINED
) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Invalid role provided. Possible values: PRIMARY, "
LOG_TOPIC(FATAL, arangodb::Logger::CLUSTER) << "Invalid role provided. Possible values: PRIMARY, "
"SECONDARY, COORDINATOR";
FATAL_ERROR_EXIT();
}

@@ -202,7 +206,7 @@ void ClusterFeature::reportRole(arangodb::ServerState::RoleEnum role) {
if (role == ServerState::ROLE_UNDEFINED) {
roleString += ". Determining real role from agency";
}
LOG_TOPIC(INFO, arangodb::Logger::FIXME) << "Starting up with role " << roleString;
LOG_TOPIC(INFO, arangodb::Logger::CLUSTER) << "Starting up with role " << roleString;
}

void ClusterFeature::prepare() {

@@ -247,7 +251,7 @@ void ClusterFeature::prepare() {
"Authentication");

if (authenticationFeature->isEnabled() && !authenticationFeature->hasUserdefinedJwt()) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Cluster authentication enabled but jwt not set via command line. Please"
LOG_TOPIC(FATAL, arangodb::Logger::CLUSTER) << "Cluster authentication enabled but jwt not set via command line. Please"
<< " provide --server.jwt-secret which is used throughout the cluster.";
FATAL_ERROR_EXIT();
}

@@ -275,7 +279,7 @@ void ClusterFeature::prepare() {
std::string const unified = Endpoint::unifiedForm(_agencyEndpoints[i]);

if (unified.empty()) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid endpoint '" << _agencyEndpoints[i]
LOG_TOPIC(FATAL, arangodb::Logger::CLUSTER) << "invalid endpoint '" << _agencyEndpoints[i]
<< "' specified for --cluster.agency-endpoint";
FATAL_ERROR_EXIT();
}

@@ -293,7 +297,7 @@ void ClusterFeature::prepare() {
ClusterComm::instance()->enableConnectionErrorLogging(false);
// perform an initial connect to the agency
if (!AgencyCommManager::MANAGER->start()) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Could not connect to any agency endpoints ("
LOG_TOPIC(FATAL, arangodb::Logger::CLUSTER) << "Could not connect to any agency endpoints ("
<< AgencyCommManager::MANAGER->endpointsString() << ")";
FATAL_ERROR_EXIT();
}

@@ -310,13 +314,13 @@ void ClusterFeature::prepare() {

if (role == ServerState::ROLE_UNDEFINED) {
// no role found
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "unable to determine unambiguous role for server '" << _myId
LOG_TOPIC(FATAL, arangodb::Logger::CLUSTER) << "unable to determine unambiguous role for server '" << _myId
<< "'. No role configured in agency (" << endpoints << ")";
FATAL_ERROR_EXIT();
}

if (role == ServerState::ROLE_SINGLE) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "determined single-server role for server '" << _myId
LOG_TOPIC(FATAL, arangodb::Logger::CLUSTER) << "determined single-server role for server '" << _myId
<< "'. Please check the configuration in the agency ("
<< endpoints << ")";
FATAL_ERROR_EXIT();

@@ -343,12 +347,12 @@ void ClusterFeature::prepare() {
double start = TRI_microtime();

while (true) {
LOG_TOPIC(INFO, arangodb::Logger::FIXME) << "Waiting for DBservers to show up...";
LOG_TOPIC(INFO, arangodb::Logger::CLUSTER) << "Waiting for DBservers to show up...";
ci->loadCurrentDBServers();
std::vector<ServerID> DBServers = ci->getCurrentDBServers();
if (DBServers.size() >= 1 &&
(DBServers.size() > 1 || TRI_microtime() - start > 15.0)) {
LOG_TOPIC(INFO, arangodb::Logger::FIXME) << "Found " << DBServers.size() << " DBservers.";
LOG_TOPIC(INFO, arangodb::Logger::CLUSTER) << "Found " << DBServers.size() << " DBservers.";
break;
}
sleep(1);

@@ -357,7 +361,7 @@ void ClusterFeature::prepare() {
}

if (_myAddress.empty()) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "unable to determine internal address for server '" << _myId
LOG_TOPIC(FATAL, arangodb::Logger::CLUSTER) << "unable to determine internal address for server '" << _myId
<< "'. Please specify --cluster.my-address or configure the "
"address for this server in the agency.";
FATAL_ERROR_EXIT();

@@ -365,7 +369,7 @@ void ClusterFeature::prepare() {

// now we can validate --cluster.my-address
if (Endpoint::unifiedForm(_myAddress).empty()) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid endpoint '" << _myAddress
LOG_TOPIC(FATAL, arangodb::Logger::CLUSTER) << "invalid endpoint '" << _myAddress
<< "' specified for --cluster.my-address";
FATAL_ERROR_EXIT();
}

@@ -396,7 +400,7 @@ void ClusterFeature::start() {

ServerState::RoleEnum role = ServerState::instance()->getRole();

LOG_TOPIC(INFO, arangodb::Logger::FIXME) << "Cluster feature is turned on. Agency version: " << version
LOG_TOPIC(INFO, arangodb::Logger::CLUSTER) << "Cluster feature is turned on. Agency version: " << version
<< ", Agency endpoints: " << endpoints << ", server id: '" << _myId
<< "', internal address: " << _myAddress
<< ", role: " << ServerState::roleToString(role);

@@ -412,7 +416,7 @@ void ClusterFeature::start() {
if (HeartbeatIntervalMs.isInteger()) {
try {
_heartbeatInterval = HeartbeatIntervalMs.getUInt();
LOG_TOPIC(INFO, arangodb::Logger::FIXME) << "using heartbeat interval value '" << _heartbeatInterval
LOG_TOPIC(INFO, arangodb::Logger::CLUSTER) << "using heartbeat interval value '" << _heartbeatInterval
<< " ms' from agency";
} catch (...) {
// Ignore if it is not a small int or uint

@@ -424,7 +428,7 @@ void ClusterFeature::start() {
if (_heartbeatInterval == 0) {
_heartbeatInterval = 5000; // 1/s

LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "unable to read heartbeat interval from agency. Using "
LOG_TOPIC(WARN, arangodb::Logger::CLUSTER) << "unable to read heartbeat interval from agency. Using "
<< "default value '" << _heartbeatInterval << " ms'";
}

@@ -434,7 +438,7 @@ void ClusterFeature::start() {
SchedulerFeature::SCHEDULER->ioService());

if (!_heartbeatThread->init() || !_heartbeatThread->start()) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "heartbeat could not connect to agency endpoints ("
LOG_TOPIC(FATAL, arangodb::Logger::CLUSTER) << "heartbeat could not connect to agency endpoints ("
<< endpoints << ")";
FATAL_ERROR_EXIT();
}

@@ -453,7 +457,7 @@ void ClusterFeature::start() {
VPackObjectBuilder b(&builder);
builder.add("endpoint", VPackValue(_myAddress));
} catch (...) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "out of memory";
LOG_TOPIC(FATAL, arangodb::Logger::CLUSTER) << "out of memory";
FATAL_ERROR_EXIT();
}

@@ -461,7 +465,7 @@ void ClusterFeature::start() {
builder.slice(), 0.0);

if (!result.successful()) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "unable to register server in agency: http code: "
LOG_TOPIC(FATAL, arangodb::Logger::CLUSTER) << "unable to register server in agency: http code: "
<< result.httpCode() << ", body: " << result.body();
FATAL_ERROR_EXIT();
} else {

@@ -494,7 +498,7 @@ void ClusterFeature::stop() {
usleep(100000);
// emit warning after 5 seconds
if (++counter == 10 * 5) {
LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "waiting for heartbeat thread to finish";
LOG_TOPIC(WARN, arangodb::Logger::CLUSTER) << "waiting for heartbeat thread to finish";
}
}
}

@@ -521,7 +525,7 @@ void ClusterFeature::unprepare() {
usleep(100000);
// emit warning after 5 seconds
if (++counter == 10 * 5) {
LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "waiting for heartbeat thread to finish";
LOG_TOPIC(WARN, arangodb::Logger::CLUSTER) << "waiting for heartbeat thread to finish";
}
}
}

@@ -45,6 +45,10 @@ class ClusterFeature : public application_features::ApplicationFeature {
void start() override final;
void unprepare() override final;

std::string agencyPrefix() {
return _agencyPrefix;
}

private:
std::vector<std::string> _agencyEndpoints;
std::string _agencyPrefix;

@@ -1100,31 +1100,26 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName,
std::function<bool(VPackSlice const& result)> dbServerChanged =
[=](VPackSlice const& result) {
if (result.isObject() && result.length() == (size_t)numberOfShards) {
std::string tmpError = "";
for (auto const& p : VPackObjectIterator(result)) {
if (arangodb::basics::VelocyPackHelper::getBooleanValue(
p.value, "error", false)) {
std::string tmpMsg = "";

tmpMsg += " shardID:" + p.key.copyString() + ":";
tmpMsg += arangodb::basics::VelocyPackHelper::getStringValue(
tmpError += " shardID:" + p.key.copyString() + ":";
tmpError += arangodb::basics::VelocyPackHelper::getStringValue(
p.value, "errorMessage", "");
if (p.value.hasKey("errorNum")) {
VPackSlice const errorNum = p.value.get("errorNum");
if (errorNum.isNumber()) {
tmpMsg += " (errNum=";
tmpMsg += basics::StringUtils::itoa(
tmpError += " (errNum=";
tmpError += basics::StringUtils::itoa(
errorNum.getNumericValue<uint32_t>());
tmpMsg += ")";
tmpError += ")";
}
}
*errMsg = "Error in creation of collection:" + tmpMsg + " "
+ __FILE__ + std::to_string(__LINE__);
*dbServerResult = TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION;
return true;
}

// wait that all followers have created our new collection
if (waitForReplication) {
if (tmpError.empty() && waitForReplication) {
uint64_t mutableReplicationFactor = replicationFactor;
if (mutableReplicationFactor == 0) {
mutableReplicationFactor = dbServers.size();

@@ -1136,7 +1131,13 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName,
}
}
}
*dbServerResult = setErrormsg(TRI_ERROR_NO_ERROR, *errMsg);
if (!tmpError.empty()) {
*errMsg = "Error in creation of collection:" + tmpError + " "
+ __FILE__ + std::to_string(__LINE__);
*dbServerResult = TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION;
} else {
*dbServerResult = setErrormsg(TRI_ERROR_NO_ERROR, *errMsg);
}
}
return true;
};

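The reworked callback above no longer aborts on the first failing shard; it aggregates every shard's error into one string and decides success or failure only after all shards have reported. A standalone sketch of that aggregation pattern, using plain standard-library types rather than the VelocyPack API (ShardReport is a hypothetical stand-in):

#include <map>
#include <string>

struct ShardReport {        // hypothetical stand-in for one shard's agency entry
  bool error = false;
  std::string errorMessage;
};

// Collect all shard errors into one message; an empty result means success.
std::string collectShardErrors(std::map<std::string, ShardReport> const& shards) {
  std::string tmpError;
  for (auto const& p : shards) {
    if (p.second.error) {
      tmpError += " shardID:" + p.first + ":" + p.second.errorMessage;
    }
  }
  return tmpError;
}
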
@@ -1315,17 +1316,17 @@ int ClusterInfo::dropCollectionCoordinator(
clones.push_back(p->name());
}
}
if (!clones.empty()){
errorMsg += "Collection must not be dropped while it is sharding "
"prototype for collection[s]";
for (auto const& i : clones) {

if (!clones.empty()){
errorMsg += "Collection must not be dropped while it is sharding "
"prototype for collection[s]";
for (auto const& i : clones) {
errorMsg += std::string(" ") + i;
}
errorMsg += ".";
return TRI_ERROR_CLUSTER_MUST_NOT_DROP_COLL_OTHER_DISTRIBUTESHARDSLIKE;

}

errorMsg += ".";
return TRI_ERROR_CLUSTER_MUST_NOT_DROP_COLL_OTHER_DISTRIBUTESHARDSLIKE;
}

double const realTimeout = getTimeout(timeout);
double const endTime = TRI_microtime() + realTimeout;
double const interval = getPollInterval();

@@ -2284,7 +2284,7 @@ ClusterMethods::persistCollectionInAgency(
std::string distributeShardsLike = col->distributeShardsLike();
std::vector<std::string> dbServers;
std::vector<std::string> avoid = col->avoidServers();

ClusterInfo* ci = ClusterInfo::instance();
if (!distributeShardsLike.empty()) {
CollectionNameResolver resolver(col->vocbase());

@@ -2320,7 +2320,6 @@ ClusterMethods::persistCollectionInAgency(
}
col->distributeShardsLike(otherCidString);
} else {
LOG_TOPIC(WARN, Logger::CLUSTER) << "WTF? " << ignoreDistributeShardsLikeErrors;
if (ignoreDistributeShardsLikeErrors) {
col->distributeShardsLike(std::string());
} else {

@@ -258,8 +258,8 @@ class ClusterMethods {
static std::unique_ptr<LogicalCollection> createCollectionOnCoordinator(
TRI_col_type_e collectionType, TRI_vocbase_t* vocbase,
arangodb::velocypack::Slice parameters,
bool ignoreDistributeShardsLikeErrors,
bool waitForSyncReplication);
bool ignoreDistributeShardsLikeErrors = true,
bool waitForSyncReplication = true);

private:

@@ -268,7 +268,8 @@ class ClusterMethods {
////////////////////////////////////////////////////////////////////////////////

static std::unique_ptr<LogicalCollection> persistCollectionInAgency(
LogicalCollection* col, bool ignoreDistributeShardsLikeErrors, bool waitForSyncReplication);
LogicalCollection* col, bool ignoreDistributeShardsLikeErrors = true,
bool waitForSyncReplication = true);
};

} // namespace arangodb

@@ -22,11 +22,17 @@

#include "AuthenticationFeature.h"

#include "ApplicationFeatures/ApplicationServer.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "Random/RandomGenerator.h"
#include "RestServer/QueryRegistryFeature.h"

#if USE_ENTERPRISE
#include "Enterprise/Ldap/LdapFeature.h"
#include "Enterprise/Ldap/LdapAuthenticationHandler.h"
#endif

using namespace arangodb;
using namespace arangodb::options;

@@ -161,3 +167,12 @@ void AuthenticationFeature::prepare() {

void AuthenticationFeature::stop() {
}

AuthenticationHandler* AuthenticationFeature::getHandler() {
#if USE_ENTERPRISE
if (application_features::ApplicationServer::getFeature<LdapFeature>("Ldap")->isEnabled()) {
return new LdapAuthenticationHandler();
}
#endif
return new DefaultAuthenticationHandler();
}

@@ -24,8 +24,10 @@
#define APPLICATION_FEATURES_AUTHENTICATION_FEATURE_H 1

#include "ApplicationFeatures/ApplicationFeature.h"
#include "AuthenticationHandler.h"
#include "VocBase/AuthInfo.h"

namespace arangodb {
class AuthenticationFeature final
: public application_features::ApplicationFeature {

@@ -60,6 +62,7 @@ class AuthenticationFeature final
void setJwtSecret(std::string const& jwtSecret) { authInfo()->setJwtSecret(jwtSecret); }
AuthInfo* authInfo();
AuthLevel canUseDatabase(std::string const& username, std::string const& dbname);
AuthenticationHandler* getHandler();
};
};

@@ -0,0 +1,30 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Manuel Baesler
////////////////////////////////////////////////////////////////////////////////

#include "AuthenticationHandler.h"
#include "Logger/Logger.h"

using namespace arangodb;

AuthenticationResult DefaultAuthenticationHandler::authenticate(std::string const& username, std::string const& password) {
return AuthenticationResult(TRI_ERROR_USER_NOT_FOUND, AuthSource::COLLECTION);
}

@@ -0,0 +1,68 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Manuel Baesler
////////////////////////////////////////////////////////////////////////////////

#ifndef GENERAL_SERVER_AUTHENTICATION_HANDLER_H
#define GENERAL_SERVER_AUTHENTICATION_HANDLER_H 1

#include "Basics/Common.h"
#include "Basics/Result.h"
#include "VocBase/AuthInfo.h"

namespace arangodb {

class AuthResult;
enum class AuthSource;

class AuthenticationResult : public arangodb::Result {
public:
explicit AuthenticationResult(AuthSource const& source) : AuthenticationResult(TRI_ERROR_FAILED, source) {}

AuthenticationResult(int errorNumber, AuthSource const& source) : Result(errorNumber), _authSource(source) {}
AuthenticationResult(std::unordered_map<std::string, std::string> const& permissions, AuthSource const& source) :
Result(0),
_authSource(source),
_permissions(permissions) {}

AuthSource source() const { return _authSource; }
std::unordered_map<std::string, std::string> permissions() const { return _permissions; }

protected:
AuthSource _authSource;
std::unordered_map<std::string, std::string> _permissions;
};

class AuthenticationHandler {
public:
virtual AuthenticationResult authenticate(std::string const& username, std::string const& password) = 0;
virtual ~AuthenticationHandler() {}
};

class DefaultAuthenticationHandler : public AuthenticationHandler {
public:
DefaultAuthenticationHandler() {}
AuthenticationResult authenticate(std::string const& username, std::string const& password) override;
virtual ~DefaultAuthenticationHandler() {}
};
};

#endif

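The header above turns authentication into a pluggable interface: a custom backend only has to subclass AuthenticationHandler. As a usage illustration, the HardcodedAuthenticationHandler below is a hypothetical example, not part of the commit; it assumes this commit's AuthenticationHandler.h is on the include path:

#include "AuthenticationHandler.h"

namespace arangodb {

class HardcodedAuthenticationHandler : public AuthenticationHandler {
 public:
  AuthenticationResult authenticate(std::string const& username,
                                    std::string const& password) override {
    // accept exactly one demo credential pair; everything else is rejected
    if (username == "demo" && password == "demo") {
      return AuthenticationResult(
          std::unordered_map<std::string, std::string>{}, AuthSource::COLLECTION);
    }
    return AuthenticationResult(TRI_ERROR_USER_NOT_FOUND, AuthSource::COLLECTION);
  }
};

}  // namespace arangodb
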
@@ -276,7 +276,7 @@ bool GeneralCommTask::handleRequest(std::shared_ptr<RestHandler> handler) {
bool ok = SchedulerFeature::SCHEDULER->queue(std::move(job));

if (!ok) {
handleSimpleError(rest::ResponseCode::SERVER_ERROR, TRI_ERROR_QUEUE_FULL,
handleSimpleError(rest::ResponseCode::SERVICE_UNAVAILABLE, TRI_ERROR_QUEUE_FULL,
TRI_errno_string(TRI_ERROR_QUEUE_FULL), messageId);
}

@@ -54,6 +54,7 @@
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include "Transaction/Helpers.h"
#include "Transaction/Hints.h"
#include "Transaction/Methods.h"
#include "Transaction/StandaloneContext.h"
#include "Utils/CollectionNameResolver.h"

@@ -1889,7 +1890,7 @@ int MMFilesCollection::read(transaction::Methods* trx, VPackSlice const key,
}

bool const useDeadlockDetector =
(lock && !trx->isSingleOperationTransaction());
(lock && !trx->isSingleOperationTransaction() && !trx->state()->hasHint(transaction::Hints::Hint::NO_DLD));
MMFilesCollectionReadLocker collectionLocker(this, useDeadlockDetector, lock);

int res = lookupDocument(trx, key, result);

@@ -2772,7 +2773,7 @@ int MMFilesCollection::insert(transaction::Methods* trx, VPackSlice const slice,
{
// use lock?
bool const useDeadlockDetector =
(lock && !trx->isSingleOperationTransaction());
(lock && !trx->isSingleOperationTransaction() && !trx->state()->hasHint(transaction::Hints::Hint::NO_DLD));
try {
// TODO Do we use the CollectionLocker on LogicalCollections
// or do we use it on the SE specific one?

@@ -3115,7 +3116,7 @@ int MMFilesCollection::update(
TRI_IF_FAILURE("UpdateDocumentNoLock") { return TRI_ERROR_DEBUG; }

bool const useDeadlockDetector =
(lock && !trx->isSingleOperationTransaction());
(lock && !trx->isSingleOperationTransaction() && !trx->state()->hasHint(transaction::Hints::Hint::NO_DLD));
arangodb::MMFilesCollectionWriteLocker collectionLocker(
this, useDeadlockDetector, lock);

@@ -3253,7 +3254,7 @@ int MMFilesCollection::replace(
}

bool const useDeadlockDetector =
(lock && !trx->isSingleOperationTransaction());
(lock && !trx->isSingleOperationTransaction() && !trx->state()->hasHint(transaction::Hints::Hint::NO_DLD));
arangodb::MMFilesCollectionWriteLocker collectionLocker(
this, useDeadlockDetector, lock);

@@ -3420,7 +3421,7 @@ int MMFilesCollection::remove(arangodb::transaction::Methods* trx,
TRI_VOC_DOCUMENT_OPERATION_REMOVE);

bool const useDeadlockDetector =
(lock && !trx->isSingleOperationTransaction());
(lock && !trx->isSingleOperationTransaction() && !trx->state()->hasHint(transaction::Hints::Hint::NO_DLD));
arangodb::MMFilesCollectionWriteLocker collectionLocker(
this, useDeadlockDetector, lock);

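Every call site above now computes the same flag, so the deadlock detector is skipped both for single-operation transactions and for transactions carrying the new NO_DLD hint. A standalone restatement of that predicate with simplified stand-in types (not the ArangoDB classes):

#include <cstdint>

enum class Hint : uint32_t { SINGLE_OPERATION = 1u << 0, NO_DLD = 1u << 1 };

struct TrxState {                    // stand-in for the transaction state
  uint32_t hints = 0;
  bool hasHint(Hint h) const { return (hints & static_cast<uint32_t>(h)) != 0; }
};

bool useDeadlockDetector(TrxState const& s, bool lock) {
  // only lock-taking, multi-operation transactions without NO_DLD participate
  return lock && !s.hasHint(Hint::SINGLE_OPERATION) && !s.hasHint(Hint::NO_DLD);
}
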
@@ -1134,9 +1134,8 @@ int MMFilesDatafile::truncateAndSeal(TRI_voc_size_t position) {
}

char zero = 0;
int res = TRI_WRITE(fd, &zero, 1);

if (res < 0) {
long written = TRI_WRITE(fd, &zero, 1);
if (written < 0) {
TRI_SYSTEM_ERROR();
TRI_set_errno(TRI_ERROR_SYS_ERROR);
TRI_TRACKED_CLOSE_FILE(fd);

@@ -1150,8 +1149,8 @@ int MMFilesDatafile::truncateAndSeal(TRI_voc_size_t position) {
}

// memory map the data
res = TRI_MMFile(0, maximalSize, PROT_WRITE | PROT_READ, MAP_SHARED, fd,
&mmHandle, 0, &data);
int res = TRI_MMFile(0, maximalSize, PROT_WRITE | PROT_READ, MAP_SHARED, fd,
&mmHandle, 0, &data);

if (res != TRI_ERROR_NO_ERROR) {
TRI_SYSTEM_ERROR();

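The fix above stops funneling the result of TRI_WRITE through the int error-code variable: a write returns a byte count (or -1), not an error code, and the mmap result now gets its own freshly declared res. A minimal sketch of the same discipline with plain POSIX write(), shown for illustration rather than the TRI_WRITE macro:

#include <unistd.h>

bool writeOneZeroByte(int fd) {
  char zero = 0;
  ssize_t written = write(fd, &zero, 1);  // byte count or -1, never an error code
  return written == 1;
}
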
@@ -3276,7 +3276,8 @@ void MMFilesRestReplicationHandler::handleCommandApplierDeleteState() {
int res = _vocbase->replicationApplier()->forget();

if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(res);
LOG_TOPIC(DEBUG, Logger::REPLICATION) << "unable to delete applier state";
THROW_ARANGO_EXCEPTION_MESSAGE(res, "unable to delete applier state");
}

handleCommandApplierGetState();

@@ -343,7 +343,8 @@ int MMFilesTransactionCollection::doLock(AccessMode::Type type, int nestingLevel
timeout = 0.00000001;
}

bool const useDeadlockDetector = !_transaction->hasHint(transaction::Hints::Hint::SINGLE_OPERATION);
bool const useDeadlockDetector = (!_transaction->hasHint(transaction::Hints::Hint::SINGLE_OPERATION) &&
!_transaction->hasHint(transaction::Hints::Hint::NO_DLD));

int res;
if (!AccessMode::isWriteOrExclusive(type)) {

@@ -404,7 +405,8 @@ int MMFilesTransactionCollection::doUnlock(AccessMode::Type type, int nestingLev
return TRI_ERROR_INTERNAL;
}

bool const useDeadlockDetector = (!_transaction->hasHint(transaction::Hints::Hint::SINGLE_OPERATION) &&
!_transaction->hasHint(transaction::Hints::Hint::NO_DLD));

LogicalCollection* collection = _collection;
TRI_ASSERT(collection != nullptr);

@ -98,7 +98,10 @@ static inline bool MustReplicateWalMarkerType(MMFilesMarker const* marker) {
|
|||
type == TRI_DF_MARKER_VPACK_RENAME_COLLECTION ||
|
||||
type == TRI_DF_MARKER_VPACK_CHANGE_COLLECTION ||
|
||||
type == TRI_DF_MARKER_VPACK_CREATE_INDEX ||
|
||||
type == TRI_DF_MARKER_VPACK_DROP_INDEX);
|
||||
type == TRI_DF_MARKER_VPACK_DROP_INDEX ||
|
||||
type == TRI_DF_MARKER_VPACK_CREATE_VIEW ||
|
||||
type == TRI_DF_MARKER_VPACK_DROP_VIEW ||
|
||||
type == TRI_DF_MARKER_VPACK_CHANGE_VIEW);
|
||||
}
|
||||
|
||||
/// @brief whether or not a marker belongs to a transaction
|
||||
|
@ -135,6 +138,12 @@ static TRI_replication_operation_e TranslateType(
|
|||
return REPLICATION_INDEX_CREATE;
|
||||
case TRI_DF_MARKER_VPACK_DROP_INDEX:
|
||||
return REPLICATION_INDEX_DROP;
|
||||
case TRI_DF_MARKER_VPACK_CREATE_VIEW:
|
||||
return REPLICATION_VIEW_CREATE;
|
||||
case TRI_DF_MARKER_VPACK_DROP_VIEW:
|
||||
return REPLICATION_VIEW_DROP;
|
||||
case TRI_DF_MARKER_VPACK_CHANGE_VIEW:
|
||||
return REPLICATION_VIEW_CHANGE;
|
||||
|
||||
default:
|
||||
return REPLICATION_INVALID;
|
||||
|
@ -265,11 +274,14 @@ static int StringifyMarker(MMFilesReplicationDumpContext* dump,
|
|||
case TRI_DF_MARKER_VPACK_CREATE_DATABASE:
|
||||
case TRI_DF_MARKER_VPACK_CREATE_COLLECTION:
|
||||
case TRI_DF_MARKER_VPACK_CREATE_INDEX:
|
||||
case TRI_DF_MARKER_VPACK_CREATE_VIEW:
|
||||
case TRI_DF_MARKER_VPACK_RENAME_COLLECTION:
|
||||
case TRI_DF_MARKER_VPACK_CHANGE_COLLECTION:
|
||||
case TRI_DF_MARKER_VPACK_CHANGE_VIEW:
|
||||
case TRI_DF_MARKER_VPACK_DROP_DATABASE:
|
||||
case TRI_DF_MARKER_VPACK_DROP_COLLECTION:
|
||||
case TRI_DF_MARKER_VPACK_DROP_INDEX: {
|
||||
case TRI_DF_MARKER_VPACK_DROP_INDEX:
|
||||
case TRI_DF_MARKER_VPACK_DROP_VIEW: {
|
||||
Append(dump, ",\"data\":");
|
||||
|
||||
VPackSlice slice(reinterpret_cast<char const*>(marker) +
|
||||
|
@ -394,11 +406,14 @@ static int SliceifyMarker(MMFilesReplicationDumpContext* dump,
|
|||
case TRI_DF_MARKER_VPACK_CREATE_DATABASE:
|
||||
case TRI_DF_MARKER_VPACK_CREATE_COLLECTION:
|
||||
case TRI_DF_MARKER_VPACK_CREATE_INDEX:
|
||||
case TRI_DF_MARKER_VPACK_CREATE_VIEW:
|
||||
case TRI_DF_MARKER_VPACK_RENAME_COLLECTION:
|
||||
case TRI_DF_MARKER_VPACK_CHANGE_COLLECTION:
|
||||
case TRI_DF_MARKER_VPACK_CHANGE_VIEW:
|
||||
case TRI_DF_MARKER_VPACK_DROP_DATABASE:
|
||||
case TRI_DF_MARKER_VPACK_DROP_COLLECTION:
|
||||
case TRI_DF_MARKER_VPACK_DROP_INDEX: {
|
||||
case TRI_DF_MARKER_VPACK_DROP_INDEX:
|
||||
case TRI_DF_MARKER_VPACK_DROP_VIEW: {
|
||||
VPackSlice slice(reinterpret_cast<char const*>(marker) +
|
||||
MMFilesDatafileHelper::VPackOffset(type));
|
||||
builder.add("data", slice);
|
||||
|
|
|
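The three hunks above teach the WAL dumper to replicate view markers as well. A standalone restatement of the added marker-to-operation mapping, with simplified enums standing in for the ArangoDB marker and operation types:

enum class MarkerType { CREATE_VIEW, DROP_VIEW, CHANGE_VIEW, OTHER };
enum class ReplicationOp { VIEW_CREATE, VIEW_DROP, VIEW_CHANGE, INVALID };

ReplicationOp translate(MarkerType type) {
  switch (type) {
    case MarkerType::CREATE_VIEW: return ReplicationOp::VIEW_CREATE;
    case MarkerType::DROP_VIEW:   return ReplicationOp::VIEW_DROP;
    case MarkerType::CHANGE_VIEW: return ReplicationOp::VIEW_CHANGE;
    default:                      return ReplicationOp::INVALID;
  }
}
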
@@ -25,6 +25,7 @@
#include "ApplicationFeatures/ApplicationServer.h"
#include "Basics/Exceptions.h"
#include "Basics/Result.h"
#include "Basics/ReadLocker.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringBuffer.h"
#include "Basics/VelocyPackHelper.h"

@@ -867,6 +868,18 @@ int ContinuousSyncer::applyLogMarker(VPackSlice const& slice,
else if (type == REPLICATION_INDEX_DROP) {
return dropIndex(slice);
}

else if (type == REPLICATION_VIEW_CREATE) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_NOT_IMPLEMENTED, "view create not yet implemented");
}

else if (type == REPLICATION_VIEW_DROP) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_NOT_IMPLEMENTED, "view drop not yet implemented");
}

else if (type == REPLICATION_VIEW_CHANGE) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_NOT_IMPLEMENTED, "view change not yet implemented");
}

errorMsg = "unexpected marker type " + StringUtils::itoa(type);

@@ -1366,6 +1379,7 @@ int ContinuousSyncer::followMasterLog(std::string& errorMsg,
bool checkMore = false;
bool active = false;
bool fromIncluded = false;
bool bumpTick = false;
TRI_voc_tick_t tick = 0;

bool found;

@@ -1391,10 +1405,10 @@ int ContinuousSyncer::followMasterLog(std::string& errorMsg,
header =
response->getHeaderField(TRI_REPLICATION_HEADER_LASTINCLUDED, found);
if (found) {
tick = StringUtils::uint64(header);
TRI_voc_tick_t lastIncludedTick = StringUtils::uint64(header);

if (tick > fetchTick) {
fetchTick = tick;
if (lastIncludedTick > fetchTick) {
fetchTick = lastIncludedTick;
worked = true;
} else {
// we got the same tick again, this indicates we're at the end

@@ -1404,6 +1418,13 @@ int ContinuousSyncer::followMasterLog(std::string& errorMsg,
header = response->getHeaderField(TRI_REPLICATION_HEADER_LASTTICK, found);
if (found) {
tick = StringUtils::uint64(header);
if (!checkMore && tick > lastIncludedTick) {
// the master has a tick value which is not contained in this result
// but it claims it does not have any more data
// so it's probably a tick from an invisible operation (such as closing
// a WAL file)
bumpTick = true;
}

WRITE_LOCKER_EVENTUAL(writeLocker, _applier->_statusLock);
_applier->_state._lastAvailableContinuousTick = tick;

@@ -1434,7 +1455,7 @@ int ContinuousSyncer::followMasterLog(std::string& errorMsg,
TRI_voc_tick_t lastAppliedTick;

{
WRITE_LOCKER_EVENTUAL(writeLocker, _applier->_statusLock);
READ_LOCKER(locker, _applier->_statusLock);
lastAppliedTick = _applier->_state._lastAppliedContinuousTick;
}

@@ -1453,6 +1474,22 @@ int ContinuousSyncer::followMasterLog(std::string& errorMsg,
_hasWrittenState = true;
saveApplierState();
}
} else if (bumpTick) {
WRITE_LOCKER_EVENTUAL(writeLocker, _applier->_statusLock);

if (_applier->_state._lastProcessedContinuousTick < tick) {
_applier->_state._lastProcessedContinuousTick = tick;
}

if (_ongoingTransactions.empty() &&
_applier->_state._safeResumeTick == 0) {
_applier->_state._safeResumeTick = tick;
}

if (!_hasWrittenState) {
_hasWrittenState = true;
saveApplierState();
}
}

if (!_hasWrittenState && _useTick) {

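The bumpTick path above advances the applier's bookkeeping when the master reports a last tick beyond the last included one while claiming there is no more data, typically an invisible operation such as a WAL file rotation. A simplified standalone sketch of that decision and its effect, using plain structs rather than the ArangoDB applier types:

#include <cstdint>

using Tick = uint64_t;

struct ApplierState {
  Tick lastProcessedContinuousTick = 0;
  Tick safeResumeTick = 0;
};

bool shouldBumpTick(bool checkMore, Tick lastTick, Tick lastIncludedTick) {
  // no more data is promised, yet the master's tick is ahead of what we saw
  return !checkMore && lastTick > lastIncludedTick;
}

void bumpTick(ApplierState& state, Tick tick, bool noOngoingTransactions) {
  if (state.lastProcessedContinuousTick < tick) {
    state.lastProcessedContinuousTick = tick;
  }
  if (noOngoingTransactions && state.safeResumeTick == 0) {
    state.safeResumeTick = tick;
  }
}
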
@@ -191,8 +191,8 @@ int InitialSyncer::run(std::string& errorMsg, bool incremental) {
<< "client: got master state: " << res << " " << errorMsg;
return res;
}
LOG_TOPIC(DEBUG, Logger::REPLICATION) << "client: got master state: " << res
<< " " << errorMsg;
LOG_TOPIC(DEBUG, Logger::REPLICATION)
<< "client: got master state: " << res << " " << errorMsg;

if (incremental) {
if (_masterInfo._majorVersion == 1 ||

@@ -275,8 +275,7 @@ int InitialSyncer::run(std::string& errorMsg, bool incremental) {
_masterInfo._endpoint + ": invalid JSON";
} else {
auto pair = stripObjectIds(slice);
res = handleInventoryResponse(pair.first, incremental,
errorMsg);
res = handleInventoryResponse(pair.first, incremental, errorMsg);
}
}

@@ -664,9 +663,9 @@ int InitialSyncer::handleCollectionDump(arangodb::LogicalCollection* col,
std::string const progress =
"fetching master collection dump for collection '" + collectionName +
"', type: " + typeString + ", id " + cid + ", batch " +
StringUtils::itoa(batch) + ", markers processed: " +
StringUtils::itoa(markersProcessed) + ", bytes received: " +
StringUtils::itoa(bytesReceived);
StringUtils::itoa(batch) +
", markers processed: " + StringUtils::itoa(markersProcessed) +
", bytes received: " + StringUtils::itoa(bytesReceived);

setProgress(progress);

@@ -856,9 +855,9 @@ int InitialSyncer::handleCollectionSync(arangodb::LogicalCollection* col,
sendExtendBarrier();

std::string const baseUrl = BaseUrl + "/keys";
std::string url = baseUrl + "?collection=" + cid + "&to=" +
std::to_string(maxTick) + "&batchId=" +
std::to_string(_batchId);
std::string url = baseUrl + "?collection=" + cid +
"&to=" + std::to_string(maxTick) +
"&batchId=" + std::to_string(_batchId);

std::string progress = "fetching collection keys for collection '" +
collectionName + "' from " + url;

@@ -1017,8 +1016,8 @@ int InitialSyncer::handleCollectionSync(arangodb::LogicalCollection* col,
OperationResult opRes = trx.truncate(collectionName, options);

if (!opRes.successful()) {
errorMsg = "unable to truncate collection '" + collectionName + "': " +
TRI_errno_string(opRes.code);
errorMsg = "unable to truncate collection '" + collectionName +
"': " + TRI_errno_string(opRes.code);
return opRes.code;
}

@@ -1266,28 +1265,24 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
// smaller values than lowKey mean they don't exist remotely
trx.remove(collectionName, key, options);
return;
}
if (cmp1 >= 0 && cmp2 <= 0) {
} else if (cmp1 >= 0 && cmp2 <= 0) {
// we only need to hash if we are in the range
if (cmp1 == 0) {
foundLowKey = true;
} else if (!foundLowKey && cmp1 > 0) {
rangeUnequal = true;
nextChunk = true;
}

markers.emplace_back(key.copyString(), TRI_ExtractRevisionId(doc));
// don't bother hashing if we haven't found the lower key
if (foundLowKey) {
VPackSlice revision = doc.get(StaticStrings::RevString);
localHash ^= key.hashString();
localHash ^= revision.hash();

markers.emplace_back(key.copyString(), TRI_ExtractRevisionId(doc));

if (cmp2 == 0) { // found highKey
rangeUnequal = std::to_string(localHash) != hashString;
nextChunk = true;
}
} else if (cmp2 == 0) {
} else if (cmp2 == 0) { // found high key, but not low key
rangeUnequal = true;
nextChunk = true;
}

@@ -1298,21 +1293,23 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
nextChunk = true;
}

if (rangeUnequal) {
int res = syncChunkRocksDB(&trx, keysId, currentChunkId, lowKey,
highKey, markers, errorMsg);
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(res);
}
}

TRI_ASSERT(!rangeUnequal || nextChunk); // A => B
if (nextChunk && currentChunkId + 1 < numChunks) {
currentChunkId++; // we are out of range, see next chunk
resetChunk();

// key is higher than upper bound, recheck the current document
if (cmp2 > 0) {
parseDoc(doc, key);
if (nextChunk) { // we are out of range, see next chunk
if (rangeUnequal && currentChunkId < numChunks) {
int res = syncChunkRocksDB(&trx, keysId, currentChunkId, lowKey,
highKey, markers, errorMsg);
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(res);
}
}
currentChunkId++;
if (currentChunkId < numChunks) {
resetChunk();
// key is higher than upper bound, recheck the current document
if (cmp2 > 0) {
parseDoc(doc, key);
}
}
}
};

@@ -1329,6 +1326,19 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
parseDoc(doc, key);
},
UINT64_MAX);

// we might have missed chunks, if the keys don't exist at all locally
while (currentChunkId < numChunks) {
int res = syncChunkRocksDB(&trx, keysId, currentChunkId, lowKey,
highKey, markers, errorMsg);
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(res);
}
currentChunkId++;
if (currentChunkId < numChunks) {
resetChunk();
}
}

res = trx.commit();
if (!res.ok()) {

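The trailing loop added above covers chunks whose key ranges contain no local documents at all; without it those chunks would never be fetched from the master. A reduced sketch of that sweep, with a hypothetical callback standing in for syncChunkRocksDB():

#include <cstddef>
#include <functional>

bool sweepRemainingChunks(std::size_t currentChunkId, std::size_t numChunks,
                          std::function<bool(std::size_t)> const& syncChunk) {
  while (currentChunkId < numChunks) {
    if (!syncChunk(currentChunkId)) {
      return false;  // the caller turns this into an exception
    }
    ++currentChunkId;
  }
  return true;
}
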
@@ -1356,9 +1366,9 @@ int InitialSyncer::syncChunkRocksDB(

// no match
// must transfer keys for non-matching range
std::string url = baseUrl + "/" + keysId + "?type=keys&chunk=" +
std::to_string(chunkId) + "&chunkSize=" +
std::to_string(chunkSize);
std::string url = baseUrl + "/" + keysId +
"?type=keys&chunk=" + std::to_string(chunkId) +
"&chunkSize=" + std::to_string(chunkSize);

std::string progress =
"fetching keys chunk '" + std::to_string(chunkId) + "' from " + url;

@@ -1425,6 +1435,12 @@ int InitialSyncer::syncChunkRocksDB(
std::vector<size_t> toFetch;

size_t const numKeys = static_cast<size_t>(responseBody.length());
if (numKeys == 0) {
errorMsg = "got invalid response from master at " + _masterInfo._endpoint +
": response contains an empty chunk. Collection: " + collectionName +
" Chunk: " + std::to_string(chunkId);
return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
}
TRI_ASSERT(numKeys > 0);

size_t i = 0;

@@ -1456,12 +1472,11 @@ int InitialSyncer::syncChunkRocksDB(
continue;
}

std::string const keyString = keySlice.copyString();
// remove keys not present anymore
while (nextStart < markers.size()) {
std::string const& localKey = markers[nextStart].first;

int res = localKey.compare(keyString);
int res = keySlice.compareString(localKey);
if (res != 0) {
// we have a local key that is not present remotely
keyBuilder->clear();

@@ -1479,7 +1494,7 @@ int InitialSyncer::syncChunkRocksDB(

// see if key exists
DocumentIdentifierToken token = physical->lookupKey(trx, keySlice);
if (!token._data) {
if (token._data == 0) {
// key not found locally
toFetch.emplace_back(i);
} else if (TRI_RidToString(token._data) != pair.at(1).copyString()) {

@@ -1493,6 +1508,23 @@ int InitialSyncer::syncChunkRocksDB(

i++;
}

// delete all keys at end of the range
while (nextStart < markers.size()) {
std::string const& localKey = markers[nextStart].first;

TRI_ASSERT(localKey.compare(highString) > 0);
//if (localKey.compare(highString) > 0) {
// we have a local key that is not present remotely
keyBuilder->clear();
keyBuilder->openObject();
keyBuilder->add(StaticStrings::KeyString, VPackValue(localKey));
keyBuilder->close();

trx->remove(collectionName, keyBuilder->slice(), options);
//}
++nextStart;
}

if (!toFetch.empty()) {
VPackBuilder keysBuilder;

@@ -1502,9 +1534,9 @@ int InitialSyncer::syncChunkRocksDB(
}
keysBuilder.close();

std::string url = baseUrl + "/" + keysId + "?type=docs&chunk=" +
std::to_string(chunkId) + "&chunkSize=" +
std::to_string(chunkSize);
std::string url = baseUrl + "/" + keysId +
"?type=docs&chunk=" + std::to_string(chunkId) +
"&chunkSize=" + std::to_string(chunkSize);
progress = "fetching documents chunk " + std::to_string(chunkId) +
" for collection '" + collectionName + "' from " + url;
setProgress(progress);

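The comparison change above, keySlice.compareString(localKey) instead of first copying the slice into a std::string, avoids one string allocation per key; note the sign of the result is now from the slice's perspective, which is safe here because only res != 0 is tested. A small illustration against the vendored VelocyPack API:

#include <string>
#include <velocypack/Slice.h>

int compareKey(arangodb::velocypack::Slice keySlice, std::string const& localKey) {
  // no std::string materialized from the slice; the sign is flipped
  // relative to localKey.compare(...)
  return keySlice.compareString(localKey);
}
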
@@ -1697,30 +1729,31 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
setProgress(progress);

// sort all our local keys
std::sort(markers.begin(), markers.end(), [](uint8_t const* lhs,
uint8_t const* rhs) -> bool {
VPackSlice const l(lhs);
VPackSlice const r(rhs);
std::sort(
markers.begin(), markers.end(),
[](uint8_t const* lhs, uint8_t const* rhs) -> bool {
VPackSlice const l(lhs);
VPackSlice const r(rhs);

VPackValueLength lLength, rLength;
char const* lKey = l.get(StaticStrings::KeyString).getString(lLength);
char const* rKey = r.get(StaticStrings::KeyString).getString(rLength);
VPackValueLength lLength, rLength;
char const* lKey = l.get(StaticStrings::KeyString).getString(lLength);
char const* rKey = r.get(StaticStrings::KeyString).getString(rLength);

size_t const length =
static_cast<size_t>(lLength < rLength ? lLength : rLength);
int res = memcmp(lKey, rKey, length);
size_t const length =
static_cast<size_t>(lLength < rLength ? lLength : rLength);
int res = memcmp(lKey, rKey, length);

if (res < 0) {
// left is smaller than right
return true;
}
if (res == 0 && lLength < rLength) {
// left is equal to right, but of shorter length
return true;
}
if (res < 0) {
// left is smaller than right
return true;
}
if (res == 0 && lLength < rLength) {
// left is equal to right, but of shorter length
return true;
}

return false;
});
return false;
});
}

if (checkAborted()) {

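The reflowed comparator above orders keys byte-wise with a shorter-key tie-break. The same ordering restated over std::string, testable in isolation:

#include <cstring>
#include <string>

bool keyLess(std::string const& lhs, std::string const& rhs) {
  size_t const length = lhs.size() < rhs.size() ? lhs.size() : rhs.size();
  int res = std::memcmp(lhs.data(), rhs.data(), length);
  if (res < 0) {
    return true;   // left is smaller than right
  }
  if (res == 0 && lhs.size() < rhs.size()) {
    return true;   // equal prefix, but left is shorter
  }
  return false;
}
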
@@ -1951,9 +1984,9 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
} else {
// no match
// must transfer keys for non-matching range
std::string url = baseUrl + "/" + keysId + "?type=keys&chunk=" +
std::to_string(i) + "&chunkSize=" +
std::to_string(chunkSize);
std::string url = baseUrl + "/" + keysId +
"?type=keys&chunk=" + std::to_string(i) +
"&chunkSize=" + std::to_string(chunkSize);
progress = "fetching keys chunk " + std::to_string(currentChunkId) +
" for collection '" + collectionName + "' from " + url;
setProgress(progress);

@@ -2117,9 +2150,9 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
}
keysBuilder.close();

std::string url = baseUrl + "/" + keysId + "?type=docs&chunk=" +
std::to_string(currentChunkId) + "&chunkSize=" +
std::to_string(chunkSize);
std::string url = baseUrl + "/" + keysId +
"?type=docs&chunk=" + std::to_string(currentChunkId) +
"&chunkSize=" + std::to_string(chunkSize);
progress = "fetching documents chunk " +
std::to_string(currentChunkId) + " for collection '" +
collectionName + "' from " + url;

@@ -29,11 +29,13 @@
#include <velocypack/velocypack-aliases.h>

#include "ApplicationFeatures/ApplicationServer.h"
#include "Basics/Exceptions.h"
#include "Basics/tri-strings.h"
#include "Cluster/ServerState.h"
#include "Endpoint/ConnectionInfo.h"
#include "GeneralServer/AuthenticationFeature.h"
#include "Logger/Logger.h"
#include "Rest/GeneralResponse.h"
#include "RestServer/FeatureCacheFeature.h"
#include "Ssl/SslInterface.h"
#include "Utils/Events.h"

@@ -182,13 +184,24 @@ rest::ResponseCode VocbaseContext::authenticateRequest() {

LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "Authorization header: " << authStr;

if (TRI_CaseEqualString(authStr.c_str(), "basic ", 6)) {
return basicAuthentication(auth);
}
if (TRI_CaseEqualString(authStr.c_str(), "bearer ", 7)) {
return jwtAuthentication(std::string(auth));
try {
// note that these methods may throw in case of an error
if (TRI_CaseEqualString(authStr.c_str(), "basic ", 6)) {
return basicAuthentication(auth);
}
if (TRI_CaseEqualString(authStr.c_str(), "bearer ", 7)) {
return jwtAuthentication(std::string(auth));
}
// fallthrough intentional
} catch (arangodb::basics::Exception const& ex) {
// translate error
if (ex.code() == TRI_ERROR_USER_NOT_FOUND) {
return rest::ResponseCode::UNAUTHORIZED;
}
return GeneralResponse::responseCode(ex.what());
} catch (...) {
return rest::ResponseCode::SERVER_ERROR;
}
// fallthrough intentional
}

events::UnknownAuthenticationMethod(_request);

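The try/catch introduced above keeps authentication exceptions from escaping the request handler and maps them onto HTTP codes: a missing user becomes 401, anything unexpected a generic server error. A reduced sketch of the translation pattern with simplified types; the 1703 error number is an illustrative stand-in, not a confirmed constant:

#include <exception>

enum class ResponseCode { OK, UNAUTHORIZED, SERVER_ERROR };

struct AuthException : std::exception {
  int code;
  explicit AuthException(int c) : code(c) {}
};

constexpr int USER_NOT_FOUND = 1703;  // assumed stand-in for TRI_ERROR_USER_NOT_FOUND

template <typename Fn>
ResponseCode runAuthStep(Fn&& step) {
  try {
    return step();
  } catch (AuthException const& ex) {
    return ex.code == USER_NOT_FOUND ? ResponseCode::UNAUTHORIZED
                                     : ResponseCode::SERVER_ERROR;
  } catch (...) {
    return ResponseCode::SERVER_ERROR;  // never let auth errors escape
  }
}
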
@@ -16,6 +16,7 @@ set(ROCKSDB_SOURCES
RocksDBEngine/RocksDBHashIndex.cpp
RocksDBEngine/RocksDBKey.cpp
RocksDBEngine/RocksDBKeyBounds.cpp
RocksDBEngine/RocksDBLogValue.cpp
RocksDBEngine/RocksDBPrimaryIndex.cpp
RocksDBEngine/RocksDBReplicationCommon.cpp
RocksDBEngine/RocksDBReplicationContext.cpp

@@ -38,6 +38,7 @@
#include "RocksDBEngine/RocksDBCounterManager.h"
#include "RocksDBEngine/RocksDBEngine.h"
#include "RocksDBEngine/RocksDBKey.h"
#include "RocksDBEngine/RocksDBLogValue.h"
#include "RocksDBEngine/RocksDBPrimaryIndex.h"
#include "RocksDBEngine/RocksDBToken.h"
#include "RocksDBEngine/RocksDBTransactionCollection.h"

@@ -70,7 +71,7 @@ static inline rocksdb::Transaction* rocksTransaction(
return static_cast<RocksDBTransactionState*>(trx->state())
->rocksTransaction();
}
}
} // namespace

RocksDBCollection::RocksDBCollection(LogicalCollection* collection,
VPackSlice const& info)

@@ -195,8 +196,8 @@ void RocksDBCollection::open(bool ignoreErrors) {
RocksDBEngine* engine =
static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
auto counterValue = engine->counterManager()->loadCounter(this->objectId());
LOG_TOPIC(ERR, Logger::DEVEL) << " number of documents: "
<< counterValue.added();
LOG_TOPIC(ERR, Logger::DEVEL)
<< " number of documents: " << counterValue.added();
_numberDocuments = counterValue.added() - counterValue.removed();
_revisionId = counterValue.revisionId();
//_numberDocuments = countKeyRange(db, readOptions,

@@ -340,6 +341,11 @@ std::shared_ptr<Index> RocksDBCollection::createIndex(
->forceSyncProperties();
VPackBuilder builder = _logicalCollection->toVelocyPackIgnore(
{"path", "statusString"}, true, /*forPersistence*/ false);
auto rtrx = rocksTransaction(trx);
rtrx->PutLogData(
RocksDBLogValue::IndexCreate(_logicalCollection->vocbase()->id(),
_logicalCollection->cid(), info)
.slice());
_logicalCollection->updateProperties(builder.slice(), doSync);
}
created = true;

@@ -380,7 +386,9 @@ bool RocksDBCollection::dropIndex(TRI_idx_iid_t iid) {
int res =
static_cast<RocksDBEngine*>(engine)->writeCreateCollectionMarker(
_logicalCollection->vocbase()->id(), _logicalCollection->cid(),
builder.slice());
builder.slice(),
RocksDBLogValue::IndexDrop(_logicalCollection->vocbase()->id(),
_logicalCollection->cid(), iid));
return res == TRI_ERROR_NO_ERROR;
}

@@ -435,19 +443,21 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
iter->Seek(documentBounds.start());

while (iter->Valid() && cmp->Compare(iter->key(), documentBounds.end()) < 0) {
TRI_voc_rid_t revisionId = RocksDBKey::revisionId(iter->key());

// add possible log statement
state->prepareOperation(cid, revisionId, TRI_VOC_DOCUMENT_OPERATION_REMOVE);
rocksdb::Status s = rtrx->Delete(iter->key());
if (!s.ok()) {
auto converted = convertStatus(s);
THROW_ARANGO_EXCEPTION(converted);
}

// transaction size limit reached -- fail
TRI_voc_rid_t revisionId = RocksDBKey::revisionId(iter->key());
// report size of key
RocksDBOperationResult result =
state->addOperation(cid, revisionId, TRI_VOC_DOCUMENT_OPERATION_REMOVE,
0, iter->key().size());

// transaction size limit reached -- fail
if (result.fail()) {
THROW_ARANGO_EXCEPTION(result);
}

@@ -467,9 +477,8 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
// don't do anything beyond deleting their contents
for (std::shared_ptr<Index> const& index : _indexes) {
RocksDBIndex* rindex = static_cast<RocksDBIndex*>(index.get());

RocksDBKeyBounds indexBounds =
RocksDBKeyBounds::Empty();

RocksDBKeyBounds indexBounds = RocksDBKeyBounds::Empty();
switch (rindex->type()) {
case RocksDBIndex::TRI_IDX_TYPE_PRIMARY_INDEX:
indexBounds = RocksDBKeyBounds::PrimaryIndex(rindex->objectId());

@@ -608,6 +617,11 @@ int RocksDBCollection::insert(arangodb::transaction::Methods* trx,
RocksDBSavePoint guard(rocksTransaction(trx),
trx->isSingleOperationTransaction());

RocksDBTransactionState* state =
static_cast<RocksDBTransactionState*>(trx->state());
state->prepareOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_INSERT);

res = insertDocument(trx, revisionId, newSlice, options.waitForSync);
if (res.ok()) {
Result lookupResult = lookupRevisionVPack(revisionId, trx, mdr);

@@ -617,11 +631,9 @@ int RocksDBCollection::insert(arangodb::transaction::Methods* trx,
}

// report document and key size
RocksDBOperationResult result =
static_cast<RocksDBTransactionState*>(trx->state())
->addOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_INSERT,
newSlice.byteSize(), res.keySize());
RocksDBOperationResult result = state->addOperation(
_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_INSERT, newSlice.byteSize(), res.keySize());

// transaction size limit reached -- fail
if (result.fail()) {

@@ -696,8 +708,9 @@ int RocksDBCollection::update(arangodb::transaction::Methods* trx,
mergeObjectsForUpdate(trx, oldDoc, newSlice, isEdgeCollection,
TRI_RidToString(revisionId), options.mergeObjects,
options.keepNull, *builder.get());

if (trx->state()->isDBServer()) {
RocksDBTransactionState* state =
static_cast<RocksDBTransactionState*>(trx->state());
if (state->isDBServer()) {
// Need to check that no sharding keys have changed:
if (arangodb::shardKeysChanged(_logicalCollection->dbName(),
trx->resolver()->getCollectionNameCluster(

@@ -710,8 +723,10 @@ int RocksDBCollection::update(arangodb::transaction::Methods* trx,
RocksDBSavePoint guard(rocksTransaction(trx),
trx->isSingleOperationTransaction());

// add possible log statement under guard
state->prepareOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_UPDATE);
VPackSlice const newDoc(builder->slice());

res = updateDocument(trx, oldRevisionId, oldDoc, revisionId, newDoc,
options.waitForSync);

@@ -724,10 +739,9 @@ int RocksDBCollection::update(arangodb::transaction::Methods* trx,
TRI_ASSERT(!mdr.empty());

// report document and key size
result = static_cast<RocksDBTransactionState*>(trx->state())
->addOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_UPDATE,
newDoc.byteSize(), res.keySize());
result = state->addOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_UPDATE,
newDoc.byteSize(), res.keySize());

// transaction size limit reached -- fail
if (result.fail()) {

@@ -796,7 +810,9 @@ int RocksDBCollection::replace(
isEdgeCollection, TRI_RidToString(revisionId),
*builder.get());

if (trx->state()->isDBServer()) {
RocksDBTransactionState* state =
static_cast<RocksDBTransactionState*>(trx->state());
if (state->isDBServer()) {
// Need to check that no sharding keys have changed:
if (arangodb::shardKeysChanged(_logicalCollection->dbName(),
trx->resolver()->getCollectionNameCluster(

@@ -809,6 +825,10 @@ int RocksDBCollection::replace(
RocksDBSavePoint guard(rocksTransaction(trx),
trx->isSingleOperationTransaction());

// add possible log statement under guard
state->prepareOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_REPLACE);

RocksDBOperationResult opResult =
updateDocument(trx, oldRevisionId, oldDoc, revisionId,
VPackSlice(builder->slice()), options.waitForSync);

@@ -822,11 +842,10 @@ int RocksDBCollection::replace(
TRI_ASSERT(!mdr.empty());

// report document and key size
result = static_cast<RocksDBTransactionState*>(trx->state())
->addOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_REPLACE,
VPackSlice(builder->slice()).byteSize(),
opResult.keySize());
result = state->addOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_REPLACE,
VPackSlice(builder->slice()).byteSize(),
opResult.keySize());

// transaction size limit reached -- fail
if (result.fail()) {

@@ -894,13 +913,19 @@ int RocksDBCollection::remove(arangodb::transaction::Methods* trx,
RocksDBSavePoint guard(rocksTransaction(trx),
trx->isSingleOperationTransaction());

// add possible log statement under guard
RocksDBTransactionState* state =
static_cast<RocksDBTransactionState*>(trx->state());
state->prepareOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_REMOVE);
// RocksDBLogValue val = RocksDBLogValue::DocumentRemove(StringRef(key));
// state->rocksTransaction()->PutLogData(val.slice());
res = removeDocument(trx, oldRevisionId, oldDoc, options.waitForSync);
if (res.ok()) {
// report key size
res =
static_cast<RocksDBTransactionState*>(trx->state())
->addOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_REMOVE, 0, res.keySize());
res = state->addOperation(_logicalCollection->cid(), revisionId,
TRI_VOC_DOCUMENT_OPERATION_REMOVE, 0,
res.keySize());
// transaction size limit reached -- fail
if (res.fail()) {
THROW_ARANGO_EXCEPTION(res);

@@ -1120,6 +1145,9 @@ RocksDBOperationResult RocksDBCollection::removeDocument(

rocksdb::Transaction* rtrx = rocksTransaction(trx);

rtrx->PutLogData(RocksDBLogValue::DocumentRemove(
StringRef(doc.get(StaticStrings::KeyString)))
.slice());
auto status = rtrx->Delete(key.string());
if (!status.ok()) {
auto converted = rocksutils::convertStatus(status);

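Across insert, update, replace and remove the pattern above is now uniform: fetch the RocksDBTransactionState once, call prepareOperation() before the RocksDB write so the log marker lands under the savepoint, then account the operation with addOperation(), which fails once the transaction size limit is hit. A schematic sketch of that bracket with stand-in types (the limit value is hypothetical):

#include <cstdint>
#include <stdexcept>

enum class Op { INSERT, UPDATE, REPLACE, REMOVE };

struct TrxState {                        // stand-in for RocksDBTransactionState
  uint64_t bytes = 0;
  uint64_t limit = 1u << 20;             // hypothetical size limit
  void prepareOperation(uint64_t /*cid*/, uint64_t /*rid*/, Op) {
    // would emit the WAL log marker here, before the actual write
  }
  bool addOperation(uint64_t /*cid*/, uint64_t /*rid*/, Op, uint64_t size) {
    bytes += size;
    return bytes <= limit;               // false: transaction size limit reached
  }
};

template <typename WriteFn>
void bracketedWrite(TrxState& state, uint64_t cid, uint64_t rid, Op op,
                    uint64_t size, WriteFn&& write) {
  state.prepareOperation(cid, rid, op);  // 1. log marker first
  write();                               // 2. actual write
  if (!state.addOperation(cid, rid, op, size)) {
    throw std::runtime_error("transaction size limit reached");  // 3. accounting
  }
}
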
@@ -47,8 +47,9 @@
#include "RocksDBEngine/RocksDBIndex.h"
#include "RocksDBEngine/RocksDBIndexFactory.h"
#include "RocksDBEngine/RocksDBKey.h"
#include "RocksDBEngine/RocksDBRestHandlers.h"
#include "RocksDBEngine/RocksDBLogValue.h"
#include "RocksDBEngine/RocksDBReplicationManager.h"
#include "RocksDBEngine/RocksDBRestHandlers.h"
#include "RocksDBEngine/RocksDBTransactionCollection.h"
#include "RocksDBEngine/RocksDBTransactionContextData.h"
#include "RocksDBEngine/RocksDBTransactionState.h"

@@ -77,7 +78,7 @@ using namespace arangodb::application_features;
using namespace arangodb::options;

namespace arangodb {

std::string const RocksDBEngine::EngineName("rocksdb");
std::string const RocksDBEngine::FeatureName("RocksDBEngine");

@@ -160,7 +161,7 @@ void RocksDBEngine::start() {
// options imported set by RocksDBOptionFeature
auto* opts = ApplicationServer::getFeature<arangodb::RocksDBOptionFeature>(
"RocksDBOption");

_options.write_buffer_size = static_cast<size_t>(opts->_writeBufferSize);
_options.max_write_buffer_number =
static_cast<int>(opts->_maxWriteBufferNumber);

@@ -176,10 +177,11 @@ void RocksDBEngine::start() {
_options.use_direct_reads = opts->_useDirectReads;
_options.use_direct_writes = opts->_useDirectWrites;
if (opts->_skipCorrupted) {
_options.wal_recovery_mode = rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords;
_options.wal_recovery_mode =
rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords;
} else {
_options.wal_recovery_mode = rocksdb::WALRecoveryMode::kPointInTimeRecovery;
}
}

_options.base_background_compactions =
static_cast<int>(opts->_baseBackgroundCompactions);

@@ -194,7 +196,7 @@ void RocksDBEngine::start() {
_options.compaction_readahead_size =
static_cast<size_t>(opts->_compactionReadaheadSize);

_options.IncreaseParallelism(TRI_numberProcessors());
_options.IncreaseParallelism((int)TRI_numberProcessors());

_options.create_if_missing = true;
_options.max_open_files = -1;

@@ -217,8 +219,9 @@ void RocksDBEngine::start() {
TRI_ASSERT(_db != nullptr);
_counterManager.reset(new RocksDBCounterManager(_db));
_replicationManager.reset(new RocksDBReplicationManager());

_backgroundThread.reset(new RocksDBBackgroundThread(this, counter_sync_seconds));

_backgroundThread.reset(
new RocksDBBackgroundThread(this, counter_sync_seconds));
if (!_backgroundThread->start()) {
LOG_TOPIC(ERR, Logger::ENGINES)
<< "could not start rocksdb counter manager";

@ -512,12 +515,18 @@ int RocksDBEngine::writeCreateDatabaseMarker(TRI_voc_tick_t id,
|
|||
|
||||
int RocksDBEngine::writeCreateCollectionMarker(TRI_voc_tick_t databaseId,
|
||||
TRI_voc_cid_t cid,
|
||||
VPackSlice const& slice) {
|
||||
VPackSlice const& slice,
|
||||
RocksDBLogValue&& logValue) {
|
||||
auto key = RocksDBKey::Collection(databaseId, cid);
|
||||
auto value = RocksDBValue::Collection(slice);
|
||||
rocksdb::WriteOptions options; // TODO: check which options would make sense
|
||||
|
||||
rocksdb::Status res = _db->Put(options, key.string(), value.string());
|
||||
// Write marker + key into RocksDB inside one batch
|
||||
rocksdb::WriteBatch batch;
|
||||
batch.PutLogData(logValue.slice());
|
||||
batch.Put(key.string(), value.string());
|
||||
rocksdb::Status res = _db->Write(options, &batch);
|
||||
|
||||
auto result = rocksutils::convertStatus(res);
|
||||
return result.errorNumber();
|
||||
}
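
The rewritten writeCreateCollectionMarker above bundles the replication log entry and the collection record into a single WriteBatch so both become durable together. A minimal standalone sketch of that pattern against the plain RocksDB API; the function name writeMarkerAtomically and its parameters are illustrative, not ArangoDB code:

// Couple a replication log marker with a key/value write in one atomic
// WriteBatch, mirroring writeCreateCollectionMarker above.
#include <rocksdb/db.h>
#include <rocksdb/write_batch.h>
#include <string>

rocksdb::Status writeMarkerAtomically(rocksdb::DB* db,
                                      std::string const& logBlob,
                                      std::string const& key,
                                      std::string const& value) {
  rocksdb::WriteBatch batch;
  batch.PutLogData(logBlob);  // replicated via the WAL, not stored as a key
  batch.Put(key, value);      // the actual collection marker
  // Both entries hit the WAL together; a crash can never persist one
  // without the other.
  return db->Write(rocksdb::WriteOptions(), &batch);
}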

@@ -561,7 +570,7 @@ std::string RocksDBEngine::createCollection(
VPackBuilder builder = parameters->toVelocyPackIgnore(
{"path", "statusString"}, /*translate cid*/ true,
/*for persistence*/ true);

// should cause counter to be added to the manager
// in case the collection is created for the first time
VPackSlice objectId = builder.slice().get("objectId");

@@ -569,8 +578,10 @@ std::string RocksDBEngine::createCollection(
RocksDBCounterManager::CounterAdjustment adj;
_counterManager->updateCounter(objectId.getUInt(), adj);
}

int res = writeCreateCollectionMarker(vocbase->id(), id, builder.slice());

int res = writeCreateCollectionMarker(
vocbase->id(), id, builder.slice(),
RocksDBLogValue::CollectionCreate(vocbase->id()));

if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(res);

@@ -596,7 +607,9 @@ arangodb::Result RocksDBEngine::persistCollection(
TRI_ASSERT(cid != 0);
TRI_UpdateTickServer(static_cast<TRI_voc_tick_t>(cid));

int res = writeCreateCollectionMarker(vocbase->id(), cid, slice);
int res = writeCreateCollectionMarker(
vocbase->id(), cid, slice,
RocksDBLogValue::CollectionCreate(vocbase->id()));
result.reset(res);

#ifdef ARANGODB_ENABLE_MAINTAINER_MODE

@@ -612,46 +625,75 @@ arangodb::Result RocksDBEngine::persistCollection(
arangodb::Result RocksDBEngine::dropCollection(
TRI_vocbase_t* vocbase, arangodb::LogicalCollection* collection) {
rocksdb::WriteOptions options; // TODO: check which options would make sense
Result res;

/*
// drop indexes of collection
std::vector<std::shared_ptr<Index>> vecShardIndex =
collection->getPhysical()->getIndexes();
bool dropFailed = false;
for (auto& index : vecShardIndex) {
uint64_t indexId = dynamic_cast<RocksDBIndex*>(index.get())->objectId();
bool dropped = collection->dropIndex(indexId);
if (!dropped) {
LOG_TOPIC(ERR, Logger::ENGINES)
<< "Failed to drop index with IndexId: " << indexId
<< " for collection: " << collection->name();
dropFailed = true;
}
}
// If we get here the collection is safe to drop.
//
// This uses the following workflow:
// 1. Persist the drop.
//    * if this fails the collection will remain!
//    * if this succeeds the collection is gone from the user's point of view
// 2. Drop all Documents
//    * If this fails we give up => We have data-garbage in RocksDB, Collection is gone.
// 3. Drop all Indexes
//    * If this fails we give up => We have data-garbage in RocksDB, Collection is gone.
// 4. If all succeeds we do not have data-garbage, all is gone.
//
// (NOTE: The above failures can only occur on a full HDD or a dying machine. No write conflicts possible)

if (dropFailed) {
res.reset(TRI_ERROR_INTERNAL,
"Failed to drop at least one Index for collection: " +
collection->name());
}
*/
// delete documents
RocksDBCollection* coll =
RocksDBCollection::toRocksDBCollection(collection->getPhysical());
RocksDBKeyBounds bounds =
RocksDBKeyBounds::CollectionDocuments(coll->objectId());
res = rocksutils::removeLargeRange(_db, bounds);
TRI_ASSERT(collection->status() == TRI_VOC_COL_STATUS_DELETED);

if (res.fail()) {
return res; // let collection exist so the remaining elements can still be
// accessed
// Prepare collection remove batch
RocksDBLogValue logValue = RocksDBLogValue::CollectionDrop(vocbase->id(), collection->cid());
rocksdb::WriteBatch batch;
batch.PutLogData(logValue.slice());
batch.Delete(RocksDBKey::Collection(vocbase->id(), collection->cid()).string());
rocksdb::Status res = _db->Write(options, &batch);

// TODO FAILURE Simulate !res.ok()
if (!res.ok()) {
// Persisting the drop failed. Do NOT drop collection.
return rocksutils::convertStatus(res);
}

// delete collection
// Now Collection is gone.
// Cleanup data-mess

RocksDBCollection* coll =
RocksDBCollection::toRocksDBCollection(collection->getPhysical());

// Unregister counter
_counterManager->removeCounter(coll->objectId());
auto key = RocksDBKey::Collection(vocbase->id(), collection->cid());
return rocksutils::globalRocksDBRemove(key.string(), options);

// delete documents
RocksDBKeyBounds bounds =
RocksDBKeyBounds::CollectionDocuments(coll->objectId());
arangodb::Result result = rocksutils::removeLargeRange(_db, bounds);

// TODO FAILURE Simulate result.fail()
if (result.fail()) {
// We try to remove all documents.
// If that does not work they cannot be accessed any more and are leaked.
// The user-visible view remains consistent.
return TRI_ERROR_NO_ERROR;
}

// delete indexes
std::vector<std::shared_ptr<Index>> vecShardIndex = coll->getIndexes();
for (auto& index : vecShardIndex) {
int dropRes = index->drop();
// TODO FAILURE Simulate dropRes != TRI_ERROR_NO_ERROR
if (dropRes != TRI_ERROR_NO_ERROR) {
// We try to remove all indexed values.
// If that does not work they cannot be accessed any more and are leaked.
// The user-visible view remains consistent.
return TRI_ERROR_NO_ERROR;
}
}

// if we get here all documents / indexes are gone.
// We have no data garbage left.
return TRI_ERROR_NO_ERROR;
}
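
dropCollection above deliberately distinguishes a hard failure (the drop marker could not be persisted) from best-effort cleanup that merely leaks storage. A standalone sketch of that error policy under the same assumptions, with all names hypothetical:

// Step 1 must fail hard; steps 2 and 3 are best-effort, because once the
// drop marker is durable the collection is already gone for users.
#include <functional>

enum class DropResult { Failed, Dropped };

DropResult dropWithBestEffortCleanup(std::function<bool()> persistDropMarker,
                                     std::function<bool()> removeDocuments,
                                     std::function<bool()> dropIndexes) {
  if (!persistDropMarker()) {
    return DropResult::Failed;  // nothing happened, collection stays intact
  }
  // From here on the user-visible state is "dropped"; leftover data only
  // leaks storage, it can no longer be reached.
  if (!removeDocuments()) {
    return DropResult::Dropped;
  }
  (void)dropIndexes();
  return DropResult::Dropped;
}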

void RocksDBEngine::destroyCollection(TRI_vocbase_t* vocbase,

@@ -662,7 +704,17 @@ void RocksDBEngine::destroyCollection(TRI_vocbase_t* vocbase,
void RocksDBEngine::changeCollection(
TRI_vocbase_t* vocbase, TRI_voc_cid_t id,
arangodb::LogicalCollection const* parameters, bool doSync) {
createCollection(vocbase, id, parameters);
VPackBuilder builder = parameters->toVelocyPackIgnore(
{"path", "statusString"}, /*translate cid*/ true,
/*for persistence*/ true);

int res = writeCreateCollectionMarker(
vocbase->id(), id, builder.slice(),
RocksDBLogValue::CollectionChange(vocbase->id(), id));

if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(res);
}
}

arangodb::Result RocksDBEngine::renameCollection(

@@ -670,8 +722,10 @@ arangodb::Result RocksDBEngine::renameCollection(
std::string const& oldName) {
VPackBuilder builder =
collection->toVelocyPackIgnore({"path", "statusString"}, true, true);
int res = writeCreateCollectionMarker(vocbase->id(), collection->cid(),
builder.slice());
int res = writeCreateCollectionMarker(
vocbase->id(), collection->cid(), builder.slice(),
RocksDBLogValue::CollectionRename(vocbase->id(), collection->cid(),
collection->name()));
return arangodb::Result(res);
}

@@ -853,7 +907,8 @@ std::pair<TRI_voc_tick_t, TRI_voc_cid_t> RocksDBEngine::mapObjectToCollection(
return it->second;
}

Result RocksDBEngine::createLoggerState(TRI_vocbase_t* vocbase, VPackBuilder& builder){
Result RocksDBEngine::createLoggerState(TRI_vocbase_t* vocbase,
VPackBuilder& builder) {
Result res;

rocksdb::Status status = _db->GetBaseDB()->SyncWAL();

@@ -866,7 +921,7 @@ Result RocksDBEngine::createLoggerState(TRI_vocbase_t* vocbase, VPackBuilder& bu
rocksdb::SequenceNumber lastTick = _db->GetLatestSequenceNumber();

// "state" part
builder.add("state", VPackValue(VPackValueType::Object)); //open
builder.add("state", VPackValue(VPackValueType::Object)); // open
builder.add("running", VPackValue(true));
builder.add("lastLogTick", VPackValue(std::to_string(lastTick)));
builder.add("lastUncommittedLogTick", VPackValue(std::to_string(lastTick)));

@@ -875,14 +930,14 @@ Result RocksDBEngine::createLoggerState(TRI_vocbase_t* vocbase, VPackBuilder& bu
builder.close();

// "server" part
builder.add("server", VPackValue(VPackValueType::Object)); //open
builder.add("server", VPackValue(VPackValueType::Object)); // open
builder.add("version", VPackValue(ARANGODB_VERSION));
builder.add("serverId", VPackValue(std::to_string(ServerIdFeature::getId())));
builder.close();

// "clients" part
builder.add("clients", VPackValue(VPackValueType::Array)); //open
if(vocbase != nullptr) { //add clients
builder.add("clients", VPackValue(VPackValueType::Array)); // open
if (vocbase != nullptr) { // add clients
auto allClients = vocbase->getReplicationClients();
for (auto& it : allClients) {
// One client

@@ -893,7 +948,8 @@ Result RocksDBEngine::createLoggerState(TRI_vocbase_t* vocbase, VPackBuilder& bu
TRI_GetTimeStampReplication(std::get<1>(it), &buffer[0], sizeof(buffer));
builder.add("time", VPackValue(buffer));

builder.add("lastServedTick", VPackValue(std::to_string(std::get<2>(it))));
builder.add("lastServedTick",
VPackValue(std::to_string(std::get<2>(it))));

builder.close();
}

@@ -39,10 +39,11 @@
namespace arangodb {
class PhysicalCollection;
class PhysicalView;
class RocksDBBackgroundThread;
class RocksDBComparator;
class RocksDBCounterManager;
class RocksDBReplicationManager;
class RocksDBBackgroundThread;
class RocksDBLogValue;
class TransactionCollection;
class TransactionState;

@@ -243,7 +244,7 @@ class RocksDBEngine final : public StorageEngine {
RocksDBComparator* cmp() const { return _cmp.get(); }

int writeCreateCollectionMarker(TRI_voc_tick_t databaseId, TRI_voc_cid_t id,
VPackSlice const& slice);
VPackSlice const& slice, RocksDBLogValue&& logValue);

void addCollectionMapping(uint64_t, TRI_voc_tick_t, TRI_voc_cid_t);
std::pair<TRI_voc_tick_t, TRI_voc_cid_t> mapObjectToCollection(uint64_t);

@@ -0,0 +1,250 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Daniel H. Larkin
////////////////////////////////////////////////////////////////////////////////

#include "RocksDBEngine/RocksDBLogValue.h"
#include "Basics/Common.h"
#include "Basics/Exceptions.h"
#include "RocksDBEngine/RocksDBCommon.h"

using namespace arangodb;
using namespace arangodb::rocksutils;

RocksDBLogValue RocksDBLogValue::BeginTransaction(TRI_voc_tick_t dbid,
TRI_voc_tid_t tid) {
return RocksDBLogValue(RocksDBLogType::BeginTransaction, dbid, tid);
}

RocksDBLogValue RocksDBLogValue::DatabaseCreate() {
return RocksDBLogValue(RocksDBLogType::DatabaseCreate);
}

RocksDBLogValue RocksDBLogValue::DatabaseDrop(TRI_voc_tick_t dbid) {
return RocksDBLogValue(RocksDBLogType::DatabaseDrop, dbid);
}

RocksDBLogValue RocksDBLogValue::CollectionCreate(TRI_voc_cid_t cid) {
return RocksDBLogValue(RocksDBLogType::CollectionCreate, cid);
}

RocksDBLogValue RocksDBLogValue::CollectionDrop(TRI_voc_tick_t dbid,
TRI_voc_cid_t cid) {
return RocksDBLogValue(RocksDBLogType::CollectionDrop, dbid, cid);
}

RocksDBLogValue RocksDBLogValue::CollectionRename(TRI_voc_tick_t dbid,
TRI_voc_cid_t cid,
std::string const& newName) {
return RocksDBLogValue(RocksDBLogType::CollectionRename, dbid, cid, newName);
}

RocksDBLogValue RocksDBLogValue::CollectionChange(TRI_voc_tick_t dbid,
TRI_voc_cid_t cid) {
return RocksDBLogValue(RocksDBLogType::CollectionChange, dbid, cid);
}

RocksDBLogValue RocksDBLogValue::IndexCreate(TRI_voc_tick_t dbid,
TRI_voc_cid_t cid,
VPackSlice const& indexInfo) {
return RocksDBLogValue(RocksDBLogType::IndexCreate, dbid, cid, indexInfo);
}

RocksDBLogValue RocksDBLogValue::IndexDrop(TRI_voc_tick_t dbid,
TRI_voc_cid_t cid,
TRI_idx_iid_t iid) {
return RocksDBLogValue(RocksDBLogType::IndexDrop, dbid, cid, iid);
}

RocksDBLogValue RocksDBLogValue::ViewCreate(TRI_voc_cid_t cid,
TRI_idx_iid_t iid) {
return RocksDBLogValue(RocksDBLogType::ViewCreate, cid, iid);
}

RocksDBLogValue RocksDBLogValue::ViewDrop(TRI_voc_cid_t cid,
TRI_idx_iid_t iid) {
return RocksDBLogValue(RocksDBLogType::ViewDrop, cid, iid);
}

RocksDBLogValue RocksDBLogValue::DocumentOpsPrologue(TRI_voc_cid_t cid) {
return RocksDBLogValue(RocksDBLogType::DocumentOperationsPrologue, cid);
}

RocksDBLogValue RocksDBLogValue::DocumentRemove(
arangodb::StringRef const& key) {
return RocksDBLogValue(RocksDBLogType::DocumentRemove, key);
}

RocksDBLogValue::RocksDBLogValue(RocksDBLogType type) : _buffer() {
switch (type) {
case RocksDBLogType::DatabaseCreate:
_buffer.reserve(sizeof(RocksDBLogType));
_buffer += static_cast<char>(type);
break;
default:
THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
}
}

RocksDBLogValue::RocksDBLogValue(RocksDBLogType type, uint64_t val)
: _buffer() {
switch (type) {
case RocksDBLogType::DatabaseDrop:
case RocksDBLogType::CollectionCreate:
case RocksDBLogType::DocumentOperationsPrologue: {
_buffer.reserve(sizeof(RocksDBLogType) + sizeof(uint64_t));
_buffer += static_cast<char>(type);
uint64ToPersistent(_buffer, val); // database or collection ID
break;
}

default:
THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
}
}

RocksDBLogValue::RocksDBLogValue(RocksDBLogType type, uint64_t dbId,
uint64_t cid)
: _buffer() {
switch (type) {
case RocksDBLogType::BeginTransaction:
case RocksDBLogType::CollectionChange:
case RocksDBLogType::CollectionDrop: {
_buffer.reserve(sizeof(RocksDBLogType) + sizeof(uint64_t) * 2);
_buffer += static_cast<char>(type);
uint64ToPersistent(_buffer, dbId);
uint64ToPersistent(_buffer, cid);
break;
}

default:
THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
}
}

RocksDBLogValue::RocksDBLogValue(RocksDBLogType type, uint64_t dbId,
uint64_t cid, uint64_t iid)
: _buffer() {
switch (type) {
case RocksDBLogType::IndexDrop: {
_buffer.reserve(sizeof(RocksDBLogType) + sizeof(uint64_t) * 3);
_buffer += static_cast<char>(type);
uint64ToPersistent(_buffer, dbId);
uint64ToPersistent(_buffer, cid);
uint64ToPersistent(_buffer, iid);
break;
}
default:
THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
}
}

RocksDBLogValue::RocksDBLogValue(RocksDBLogType type, uint64_t dbId,
uint64_t cid, VPackSlice const& indexInfo)
: _buffer() {
switch (type) {
case RocksDBLogType::IndexCreate: {
_buffer.reserve(sizeof(RocksDBLogType) + (sizeof(uint64_t) * 2) +
indexInfo.byteSize());
_buffer += static_cast<char>(type);
uint64ToPersistent(_buffer, dbId);
uint64ToPersistent(_buffer, cid);
_buffer.append(reinterpret_cast<char const*>(indexInfo.begin()),
indexInfo.byteSize());
break;
}
default:
THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
}
}

RocksDBLogValue::RocksDBLogValue(RocksDBLogType type, uint64_t dbId,
uint64_t cid, std::string const& data)
: _buffer() {
if (type != RocksDBLogType::CollectionRename) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
}
_buffer.reserve(sizeof(RocksDBLogType) + sizeof(uint64_t) * 2 +
data.length());
_buffer += static_cast<char>(type);
uint64ToPersistent(_buffer, dbId);
uint64ToPersistent(_buffer, cid);
_buffer.append(data.data(), data.length()); // new collection name
}

RocksDBLogValue::RocksDBLogValue(RocksDBLogType type, StringRef const& data)
: _buffer() {
switch (type) {
case RocksDBLogType::DocumentRemove: {
_buffer.reserve(data.length() + sizeof(RocksDBLogType));
_buffer += static_cast<char>(type);
_buffer.append(data.data(), data.length()); // primary key
break;
}

default:
THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
}
}

RocksDBLogType RocksDBLogValue::type(rocksdb::Slice const& slice) {
TRI_ASSERT(slice.size() >= sizeof(RocksDBLogType) + sizeof(uint64_t));
return static_cast<RocksDBLogType>(slice.data()[0]);
}

TRI_voc_tick_t RocksDBLogValue::databaseId(rocksdb::Slice const& slice) {
TRI_ASSERT(slice.size() >= sizeof(RocksDBLogType) + sizeof(uint64_t));
RocksDBLogType type = static_cast<RocksDBLogType>(slice.data()[0]);
TRI_ASSERT(type == RocksDBLogType::BeginTransaction ||
type == RocksDBLogType::DatabaseCreate ||
type == RocksDBLogType::DatabaseDrop);
return uint64FromPersistent(slice.data() + sizeof(RocksDBLogType));
}

TRI_voc_tid_t RocksDBLogValue::transactionId(rocksdb::Slice const& slice) {
TRI_ASSERT(slice.size() >= sizeof(RocksDBLogType) + sizeof(uint64_t));
RocksDBLogType type = static_cast<RocksDBLogType>(slice.data()[0]);
if (type == RocksDBLogType::BeginTransaction) {
return uint64FromPersistent(slice.data() + sizeof(TRI_voc_tick_t) +
sizeof(RocksDBLogType));
} else {
THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
}
}

TRI_voc_cid_t RocksDBLogValue::collectionId(rocksdb::Slice const& slice) {
TRI_ASSERT(slice.size() >= sizeof(RocksDBLogType) + sizeof(uint64_t));
RocksDBLogType type = static_cast<RocksDBLogType>(slice.data()[0]);
TRI_ASSERT(type == RocksDBLogType::CollectionCreate ||
type == RocksDBLogType::CollectionDrop ||
type == RocksDBLogType::CollectionRename ||
type == RocksDBLogType::CollectionChange);
return uint64FromPersistent(slice.data() + sizeof(RocksDBLogType));
}

TRI_idx_iid_t RocksDBLogValue::indexId(rocksdb::Slice const& slice) {
TRI_ASSERT(slice.size() >= sizeof(RocksDBLogType) + sizeof(uint64_t));
RocksDBLogType type = static_cast<RocksDBLogType>(slice.data()[0]);
TRI_ASSERT(type == RocksDBLogType::IndexCreate ||
type == RocksDBLogType::IndexDrop);
return uint64FromPersistent(slice.data() + sizeof(RocksDBLogType));
}
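
The constructors above all build the same wire format: one RocksDBLogType byte followed by fixed-width 64-bit ids and an optional payload. A self-contained sketch of that layout; appendUInt64/readUInt64 are illustrative stand-ins for the uint64ToPersistent/uint64FromPersistent helpers used above, assuming the same fixed little-endian encoding:

#include <cassert>
#include <cstdint>
#include <string>

static void appendUInt64(std::string& out, uint64_t v) {
  for (int i = 0; i < 8; ++i) {
    out.push_back(static_cast<char>((v >> (i * 8)) & 0xFF));  // little-endian
  }
}

static uint64_t readUInt64(char const* p) {
  uint64_t v = 0;
  for (int i = 7; i >= 0; --i) {
    v = (v << 8) | static_cast<uint8_t>(p[i]);
  }
  return v;
}

int main() {
  char const collectionDrop = '4';  // RocksDBLogType::CollectionDrop
  std::string buffer;
  buffer.reserve(1 + 2 * sizeof(uint64_t));
  buffer += collectionDrop;         // type byte first
  appendUInt64(buffer, 7);          // database id
  appendUInt64(buffer, 42);         // collection id
  assert(buffer[0] == '4');                         // type()
  assert(readUInt64(buffer.data() + 1) == 7);       // databaseId()
  assert(readUInt64(buffer.data() + 1 + 8) == 42);  // collectionId()
  return 0;
}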

@@ -0,0 +1,114 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Daniel H. Larkin
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGO_ROCKSDB_ROCKSDB_LOG_VALUE_H
#define ARANGO_ROCKSDB_ROCKSDB_LOG_VALUE_H 1

#include "Basics/Common.h"
#include "Basics/StringRef.h"
#include "RocksDBEngine/RocksDBTypes.h"
#include "VocBase/voc-types.h"

#include <rocksdb/slice.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>

namespace arangodb {

class RocksDBLogValue {
 public:
//----------------------------------------------------------------------------
// SECTION Constructors
// Each of these simply specifies the correct type and copies the input
// parameter in an appropriate format into the underlying string buffer.
//----------------------------------------------------------------------------

static RocksDBLogValue BeginTransaction(TRI_voc_tick_t vocbaseId,
TRI_voc_tid_t trxId);
static RocksDBLogValue DatabaseCreate();
static RocksDBLogValue DatabaseDrop(TRI_voc_tick_t vocbaseId);
static RocksDBLogValue CollectionCreate(TRI_voc_tick_t vocbaseId);
static RocksDBLogValue CollectionDrop(TRI_voc_tick_t vocbaseId,
TRI_voc_cid_t cid);
static RocksDBLogValue CollectionRename(TRI_voc_tick_t vocbaseId,
TRI_voc_cid_t cid,
std::string const& newName);
static RocksDBLogValue CollectionChange(TRI_voc_tick_t vocbaseId,
TRI_voc_cid_t cid);

static RocksDBLogValue IndexCreate(TRI_voc_tick_t vocbaseId,
TRI_voc_cid_t cid,
VPackSlice const& indexInfo);
static RocksDBLogValue IndexDrop(TRI_voc_tick_t vocbaseId, TRI_voc_cid_t cid,
TRI_idx_iid_t indexId);

static RocksDBLogValue ViewCreate(TRI_voc_cid_t, TRI_idx_iid_t);
static RocksDBLogValue ViewDrop(TRI_voc_cid_t, TRI_idx_iid_t);
static RocksDBLogValue DocumentOpsPrologue(TRI_voc_cid_t cid);
static RocksDBLogValue DocumentRemove(arangodb::StringRef const&);

 public:
//////////////////////////////////////////////////////////////////////////////
/// @brief Extracts the ids and keys stored in a log value
///
/// May be called only on slices of a matching log type. Other types will
/// throw or fail an assertion.
//////////////////////////////////////////////////////////////////////////////

static RocksDBLogType type(rocksdb::Slice const&);
static TRI_voc_tick_t databaseId(rocksdb::Slice const&);
static TRI_voc_tid_t transactionId(rocksdb::Slice const&);
static TRI_voc_cid_t collectionId(rocksdb::Slice const&);
static TRI_idx_iid_t indexId(rocksdb::Slice const&);
static arangodb::StringRef const& documentKey(rocksdb::Slice const&);

 public:
//////////////////////////////////////////////////////////////////////////////
/// @brief Returns a reference to the underlying string buffer.
//////////////////////////////////////////////////////////////////////////////
std::string const& string() const { return _buffer; }  // to be used with put
/*VPackSlice slice() const { return VPackSlice(
reinterpret_cast<uint8_t const*>(_buffer.data())
); }*/ // return a slice

RocksDBLogType type() const {
return static_cast<RocksDBLogType>(*(_buffer.data()));
}
rocksdb::Slice slice() const { return rocksdb::Slice(_buffer); }

 private:
explicit RocksDBLogValue(RocksDBLogType type);
RocksDBLogValue(RocksDBLogType type, uint64_t);
RocksDBLogValue(RocksDBLogType type, uint64_t, uint64_t);
RocksDBLogValue(RocksDBLogType type, uint64_t, uint64_t, uint64_t);
RocksDBLogValue(RocksDBLogType type, uint64_t, uint64_t, VPackSlice const&);
RocksDBLogValue(RocksDBLogType type, uint64_t, uint64_t,
std::string const& data);
RocksDBLogValue(RocksDBLogType type, StringRef const& data);

 private:
std::string _buffer;
};

}  // namespace arangodb

#endif

@@ -56,7 +56,7 @@ RocksDBReplicationContext::RocksDBReplicationContext()
_mdr(),
_customTypeHandler(),
_vpackOptions(Options::Defaults),
_lastChunkOffset(0),
_lastIteratorOffset(0),
_expires(TRI_microtime() + DefaultTTL),
_isDeleted(false),
_isUsed(true),

@@ -231,7 +231,7 @@ arangodb::Result RocksDBReplicationContext::dumpKeyChunks(VPackBuilder& b,
};

b.openArray();
while (_hasMore && true /*sizelimit*/) {
while (_hasMore) {
try {
_hasMore = primary->next(cb, chunkSize);

@@ -256,20 +256,23 @@ arangodb::Result RocksDBReplicationContext::dumpKeys(VPackBuilder& b,
size_t chunkSize) {
TRI_ASSERT(_trx);
TRI_ASSERT(_iter);

// Position the iterator correctly
size_t from = chunk * chunkSize;
if (from == 0) {
if (from == 0 || !_hasMore || from < _lastIteratorOffset) {
_iter->reset();
_lastChunkOffset = 0;
_hasMore = true;
} else if (from < _lastChunkOffset + chunkSize) {
_lastIteratorOffset = 0;
}
if (from > _lastIteratorOffset) {
TRI_ASSERT(from >= chunkSize);
uint64_t diff = from - chunkSize;
uint64_t to; // = (chunk + 1) * chunkSize;
uint64_t diff = from - _lastIteratorOffset;
uint64_t to = 0; // = (chunk + 1) * chunkSize;
_iter->skip(diff, to);
_lastIteratorOffset += to;
TRI_ASSERT(to == diff);
_lastChunkOffset = from;
} else if (from > _lastChunkOffset + chunkSize) {
} else if (from < _lastIteratorOffset) {
// no jumping back in time; fix the initial syncer if you see this
LOG_TOPIC(ERR, Logger::REPLICATION)
<< "Trying to request a chunk the rocksdb "

@@ -289,10 +292,11 @@ arangodb::Result RocksDBReplicationContext::dumpKeys(VPackBuilder& b,
};

b.openArray();
// chunk is going to be ignored here
while (_hasMore && true /*sizelimit*/) {
// chunkSize is going to be ignored here
if (_hasMore) {
try {
_hasMore = primary->nextWithKey(cb, chunkSize);
_lastIteratorOffset++;
} catch (std::exception const& ex) {
return Result(TRI_ERROR_INTERNAL);
}

@@ -307,25 +311,22 @@ arangodb::Result RocksDBReplicationContext::dumpDocuments(
VPackBuilder& b, size_t chunk, size_t chunkSize, VPackSlice const& ids) {
TRI_ASSERT(_trx);
TRI_ASSERT(_iter);
// Position the iterator correctly

// The iterator position must be reset to the beginning
// after calls to dumpKeys moved it forwards
size_t from = chunk * chunkSize;
if (from == 0) {
if (from == 0 || !_hasMore || from < _lastIteratorOffset) {
_iter->reset();
_lastChunkOffset = 0;
_hasMore = true;
} else if (from < _lastChunkOffset + chunkSize) {
_lastIteratorOffset = 0;
}
if (from > _lastIteratorOffset) {
TRI_ASSERT(from >= chunkSize);
uint64_t diff = from - chunkSize;
uint64_t to; // = (chunk + 1) * chunkSize;
uint64_t diff = from - _lastIteratorOffset;
uint64_t to = 0; // = (chunk + 1) * chunkSize;
_iter->skip(diff, to);
_lastIteratorOffset += to;
TRI_ASSERT(to == diff);
_lastChunkOffset = from;
} else if (from > _lastChunkOffset + chunkSize) {
// no jumping back in time; fix the initial syncer if you see this
LOG_TOPIC(ERR, Logger::REPLICATION)
<< "Trying to request a chunk the rocksdb "
<< "iterator already passed over";
return Result(TRI_ERROR_INTERNAL);
}

auto cb = [&](DocumentIdentifierToken const& token) {

@@ -344,17 +345,22 @@ arangodb::Result RocksDBReplicationContext::dumpDocuments(
size_t oldPos = from;
for (auto const& it : VPackArrayIterator(ids)) {
if (!it.isNumber()) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
return Result(TRI_ERROR_BAD_PARAMETER);
}
if (!hasMore) {
LOG_TOPIC(ERR, Logger::REPLICATION) << "Not enough data";
return Result(TRI_ERROR_FAILED);
}
TRI_ASSERT(hasMore);

size_t newPos = from + it.getNumber<size_t>();
if (oldPos != from && newPos > oldPos + 1) {
uint64_t ignore;
uint64_t ignore = 0;
_iter->skip(newPos - oldPos, ignore);
TRI_ASSERT(ignore == newPos - oldPos);
_lastIteratorOffset += ignore;
}
hasMore = _iter->next(cb, 1);
_lastIteratorOffset++;
}
b.close();
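
dumpKeys and dumpDocuments above keep the iterator position in _lastIteratorOffset so a client can re-request chunks without the iterator re-scanning from zero unless it has to. A reduced sketch of that bookkeeping; ChunkCursor is an illustrative stand-in, not the ArangoDB class:

#include <cassert>
#include <cstdint>

struct ChunkCursor {
  uint64_t lastIteratorOffset = 0;
  bool hasMore = true;

  // returns how many elements the underlying iterator must skip so that
  // it ends up positioned at chunk * chunkSize
  uint64_t prepare(uint64_t chunk, uint64_t chunkSize) {
    uint64_t from = chunk * chunkSize;
    if (from == 0 || !hasMore || from < lastIteratorOffset) {
      // caller must also reset the underlying iterator here
      lastIteratorOffset = 0;
      hasMore = true;
    }
    uint64_t diff = from - lastIteratorOffset;  // skip only the gap
    lastIteratorOffset = from;
    return diff;
  }

  // record that n elements were read from the iterator
  void consumed(uint64_t n) { lastIteratorOffset += n; }
};

int main() {
  ChunkCursor c;
  assert(c.prepare(0, 100) == 0);    // chunk 0: no skip
  c.consumed(100);                   // the first chunk was read
  assert(c.prepare(1, 100) == 0);    // iterator already in position
  assert(c.prepare(3, 100) == 200);  // jump ahead: skip two chunks
  assert(c.prepare(1, 100) == 100);  // going back forces reset + fresh skip
  return 0;
}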

@@ -121,7 +121,7 @@ class RocksDBReplicationContext {
ManagedDocumentResult _mdr;
std::shared_ptr<arangodb::velocypack::CustomTypeHandler> _customTypeHandler;
arangodb::velocypack::Options _vpackOptions;
uint64_t _lastChunkOffset;
uint64_t _lastIteratorOffset;
std::unique_ptr<DatabaseGuard> _guard;

double _expires;

@@ -25,6 +25,7 @@
#include "Basics/StaticStrings.h"
#include "Logger/Logger.h"
#include "RocksDBEngine/RocksDBCommon.h"
#include "RocksDBEngine/RocksDBLogValue.h"
#include "VocBase/replication-common.h"
#include "VocBase/ticks.h"

@@ -43,7 +44,7 @@ class WBReader : public rocksdb::WriteBatch::Handler {
explicit WBReader(TRI_vocbase_t* vocbase, uint64_t from, size_t& limit,
bool includeSystem, VPackBuilder& builder)
: _vocbase(vocbase),
_from(from),
/* _from(from), */
_limit(limit),
_includeSystem(includeSystem),
_builder(builder) {}

@@ -87,6 +88,58 @@ class WBReader : public rocksdb::WriteBatch::Handler {

void SingleDelete(rocksdb::Slice const& key) override { handleDeletion(key); }

void PutLogData(rocksdb::Slice const& blob) {
auto type = RocksDBLogValue::type(blob);
switch (type) {
case RocksDBLogType::BeginTransaction: {
break;
}
case RocksDBLogType::DatabaseCreate: {
break;
}
case RocksDBLogType::DatabaseDrop: {
break;
}
case RocksDBLogType::CollectionCreate: {
break;
}
case RocksDBLogType::CollectionDrop: {
break;
}
case RocksDBLogType::CollectionRename: {
break;
}
case RocksDBLogType::CollectionChange: {
break;
}
case RocksDBLogType::IndexCreate: {
break;
}
case RocksDBLogType::IndexDrop: {
break;
}
case RocksDBLogType::ViewCreate: {
break;
}
case RocksDBLogType::ViewDrop: {
break;
}
case RocksDBLogType::ViewChange: {
break;
}
case RocksDBLogType::DocumentRemove: {
break;
}
default:
break;
}
}
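
The WBReader above receives every PutLogData blob while tailing the WAL and branches on the leading type byte; the cases are still empty stubs in this commit. A tiny standalone sketch of such a dispatcher; LogType and handleLogBlob are illustrative, with enum values mirroring RocksDBTypes.h:

#include <rocksdb/slice.h>
#include <iostream>
#include <string>

enum class LogType : char { BeginTransaction = '0', DocumentRemove = '=' };

void handleLogBlob(rocksdb::Slice const& blob) {
  if (blob.size() == 0) return;  // defensive: empty blobs carry no type
  switch (static_cast<LogType>(blob.data()[0])) {
    case LogType::BeginTransaction:
      std::cout << "transaction starts\n";
      break;
    case LogType::DocumentRemove:
      // payload after the type byte is the primary key of the removed doc
      std::cout << "removed key: "
                << std::string(blob.data() + 1, blob.size() - 1) << "\n";
      break;
    default:
      break;  // unknown/unhandled marker types
  }
}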

void startNewBatch() {
// starting new write batch
// TODO: reset state?
}

 private:
bool shouldHandleKey(rocksdb::Slice const& key) {
if (_limit == 0) {

@@ -170,7 +223,7 @@ class WBReader : public rocksdb::WriteBatch::Handler {

 private:
TRI_vocbase_t* _vocbase;
uint64_t _from;
/* uint64_t _from; */
size_t& _limit;
bool _includeSystem;
VPackBuilder& _builder;

@@ -203,8 +256,8 @@ RocksDBReplicationResult rocksutils::tailWal(TRI_vocbase_t* vocbase,
fromTickIncluded = true;
}
s = batch.writeBatchPtr->Iterate(handler.get());
}
if (!s.ok()) {
handler->startNewBatch();
} else {
LOG_TOPIC(ERR, Logger::ENGINES) << "error during WAL scan";
auto converted = convertStatus(s);
auto result = RocksDBReplicationResult(converted.errorNumber(), lastTick);

@@ -331,6 +331,7 @@ void RocksDBRestReplicationHandler::handleCommandLoggerState() {
VPackBuilder builder;
auto res = globalRocksEngine()->createLoggerState(_vocbase, builder);
if (res.fail()) {
LOG_TOPIC(DEBUG, Logger::REPLICATION) << "failed to create logger-state: " << res.errorMessage();
generateError(rest::ResponseCode::BAD, res.errorNumber(),
res.errorMessage());
return;

@@ -1444,9 +1445,16 @@ void RocksDBRestReplicationHandler::handleCommandServerId() {
////////////////////////////////////////////////////////////////////////////////

void RocksDBRestReplicationHandler::handleCommandApplierGetConfig() {
generateError(rest::ResponseCode::NOT_IMPLEMENTED,
TRI_ERROR_NOT_YET_IMPLEMENTED,
"GET applier-config API is not implemented for RocksDB yet");
TRI_ASSERT(_vocbase->replicationApplier() != nullptr);

TRI_replication_applier_configuration_t config;

{
READ_LOCKER(readLocker, _vocbase->replicationApplier()->_statusLock);
config.update(&_vocbase->replicationApplier()->_configuration);
}
std::shared_ptr<VPackBuilder> configBuilder = config.toVelocyPack(false);
generateResult(rest::ResponseCode::OK, configBuilder->slice());
}

////////////////////////////////////////////////////////////////////////////////

@@ -1454,9 +1462,121 @@ void RocksDBRestReplicationHandler::handleCommandApplierGetConfig() {
////////////////////////////////////////////////////////////////////////////////

void RocksDBRestReplicationHandler::handleCommandApplierSetConfig() {
generateError(rest::ResponseCode::NOT_IMPLEMENTED,
TRI_ERROR_NOT_YET_IMPLEMENTED,
"set applier-config API is not implemented for RocksDB yet");
TRI_ASSERT(_vocbase->replicationApplier() != nullptr);

TRI_replication_applier_configuration_t config;

bool success;
std::shared_ptr<VPackBuilder> parsedBody = parseVelocyPackBody(success);

if (!success) {
// error already created
return;
}
VPackSlice const body = parsedBody->slice();

{
READ_LOCKER(readLocker, _vocbase->replicationApplier()->_statusLock);
config.update(&_vocbase->replicationApplier()->_configuration);
}

std::string const endpoint =
VelocyPackHelper::getStringValue(body, "endpoint", "");

if (!endpoint.empty()) {
config._endpoint = endpoint;
}

config._database =
VelocyPackHelper::getStringValue(body, "database", _vocbase->name());

VPackSlice const username = body.get("username");
if (username.isString()) {
config._username = username.copyString();
}

VPackSlice const password = body.get("password");
if (password.isString()) {
config._password = password.copyString();
}

VPackSlice const jwt = body.get("jwt");
if (jwt.isString()) {
config._jwt = jwt.copyString();
}

config._requestTimeout = VelocyPackHelper::getNumericValue<double>(
body, "requestTimeout", config._requestTimeout);
config._connectTimeout = VelocyPackHelper::getNumericValue<double>(
body, "connectTimeout", config._connectTimeout);
config._ignoreErrors = VelocyPackHelper::getNumericValue<uint64_t>(
body, "ignoreErrors", config._ignoreErrors);
config._maxConnectRetries = VelocyPackHelper::getNumericValue<uint64_t>(
body, "maxConnectRetries", config._maxConnectRetries);
config._sslProtocol = VelocyPackHelper::getNumericValue<uint32_t>(
body, "sslProtocol", config._sslProtocol);
config._chunkSize = VelocyPackHelper::getNumericValue<uint64_t>(
body, "chunkSize", config._chunkSize);
config._autoStart =
VelocyPackHelper::getBooleanValue(body, "autoStart", config._autoStart);
config._adaptivePolling = VelocyPackHelper::getBooleanValue(
body, "adaptivePolling", config._adaptivePolling);
config._autoResync =
VelocyPackHelper::getBooleanValue(body, "autoResync", config._autoResync);
config._includeSystem = VelocyPackHelper::getBooleanValue(
body, "includeSystem", config._includeSystem);
config._verbose =
VelocyPackHelper::getBooleanValue(body, "verbose", config._verbose);
config._incremental = VelocyPackHelper::getBooleanValue(body, "incremental",
config._incremental);
config._requireFromPresent = VelocyPackHelper::getBooleanValue(
body, "requireFromPresent", config._requireFromPresent);
config._restrictType = VelocyPackHelper::getStringValue(body, "restrictType",
config._restrictType);
config._connectionRetryWaitTime = static_cast<uint64_t>(
1000.0 * 1000.0 *
VelocyPackHelper::getNumericValue<double>(
body, "connectionRetryWaitTime",
static_cast<double>(config._connectionRetryWaitTime) /
(1000.0 * 1000.0)));
config._initialSyncMaxWaitTime = static_cast<uint64_t>(
1000.0 * 1000.0 *
VelocyPackHelper::getNumericValue<double>(
body, "initialSyncMaxWaitTime",
static_cast<double>(config._initialSyncMaxWaitTime) /
(1000.0 * 1000.0)));
config._idleMinWaitTime = static_cast<uint64_t>(
1000.0 * 1000.0 *
VelocyPackHelper::getNumericValue<double>(
body, "idleMinWaitTime",
static_cast<double>(config._idleMinWaitTime) / (1000.0 * 1000.0)));
config._idleMaxWaitTime = static_cast<uint64_t>(
1000.0 * 1000.0 *
VelocyPackHelper::getNumericValue<double>(
body, "idleMaxWaitTime",
static_cast<double>(config._idleMaxWaitTime) / (1000.0 * 1000.0)));
config._autoResyncRetries = VelocyPackHelper::getNumericValue<uint64_t>(
body, "autoResyncRetries", config._autoResyncRetries);

VPackSlice const restriction = body.get("restrictCollections");
if (restriction.isArray()) {
config._restrictCollections.clear();
for (VPackSlice const& collection : VPackArrayIterator(restriction)) {
if (collection.isString()) {
config._restrictCollections.emplace(
std::make_pair(collection.copyString(), true));
}
}
}

int res =
TRI_ConfigureReplicationApplier(_vocbase->replicationApplier(), &config);

if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(res);
}

handleCommandApplierGetConfig();
}
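
The wait-time options above are transported as seconds (a double) in the request body but stored internally as microseconds, hence the recurring 1000.0 * 1000.0 factor in both directions. A sketch of the round trip:

#include <cassert>
#include <cstdint>

static uint64_t secondsToUs(double seconds) {
  return static_cast<uint64_t>(1000.0 * 1000.0 * seconds);
}

static double usToSeconds(uint64_t us) {
  return static_cast<double>(us) / (1000.0 * 1000.0);
}

int main() {
  // e.g. connectionRetryWaitTime: 15 s on the wire, microseconds in memory
  uint64_t stored = secondsToUs(15.0);
  assert(stored == 15000000);
  assert(usToSeconds(stored) == 15.0);
  return 0;
}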

////////////////////////////////////////////////////////////////////////////////

@@ -1464,9 +1584,36 @@ void RocksDBRestReplicationHandler::handleCommandApplierSetConfig() {
////////////////////////////////////////////////////////////////////////////////

void RocksDBRestReplicationHandler::handleCommandApplierStart() {
generateError(rest::ResponseCode::NOT_IMPLEMENTED,
TRI_ERROR_NOT_YET_IMPLEMENTED,
"applier-start API is not implemented for RocksDB yet");
TRI_ASSERT(_vocbase->replicationApplier() != nullptr);

bool found;
std::string const& value1 = _request->value("from", found);

TRI_voc_tick_t initialTick = 0;
bool useTick = false;

if (found) {
// query parameter "from" specified
initialTick = (TRI_voc_tick_t)StringUtils::uint64(value1);
useTick = true;
}

TRI_voc_tick_t barrierId = 0;
std::string const& value2 = _request->value("barrierId", found);

if (found) {
// query parameter "barrierId" specified
barrierId = (TRI_voc_tick_t)StringUtils::uint64(value2);
}

int res =
_vocbase->replicationApplier()->start(initialTick, useTick, barrierId);

if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(res);
}

handleCommandApplierGetState();
}

////////////////////////////////////////////////////////////////////////////////

@@ -1474,9 +1621,15 @@ void RocksDBRestReplicationHandler::handleCommandApplierStart() {
////////////////////////////////////////////////////////////////////////////////

void RocksDBRestReplicationHandler::handleCommandApplierStop() {
generateError(rest::ResponseCode::NOT_IMPLEMENTED,
TRI_ERROR_NOT_YET_IMPLEMENTED,
"applier-stop API is not implemented for RocksDB yet");
TRI_ASSERT(_vocbase->replicationApplier() != nullptr);

int res = _vocbase->replicationApplier()->stop(true);

if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(res);
}

handleCommandApplierGetState();
}

////////////////////////////////////////////////////////////////////////////////

@@ -1484,9 +1637,11 @@ void RocksDBRestReplicationHandler::handleCommandApplierStop() {
////////////////////////////////////////////////////////////////////////////////

void RocksDBRestReplicationHandler::handleCommandApplierGetState() {
generateError(rest::ResponseCode::NOT_IMPLEMENTED,
TRI_ERROR_NOT_YET_IMPLEMENTED,
"applier-state get API is not implemented for RocksDB yet");
TRI_ASSERT(_vocbase->replicationApplier() != nullptr);

std::shared_ptr<VPackBuilder> result =
_vocbase->replicationApplier()->toVelocyPack();
generateResult(rest::ResponseCode::OK, result->slice());
}

////////////////////////////////////////////////////////////////////////////////

@@ -1494,9 +1649,16 @@ void RocksDBRestReplicationHandler::handleCommandApplierGetState() {
////////////////////////////////////////////////////////////////////////////////

void RocksDBRestReplicationHandler::handleCommandApplierDeleteState() {
generateError(rest::ResponseCode::NOT_IMPLEMENTED,
TRI_ERROR_NOT_YET_IMPLEMENTED,
"applier-state delete API is not implemented for RocksDB yet");
TRI_ASSERT(_vocbase->replicationApplier() != nullptr);

int res = _vocbase->replicationApplier()->forget();

if (res != TRI_ERROR_NO_ERROR) {
LOG_TOPIC(DEBUG, Logger::REPLICATION) << "unable to delete applier state";
THROW_ARANGO_EXCEPTION_MESSAGE(res, "unable to delete applier state");
}

handleCommandApplierGetState();
}

////////////////////////////////////////////////////////////////////////////////

@@ -33,6 +33,7 @@
#include "RocksDBEngine/RocksDBCommon.h"
#include "RocksDBEngine/RocksDBCounterManager.h"
#include "RocksDBEngine/RocksDBEngine.h"
#include "RocksDBEngine/RocksDBLogValue.h"
#include "RocksDBEngine/RocksDBTransactionCollection.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"

@@ -51,7 +52,7 @@
using namespace arangodb;

// for the RocksDB engine we do not need any additional data
// for the RocksDB engine we do not need any additional data
struct RocksDBTransactionData final : public TransactionData {};

RocksDBSavePoint::RocksDBSavePoint(rocksdb::Transaction* trx)

@@ -97,7 +98,8 @@ RocksDBTransactionState::RocksDBTransactionState(
_numInserts(0),
_numUpdates(0),
_numRemoves(0),
_intermediateTransactionEnabled(intermediateTransactionEnabled) {}
_intermediateTransactionEnabled(intermediateTransactionEnabled),
_lastUsedCollection(UINT64_MAX) {}

/// @brief free a transaction container
RocksDBTransactionState::~RocksDBTransactionState() {

@@ -110,9 +112,9 @@ RocksDBTransactionState::~RocksDBTransactionState() {

/// @brief start a transaction
Result RocksDBTransactionState::beginTransaction(transaction::Hints hints) {
LOG_TRX(this, _nestingLevel) << "beginning " << AccessMode::typeString(_type)
<< " transaction";

LOG_TRX(this, _nestingLevel)
<< "beginning " << AccessMode::typeString(_type) << " transaction";

Result result = useCollections(_nestingLevel);

if (result.ok()) {

@@ -156,18 +158,23 @@ Result RocksDBTransactionState::beginTransaction(transaction::Hints hints) {
_rocksWriteOptions, rocksdb::TransactionOptions()));
_rocksTransaction->SetSnapshot();
_rocksReadOptions.snapshot = _rocksTransaction->GetSnapshot();

RocksDBLogValue header =
RocksDBLogValue::BeginTransaction(_vocbase->id(), _id);
_rocksTransaction->PutLogData(header.slice());

} else {
TRI_ASSERT(_status == transaction::Status::RUNNING);
}

return result;
}

/// @brief commit a transaction
Result RocksDBTransactionState::commitTransaction(
transaction::Methods* activeTrx) {
LOG_TRX(this, _nestingLevel) << "committing " << AccessMode::typeString(_type)
<< " transaction";
LOG_TRX(this, _nestingLevel)
<< "committing " << AccessMode::typeString(_type) << " transaction";

TRI_ASSERT(_status == transaction::Status::RUNNING);
TRI_IF_FAILURE("TransactionWriteCommitMarker") {

@@ -239,8 +246,8 @@ Result RocksDBTransactionState::commitTransaction(
/// @brief abort and rollback a transaction
Result RocksDBTransactionState::abortTransaction(
transaction::Methods* activeTrx) {
LOG_TRX(this, _nestingLevel) << "aborting " << AccessMode::typeString(_type)
<< " transaction";
LOG_TRX(this, _nestingLevel)
<< "aborting " << AccessMode::typeString(_type) << " transaction";
TRI_ASSERT(_status == transaction::Status::RUNNING);
Result result;

@@ -271,6 +278,26 @@ Result RocksDBTransactionState::abortTransaction(
return result;
}

void RocksDBTransactionState::prepareOperation(
TRI_voc_cid_t collectionId, TRI_voc_rid_t revisionId,
TRI_voc_document_operation_e operationType) {
switch (operationType) {
case TRI_VOC_DOCUMENT_OPERATION_UNKNOWN:
break;
case TRI_VOC_DOCUMENT_OPERATION_INSERT:
case TRI_VOC_DOCUMENT_OPERATION_UPDATE:
case TRI_VOC_DOCUMENT_OPERATION_REPLACE:
case TRI_VOC_DOCUMENT_OPERATION_REMOVE: {
if (collectionId != _lastUsedCollection) {
RocksDBLogValue logValue =
RocksDBLogValue::DocumentOpsPrologue(collectionId);
//_rocksTransaction->PutLogData(logValue.slice());
_lastUsedCollection = collectionId;
}
} break;
}
}

/// @brief add an operation for a transaction collection
RocksDBOperationResult RocksDBTransactionState::addOperation(
TRI_voc_cid_t cid, TRI_voc_rid_t revisionId,

@@ -292,7 +319,7 @@ RocksDBOperationResult RocksDBTransactionState::addOperation(
static_cast<RocksDBTransactionCollection*>(findCollection(cid));

if (collection == nullptr) {
std::string message = "collection '" + std::to_string(cid) +
std::string message = "collection '" + std::to_string(cid) +
"' not found in transaction state";
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, message);
}

@@ -42,7 +42,7 @@ namespace rocksdb {
class Transaction;
class Slice;
class Iterator;
}
}  // namespace rocksdb

namespace arangodb {
namespace cache {

@@ -62,6 +62,7 @@ class RocksDBSavePoint {
~RocksDBSavePoint();

void commit();

 private:
void rollback();

@@ -101,6 +102,9 @@ class RocksDBTransactionState final : public TransactionState {
return (_status == transaction::Status::ABORTED) && hasOperations();
}

void prepareOperation(TRI_voc_cid_t collectionId, TRI_voc_rid_t revisionId,
TRI_voc_document_operation_e operationType);

/// @brief add an operation for a transaction collection
RocksDBOperationResult addOperation(
TRI_voc_cid_t collectionId, TRI_voc_rid_t revisionId,

@@ -135,7 +139,10 @@ class RocksDBTransactionState final : public TransactionState {
uint64_t _numUpdates;
uint64_t _numRemoves;
bool _intermediateTransactionEnabled;

/// Last collection used for transaction
TRI_voc_cid_t _lastUsedCollection;
};
}
}  // namespace arangodb

#endif

@@ -43,6 +43,24 @@ enum class RocksDBEntryType : char {
View = '8',
SettingsValue = '9'
};

enum class RocksDBLogType : char {
BeginTransaction = '0',
DatabaseCreate = '1',
DatabaseDrop = '2',
CollectionCreate = '3',
CollectionDrop = '4',
CollectionRename = '5',
CollectionChange = '6',
IndexCreate = '7',
IndexDrop = '8',
ViewCreate = '9',
ViewDrop = ':',
ViewChange = ';',
DocumentOperationsPrologue = '<',
DocumentRemove = '='
};

rocksdb::Slice const& rocksDBSlice(RocksDBEntryType const& type);
}

@@ -55,8 +55,8 @@ class JobQueueThread final
while (!isStopping()) {
++idleTries;

LOG_TOPIC(TRACE, Logger::THREADS) << "size of job queue: "
<< _jobQueue->queueSize();
LOG_TOPIC(TRACE, Logger::THREADS)
<< "size of job queue: " << _jobQueue->queueSize();

while (_scheduler->shouldQueueMore()) {
Job* jobPtr = nullptr;

@@ -77,7 +78,8 @@ class JobQueueThread final
try {
job->_callback(std::move(job->_handler));
} catch (std::exception& e) {
LOG_TOPIC(WARN, Logger::THREADS) << "Exception caught in a dangerous place! " << e.what();
LOG_TOPIC(WARN, Logger::THREADS)
<< "Exception caught in a dangerous place! " << e.what();
}

this->_jobQueue->wakeup();

@@ -102,14 +103,15 @@ class JobQueueThread final
JobQueue* _jobQueue;
rest::Scheduler* _scheduler;
};
}
}  // namespace arangodb

// -----------------------------------------------------------------------------
// --SECTION-- constructors and destructors
// -----------------------------------------------------------------------------

JobQueue::JobQueue(size_t queueSize, rest::Scheduler* scheduler)
: _queue(queueSize),
JobQueue::JobQueue(size_t maxQueueSize, rest::Scheduler* scheduler)
: _maxQueueSize(static_cast<int64_t>(maxQueueSize)),
_queue(maxQueueSize == 0 ? 512 : maxQueueSize),
_queueSize(0),
_queueThread(new JobQueueThread(this, scheduler)) {}

@@ -46,7 +46,7 @@ class JobQueue {
static size_t const STANDARD_QUEUE = 2;

 public:
JobQueue(size_t queueSize, rest::Scheduler*);
JobQueue(size_t maxQueueSize, rest::Scheduler*);

 public:
void start();

@@ -56,8 +56,14 @@ class JobQueue {

bool queue(std::unique_ptr<Job> job) {
try {
if (0 < _maxQueueSize && _maxQueueSize <= _queueSize) {
wakeup();
return false;
}

if (!_queue.push(job.get())) {
throw "failed to add to queue";
wakeup();
return false;
}

job.release();
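
The queue() method above now rejects work instead of blocking once the configured cap is hit. A standalone approximation of that admission check; BoundedJobQueue is illustrative, not the arangod class, and the real code also updates the counter as jobs are dequeued elsewhere:

#include <atomic>
#include <boost/lockfree/queue.hpp>
#include <cstdint>
#include <memory>

struct Job { int id; };

class BoundedJobQueue {
 public:
  explicit BoundedJobQueue(size_t maxQueueSize)
      : _maxQueueSize(static_cast<int64_t>(maxQueueSize)),
        _queue(maxQueueSize == 0 ? 512 : maxQueueSize),
        _queueSize(0) {}

  bool queue(std::unique_ptr<Job> job) {
    if (0 < _maxQueueSize && _maxQueueSize <= _queueSize) {
      return false;  // full: reject instead of blocking the caller
    }
    if (!_queue.push(job.get())) {
      return false;  // lock-free push failed (capacity hit)
    }
    job.release();   // ownership now lives in the queue
    ++_queueSize;
    return true;
  }

 private:
  int64_t const _maxQueueSize;        // 0 means unbounded admission
  boost::lockfree::queue<Job*> _queue;
  std::atomic<int64_t> _queueSize;
};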
|
||||
|
@ -85,6 +91,7 @@ class JobQueue {
|
|||
void waitForWork();
|
||||
|
||||
private:
|
||||
int64_t const _maxQueueSize;
|
||||
boost::lockfree::queue<Job*> _queue;
|
||||
std::atomic<int64_t> _queueSize;
|
||||
|
||||
|
|
|
@ -137,9 +137,14 @@ class SchedulerThread : public Thread {
|
|||
|
||||
LOG_TOPIC(DEBUG, Logger::THREADS) << "stopped ("
|
||||
<< _scheduler->infoStatus() << ")";
|
||||
} catch (std::exception const& ex) {
|
||||
LOG_TOPIC(ERR, Logger::THREADS)
|
||||
<< "restarting scheduler loop after caught exception: " << ex.what();
|
||||
_scheduler->decRunning();
|
||||
_scheduler->startNewThread();
|
||||
} catch (...) {
|
||||
LOG_TOPIC(ERR, Logger::THREADS)
|
||||
<< "scheduler loop caught an error, restarting";
|
||||
<< "restarting scheduler loop after unknown exception";
|
||||
_scheduler->decRunning();
|
||||
_scheduler->startNewThread();
|
||||
}
|
||||
|
|
|
@ -76,7 +76,7 @@ void SchedulerFeature::collectOptions(
|
|||
new UInt64Parameter(&_nrMaximalThreads));
|
||||
|
||||
options->addOption("--server.maximal-queue-size",
|
||||
"maximum queue length for asynchronous operations",
|
||||
"maximum queue length for pending operations (use 0 for unrestricted)",
|
||||
new UInt64Parameter(&_queueSize));
|
||||
|
||||
options->addOldOption("scheduler.threads", "server.threads");
|
||||
|
@ -89,12 +89,6 @@ void SchedulerFeature::validateOptions(
|
|||
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "Detected number of processors: " << _nrServerThreads;
|
||||
}
|
||||
|
||||
if (_queueSize < 128) {
|
||||
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
|
||||
<< "invalid value for `--server.maximal-queue-size', need at least 128";
|
||||
FATAL_ERROR_EXIT();
|
||||
}
|
||||
|
||||
if (_nrMinimalThreads < 2) {
|
||||
_nrMinimalThreads = 2;
|
||||
}
|
||||
|
|
|
@ -56,7 +56,7 @@ class SchedulerFeature final : public application_features::ApplicationFeature {
|
|||
uint64_t _nrServerThreads = 0;
|
||||
uint64_t _nrMinimalThreads = 0;
|
||||
uint64_t _nrMaximalThreads = 0;
|
||||
uint64_t _queueSize = 512;
|
||||
uint64_t _queueSize = 0;
|
||||
|
||||
public:
|
||||
size_t concurrency() const {
|
||||
|
|
|
@ -146,7 +146,7 @@ class Socket {
|
|||
virtual void shutdownReceive() = 0;
|
||||
virtual void shutdownReceive(boost::system::error_code& ec) = 0;
|
||||
virtual void shutdownSend(boost::system::error_code& ec) = 0;
|
||||
virtual int available(boost::system::error_code& ec) = 0;
|
||||
virtual std::size_t available(boost::system::error_code& ec) = 0;
|
||||
virtual void asyncRead(boost::asio::mutable_buffers_1 const& buffer,
|
||||
AsyncHandler const& handler) = 0;
|
||||
|
||||
|
|
|
@ -69,9 +69,9 @@ void SocketTcp::shutdownSend(boost::system::error_code& ec) {
  _socket.shutdown(boost::asio::ip::tcp::socket::shutdown_send, ec);
}

int SocketTcp::available(boost::system::error_code& ec) {
std::size_t SocketTcp::available(boost::system::error_code& ec) {
  MUTEX_LOCKER(guard, _lock);
  return static_cast<int>(_socket.available(ec));
  return static_cast<size_t>(_socket.available(ec));
}

void SocketTcp::asyncRead(boost::asio::mutable_buffers_1 const& buffer,
@ -89,7 +89,7 @@ class SocketTcp final : public Socket {
  void shutdownReceive() override;
  void shutdownReceive(boost::system::error_code& ec) override;
  void shutdownSend(boost::system::error_code& ec) override;
  int available(boost::system::error_code& ec) override;
  std::size_t available(boost::system::error_code& ec) override;

 public:
  Mutex _lock;
@ -44,7 +44,7 @@ void SocketUnixDomain::shutdownReceive(boost::system::error_code& ec) {
void SocketUnixDomain::shutdownSend(boost::system::error_code& ec) {
  _socket.shutdown(boost::asio::local::stream_protocol::socket::shutdown_send, ec);
}
int SocketUnixDomain::available(boost::system::error_code& ec) {
std::size_t SocketUnixDomain::available(boost::system::error_code& ec) {
  return _socket.available(ec);
}
void SocketUnixDomain::asyncRead(boost::asio::mutable_buffers_1 const& buffer, AsyncHandler const& handler) {
@ -65,8 +65,8 @@ class SocketUnixDomain final : public Socket {

  void shutdownSend(boost::system::error_code& ec) override;

  int available(boost::system::error_code& ec) override;
  std::size_t available(boost::system::error_code& ec) override;

  void asyncRead(boost::asio::mutable_buffers_1 const& buffer, AsyncHandler const& handler) override;

 public:
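All four hunks above migrate `available()` from `int` to `std::size_t`, which is what Boost.Asio itself returns, so the value can be forwarded without a narrowing cast. A minimal sketch of the forwarding wrapper (free function and socket parameter are illustrative, assuming Boost.Asio is available):

#include <boost/asio.hpp>
#include <cstddef>

// Boost.Asio reports the readable byte count as std::size_t, so the wrapper
// can pass it through unchanged instead of truncating through int.
std::size_t availableBytes(boost::asio::ip::tcp::socket& socket,
                           boost::system::error_code& ec) {
  return socket.available(ec);
}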
@ -45,7 +45,8 @@ class Hints {
    TRY_LOCK = 64,
    NO_COMPACTION_LOCK = 128,
    NO_USAGE_LOCK = 256,
    RECOVERY = 512
    RECOVERY = 512,
    NO_DLD = 1024  // disable deadlock detection
  };

  Hints() : _value(0) {}
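The hint values are powers of two, so a transaction can carry several of them in a single integer and adding `NO_DLD` costs nothing at runtime. A compact, self-contained sketch of that flag-set idiom (names are illustrative, not the ArangoDB class):

#include <cstdint>

class HintFlags {
 public:
  enum Hint : uint32_t {
    TRY_LOCK = 64,
    NO_COMPACTION_LOCK = 128,
    NO_USAGE_LOCK = 256,
    RECOVERY = 512,
    NO_DLD = 1024  // disable deadlock detection
  };

  HintFlags() : _value(0) {}

  void add(Hint hint) { _value |= static_cast<uint32_t>(hint); }
  bool has(Hint hint) const {
    return (_value & static_cast<uint32_t>(hint)) != 0;
  }

 private:
  uint32_t _value;  // each set bit is one active hint
};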
@ -44,6 +44,7 @@ SingleCollectionTransaction::SingleCollectionTransaction(

  // add the (sole) collection
  addCollection(cid, _accessType);
  addHint(transaction::Hints::Hint::NO_DLD);
}

/// @brief create the transaction, using a collection name

@ -58,6 +59,7 @@ SingleCollectionTransaction::SingleCollectionTransaction(
  // add the (sole) collection
  _cid = resolver()->getCollectionId(name);
  addCollection(_cid, name.c_str(), _accessType);
  addHint(transaction::Hints::Hint::NO_DLD);
}

/// @brief get the underlying transaction collection
@ -640,6 +640,8 @@ V8Context* V8DealerFeature::enterContext(TRI_vocbase_t* vocbase,
  CONDITION_LOCKER(guard, _contextCondition);

  while (_freeContexts.empty() && !_stopping) {
    TRI_ASSERT(guard.isLocked());

    LOG_TOPIC(TRACE, arangodb::Logger::V8) << "waiting for unused V8 context";

    if (!_dirtyContexts.empty()) {

@ -653,10 +655,11 @@ V8Context* V8DealerFeature::enterContext(TRI_vocbase_t* vocbase,
    if (_contexts.size() + _nrInflightContexts < _nrMaxContexts) {
      ++_nrInflightContexts;

      TRI_ASSERT(guard.isLocked());
      guard.unlock();

      try {
        LOG_TOPIC(TRACE, Logger::V8) << "creating additional V8 context";
        LOG_TOPIC(DEBUG, Logger::V8) << "creating additional V8 context";
        context = addContext();
      } catch (...) {
        guard.lock();

@ -666,6 +669,7 @@ V8Context* V8DealerFeature::enterContext(TRI_vocbase_t* vocbase,
      }

      // must re-lock
      TRI_ASSERT(!guard.isLocked());
      guard.lock();

      --_nrInflightContexts;

@ -674,6 +678,7 @@ V8Context* V8DealerFeature::enterContext(TRI_vocbase_t* vocbase,
      } catch (...) {
        // oops
        delete context;
        context = nullptr;
        continue;
      }

@ -683,6 +688,7 @@ V8Context* V8DealerFeature::enterContext(TRI_vocbase_t* vocbase,
      } catch (...) {
        TRI_ASSERT(!_contexts.empty());
        _contexts.pop_back();
        TRI_ASSERT(context != nullptr);
        delete context;
      }

@ -693,6 +699,7 @@ V8Context* V8DealerFeature::enterContext(TRI_vocbase_t* vocbase,
      JobGuard jobGuard(SchedulerFeature::SCHEDULER);
      jobGuard.block();

      TRI_ASSERT(guard.isLocked());
      guard.wait(100000);
    }

@ -701,6 +708,8 @@ V8Context* V8DealerFeature::enterContext(TRI_vocbase_t* vocbase,
      return nullptr;
    }
  }

  TRI_ASSERT(guard.isLocked());

  // in case we are in the shutdown phase, do not enter a context!
  // the context might have been deleted by the shutdown
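The assertions added above bracket a classic pattern: drop the condition lock while constructing an expensive resource, then re-acquire it before touching shared state, with an in-flight counter so concurrent waiters do not over-create. A self-contained sketch of that pattern with the standard library (all names illustrative):

#include <condition_variable>
#include <memory>
#include <mutex>
#include <vector>

struct Context { /* expensive resource */ };

class ContextPool {
 public:
  explicit ContextPool(size_t maxContexts) : _max(maxContexts) {}

  std::shared_ptr<Context> acquire() {
    std::unique_lock<std::mutex> guard(_lock);
    while (_free.empty()) {
      if (_contexts + _inflight < _max) {
        ++_inflight;
        guard.unlock();  // do the slow construction without the lock
        auto context = std::make_shared<Context>();
        guard.lock();    // must re-lock before touching shared state
        --_inflight;
        ++_contexts;
        return context;
      }
      _cv.wait(guard);   // someone else will release or create a context
    }
    auto context = _free.back();
    _free.pop_back();
    return context;
  }

  void release(std::shared_ptr<Context> context) {
    {
      std::lock_guard<std::mutex> guard(_lock);
      _free.push_back(std::move(context));
    }
    _cv.notify_one();
  }

 private:
  std::mutex _lock;
  std::condition_variable _cv;
  std::vector<std::shared_ptr<Context>> _free;
  size_t _contexts = 0;
  size_t _inflight = 0;
  size_t const _max;
};

The in-flight counter is what keeps several waiters from all deciding to build a new context at once while the lock is released.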
@ -1053,6 +1053,102 @@ static void JS_IsLeader(v8::FunctionCallbackInfo<v8::Value> const& args) {
  TRI_V8_TRY_CATCH_END
}

#ifdef DEBUG_SYNC_REPLICATION

////////////////////////////////////////////////////////////////////////////////
/// @brief was docuBlock addFollower
////////////////////////////////////////////////////////////////////////////////

static void JS_AddFollower(v8::FunctionCallbackInfo<v8::Value> const& args) {
  TRI_V8_TRY_CATCH_BEGIN(isolate);
  v8::HandleScope scope(isolate);

  TRI_vocbase_t* vocbase = GetContextVocBase(isolate);

  if (vocbase == nullptr) {
    TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
  }

  if (args.Length() < 1) {
    TRI_V8_THROW_EXCEPTION_USAGE("addFollower(<name>)");
  }

  ServerID const serverId = TRI_ObjectToString(args[0]);

  if (ServerState::instance()->isDBServer()) {
    arangodb::LogicalCollection const* v8Collection =
        TRI_UnwrapClass<arangodb::LogicalCollection>(args.Holder(),
                                                     WRP_VOCBASE_COL_TYPE);

    if (v8Collection == nullptr) {
      TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
    }

    TRI_vocbase_t* vocbase = v8Collection->vocbase();
    if (vocbase == nullptr) {
      TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
    }

    std::string collectionName = v8Collection->name();
    auto collection = vocbase->lookupCollection(collectionName);
    if (collection == nullptr) {
      TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND);
    }
    collection->followers()->add(serverId);
  }

  TRI_V8_RETURN_TRUE();
  TRI_V8_TRY_CATCH_END
}

#endif

////////////////////////////////////////////////////////////////////////////////
/// @brief was docuBlock removeFollower
////////////////////////////////////////////////////////////////////////////////

static void JS_RemoveFollower(v8::FunctionCallbackInfo<v8::Value> const& args) {
  TRI_V8_TRY_CATCH_BEGIN(isolate);
  v8::HandleScope scope(isolate);

  TRI_vocbase_t* vocbase = GetContextVocBase(isolate);

  if (vocbase == nullptr) {
    TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
  }

  if (args.Length() < 1) {
    TRI_V8_THROW_EXCEPTION_USAGE("removeFollower(<name>)");
  }

  ServerID const serverId = TRI_ObjectToString(args[0]);

  if (ServerState::instance()->isDBServer()) {
    arangodb::LogicalCollection const* v8Collection =
        TRI_UnwrapClass<arangodb::LogicalCollection>(args.Holder(),
                                                     WRP_VOCBASE_COL_TYPE);

    if (v8Collection == nullptr) {
      TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
    }

    TRI_vocbase_t* vocbase = v8Collection->vocbase();
    if (vocbase == nullptr) {
      TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
    }

    std::string collectionName = v8Collection->name();
    auto collection = vocbase->lookupCollection(collectionName);
    if (collection == nullptr) {
      TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND);
    }
    collection->followers()->remove(serverId);
  }

  TRI_V8_RETURN_TRUE();
  TRI_V8_TRY_CATCH_END
}

////////////////////////////////////////////////////////////////////////////////
/// @brief was docuBlock getFollowers
////////////////////////////////////////////////////////////////////////////////
@ -2977,6 +3073,12 @@ void TRI_InitV8Collections(v8::Handle<v8::Context> context,
                        JS_AssumeLeadership, true);
  TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("isLeader"),
                        JS_IsLeader, true);
#ifdef DEBUG_SYNC_REPLICATION
  TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("addFollower"),
                        JS_AddFollower, true);
#endif
  TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("removeFollower"),
                        JS_RemoveFollower, true);
  TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("getFollowers"),
                        JS_GetFollowers, true);
  TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("load"),
@ -281,6 +281,9 @@ void V8Task::start(boost::asio::io_service* ioService) {
}

void V8Task::cancel() {
  // this will prevent the task from dispatching itself again
  _periodic = false;

  boost::system::error_code ec;
  _timer->cancel(ec);
}
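Cancelling a periodic asio task takes two steps, as above: clear the rearm flag first, then cancel the in-flight timer so its handler fires with `operation_aborted` instead of scheduling itself again. A small self-contained sketch (names illustrative, assuming Boost.Asio):

#include <atomic>
#include <boost/asio.hpp>
#include <chrono>
#include <iostream>

class PeriodicTask {
 public:
  explicit PeriodicTask(boost::asio::io_service& io)
      : _timer(io), _periodic(true) {}

  void start() {
    _timer.expires_from_now(std::chrono::milliseconds(100));
    _timer.async_wait([this](boost::system::error_code const& ec) {
      if (ec || !_periodic) {
        return;  // cancelled, or told not to rearm
      }
      std::cout << "tick\n";
      start();  // rearm for the next period
    });
  }

  void cancel() {
    _periodic = false;  // prevent the handler from dispatching itself again
    boost::system::error_code ec;
    _timer.cancel(ec);  // abort the wait that is currently in flight
  }

 private:
  boost::asio::steady_timer _timer;
  std::atomic<bool> _periodic;
};

Clearing the flag before cancelling closes the window where a handler that is already running could rearm the timer after the cancel.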
@ -128,8 +128,7 @@ static void JS_TickRangesLoggerReplication(
  for (auto& it : ranges) {
    v8::Handle<v8::Object> df = v8::Object::New(isolate);

    df->ForceSet(TRI_V8_ASCII_STRING("datafile"),
                 TRI_V8_STD_STRING(it.filename));
    df->ForceSet(TRI_V8_ASCII_STRING("datafile"), TRI_V8_STD_STRING(it.filename));
    df->ForceSet(TRI_V8_ASCII_STRING("state"), TRI_V8_STD_STRING(it.state));
    df->ForceSet(TRI_V8_ASCII_STRING("tickMin"), TRI_V8UInt64String<TRI_voc_tick_t>(isolate, it.tickMin));
    df->ForceSet(TRI_V8_ASCII_STRING("tickMax"), TRI_V8UInt64String<TRI_voc_tick_t>(isolate, it.tickMax));
@ -734,8 +734,9 @@ static void CreateVocBase(v8::FunctionCallbackInfo<v8::Value> const& args,
  }

  std::unique_ptr<LogicalCollection> col =
      ClusterMethods::createCollectionOnCoordinator(collectionType, vocbase,
          infoSlice, true, createWaitsForSyncReplication);
      ClusterMethods::createCollectionOnCoordinator(
          collectionType, vocbase, infoSlice, false,
          createWaitsForSyncReplication);
  TRI_V8_RETURN(WrapCollection(isolate, col.release()));
}
@ -29,21 +29,25 @@
#include "Basics/WriteLocker.h"
#include "Basics/tri-strings.h"
#include "Cluster/ServerState.h"
#include "GeneralServer/AuthenticationFeature.h"
#include "GeneralServer/GeneralServerFeature.h"
#include "Logger/Logger.h"
#include "RestServer/DatabaseFeature.h"
#include "Ssl/SslInterface.h"
#include "Random/UniformCharacter.h"

#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>

using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::velocypack;
using namespace arangodb::rest;

static AuthEntry CreateAuthEntry(VPackSlice const& slice) {
static AuthEntry CreateAuthEntry(VPackSlice const& slice, AuthSource source) {
  if (slice.isNone() || !slice.isObject()) {
    return AuthEntry();
  }

@ -126,7 +130,7 @@ static AuthEntry CreateAuthEntry(VPackSlice const& slice) {
  // build authentication entry
  return AuthEntry(userSlice.copyString(), methodSlice.copyString(),
                   saltSlice.copyString(), hashSlice.copyString(), std::move(databases),
                   allDatabases, active, mustChange);
                   allDatabases, active, mustChange, source);
}

AuthLevel AuthEntry::canUseDatabase(std::string const& dbname) const {

@ -139,6 +143,12 @@ AuthLevel AuthEntry::canUseDatabase(std::string const& dbname) const {
  return it->second;
}

AuthInfo::AuthInfo()
    : _outdated(true),
      _authJwtCache(16384),
      _jwtSecret(""),
      _queryRegistry(nullptr) {}

void AuthInfo::setJwtSecret(std::string const& jwtSecret) {
  WRITE_LOCKER(writeLocker, _authJwtLock);
  _jwtSecret = jwtSecret;

@ -205,7 +215,13 @@ bool AuthInfo::populate(VPackSlice const& slice) {
  _authBasicCache.clear();

  for (VPackSlice const& authSlice : VPackArrayIterator(slice)) {
    AuthEntry auth = CreateAuthEntry(authSlice.resolveExternal());
    VPackSlice const& s = authSlice.resolveExternal();

    if (s.hasKey("source") && s.get("source").isString() && s.get("source").copyString() == "LDAP") {
      LOG_TOPIC(TRACE, arangodb::Logger::CONFIG) << "LDAP: skip user in collection _users: " << s.get("user").copyString();
      continue;
    }
    AuthEntry auth = CreateAuthEntry(s, AuthSource::COLLECTION);

    if (auth.isActive()) {
      _authInfo.emplace(auth.username(), std::move(auth));

@ -224,6 +240,11 @@ void AuthInfo::reload() {
    _outdated = false;
    return;
  }

  // TODO: is this correct?
  if (_authenticationHandler == nullptr) {
    _authenticationHandler.reset(application_features::ApplicationServer::getFeature<AuthenticationFeature>("Authentication")->getHandler());
  }

  {
    WRITE_LOCKER(writeLocker, _authInfoLock);

@ -250,7 +271,7 @@ void AuthInfo::reload() {
                             arangodb::aql::PART_MAIN);

  LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "starting to load authentication and authorization information";
  TRI_ASSERT(_queryRegistry != nullptr);
  TRI_ASSERT(_queryRegistry != nullptr);
  auto queryResult = query.execute(_queryRegistry);

  if (queryResult.code != TRI_ERROR_NO_ERROR) {

@ -285,6 +306,64 @@ void AuthInfo::reload() {
  _outdated = false;
}

// protected
HexHashResult AuthInfo::hexHashFromData(std::string const& hashMethod, char const* data, size_t len) {
  char* crypted = nullptr;
  size_t cryptedLength;
  char* hex;

  try {
    if (hashMethod == "sha1") {
      arangodb::rest::SslInterface::sslSHA1(data, len, crypted, cryptedLength);
    } else if (hashMethod == "sha512") {
      arangodb::rest::SslInterface::sslSHA512(data, len, crypted, cryptedLength);
    } else if (hashMethod == "sha384") {
      arangodb::rest::SslInterface::sslSHA384(data, len, crypted, cryptedLength);
    } else if (hashMethod == "sha256") {
      arangodb::rest::SslInterface::sslSHA256(data, len, crypted, cryptedLength);
    } else if (hashMethod == "sha224") {
      arangodb::rest::SslInterface::sslSHA224(data, len, crypted, cryptedLength);
    } else if (hashMethod == "md5") {
      arangodb::rest::SslInterface::sslMD5(data, len, crypted, cryptedLength);
    } else {
      // invalid algorithm...
      LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "invalid algorithm for hexHashFromData: " << hashMethod;
      return HexHashResult(TRI_ERROR_FAILED); // TODO: fix to correct error number
    }
  } catch (...) {
    // SslInterface::ssl....() allocate strings with new, which might throw
    // exceptions
    return HexHashResult(TRI_ERROR_FAILED);
  }

  if (crypted == nullptr ||
      cryptedLength == 0) {
    delete[] crypted;
    return HexHashResult(TRI_ERROR_OUT_OF_MEMORY);
  }

  size_t hexLen;
  hex = TRI_EncodeHexString(crypted, cryptedLength, &hexLen);
  delete[] crypted;

  if (hex == nullptr) {
    return HexHashResult(TRI_ERROR_OUT_OF_MEMORY);
  }

  HexHashResult result(std::string(hex, hexLen));
  TRI_FreeString(TRI_CORE_MEM_ZONE, hex);

  return result;
}

// public
AuthResult AuthInfo::checkPassword(std::string const& username,
                                   std::string const& password) {
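`hexHashFromData` above dispatches on the method name, hashes, then hex-encodes the raw digest. The hex-encoding half is easy to show in isolation; a standard-library-only sketch (the hashing itself would come from an SSL library, so a fixed byte array stands in for the digest):

#include <cstddef>
#include <string>

// Encode raw digest bytes as lowercase hex, two characters per byte.
std::string encodeHex(unsigned char const* data, size_t len) {
  static char const* digits = "0123456789abcdef";
  std::string hex;
  hex.reserve(2 * len);
  for (size_t i = 0; i < len; ++i) {
    hex.push_back(digits[data[i] >> 4]);    // high nibble
    hex.push_back(digits[data[i] & 0x0f]);  // low nibble
  }
  return hex;
}

// Example: a stand-in 4-byte "digest" becomes "00ffa10b".
// unsigned char digest[] = {0x00, 0xff, 0xa1, 0x0b};
// std::string hex = encodeHex(digest, sizeof(digest));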
@ -294,11 +373,134 @@ AuthResult AuthInfo::checkPassword(std::string const& username,

  AuthResult result(username);

  // look up username
  READ_LOCKER(readLocker, _authInfoLock);
  WRITE_LOCKER(writeLocker, _authInfoLock);
  auto it = _authInfo.find(username);

  if (it == _authInfo.end() || (it->second.source() == AuthSource::LDAP)) { // && it->second.created() < TRI_microtime() - 60)) {
    TRI_ASSERT(_authenticationHandler != nullptr);
    AuthenticationResult authResult = _authenticationHandler->authenticate(username, password);

    if (!authResult.ok()) {
      return result;
    }

    if (authResult.source() == AuthSource::LDAP) { // user authed, add to _authInfo and _users
      if (it != _authInfo.end()) { // && it->second.created() < TRI_microtime() - 60) {
        _authInfo.erase(username);
        it = _authInfo.end();
      }

      if (it == _authInfo.end()) {
        TRI_vocbase_t* vocbase = DatabaseFeature::DATABASE->systemDatabase();

        if (vocbase == nullptr) {
          LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "system database is unknown, cannot load authentication "
                                                    << "and authorization information";
          THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_FAILED, "_system database is unknown");
        }

        MUTEX_LOCKER(locker, _queryLock);

        std::string const queryStr("UPSERT {user: @username} INSERT @user REPLACE @user IN _users");
        auto emptyBuilder = std::make_shared<VPackBuilder>();

        VPackBuilder binds;
        binds.openObject();
        binds.add("username", VPackValue(username));

        binds.add("user", VPackValue(VPackValueType::Object));

        binds.add("user", VPackValue(username));
        binds.add("source", VPackValue("LDAP"));

        binds.add("databases", VPackValue(VPackValueType::Object));
        for(auto const& permission : authResult.permissions() ) {
          binds.add(permission.first, VPackValue(permission.second));
        }
        binds.close();

        binds.add("configData", VPackValue(VPackValueType::Object));
        binds.close();

        binds.add("userData", VPackValue(VPackValueType::Object));
        binds.close();

        binds.add("authData", VPackValue(VPackValueType::Object));
        binds.add("active", VPackValue(true));
        binds.add("changePassword", VPackValue(false));

        binds.add("simple", VPackValue(VPackValueType::Object));
        binds.add("method", VPackValue("sha256"));

        std::string salt = UniformCharacter(8, "0123456789abcdef").random();
        binds.add("salt", VPackValue(salt));

        std::string saltedPassword = salt + password;
        HexHashResult hex = hexHashFromData("sha256", saltedPassword.data(), saltedPassword.size());
        if (!hex.ok()) {
          THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_FAILED, "Could not calculate hex-hash from data");
        }
        binds.add("hash", VPackValue(hex.hexHash()));

        binds.close(); // simple
        binds.close(); // authData

        binds.close(); // user
        binds.close(); // obj

        arangodb::aql::Query query(false, vocbase, queryStr.c_str(),
                                   queryStr.size(), std::make_shared<VPackBuilder>(binds), emptyBuilder,
                                   arangodb::aql::PART_MAIN);

        TRI_ASSERT(_queryRegistry != nullptr);
        auto queryResult = query.execute(_queryRegistry);

        if (queryResult.code != TRI_ERROR_NO_ERROR) {
          if (queryResult.code == TRI_ERROR_REQUEST_CANCELED ||
              (queryResult.code == TRI_ERROR_QUERY_KILLED)) {
            THROW_ARANGO_EXCEPTION(TRI_ERROR_REQUEST_CANCELED);
          }
          _outdated = false;
          THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_FAILED, "query error");
        }

        VPackBuilder builder;
        builder.openObject();

        // username
        builder.add("user", VPackValue(username));
        builder.add("source", VPackValue("LDAP"));
        builder.add("authData", VPackValue(VPackValueType::Object));

        // simple auth
        builder.add("simple", VPackValue(VPackValueType::Object));
        builder.add("method", VPackValue("sha256"));

        builder.add("salt", VPackValue(salt));

        builder.add("hash", VPackValue(hex.hexHash()));

        builder.close(); // simple

        builder.add("active", VPackValue(true));

        builder.close(); // authData

        builder.add("databases", VPackValue(VPackValueType::Object));
        for(auto const& permission : authResult.permissions() ) {
          builder.add(permission.first, VPackValue(permission.second));
        }
        builder.close();
        builder.close(); // The Object

        AuthEntry auth = CreateAuthEntry(builder.slice().resolveExternal(), AuthSource::LDAP);
        _authInfo.emplace(auth.username(), std::move(auth));

        it = _authInfo.find(username);
      }
    } // AuthSource::LDAP
  }

  if (it == _authInfo.end()) {
    return result;
  }
@ -312,54 +514,13 @@ AuthResult AuthInfo::checkPassword(std::string const& username,
  result._mustChange = auth.mustChange();

  std::string salted = auth.passwordSalt() + password;
  size_t len = salted.size();
  HexHashResult hexHash = hexHashFromData(auth.passwordMethod(), salted.data(), salted.size());

  std::string const& passwordMethod = auth.passwordMethod();

  // default value is false
  char* crypted = nullptr;
  size_t cryptedLength;

  try {
    if (passwordMethod == "sha1") {
      arangodb::rest::SslInterface::sslSHA1(salted.c_str(), len, crypted, cryptedLength);
    } else if (passwordMethod == "sha512") {
      arangodb::rest::SslInterface::sslSHA512(salted.c_str(), len, crypted, cryptedLength);
    } else if (passwordMethod == "sha384") {
      arangodb::rest::SslInterface::sslSHA384(salted.c_str(), len, crypted, cryptedLength);
    } else if (passwordMethod == "sha256") {
      arangodb::rest::SslInterface::sslSHA256(salted.c_str(), len, crypted, cryptedLength);
    } else if (passwordMethod == "sha224") {
      arangodb::rest::SslInterface::sslSHA224(salted.c_str(), len, crypted, cryptedLength);
    } else if (passwordMethod == "md5") {
      arangodb::rest::SslInterface::sslMD5(salted.c_str(), len, crypted, cryptedLength);
    } else {
      // invalid algorithm...
    }
  } catch (...) {
    // SslInterface::ssl....() allocate strings with new, which might throw
    // exceptions
  if (!hexHash.ok()) {
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "hexcalc did not work");
  }

  if (crypted != nullptr) {
    if (0 < cryptedLength) {
      size_t hexLen;
      char* hex = TRI_EncodeHexString(crypted, cryptedLength, &hexLen);

      if (hex != nullptr) {
        result._authorized = auth.checkPasswordHash(hex);
        TRI_FreeString(TRI_CORE_MEM_ZONE, hex);
      }
    }

    delete[] crypted;
  }
  result._authorized = auth.checkPasswordHash(hexHash.hexHash());

  return result;
}
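The rewrite collapses the inline hashing into one `hexHashFromData` call: verification is just hash(salt + password), hex-encoded, compared against the stored hex digest. A sketch of that flow with a deliberately non-cryptographic stand-in hash (do not use `std::hash` for real credentials; the original dispatches to SHA-family functions):

#include <cstdio>
#include <functional>
#include <string>

// Stand-in for hexHashFromData(): hash the salted input, return hex digits.
// Illustrative only -- real code must use a cryptographic hash (e.g. SHA-256).
std::string hexHashStandIn(std::string const& data) {
  size_t h = std::hash<std::string>{}(data);
  char buf[2 * sizeof(h) + 1];
  std::snprintf(buf, sizeof(buf), "%zx", h);
  return std::string(buf);
}

bool checkPassword(std::string const& salt, std::string const& password,
                   std::string const& storedHexHash) {
  // Same shape as the rewritten code: salt + password in, hex digest out,
  // then a plain string comparison against what is stored for the user.
  return hexHashStandIn(salt + password) == storedHexHash;
}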
@ -372,7 +533,7 @@ AuthLevel AuthInfo::canUseDatabase(std::string const& username,
  }

  READ_LOCKER(readLocker, _authInfoLock);

  auto const& it = _authInfo.find(username);

  if (it == _authInfo.end()) {

@ -384,7 +545,7 @@ AuthLevel AuthInfo::canUseDatabase(std::string const& username,
  return entry.canUseDatabase(dbname);
}

// public
// public called from VocbaseContext.cpp
AuthResult AuthInfo::checkAuthentication(AuthType authType,
                                         std::string const& secret) {
  if (_outdated) {
@ -31,10 +31,13 @@
#include <velocypack/Builder.h>
#include <velocypack/velocypack-aliases.h>

#include "ApplicationFeatures/ApplicationFeature.h"
#include "Aql/QueryRegistry.h"
#include "Basics/Mutex.h"
#include "Basics/LruCache.h"
#include "Basics/ReadWriteLock.h"
#include "Basics/Result.h"
#include "GeneralServer/AuthenticationHandler.h"

namespace arangodb {
namespace velocypack {

@ -44,21 +47,42 @@ class Slice;
enum class AuthLevel {
  NONE, RO, RW
};

enum class AuthSource {
  COLLECTION, LDAP
};

class HexHashResult : public arangodb::Result {
 public:
  explicit HexHashResult(int errorNumber) : Result(errorNumber) {}
  explicit HexHashResult(std::string const& hexHash) : Result(0), _hexHash(hexHash) {}
  std::string const& hexHash() { return _hexHash; }

 protected:
  std::string const _hexHash;
};

class AuthEntry {
 public:
  AuthEntry() : _active(false), _mustChange(false), _allDatabases(AuthLevel::NONE) {}
  AuthEntry()
      : _active(false),
        _mustChange(false),
        _created(TRI_microtime()),
        _source(AuthSource::COLLECTION),
        _allDatabases(AuthLevel::NONE) {}

  AuthEntry(std::string&& username, std::string&& passwordMethod,
            std::string&& passwordSalt, std::string&& passwordHash,
            std::unordered_map<std::string, AuthLevel>&& databases, AuthLevel allDatabases,
            bool active, bool mustChange)
            bool active, bool mustChange, AuthSource source)
      : _username(std::move(username)),
        _passwordMethod(std::move(passwordMethod)),
        _passwordSalt(std::move(passwordSalt)),
        _passwordHash(std::move(passwordHash)),
        _active(active),
        _mustChange(mustChange),
        _created(TRI_microtime()),
        _source(source),
        _databases(std::move(databases)),
        _allDatabases(allDatabases) {}

@ -71,6 +95,8 @@ class AuthEntry {
        _passwordHash(std::move(other._passwordHash)),
        _active(other._active),
        _mustChange(other._mustChange),
        _created(other._created),
        _source(other._source),
        _databases(std::move(other._databases)),
        _allDatabases(other._allDatabases) {}

@ -81,6 +107,8 @@ class AuthEntry {
  std::string const& passwordHash() const { return _passwordHash; }
  bool isActive() const { return _active; }
  bool mustChange() const { return _mustChange; }
  double created() const { return _created; }
  AuthSource source() const { return _source; }

  bool checkPasswordHash(std::string const& hash) const {
    return _passwordHash == hash;

@ -95,6 +123,8 @@ class AuthEntry {
  std::string const _passwordHash;
  bool const _active;
  bool _mustChange;
  double _created;
  AuthSource _source;
  std::unordered_map<std::string, AuthLevel> const _databases;
  AuthLevel const _allDatabases;
};

@ -119,6 +149,9 @@ class AuthJwtResult: public AuthResult {
  std::chrono::system_clock::time_point _expireTime;
};

class AuthenticationHandler;

class AuthInfo {
 public:
  enum class AuthType {

@ -126,13 +159,8 @@ class AuthInfo {
  };

 public:
  AuthInfo()
      : _outdated(true),
        _authJwtCache(16384),
        _jwtSecret(""),
        _queryRegistry(nullptr) {
  }

  AuthInfo();

 public:
  void setQueryRegistry(aql::QueryRegistry* registry) {
    TRI_ASSERT(registry != nullptr);

@ -167,6 +195,10 @@ class AuthInfo {
  bool validateJwtHMAC256Signature(std::string const&, std::string const&);
  std::shared_ptr<VPackBuilder> parseJson(std::string const&, std::string const&);

  HexHashResult hexHashFromData(std::string const& hashMethod, char const* data, size_t len);

 private:
  basics::ReadWriteLock _authInfoLock;
  basics::ReadWriteLock _authJwtLock;

@ -178,6 +210,7 @@ class AuthInfo {
  arangodb::basics::LruCache<std::string, arangodb::AuthJwtResult> _authJwtCache;
  std::string _jwtSecret;
  aql::QueryRegistry* _queryRegistry;
  std::unique_ptr<AuthenticationHandler> _authenticationHandler;
};
}
@ -499,15 +499,7 @@ std::string LogicalCollection::name() const {
}

std::string const LogicalCollection::distributeShardsLike() const {
  if (!_distributeShardsLike.empty()) {
    CollectionNameResolver resolver(_vocbase);
    TRI_voc_cid_t shardLike =
        resolver.getCollectionIdCluster(_distributeShardsLike);
    if (shardLike != 0) {
      return basics::StringUtils::itoa(shardLike);
    }
  }
  return "";
  return _distributeShardsLike;
}

void LogicalCollection::distributeShardsLike(std::string const& cid) {
@ -74,6 +74,10 @@ typedef enum {

  REPLICATION_INDEX_CREATE = 2100,
  REPLICATION_INDEX_DROP = 2101,

  REPLICATION_VIEW_CREATE = 2110,
  REPLICATION_VIEW_DROP = 2111,
  REPLICATION_VIEW_CHANGE = 2112,

  REPLICATION_TRANSACTION_START = 2200,
  REPLICATION_TRANSACTION_COMMIT = 2201,
@ -309,14 +309,15 @@ int DumpFeature::dumpCollection(int fd, std::string const& cid,
  uint64_t chunkSize = _chunkSize;

  std::string const baseUrl =
      "/_api/replication/dump?collection=" + cid + "&ticks=false&flush=false";
      "/_api/replication/dump?collection=" + cid +
      "&batchId=" + StringUtils::itoa(_batchId) +
      "&ticks=false&flush=false";

  uint64_t fromTick = _tickStart;

  while (true) {
    std::string url = baseUrl + "&from=" + StringUtils::itoa(fromTick) +
                      "&chunkSize=" + StringUtils::itoa(chunkSize) +
                      "&batchId=" + StringUtils::itoa(_batchId);
                      "&chunkSize=" + StringUtils::itoa(chunkSize);

    if (maxTick > 0) {
      url += "&to=" + StringUtils::itoa(maxTick);

@ -682,6 +683,7 @@ int DumpFeature::runDump(std::string& dbName, std::string& errorMsg) {
int DumpFeature::dumpShard(int fd, std::string const& DBserver,
                           std::string const& name, std::string& errorMsg) {
  std::string const baseUrl = "/_api/replication/dump?DBserver=" + DBserver +
                              "&batchId=" + StringUtils::itoa(_batchId) +
                              "&collection=" + name + "&chunkSize=" +
                              StringUtils::itoa(_chunkSize) + "&ticks=false";
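Both hunks move `batchId` into the constant base URL, since it is fixed for the whole dump, while `from`, `chunkSize`, and `to` stay in the per-request part. A small sketch of that split (plain string concatenation; the function names are illustrative):

#include <cstdint>
#include <string>

// Parameters fixed for the whole dump go into the base URL once...
std::string makeBaseUrl(std::string const& cid, uint64_t batchId) {
  return "/_api/replication/dump?collection=" + cid +
         "&batchId=" + std::to_string(batchId) +
         "&ticks=false&flush=false";
}

// ...while per-request parameters are appended on every iteration.
std::string makeRequestUrl(std::string const& baseUrl, uint64_t fromTick,
                           uint64_t chunkSize, uint64_t maxTick) {
  std::string url = baseUrl + "&from=" + std::to_string(fromTick) +
                    "&chunkSize=" + std::to_string(chunkSize);
  if (maxTick > 0) {
    url += "&to=" + std::to_string(maxTick);
  }
  return url;
}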
@ -20,7 +20,7 @@ set(CPACK_ARANGO_DATA_DIR "${INST_USR_LIBDIR}${CMAKE_INSTALL_PREFIX}/${CMAKE_INS
set(CPACK_ARANGO_LOG_DIR "${INST_USR_LIBDIR}${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LOCALSTATEDIR}/log/arangodb3")
set(CPACK_ARANGO_STATE_DIR "${INST_USR_LIBDIR}${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LOCALSTATEDIR}")

set(CPACK_ARANGODB_APPS_DIRECTORY "${INST_USR_LIBDIR}/opt/arangodb${ARANGODB_APPS_DIRECTORY}")
set(CPACK_ARANGODB_APPS_DIRECTORY "${INST_USR_LIBDIR}${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LOCALSTATEDIR}/lib/arangodb3-apps")

to_native_path("CPACK_ARANGODB_APPS_DIRECTORY")
to_native_path("CMAKE_INSTALL_DATAROOTDIR_ARANGO")
@ -65,7 +65,6 @@ function collectionRepresentation(collection, showProperties, showCount, showFig

  if (cluster.isCoordinator()) {
    result.avoidServers = properties.avoidServers;
    result.distributeShardsLike = properties.distributeShardsLike;
    result.numberOfShards = properties.numberOfShards;
    result.replicationFactor = properties.replicationFactor;
    result.avoidServers = properties.avoidServers;
@ -970,12 +970,25 @@ actions.defineHttp({
        "body must be an object with a string attribute 'server'");
      return;
    }

    // First translate the server name from short name to long name:
    var server = body.server;
    var servers = global.ArangoClusterInfo.getDBServers();
    for (let i = 0; i < servers.length; i++) {
      if (servers[i].serverId !== server) {
        if (servers[i].serverName === server) {
          server = servers[i].serverId;
          break;
        }
      }
    }

    var ok = true;
    var id;
    try {
      id = ArangoClusterInfo.uniqid();
      var todo = { 'type': 'cleanOutServer',
                   'server': body.server,
                   'server': server,
                   'jobId': id,
                   'timeCreated': (new Date()).toISOString(),
                   'creator': ArangoServerState.id() };
File diff suppressed because one or more lines are too long
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
@ -369,7 +369,7 @@
<div id="collectionsThumbnailsIn" class="tileList pure-g">
<div class="tile pure-u-1-1 pure-u-sm-1-2 pure-u-md-1-3 pure-u-lg-1-4 pure-u-xl-1-6">
<div class="fullBorderBox">
<a href="#" id="createCollection" class="add"><span id="newCollection" class="pull-left add-Icon"><i class="fa fa-plus-circle"></i>
<a id="createCollection" class="add"><span id="newCollection" class="pull-left add-Icon"><i class="fa fa-plus-circle"></i>
</span> Add Collection</a>
</div>
</div>
@ -815,9 +815,8 @@ if (list.length > 0) {
<input type="hidden" id="newEdgeDefinitions<%= number%>" value="" placeholder="Edge definitions" tabindex="-1" class="select2-offscreen">
<button id="remove_newEdgeDefinitions<%= number%>" class="graphViewer-icon-button gv_internal_remove_line gv-icon-small delete"></button>
</th><th>
<a class="modalTooltips" title="Some info for edge definitions">
<span class="arangoicon icon_arangodb_info"></span>
</a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="Some info for edge definitions">
</span>
</th>
</tr>
<tr class="tableRow" id="row_fromCollections<%= number%>">

@ -825,9 +824,8 @@ if (list.length > 0) {
<th class="collectionTh">
<input type="hidden" id="fromCollections<%= number%>" value="" placeholder="fromCollections" tabindex="-1" class="select2-offscreen">
</th><th>
<a class="modalTooltips" title="The collection that contains the start vertices of the relation.">
<span class="arangoicon icon_arangodb_info"></span>
</a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="The collection that contains the start vertices of the relation.">
</span>
</th>
</tr>
<tr class="tableRow" id="row_toCollections<%= number%>">

@ -835,9 +833,8 @@ if (list.length > 0) {
<th class="collectionTh">
<input type="hidden" id="toCollections<%= number%>" value="" placeholder="toCollections" tabindex="-1" class="select2-offscreen">
</th><th>
<a class="modalTooltips" title="The collection that contains the end vertices of the relation.">
<span class="arangoicon icon_arangodb_info"></span>
</a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="The collection that contains the end vertices of the relation.">
</span>
</th>
</tr></script><script id="editListEntryView.ejs" type="text/template"><td class="writable sorting_1"> <% if (isReadOnly) { %> <span class="key"><%=key%></span> <% } else { %> <input type="text" class="key" placeholder="attribute" <%=key?"value=" + key:""%>></input> <% } %> </td>
<td class="writeable rightCell"> <% if (isReadOnly) { %> <span class="val"><%=value%></span> <% } else { %> <textarea class="val" placeholder="value"><%=value?value:""%></textarea> <% } %> </td>
@ -846,7 +843,19 @@ if (list.length > 0) {
<a class="deleteAttribute">
<span class="icon_arangodb_roundminus" data-original-title="Delete attribute"></span>
</a>
</td></script><script id="footerView.ejs" type="text/template"> <%
</td></script><script id="filterSelect.ejs" type="text/template"><div class="filterSelectBox">
<div class="filterLabel"><%=name%><span><i class="fa fa-close" id="closeFilter"></i></span></div>

<div class="filterInput">
<input type="text" id="<%=name%>-filter" placeholder="Filter <%=name%>" aria-label="Filter labels" autocomplete="off">
</div>

<div class="filterOptions">
<div id="showAll"><strong>Show all</strong></div> <% var cssClass; %> <% _.each(options, function (option) { %> <% if (option.active) { %> <% cssClass = 'active'; %> <% } else { %> <% cssClass = 'inactive'; %> <% } %> <div class="<%=cssClass%>" name="<%=option.name%>" id="<%=option.name%>-option"> <% var color = option.color || '#f6f8fa'; %> <% if (cssClass === 'active') { %> <span class="marked"><i class="fa fa-check"/></span> <% } else { %> <span style="visibility: hidden;" class="marked"><i class="fa fa-check"/></span> <% } %> <span class="color" style="background-color: <%=color%>"> </span>
<span class="name"><%=option.name%></span>
</div> <% }); %> </div>

</div></script><script id="footerView.ejs" type="text/template"> <%
var n,v,db;
if (name) {
n = name || "";

@ -896,7 +905,7 @@ if (list.length > 0) {
<tr>
<th class="collectionTh">Mount:</th>
<th class="collectionTh"><input type="text" id="change-mount-point" name="mountpoint" value="<%=attributes.mount%>"/></th>
<th><a class="modalTooltips" title="The path where the app can be reached."><i class="arangoicon icon_arangodb_info"></i></a></th>
<th><span class="modalTooltips arangoicon icon_arangodb_info" title="The path where the app can be reached."></span></th>
</tr>
<tr>
<th class="collectionTh">Version:</th>
@ -1135,13 +1144,7 @@ if (list.length > 0) {
<tr>
<th class="collectionTh">Type:</th>
<th class="">
<select id="newIndexType">
<option value="Geo">Geo Index</option>
<option value="Hash">Hash Index</option>
<option value="Persistent">Persistent Index</option>
<option value="Fulltext">Fulltext Index</option>
<option value="Skiplist">Skip-List Index</option>
</select>
<select id="newIndexType"> <% if (supported.indexOf('geo') > -1) { %> <option value="Geo">Geo Index</option> <% } %> <% if (supported.indexOf('hash') > -1) { %> <option value="Hash">Hash Index</option> <% } %> <% if (supported.indexOf('persistent') > -1) { %> <option value="Persistent">Persistent Index</option> <% } %> <% if (supported.indexOf('fulltext') > -1) { %> <option value="Fulltext">Fulltext Index</option> <% } %> <% if (supported.indexOf('skiplist') > -1) { %> <option value="Skiplist">Skip-List Index</option> <% } %> </select>
</th>
<th class="" style="width: 18px"/>
</tr>
@ -1353,7 +1356,50 @@ if (list.length > 0) {
<tr class="odd">
<td valign="top" class="dataTables_empty">Loading...</td>
</tr>
</tbody></script><script id="loginView.ejs" type="text/template"><div class="loginFixedWindow">
</tbody></script><script id="loggerView.ejs" type="text/template"><div id="loggerContent" class="logger-content-id innerContent">

<div class="arangoToolbar arangoToolbarTop">

<div class="pull-left">
</div>

<div class="pull-right">
<button id="logTopicSelection" class="button-default filterSelect">Topic <i class="fa fa-caret-down"></i></button>
<button id="logLevelSelection" class="button-default filterSelect">Level <i class="fa fa-caret-down"></i></button>
<button style="display: none" class="button-default filterSelect" id="logFilters">
<i class="fa fa-close"></i>Clear current <a id="filterDesc"></a> filter
</button>
</div>

</div>

<div id="noLogEntries" style="display: none">
<span>No suitable log entries found </span>
</div>

<div id="logEntries">
</div>

<div class="actions">
<button id="loadMoreEntries" class="button-success">Load older entries</button>
</div>

</div></script><script id="loggerViewEntries.ejs" type="text/template"> <% _.each(entries, function (entry) { %> <div class="pure-g" level="<%=entry.status.toLowerCase()%>" topic="<%=entry.topic.toLowerCase()%>">
<div class="pure-u-1-24">
<p class="labels"> <% var lcolor = arangoHelper.statusColors[entry.status.toLowerCase()];%> <% var tcolor = arangoHelper.alphabetColors[entry.topic.charAt(0).toLowerCase()];%> <span class="tippy level" title="Level: <%=entry.status%>" style="background: <%=lcolor%>"></span>
<span class="tippy topic" title="Topic: <%=entry.topic%>" style="background: <%=tcolor%>"></span>
</p>
</div>
<div class="pure-u-19-24 desc">
<p class="msg"><%=entry.msg%></p>
</div>
<div class="pure-u-4-24 date">
<p> <% var x = moment(entry.timestamp, "X").fromNow() %> <i class="fa fa-clock-o" aria-hidden="true"></i>
<span class="tippy" title="<%=entry.date%>"><%= x %></span>
</p>
</div>
<div class="logBorder"></div>
</div> <% }); %> </script><script id="loginView.ejs" type="text/template"><div class="loginFixedWindow">

<div id="loginWindow" class="login-window">
<div class="login-logo-round">

@ -1385,9 +1431,7 @@ if (list.length > 0) {
<button id="logout" class="button-danger pull-right">Logout</button>
</div>
</div>
</div></script><script id="logsView.ejs" type="text/template"><div id="logContent" class="log-content-id innerContent">
</div>
<!-- <div id="logPaginationDiv" class="pagination-line"></div> --></script><script id="modalApplicationMount.ejs" type="text/template"><table>
</div></script><script id="modalApplicationMount.ejs" type="text/template"><table>
<tr class="tableRow"> <% if (content === true) { %> <th class="collectionInfoTh">
Run teardown:
</th>
@ -1400,9 +1444,8 @@ if (list.length > 0) {
<input type="text" id="new-app-mount" value="" placeholder="/my/foxx"></input>
</th>
<th>
<a class="modalTooltips" title="The path the app will be mounted. Has to start with /. Is not allowed to start with /_">
<span class="arangoicon icon_arangodb_info"></span>
</a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="The path the app will be mounted. Has to start with /. Is not allowed to start with /_">
</span>
</th> <% } %> </tr>
</table>

@ -1467,9 +1510,8 @@ if (list.length > 0) {
<input type="hidden" id="new-app-document-collections" value="" placeholder="Document Collections"></input>
</th>
<th>
<a class="modalTooltips" title="A list of document collections that will be created specifically for this Service. A CRUD API for these will be generated.">
<span class="arangoicon icon_arangodb_info"></span>
</a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="A list of document collections that will be created specifically for this Service. A CRUD API for these will be generated.">
</span>
</th>
</tr>
<tr class="tableRow">

@ -1480,9 +1522,8 @@ if (list.length > 0) {
<input type="hidden" id="new-app-edge-collections" value="" placeholder="Edge Collections"></input>
</th>
<th>
<a class="modalTooltips" title="A list of edge collections that will be created specifically for this Service. A CRUD API for these will be generated.">
<span class="arangoicon icon_arangodb_info"></span>
</a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="A list of edge collections that will be created specifically for this Service. A CRUD API for these will be generated.">
</span>
</th>
</tr>
</table>

@ -1526,9 +1567,8 @@ if (list.length > 0) {
<input type="checkbox" id="github-app-islegacy" value="true">
</th>
<th>
<a class="modalTooltips" title="Legacy Compatibility Mode allows mounting some Foxx services written for ArangoDB 2.8 or older. This overrides the ArangoDB version specified in the service manifest. See the Foxx documentation for more information on running legacy services in ArangoDB 3.">
<span class="arangoicon icon_arangodb_info"></span>
</a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="Legacy Compatibility Mode allows mounting some Foxx services written for ArangoDB 2.8 or older. This overrides the ArangoDB version specified in the service manifest. See the Foxx documentation for more information on running legacy services in ArangoDB 3.">
</span>
</th>
</tr>
</table>

@ -1552,9 +1592,8 @@ if (list.length > 0) {
<input type="checkbox" id="zip-app-islegacy" value="true">
</th>
<th>
<a class="modalTooltips" title="Legacy Compatibility Mode allows mounting some Foxx services written for ArangoDB 2.8 or older. This overrides the ArangoDB version specified in the service manifest. See the Foxx documentation for more information on running legacy services in ArangoDB 3.">
<span class="arangoicon icon_arangodb_info"></span>
</a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="Legacy Compatibility Mode allows mounting some Foxx services written for ArangoDB 2.8 or older. This overrides the ArangoDB version specified in the service manifest. See the Foxx documentation for more information on running legacy services in ArangoDB 3.">
</span>
</th>
</tr>
</table>
@ -1593,9 +1632,8 @@ if (list.length > 0) {
</th>
<th class="tooltipInfoTh">
<div>
<a class="modalTooltips" data-toggle="tooltip" data-placement="left" title="The maximal size of a journal or datafile (in MB). Must be at least 1.">
<span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
</a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="The maximal size of a journal or datafile (in MB). Must be at least 1.">
</span>
</div>
</th>
</tr>

@ -1606,9 +1644,8 @@ if (list.length > 0) {
</th>
<th class="tooltipInfoTh">
<div>
<a class="modalTooltips" data-toggle="tooltip" data-placement="left" title="Synchronize to disk before returning from a create or update of a document.">
<span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
</a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="Synchronize to disk before returning from a create or update of a document.">
</span>
</div>
</th>
</tr>

@ -1706,9 +1743,8 @@ if (list.length > 0) {
<th class="modal-text"> <%=prettyBytes(figuresData.figures.alive.size)%> </th>
<th class="modal-text"> -</th>
<th class="tooltipInfoTh">
<a class="modalTooltips" title="Total number and size of all living documents.">
<span class="arangoicon icon_arangodb_info"></span>
</a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="Total number and size of all living documents.">
</span>
</th>
</tr>
<tr>

@ -1719,9 +1755,8 @@ if (list.length > 0) {

<th class="tooltipInfoTh">
<div>
<a class="modalTooltips" title="Total number and size of all dead documents.">
<span class="arangoicon icon_arangodb_info"></span>
</a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="Total number and size of all dead documents.">
</span>
</div>
</th>

@ -1744,9 +1779,8 @@ if (list.length > 0) {
<th class="modal-text"> </th>
<th class="tooltipInfoTh">
<div>
<a class="modalTooltips" data-toggle="tooltip" data-placement="left" title="Number and total size of active datafiles.">
<span class="arangoicon icon_arangodb_info"></span>
</a>
<span class="modalTooltips arangoicon icon_arangodb_info" data-toggle="tooltip" data-placement="left" title="Number and total size of active datafiles.">
</span>
</div>
</th>
</tr>

@ -1756,8 +1790,8 @@ if (list.length > 0) {
<th class="modal-text"> <%=prettyBytes(figuresData.figures.journals.fileSize)%> </th>
<th class="modal-text"> </th>
<th class="tooltipInfoTh">
<a class="modalTooltips" title="Number and total size of journal files.">
<span class="arangoicon icon_arangodb_info"></span></a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="Number and total size of journal files.">
</span>
</th>
</tr>
<tr>

@ -1766,8 +1800,7 @@ if (list.length > 0) {
<th class="modal-text"> <%=prettyBytes(figuresData.figures.compactors.fileSize)%> </th>
<th class="modal-text"> </th>
<th class="tooltipInfoTh">
<a class="modalTooltips" title="Number and total size of compactor files.">
<span class="arangoicon icon_arangodb_info"></span></a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="Number and total size of compactor files."></span>
</th>
</tr>
<tr>

@ -1776,8 +1809,7 @@ if (list.length > 0) {
<th class="modal-text"> <%=prettyBytes(figuresData.figures.indexes.size)%> </th>
<th class="modal-text"> </th>
<th class="tooltipInfoTh">
<a class="modalTooltips" title="Number and total memory usage of indexes.">
<span class="arangoicon icon_arangodb_info"></span></a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="Number and total memory usage of indexes."></span>
</th>
</tr>
</table>

@ -1796,8 +1828,8 @@ if (list.length > 0) {
<th class="modal-text"> </th>
<th class="modal-text"> </th>
<th class="tooltipInfoTh">
<a class="modalTooltips" title="Total number of uncollected WAL entries">
<span class="arangoicon icon_arangodb_info"></span></a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="Total number of uncollected WAL entries">
</span>
</th>
</tr>
<tr>

@ -1806,8 +1838,8 @@ if (list.length > 0) {
<th class="modal-text"> </th>
<th class="modal-text"> </th>
<th class="tooltipInfoTh">
<a class="modalTooltips" title="Total number of objects pointing to documents in collection datafiles">
<span class="arangoicon icon_arangodb_info"></span></a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="Total number of objects pointing to documents in collection datafiles">
</span>
</th>
</tr>
</table></script><script id="modalDownloadFoxx.ejs" type="text/template"><div>
@ -1902,9 +1934,8 @@ if (list.length > 0) {

if (row.info) {
%> <th>
<a class="modalTooltips" title="<%=row.info%>">
<span class="arangoicon icon_arangodb_info"></span>
</a>
<span class="modalTooltips arangoicon icon_arangodb_info" title="<%=row.info%>">
</span>
</th> <%
}
%> </tr> <%

@ -1982,9 +2013,8 @@ if (list.length > 0) {
}
%> <% if (row.info) { %> </th>
<th>
<a class="modalTooltips" title="<%=row.info%>">
<span class="arangoicon icon_arangodb_info"></span>
</a> <% } %> </th>
<span class="modalTooltips arangoicon icon_arangodb_info" title="<%=row.info%>">
</span> <% } %> </th>
</tr> <%
};
%> <% if (content) { %> <table>
@ -2099,7 +2129,21 @@ if (list.length > 0) {
|
|||
<p><a href="https://slack.arangodb.com" target="_blank"><i class="fa fa-slack"></i></a></p>
|
||||
<p><a href="https://stackoverflow.com/questions/tagged/arangodb" target="_blank"><i class="fa fa-stack-overflow"></i></a></p>
|
||||
<p><a href="https://groups.google.com/group/arangodb" target="_blank"><i class="fa fa-google"></i></a></p>
|
||||
</div></script><script id="nodesView.ejs" type="text/template"><div id="nodesContent" class="innerContent"> <% if (Object.keys(coords).length > 0) { %> <% var disabled = ''; %> <div class="pure-g">
|
||||
</div></script><script id="nodeInfoView.ejs" type="text/template"><div class="nodeInfoView">
|
||||
<div class="modal-body">
|
||||
<table id="serverInfoTable" class="arango-table">
|
||||
<tbody> <% _.each(entries, function (entry, name) { %> <tr>
|
||||
<th class="collectionInfoTh2"><%=name%></th>
|
||||
<th class="collectionInfoTh">
|
||||
<div id="server-<%=name%>" class="modal-text"><%=entry%></div>
|
||||
</th>
|
||||
<th> <% if (entry.description) { %> <th class="tooltipInfoTh">
|
||||
<span class="tippy" title="<%=entry.description%>"></span>
|
||||
</th> <% } %> </th>
|
||||
</tr> <% }); %> </tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div></script><script id="nodesView.ejs" type="text/template"><div id="nodesContent" class="innerContent"> <% if (Object.keys(coords).length > 0) { %> <% var disabled = ''; %> <div class="pure-g">
|
||||
|
||||
<div class="pure-u-1-1 pure-u-md-1-1 pure-u-lg-1-1 pure-u-xl-1-2">
|
||||
|
||||
|
@ -2127,18 +2171,19 @@ if (list.length > 0) {
|
|||
<div class="pure-g cluster-nodes-title pure-table pure-table-header pure-title" style="clear: both">
|
||||
<div class="pure-table-row">
|
||||
<div class="pure-u-9-24 left">Name</div>
|
||||
<div class="pure-u-8-24 left">Endpoint</div>
|
||||
<div class="pure-u-3-24 mid hide-small">Heartbeat</div>
|
||||
<div class="pure-u-3-24 mid">Status</div>
|
||||
<div class="pure-u-1-24 mid"></div>
|
||||
<div class="pure-u-9-24 left">Endpoint</div>
|
||||
<div class="pure-u-2-24 mid hide-small">Since</div>
|
||||
<div class="pure-u-2-24 mid">Info</div>
|
||||
<div class="pure-u-2-24 mid">Status</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="pure-g cluster-nodes coords-nodes pure-table pure-table-body"> <% _.each(coords, function(node, name) { %> <% var id = name + "-node"; %> <div class="pure-table-row <%= disabled %>" node="<%= id %>">
|
||||
|
||||
<div class="pure-u-9-24 left"> <%= node.ShortName %> <i class="fa fa-bar-chart"></i> <% if(node.Status === 'FAILED') { %> <i class="fa fa-trash-o"></i> <% } %> </div>
|
||||
<div class="pure-u-8-24 left"><%= node.Endpoint %></div> <% var formatted = (node.LastHeartbeatAcked).substr(11, 18).slice(0, -1); %> <div class="pure-u-3-24 hide-small mid"><%= formatted %></div>
|
||||
<div class="pure-u-3-24 mid"><%= node.LastHeartbeatStatus %></div> <% if(node.Status === 'GOOD') { %> <div class="pure-u-1-24 mid state"><i class="fa fa-check-circle"></i></div> <% } else { %> <div class="pure-u-1-24 mid state"><i class="fa fa-exclamation-circle"></i></div> <% } %> </div> <% }); %> </div>
|
||||
<div class="pure-u-9-24 left"><%= node.Endpoint %></div> <% var formatted = (node.LastHeartbeatAcked).substr(11, 18).slice(0, -1); %> <div class="pure-u-2-24 hide-small mid"><%= formatted %></div>
|
||||
|
||||
<div class="pure-u-2-24 mid"><i class="fa fa-info-circle"></i></div> <% if(node.Status === 'GOOD') { %> <div class="pure-u-2-24 mid state"><i class="fa fa-check-circle tippy" title="<%= node.LastHeartbeatStatus %>"></i></div> <% } else { %> <div class="pure-u-2-24 mid state"><i class="fa fa-exclamation-circle"></i></div> <% } %> </div> <% }); %> </div>
|
||||
</div> <% } %> <% if (Object.keys(dbs).length > 0) { %> <% var disabled = ''; %> <% disabled = " disabled"; %> <div class="pure-u-1-1 pure-u-md-1-1 pure-u-lg-1-1 pure-u-xl-1-2">
|
||||
<div class="sectionHeader pure-g">
|
||||
<div class="pure-u-1-5">
|
||||
|
@ -2164,16 +2209,17 @@ if (list.length > 0) {
<div class="pure-g cluster-nodes-title pure-table pure-table-header pure-title">
  <div class="pure-table-row">
    <div class="pure-u-9-24 left">Name</div>
    <div class="pure-u-9-24 left">Endpoint</div>
    <div class="pure-u-2-24 mid hide-small">Since</div>
    <div class="pure-u-2-24 mid">Info</div>
    <div class="pure-u-2-24 mid">Status</div>
  </div>
</div> <% } %> <div class="pure-g cluster-nodes dbs-nodes pure-table pure-table-body"> <% _.each(dbs, function(node, name) { %> <% var id = name + "-node"; %> <div class="pure-table-row <%= disabled %>" node="<%= id %>">
  <div class="pure-u-9-24 left"><%= node.ShortName %></div>
  <div class="pure-u-9-24 left"><%= node.Endpoint %></div> <% var formatted = (node.LastHeartbeatAcked).substr(11, 18).slice(0, -1); %> <div class="pure-u-2-24 mid hide-small"><%= formatted %></div>
  <div class="pure-u-2-24 mid"><i class="fa fa-info-circle"></i></div> <% if(node.Status === 'GOOD') { %> <div class="pure-u-2-24 mid state"><i class="fa fa-check-circle tippy" title="<%= node.LastHeartbeatStatus %>"></i></div> <% } else { %> <div class="pure-u-2-24 mid state"><i class="fa fa-exclamation-circle"></i></div> <% } %> </div> <% }); %> </div>
</div>

</div>
@ -2385,7 +2431,7 @@ if (list.length > 0) {
<div class="<%= genClass1 %> left">Sync</div>
</div>
</div> <% var counter = 0; var shardClass; %> <% _.each(collection.Plan, function(shard, name) { %> <div class="pure-g pure-table pure-table-body"> <% if (shard.progress) { %> <% shardClass = 'disabled';%> <% } %> <div class="pure-table-row noHover <%= shardClass %>" collection="<%= collectionName %>" shard="<%= name %>" leader="<%= shard.leader%>"> <% if (shard.leader.substring(0,1) === '_') { %> <% shard.leader = shard.leader.slice(1) %> <% } %> <div class="<%= genClass1 %> left"><%= name %></div>
<div class="shardLeader <%= genClass1 %> positive left"><span><%= shard.leader %></span></div> <% var found = null; %> <% _.each(shard.followers, function(db) { %> <% if (db === shard.leader) { %> <% found = true; %> <% } %> <% }); %> <% if (found) { %> <div class="<%= genClass2 %> mid"><i class="fa fa-circle-o-notch fa-spin"></i></div> <% } else { %> <% if (shard.followers.length === 0) { %> <div class="<%= genClass2 %> left"> no followers </div> <% } else { %> <% var string = ''; %> <% var counter2 = 0; %> <% _.each(shard.followers, function(db) { %> <% if (shard.followers.length === 1) { %> <% string += '<span>' + db + '</span> '; %> <% } else { %> <% if (counter2 === 0) { %> <% string += '<span>' + db + '</span>'; counter2++; %> <% } else { %> <% string += ", " + '<span>' + db + '</span>'; %> <% } %> <% } %> <% }); %> <div class="shardFollowers <%= genClass2 %> left"><%= string %></div> <% } %> <% if (shard.progress) { %> <% var percent; %> <% if (shard.progress.total === 0) { %> <% percent = numeral(0).format('0.00%'); %> <% } else { %> <% percent = numeral(shard.progress.current / shard.progress.total).format('0.00%'); %> <% } %> <div class="<%= genClass1 %> left"><%= percent %></div> <% } else { %> <div class="<%= genClass1 %> left"><i style="margin-left: 10px;" class="fa fa-check-circle"></i></div> <% } %> <% } %> </div>
</div> <% counter++; %> <% }); %> <% } %> <% }); %> <button id="rebalanceShards" style="margin-top: 20px;" class="button-success pull-right">Rebalance Shards</button>
</div></script><script id="shellView.ejs" type="text/template"><div class="headerBar">
<a class="arangoHeader">JS Shell</a>
@ -2685,7 +2731,7 @@ var cutByResolution = function (str) {
<div id="userManagementThumbnailsIn" class="tileList pure-u">
  <div class="tile pure-u-1-1 pure-u-sm-1-2 pure-u-md-1-3 pure-u-lg-1-4 pure-u-xl-1-6">
    <div class="fullBorderBox">
      <a id="createUser" class="add">
        <span id="newUser" class="pull-left add-Icon"><i class="fa fa-plus-circle"></i></span>
        Add User
      </a>
@ -2748,4 +2794,4 @@ var cutByResolution = function (str) {
</div>

<div id="workMonitorContent" class="innerContent">
</div></script></head><body><nav class="navbar" style="display: none"><div class="primary"><div class="navlogo"><a class="logo big" href="#"><img id="ArangoDBLogo" class="arangodbLogo" src="img/arangodb-edition-optimized.svg"></a><a class="logo small" href="#"><img class="arangodbLogo" src="img/arangodb_logo_small.png"></a><a class="version"><span id="currentVersion"></span></a></div><div class="statmenu" id="statisticBar"></div><div class="navmenu" id="navigationBar"></div></div></nav><div id="modalPlaceholder"></div><div class="bodyWrapper" style="display: none"><div class="centralRow"><div id="navbar2" class="navbarWrapper secondary"><div class="subnavmenu" id="subNavigationBar"></div></div><div class="resizecontainer contentWrapper"><div id="loadingScreen" class="loadingScreen" style="display: none"><i class="fa fa-circle-o-notch fa-spin fa-3x fa-fw margin-bottom"></i> <span class="sr-only">Loading...</span></div><div id="content" class="centralContent"></div><footer class="footer"><div id="footerBar"></div></footer></div></div></div><div id="progressPlaceholder" style="display:none"></div><div id="spotlightPlaceholder" style="display:none"></div><div id="graphSettingsContent" style="display: none"></div><div id="filterSelectDiv" style="display:none"></div><div id="offlinePlaceholder" style="display:none"><div class="offline-div"><div class="pure-u"><div class="pure-u-1-4"></div><div class="pure-u-1-2 offline-window"><div class="offline-header"><h3>You have been disconnected from the server</h3></div><div class="offline-body"><p>The connection to the server has been lost. The server may be under heavy load.</p><p>Trying to reconnect in <span id="offlineSeconds">10</span> seconds.</p><p class="animation_state"><span><button class="button-success">Reconnect now</button></span></p></div></div><div class="pure-u-1-4"></div></div></div></div><div class="arangoFrame" style=""><div class="outerDiv"><div class="innerDiv"></div></div></div><script src="libs.js?version=1493213897414"></script><script src="app.js?version=1493213897414"></script></body></html>
@ -1,247 +1,249 @@
<script id="indicesView.ejs" type="text/template">
<% if (typeof supported !== 'undefined') { %>
<div class="contentIn" id="indexHeaderContent">
  <div id="indexEditView">
    <table id="collectionEditIndexTable" class="edit-index-table arango-table">
      <thead>
        <tr class="figuresHeader">
          <th class="collectionInfoTh">ID</th>
          <th class="collectionInfoTh">Type</th>
          <th class="collectionInfoTh">Unique</th>
          <th class="collectionInfoTh">Sparse</th>
          <th class="collectionInfoTh">Selectivity Est.</th>
          <th class="collectionInfoTh">Fields</th>
          <th class="collectionInfoTh">Action</th>
        </tr>
      </thead>
      <tbody>
      </tbody>
      <tfoot>
        <tr>
          <td></td>
          <td></td>
          <td></td>
          <td></td>
          <td></td>
          <td></td>
          <td><i class="fa fa-plus-circle" id="addIndex"></i></td>
        </tr>
      </tfoot>
    </table>
  </div>

  <div id="newIndexView" class="new-index-view" style="display:none">
    <table>
      <tr>
        <th class="collectionTh">Type:</th>
        <th class="">
          <select id="newIndexType">
            <% if (supported.indexOf('geo') > -1) { %>
            <option value="Geo">Geo Index</option>
            <% } %>
            <% if (supported.indexOf('hash') > -1) { %>
            <option value="Hash">Hash Index</option>
            <% } %>
            <% if (supported.indexOf('persistent') > -1) { %>
            <option value="Persistent">Persistent Index</option>
            <% } %>
            <% if (supported.indexOf('fulltext') > -1) { %>
            <option value="Fulltext">Fulltext Index</option>
            <% } %>
            <% if (supported.indexOf('skiplist') > -1) { %>
            <option value="Skiplist">Skip-List Index</option>
            <% } %>
          </select>
        </th>
        <th class="" style="width: 18px"/>
      </tr>
    </table>
    <div id="newIndexTypeGeo" class="newIndexClass" style="display: none">
      <table>
        <tr>
          <th class="collectionTh">Fields:</th>
          <th><input type="text" id="newGeoFields" value=""/></th>
          <th class="tooltipInfoTh">
            <div>
              <a class="index-tooltip" data-toggle="tooltip" data-placement="left" title="A comma-separated list with one or two attribute paths. Example: latitude,longitude">
                <span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
              </a>
            </div>
          </th>
        </tr>
        <tr>
          <th class="collectionTh">Geo JSON:</th>
          <th>
            <input id="newGeoJson" type="checkbox" name="newGeoJson" value="true">
          </th>
          <th class="tooltipInfoTh">
            <div>
              <a class="index-tooltip" data-toggle="tooltip" data-placement="left" title="Set geoJson to true if the coordinates stored in the specified attribute are arrays in the form [longitude,latitude].">
                <span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
              </a>
            </div>
          </th>
        </tr>
      </table>
    </div>
    <div id="newIndexTypePersistent" class="newIndexClass" style="display:none">
      <table>
        <tr>
          <th class="collectionTh">Fields:</th>
          <th><input type="text" id="newPersistentFields" value=""/></th>
          <th class="tooltipInfoTh">
            <div>
              <a class="index-tooltip" data-toggle="tooltip" data-placement="left" title="A comma-separated list of attribute paths.">
                <span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
              </a>
            </div>
          </th>
        </tr>
        <tr>
          <th class="collectionTh">Unique:</th>
          <th>
            <input id="newPersistentUnique" type="checkbox" name="newPersistentUnique" value="true">
          </th>
          <th class="tooltipInfoTh">
            <div>
              <a class="index-tooltip" data-toggle="tooltip" data-placement="left" title="If true, then create a unique index.">
                <span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
              </a>
            </div>
          </th>
        </tr>
        <tr>
          <th class="collectionTh">Sparse:</th>
          <th>
            <input id="newPersistentSparse" type="checkbox" name="newPersistentSparse" value="true">
          </th>
          <th class="tooltipInfoTh">
            <div>
              <a class="index-tooltip" data-toggle="tooltip" data-placement="left" title="If true, then create a sparse index.">
                <span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
              </a>
            </div>
          </th>
        </tr>
      </table>
    </div>
    <div id="newIndexTypeHash" class="newIndexClass" style="display:none">
      <table>
        <tr>
          <th class="collectionTh">Fields:</th>
          <th><input type="text" id="newHashFields" value=""/></th>
          <th class="tooltipInfoTh">
            <div>
              <a class="index-tooltip" data-toggle="tooltip" data-placement="left" title="A comma-separated list of attribute paths.">
                <span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
              </a>
            </div>
          </th>
        </tr>
        <tr>
          <th class="collectionTh">Unique:</th>
          <th>
            <input id="newHashUnique" type="checkbox" name="newHashUnique" value="true">
          </th>
          <th class="tooltipInfoTh">
            <div>
              <a class="index-tooltip" data-toggle="tooltip" data-placement="left" title="If true, then create a unique index.">
                <span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
              </a>
            </div>
          </th>
        </tr>
        <tr>
          <th class="collectionTh">Sparse:</th>
          <th>
            <input id="newHashSparse" type="checkbox" name="newHashSparse" value="true">
          </th>
          <th class="tooltipInfoTh">
            <div>
              <a class="index-tooltip" data-toggle="tooltip" data-placement="left" title="If true, then create a sparse index.">
                <span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
              </a>
            </div>
          </th>
        </tr>
      </table>
    </div>
    <div id="newIndexTypeFulltext" class="newIndexClass" style="display:none">
      <table>
        <tr>
          <th class="collectionTh">Fields:</th>
          <th><input type="text" id="newFulltextFields" value=""/></th>
          <th class="tooltipInfoTh">
            <div>
              <a class="index-tooltip" data-toggle="tooltip" data-placement="left" title="A single attribute path. Example: message.text">
                <span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
              </a>
            </div>
          </th>
        </tr>
        <tr>
          <th class="collectionTh">Min. length:</th>
          <th><input type="text" id="newFulltextMinLength" value=""/></th>
          <th class="tooltipInfoTh">
            <div>
              <a class="index-tooltip" data-toggle="tooltip" data-placement="left" title="Minimum character length of words to index. Will default to a server-defined value if unspecified. It is thus recommended to set this value explicitly when creating the index.">
                <span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
              </a>
            </div>
          </th>
        </tr>
      </table>
    </div>
    <div id="newIndexTypeSkiplist" class="newIndexClass" style="display:none">
      <table>
        <tr>
          <th class="collectionTh">Fields:</th>
          <th><input type="text" id="newSkiplistFields" value=""/></th>
          <th class="tooltipInfoTh">
            <div>
              <a class="index-tooltip" data-toggle="tooltip" data-placement="left" title="A comma-separated list of attribute paths.">
                <span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
              </a>
            </div>
          </th>
        </tr>
        <tr>
          <th class="collectionTh">Unique:</th>
          <th>
            <input id="newSkiplistUnique" type="checkbox" name="newSkiplistUnique" value="true">
          </th>
          <th class="tooltipInfoTh">
            <div>
              <a class="index-tooltip" data-toggle="tooltip" data-placement="left" title="If true, then create a unique index.">
                <span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
              </a>
            </div>
          </th>
        </tr>
        <tr>
          <th class="collectionTh">Sparse:</th>
          <th>
            <input id="newSkiplistSparse" type="checkbox" name="newSkiplistSparse" value="true">
          </th>
          <th class="tooltipInfoTh">
            <div>
              <a class="index-tooltip" data-toggle="tooltip" data-placement="left" title="If true, then create a sparse index.">
                <span rel="tooltip" class="arangoicon icon_arangodb_info"></span>
              </a>
            </div>
          </th>
        </tr>
      </table>
    </div>
    <div id="modal-dialog">
      <div class="modal-footer" style="border: none"></div>
    </div>
    <div class="index-button-bar index-button-bar2">
      <button id="createIndex" class="button-success" style="margin-left: 15px;">Create</button>
      <button id="cancelIndex" class="button-close" style="margin-left: 0;"><i class="fa fa-arrow-left"></i>
        <span style="margin-left: 5px;">Back</span>
      </button>
    </div>
  </div>
  <div id="modal-dialog">
    <div class="modal-footer" style="border: none"></div>
  </div>
</div>
<% } %>
</script>
@ -5,20 +5,22 @@
%>

<table id="collectionInfoTable" class="arango-table">
  <% if (figuresData.journalSize) { %>
  <tr id="collectionSizeBox">
    <th class="collectionInfoTh2">Journal size:</th>
    <th class="collectionInfoTh">
      <div id="show-collection-size" class="modal-text">
        <%=prettyBytes(figuresData.journalSize)%>
      </div>
    </th>
    <th class="tooltipInfoTh">
      <div>
        <span class="modalTooltips arangoicon icon_arangodb_info" title="The maximal size of a journal or datafile (in MB). Must be at least 1.">
        </span>
      </div>
    </th>
  </tr>
  <% } %>
  <tr id="collectionSyncBox">
    <th class="collectionInfoTh2">Wait for sync:</th>
    <th class="collectionInfoTh">
@ -105,161 +107,174 @@
  </tr>
  <% } %>

  <% if (figuresData.indexBuckets) { %>
  <tr>
    <th class="collectionInfoTh2">Index buckets:</th>
    <th class="collectionInfoTh">
      <div class="modal-text"><%=figuresData.indexBuckets%></div>
    </th>
    <th class="collectionInfoTh">
    </th>
  </tr>
  <% } %>
  <% if (!frontendConfig.isCluster) { %>
  <% if (figuresData.figures.compactionStatus) { %>
  <tr>
    <th class="collectionInfoTh2">Compaction status:</th>
    <th class="collectionInfoTh">
      <div class="modal-text"><%=figuresData.figures.compactionStatus.message + " (" + figuresData.figures.compactionStatus.time + ")" %></div>
    </th>
    <th class="collectionInfoTh">
    </th>
  </tr>
  <% } %>
  <% } %>

  <% if (figuresData.figures.waitingFor) { %>
  <tr>
    <th class="collectionInfoTh2">Waiting for:</th>
    <th class="collectionInfoTh">
      <div class="modal-text"><%=figuresData.figures.waitingFor%></div>
    </th>
    <th class="collectionInfoTh">
    </th>
  </tr>
  <% } %>
</table>

</div>

<% if (figuresData.figures.alive && figuresData.figures.dead) { %>
<table class="figures1 arango-table">
  <tr class="figuresHeader">
    <th>Type</th>
    <th>Count</th>
    <th>Size</th>
    <th>Deletion</th>
    <th>Info</th>
  </tr>
  <tr>
    <th class="modal-text">Alive</th>
    <th class="modal-text"><%=numeral(figuresData.figures.alive.count).format('0,0')%></th>
    <th class="modal-text">
      <%=prettyBytes(figuresData.figures.alive.size)%>
    </th>
    <th class="modal-text"> -</th>
    <th class="tooltipInfoTh">
      <span class="modalTooltips arangoicon icon_arangodb_info" title="Total number and size of all living documents.">
      </span>
    </th>
  </tr>
  <tr>
    <th class="modal-text">Dead</th>
    <th class="modal-text"><%=numeral(figuresData.figures.dead.count).format('0,0')%></th>
    <th class="modal-text">
      <%=prettyBytes(figuresData.figures.dead.size)%>
    </th>
    <th class="modal-text"><%=figuresData.figures.dead.deletion%></th>
    <th class="tooltipInfoTh">
      <div>
        <span class="modalTooltips arangoicon icon_arangodb_info" title="Total number and size of all dead documents.">
        </span>
      </div>
    </th>
  </tr>
  <tr><th><div> </div></th></tr>
</table>
<% } %>

<% if (figuresData.figures.datafiles && figuresData.figures.journals && figuresData.figures.compactors) { %>
<table class="figures2 arango-table">
  <tr class="figuresHeader">
    <th>Type</th>
    <th>Count</th>
    <th>Size</th>
    <th></th>
    <th>Info</th>
  </tr>
  <tr>
    <th class="modal-text">Datafiles</th>
    <th class="modal-text"><%=numeral(figuresData.figures.datafiles.count).format('0,0')%></th>
    <th class="modal-text">
      <%= prettyBytes(figuresData.figures.datafiles.fileSize) %>
    </th>
    <th class="modal-text"> </th>
    <th class="tooltipInfoTh">
      <div>
        <span class="modalTooltips arangoicon icon_arangodb_info" data-toggle="tooltip" data-placement="left" title="Number and total size of active datafiles.">
        </span>
      </div>
    </th>
  </tr>
  <tr>
    <th class="modal-text">Journals</th>
    <th class="modal-text"><%=numeral(figuresData.figures.journals.count).format('0,0')%></th>
    <th class="modal-text">
      <%=prettyBytes(figuresData.figures.journals.fileSize)%>
    </th>
    <th class="modal-text"> </th>
    <th class="tooltipInfoTh">
      <span class="modalTooltips arangoicon icon_arangodb_info" title="Number and total size of journal files.">
      </span>
    </th>
  </tr>
  <tr>
    <th class="modal-text">Compactors</th>
    <th class="modal-text"><%=numeral(figuresData.figures.compactors.count).format('0,0')%></th>
    <th class="modal-text">
      <%=prettyBytes(figuresData.figures.compactors.fileSize)%>
    </th>
    <th class="modal-text"> </th>
    <th class="tooltipInfoTh">
      <span class="modalTooltips arangoicon icon_arangodb_info" title="Number and total size of compactor files."></span>
    </th>
  </tr>
  <tr>
    <th class="modal-text">Indexes</th>
    <th class="modal-text"><%=numeral(figuresData.figures.indexes.count).format('0,0')%></th>
    <th class="modal-text">
      <%=prettyBytes(figuresData.figures.indexes.size)%>
    </th>
    <th class="modal-text"> </th>
    <th class="tooltipInfoTh">
      <span class="modalTooltips arangoicon icon_arangodb_info" title="Number and total memory usage of indexes."></span>
    </th>
  </tr>
</table>
<% } %>

<% if (figuresData.figures.documentReferences && figuresData.figures.uncollectedLogfileEntries) { %>
<table class="figures3 arango-table">
  <tr class="figuresHeader">
    <th>Type</th>
    <th>Count</th>
    <th></th>
    <th></th>
    <th>Info</th>
  </tr>
  <tr>
    <th class="modal-text">Uncollected</th>
    <th class="modal-text"><%=figuresData.figures.uncollectedLogfileEntries%></th>
    <th class="modal-text"> </th>
    <th class="modal-text"> </th>
    <th class="tooltipInfoTh">
      <span class="modalTooltips arangoicon icon_arangodb_info" title="Total number of uncollected WAL entries">
      </span>
    </th>
  </tr>
  <tr>
    <th class="modal-text">References</th>
    <th class="modal-text"><%=figuresData.figures.documentReferences%></th>
    <th class="modal-text"> </th>
    <th class="modal-text"> </th>
    <th class="tooltipInfoTh">
      <span class="modalTooltips arangoicon icon_arangodb_info" title="Total number of objects pointing to documents in collection datafiles">
      </span>
    </th>
  </tr>
</table>
<% } %>

</script>
@ -141,7 +141,7 @@
<% _.each(dbs, function(node, name) { %>
<% var id = name + "-node"; %>

<div class="pure-table-row <%= disabled %>" node="<%= id %>">

  <div class="pure-u-9-24 left"><%= node.ShortName %></div>
  <div class="pure-u-9-24 left"><%= node.Endpoint %></div>
@ -313,10 +313,27 @@
createCollection: function (e) {
  e.preventDefault();
  var self = this;

  // Fetch the storage engine first, so the creation modal can adapt
  // its options to the engine that is actually in use.
  $.ajax({
    type: 'GET',
    cache: false,
    url: arangoHelper.databaseUrl('/_api/engine'),
    contentType: 'application/json',
    processData: false,
    success: function (data) {
      self.engine = data;
      self.createNewCollectionModal(data);
    },
    error: function () {
      arangoHelper.arangoError('Engine', 'Could not fetch ArangoDB Engine details.');
    }
  });
},

submitCreateCollection: function () {
  var self = this;
  var callbackCoord = function (error, isCoordinator) {
    if (error) {
      arangoHelper.arangoError('DB', 'Could not check coordinator state');
@ -383,16 +400,19 @@
window.modalView.hide();
}.bind(this);

var tmpObj = {
  collName: collName,
  wfs: wfs,
  isSystem: isSystem,
  replicationFactor: replicationFactor,
  collType: collType,
  shards: shards,
  shardBy: shardBy
};
// The journal size option only applies to engines other than RocksDB.
if (self.engine.name !== 'rocksdb') {
  tmpObj.journalSize = collSize;
}
this.collection.newCollection(tmpObj, callback);
}
}.bind(this);
@ -400,6 +420,7 @@
},

createNewCollectionModal: function () {
  var self = this;
  var callbackCoord2 = function (error, isCoordinator) {
    if (error) {
      arangoHelper.arangoError('DB', 'Could not check coordinator state');
@ -474,22 +495,24 @@
this.submitCreateCollection.bind(this)
)
);
// Only offer the journal size field for engines other than RocksDB.
if (self.engine.name !== 'rocksdb') {
  advancedTableContent.push(
    window.modalView.createTextEntry(
      'new-collection-size',
      'Journal size',
      '',
      'The maximal size of a journal or datafile (in MB). Must be at least 1.',
      '',
      false,
      [
        {
          rule: Joi.string().allow('').optional().regex(/^[0-9]*$/),
          msg: 'Must be a number.'
        }
      ]
    )
  );
}
if (window.App.isCluster) {
  advancedTableContent.push(
    window.modalView.createTextEntry(
@ -110,26 +110,33 @@
var error = 0;

if (Object.keys(window.clusterHealth).length !== 0) {
  _.each(window.clusterHealth, function (node) {
    if (node.Status !== 'GOOD') {
      error++;
    }
  });

  if (error > 0) {
    $('#healthStatus').removeClass('positive');
    $('#healthStatus').addClass('negative');
    if (error === 1) {
      $('.health-state').html(error + ' NODE ERROR');
    } else {
      $('.health-state').html(error + ' NODES ERROR');
    }
    $('.health-icon').html('<i class="fa fa-exclamation-circle"></i>');
  } else {
    $('#healthStatus').removeClass('negative');
    $('#healthStatus').addClass('positive');
    $('.health-state').html('NODES OK');
    $('.health-icon').html('<i class="fa fa-check-circle"></i>');
  }
} else {
  // No health data available at all.
  $('.health-state').html('HEALTH ERROR');
  $('#healthStatus').removeClass('positive');
  $('#healthStatus').addClass('negative');
}
};
@ -1606,14 +1606,14 @@
newNode.originalColor = newNode.color;
self.currentGraph.graph.addNode(newNode);
newNodeCounter++;
}
});

// Only add edges that are not yet part of the graph.
_.each(newEdges, function (edge) {
  if (self.currentGraph.graph.edges(edge.id) === undefined) {
    edge.originalColor = edge.color;
    self.currentGraph.graph.addEdge(edge);
    newEdgeCounter++;
  }
});
@ -63,7 +63,7 @@
}
} else {
if (availableDbs) {
  if (availableDbs.indexOf(rule) > -1) {
    $('#loginDatabase').append(
      '<option>' + rule + '</option>'
    );
@ -87,7 +87,6 @@
successFunc(availableDbs);
});
} catch (ignore) {
  successFunc();
}
}).error(function () {
@ -56,11 +56,21 @@
}

var renderObj = {};
if (model.name) {
  renderObj.Name = model.name;
}
if (model.address) {
  renderObj.Address = model.address;
}
if (model.status) {
  renderObj.Status = model.status;
}
if (model.protocol) {
  renderObj.Protocol = model.protocol;
}
if (model.role) {
  renderObj.Role = model.role;
}
this.$el.html(this.template.render({entries: renderObj}));
},
@ -12,6 +12,7 @@
events: {
  'click #nodesContent .coords-nodes .pure-table-row': 'navigateToNode',
  'click #nodesContent .dbs-nodes .pure-table-row': 'navigateToInfo',
  'click #nodesContent .coords-nodes .pure-table-row .fa-trash-o': 'deleteNode',
  'click #addCoord': 'addCoord',
  'click #removeCoord': 'removeCoord',
@ -128,6 +129,13 @@
return false;
},

navigateToInfo: function (elem) {
  var name = $(elem.currentTarget).attr('node').slice(0, -5);
  if ($(elem.target).hasClass('fa-info-circle')) {
    window.App.navigate('#nodeInfo/' + encodeURIComponent(name), {trigger: true});
  }
},

navigateToNode: function (elem) {
  var name = $(elem.currentTarget).attr('node').slice(0, -5);
@ -303,7 +311,6 @@
this.setCoordSize(coords);
this.setDBsSize(dbs);
} catch (ignore) {
  arangoHelper.arangoError('Plan', 'Could not abort Cluster Plan');
}
},