mirror of https://gitee.com/bigwinds/arangodb
commit f534421c74: Merge branch 'devel' of https://github.com/arangodb/arangodb into devel
@@ -618,8 +618,8 @@ fi
if test "${DOWNLOAD_STARTER}" == 1; then
    # we utilize https://developer.github.com/v3/repos/ to get the newest release:
-   STARTER_REV=`curl -s https://api.github.com/repos/arangodb-helper/ArangoDBStarter/releases |grep tag_name |head -n 1 |${SED} -e "s;.*: ;;" -e 's;";;g' -e 's;,;;'`
-   STARTER_URL=`curl -s https://api.github.com/repos/arangodb-helper/ArangoDBStarter/releases/tags/${STARTER_REV} |grep browser_download_url |grep "${OSNAME}" |${SED} -e "s;.*: ;;" -e 's;";;g' -e 's;,;;'`
+   STARTER_REV=`curl -s https://api.github.com/repos/arangodb-helper/arangodb/releases |grep tag_name |head -n 1 |${SED} -e "s;.*: ;;" -e 's;";;g' -e 's;,;;'`
+   STARTER_URL=`curl -s https://api.github.com/repos/arangodb-helper/arangodb/releases/tags/${STARTER_REV} |grep browser_download_url |grep "${OSNAME}" |${SED} -e "s;.*: ;;" -e 's;";;g' -e 's;,;;'`
    if test -n "${STARTER_URL}"; then
        mkdir -p ${BUILD_DIR}
        if test "${isCygwin}" == 1; then
@@ -653,11 +653,11 @@ fi
    PARTIAL_STATE=$?
    set -e

-   if test "${isCygwin}" == 0 -a "${PARTIAL_STATE}" == 0; then
+   if test "${isCygwin}" == 1 -a "${PARTIAL_STATE}" == 1; then
        # windows fails to partially re-configure - so do a complete configure run.
        if test -f CMakeFiles/generate.stamp -a CMakeFiles/generate.stamp -ot "${SOURCE_DIR}/CMakeList.txt"; then
            echo "CMakeList older - Forcing complete configure run!"
-           PARTIAL_STATE=1
+           PARTIAL_STATE=0
        fi
    fi
@@ -0,0 +1,135 @@
!define StrRep "!insertmacro StrRep"
!macro StrRep output string old new
    Push `${string}`
    Push `${old}`
    Push `${new}`
    !ifdef __UNINSTALL__
        Call un.StrRep
    !else
        Call StrRep
    !endif
    Pop ${output}
!macroend

!macro Func_StrRep un
    Function ${un}StrRep
        Exch $R2 ;new
        Exch 1
        Exch $R1 ;old
        Exch 2
        Exch $R0 ;string
        Push $R3
        Push $R4
        Push $R5
        Push $R6
        Push $R7
        Push $R8
        Push $R9

        StrCpy $R3 0
        StrLen $R4 $R1
        StrLen $R6 $R0
        StrLen $R9 $R2
        loop:
            StrCpy $R5 $R0 $R4 $R3
            StrCmp $R5 $R1 found
            StrCmp $R3 $R6 done
            IntOp $R3 $R3 + 1 ;move offset by 1 to check the next character
            Goto loop
        found:
            StrCpy $R5 $R0 $R3
            IntOp $R8 $R3 + $R4
            StrCpy $R7 $R0 "" $R8
            StrCpy $R0 $R5$R2$R7
            StrLen $R6 $R0
            IntOp $R3 $R3 + $R9 ;move offset by length of the replacement string
            Goto loop
        done:

        Pop $R9
        Pop $R8
        Pop $R7
        Pop $R6
        Pop $R5
        Pop $R4
        Pop $R3
        Push $R0
        Push $R1
        Pop $R0
        Pop $R1
        Pop $R0
        Pop $R2
        Exch $R1
    FunctionEnd
!macroend
!insertmacro Func_StrRep ""
!insertmacro Func_StrRep "un."

Function RIF

    ClearErrors ; want to be a newborn

    Exch $0 ; REPLACEMENT
    Exch
    Exch $1 ; SEARCH_TEXT
    Exch 2
    Exch $2 ; SOURCE_FILE

    Push $R0 ; SOURCE_FILE file handle
    Push $R1 ; temporary file handle
    Push $R2 ; unique temporary file name
    Push $R3 ; a line to sar/save
    Push $R4 ; shift buffer

    IfFileExists $2 +1 RIF_error ; knock-knock
    FileOpen $R0 $2 "r" ; open the door

    GetTempFileName $R2 ; who's new?
    FileOpen $R1 $R2 "w" ; the escape, please!

    RIF_loop: ; round'n'round we go
        FileRead $R0 $R3 ; read one line
        IfErrors RIF_leaveloop ; enough is enough
        RIF_sar: ; sar - search and replace
            Push "$R3" ; (hair)stack
            Push "$1" ; needle
            Push "$0" ; blood
            Call StrRep ; do the bartwalk
            StrCpy $R4 "$R3" ; remember previous state
            Pop $R3 ; gimme s.th. back in return!
            StrCmp "$R3" "$R4" +1 RIF_sar ; loop, might change again!
        FileWrite $R1 "$R3" ; save the newbie
    Goto RIF_loop ; gimme more

    RIF_leaveloop: ; over'n'out, Sir!
        FileClose $R1 ; S'rry, Ma'am - clos'n now
        FileClose $R0 ; me 2

        Delete "$2.old" ; go away, Sire
        Rename "$2" "$2.old" ; step aside, Ma'am
        Rename "$R2" "$2" ; hi, baby!

        ClearErrors ; now i AM a newborn
        Goto RIF_out ; out'n'away

    RIF_error: ; ups - s.th. went wrong...
        SetErrors ; ...so cry, boy!

    RIF_out: ; your wardrobe?
        Pop $R4
        Pop $R3
        Pop $R2
        Pop $R1
        Pop $R0
        Pop $2
        Pop $0
        Pop $1

FunctionEnd

!macro _ReplaceInFile SOURCE_FILE SEARCH_TEXT REPLACEMENT
    Push "${SOURCE_FILE}"
    Push "${SEARCH_TEXT}"
    Push "${REPLACEMENT}"
    Call RIF
!macroend
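Taken together, StrRep and RIF implement a line-oriented replace-in-file: replace every occurrence of a search string in each line, write the result to a temporary file, then swap the temporary file into place. As a reading aid only, here is a rough C++ equivalent of the two NSIS routines above; the ".tmp" naming, error handling, and the main() usage are illustrative, not part of the installer:

#include <cstdio>
#include <fstream>
#include <string>

// Replace every occurrence of `from` with `to` in `line` (what StrRep does).
static void replaceAll(std::string& line, std::string const& from, std::string const& to) {
  if (from.empty()) return;
  std::size_t pos = 0;
  while ((pos = line.find(from, pos)) != std::string::npos) {
    line.replace(pos, from.size(), to);
    pos += to.size();  // skip past the replacement, like StrRep's offset bump
  }
}

// Line-by-line replace-in-file via a temp file plus rename (what RIF does).
static bool replaceInFile(std::string const& path, std::string const& from, std::string const& to) {
  std::ifstream in(path);
  if (!in) return false;                       // file does not exist ("knock-knock" failed)
  std::string tmp = path + ".tmp";             // RIF uses GetTempFileName instead
  std::ofstream out(tmp);
  if (!out) return false;
  std::string line;
  while (std::getline(in, line)) {
    replaceAll(line, from, to);
    out << line << '\n';
  }
  in.close();
  out.close();
  std::remove((path + ".old").c_str());        // Delete "$2.old"
  std::rename(path.c_str(), (path + ".old").c_str());  // keep a backup
  std::rename(tmp.c_str(), path.c_str());      // move the rewritten file into place
  return true;
}

int main() {
  // mirrors the installer's eventual use: switch the configured storage engine
  return replaceInFile("arangod.conf", "storage-engine = auto", "storage-engine = rocksdb") ? 0 : 1;
}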
@@ -1,11 +1,11 @@
[Settings]
-NumFields=9
+NumFields=11

[Field 1]
Type=label
Text=By default @CPACK_PACKAGE_INSTALL_DIRECTORY@ does not add its directory to the system PATH.
Left=0
-Right=-1
+Right=140
Top=0
Bottom=20

@@ -13,7 +13,7 @@ Bottom=20
Type=radiobutton
Text=as service into the default directory
Left=5
-Right=-1
+Right=140
Top=30
Bottom=40
State=1

@@ -22,7 +22,7 @@ State=1
Type=radiobutton
Text=for all users
Left=5
-Right=-1
+Right=140
Top=40
Bottom=50
State=0

@@ -31,7 +31,7 @@ State=0
Type=radiobutton
Text=for the current user
Left=5
-Right=-1
+Right=140
Top=50
Bottom=60
State=0

@@ -67,7 +67,7 @@ State=
Type=label
Text=Install @CPACK_PACKAGE_NAME@
Left=0
-Right=-1
+Right=160
Top=20
Bottom=29

@@ -78,3 +78,21 @@ Left=0
Right=-1
Top=75
Bottom=85

+[Field 10]
+Type=label
+Text=Choose the storage engine to use for this ArangoDB installation
+Left=160
+Right=-1
+Top=0
+Bottom=20
+
+[Field 11]
+Type=Droplist
+ListItems=auto|mmfiles|rocksdb
+State=auto
+Left=160
+Right=-1
+Top=20
+Bottom=29
@@ -4,7 +4,8 @@
!addplugindir '@CPACK_PLUGIN_PATH@/AccessControl/Plugins'
!addplugindir '@CPACK_PLUGIN_PATH@/SharedMemory/Plugins'
!addincludedir '@CPACK_PLUGIN_PATH@/UAC-plug-in-NSIS'
-
+!addincludedir '@CPACK_PLUGIN_PATH@/'
+!include StrRep.nsh
;--------------------------------
; Include LogicLib for more readable code
!include "LogicLib.nsh"

@@ -47,7 +48,8 @@
Var INSTALL_DESKTOP ; x bool: add desktop icon
Var IS_DEFAULT_INSTALLDIR

+Var STORAGE_ENGINE ; x string auto/mmfiles/rocksdb
Var PASSWORD ; x string
Var PASSWORD_AGAIN ; x string / only for comparison

@@ -566,6 +568,7 @@ FunctionEnd
;--------------------------------

Function WaitForServiceUp
+  DetailPrint "starting ArangoDB Service..."
  Push 0
  Pop $retryCount
try_again:

@@ -579,7 +582,7 @@ Function WaitForServiceUp
  ${EndIf}
  Sleep 1000
  ${If} $retryCount == 40
-    MessageBox MB_OK "Service waiting retry count reached"
+    MessageBox MB_OK "Waited 40 seconds for ArangoDB to come up; Please look at the Windows Eventlog for eventual errors!"
    Return
  ${EndIf}
  IntOp $retryCount $retryCount + 1

@@ -589,6 +592,7 @@ FunctionEnd
;--------------------------------

Function WaitForServiceDown
+  DetailPrint "stopping ArangoDB Service..."
  Push 0
  Pop $retryCount
try_again:

@@ -602,7 +606,7 @@ Function WaitForServiceDown
  ${EndIf}
  Sleep 1000
  ${If} $retryCount == 40
-    MessageBox MB_OK "Service shutdown waiting retry count reached; you may need to remove files by hand"
+    MessageBox MB_OK "Waited 40 seconds for the ArangoDB Service to shutdown; you may need to remove files by hand"
    Return
  ${EndIf}
  IntOp $retryCount $retryCount + 1

@@ -610,6 +614,7 @@ Function WaitForServiceDown
FunctionEnd

Function un.WaitForServiceDown
+  DetailPrint "stopping ArangoDB Service..."
  Push 0
  Pop $retryCount
try_again:

@@ -623,7 +628,7 @@ Function un.WaitForServiceDown
  ${EndIf}
  Sleep 1000
  ${If} $retryCount == 40
-    MessageBox MB_OK "Service shutdown waiting retry count reached; you may need to remove files by hand"
+    MessageBox MB_OK "Waited 40 seconds for the ArangoDB Service to shutdown; you may need to remove files by hand"
    Return
  ${EndIf}
  IntOp $retryCount $retryCount + 1

@@ -779,6 +784,12 @@ Section "-Core installation"
; this variable was defined by eld and included in NSIS.template.in
; we probably need this for the install/uninstall software list.
SetRegView ${BITS}

+StrCmp $TRI_INSTALL_TYPE 'Service' 0 noServiceToStop
+  SimpleSC::StopService '${TRI_SVC_NAME}' 0 30
+  Call WaitForServiceDown
+  SimpleSC::RemoveService '${TRI_SVC_NAME}'
+noServiceToStop:
@CPACK_NSIS_FULL_INSTALL@

;Store installation folder

@@ -861,6 +872,8 @@ Section "-Core installation"
!insertmacro MUI_STARTMENU_WRITE_END

+!insertmacro _ReplaceInFile "$INSTDIR\etc\arangodb3\arangod.conf" "storage-engine = auto" "storage-engine = $STORAGE_ENGINE"
+
System::Call 'Kernel32::SetEnvironmentVariable(t, t)i ("ARANGODB_DEFAULT_ROOT_PASSWORD", "$PASSWORD").r0'
StrCmp $0 0 error
ExecWait "$INSTDIR\${SBIN_DIR}\arangod.exe --database.init-database"

@@ -870,9 +883,6 @@ Section "-Core installation"
done:
@CPACK_NSIS_EXTRA_INSTALL_COMMANDS@
StrCmp $TRI_INSTALL_TYPE 'Service' 0 nothing
-  SimpleSC::StopService '${TRI_SVC_NAME}' 0 30
-  Call WaitForServiceDown
-  SimpleSC::RemoveService '${TRI_SVC_NAME}'
  SimpleSC::InstallService '${TRI_SVC_NAME}' '${TRI_SVC_NAME}' '16' '2' '"$INSTDIR\${SBIN_DIR}\arangod.exe" --start-service' '' '' ''
  SimpleSC::SetServiceDescription '${TRI_SVC_NAME}' '${TRI_FRIENDLY_SVC_NAME}'
  SimpleSC::StartService '${TRI_SVC_NAME}' '' 30

@@ -923,6 +933,7 @@ displayAgain:
!insertmacro MUI_INSTALLOPTIONS_READ $DO_NOT_ADD_TO_PATH "NSIS.InstallOptions.ini" "Field 2" "State"
!insertmacro MUI_INSTALLOPTIONS_READ $ADD_TO_PATH_ALL_USERS "NSIS.InstallOptions.ini" "Field 3" "State"
!insertmacro MUI_INSTALLOPTIONS_READ $ADD_TO_PATH_CURRENT_USER "NSIS.InstallOptions.ini" "Field 4" "State"
+!insertmacro MUI_INSTALLOPTIONS_READ $STORAGE_ENGINE "NSIS.InstallOptions.ini" "Field 11" "State"

StrCmp $PASSWORD $PASSWORD_AGAIN +3 0
  MessageBox MB_OK|MB_ICONSTOP "Passwords don't match, try again"

@@ -1401,6 +1412,12 @@ Function .onInit
IfErrors 0 +3
  StrCpy $ADD_TO_PATH_CURRENT_USER "0"

+${GetParameters} $R0
+ClearErrors
+${GetOptions} $R0 "/STORAGE_ENGINE=" $STORAGE_ENGINE
+IfErrors 0 +3
+  StrCpy $ADD_TO_PATH_CURRENT_USER "0"
+
IfSilent 0 dontValidatePathOption
StrCpy $allPathOpts "0"
IntOp $allPathOpts $allPathOpts + $DO_NOT_ADD_TO_PATH

@@ -1450,6 +1467,7 @@ inst:
; Reads components status for registry
!insertmacro SectionList "InitSection"

+StrCpy $STORAGE_ENGINE "auto"
; check to see if /D has been used to change
; the install directory by comparing it to the
; install directory that is expected to be the
@@ -152,6 +152,7 @@ Node::Node(Node&& other)
  _parent(nullptr),
+ _store(nullptr),
  _children(std::move(other._children)),
  _ttl(std::move(other._ttl)),
  _value(std::move(other._value)),
  _vecBuf(std::move(other._vecBuf)),
  _vecBufDirty(std::move(other._vecBufDirty)),

@@ -162,6 +163,7 @@ Node::Node(Node const& other)
: _nodeName(other._nodeName),
  _parent(nullptr),
+ _store(nullptr),
  _ttl(other._ttl),
  _value(other._value),
  _vecBuf(other._vecBuf),
  _vecBufDirty(other._vecBufDirty),

@@ -203,14 +205,14 @@ Node& Node::operator=(Node&& rhs) {
  // 1. remove any existing time to live entry
  // 2. move children map over
  // 3. move value over
-  // Must not move over rhs's _parent, _ttl, _observers
  removeTimeToLive();
+  // Must not move over rhs's _parent, _observers
  _nodeName = std::move(rhs._nodeName);
  _children = std::move(rhs._children);
  _value = std::move(rhs._value);
  _vecBuf = std::move(rhs._vecBuf);
  _vecBufDirty = std::move(rhs._vecBufDirty);
  _isArray = std::move(rhs._isArray);
+  _ttl = std::move(rhs._ttl);
  return *this;
}

@@ -219,7 +221,7 @@ Node& Node::operator=(Node const& rhs) {
  // 1. remove any existing time to live entry
  // 2. clear children map
  // 3. move from rhs to buffer pointer
-  // Must not move rhs's _parent, _ttl, _observers
+  // Must not move rhs's _parent, _observers
  removeTimeToLive();
  _nodeName = rhs._nodeName;
  _children.clear();

@@ -231,6 +233,7 @@ Node& Node::operator=(Node const& rhs) {
  _vecBuf = rhs._vecBuf;
  _vecBufDirty = rhs._vecBufDirty;
  _isArray = rhs._isArray;
+  _ttl = rhs._ttl;
  return *this;
}

@@ -286,9 +289,9 @@ Node const& Node::operator()(std::vector<std::string> const& pv) const {
  if (!pv.empty()) {
    auto const& key = pv.front();
    auto const it = _children.find(key);
-    if (it == _children.end()/* ||
-        (it->second->_ttl != std::chrono::system_clock::time_point() &&
-         it->second->_ttl < std::chrono::system_clock::now())*/) {
+    if (it == _children.end() ||
+        (it->second->_ttl != std::chrono::system_clock::time_point() &&
+         it->second->_ttl < std::chrono::system_clock::now())) {
      throw StoreException(std::string("Node ") + key + " not found!");
    }
    auto const& child = *_children.at(key);

@@ -696,15 +699,19 @@ bool Node::applies(VPackSlice const& slice) {
}

void Node::toBuilder(Builder& builder, bool showHidden) const {
+  typedef std::chrono::system_clock clock;
  try {
    if (type() == NODE) {
      VPackObjectBuilder guard(&builder);
      for (auto const& child : _children) {
-        if (child.first[0] == '.' && !showHidden) {
+        auto const& cptr = child.second;
+        if ((cptr->_ttl != clock::time_point() && cptr->_ttl < clock::now()) ||
+            (child.first[0] == '.' && !showHidden)) {
          continue;
        }
        builder.add(VPackValue(child.first));
-        child.second->toBuilder(builder);
+        cptr->toBuilder(builder);
      }
    } else {
      if (!slice().isNone()) {
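The expiry test added to toBuilder() treats a default-constructed time_point as a "no TTL set" sentinel: an entry is skipped during serialization only if a TTL was set and has already passed. A self-contained sketch of that sentinel pattern (toy Entry type, not the agency's Node class):

#include <chrono>
#include <iostream>
#include <map>
#include <string>

using clock_type = std::chrono::system_clock;

struct Entry {
  std::string value;
  clock_type::time_point ttl;  // default-constructed == "never expires"
};

// An entry is visible unless a TTL was set and has already passed,
// mirroring the check added in Node::toBuilder().
bool isVisible(Entry const& e) {
  return e.ttl == clock_type::time_point() || e.ttl >= clock_type::now();
}

int main() {
  std::map<std::string, Entry> children;
  children["permanent"] = Entry{"stays", clock_type::time_point()};
  children["expired"] = Entry{"hidden", clock_type::now() - std::chrono::seconds(1)};
  for (auto const& [key, entry] : children) {
    if (!isVisible(entry)) continue;  // skip expired children during serialization
    std::cout << key << " -> " << entry.value << '\n';
  }
}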
@@ -648,7 +648,7 @@ void Store::run() {

      toClear = clearExpired();
      if (_agent && _agent->leading()) {
-        _agent->write(toClear);
+        //_agent->write(toClear);
      }
    }
  }
File diff suppressed because it is too large
@@ -198,6 +198,8 @@ void prepareTraversalsRule(Optimizer* opt, std::unique_ptr<ExecutionPlan> plan,

/// @brief moves simple subqueries one level higher
void inlineSubqueriesRule(Optimizer*, std::unique_ptr<ExecutionPlan>, OptimizerRule const*);

+void geoIndexRule(aql::Optimizer* opt, std::unique_ptr<aql::ExecutionPlan> plan, aql::OptimizerRule const* rule);
+
}  // namespace aql
}  // namespace arangodb
@@ -221,6 +221,10 @@ void OptimizerRulesFeature::addRules() {
  // patch update statements
  registerRule("patch-update-statements", patchUpdateStatementsRule,
               OptimizerRule::patchUpdateStatementsRule_pass9, DoesNotCreateAdditionalPlans, CanBeDisabled);

+  // apply the geo-index optimization
+  OptimizerRulesFeature::registerRule("geo-index-optimizer", geoIndexRule,
+                                      OptimizerRule::applyGeoIndexRule, false, true);
+
  if (arangodb::ServerState::instance()->isCoordinator()) {
    // distribute operations in cluster
@@ -37,7 +37,6 @@
#include "Logger/LogAppender.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
-//#include "RestServer/ServerFeature.h"
#include "Scheduler/Scheduler.h"
#include "Scheduler/SchedulerFeature.h"
@@ -46,8 +46,6 @@
#include <stack>
#include <utility>

-#include <iostream> // TODO
-
using namespace arangodb::cache;

const uint64_t Manager::minSize = 1024 * 1024;
@@ -118,6 +118,9 @@ void ClusterCommResult::setDestination(std::string const& dest,
  endpoint = ci->getServerEndpoint(serverID);
  if (endpoint.empty()) {
    status = CL_COMM_BACKEND_UNAVAILABLE;
+    if (serverID.find(',') != std::string::npos) {
+      TRI_ASSERT(false);
+    }
    errorMessage = "did not find endpoint of server '" + serverID + "'";
    if (logConnectionErrors) {
      LOG_TOPIC(ERR, Logger::CLUSTER)
@@ -41,549 +41,12 @@ using namespace arangodb::aql;
using EN = arangodb::aql::ExecutionNode;

void MMFilesOptimizerRules::registerResources() {
  // patch update statements
  OptimizerRulesFeature::registerRule("geo-index-optimizer", geoIndexRule,
                                      OptimizerRule::applyGeoIndexRule, false, true);

  // remove SORT RAND() if appropriate
  OptimizerRulesFeature::registerRule("remove-sort-rand", removeSortRandRule,
                                      OptimizerRule::removeSortRandRule_pass5, false, true);
}

struct MMFilesGeoIndexInfo {
  operator bool() const { return distanceNode && valid; }
  void invalidate() { valid = false; }
  MMFilesGeoIndexInfo()
    : collectionNode(nullptr)
    , executionNode(nullptr)
    , indexNode(nullptr)
    , setter(nullptr)
    , expressionParent(nullptr)
    , expressionNode(nullptr)
    , distanceNode(nullptr)
    , index(nullptr)
    , range(nullptr)
    , executionNodeType(EN::NORESULTS)
    , within(false)
    , lessgreaterequal(false)
    , valid(true)
    , constantPair{nullptr, nullptr}
  {}
  EnumerateCollectionNode* collectionNode; // node that will be replaced by (geo) IndexNode
  ExecutionNode* executionNode; // start node that is a sort or filter
  IndexNode* indexNode; // AstNode that is the parent of the Node
  CalculationNode* setter; // node that contains the condition for filter or sort
  AstNode* expressionParent; // AstNode that is the parent of the Node
  AstNode* expressionNode; // AstNode that contains the sort/filter condition
  AstNode* distanceNode; // AstNode that contains the distance parameters
  std::shared_ptr<arangodb::Index> index; // pointer to geoindex
  AstNode const* range; // range for within
  ExecutionNode::NodeType executionNodeType; // type of execution node sort or filter
  bool within; // is this a within lookup
  bool lessgreaterequal; // is this a check for le/ge (true) or lt/gt (false)
  bool valid; // does this node contain a valid condition
  std::vector<std::string> longitude; // access path to longitude
  std::vector<std::string> latitude; // access path to latitude
  std::pair<AstNode*, AstNode*> constantPair;
};

// candidate checking

AstNode* isValueOrRefNode(AstNode* node) {
  // TODO - implement me
  return node;
}

MMFilesGeoIndexInfo isDistanceFunction(AstNode* distanceNode, AstNode* expressionParent) {
  // the expression must exist and it must be a function call
  auto rv = MMFilesGeoIndexInfo{};
  if (distanceNode->type != NODE_TYPE_FCALL) {
    return rv;
  }

  // get the ast node of the expression
  auto func = static_cast<Function const*>(distanceNode->getData());

  // we're looking for "DISTANCE()", which is a function call
  // with an empty parameters array
  if (func->externalName != "DISTANCE" || distanceNode->numMembers() != 1) {
    return rv;
  }
  rv.distanceNode = distanceNode;
  rv.expressionNode = distanceNode;
  rv.expressionParent = expressionParent;
  return rv;
}

MMFilesGeoIndexInfo isGeoFilterExpression(AstNode* node, AstNode* expressionParent) {
  // binary compare must be on top
  bool dist_first = true;
  bool lessEqual = true;
  auto rv = MMFilesGeoIndexInfo{};
  if (node->type != NODE_TYPE_OPERATOR_BINARY_GE
      && node->type != NODE_TYPE_OPERATOR_BINARY_GT
      && node->type != NODE_TYPE_OPERATOR_BINARY_LE
      && node->type != NODE_TYPE_OPERATOR_BINARY_LT) {
    return rv;
  }
  if (node->type == NODE_TYPE_OPERATOR_BINARY_GE || node->type == NODE_TYPE_OPERATOR_BINARY_GT) {
    dist_first = false;
  }
  if (node->type == NODE_TYPE_OPERATOR_BINARY_GT || node->type == NODE_TYPE_OPERATOR_BINARY_LT) {
    lessEqual = false;
  }

  if (node->numMembers() != 2) {
    return rv;
  }

  AstNode* first = node->getMember(0);
  AstNode* second = node->getMember(1);

  auto eval_stuff = [](bool dist_first, bool lessEqual, MMFilesGeoIndexInfo&& dist_fun, AstNode* value_node) {
    if (dist_first && dist_fun && value_node) {
      dist_fun.within = true;
      dist_fun.range = value_node;
      dist_fun.lessgreaterequal = lessEqual;
    } else {
      dist_fun.invalidate();
    }
    return dist_fun;
  };

  rv = eval_stuff(dist_first, lessEqual, isDistanceFunction(first, expressionParent), isValueOrRefNode(second));
  if (!rv) {
    rv = eval_stuff(dist_first, lessEqual, isDistanceFunction(second, expressionParent), isValueOrRefNode(first));
  }

  if (rv) {
    // this must be set after checking if the node contains a distance node.
    rv.expressionNode = node;
  }

  return rv;
}

MMFilesGeoIndexInfo iterativePreorderWithCondition(EN::NodeType type, AstNode* root, MMFilesGeoIndexInfo (*condition)(AstNode*, AstNode*)) {
  // returns on first hit
  if (!root) {
    return MMFilesGeoIndexInfo{};
  }
  std::vector<std::pair<AstNode*, AstNode*>> nodestack;
  nodestack.push_back({root, nullptr});

  while (nodestack.size()) {
    auto current = nodestack.back();
    nodestack.pop_back();
    MMFilesGeoIndexInfo rv = condition(current.first, current.second);
    if (rv) {
      return rv;
    }

    if (type == EN::FILTER) {
      if (current.first->type == NODE_TYPE_OPERATOR_BINARY_AND || current.first->type == NODE_TYPE_OPERATOR_NARY_AND) {
        for (std::size_t i = 0; i < current.first->numMembers(); ++i) {
          nodestack.push_back({current.first->getMember(i), current.first});
        }
      }
    } else if (type == EN::SORT) {
      // must be the only sort condition
    }
  }
  return MMFilesGeoIndexInfo{};
}

MMFilesGeoIndexInfo geoDistanceFunctionArgCheck(std::pair<AstNode const*, AstNode const*> const& pair,
                                                ExecutionPlan* plan, MMFilesGeoIndexInfo info) {
  std::pair<Variable const*, std::vector<arangodb::basics::AttributeName>> attributeAccess1;
  std::pair<Variable const*, std::vector<arangodb::basics::AttributeName>> attributeAccess2;

  // first and second should be based on the same document - need to provide the document
  // in order to see which collection is bound to it and if that collection supports a geo-index
  if (!pair.first->isAttributeAccessForVariable(attributeAccess1) ||
      !pair.second->isAttributeAccessForVariable(attributeAccess2)) {
    info.invalidate();
    return info;
  }

  TRI_ASSERT(attributeAccess1.first != nullptr);
  TRI_ASSERT(attributeAccess2.first != nullptr);

  // expect access of the form doc.attribute
  auto setter1 = plan->getVarSetBy(attributeAccess1.first->id);
  auto setter2 = plan->getVarSetBy(attributeAccess2.first->id);

  if (setter1 != nullptr &&
      setter2 != nullptr &&
      setter1 == setter2 &&
      setter1->getType() == EN::ENUMERATE_COLLECTION) {
    auto collNode = reinterpret_cast<EnumerateCollectionNode*>(setter1);
    auto coll = collNode->collection(); // what kind of indexes does it have on what attributes
    auto lcoll = coll->getCollection();
    // TODO - check collection for suitable geo-indexes
    for (auto indexShardPtr : lcoll->getIndexes()) {
      // get real index
      arangodb::Index& index = *indexShardPtr.get();

      // check if current index is a geo-index
      if (index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO1_INDEX
          && index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO2_INDEX) {
        continue;
      }

      TRI_ASSERT(index.fields().size() == 2);

      // check that the access paths of the attributes in the ast and those in the index match
      if (index.fields()[0] == attributeAccess1.second &&
          index.fields()[1] == attributeAccess2.second) {
        info.collectionNode = collNode;
        info.index = indexShardPtr;
        TRI_AttributeNamesJoinNested(attributeAccess1.second, info.longitude, true);
        TRI_AttributeNamesJoinNested(attributeAccess2.second, info.latitude, true);
        return info;
      }
    }
  }

  info.invalidate();
  return info;
}

bool checkDistanceArguments(MMFilesGeoIndexInfo& info, ExecutionPlan* plan) {
  if (!info) {
    return false;
  }

  auto const& functionArguments = info.distanceNode->getMember(0);
  if (functionArguments->numMembers() < 4) {
    return false;
  }

  std::pair<AstNode*, AstNode*> argPair1 = {functionArguments->getMember(0), functionArguments->getMember(1)};
  std::pair<AstNode*, AstNode*> argPair2 = {functionArguments->getMember(2), functionArguments->getMember(3)};

  MMFilesGeoIndexInfo result1 = geoDistanceFunctionArgCheck(argPair1, plan, info /*copy*/);
  MMFilesGeoIndexInfo result2 = geoDistanceFunctionArgCheck(argPair2, plan, info /*copy*/);
  // info now contains the access path to the collection

  // xor - only one argument pair shall have a geoIndex
  if ((!result1 && !result2) || (result1 && result2)) {
    info.invalidate();
    return false;
  }

  MMFilesGeoIndexInfo res;
  if (result1) {
    info = std::move(result1);
    info.constantPair = std::move(argPair2);
  } else {
    info = std::move(result2);
    info.constantPair = std::move(argPair1);
  }

  return true;
}

// checks a single sort or filter node
MMFilesGeoIndexInfo identifyGeoOptimizationCandidate(ExecutionNode::NodeType type, ExecutionPlan* plan, ExecutionNode* n) {
  ExecutionNode* setter = nullptr;
  auto rv = MMFilesGeoIndexInfo{};
  switch (type) {
    case EN::SORT: {
      auto node = static_cast<SortNode*>(n);
      auto& elements = node->getElements();

      // we're looking for "SORT DISTANCE(x,y,a,b) ASC", which has just one sort criterion
      if (!(elements.size() == 1 && elements[0].ascending)) {
        // the second test makes sure the SORT is ascending
        return rv;
      }

      // variable of sort expression
      auto variable = elements[0].var;
      TRI_ASSERT(variable != nullptr);

      //// find the expression that is bound to the variable
      // get the expression node that holds the calculation
      setter = plan->getVarSetBy(variable->id);
    } break;

    case EN::FILTER: {
      auto node = static_cast<FilterNode*>(n);

      // filter nodes always have one input variable
      auto varsUsedHere = node->getVariablesUsedHere();
      TRI_ASSERT(varsUsedHere.size() == 1);

      // now check who introduced our variable
      auto variable = varsUsedHere[0];
      setter = plan->getVarSetBy(variable->id);
    } break;

    default:
      return rv;
  }

  // common part - extract the astNode from the setter, which is a calculation node
  if (setter == nullptr || setter->getType() != EN::CALCULATION) {
    return rv;
  }

  auto expression = static_cast<CalculationNode*>(setter)->expression();

  // the expression must exist and it must have an astNode
  if (expression == nullptr || expression->node() == nullptr) {
    // not the right type of node
    return rv;
  }
  AstNode* node = expression->nodeForModification();

  // FIXME -- technical debt -- code duplication / not all cases covered
  switch (type) {
    case EN::SORT: {
      // check comma separated parts of condition cond0, cond1, cond2
      rv = isDistanceFunction(node, nullptr);
    } break;

    case EN::FILTER: {
      rv = iterativePreorderWithCondition(type, node, &isGeoFilterExpression);
    } break;

    default:
      rv.invalidate(); // not required but make sure the result is invalid
  }

  rv.executionNode = n;
  rv.executionNodeType = type;
  rv.setter = static_cast<CalculationNode*>(setter);

  checkDistanceArguments(rv, plan);

  return rv;
};

// modify plan

// builds a condition that can be used with the index interface and
// contains all parameters required by the MMFilesGeoIndex
std::unique_ptr<Condition> buildGeoCondition(ExecutionPlan* plan, MMFilesGeoIndexInfo& info) {
  AstNode* lat = info.constantPair.first;
  AstNode* lon = info.constantPair.second;
  auto ast = plan->getAst();
  auto varAstNode = ast->createNodeReference(info.collectionNode->outVariable());

  auto args = ast->createNodeArray(info.within ? 4 : 3);
  args->addMember(varAstNode); // collection
  args->addMember(lat); // latitude
  args->addMember(lon); // longitude

  AstNode* cond = nullptr;
  if (info.within) {
    // WITHIN
    args->addMember(info.range);
    auto lessValue = ast->createNodeValueBool(info.lessgreaterequal);
    args->addMember(lessValue);
    cond = ast->createNodeFunctionCall("WITHIN", args);
  } else {
    // NEAR
    cond = ast->createNodeFunctionCall("NEAR", args);
  }

  TRI_ASSERT(cond != nullptr);

  auto condition = std::make_unique<Condition>(ast);
  condition->andCombine(cond);
  condition->normalize(plan);
  return condition;
}

void replaceGeoCondition(ExecutionPlan* plan, MMFilesGeoIndexInfo& info) {
  if (info.expressionParent && info.executionNodeType == EN::FILTER) {
    auto ast = plan->getAst();
    CalculationNode* newNode = nullptr;
    Expression* expr = new Expression(ast, static_cast<CalculationNode*>(info.setter)->expression()->nodeForModification()->clone(ast));

    try {
      newNode = new CalculationNode(plan, plan->nextId(), expr, static_cast<CalculationNode*>(info.setter)->outVariable());
    } catch (...) {
      delete expr;
      throw;
    }

    plan->registerNode(newNode);
    plan->replaceNode(info.setter, newNode);

    bool done = false;
    ast->traverseAndModify(newNode->expression()->nodeForModification(), [&done](AstNode* node, void* data) {
      if (done) {
        return node;
      }
      if (node->type == NODE_TYPE_OPERATOR_BINARY_AND) {
        for (std::size_t i = 0; i < node->numMembers(); i++) {
          if (isGeoFilterExpression(node->getMemberUnchecked(i), node)) {
            done = true;
            return node->getMemberUnchecked(i ? 0 : 1);
          }
        }
      }
      return node;
    },
    nullptr);

    if (done) {
      return;
    }

    auto replaceInfo = iterativePreorderWithCondition(EN::FILTER, newNode->expression()->nodeForModification(), &isGeoFilterExpression);
    if (newNode->expression()->nodeForModification() == replaceInfo.expressionParent) {
      if (replaceInfo.expressionParent->type == NODE_TYPE_OPERATOR_BINARY_AND) {
        for (std::size_t i = 0; i < replaceInfo.expressionParent->numMembers(); ++i) {
          if (replaceInfo.expressionParent->getMember(i) != replaceInfo.expressionNode) {
            newNode->expression()->replaceNode(replaceInfo.expressionParent->getMember(i));
            return;
          }
        }
      }
    }

    //else {
    //  // COULD BE IMPROVED
    //  if (replaceInfo.expressionParent->type == NODE_TYPE_OPERATOR_BINARY_AND) {
    //    // delete ast node - we would need the parent of expression parent to delete the node
    //    // we do not have it available here so we just replace the node with true
    //    return;
    //  }
    //}

    // fallback
    auto replacement = ast->createNodeValueBool(true);
    for (std::size_t i = 0; i < replaceInfo.expressionParent->numMembers(); ++i) {
      if (replaceInfo.expressionParent->getMember(i) == replaceInfo.expressionNode) {
        replaceInfo.expressionParent->removeMemberUnchecked(i);
        replaceInfo.expressionParent->addMember(replacement);
      }
    }
  }
}

// applies the optimization for a candidate
bool applyGeoOptimization(bool near, ExecutionPlan* plan, MMFilesGeoIndexInfo& first, MMFilesGeoIndexInfo& second) {
  if (!first && !second) {
    return false;
  }

  if (!first) {
    first = std::move(second);
    second.invalidate();
  }

  // We are not allowed to be an inner loop
  if (first.collectionNode->isInInnerLoop() && first.executionNodeType == EN::SORT) {
    return false;
  }

  std::unique_ptr<Condition> condition(buildGeoCondition(plan, first));

  auto inode = new IndexNode(
      plan, plan->nextId(), first.collectionNode->vocbase(),
      first.collectionNode->collection(), first.collectionNode->outVariable(),
      std::vector<transaction::Methods::IndexHandle>{transaction::Methods::IndexHandle{first.index}},
      condition.get(), false);
  plan->registerNode(inode);
  condition.release();

  plan->replaceNode(first.collectionNode, inode);

  replaceGeoCondition(plan, first);
  replaceGeoCondition(plan, second);

  // if the execution node is a sort OR a filter without further sub conditions
  // the node can be unlinked
  auto unlinkNode = [&](MMFilesGeoIndexInfo& info) {
    if (info && !info.expressionParent) {
      if (!arangodb::ServerState::instance()->isCoordinator() || info.executionNodeType == EN::FILTER) {
        plan->unlinkNode(info.executionNode);
      } else if (info.executionNodeType == EN::SORT) {
        // make sure the sort is not reinserted in the cluster
        static_cast<SortNode*>(info.executionNode)->_reinsertInCluster = false;
      }
    }
  };

  unlinkNode(first);
  unlinkNode(second);

  // signal that the plan has been changed
  return true;
}

void MMFilesOptimizerRules::geoIndexRule(Optimizer* opt,
                                         std::unique_ptr<ExecutionPlan> plan,
                                         OptimizerRule const* rule) {
  SmallVector<ExecutionNode*>::allocator_type::arena_type a;
  SmallVector<ExecutionNode*> nodes{a};
  bool modified = false;
  // inspect each return node and work upwards to the SingletonNode
  plan->findEndNodes(nodes, true);

  for (auto& node : nodes) {
    MMFilesGeoIndexInfo sortInfo{};
    MMFilesGeoIndexInfo filterInfo{};
    auto current = node;

    while (current) {
      switch (current->getType()) {
        case EN::SORT: {
          sortInfo = identifyGeoOptimizationCandidate(EN::SORT, plan.get(), current);
          break;
        }
        case EN::FILTER: {
          filterInfo = identifyGeoOptimizationCandidate(EN::FILTER, plan.get(), current);
          break;
        }
        case EN::ENUMERATE_COLLECTION: {
          EnumerateCollectionNode* collnode = static_cast<EnumerateCollectionNode*>(current);
          if ((sortInfo && sortInfo.collectionNode != collnode)
              || (filterInfo && filterInfo.collectionNode != collnode)) {
            filterInfo.invalidate();
            sortInfo.invalidate();
            break;
          }
          if (applyGeoOptimization(true, plan.get(), filterInfo, sortInfo)) {
            modified = true;
            filterInfo.invalidate();
            sortInfo.invalidate();
          }
          break;
        }

        case EN::INDEX:
        case EN::COLLECT: {
          filterInfo.invalidate();
          sortInfo.invalidate();
          break;
        }

        default: {
          // skip - do nothing
          break;
        }
      }

      current = current->getFirstDependency(); // inspect next node
    }
  }

  opt->addPlan(std::move(plan), rule, modified);
}

/// @brief remove SORT RAND() if appropriate
void MMFilesOptimizerRules::removeSortRandRule(Optimizer* opt, std::unique_ptr<ExecutionPlan> plan,
                                               OptimizerRule const* rule) {
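The removed iterativePreorderWithCondition() above is a stack-based preorder search that stops at the first hit. A generic sketch of the same traversal shape, with a toy node type standing in for AstNode (the names here are illustrative, not the AQL API):

#include <vector>

// Toy node type; the real code walks AstNode pointers and also tracks parents.
struct Node {
  int value;
  std::vector<Node*> children;
};

// Depth-first preorder search using an explicit stack; returns the first node
// satisfying the predicate, or nullptr - the "returns on first hit" behavior.
template <typename Pred>
Node* preorderFind(Node* root, Pred matches) {
  if (root == nullptr) return nullptr;
  std::vector<Node*> stack{root};
  while (!stack.empty()) {
    Node* current = stack.back();
    stack.pop_back();
    if (matches(current)) return current;
    for (Node* child : current->children) {
      stack.push_back(child);  // children are visited after their parent
    }
  }
  return nullptr;
}

// usage sketch: preorderFind(root, [](Node* n) { return n->value == 42; });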
@@ -35,8 +35,6 @@ struct OptimizerRule;

struct MMFilesOptimizerRules {
  static void registerResources();

-  static void geoIndexRule(aql::Optimizer* opt, std::unique_ptr<aql::ExecutionPlan> plan, aql::OptimizerRule const* rule);
-
  static void removeSortRandRule(aql::Optimizer* opt, std::unique_ptr<aql::ExecutionPlan> plan, aql::OptimizerRule const* rule);
};
@@ -99,9 +99,8 @@

using namespace arangodb;

-static int runServer(int argc, char** argv) {
+static int runServer(int argc, char** argv, ArangoGlobalContext &context) {
  try {
-    ArangoGlobalContext context(argc, argv, SBIN_DIRECTORY);
    context.installSegv();
    context.runStartupChecks();

@@ -215,7 +214,6 @@ static int runServer(int argc, char** argv) {
      ret = EXIT_FAILURE;
    }
    Logger::flush();
-
    return context.exit(ret);
  } catch (std::exception const& ex) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)

@@ -244,7 +242,8 @@ static void WINAPI ServiceMain(DWORD dwArgc, LPSTR* lpszArgv) {
  // set start pending
  SetServiceStatus(SERVICE_START_PENDING, 0, 1, 10000);

-  runServer(ARGC, ARGV);
+  ArangoGlobalContext context(ARGC, ARGV, SBIN_DIRECTORY);
+  runServer(ARGC, ARGV, context);

  // service has stopped
  SetServiceStatus(SERVICE_STOPPED, NO_ERROR, 0, 0);

@@ -264,10 +263,12 @@ int main(int argc, char* argv[]) {

    if (!StartServiceCtrlDispatcher(ste)) {
      std::cerr << "FATAL: StartServiceCtrlDispatcher has failed with "
-          << GetLastError() << std::endl;
+                << GetLastError() << std::endl;
      exit(EXIT_FAILURE);
    }
  } else {
    return 0;
  }
#endif
-  return runServer(argc, argv);
+  ArangoGlobalContext context(argc, argv, SBIN_DIRECTORY);
+  return runServer(argc, argv, context);
}
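The change above hoists construction of ArangoGlobalContext out of runServer() so that each entry point - main() and the Windows ServiceMain() - builds its own context and lends it to the server loop by reference. A stripped-down sketch of that refactoring shape, with placeholder types (not the actual ArangoDB classes):

// Placeholder context type; the real ArangoGlobalContext also installs
// signal handlers and runs startup checks.
struct GlobalContext {
  GlobalContext(int /*argc*/, char** /*argv*/) {}
  int exit(int ret) { return ret; }
};

// The server loop now borrows the context instead of owning it, so every
// entry point can construct one with its own arguments.
static int runServer(int argc, char** argv, GlobalContext& context) {
  (void)argc; (void)argv;
  // ... set up features, run the event loop ...
  return context.exit(0);
}

int main(int argc, char** argv) {
  GlobalContext context(argc, argv);  // constructed once per entry point
  return runServer(argc, argv, context);
}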
@@ -13,9 +13,11 @@ set(ROCKSDB_SOURCES
  RocksDBEngine/RocksDBEngine.cpp
  RocksDBEngine/RocksDBExportCursor.cpp
  RocksDBEngine/RocksDBFulltextIndex.cpp
+  RocksDBEngine/RocksDBGeoIndex.cpp
+  RocksDBEngine/RocksDBGeoIndexImpl.cpp
+  RocksDBEngine/RocksDBHashIndex.cpp
  RocksDBEngine/RocksDBIndex.cpp
  RocksDBEngine/RocksDBIndexFactory.cpp
-  RocksDBEngine/RocksDBHashIndex.cpp
  RocksDBEngine/RocksDBKey.cpp
  RocksDBEngine/RocksDBKeyBounds.cpp
  RocksDBEngine/RocksDBLogValue.cpp

@@ -33,8 +35,8 @@ set(ROCKSDB_SOURCES
  RocksDBEngine/RocksDBTransactionState.cpp
  RocksDBEngine/RocksDBTypes.cpp
  RocksDBEngine/RocksDBV8Functions.cpp
-  RocksDBEngine/RocksDBVPackIndex.cpp
  RocksDBEngine/RocksDBValue.cpp
  RocksDBEngine/RocksDBView.cpp
+  RocksDBEngine/RocksDBVPackIndex.cpp
)
set(ROCKSDB_SOURCES ${ROCKSDB_SOURCES} PARENT_SCOPE)
@@ -25,9 +25,12 @@
#include "Aql/AqlFunctionFeature.h"
#include "Aql/Function.h"
#include "RocksDBEngine/RocksDBFulltextIndex.h"
+#include "RocksDBEngine/RocksDBGeoIndex.h"
#include "RocksDBEngine/RocksDBToken.h"
+#include "StorageEngine/DocumentIdentifierToken.h"
#include "Transaction/Helpers.h"
#include "Transaction/Methods.h"
+#include "StorageEngine/TransactionState.h"
#include "Utils/CollectionNameResolver.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/ManagedDocumentResult.h"

@@ -134,7 +137,7 @@ AqlValue RocksDBAqlFunctions::Fulltext(
}
// do we need this in rocksdb?
trx->pinData(cid);

transaction::BuilderLeaser builder(trx);
FulltextQuery parsedQuery;
Result res = fulltextIndex->parseQueryString(queryString, parsedQuery);

@@ -149,20 +152,242 @@ AqlValue RocksDBAqlFunctions::Fulltext(
  return AqlValue(builder.get());
}

/// @brief load the geo index for a collection name
static arangodb::RocksDBGeoIndex* getGeoIndex(
    transaction::Methods* trx, TRI_voc_cid_t const& cid,
    std::string const& collectionName) {
  // NOTE:
  // Due to the trx lock the shared_index stays valid
  // as long as the trx stays valid.
  // It is safe to return the raw pointer.
  // It can only be used until the trx is finished.
  trx->addCollectionAtRuntime(cid, collectionName);
  Result res = trx->state()->ensureCollections();
  if (!res.ok()) {
    THROW_ARANGO_EXCEPTION_MESSAGE(res.errorNumber(),
                                   res.errorMessage());
  }

  auto document = trx->documentCollection(cid);
  if (document == nullptr) {
    THROW_ARANGO_EXCEPTION_FORMAT(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND, "'%s'",
                                  collectionName.c_str());
  }

  arangodb::RocksDBGeoIndex* index = nullptr;
  for (auto const& idx : document->getIndexes()) {
    if (idx->type() == arangodb::Index::TRI_IDX_TYPE_GEO1_INDEX ||
        idx->type() == arangodb::Index::TRI_IDX_TYPE_GEO2_INDEX) {
      index = static_cast<arangodb::RocksDBGeoIndex*>(idx.get());
      break;
    }
  }

  if (index == nullptr) {
    THROW_ARANGO_EXCEPTION_PARAMS(TRI_ERROR_QUERY_GEO_INDEX_MISSING,
                                  collectionName.c_str());
  }

  trx->pinData(cid);
  return index;
}
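The NOTE in getGeoIndex() is a lifetime argument: the returned raw pointer is usable only while the transaction that pins the collection is alive. A minimal sketch of that ownership shape (toy types, not the ArangoDB classes):

#include <memory>
#include <string>
#include <vector>

struct Index {
  std::string type;
};

// Stand-in for transaction::Methods: shared ownership of the indexes it has
// pinned keeps them alive for the transaction's lifetime.
struct Transaction {
  std::vector<std::shared_ptr<Index>> pinnedIndexes;

  Index* findIndex(std::string const& type) {
    for (auto const& idx : pinnedIndexes) {
      // Returning .get() is safe only because *this holds a shared_ptr:
      // the raw pointer must not outlive the transaction.
      if (idx->type == type) return idx.get();
    }
    return nullptr;
  }
};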
static AqlValue buildGeoResult(transaction::Methods* trx,
                               LogicalCollection* collection,
                               arangodb::aql::Query* query,
                               GeoCoordinates* cors, TRI_voc_cid_t const& cid,
                               std::string const& attributeName) {
  if (cors == nullptr) {
    return AqlValue(arangodb::basics::VelocyPackHelper::EmptyArrayValue());
  }

  size_t const nCoords = cors->length;
  if (nCoords == 0) {
    GeoIndex_CoordinatesFree(cors);
    return AqlValue(arangodb::basics::VelocyPackHelper::EmptyArrayValue());
  }

  struct geo_coordinate_distance_t {
    geo_coordinate_distance_t(double distance, RocksDBToken token)
        : _distance(distance), _token(token) {}
    double _distance;
    RocksDBToken _token;
  };

  std::vector<geo_coordinate_distance_t> distances;

  try {
    distances.reserve(nCoords);

    for (size_t i = 0; i < nCoords; ++i) {
      distances.emplace_back(geo_coordinate_distance_t(
          cors->distances[i], RocksDBToken(cors->coordinates[i].data)));
    }
  } catch (...) {
    GeoIndex_CoordinatesFree(cors);
    THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
  }

  GeoIndex_CoordinatesFree(cors);

  // sort result by distance
  std::sort(distances.begin(), distances.end(),
            [](geo_coordinate_distance_t const& left,
               geo_coordinate_distance_t const& right) {
              return left._distance < right._distance;
            });

  try {
    ManagedDocumentResult mmdr;
    transaction::BuilderLeaser builder(trx);
    builder->openArray();
    if (!attributeName.empty()) {
      // We have to copy the entire document
      for (auto& it : distances) {
        VPackObjectBuilder docGuard(builder.get());
        builder->add(attributeName, VPackValue(it._distance));
        if (collection->readDocument(trx, it._token, mmdr)) {
          VPackSlice doc(mmdr.vpack());
          for (auto const& entry : VPackObjectIterator(doc)) {
            std::string key = entry.key.copyString();
            if (key != attributeName) {
              builder->add(key, entry.value);
            }
          }
        }
      }

    } else {
      for (auto& it : distances) {
        if (collection->readDocument(trx, it._token, mmdr)) {
          mmdr.addToBuilder(*builder.get(), true);
        }
      }
    }
    builder->close();
    return AqlValue(builder.get());
  } catch (...) {
    THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
  }
}

/// @brief function NEAR
AqlValue RocksDBAqlFunctions::Near(arangodb::aql::Query* query,
                                   transaction::Methods* trx,
                                   VPackFunctionParameters const& parameters) {
  // TODO: obi
  THROW_ARANGO_EXCEPTION_PARAMS(TRI_ERROR_QUERY_GEO_INDEX_MISSING, "NEAR");
  ValidateParameters(parameters, "NEAR", 3, 5);

  AqlValue collectionValue = ExtractFunctionParameterValue(trx, parameters, 0);
  if (!collectionValue.isString()) {
    THROW_ARANGO_EXCEPTION_PARAMS(
        TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH, "NEAR");
  }

  std::string const collectionName(collectionValue.slice().copyString());

  AqlValue latitude = ExtractFunctionParameterValue(trx, parameters, 1);
  AqlValue longitude = ExtractFunctionParameterValue(trx, parameters, 2);

  if (!latitude.isNumber() || !longitude.isNumber()) {
    THROW_ARANGO_EXCEPTION_PARAMS(
        TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH, "NEAR");
  }

  // extract limit
  int64_t limitValue = 100;

  if (parameters.size() > 3) {
    AqlValue limit = ExtractFunctionParameterValue(trx, parameters, 3);

    if (limit.isNumber()) {
      limitValue = limit.toInt64(trx);
    } else if (!limit.isNull(true)) {
      THROW_ARANGO_EXCEPTION_PARAMS(
          TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH, "NEAR");
    }
  }

  std::string attributeName;
  if (parameters.size() > 4) {
    // have a distance attribute
    AqlValue distanceValue = ExtractFunctionParameterValue(trx, parameters, 4);

    if (!distanceValue.isNull(true) && !distanceValue.isString()) {
      THROW_ARANGO_EXCEPTION_PARAMS(
          TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH, "NEAR");
    }

    if (distanceValue.isString()) {
      attributeName = distanceValue.slice().copyString();
    }
  }

  TRI_voc_cid_t cid = trx->resolver()->getCollectionIdLocal(collectionName);
  arangodb::RocksDBGeoIndex* index = getGeoIndex(trx, cid, collectionName);

  TRI_ASSERT(index != nullptr);
  TRI_ASSERT(trx->isPinned(cid));

  GeoCoordinates* cors =
      index->nearQuery(trx, latitude.toDouble(trx), longitude.toDouble(trx),
                       static_cast<size_t>(limitValue));

  return buildGeoResult(trx, index->collection(), query, cors, cid,
                        attributeName);
}

/// @brief function WITHIN
AqlValue RocksDBAqlFunctions::Within(
    arangodb::aql::Query* query, transaction::Methods* trx,
    VPackFunctionParameters const& parameters) {
  // TODO: obi
  THROW_ARANGO_EXCEPTION_PARAMS(TRI_ERROR_QUERY_GEO_INDEX_MISSING, "Within");
  ValidateParameters(parameters, "WITHIN", 4, 5);

  AqlValue collectionValue = ExtractFunctionParameterValue(trx, parameters, 0);

  if (!collectionValue.isString()) {
    THROW_ARANGO_EXCEPTION_PARAMS(
        TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH, "WITHIN");
  }

  std::string const collectionName(collectionValue.slice().copyString());

  AqlValue latitudeValue = ExtractFunctionParameterValue(trx, parameters, 1);
  AqlValue longitudeValue = ExtractFunctionParameterValue(trx, parameters, 2);
  AqlValue radiusValue = ExtractFunctionParameterValue(trx, parameters, 3);

  if (!latitudeValue.isNumber() || !longitudeValue.isNumber() ||
      !radiusValue.isNumber()) {
    THROW_ARANGO_EXCEPTION_PARAMS(
        TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH, "WITHIN");
  }

  std::string attributeName;
  if (parameters.size() > 4) {
    // have a distance attribute
    AqlValue distanceValue = ExtractFunctionParameterValue(trx, parameters, 4);

    if (!distanceValue.isNull(true) && !distanceValue.isString()) {
      THROW_ARANGO_EXCEPTION_PARAMS(
          TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH, "WITHIN");
    }

    if (distanceValue.isString()) {
      attributeName = distanceValue.slice().copyString();
    }
  }

  TRI_voc_cid_t cid = trx->resolver()->getCollectionIdLocal(collectionName);
  arangodb::RocksDBGeoIndex* index = getGeoIndex(trx, cid, collectionName);

  TRI_ASSERT(index != nullptr);
  TRI_ASSERT(trx->isPinned(cid));

  GeoCoordinates* cors = index->withinQuery(trx, latitudeValue.toDouble(trx),
                                            longitudeValue.toDouble(trx),
                                            radiusValue.toDouble(trx));

  return buildGeoResult(trx, index->collection(), query, cors, cid,
                        attributeName);
}

void RocksDBAqlFunctions::registerResources() {
@ -81,7 +81,8 @@ RocksDBCollection::RocksDBCollection(LogicalCollection* collection,
|
|||
: PhysicalCollection(collection, info),
|
||||
_objectId(basics::VelocyPackHelper::stringUInt64(info, "objectId")),
|
||||
_numberDocuments(0),
|
||||
_revisionId(0) {
|
||||
_revisionId(0),
|
||||
_hasGeoIndex(false) {
|
||||
addCollectionMapping(_objectId, _logicalCollection->vocbase()->id(),
|
||||
_logicalCollection->cid());
|
||||
}
|
||||
|
@ -91,7 +92,8 @@ RocksDBCollection::RocksDBCollection(LogicalCollection* collection,
|
|||
: PhysicalCollection(collection, VPackSlice::emptyObjectSlice()),
|
||||
_objectId(static_cast<RocksDBCollection*>(physical)->_objectId),
|
||||
_numberDocuments(0),
|
||||
_revisionId(0) {
|
||||
_revisionId(0),
|
||||
_hasGeoIndex(false) {
|
||||
addCollectionMapping(_objectId, _logicalCollection->vocbase()->id(),
|
||||
_logicalCollection->cid());
|
||||
}
|
||||
|
@ -183,11 +185,14 @@ void RocksDBCollection::open(bool ignoreErrors) {
|
|||
<< " number of documents: " << counterValue.added();
|
||||
_numberDocuments = counterValue.added() - counterValue.removed();
|
||||
_revisionId = counterValue.revisionId();
|
||||
//_numberDocuments = countKeyRange(db, readOptions,
|
||||
// RocksDBKeyBounds::CollectionDocuments(_objectId));
|
||||
|
||||
for (auto it : getIndexes()) {
|
||||
for (std::shared_ptr<Index> it : getIndexes()) {
|
||||
static_cast<RocksDBIndex*>(it.get())->load();
|
||||
|
||||
if (it->type() == Index::TRI_IDX_TYPE_GEO1_INDEX ||
|
||||
it->type() == Index::TRI_IDX_TYPE_GEO2_INDEX) {
|
||||
_hasGeoIndex = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -547,7 +552,7 @@ bool RocksDBCollection::dropIndex(TRI_idx_iid_t iid) {
|
|||
if (rv == TRI_ERROR_NO_ERROR) {
|
||||
// trigger compaction before deleting the object
|
||||
cindex->cleanup();
|
||||
|
||||
|
||||
_indexes.erase(_indexes.begin() + i);
|
||||
events::DropIndex("", std::to_string(iid), TRI_ERROR_NO_ERROR);
|
||||
|
||||
|
@ -674,13 +679,20 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
|
|||
case RocksDBIndex::TRI_IDX_TYPE_FULLTEXT_INDEX:
|
||||
indexBounds = RocksDBKeyBounds::FulltextIndex(rindex->objectId());
|
||||
break;
|
||||
// TODO add options for geoindex, fulltext etc
|
||||
// TODO add options for geoindex, fulltext etc
|
||||
default:
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
rocksdb::ReadOptions options = state->readOptions();
|
||||
options.iterate_upper_bound = &(indexBounds.end());
|
||||
iter.reset(rtrx->GetIterator(options));
|
||||
|
||||
iter->Seek(indexBounds.start());
|
||||
while (iter->Valid() && cmp->Compare(iter->key(), indexBounds.end()) < 0) {
|
||||
rindex->disableCache(); // TODO: proper blacklisting of keys?
|
||||
TRI_DEFER(rindex->createCache());
|
||||
|
||||
while (iter->Valid()) {
|
||||
rocksdb::Status s = rtrx->Delete(iter->key());
|
||||
if (!s.ok()) {
|
||||
auto converted = convertStatus(s);
|
||||
|
@ -695,10 +707,10 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
|
|||
/*
|
||||
void RocksDBCollection::truncateNoTrx(transaction::Methods* trx) {
|
||||
TRI_ASSERT(_objectId != 0);
|
||||
|
||||
|
||||
rocksdb::Comparator const* cmp = globalRocksEngine()->cmp();
|
||||
TRI_voc_cid_t cid = _logicalCollection->cid();
|
||||
|
||||
|
||||
rocksdb::TransactionDB *db = rocksutils::globalRocksDB();
|
||||
rocksdb::WriteBatch batch(32 * 1024 * 1024);
|
||||
// delete documents
|
||||
|
@ -709,16 +721,16 @@ void RocksDBCollection::truncateNoTrx(transaction::Methods* trx) {
|
|||
// isolate against newer writes
|
||||
rocksdb::ReadOptions readOptions;
|
||||
readOptions.snapshot = state->rocksTransaction()->GetSnapshot();
|
||||
|
||||
|
||||
std::unique_ptr<rocksdb::Iterator> iter(db->NewIterator(readOptions));
|
||||
iter->Seek(documentBounds.start());
|
||||
|
||||
|
||||
while (iter->Valid() && cmp->Compare(iter->key(), documentBounds.end()) < 0) {
|
||||
TRI_voc_rid_t revisionId = RocksDBKey::revisionId(iter->key());
|
||||
VPackSlice key =
|
||||
VPackSlice(iter->value().data()).get(StaticStrings::KeyString);
|
||||
TRI_ASSERT(key.isString());
|
||||
|
||||
|
||||
// add possible log statement
|
||||
state->prepareOperation(cid, revisionId, StringRef(key),
|
||||
TRI_VOC_DOCUMENT_OPERATION_REMOVE);
|
||||
|
@ -731,28 +743,28 @@ void RocksDBCollection::truncateNoTrx(transaction::Methods* trx) {
|
|||
RocksDBOperationResult result =
|
||||
state->addOperation(cid, revisionId, TRI_VOC_DOCUMENT_OPERATION_REMOVE,
|
||||
0, iter->key().size());
|
||||
|
||||
|
||||
// transaction size limit reached -- fail
|
||||
if (result.fail()) {
|
||||
THROW_ARANGO_EXCEPTION(result);
|
||||
}
|
||||
|
||||
|
||||
// force intermediate commit
|
||||
if (result.commitRequired()) {
|
||||
// force commit
|
||||
}
|
||||
|
||||
|
||||
iter->Next();
|
||||
}
|
||||
|
||||
|
||||
// delete index items
|
||||
|
||||
|
||||
// TODO maybe we could also reuse Index::drop, if we ensure the
|
||||
// implementations
|
||||
// don't do anything beyond deleting their contents
|
||||
for (std::shared_ptr<Index> const& index : _indexes) {
|
||||
RocksDBIndex* rindex = static_cast<RocksDBIndex*>(index.get());
|
||||
|
||||
|
||||
RocksDBKeyBounds indexBounds = RocksDBKeyBounds::Empty();
|
||||
switch (rindex->type()) {
|
||||
case RocksDBIndex::TRI_IDX_TYPE_PRIMARY_INDEX:
|
||||
|
@@ -761,7 +773,7 @@ void RocksDBCollection::truncateNoTrx(transaction::Methods* trx) {
      case RocksDBIndex::TRI_IDX_TYPE_EDGE_INDEX:
        indexBounds = RocksDBKeyBounds::EdgeIndex(rindex->objectId());
        break;

      case RocksDBIndex::TRI_IDX_TYPE_HASH_INDEX:
      case RocksDBIndex::TRI_IDX_TYPE_SKIPLIST_INDEX:
      case RocksDBIndex::TRI_IDX_TYPE_PERSISTENT_INDEX:

@@ -772,11 +784,11 @@ void RocksDBCollection::truncateNoTrx(transaction::Methods* trx) {
        }
        break;
      // TODO: add options for geoindex, fulltext etc.

      default:
        THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
    }

    iter->Seek(indexBounds.start());
    while (iter->Valid() && cmp->Compare(iter->key(), indexBounds.end()) < 0) {
      rocksdb::Status s = rtrx->Delete(iter->key());
@@ -784,7 +796,7 @@ void RocksDBCollection::truncateNoTrx(transaction::Methods* trx) {
      auto converted = convertStatus(s);
      THROW_ARANGO_EXCEPTION(converted);
    }

    iter->Next();
  }
}
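For orientation, here is a minimal, hedged sketch of the pattern truncateNoTrx implements, a snapshot-isolated range delete with intermediate commits, written against the plain RocksDB API. The helper name and batch threshold are illustrative, not ArangoDB code:

#include <rocksdb/db.h>
#include <rocksdb/write_batch.h>
#include <memory>
#include <string>

// delete every key in [start, end); commit in chunks so a failure
// does not roll back the deletes that already went through
rocksdb::Status truncateRange(rocksdb::DB* db, std::string const& start,
                              std::string const& end) {
  rocksdb::ReadOptions ro;
  const rocksdb::Snapshot* snap = db->GetSnapshot();  // isolate against newer PUTs
  ro.snapshot = snap;

  std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(ro));
  rocksdb::WriteBatch batch;
  rocksdb::Status s;
  size_t pending = 0;

  for (it->Seek(start); it->Valid() && it->key().ToString() < end; it->Next()) {
    batch.Delete(it->key());
    if (++pending >= 5000) {  // "force intermediate commit"
      s = db->Write(rocksdb::WriteOptions(), &batch);
      if (!s.ok()) break;
      batch.Clear();
      pending = 0;
    }
  }
  if (s.ok() && pending > 0) {
    s = db->Write(rocksdb::WriteOptions(), &batch);
  }
  db->ReleaseSnapshot(snap);
  return s;
}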
@@ -1263,8 +1275,11 @@ void RocksDBCollection::addIndex(std::shared_ptr<arangodb::Index> idx) {
  }

  TRI_UpdateTickServer(static_cast<TRI_voc_tick_t>(id));

  _indexes.emplace_back(idx);
  if (idx->type() == Index::TRI_IDX_TYPE_GEO1_INDEX ||
      idx->type() == Index::TRI_IDX_TYPE_GEO2_INDEX) {
    _hasGeoIndex = true;
  }
}

void RocksDBCollection::addIndexCoordinator(
@@ -1337,8 +1352,8 @@ arangodb::Result RocksDBCollection::fillIndexes(
  bool hasMore = true;
  while (hasMore) {
    hasMore = iter->next(cb, 5000);
    if (_logicalCollection->status() == TRI_VOC_COL_STATUS_DELETED
        || _logicalCollection->deleted()) {
    if (_logicalCollection->status() == TRI_VOC_COL_STATUS_DELETED ||
        _logicalCollection->deleted()) {
      res = TRI_ERROR_INTERNAL;
    }
    if (res != TRI_ERROR_NO_ERROR) {
@ -39,7 +39,7 @@ class Result;
|
|||
class RocksDBPrimaryIndex;
|
||||
class RocksDBVPackIndex;
|
||||
struct RocksDBToken;
|
||||
|
||||
|
||||
class RocksDBCollection final : public PhysicalCollection {
|
||||
friend class RocksDBEngine;
|
||||
friend class RocksDBVPackIndex;
|
||||
|
@ -48,7 +48,6 @@ class RocksDBCollection final : public PhysicalCollection {
|
|||
constexpr static double defaultLockTimeout = 10.0 * 60.0;
|
||||
|
||||
public:
|
||||
|
||||
public:
|
||||
explicit RocksDBCollection(LogicalCollection*, VPackSlice const& info);
|
||||
explicit RocksDBCollection(LogicalCollection*,
|
||||
|
@@ -121,7 +120,7 @@ class RocksDBCollection final : public PhysicalCollection {
/// non-transactional truncate; continuously commits the deletes
/// and does not fully roll back on failure. Uses trx snapshots to isolate
/// against newer PUTs
//void truncateNoTrx(transaction::Methods* trx);
// void truncateNoTrx(transaction::Methods* trx);
|
||||
DocumentIdentifierToken lookupKey(
|
||||
transaction::Methods* trx,
|
||||
|
@ -185,10 +184,12 @@ class RocksDBCollection final : public PhysicalCollection {
|
|||
|
||||
/// recalculate counts for collection in case of failure
|
||||
uint64_t recalculateCounts();
|
||||
|
||||
|
||||
/// trigger rocksdb compaction for documentDB and indexes
|
||||
void compact();
|
||||
void estimateSize(velocypack::Builder &builder);
|
||||
void estimateSize(velocypack::Builder& builder);
|
||||
|
||||
bool hasGeoIndex() { return _hasGeoIndex; }
|
||||
|
||||
private:
|
||||
/// @brief return engine-specific figures
|
||||
|
@ -231,6 +232,8 @@ class RocksDBCollection final : public PhysicalCollection {
|
|||
std::atomic<uint64_t> _numberDocuments;
|
||||
std::atomic<TRI_voc_rid_t> _revisionId;
|
||||
|
||||
/// upgrade write locks to exclusive locks if this flag is set
|
||||
bool _hasGeoIndex;
|
||||
basics::ReadWriteLock _exclusiveLock;
|
||||
};
|
||||
|
||||
|
@ -246,6 +249,6 @@ inline RocksDBCollection* toRocksDBCollection(LogicalCollection* logical) {
|
|||
return toRocksDBCollection(phys);
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace arangodb
|
||||
|
||||
#endif
|
||||
|
|
|
@ -79,8 +79,8 @@ bool RocksDBEdgeIndexIterator::updateBounds() {
|
|||
fromTo = fromTo.get(StaticStrings::IndexEq);
|
||||
}
|
||||
TRI_ASSERT(fromTo.isString());
|
||||
_bounds = RocksDBKeyBounds::EdgeIndexVertex(_index->_objectId,
|
||||
StringRef(fromTo));
|
||||
_bounds =
|
||||
RocksDBKeyBounds::EdgeIndexVertex(_index->_objectId, StringRef(fromTo));
|
||||
|
||||
_iterator->Seek(_bounds.start());
|
||||
return true;
|
||||
|
@ -97,7 +97,7 @@ RocksDBEdgeIndexIterator::~RocksDBEdgeIndexIterator() {
|
|||
|
||||
bool RocksDBEdgeIndexIterator::next(TokenCallback const& cb, size_t limit) {
|
||||
TRI_ASSERT(_trx->state()->isRunning());
|
||||
|
||||
|
||||
if (limit == 0 || !_keysIterator.valid()) {
|
||||
// No limit no data, or we are actually done. The last call should have
|
||||
// returned false
|
||||
|
@ -107,10 +107,11 @@ bool RocksDBEdgeIndexIterator::next(TokenCallback const& cb, size_t limit) {
|
|||
|
||||
// acquire rocksdb collection
|
||||
auto rocksColl = toRocksDBCollection(_collection);
|
||||
|
||||
|
||||
while (true) {
|
||||
TRI_ASSERT(limit > 0);
|
||||
|
||||
// TODO: set options.iterate_upper_bound and remove compare?
|
||||
while (_iterator->Valid() &&
|
||||
(_index->_cmp->Compare(_iterator->key(), _bounds.end()) < 0)) {
|
||||
StringRef edgeKey = RocksDBKey::primaryKey(_iterator->key());
|
||||
|
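The TODO above asks whether options.iterate_upper_bound could replace the manual comparator call. A small sketch of how that looks with the plain RocksDB API (not ArangoDB's wrappers):

#include <rocksdb/db.h>
#include <memory>

void scanRange(rocksdb::DB* db, rocksdb::Slice lower, rocksdb::Slice upper) {
  rocksdb::ReadOptions ro;
  // exclusive upper bound; the Slice must outlive the iterator
  ro.iterate_upper_bound = &upper;
  std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(ro));
  for (it->Seek(lower); it->Valid(); it->Next()) {
    // every it->key() here is already < upper, no Compare() needed
  }
}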
@ -146,8 +147,9 @@ RocksDBEdgeIndex::RocksDBEdgeIndex(TRI_idx_iid_t iid,
|
|||
arangodb::LogicalCollection* collection,
|
||||
VPackSlice const& info,
|
||||
std::string const& attr)
|
||||
: RocksDBIndex(iid, collection, std::vector<std::vector<AttributeName>>(
|
||||
{{AttributeName(attr, false)}}),
|
||||
: RocksDBIndex(iid, collection,
|
||||
std::vector<std::vector<AttributeName>>(
|
||||
{{AttributeName(attr, false)}}),
|
||||
false, false,
|
||||
basics::VelocyPackHelper::stringUInt64(info, "objectId")),
|
||||
_directionAttr(attr) {
|
||||
|
@ -235,8 +237,8 @@ int RocksDBEdgeIndex::insert(transaction::Methods* trx,
|
|||
}
|
||||
}
|
||||
|
||||
int RocksDBEdgeIndex::insertRaw(rocksdb::WriteBatchWithIndex*,
|
||||
TRI_voc_rid_t, VPackSlice const&) {
|
||||
int RocksDBEdgeIndex::insertRaw(rocksdb::WriteBatchWithIndex*, TRI_voc_rid_t,
|
||||
VPackSlice const&) {
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
|
@ -261,8 +263,8 @@ int RocksDBEdgeIndex::remove(transaction::Methods* trx,
|
|||
}
|
||||
|
||||
/// optimization for truncateNoTrx, never called in fillIndex
|
||||
int RocksDBEdgeIndex::removeRaw(rocksdb::WriteBatch* writeBatch,
|
||||
TRI_voc_rid_t, VPackSlice const& doc) {
|
||||
int RocksDBEdgeIndex::removeRaw(rocksdb::WriteBatch* writeBatch, TRI_voc_rid_t,
|
||||
VPackSlice const& doc) {
|
||||
VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
|
||||
VPackSlice fromTo = doc.get(_directionAttr);
|
||||
TRI_ASSERT(primaryKey.isString() && fromTo.isString());
|
||||
|
|
|
@ -175,8 +175,9 @@ void RocksDBEngine::start() {
|
|||
// transactionOptions.num_stripes = TRI_numberProcessors();
|
||||
|
||||
// option values imported from RocksDBOptionFeature
|
||||
auto const* opts = ApplicationServer::getFeature<arangodb::RocksDBOptionFeature>(
|
||||
"RocksDBOption");
|
||||
auto const* opts =
|
||||
ApplicationServer::getFeature<arangodb::RocksDBOptionFeature>(
|
||||
"RocksDBOption");
|
||||
|
||||
_options.write_buffer_size = static_cast<size_t>(opts->_writeBufferSize);
|
||||
_options.max_write_buffer_number =
|
||||
|
@ -214,15 +215,16 @@ void RocksDBEngine::start() {
|
|||
_options.compaction_readahead_size =
|
||||
static_cast<size_t>(opts->_compactionReadaheadSize);
|
||||
|
||||
_options.env->SetBackgroundThreads(opts->_numThreadsHigh,
|
||||
_options.env->SetBackgroundThreads((int)opts->_numThreadsHigh,
|
||||
rocksdb::Env::Priority::HIGH);
|
||||
_options.env->SetBackgroundThreads(opts->_numThreadsLow,
|
||||
_options.env->SetBackgroundThreads((int)opts->_numThreadsLow,
|
||||
rocksdb::Env::Priority::LOW);
|
||||
|
||||
_options.info_log_level = rocksdb::InfoLogLevel::ERROR_LEVEL;
|
||||
// intentionally do not start the logger (yet)
|
||||
// as it will produce a lot of log spam
|
||||
// _options.info_log = std::make_shared<RocksDBLogger>(_options.info_log_level);
|
||||
// _options.info_log =
|
||||
// std::make_shared<RocksDBLogger>(_options.info_log_level);
|
||||
|
||||
// _options.statistics = rocksdb::CreateDBStatistics();
|
||||
// _options.stats_dump_period_sec = 1;
|
||||
|
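For context, a condensed sketch of the thread tuning done here: HIGH-priority background threads serve memtable flushes, LOW-priority threads serve compactions (the helper name is illustrative):

#include <rocksdb/env.h>
#include <rocksdb/options.h>

void configureBackgroundThreads(rocksdb::Options& options, int high, int low) {
  // options.env defaults to rocksdb::Env::Default()
  options.env->SetBackgroundThreads(high, rocksdb::Env::Priority::HIGH);
  options.env->SetBackgroundThreads(low, rocksdb::Env::Priority::LOW);
  options.info_log_level = rocksdb::InfoLogLevel::ERROR_LEVEL;
}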
|
|
@ -456,8 +456,8 @@ Result RocksDBFulltextIndex::parseQueryString(std::string const& qstr,
|
|||
TRI_PrefixUtf8String(lowered, TRI_FULLTEXT_MAX_WORD_LENGTH);
|
||||
ptrdiff_t prefixLength = prefixEnd - lowered;
|
||||
|
||||
query.emplace_back(std::string(lowered, (size_t)prefixLength),
|
||||
matchType, operation);
|
||||
query.emplace_back(std::string(lowered, (size_t)prefixLength), matchType,
|
||||
operation);
|
||||
|
||||
++i;
|
||||
if (i >= TRI_FULLTEXT_SEARCH_MAX_WORDS) {
|
||||
|
@ -476,7 +476,6 @@ Result RocksDBFulltextIndex::executeQuery(transaction::Methods* trx,
|
|||
FulltextQuery const& query,
|
||||
size_t maxResults,
|
||||
VPackBuilder& builder) {
|
||||
|
||||
std::set<std::string> resultSet;
|
||||
for (FulltextQueryToken const& token : query) {
|
||||
applyQueryToken(trx, token, resultSet);
|
||||
|
@ -485,8 +484,8 @@ Result RocksDBFulltextIndex::executeQuery(transaction::Methods* trx,
|
|||
auto physical = static_cast<RocksDBCollection*>(_collection->getPhysical());
|
||||
auto idx = physical->primaryIndex();
|
||||
ManagedDocumentResult mmdr;
|
||||
|
||||
if (maxResults == 0) {// 0 apparently means "all results"

if (maxResults == 0) {  // 0 apparently means "all results"
|
||||
maxResults = SIZE_MAX;
|
||||
}
|
||||
|
||||
|
@ -534,13 +533,14 @@ Result RocksDBFulltextIndex::applyQueryToken(transaction::Methods* trx,
|
|||
|
||||
std::set<std::string> intersect;
|
||||
|
||||
// TODO: set options.iterate_upper_bound and remove compare?
|
||||
// apply left to right logic, merging all current results with ALL previous
|
||||
while (iter->Valid() && _cmp->Compare(iter->key(), bounds.end()) < 0) {
|
||||
rocksdb::Status s = iter->status();
|
||||
if (!s.ok()) {
|
||||
return rocksutils::convertStatus(s);
|
||||
}
|
||||
|
||||
|
||||
StringRef key = RocksDBKey::primaryKey(iter->key());
|
||||
if (token.operation == FulltextQueryToken::AND) {
|
||||
intersect.insert(key.toString());
|
||||
|
@ -556,8 +556,8 @@ Result RocksDBFulltextIndex::applyQueryToken(transaction::Methods* trx,
|
|||
resultSet.clear();
|
||||
} else {
|
||||
std::set<std::string> output;
|
||||
std::set_intersection(resultSet.begin(), resultSet.end(), intersect.begin(),
|
||||
intersect.end(),
|
||||
std::set_intersection(resultSet.begin(), resultSet.end(),
|
||||
intersect.begin(), intersect.end(),
|
||||
std::inserter(output, output.begin()));
|
||||
resultSet = std::move(output);
|
||||
}
|
||||
|
|
|
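The AND/OR merging above reduces to a classic left-to-right set fold. A self-contained sketch (illustrative names, not the ArangoDB API):

#include <algorithm>
#include <iterator>
#include <set>
#include <string>

enum class Op { AND, OR };

// merge the matches of one query token into the running result set
void mergeToken(std::set<std::string>& resultSet,
                std::set<std::string> const& current, Op op) {
  if (op == Op::OR) {
    resultSet.insert(current.begin(), current.end());  // union
  } else {
    std::set<std::string> output;
    std::set_intersection(resultSet.begin(), resultSet.end(),
                          current.begin(), current.end(),
                          std::inserter(output, output.begin()));
    resultSet = std::move(output);
  }
}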
@ -0,0 +1,589 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany
|
||||
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Jan Christoph Uhde
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "RocksDBGeoIndex.h"
|
||||
|
||||
#include "Aql/Ast.h"
|
||||
#include "Aql/AstNode.h"
|
||||
#include "Aql/SortCondition.h"
|
||||
#include "Basics/StringRef.h"
|
||||
#include "Basics/VelocyPackHelper.h"
|
||||
#include "Logger/Logger.h"
|
||||
#include "RocksDBEngine/RocksDBCommon.h"
|
||||
#include "RocksDBEngine/RocksDBToken.h"
|
||||
#include "StorageEngine/TransactionState.h"
|
||||
#include <rocksdb/utilities/transaction_db.h>
|
||||
|
||||
using namespace arangodb;
|
||||
|
||||
RocksDBGeoIndexIterator::RocksDBGeoIndexIterator(
|
||||
LogicalCollection* collection, transaction::Methods* trx,
|
||||
ManagedDocumentResult* mmdr, RocksDBGeoIndex const* index,
|
||||
arangodb::aql::AstNode const* cond, arangodb::aql::Variable const* var)
|
||||
: IndexIterator(collection, trx, mmdr, index),
|
||||
_index(index),
|
||||
_cursor(nullptr),
|
||||
_coor(),
|
||||
_condition(cond),
|
||||
_lat(0.0),
|
||||
_lon(0.0),
|
||||
_near(true),
|
||||
_inclusive(false),
|
||||
_done(false),
|
||||
_radius(0.0) {
|
||||
evaluateCondition();
|
||||
}
|
||||
|
||||
void RocksDBGeoIndexIterator::evaluateCondition() {
|
||||
if (_condition) {
|
||||
auto numMembers = _condition->numMembers();
|
||||
|
||||
TRI_ASSERT(numMembers == 1); // should only be an FCALL
|
||||
auto fcall = _condition->getMember(0);
|
||||
TRI_ASSERT(fcall->type == arangodb::aql::NODE_TYPE_FCALL);
|
||||
TRI_ASSERT(fcall->numMembers() == 1);
|
||||
auto args = fcall->getMember(0);
|
||||
|
||||
numMembers = args->numMembers();
|
||||
TRI_ASSERT(numMembers >= 3);
|
||||
|
||||
_lat = args->getMember(1)->getDoubleValue();
|
||||
_lon = args->getMember(2)->getDoubleValue();
|
||||
|
||||
if (numMembers == 3) {
|
||||
// NEAR
|
||||
_near = true;
|
||||
} else {
|
||||
// WITHIN
|
||||
TRI_ASSERT(numMembers == 5);
|
||||
_near = false;
|
||||
_radius = args->getMember(3)->getDoubleValue();
|
||||
_inclusive = args->getMember(4)->getBoolValue();
|
||||
}
|
||||
} else {
|
||||
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
|
||||
<< "No condition passed to RocksDBGeoIndexIterator constructor";
|
||||
}
|
||||
}
|
||||
|
||||
size_t RocksDBGeoIndexIterator::findLastIndex(GeoCoordinates* coords) const {
|
||||
TRI_ASSERT(coords != nullptr);
|
||||
|
||||
// determine which documents to return...
|
||||
size_t numDocs = coords->length;
|
||||
|
||||
if (!_near) {
|
||||
// WITHIN
|
||||
// only return those documents that are within the specified radius
|
||||
TRI_ASSERT(numDocs > 0);
|
||||
|
||||
// linear scan for the first document outside the specified radius
|
||||
// scan backwards because documents with higher distances are more
|
||||
// interesting
|
||||
int iterations = 0;
|
||||
while ((_inclusive && coords->distances[numDocs - 1] > _radius) ||
|
||||
(!_inclusive && coords->distances[numDocs - 1] >= _radius)) {
|
||||
// document is outside the specified radius!
|
||||
--numDocs;
|
||||
|
||||
if (numDocs == 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (++iterations == 8 && numDocs >= 10) {
|
||||
// switch to a binary search for documents inside/outside the specified
|
||||
// radius
|
||||
size_t l = 0;
|
||||
size_t r = numDocs - 1;
|
||||
|
||||
while (true) {
|
||||
// determine midpoint
|
||||
size_t m = l + ((r - l) / 2);
|
||||
if ((_inclusive && coords->distances[m] > _radius) ||
|
||||
(!_inclusive && coords->distances[m] >= _radius)) {
|
||||
// document is outside the specified radius!
|
||||
if (m == 0) {
|
||||
numDocs = 0;
|
||||
break;
|
||||
}
|
||||
r = m - 1;
|
||||
} else {
|
||||
// still inside the radius
|
||||
numDocs = m + 1;
|
||||
l = m + 1;
|
||||
}
|
||||
|
||||
if (r < l) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return numDocs;
|
||||
}
|
||||
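Extracted as a standalone helper, and assuming, as the code above does, that the distances array is sorted ascending, the cutoff computation is:

#include <cstddef>
#include <vector>

// count of leading entries within radius; a short linear scan from the
// back switches to binary search when many documents lie outside
size_t countWithinRadius(std::vector<double> const& distances, double radius,
                         bool inclusive) {
  auto outside = [&](double d) { return inclusive ? d > radius : d >= radius; };
  size_t numDocs = distances.size();
  int iterations = 0;
  while (numDocs > 0 && outside(distances[numDocs - 1])) {
    --numDocs;
    if (numDocs == 0) {
      break;
    }
    if (++iterations == 8 && numDocs >= 10) {
      size_t l = 0;
      size_t r = numDocs - 1;
      while (true) {
        size_t m = l + ((r - l) / 2);
        if (outside(distances[m])) {
          if (m == 0) {
            numDocs = 0;
            break;
          }
          r = m - 1;  // boundary is further left
        } else {
          numDocs = m + 1;  // m is inside; boundary is further right
          l = m + 1;
        }
        if (r < l) {
          break;
        }
      }
      break;
    }
  }
  return numDocs;
}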
|
||||
bool RocksDBGeoIndexIterator::next(TokenCallback const& cb, size_t limit) {
|
||||
if (!_cursor) {
|
||||
createCursor(_lat, _lon);
|
||||
|
||||
if (!_cursor) {
|
||||
// actually validate that we got a valid cursor
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
|
||||
}
|
||||
}
|
||||
|
||||
TRI_ASSERT(_cursor != nullptr);
|
||||
|
||||
if (_done) {
|
||||
// we already know that no further results will be returned by the index
|
||||
return false;
|
||||
}
|
||||
|
||||
TRI_ASSERT(limit > 0);
|
||||
if (limit > 0) {
|
||||
// only need to calculate distances for WITHIN queries, but not for NEAR
|
||||
// queries
|
||||
bool withDistances;
|
||||
double maxDistance;
|
||||
if (_near) {
|
||||
withDistances = false;
|
||||
maxDistance = -1.0;
|
||||
} else {
|
||||
withDistances = true;
|
||||
maxDistance = _radius;
|
||||
}
|
||||
auto coords = std::unique_ptr<GeoCoordinates>(::GeoIndex_ReadCursor(
|
||||
_cursor, static_cast<int>(limit), withDistances, maxDistance));
|
||||
|
||||
size_t const length = coords ? coords->length : 0;
|
||||
|
||||
if (length == 0) {
|
||||
// Nothing Found
|
||||
// TODO validate
|
||||
_done = true;
|
||||
return false;
|
||||
}
|
||||
|
||||
size_t numDocs = findLastIndex(coords.get());
|
||||
if (numDocs == 0) {
|
||||
// we are done
|
||||
_done = true;
|
||||
return false;
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < numDocs; ++i) {
|
||||
cb(RocksDBToken(coords->coordinates[i].data));
|
||||
}
|
||||
// If we return fewer than limit documents, we are done.
|
||||
_done = numDocs < limit;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void RocksDBGeoIndexIterator::replaceCursor(::GeoCursor* c) {
|
||||
if (_cursor) {
|
||||
::GeoIndex_CursorFree(_cursor);
|
||||
}
|
||||
_cursor = c;
|
||||
_done = false;
|
||||
}
|
||||
|
||||
void RocksDBGeoIndexIterator::createCursor(double lat, double lon) {
|
||||
_coor = GeoCoordinate{lat, lon, 0};
|
||||
replaceCursor(::GeoIndex_NewCursor(_index->_geoIndex, &_coor));
|
||||
}
|
||||
|
||||
/// @brief creates an IndexIterator for the given Condition
|
||||
IndexIterator* RocksDBGeoIndex::iteratorForCondition(
|
||||
transaction::Methods* trx, ManagedDocumentResult* mmdr,
|
||||
arangodb::aql::AstNode const* node,
|
||||
arangodb::aql::Variable const* reference, bool) {
|
||||
TRI_IF_FAILURE("GeoIndex::noIterator") {
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
|
||||
}
|
||||
return new RocksDBGeoIndexIterator(_collection, trx, mmdr, this, node,
|
||||
reference);
|
||||
}
|
||||
|
||||
void RocksDBGeoIndexIterator::reset() { replaceCursor(nullptr); }
|
||||
|
||||
RocksDBGeoIndex::RocksDBGeoIndex(TRI_idx_iid_t iid,
|
||||
arangodb::LogicalCollection* collection,
|
||||
VPackSlice const& info)
|
||||
: RocksDBIndex(iid, collection, info),
|
||||
_variant(INDEX_GEO_INDIVIDUAL_LAT_LON),
|
||||
_geoJson(false),
|
||||
_geoIndex(nullptr) {
|
||||
TRI_ASSERT(iid != 0);
|
||||
_unique = false;
|
||||
_sparse = true;
|
||||
|
||||
if (_fields.size() == 1) {
|
||||
_geoJson = arangodb::basics::VelocyPackHelper::getBooleanValue(
|
||||
info, "geoJson", false);
|
||||
auto& loc = _fields[0];
|
||||
_location.reserve(loc.size());
|
||||
for (auto const& it : loc) {
|
||||
_location.emplace_back(it.name);
|
||||
}
|
||||
_variant =
|
||||
_geoJson ? INDEX_GEO_COMBINED_LAT_LON : INDEX_GEO_COMBINED_LON_LAT;
|
||||
} else if (_fields.size() == 2) {
|
||||
_variant = INDEX_GEO_INDIVIDUAL_LAT_LON;
|
||||
auto& lat = _fields[0];
|
||||
_latitude.reserve(lat.size());
|
||||
for (auto const& it : lat) {
|
||||
_latitude.emplace_back(it.name);
|
||||
}
|
||||
auto& lon = _fields[1];
|
||||
_longitude.reserve(lon.size());
|
||||
for (auto const& it : lon) {
|
||||
_longitude.emplace_back(it.name);
|
||||
}
|
||||
} else {
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(
|
||||
TRI_ERROR_BAD_PARAMETER,
|
||||
"RocksDBGeoIndex can only be created with one or two fields.");
|
||||
}
|
||||
|
||||
|
||||
// cheap trick to get the last inserted pot and slot number
|
||||
rocksdb::TransactionDB *db = rocksutils::globalRocksDB();
|
||||
rocksdb::ReadOptions opts;
|
||||
std::unique_ptr<rocksdb::Iterator> iter(db->NewIterator(opts));
|
||||
|
||||
int numPots = 0;
|
||||
RocksDBKeyBounds b1 = RocksDBKeyBounds::GeoIndex(_objectId, false);
|
||||
iter->SeekForPrev(b1.end());
|
||||
if (iter->Valid()
|
||||
&& _cmp->Compare(b1.start(), iter->key()) < 0
|
||||
&& _cmp->Compare(iter->key(), b1.end()) < 0) {
|
||||
// found a key smaller than bounds end
|
||||
std::pair<bool, int32_t> pair = RocksDBKey::geoValues(iter->key());
|
||||
TRI_ASSERT(pair.first == false);
|
||||
numPots = pair.second;
|
||||
}
|
||||
|
||||
int numSlots = 0;
|
||||
RocksDBKeyBounds b2 = RocksDBKeyBounds::GeoIndex(_objectId, true);
|
||||
iter->SeekForPrev(b2.end());
|
||||
if (iter->Valid()
|
||||
&& _cmp->Compare(b2.start(), iter->key()) < 0
|
||||
&& _cmp->Compare(iter->key(), b2.end()) < 0) {
|
||||
// found a key smaller than bounds end
|
||||
std::pair<bool, int32_t> pair = RocksDBKey::geoValues(iter->key());
|
||||
TRI_ASSERT(pair.first);
|
||||
numSlots = pair.second;
|
||||
}
|
||||
|
||||
_geoIndex = GeoIndex_new(_objectId, numPots, numSlots);
|
||||
if (_geoIndex == nullptr) {
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
|
||||
}
|
||||
}
|
||||
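The "cheap trick" generalizes to any monotonically keyed range: recover the highest assigned id by seeking backwards from the end of the bounds instead of persisting a counter. A hedged sketch with made-up helper names:

#include <rocksdb/db.h>
#include <cstdint>
#include <memory>

// returns the id decoded from the greatest key in [rangeStart, rangeEnd),
// or 0 if the range is empty
uint64_t lastUsedId(rocksdb::DB* db, rocksdb::Slice rangeStart,
                    rocksdb::Slice rangeEnd,
                    uint64_t (*decodeId)(rocksdb::Slice)) {
  rocksdb::ReadOptions ro;
  std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(ro));
  it->SeekForPrev(rangeEnd);  // lands on the greatest key <= rangeEnd
  if (it->Valid() && it->key().compare(rangeStart) >= 0 &&
      it->key().compare(rangeEnd) < 0) {
    return decodeId(it->key());
  }
  return 0;  // nothing stored yet: start counting from scratch
}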
|
||||
RocksDBGeoIndex::~RocksDBGeoIndex() {
|
||||
if (_geoIndex != nullptr) {
|
||||
GeoIndex_free(_geoIndex);
|
||||
}
|
||||
}
|
||||
|
||||
size_t RocksDBGeoIndex::memory() const {
|
||||
rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
|
||||
RocksDBKeyBounds bounds = RocksDBKeyBounds::GeoIndex(_objectId);
|
||||
rocksdb::Range r(bounds.start(), bounds.end());
|
||||
uint64_t out;
|
||||
db->GetApproximateSizes(&r, 1, &out, true);
|
||||
return (size_t)out;
|
||||
}
|
||||
|
||||
/// @brief return a JSON representation of the index
|
||||
void RocksDBGeoIndex::toVelocyPack(VPackBuilder& builder, bool withFigures,
|
||||
bool forPersistence) const {
|
||||
builder.openObject();
|
||||
// Basic index
|
||||
RocksDBIndex::toVelocyPack(builder, withFigures, forPersistence);
|
||||
|
||||
if (_variant == INDEX_GEO_COMBINED_LAT_LON ||
|
||||
_variant == INDEX_GEO_COMBINED_LON_LAT) {
|
||||
builder.add("geoJson", VPackValue(_geoJson));
|
||||
}
|
||||
|
||||
// geo indexes are always non-unique
|
||||
// geo indexes are always sparse.
|
||||
// "ignoreNull" has the same meaning as "sparse" and is only returned for
|
||||
// backwards compatibility
|
||||
// the "constraint" attribute has no meaning since ArangoDB 2.5 and is only
|
||||
// returned for backwards compatibility
|
||||
builder.add("constraint", VPackValue(false));
|
||||
builder.add("unique", VPackValue(false));
|
||||
builder.add("ignoreNull", VPackValue(true));
|
||||
builder.add("sparse", VPackValue(true));
|
||||
builder.close();
|
||||
}
|
||||
|
||||
/// @brief Test if this index matches the definition
|
||||
bool RocksDBGeoIndex::matchesDefinition(VPackSlice const& info) const {
|
||||
TRI_ASSERT(info.isObject());
|
||||
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
|
||||
VPackSlice typeSlice = info.get("type");
|
||||
TRI_ASSERT(typeSlice.isString());
|
||||
StringRef typeStr(typeSlice);
|
||||
TRI_ASSERT(typeStr == oldtypeName());
|
||||
#endif
|
||||
auto value = info.get("id");
|
||||
if (!value.isNone()) {
|
||||
// We already have an id.
|
||||
if (!value.isString()) {
|
||||
// Invalid ID
|
||||
return false;
|
||||
}
|
||||
// Short circuit. If id is correct the index is identical.
|
||||
StringRef idRef(value);
|
||||
return idRef == std::to_string(_iid);
|
||||
}
|
||||
value = info.get("fields");
|
||||
if (!value.isArray()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
size_t const n = static_cast<size_t>(value.length());
|
||||
if (n != _fields.size()) {
|
||||
return false;
|
||||
}
|
||||
if (_unique != arangodb::basics::VelocyPackHelper::getBooleanValue(
|
||||
info, "unique", false)) {
|
||||
return false;
|
||||
}
|
||||
if (_sparse != arangodb::basics::VelocyPackHelper::getBooleanValue(
|
||||
info, "sparse", true)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (n == 1) {
|
||||
if (_geoJson != arangodb::basics::VelocyPackHelper::getBooleanValue(
|
||||
info, "geoJson", false)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// This check takes ordering of attributes into account.
|
||||
std::vector<arangodb::basics::AttributeName> translate;
|
||||
for (size_t i = 0; i < n; ++i) {
|
||||
translate.clear();
|
||||
VPackSlice f = value.at(i);
|
||||
if (!f.isString()) {
|
||||
// Invalid field definition!
|
||||
return false;
|
||||
}
|
||||
arangodb::StringRef in(f);
|
||||
TRI_ParseAttributeString(in, translate, true);
|
||||
if (!arangodb::basics::AttributeName::isIdentical(_fields[i], translate,
|
||||
false)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
int RocksDBGeoIndex::insert(transaction::Methods*, TRI_voc_rid_t revisionId,
|
||||
VPackSlice const& doc, bool isRollback) {
|
||||
double latitude;
|
||||
double longitude;
|
||||
|
||||
if (_variant == INDEX_GEO_INDIVIDUAL_LAT_LON) {
|
||||
VPackSlice lat = doc.get(_latitude);
|
||||
if (!lat.isNumber()) {
|
||||
// Invalid, no insert. Index is sparse
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
VPackSlice lon = doc.get(_longitude);
|
||||
if (!lon.isNumber()) {
|
||||
// Invalid, no insert. Index is sparse
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
latitude = lat.getNumericValue<double>();
|
||||
longitude = lon.getNumericValue<double>();
|
||||
} else {
|
||||
VPackSlice loc = doc.get(_location);
|
||||
if (!loc.isArray() || loc.length() < 2) {
|
||||
// Invalid, no insert. Index is sparse
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
VPackSlice first = loc.at(0);
|
||||
if (!first.isNumber()) {
|
||||
// Invalid, no insert. Index is sparse
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
VPackSlice second = loc.at(1);
|
||||
if (!second.isNumber()) {
|
||||
// Invalid, no insert. Index is sparse
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
if (_geoJson) {
|
||||
longitude = first.getNumericValue<double>();
|
||||
latitude = second.getNumericValue<double>();
|
||||
} else {
|
||||
latitude = first.getNumericValue<double>();
|
||||
longitude = second.getNumericValue<double>();
|
||||
}
|
||||
}
|
||||
|
||||
// and insert into index
|
||||
GeoCoordinate gc;
|
||||
gc.latitude = latitude;
|
||||
gc.longitude = longitude;
|
||||
gc.data = static_cast<uint64_t>(revisionId);
|
||||
|
||||
int res = GeoIndex_insert(_geoIndex, &gc);
|
||||
|
||||
if (res == -1) {
|
||||
LOG_TOPIC(WARN, arangodb::Logger::FIXME)
|
||||
<< "found duplicate entry in geo-index, should not happen";
|
||||
return TRI_set_errno(TRI_ERROR_INTERNAL);
|
||||
} else if (res == -2) {
|
||||
return TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY);
|
||||
} else if (res == -3) {
|
||||
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME)
|
||||
<< "illegal geo-coordinates, ignoring entry";
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
} else if (res < 0) {
|
||||
return TRI_set_errno(TRI_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
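The geoJson flag only swaps the coordinate order. A small helper making that explicit (illustrative, not ArangoDB API; the caller has already validated the slice as an array of two numbers):

#include <velocypack/Slice.h>
#include <utility>

// returns {latitude, longitude}; GeoJSON stores [lon, lat],
// the plain array variant stores [lat, lon]
std::pair<double, double> extractLatLon(arangodb::velocypack::Slice loc,
                                        bool geoJson) {
  double a = loc.at(0).getNumericValue<double>();
  double b = loc.at(1).getNumericValue<double>();
  return geoJson ? std::make_pair(b, a) : std::make_pair(a, b);
}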
|
||||
int RocksDBGeoIndex::insertRaw(rocksdb::WriteBatchWithIndex* batch,
|
||||
TRI_voc_rid_t revisionId,
|
||||
arangodb::velocypack::Slice const& doc) {
|
||||
return this->insert(nullptr, revisionId, doc, false);
|
||||
}
|
||||
|
||||
int RocksDBGeoIndex::remove(transaction::Methods*, TRI_voc_rid_t revisionId,
|
||||
VPackSlice const& doc, bool isRollback) {
|
||||
double latitude = 0.0;
|
||||
double longitude = 0.0;
|
||||
bool ok = true;
|
||||
|
||||
if (_variant == INDEX_GEO_INDIVIDUAL_LAT_LON) {
|
||||
VPackSlice lat = doc.get(_latitude);
|
||||
VPackSlice lon = doc.get(_longitude);
|
||||
if (!lat.isNumber()) {
|
||||
ok = false;
|
||||
} else {
|
||||
latitude = lat.getNumericValue<double>();
|
||||
}
|
||||
if (!lon.isNumber()) {
|
||||
ok = false;
|
||||
} else {
|
||||
longitude = lon.getNumericValue<double>();
|
||||
}
|
||||
} else {
|
||||
VPackSlice loc = doc.get(_location);
|
||||
if (!loc.isArray() || loc.length() < 2) {
|
||||
ok = false;
|
||||
} else {
|
||||
VPackSlice first = loc.at(0);
|
||||
if (!first.isNumber()) {
|
||||
ok = false;
|
||||
}
|
||||
VPackSlice second = loc.at(1);
|
||||
if (!second.isNumber()) {
|
||||
ok = false;
|
||||
}
|
||||
if (ok) {
|
||||
if (_geoJson) {
|
||||
longitude = first.getNumericValue<double>();
|
||||
latitude = second.getNumericValue<double>();
|
||||
} else {
|
||||
latitude = first.getNumericValue<double>();
|
||||
longitude = second.getNumericValue<double>();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!ok) {
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
GeoCoordinate gc;
|
||||
gc.latitude = latitude;
|
||||
gc.longitude = longitude;
|
||||
gc.data = static_cast<uint64_t>(revisionId);
|
||||
|
||||
// ignore non-existing elements in geo-index
|
||||
GeoIndex_remove(_geoIndex, &gc);
|
||||
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
int RocksDBGeoIndex::removeRaw(rocksdb::WriteBatch*, TRI_voc_rid_t revisionId,
|
||||
arangodb::velocypack::Slice const& doc) {
|
||||
return this->remove(nullptr, revisionId, doc, false);
|
||||
}
|
||||
|
||||
int RocksDBGeoIndex::unload() {
|
||||
// create a new, empty index
|
||||
/*auto empty = GeoIndex_new(_objectId, 0, 0);
|
||||
|
||||
if (empty == nullptr) {
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
// free the old one
|
||||
if (_geoIndex != nullptr) {
|
||||
GeoIndex_free(_geoIndex);
|
||||
}
|
||||
|
||||
// and assign it
|
||||
_geoIndex = empty;*/
|
||||
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
/// @brief looks up all points within a given radius
|
||||
GeoCoordinates* RocksDBGeoIndex::withinQuery(transaction::Methods* trx,
|
||||
double lat, double lon,
|
||||
double radius) const {
|
||||
GeoCoordinate gc;
|
||||
gc.latitude = lat;
|
||||
gc.longitude = lon;
|
||||
|
||||
return GeoIndex_PointsWithinRadius(_geoIndex, &gc, radius);
|
||||
}
|
||||
|
||||
/// @brief looks up the nearest points
|
||||
GeoCoordinates* RocksDBGeoIndex::nearQuery(transaction::Methods* trx,
|
||||
double lat, double lon,
|
||||
size_t count) const {
|
||||
GeoCoordinate gc;
|
||||
gc.latitude = lat;
|
||||
gc.longitude = lon;
|
||||
|
||||
return GeoIndex_NearestCountPoints(_geoIndex, &gc, static_cast<int>(count));
|
||||
}
|
|
@ -0,0 +1,200 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany
|
||||
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Jan Christoph Uhde
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef ARANGOD_ROCKSDB_GEO_INDEX_H
#define ARANGOD_ROCKSDB_GEO_INDEX_H 1
|
||||
|
||||
#include "Basics/Common.h"
|
||||
#include "Indexes/IndexIterator.h"
|
||||
#include "RocksDBEngine/RocksDBGeoIndexImpl.h"
|
||||
#include "RocksDBEngine/RocksDBIndex.h"
|
||||
#include "VocBase/voc-types.h"
|
||||
#include "VocBase/vocbase.h"
|
||||
|
||||
#include <velocypack/Builder.h>
|
||||
#include <velocypack/velocypack-aliases.h>
|
||||
|
||||
using namespace ::arangodb::rocksdbengine;
|
||||
|
||||
// GeoCoordinate.data must be capable of storing revision ids
|
||||
static_assert(sizeof(GeoCoordinate::data) >= sizeof(TRI_voc_rid_t),
|
||||
"invalid size of GeoCoordinate.data");
|
||||
|
||||
namespace arangodb {
|
||||
class RocksDBGeoIndex;
|
||||
|
||||
class RocksDBGeoIndexIterator final : public IndexIterator {
|
||||
public:
|
||||
/// @brief Construct a RocksDBGeoIndexIterator based on AST conditions
|
||||
RocksDBGeoIndexIterator(LogicalCollection* collection,
|
||||
transaction::Methods* trx,
|
||||
ManagedDocumentResult* mmdr,
|
||||
RocksDBGeoIndex const* index,
|
||||
arangodb::aql::AstNode const*,
|
||||
arangodb::aql::Variable const*);
|
||||
|
||||
~RocksDBGeoIndexIterator() { replaceCursor(nullptr); }
|
||||
|
||||
char const* typeName() const override { return "geo-index-iterator"; }
|
||||
|
||||
bool next(TokenCallback const& cb, size_t limit) override;
|
||||
|
||||
void reset() override;
|
||||
|
||||
private:
|
||||
size_t findLastIndex(GeoCoordinates* coords) const;
|
||||
void replaceCursor(::GeoCursor* c);
|
||||
void createCursor(double lat, double lon);
|
||||
void evaluateCondition(); // called in constructor
|
||||
|
||||
RocksDBGeoIndex const* _index;
|
||||
::GeoCursor* _cursor;
|
||||
::GeoCoordinate _coor;
|
||||
arangodb::aql::AstNode const* _condition;
|
||||
double _lat;
|
||||
double _lon;
|
||||
bool _near;
|
||||
bool _inclusive;
|
||||
bool _done;
|
||||
double _radius;
|
||||
};
|
||||
|
||||
class RocksDBGeoIndex final : public RocksDBIndex {
|
||||
friend class RocksDBGeoIndexIterator;
|
||||
|
||||
public:
|
||||
RocksDBGeoIndex() = delete;
|
||||
|
||||
RocksDBGeoIndex(TRI_idx_iid_t, LogicalCollection*,
|
||||
arangodb::velocypack::Slice const&);
|
||||
|
||||
~RocksDBGeoIndex();
|
||||
|
||||
public:
|
||||
/// @brief geo index variants
|
||||
enum IndexVariant {
|
||||
INDEX_GEO_NONE = 0,
|
||||
INDEX_GEO_INDIVIDUAL_LAT_LON,
|
||||
INDEX_GEO_COMBINED_LAT_LON,
|
||||
INDEX_GEO_COMBINED_LON_LAT
|
||||
};
|
||||
|
||||
public:
|
||||
IndexType type() const override {
|
||||
if (_variant == INDEX_GEO_COMBINED_LAT_LON ||
|
||||
_variant == INDEX_GEO_COMBINED_LON_LAT) {
|
||||
return TRI_IDX_TYPE_GEO1_INDEX;
|
||||
}
|
||||
|
||||
return TRI_IDX_TYPE_GEO2_INDEX;
|
||||
}
|
||||
|
||||
char const* typeName() const override {
|
||||
if (_variant == INDEX_GEO_COMBINED_LAT_LON ||
|
||||
_variant == INDEX_GEO_COMBINED_LON_LAT) {
|
||||
return "geo1";
|
||||
}
|
||||
return "geo2";
|
||||
}
|
||||
|
||||
IndexIterator* iteratorForCondition(transaction::Methods*,
|
||||
ManagedDocumentResult*,
|
||||
arangodb::aql::AstNode const*,
|
||||
arangodb::aql::Variable const*,
|
||||
bool) override;
|
||||
|
||||
bool allowExpansion() const override { return false; }
|
||||
|
||||
bool canBeDropped() const override { return true; }
|
||||
|
||||
bool isSorted() const override { return true; }
|
||||
|
||||
bool hasSelectivityEstimate() const override { return false; }
|
||||
|
||||
size_t memory() const override;
|
||||
|
||||
void toVelocyPack(VPackBuilder&, bool, bool) const override;
|
||||
// Uses default toVelocyPackFigures
|
||||
|
||||
bool matchesDefinition(VPackSlice const& info) const override;
|
||||
|
||||
int insert(transaction::Methods*, TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const&, bool isRollback) override;
|
||||
int insertRaw(rocksdb::WriteBatchWithIndex*, TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const&) override;
|
||||
int remove(transaction::Methods*, TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const&, bool isRollback) override;
|
||||
int removeRaw(rocksdb::WriteBatch*, TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const&) override;
|
||||
|
||||
int unload() override;
|
||||
|
||||
/// @brief looks up all points within a given radius
|
||||
GeoCoordinates* withinQuery(transaction::Methods*, double, double,
|
||||
double) const;
|
||||
|
||||
/// @brief looks up the nearest points
|
||||
GeoCoordinates* nearQuery(transaction::Methods*, double, double,
|
||||
size_t) const;
|
||||
|
||||
bool isSame(std::vector<std::string> const& location, bool geoJson) const {
|
||||
return (!_location.empty() && _location == location && _geoJson == geoJson);
|
||||
}
|
||||
|
||||
bool isSame(std::vector<std::string> const& latitude,
|
||||
std::vector<std::string> const& longitude) const {
|
||||
return (!_latitude.empty() && !_longitude.empty() &&
|
||||
_latitude == latitude && _longitude == longitude);
|
||||
}
|
||||
|
||||
private:
|
||||
/// @brief attribute paths
|
||||
std::vector<std::string> _location;
|
||||
std::vector<std::string> _latitude;
|
||||
std::vector<std::string> _longitude;
|
||||
|
||||
/// @brief the geo index variant (geo1 or geo2)
|
||||
IndexVariant _variant;
|
||||
|
||||
/// @brief whether the index is a geoJson index (latitude / longitude
|
||||
/// reversed)
|
||||
bool _geoJson;
|
||||
|
||||
/// @brief the actual geo index
|
||||
GeoIdx* _geoIndex;
|
||||
};
|
||||
} // namespace arangodb
|
||||
|
||||
namespace std {
|
||||
template <>
|
||||
class default_delete<GeoCoordinates> {
|
||||
public:
|
||||
void operator()(GeoCoordinates* result) {
|
||||
if (result != nullptr) {
|
||||
GeoIndex_CoordinatesFree(result);
|
||||
}
|
||||
}
|
||||
};
|
||||
} // namespace std
|
||||
|
||||
#endif
|
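The std::default_delete specialization above exists so a plain std::unique_ptr<GeoCoordinates> frees results through GeoIndex_CoordinatesFree without spelling out a deleter at every call site. A usage sketch, assuming the declarations from this header:

#include "RocksDBEngine/RocksDBGeoIndex.h"
#include <memory>

void example(GeoIdx* gi, GeoCoordinate* origin) {
  // freed via GeoIndex_CoordinatesFree when res goes out of scope
  std::unique_ptr<GeoCoordinates> res(
      GeoIndex_NearestCountPoints(gi, origin, 10));
  // res->coordinates and res->distances are usable here
}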
File diff suppressed because it is too large
@ -0,0 +1,114 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
|
||||
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author R. A. Parker
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/* GeoIdx.h - header file for GeoIdx algorithms */
|
||||
/* Version 2.2 25.11.2015 R. A. Parker */
|
||||
|
||||
#ifndef ARANGOD_ROCKSDB_GEO_INDEX_IMPL_H
|
||||
#define ARANGOD_ROCKSDB_GEO_INDEX_IMPL_H 1
|
||||
|
||||
#include "Basics/Common.h"
|
||||
#include <cstdint>
|
||||
|
||||
namespace arangodb { namespace rocksdbengine {
|
||||
|
||||
/* first the things that a user might want to change */
|
||||
|
||||
/* a GeoString - a signed type of at least 64 bits */
|
||||
typedef std::uint_fast64_t GeoString;
|
||||
|
||||
/* percentage growth of slot or slotslot tables */
|
||||
#define GeoIndexGROW 50
|
||||
|
||||
/* maximum number of points in a pot */
|
||||
/* *** note - must be even! */
|
||||
/* smaller takes more space but is a little faster */
|
||||
#define GeoIndexPOTSIZE 6
|
||||
|
||||
/* chooses the set of fixed points */
|
||||
#define GeoIndexFIXEDSET 6
|
||||
/* 1 is just the N pole (doesn't really work) */
|
||||
/* 2 is N and S pole - slow but OK */
|
||||
/* 3 is equilateral triangle on 0/180 long */
|
||||
/* 4 is four corners of a tetrahedron */
|
||||
/* 5 is trigonal bipyramid */
|
||||
/* 6 is the corners of octahedron (default) */
|
||||
/* 8 is eight corners of a cube */
|
||||
|
||||
/* size of max-dist integer. */
|
||||
/* 2 is 16-bit - smaller but slow when lots of points */
|
||||
/* within a few hundred meters of target */
|
||||
/* 4 is 32-bit - larger and fast even when points are */
|
||||
/* only centimeters apart. Default */
|
||||
#define GEOFIXLEN 4
|
||||
#if GEOFIXLEN == 2
|
||||
typedef std::uint16_t GeoFix;
|
||||
#endif
|
||||
#if GEOFIXLEN == 4
|
||||
typedef std::uint32_t GeoFix;
|
||||
#endif
|
||||
|
||||
/* If this #define is there, then the INDEXDUMP and */
|
||||
/* INDEXVALID functions are also available. These */
|
||||
/* are not needed for normal production versions */
|
||||
/* the INDEXDUMP function also prints the data, */
|
||||
/* assumed to be a character string, if DEBUG is */
|
||||
/* set to 2. */
|
||||
//#define TRI_GEO_DEBUG 1
|
||||
|
||||
typedef struct {
|
||||
double latitude;
|
||||
double longitude;
|
||||
uint64_t data;
|
||||
} GeoCoordinate;
|
||||
|
||||
typedef struct {
|
||||
size_t length;
|
||||
GeoCoordinate* coordinates;
|
||||
double* distances;
|
||||
} GeoCoordinates;
|
||||
|
||||
typedef void GeoIdx; /* to keep the structure private */
|
||||
typedef void GeoCursor; /* to keep the structure private */
|
||||
|
||||
GeoIdx* GeoIndex_new(uint64_t objectId, int numPots, int numSlots);
|
||||
void GeoIndex_free(GeoIdx* gi);
|
||||
double GeoIndex_distance(GeoCoordinate* c1, GeoCoordinate* c2);
|
||||
int GeoIndex_insert(GeoIdx* gi, GeoCoordinate* c);
|
||||
int GeoIndex_remove(GeoIdx* gi, GeoCoordinate* c);
|
||||
int GeoIndex_hint(GeoIdx* gi, int hint);
|
||||
GeoCoordinates* GeoIndex_PointsWithinRadius(GeoIdx* gi, GeoCoordinate* c,
|
||||
double d);
|
||||
GeoCoordinates* GeoIndex_NearestCountPoints(GeoIdx* gi, GeoCoordinate* c,
|
||||
int count);
|
||||
GeoCursor* GeoIndex_NewCursor(GeoIdx* gi, GeoCoordinate* c);
|
||||
GeoCoordinates* GeoIndex_ReadCursor(GeoCursor* gc, int count, bool returnDistances = true, double maxDistance = -1.0);
|
||||
void GeoIndex_CursorFree(GeoCursor* gc);
|
||||
void GeoIndex_CoordinatesFree(GeoCoordinates* clist);
|
||||
#ifdef TRI_GEO_DEBUG
|
||||
void GeoIndex_INDEXDUMP(GeoIdx* gi, FILE* f);
|
||||
int GeoIndex_INDEXVALID(GeoIdx* gi);
|
||||
#endif
|
||||
}}
|
||||
#endif
|
||||
/* end of GeoIdx.h */
|
|
@ -0,0 +1,117 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany
|
||||
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Jan Christoph Uhde
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// MUST BE ONLY INCLUDED IN RocksDBGeoIndexImpl.cpp after struct definitions!
// IT CAN NOT BE USED IN OTHER FILES!
// This file has only been added to keep Richard's code clean, so it is easier
// for him to spot relevant changes.
|
||||
|
||||
#ifndef ARANGOD_ROCKSDB_GEO_INDEX_IMPL_HELPER_H
|
||||
#define ARANGOD_ROCKSDB_GEO_INDEX_IMPL_HELPER_H 1
|
||||
|
||||
#include <RocksDBEngine/RocksDBGeoIndexImpl.h>
|
||||
|
||||
#include <RocksDBEngine/RocksDBCommon.h>
|
||||
#include <RocksDBEngine/RocksDBEngine.h>
|
||||
#include <RocksDBEngine/RocksDBKey.h>
|
||||
|
||||
#include <velocypack/Builder.h>
|
||||
#include <velocypack/Iterator.h>
|
||||
#include <velocypack/velocypack-aliases.h>
|
||||
|
||||
namespace arangodb {
|
||||
namespace rocksdbengine {
|
||||
|
||||
VPackBuilder CoordToVpack(GeoCoordinate* coord) {
|
||||
VPackBuilder rv{};
|
||||
rv.openArray();
|
||||
rv.add(VPackValue(coord->latitude)); // double
|
||||
rv.add(VPackValue(coord->longitude)); // double
|
||||
rv.add(VPackValue(coord->data)); // uint64_t
|
||||
rv.close();
|
||||
return rv;
|
||||
}
|
||||
|
||||
void VpackToCoord(VPackSlice const& slice, GeoCoordinate* gc) {
|
||||
TRI_ASSERT(slice.isArray() && slice.length() == 3);
|
||||
gc->latitude = slice.at(0).getDouble();
|
||||
gc->longitude = slice.at(1).getDouble();
|
||||
gc->data = slice.at(2).getUInt();
|
||||
}
|
||||
|
||||
VPackBuilder PotToVpack(GeoPot* pot) {
|
||||
VPackBuilder rv{};
|
||||
rv.openArray(); // open
|
||||
rv.add(VPackValue(pot->LorLeaf)); // int
|
||||
rv.add(VPackValue(pot->RorPoints)); // int
|
||||
rv.add(VPackValue(pot->middle)); // GeoString
|
||||
{
|
||||
rv.openArray(); // array GeoFix //uint 16/32
|
||||
for (std::size_t i = 0; i < GeoIndexFIXEDPOINTS; i++) {
|
||||
rv.add(VPackValue(pot->maxdist[i])); // uint 16/32
|
||||
}
|
||||
rv.close(); // close array
|
||||
}
|
||||
rv.add(VPackValue(pot->start)); // GeoString
|
||||
rv.add(VPackValue(pot->end)); // GeoString
|
||||
rv.add(VPackValue(pot->level)); // int
|
||||
{
|
||||
rv.openArray(); // array of int
|
||||
for (std::size_t i = 0; i < GeoIndexPOTSIZE; i++) {
|
||||
rv.add(VPackValue(pot->points[i])); // int
|
||||
}
|
||||
rv.close(); // close array
|
||||
}
|
||||
rv.close(); // close
|
||||
return rv;
|
||||
}
|
||||
|
||||
void VpackToPot(VPackSlice const& slice, GeoPot* rv) {
|
||||
TRI_ASSERT(slice.isArray());
|
||||
rv->LorLeaf = (int)slice.at(0).getInt(); // int
|
||||
rv->RorPoints = (int)slice.at(1).getInt(); // int
|
||||
rv->middle = slice.at(2).getUInt(); // GeoString
|
||||
{
|
||||
auto maxdistSlice = slice.at(3);
|
||||
TRI_ASSERT(maxdistSlice.isArray());
|
||||
TRI_ASSERT(maxdistSlice.length() == GeoIndexFIXEDPOINTS);
|
||||
for (std::size_t i = 0; i < GeoIndexFIXEDPOINTS; i++) {
|
||||
rv->maxdist[i] = (int)maxdistSlice.at(i).getUInt(); // uint 16/32
|
||||
}
|
||||
}
|
||||
rv->start = slice.at(4).getUInt(); // GeoString
|
||||
rv->end = slice.at(5).getUInt(); // GeoString
|
||||
rv->level = (int)slice.at(6).getInt(); // int
|
||||
{
|
||||
auto pointsSlice = slice.at(7);
|
||||
TRI_ASSERT(pointsSlice.isArray());
|
||||
TRI_ASSERT(pointsSlice.length() == GeoIndexPOTSIZE);
|
||||
for (std::size_t i = 0; i < GeoIndexPOTSIZE; i++) {
|
||||
rv->points[i] = (int)pointsSlice.at(i).getInt(); // int
|
||||
}
|
||||
}
|
||||
}
|
||||
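The two functions are intended to be exact inverses. A minimal round-trip check of the idea, with GeoPot abbreviated to two fields for brevity:

#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <cassert>

using namespace arangodb::velocypack;

struct MiniPot { int LorLeaf; int RorPoints; };

int main() {
  MiniPot in{3, 4};
  Builder b;
  b.openArray();
  b.add(Value(in.LorLeaf));
  b.add(Value(in.RorPoints));
  b.close();

  Slice s = b.slice();
  MiniPot out{(int)s.at(0).getInt(), (int)s.at(1).getInt()};
  assert(out.LorLeaf == in.LorLeaf && out.RorPoints == in.RorPoints);
}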
|
||||
} // namespace rocksdbengine
|
||||
} // namespace arangodb
|
||||
#endif
|
|
@ -59,7 +59,7 @@ RocksDBIndex::RocksDBIndex(TRI_idx_iid_t id, LogicalCollection* collection,
|
|||
}
|
||||
|
||||
RocksDBIndex::~RocksDBIndex() {
|
||||
if (_useCache && _cachePresent) {
|
||||
if (useCache()) {
|
||||
try {
|
||||
TRI_ASSERT(_cache != nullptr);
|
||||
TRI_ASSERT(CacheManagerFeature::MANAGER != nullptr);
|
||||
|
@ -76,7 +76,7 @@ void RocksDBIndex::load() {
|
|||
}
|
||||
|
||||
int RocksDBIndex::unload() {
|
||||
if (_useCache && _cachePresent) {
|
||||
if (useCache()) {
|
||||
disableCache();
|
||||
TRI_ASSERT(!_cachePresent);
|
||||
}
|
||||
|
@ -96,16 +96,18 @@ void RocksDBIndex::toVelocyPack(VPackBuilder& builder, bool withFigures,
|
|||
|
||||
void RocksDBIndex::createCache() {
|
||||
if (!_useCache || _cachePresent) {
|
||||
// we should not get here if we do not need the cache
|
||||
// we leave this if we do not need the cache
|
||||
// or if cache already created
|
||||
return;
|
||||
}
|
||||
|
||||
TRI_ASSERT(_useCache);
|
||||
TRI_ASSERT(_cache.get() == nullptr);
|
||||
TRI_ASSERT(CacheManagerFeature::MANAGER != nullptr);
|
||||
_cache = CacheManagerFeature::MANAGER->createCache(
|
||||
cache::CacheType::Transactional);
|
||||
_cachePresent = (_cache.get() != nullptr);
|
||||
TRI_ASSERT(_useCache);
|
||||
}
|
||||
|
||||
void RocksDBIndex::disableCache() {
|
||||
|
@ -120,6 +122,7 @@ void RocksDBIndex::disableCache() {
|
|||
CacheManagerFeature::MANAGER->destroyCache(_cache);
|
||||
_cache.reset();
|
||||
_cachePresent = false;
|
||||
TRI_ASSERT(_useCache);
|
||||
}
|
||||
|
||||
int RocksDBIndex::drop() {
|
||||
|
|
|
@ -32,7 +32,7 @@
|
|||
namespace rocksdb {
|
||||
class WriteBatch;
|
||||
class WriteBatchWithIndex;
|
||||
}
|
||||
} // namespace rocksdb
|
||||
|
||||
namespace arangodb {
|
||||
namespace cache {
|
||||
|
@ -43,7 +43,6 @@ class RocksDBComparator;
|
|||
|
||||
class RocksDBIndex : public Index {
|
||||
protected:
|
||||
|
||||
RocksDBIndex(TRI_idx_iid_t, LogicalCollection*,
|
||||
std::vector<std::vector<arangodb::basics::AttributeName>> const&
|
||||
attributes,
|
||||
|
@ -79,15 +78,16 @@ class RocksDBIndex : public Index {
|
|||
/// as an optimization for the non transactional fillIndex method
|
||||
virtual int insertRaw(rocksdb::WriteBatchWithIndex*, TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const&) = 0;
|
||||
|
||||
/// remove index elements and put it in the specified write batch. Should be used
|
||||
/// as an optimization for the non transactional fillIndex method
|
||||
|
||||
/// remove index elements and put it in the specified write batch. Should be
|
||||
/// used as an optimization for the non transactional fillIndex method
|
||||
virtual int removeRaw(rocksdb::WriteBatch*, TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const&) = 0;
|
||||
|
||||
protected:
|
||||
|
||||
void createCache();
|
||||
void disableCache();
|
||||
|
||||
protected:
|
||||
inline bool useCache() const { return (_useCache && _cachePresent); }
|
||||
|
||||
protected:
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include "RocksDBEngine/RocksDBEdgeIndex.h"
|
||||
#include "RocksDBEngine/RocksDBEngine.h"
|
||||
#include "RocksDBEngine/RocksDBFulltextIndex.h"
|
||||
#include "RocksDBEngine/RocksDBGeoIndex.h"
|
||||
#include "RocksDBEngine/RocksDBHashIndex.h"
|
||||
#include "RocksDBEngine/RocksDBPersistentIndex.h"
|
||||
#include "RocksDBEngine/RocksDBPrimaryIndex.h"
|
||||
|
@ -428,6 +429,11 @@ std::shared_ptr<Index> RocksDBIndexFactory::prepareIndexFromSlice(
|
|||
newIdx.reset(new arangodb::RocksDBPersistentIndex(iid, col, info));
|
||||
break;
|
||||
}
|
||||
case arangodb::Index::TRI_IDX_TYPE_GEO1_INDEX:
|
||||
case arangodb::Index::TRI_IDX_TYPE_GEO2_INDEX:{
|
||||
newIdx.reset(new arangodb::RocksDBGeoIndex(iid, col, info));
|
||||
break;
|
||||
}
|
||||
case arangodb::Index::TRI_IDX_TYPE_FULLTEXT_INDEX: {
|
||||
newIdx.reset(new arangodb::RocksDBFulltextIndex(iid, col, info));
|
||||
break;
|
||||
|
|
|
@ -417,9 +417,9 @@ int handleSyncKeysRocksDB(InitialSyncer& syncer,
|
|||
VPackSlice doc(mmdr.vpack());
|
||||
VPackSlice key = doc.get(StaticStrings::KeyString);
|
||||
if (key.compareString(lowKey.data(), lowKey.length()) < 0) {
|
||||
trx.remove(collectionName, key, options);
|
||||
trx.remove(collectionName, doc, options);
|
||||
} else if (key.compareString(highKey.data(), highKey.length()) > 0) {
|
||||
trx.remove(collectionName, key, options);
|
||||
trx.remove(collectionName, doc, options);
|
||||
}
|
||||
},
|
||||
UINT64_MAX);
|
||||
|
|
|
@ -77,6 +77,24 @@ RocksDBKey RocksDBKey::UniqueIndexValue(uint64_t indexId,
|
|||
return RocksDBKey(RocksDBEntryType::UniqueIndexValue, indexId, indexValues);
|
||||
}
|
||||
|
||||
RocksDBKey RocksDBKey::FulltextIndexValue(uint64_t indexId,
|
||||
arangodb::StringRef const& word,
|
||||
arangodb::StringRef const& primaryKey) {
|
||||
return RocksDBKey(RocksDBEntryType::FulltextIndexValue, indexId, word, primaryKey);
|
||||
}
|
||||
|
||||
RocksDBKey RocksDBKey::GeoIndexValue(uint64_t indexId, int32_t offset, bool isSlot) {
|
||||
RocksDBKey key(RocksDBEntryType::GeoIndexValue);
|
||||
size_t length = sizeof(char) + sizeof(indexId) + sizeof(offset);
|
||||
key._buffer.reserve(length);
|
||||
uint64ToPersistent(key._buffer, indexId);
|
||||
|
||||
uint64_t norm = uint64_t(offset) << 32;
|
||||
norm |= isSlot ? 0xFFU : 0; //encode slot|pot in lowest bit
|
||||
uint64ToPersistent(key._buffer, norm);
|
||||
return key;
|
||||
}
|
||||
|
||||
RocksDBKey RocksDBKey::View(TRI_voc_tick_t databaseId, TRI_voc_cid_t viewId) {
|
||||
return RocksDBKey(RocksDBEntryType::View, databaseId, viewId);
|
||||
}
|
||||
|
@ -93,12 +111,6 @@ RocksDBKey RocksDBKey::ReplicationApplierConfig(TRI_voc_tick_t databaseId) {
|
|||
return RocksDBKey(RocksDBEntryType::ReplicationApplierConfig, databaseId);
|
||||
}
|
||||
|
||||
RocksDBKey RocksDBKey::FulltextIndexValue(uint64_t indexId,
|
||||
arangodb::StringRef const& word,
|
||||
arangodb::StringRef const& primaryKey) {
|
||||
return RocksDBKey(RocksDBEntryType::FulltextIndexValue, indexId, word, primaryKey);
|
||||
}
|
||||
|
||||
// ========================= Member methods ===========================
|
||||
|
||||
RocksDBEntryType RocksDBKey::type(RocksDBKey const& key) {
|
||||
|
@ -173,10 +185,20 @@ VPackSlice RocksDBKey::indexedVPack(rocksdb::Slice const& slice) {
|
|||
return indexedVPack(slice.data(), slice.size());
|
||||
}
|
||||
|
||||
std::pair<bool, int32_t> RocksDBKey::geoValues(rocksdb::Slice const& slice) {
|
||||
TRI_ASSERT(slice.size() >= sizeof(char) + sizeof(uint64_t) * 2);
|
||||
RocksDBEntryType type = static_cast<RocksDBEntryType>(*slice.data());
|
||||
TRI_ASSERT(type == RocksDBEntryType::GeoIndexValue);
|
||||
uint64_t val = uint64FromPersistent(slice.data() + sizeof(char) + sizeof(uint64_t));
|
||||
bool isSlot = val & 0xFFU;// lowest byte is 0xFF if true
|
||||
return std::pair<bool, int32_t>(isSlot, (val >> 32));
|
||||
}
|
||||
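A round-trip illustration of this encoding, offset in the upper 32 bits and the slot/pot flag in the lowest byte (0xFF means slot), with illustrative helper names:

#include <cassert>
#include <cstdint>
#include <utility>

uint64_t packGeoValue(int32_t offset, bool isSlot) {
  uint64_t norm = uint64_t(uint32_t(offset)) << 32;  // offset in upper half
  norm |= isSlot ? 0xFFU : 0;                        // flag in lowest byte
  return norm;
}

std::pair<bool, int32_t> unpackGeoValue(uint64_t val) {
  bool isSlot = (val & 0xFFU) != 0;
  return {isSlot, int32_t(val >> 32)};
}

int main() {
  auto p = unpackGeoValue(packGeoValue(42, true));
  assert(p.first && p.second == 42);
}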
|
||||
std::string const& RocksDBKey::string() const { return _buffer; }
|
||||
|
||||
RocksDBKey::RocksDBKey(RocksDBEntryType type) : _type(type), _buffer() {
|
||||
switch (_type) {
|
||||
case RocksDBEntryType::GeoIndexValue:
|
||||
case RocksDBEntryType::SettingsValue: {
|
||||
_buffer.push_back(static_cast<char>(_type));
|
||||
break;
|
||||
|
@ -323,6 +345,8 @@ RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first,
|
|||
}
|
||||
}
|
||||
|
||||
// ====================== Private Methods ==========================
|
||||
|
||||
TRI_voc_tick_t RocksDBKey::databaseId(char const* data, size_t size) {
|
||||
TRI_ASSERT(data != nullptr);
|
||||
TRI_ASSERT(size >= sizeof(char));
|
||||
|
@ -365,7 +389,9 @@ TRI_voc_cid_t RocksDBKey::objectId(char const* data, size_t size) {
|
|||
case RocksDBEntryType::PrimaryIndexValue:
|
||||
case RocksDBEntryType::EdgeIndexValue:
|
||||
case RocksDBEntryType::IndexValue:
|
||||
case RocksDBEntryType::UniqueIndexValue: {
|
||||
case RocksDBEntryType::UniqueIndexValue:
|
||||
case RocksDBEntryType::GeoIndexValue:
|
||||
{
|
||||
TRI_ASSERT(size >= (sizeof(char) + (2 * sizeof(uint64_t))));
|
||||
return uint64FromPersistent(data + sizeof(char));
|
||||
}
|
||||
|
|
|
@ -103,6 +103,18 @@ class RocksDBKey {
|
|||
static RocksDBKey UniqueIndexValue(uint64_t indexId,
|
||||
VPackSlice const& indexValues);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Create a fully-specified key for the fulltext index
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
static RocksDBKey FulltextIndexValue(uint64_t indexId,
|
||||
arangodb::StringRef const& word,
|
||||
arangodb::StringRef const& primaryKey);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Create a fully-specified key for a geoIndexValue
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
static RocksDBKey GeoIndexValue(uint64_t indexId, int32_t offset, bool isSlot);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Create a fully-specified key for a view
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -122,13 +134,6 @@ class RocksDBKey {
|
|||
/// @brief Create a fully-specified key for a replication applier config
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
static RocksDBKey ReplicationApplierConfig(TRI_voc_tick_t databaseId);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Create a fully-specified key for the fulltext index
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
static RocksDBKey FulltextIndexValue(uint64_t indexId,
|
||||
arangodb::StringRef const& word,
|
||||
arangodb::StringRef const& primaryKey);
|
||||
|
||||
public:
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -170,8 +175,8 @@ class RocksDBKey {
|
|||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Extracts the objectId from a key
|
||||
///
|
||||
/// May be called only on the following key types: Document.
/// Other types will throw.
/// May be called only on the following key types: Document,
/// all kinds of index entries. Other types will throw.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
static uint64_t objectId(RocksDBKey const&);
|
||||
static uint64_t objectId(rocksdb::Slice const&);
|
||||
|
@ -219,6 +224,13 @@ class RocksDBKey {
|
|||
static VPackSlice indexedVPack(RocksDBKey const&);
|
||||
static VPackSlice indexedVPack(rocksdb::Slice const&);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Extracts the geo pot offset
|
||||
///
|
||||
/// May be called only on GeoIndexValues
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
static std::pair<bool, int32_t> geoValues(rocksdb::Slice const& slice);
|
||||
|
||||
public:
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Returns a reference to the full, constructed key
|
||||
|
@ -234,6 +246,8 @@ class RocksDBKey {
|
|||
arangodb::StringRef const& docKey, VPackSlice const& indexData);
|
||||
RocksDBKey(RocksDBEntryType type, uint64_t first,
|
||||
arangodb::StringRef const& second);
|
||||
RocksDBKey(RocksDBEntryType type, uint64_t first, std::string const& second,
|
||||
std::string const& third);
|
||||
RocksDBKey(RocksDBEntryType type, uint64_t first, arangodb::StringRef const& second,
|
||||
arangodb::StringRef const& third);
|
||||
|
||||
|
|
|
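
For orientation, a minimal usage sketch of the key helpers declared above (not part of this commit; `indexId`, `word`, and `primaryKey` are made-up values):

    #include "Basics/StringRef.h"
    #include "RocksDBEngine/RocksDBKey.h"

    using arangodb::RocksDBKey;
    using arangodb::StringRef;

    void exampleKeys() {
      uint64_t indexId = 42;              // hypothetical index object id
      StringRef word("database");         // indexed word
      StringRef primaryKey("doc-key-1");  // document key

      // fulltext entry: one type byte, then indexId, word and primary key
      RocksDBKey ft = RocksDBKey::FulltextIndexValue(indexId, word, primaryKey);

      // geo entry: one type byte, then indexId and the (offset, isSlot) payload
      RocksDBKey geo = RocksDBKey::GeoIndexValue(indexId, 7, true);
    }
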
@@ -27,17 +27,13 @@
 #include "RocksDBEngine/RocksDBCommon.h"
 #include "RocksDBEngine/RocksDBTypes.h"

-#include "Logger/Logger.h"
-
 using namespace arangodb;
 using namespace arangodb::rocksutils;
 using namespace arangodb::velocypack;

 const char RocksDBKeyBounds::_stringSeparator = '\0';

-RocksDBKeyBounds RocksDBKeyBounds::Empty() {
-  return RocksDBKeyBounds();
-}
+RocksDBKeyBounds RocksDBKeyBounds::Empty() { return RocksDBKeyBounds(); }

 RocksDBKeyBounds RocksDBKeyBounds::Databases() {
   return RocksDBKeyBounds(RocksDBEntryType::Database);

@@ -48,7 +44,8 @@ RocksDBKeyBounds RocksDBKeyBounds::DatabaseCollections(
   return RocksDBKeyBounds(RocksDBEntryType::Collection, databaseId);
 }

-RocksDBKeyBounds RocksDBKeyBounds::CollectionDocuments(uint64_t collectionObjectId) {
+RocksDBKeyBounds RocksDBKeyBounds::CollectionDocuments(
+    uint64_t collectionObjectId) {
   return RocksDBKeyBounds(RocksDBEntryType::Document, collectionObjectId);
 }

@@ -73,6 +70,34 @@ RocksDBKeyBounds RocksDBKeyBounds::UniqueIndex(uint64_t indexId) {
   return RocksDBKeyBounds(RocksDBEntryType::UniqueIndexValue, indexId);
 }

+RocksDBKeyBounds RocksDBKeyBounds::FulltextIndex(uint64_t indexId) {
+  return RocksDBKeyBounds(RocksDBEntryType::FulltextIndexValue, indexId);
+}
+
+RocksDBKeyBounds RocksDBKeyBounds::GeoIndex(uint64_t indexId) {
+  return RocksDBKeyBounds(RocksDBEntryType::GeoIndexValue, indexId);
+}
+
+RocksDBKeyBounds RocksDBKeyBounds::GeoIndex(uint64_t indexId, bool isSlot) {
+  RocksDBKeyBounds b;
+  size_t length = sizeof(char) + sizeof(uint64_t) * 2;
+  b._startBuffer.reserve(length);
+  b._startBuffer.push_back(static_cast<char>(RocksDBEntryType::GeoIndexValue));
+  uint64ToPersistent(b._startBuffer, indexId);
+
+  b._endBuffer.clear();
+  b._endBuffer.append(b._startBuffer);  // append common prefix
+
+  uint64_t norm = isSlot ? 0xFFU : 0;  // encode slot|pot in lowest bit
+  uint64ToPersistent(b._startBuffer, norm);  // little endian
+  norm = norm | (0xFFFFFFFFULL << 32);
+  uint64ToPersistent(b._endBuffer, norm);
+
+  b._start = rocksdb::Slice(b._startBuffer);
+  b._end = rocksdb::Slice(b._endBuffer);
+  return b;
+}
+
 RocksDBKeyBounds RocksDBKeyBounds::IndexRange(uint64_t indexId,
                                               VPackSlice const& left,
                                               VPackSlice const& right) {
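
Reading the new `GeoIndex(indexId, isSlot)` overload above: both bounds share the `<type byte><indexId>` prefix and differ only in a trailing uint64 whose low byte selects slot vs. pot entries and whose upper 32 bits span the whole offset range. A sketch of just that payload computation (my reading of the code, not part of the commit):

    #include <cstdint>

    // start/end payloads appended after the common <type><indexId> prefix
    void geoBoundPayloads(bool isSlot, uint64_t& startPart, uint64_t& endPart) {
      uint64_t norm = isSlot ? 0xFFU : 0;      // slot vs. pot marker, low byte
      startPart = norm;                        // offset bits zeroed: smallest key
      endPart = norm | (0xFFFFFFFFULL << 32);  // offset bits maxed: largest key
    }
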
@@ -94,40 +119,53 @@ RocksDBKeyBounds RocksDBKeyBounds::CounterValues() {
   return RocksDBKeyBounds(RocksDBEntryType::CounterValue);
 }

-RocksDBKeyBounds RocksDBKeyBounds::FulltextIndex(uint64_t indexId) {
-  return RocksDBKeyBounds(RocksDBEntryType::FulltextIndexValue, indexId);
-}
-
-RocksDBKeyBounds RocksDBKeyBounds::FulltextIndexPrefix(uint64_t indexId,
-                                                       arangodb::StringRef const& word) {
+RocksDBKeyBounds RocksDBKeyBounds::FulltextIndexPrefix(
+    uint64_t indexId, arangodb::StringRef const& word) {
   // I did not want to pass a bool to the constructor for this
   RocksDBKeyBounds bounds;
-  size_t length =
-      sizeof(char) + sizeof(uint64_t) + word.size();
+  size_t length = sizeof(char) + sizeof(uint64_t) + word.size();
   bounds._startBuffer.reserve(length);
-  bounds._startBuffer.push_back(static_cast<char>(RocksDBEntryType::FulltextIndexValue));
+  bounds._startBuffer.push_back(
+      static_cast<char>(RocksDBEntryType::FulltextIndexValue));
   uint64ToPersistent(bounds._startBuffer, indexId);
   bounds._startBuffer.append(word.data(), word.length());

   bounds._endBuffer.clear();
   bounds._endBuffer.append(bounds._startBuffer);
-  bounds._endBuffer.push_back(0xFF);// invalid UTF-8 character, higher than with memcmp
+  bounds._endBuffer.push_back(
+      0xFFU);  // invalid UTF-8 character, compares higher under memcmp
+
+  bounds._start = rocksdb::Slice(bounds._startBuffer);
+  bounds._end = rocksdb::Slice(bounds._endBuffer);

   return bounds;
 }

-RocksDBKeyBounds RocksDBKeyBounds::FulltextIndexComplete(uint64_t indexId,
-                                                         arangodb::StringRef const& word) {
+RocksDBKeyBounds RocksDBKeyBounds::FulltextIndexComplete(
+    uint64_t indexId, arangodb::StringRef const& word) {
   return RocksDBKeyBounds(RocksDBEntryType::FulltextIndexValue, indexId, word);
 }

 // ============================ Member Methods ==============================

-rocksdb::Slice const RocksDBKeyBounds::start() const {
-  return rocksdb::Slice(_startBuffer);
+RocksDBKeyBounds& RocksDBKeyBounds::operator=(RocksDBKeyBounds const& other) {
+  _type = other._type;
+  _startBuffer = other._startBuffer;
+  _endBuffer = other._endBuffer;
+  _start = rocksdb::Slice(_startBuffer);
+  _end = rocksdb::Slice(_endBuffer);
+
+  return *this;
 }

-rocksdb::Slice const RocksDBKeyBounds::end() const {
-  return rocksdb::Slice(_endBuffer);
+rocksdb::Slice const& RocksDBKeyBounds::start() const {
+  TRI_ASSERT(_start.size() > 0);
+  return _start;
+}
+
+rocksdb::Slice const& RocksDBKeyBounds::end() const {
+  TRI_ASSERT(_end.size() > 0);
+  return _end;
 }

 uint64_t RocksDBKeyBounds::objectId() const {
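
The new copy-assignment operator and the cached `_start`/`_end` slices belong together: `rocksdb::Slice` is a non-owning pointer/length view into the `std::string` buffers, so after copying the buffers the slices must be rebound or they would keep pointing into the source object. The pitfall in miniature (illustrative only, not commit code):

    #include <rocksdb/slice.h>
    #include <string>

    struct Bounds {
      std::string buf;
      rocksdb::Slice view;

      explicit Bounds(std::string s) : buf(std::move(s)), view(buf) {}

      Bounds& operator=(Bounds const& other) {
        buf = other.buf;
        view = rocksdb::Slice(buf);  // rebind to our own buffer, not other.buf
        return *this;
      }
    };
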
@@ -141,7 +179,7 @@ uint64_t RocksDBKeyBounds::objectId() const {
       TRI_ASSERT(_startBuffer.size() >= (sizeof(char) + sizeof(uint64_t)));
       return uint64FromPersistent(_startBuffer.data() + sizeof(char));
     }

     default:
       THROW_ARANGO_EXCEPTION(TRI_ERROR_TYPE_ERROR);
   }

@@ -149,7 +187,7 @@ uint64_t RocksDBKeyBounds::objectId() const {

 // constructor for an empty bound. do not use for anything but to
 // default-construct a key bound!
 RocksDBKeyBounds::RocksDBKeyBounds()
     : _type(RocksDBEntryType::Database), _startBuffer(), _endBuffer() {}

 RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type)

@@ -180,6 +218,8 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type)
     default:
       THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
   }
+  _start = rocksdb::Slice(_startBuffer);
+  _end = rocksdb::Slice(_endBuffer);
 }

 RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first)

@@ -191,9 +231,9 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first)
       // 7 + 8-byte object ID of index + VPack array with index value(s) ....
       // prefix is the same for non-unique indexes
       // static slices with an array with one entry
-      VPackSlice min("\x02\x03\x1e");// [minSlice]
-      VPackSlice max("\x02\x03\x1f");// [maxSlice]
+      VPackSlice min("\x02\x03\x1e");  // [minSlice]
+      VPackSlice max("\x02\x03\x1f");  // [maxSlice]

       size_t length = sizeof(char) + sizeof(uint64_t) + min.byteSize();
       _startBuffer.reserve(length);
       _startBuffer.push_back(static_cast<char>(_type));

@@ -207,9 +247,10 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first)
       _endBuffer.append((char*)(max.begin()), max.byteSize());
       break;
     }

     case RocksDBEntryType::Collection:
-    case RocksDBEntryType::Document:{
+    case RocksDBEntryType::Document:
+    case RocksDBEntryType::GeoIndexValue: {
       // Collections are stored as follows:
       // Key: 1 + 8-byte ArangoDB database ID + 8-byte ArangoDB collection ID
       //

@@ -222,18 +263,17 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first)
       // append common prefix
       _endBuffer.clear();
       _endBuffer.append(_startBuffer);

      // construct min max
       uint64ToPersistent(_startBuffer, 0);
       uint64ToPersistent(_endBuffer, UINT64_MAX);
       break;
     }

     case RocksDBEntryType::PrimaryIndexValue:
     case RocksDBEntryType::EdgeIndexValue:
-    case RocksDBEntryType::View:
-    case RocksDBEntryType::FulltextIndexValue: {
+    case RocksDBEntryType::FulltextIndexValue:
+    case RocksDBEntryType::View: {
       size_t length = sizeof(char) + sizeof(uint64_t);
       _startBuffer.reserve(length);
       _startBuffer.push_back(static_cast<char>(_type));

@@ -242,12 +282,15 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first)
       _endBuffer.clear();
       _endBuffer.append(_startBuffer);
       nextPrefix(_endBuffer);

       break;
     }

     default:
       THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
   }
+  _start = rocksdb::Slice(_startBuffer);
+  _end = rocksdb::Slice(_endBuffer);
 }

 RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,

@@ -273,6 +316,8 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
     default:
       THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
   }
+  _start = rocksdb::Slice(_startBuffer);
+  _end = rocksdb::Slice(_endBuffer);
 }

 RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,

@@ -308,6 +353,8 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
     default:
       THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
   }
+  _start = rocksdb::Slice(_startBuffer);
+  _end = rocksdb::Slice(_endBuffer);
 }

 void RocksDBKeyBounds::nextPrefix(std::string& s) {
@@ -25,11 +25,11 @@
 #ifndef ARANGO_ROCKSDB_ROCKSDB_KEY_BOUNDS_H
 #define ARANGO_ROCKSDB_ROCKSDB_KEY_BOUNDS_H 1

-#include <rocksdb/slice.h>
 #include "Basics/Common.h"
 #include "Basics/StringRef.h"
 #include "RocksDBEngine/RocksDBTypes.h"
 #include "VocBase/vocbase.h"
+#include <rocksdb/slice.h>

 #include <velocypack/Slice.h>
 #include <velocypack/velocypack-aliases.h>

@@ -59,7 +59,8 @@ class RocksDBKeyBounds {
   static RocksDBKeyBounds CollectionDocuments(uint64_t collectionObjectId);

   //////////////////////////////////////////////////////////////////////////////
-  /// @brief Bounds for all index-entries belonging to a specified primary index
+  /// @brief Bounds for all index-entries belonging to a specified primary
+  /// index
   //////////////////////////////////////////////////////////////////////////////
   static RocksDBKeyBounds PrimaryIndex(uint64_t indexId);

@@ -69,14 +70,15 @@ class RocksDBKeyBounds {
   static RocksDBKeyBounds EdgeIndex(uint64_t indexId);

   //////////////////////////////////////////////////////////////////////////////
-  /// @brief Bounds for all index-entries belonging to a specified edge index related
-  /// to the specified vertex
+  /// @brief Bounds for all index-entries belonging to a specified edge index
+  /// related to the specified vertex
   //////////////////////////////////////////////////////////////////////////////
   static RocksDBKeyBounds EdgeIndexVertex(uint64_t indexId,
                                           arangodb::StringRef const& vertexId);

   //////////////////////////////////////////////////////////////////////////////
-  /// @brief Bounds for all index-entries belonging to a specified non-unique index
+  /// @brief Bounds for all index-entries belonging to a specified non-unique
+  /// index
   //////////////////////////////////////////////////////////////////////////////
   static RocksDBKeyBounds IndexEntries(uint64_t indexId);

@@ -85,6 +87,17 @@ class RocksDBKeyBounds {
   //////////////////////////////////////////////////////////////////////////////
   static RocksDBKeyBounds UniqueIndex(uint64_t indexId);

+  //////////////////////////////////////////////////////////////////////////////
+  /// @brief Bounds for all entries of a fulltext index
+  //////////////////////////////////////////////////////////////////////////////
+  static RocksDBKeyBounds FulltextIndex(uint64_t indexId);
+
+  //////////////////////////////////////////////////////////////////////////////
+  /// @brief Bounds for all entries belonging to a specified geo index
+  //////////////////////////////////////////////////////////////////////////////
+  static RocksDBKeyBounds GeoIndex(uint64_t indexId);
+  static RocksDBKeyBounds GeoIndex(uint64_t indexId, bool isSlot);
+
   //////////////////////////////////////////////////////////////////////////////
   /// @brief Bounds for all index-entries within a value range belonging to a
   /// specified non-unique index

@@ -104,23 +117,18 @@ class RocksDBKeyBounds {
   /// @brief Bounds for all views belonging to a specified database
   //////////////////////////////////////////////////////////////////////////////
   static RocksDBKeyBounds DatabaseViews(TRI_voc_tick_t databaseId);

   //////////////////////////////////////////////////////////////////////////////
   /// @brief Bounds for all counter values
   //////////////////////////////////////////////////////////////////////////////
   static RocksDBKeyBounds CounterValues();

-  //////////////////////////////////////////////////////////////////////////////
-  /// @brief Bounds for all entries of a fulltext index
-  //////////////////////////////////////////////////////////////////////////////
-  static RocksDBKeyBounds FulltextIndex(uint64_t indexId);
-
   //////////////////////////////////////////////////////////////////////////////
   /// @brief Bounds for all entries of a fulltext index, matching prefixes
   //////////////////////////////////////////////////////////////////////////////
   static RocksDBKeyBounds FulltextIndexPrefix(uint64_t,
                                               arangodb::StringRef const&);

   //////////////////////////////////////////////////////////////////////////////
   /// @brief Bounds for all entries of a fulltext index, matching the word
   //////////////////////////////////////////////////////////////////////////////

@@ -128,13 +136,15 @@ class RocksDBKeyBounds {
                                                arangodb::StringRef const&);

  public:
+  RocksDBKeyBounds& operator=(RocksDBKeyBounds const& other);
+
   //////////////////////////////////////////////////////////////////////////////
   /// @brief Returns the left bound slice.
   ///
   /// Forward iterators may use it->Seek(bound.start()) and reverse iterators
   /// may check that the current key is greater than this value.
   //////////////////////////////////////////////////////////////////////////////
-  rocksdb::Slice const start() const;
+  rocksdb::Slice const& start() const;

   //////////////////////////////////////////////////////////////////////////////
   /// @brief Returns the right bound slice.

@@ -142,8 +152,8 @@ class RocksDBKeyBounds {
   /// Reverse iterators may use it->SeekForPrev(bound.end()) and forward
   /// iterators may check that the current key is less than this value.
   //////////////////////////////////////////////////////////////////////////////
-  rocksdb::Slice const end() const;
+  rocksdb::Slice const& end() const;

   //////////////////////////////////////////////////////////////////////////////
   /// @brief Returns the object ID for these bounds
   ///

@@ -168,6 +178,8 @@ class RocksDBKeyBounds {
   RocksDBEntryType _type;
   std::string _startBuffer;
   std::string _endBuffer;
+  rocksdb::Slice _end;
+  rocksdb::Slice _start;
 };

 }  // namespace arangodb
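
A typical consumer of these bounds seeks to `start()` and stops at `end()`; a sketch against plain RocksDB (the `db` handle and `objectId` are placeholders, not taken from the patch):

    #include "RocksDBEngine/RocksDBKeyBounds.h"
    #include <rocksdb/db.h>
    #include <memory>

    void scanDocuments(rocksdb::DB* db, uint64_t objectId) {
      auto bounds = arangodb::RocksDBKeyBounds::CollectionDocuments(objectId);
      std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(rocksdb::ReadOptions()));
      for (it->Seek(bounds.start());
           it->Valid() && it->key().compare(bounds.end()) <= 0; it->Next()) {
        // consume it->key() / it->value()
      }
    }
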
@@ -116,7 +116,6 @@ RocksDBAllIndexIterator::RocksDBAllIndexIterator(
     LogicalCollection* collection, transaction::Methods* trx,
     ManagedDocumentResult* mmdr, RocksDBPrimaryIndex const* index, bool reverse)
     : IndexIterator(collection, trx, mmdr, index),
-      _cmp(index->_cmp),
       _reverse(reverse),
       _bounds(RocksDBKeyBounds::PrimaryIndex(index->objectId())) {
   // acquire rocksdb transaction

@@ -126,6 +125,7 @@ RocksDBAllIndexIterator::RocksDBAllIndexIterator(
   rocksdb::Transaction* rtrx = state->rocksTransaction();
   auto const& options = state->readOptions();
   TRI_ASSERT(options.snapshot != nullptr);
+  TRI_ASSERT(options.prefix_same_as_start);

   _iterator.reset(rtrx->GetIterator(options));
   if (reverse) {

@@ -138,7 +138,7 @@ RocksDBAllIndexIterator::RocksDBAllIndexIterator(
 bool RocksDBAllIndexIterator::next(TokenCallback const& cb, size_t limit) {
   TRI_ASSERT(_trx->state()->isRunning());

-  if (limit == 0 || !_iterator->Valid() || outOfRange()) {
+  if (limit == 0 || !_iterator->Valid()) {
     // No limit no data, or we are actually done. The last call should have
     // returned false
     TRI_ASSERT(limit > 0);  // Someone called with limit == 0. Api broken

@@ -157,7 +157,7 @@ bool RocksDBAllIndexIterator::next(TokenCallback const& cb, size_t limit) {
       _iterator->Next();
     }

-    if (!_iterator->Valid() || outOfRange()) {
+    if (!_iterator->Valid()) {
       return false;
     }
   }

@@ -170,7 +170,7 @@ bool RocksDBAllIndexIterator::nextWithKey(TokenKeyCallback const& cb,
                                           size_t limit) {
   TRI_ASSERT(_trx->state()->isRunning());

-  if (limit == 0 || !_iterator->Valid() || outOfRange()) {
+  if (limit == 0 || !_iterator->Valid()) {
     // No limit no data, or we are actually done. The last call should have
     // returned false
     TRI_ASSERT(limit > 0);  // Someone called with limit == 0. Api broken

@@ -188,7 +188,7 @@ bool RocksDBAllIndexIterator::nextWithKey(TokenKeyCallback const& cb,
     } else {
       _iterator->Next();
     }
-    if (!_iterator->Valid() || outOfRange()) {
+    if (!_iterator->Valid()) {
       return false;
     }
   }

@@ -217,16 +217,6 @@ void RocksDBAllIndexIterator::reset() {
   }
 }

-bool RocksDBAllIndexIterator::outOfRange() const {
-  TRI_ASSERT(_trx->state()->isRunning());
-
-  if (_reverse) {
-    return _cmp->Compare(_iterator->key(), _bounds.start()) < 0;
-  } else {
-    return _cmp->Compare(_iterator->key(), _bounds.end()) > 0;
-  }
-}
-
 // ================ Any Iterator ================

 RocksDBAnyIndexIterator::RocksDBAnyIndexIterator(
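
The deleted `outOfRange()` checks lean on the read options asserted above: with `prefix_same_as_start` set (and a fixed-size prefix extractor configured on the column family) the iterator itself never leaves the `<type><objectId>` prefix, so `Valid()` already implies in-range. Roughly how such an extractor would be configured (a sketch under that assumption, not code from this commit):

    #include <rocksdb/options.h>
    #include <rocksdb/slice_transform.h>

    rocksdb::Options makePrefixOptions() {
      rocksdb::Options opts;
      // one type byte plus the 8-byte object id, matching the key layout above
      opts.prefix_extractor.reset(
          rocksdb::NewFixedPrefixTransform(sizeof(char) + sizeof(uint64_t)));
      return opts;
    }
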
@@ -271,7 +261,7 @@ RocksDBAnyIndexIterator::RocksDBAnyIndexIterator(
 bool RocksDBAnyIndexIterator::next(TokenCallback const& cb, size_t limit) {
   TRI_ASSERT(_trx->state()->isRunning());

-  if (limit == 0 || !_iterator->Valid() || outOfRange()) {
+  if (limit == 0 || !_iterator->Valid()) {
     // No limit no data, or we are actually done. The last call should have
     // returned false
     TRI_ASSERT(limit > 0);  // Someone called with limit == 0. Api broken

@@ -285,7 +275,7 @@ bool RocksDBAnyIndexIterator::next(TokenCallback const& cb, size_t limit) {
     --limit;
     _returned++;
     _iterator->Next();
-    if (!_iterator->Valid() || outOfRange()) {
+    if (!_iterator->Valid()) {
       if (_returned < _total) {
         _iterator->Seek(_bounds.start());
         continue;

@@ -298,11 +288,6 @@ bool RocksDBAnyIndexIterator::next(TokenCallback const& cb, size_t limit) {

 void RocksDBAnyIndexIterator::reset() { _iterator->Seek(_bounds.start()); }

-bool RocksDBAnyIndexIterator::outOfRange() const {
-  TRI_ASSERT(_trx->state()->isRunning());
-  return _cmp->Compare(_iterator->key(), _bounds.end()) > 0;
-}
-
 // ================ PrimaryIndex ================

 RocksDBPrimaryIndex::RocksDBPrimaryIndex(

@@ -460,8 +445,8 @@ int RocksDBPrimaryIndex::insert(transaction::Methods* trx,
   return TRI_ERROR_NO_ERROR;
 }

-int RocksDBPrimaryIndex::insertRaw(rocksdb::WriteBatchWithIndex*,
-                                   TRI_voc_rid_t, VPackSlice const&) {
+int RocksDBPrimaryIndex::insertRaw(rocksdb::WriteBatchWithIndex*, TRI_voc_rid_t,
+                                   VPackSlice const&) {
   THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
 }

@@ -503,10 +488,10 @@ int RocksDBPrimaryIndex::remove(transaction::Methods* trx,
 }

 /// optimization for truncateNoTrx, never called in fillIndex
-int RocksDBPrimaryIndex::removeRaw(rocksdb::WriteBatch* batch,
-                                   TRI_voc_rid_t, VPackSlice const& slice) {
+int RocksDBPrimaryIndex::removeRaw(rocksdb::WriteBatch* batch, TRI_voc_rid_t,
+                                   VPackSlice const& slice) {
   auto key = RocksDBKey::PrimaryIndexValue(
       _objectId, StringRef(slice.get(StaticStrings::KeyString)));
   batch->Delete(key.string());
   return TRI_ERROR_NO_ERROR;
 }
@@ -93,9 +93,6 @@ class RocksDBAllIndexIterator final : public IndexIterator {
   void seek(StringRef const& key);

  private:
-  bool outOfRange() const;
-
-  RocksDBComparator const* _cmp;
   bool const _reverse;
   std::unique_ptr<rocksdb::Iterator> _iterator;
   RocksDBKeyBounds _bounds;

@@ -180,7 +177,7 @@ class RocksDBPrimaryIndex final : public RocksDBIndex {

   int remove(transaction::Methods*, TRI_voc_rid_t,
              arangodb::velocypack::Slice const&, bool isRollback) override;

   /// optimization for truncateNoTrx, never called in fillIndex
   int removeRaw(rocksdb::WriteBatch*, TRI_voc_rid_t,
                 arangodb::velocypack::Slice const&) override;

@@ -211,9 +208,9 @@ class RocksDBPrimaryIndex final : public RocksDBIndex {
   void invokeOnAllElements(
       transaction::Methods* trx,
       std::function<bool(DocumentIdentifierToken const&)> callback) const;

   int cleanup() override;

  private:
   /// @brief create the iterator, for a single attribute, IN operator
   IndexIterator* createInIterator(transaction::Methods*, ManagedDocumentResult*,
@@ -163,6 +163,12 @@ int RocksDBTransactionCollection::use(int nestingLevel) {
     _collection = _transaction->vocbase()->useCollection(_cid, status);
     if (_collection != nullptr) {
       _usageLocked = true;
+
+      // geo index needs exclusive write access
+      RocksDBCollection* rc = static_cast<RocksDBCollection*>(_collection->getPhysical());
+      if (AccessMode::isWrite(_accessType) && rc->hasGeoIndex()) {
+        _accessType = AccessMode::Type::EXCLUSIVE;
+      }
     }
   } else {
     // use without usage-lock (lock already set externally)

@@ -187,11 +193,9 @@ int RocksDBTransactionCollection::use(int nestingLevel) {
       return TRI_ERROR_ARANGO_READ_ONLY;
     }

-    _initialNumberDocuments =
-        static_cast<RocksDBCollection*>(_collection->getPhysical())
-            ->numberDocuments();
-    _revision =
-        static_cast<RocksDBCollection*>(_collection->getPhysical())->revision();
+    RocksDBCollection* rc = static_cast<RocksDBCollection*>(_collection->getPhysical());
+    _initialNumberDocuments = rc->numberDocuments();
+    _revision = rc->revision();
   }

   if (AccessMode::isWriteOrExclusive(_accessType) && !isLocked()) {
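
The geo-index branch above promotes a plain write transaction to an exclusive one before any locks are taken; the decision itself is a pure function of the requested mode and the collection, roughly (illustrative, with assumed enum values):

    enum class Access { READ, WRITE, EXCLUSIVE };

    Access effectiveAccess(Access requested, bool hasGeoIndex) {
      // geo index writes are not safe under concurrent writers
      if (requested == Access::WRITE && hasGeoIndex) {
        return Access::EXCLUSIVE;
      }
      return requested;
    }
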
@@ -112,10 +112,10 @@ RocksDBTransactionState::~RocksDBTransactionState() {

 /// @brief start a transaction
 Result RocksDBTransactionState::beginTransaction(transaction::Hints hints) {
-  LOG_TRX(this, _nestingLevel) << "beginning " << AccessMode::typeString(_type)
-                               << " transaction";
+  LOG_TRX(this, _nestingLevel)
+      << "beginning " << AccessMode::typeString(_type) << " transaction";

   if (_nestingLevel == 0) {
     // set hints
     _hints = hints;
   }

@@ -163,6 +163,7 @@ Result RocksDBTransactionState::beginTransaction(transaction::Hints hints) {
         _rocksWriteOptions, rocksdb::TransactionOptions()));
     _rocksTransaction->SetSnapshot();
     _rocksReadOptions.snapshot = _rocksTransaction->GetSnapshot();
+    _rocksReadOptions.prefix_same_as_start = true;

     if (!isReadOnlyTransaction() &&
         !hasHint(transaction::Hints::Hint::SINGLE_OPERATION)) {

@@ -170,7 +171,7 @@ Result RocksDBTransactionState::beginTransaction(transaction::Hints hints) {
           RocksDBLogValue::BeginTransaction(_vocbase->id(), _id);
       _rocksTransaction->PutLogData(header.slice());
     }

   } else {
     TRI_ASSERT(_status == transaction::Status::RUNNING);
   }

@@ -181,8 +182,8 @@ Result RocksDBTransactionState::beginTransaction(transaction::Hints hints) {
 /// @brief commit a transaction
 Result RocksDBTransactionState::commitTransaction(
     transaction::Methods* activeTrx) {
-  LOG_TRX(this, _nestingLevel) << "committing " << AccessMode::typeString(_type)
-                               << " transaction";
+  LOG_TRX(this, _nestingLevel)
+      << "committing " << AccessMode::typeString(_type) << " transaction";

   TRI_ASSERT(_status == transaction::Status::RUNNING);
   TRI_IF_FAILURE("TransactionWriteCommitMarker") {

@@ -203,13 +204,18 @@ Result RocksDBTransactionState::commitTransaction(

     // TODO wait for response on github issue to see how we can use the
     // sequence number
-    double t1 = TRI_microtime();
+    // double t1 = TRI_microtime();
     result = rocksutils::convertStatus(_rocksTransaction->Commit());

-    double t2 = TRI_microtime();
-    if (t2 - t1 > 0.25) {
-      LOG_TOPIC(ERR, Logger::FIXME) << "COMMIT TOOK: " << (t2 - t1) << " S. NUMINSERTS: " << _numInserts << ", NUMUPDATES: " << _numUpdates << ", NUMREMOVES: " << _numRemoves << ", TRANSACTIONSIZE: " << _transactionSize;
-    }
+    // double t2 = TRI_microtime();
+    // if (t2 - t1 > 0.25) {
+    //   LOG_TOPIC(ERR, Logger::FIXME)
+    //       << "COMMIT TOOK: " << (t2 - t1)
+    //       << " S. NUMINSERTS: " << _numInserts
+    //       << ", NUMUPDATES: " << _numUpdates
+    //       << ", NUMREMOVES: " << _numRemoves
+    //       << ", TRANSACTIONSIZE: " << _transactionSize;
+    // }
     rocksdb::SequenceNumber latestSeq =
         rocksutils::globalRocksDB()->GetLatestSequenceNumber();
     if (!result.ok()) {
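
If the commented-out commit timing is ever revived, an RAII guard would keep it exception-safe; a sketch (the 0.25 s threshold mirrors the disabled code, everything else is assumption):

    #include <chrono>
    #include <iostream>

    struct CommitTimer {
      std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
      ~CommitTimer() {
        // measured on destruction, so every exit path is covered
        double secs = std::chrono::duration<double>(
                          std::chrono::steady_clock::now() - start).count();
        if (secs > 0.25) {
          std::cerr << "COMMIT TOOK: " << secs << " s\n";
        }
      }
    };
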
@@ -269,8 +275,8 @@ Result RocksDBTransactionState::commitTransaction(
 /// @brief abort and rollback a transaction
 Result RocksDBTransactionState::abortTransaction(
     transaction::Methods* activeTrx) {
-  LOG_TRX(this, _nestingLevel) << "aborting " << AccessMode::typeString(_type)
-                               << " transaction";
+  LOG_TRX(this, _nestingLevel)
+      << "aborting " << AccessMode::typeString(_type) << " transaction";
   TRI_ASSERT(_status == transaction::Status::RUNNING);
   Result result;
@@ -72,6 +72,20 @@ static rocksdb::Slice UniqueIndexValue(
     reinterpret_cast<std::underlying_type<RocksDBEntryType>::type*>(
         &uniqueIndexValue),
     1);

+static RocksDBEntryType fulltextIndexValue =
+    RocksDBEntryType::FulltextIndexValue;
+static rocksdb::Slice FulltextIndexValue(
+    reinterpret_cast<std::underlying_type<RocksDBEntryType>::type*>(
+        &fulltextIndexValue),
+    1);
+
+static RocksDBEntryType geoIndexValue =
+    RocksDBEntryType::GeoIndexValue;
+static rocksdb::Slice GeoIndexValue(
+    reinterpret_cast<std::underlying_type<RocksDBEntryType>::type*>(
+        &geoIndexValue),
+    1);
+
 static RocksDBEntryType view = RocksDBEntryType::View;
 static rocksdb::Slice View(

@@ -89,13 +103,6 @@ static rocksdb::Slice ReplicationApplierConfig(
     reinterpret_cast<std::underlying_type<RocksDBEntryType>::type*>(
         &replicationApplierConfig),
     1);
-
-static RocksDBEntryType fulltextIndexValue =
-    RocksDBEntryType::FulltextIndexValue;
-static rocksdb::Slice FulltextIndexValue(
-    reinterpret_cast<std::underlying_type<RocksDBEntryType>::type*>(
-        &fulltextIndexValue),
-    1);
 }

 rocksdb::Slice const& arangodb::rocksDBSlice(RocksDBEntryType const& type) {

@@ -116,14 +123,16 @@ rocksdb::Slice const& arangodb::rocksDBSlice(RocksDBEntryType const& type) {
       return IndexValue;
     case RocksDBEntryType::UniqueIndexValue:
       return UniqueIndexValue;
+    case RocksDBEntryType::FulltextIndexValue:
+      return FulltextIndexValue;
+    case RocksDBEntryType::GeoIndexValue:
+      return GeoIndexValue;
     case RocksDBEntryType::View:
       return View;
     case RocksDBEntryType::SettingsValue:
       return SettingsValue;
     case RocksDBEntryType::ReplicationApplierConfig:
       return ReplicationApplierConfig;
-    case RocksDBEntryType::FulltextIndexValue:
-      return FulltextIndexValue;
   }

   return Document;  // avoids warning - error slice instead?!
@@ -47,7 +47,8 @@ enum class RocksDBEntryType : char {
   View = '8',
   SettingsValue = '9',
   ReplicationApplierConfig = ':',
-  FulltextIndexValue = ';'
+  FulltextIndexValue = ';',
+  GeoIndexValue = '<'
 };

 enum class RocksDBLogType : char {
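
Each entry type is a single printable byte, so the exclusive upper bound for a full-type scan is simply the next byte value, which is what `nextPrefix()` exploits. In miniature (a sketch of the scheme, not commit code):

    #include <string>
    #include <utility>

    std::pair<std::string, std::string> typeRange(char tag) {
      std::string start(1, tag);
      std::string end(1, static_cast<char>(tag + 1));  // e.g. '<' (geo) -> '='
      return {start, end};
    }
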
@@ -42,6 +42,7 @@
 #include "VocBase/LogicalCollection.h"

+#include <rocksdb/iterator.h>
 #include <rocksdb/options.h>
 #include <rocksdb/utilities/transaction.h>
 #include <rocksdb/utilities/transaction_db.h>
 #include <rocksdb/utilities/write_batch_with_index.h>

@@ -92,7 +93,10 @@ RocksDBVPackIndexIterator::RocksDBVPackIndexIterator(
   RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
   rocksdb::Transaction* rtrx = state->rocksTransaction();
   TRI_ASSERT(state != nullptr);
-  auto const& options = state->readOptions();
+  rocksdb::ReadOptions options = state->readOptions();
+  if (!reverse) {
+    options.iterate_upper_bound = &(_bounds.end());
+  }

   _iterator.reset(rtrx->GetIterator(options));
   if (reverse) {
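
Setting `iterate_upper_bound` above lets RocksDB terminate the forward scan by itself, which is why `outOfRange()` below only matters in reverse. One caveat worth showing (illustrative; names are placeholders): the bound `Slice` must outlive the iterator.

    #include <rocksdb/db.h>
    #include <memory>

    void boundedScan(rocksdb::DB* db, rocksdb::Slice lower, rocksdb::Slice upper) {
      rocksdb::ReadOptions ro;
      ro.iterate_upper_bound = &upper;  // exclusive upper bound, must stay alive
      std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(ro));
      for (it->Seek(lower); it->Valid(); it->Next()) {
        // every key here is < upper
      }
    }
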
@@ -115,18 +119,15 @@ void RocksDBVPackIndexIterator::reset() {

 bool RocksDBVPackIndexIterator::outOfRange() const {
   TRI_ASSERT(_trx->state()->isRunning());
+  TRI_ASSERT(_reverse);

-  if (_reverse) {
-    return (_cmp->Compare(_iterator->key(), _bounds.start()) < 0);
-  } else {
-    return (_cmp->Compare(_iterator->key(), _bounds.end()) > 0);
-  }
+  return (_cmp->Compare(_iterator->key(), _bounds.start()) < 0);
 }

 bool RocksDBVPackIndexIterator::next(TokenCallback const& cb, size_t limit) {
   TRI_ASSERT(_trx->state()->isRunning());

-  if (limit == 0 || !_iterator->Valid() || outOfRange()) {
+  if (limit == 0 || !_iterator->Valid() || (_reverse && outOfRange())) {
     // No limit no data, or we are actually done. The last call should have
     // returned false
     TRI_ASSERT(limit > 0);  // Someone called with limit == 0. Api broken

@@ -148,7 +149,7 @@ bool RocksDBVPackIndexIterator::next(TokenCallback const& cb, size_t limit) {
       _iterator->Next();
     }

-    if (!_iterator->Valid() || outOfRange()) {
+    if (!_iterator->Valid() || (_reverse && outOfRange())) {
       return false;
     }
   }

@@ -183,7 +184,7 @@ RocksDBVPackIndex::~RocksDBVPackIndex() {}
 size_t RocksDBVPackIndex::memory() const {
   rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
   RocksDBKeyBounds bounds = _unique ? RocksDBKeyBounds::UniqueIndex(_objectId)
                                     : RocksDBKeyBounds::IndexEntries(_objectId);
   rocksdb::Range r(bounds.start(), bounds.end());
   uint64_t out;
   db->GetApproximateSizes(&r, 1, &out, true);

@@ -146,7 +146,7 @@ class RocksDBVPackIndex : public RocksDBIndex {

   int remove(transaction::Methods*, TRI_voc_rid_t,
              arangodb::velocypack::Slice const&, bool isRollback) override;

   int removeRaw(rocksdb::WriteBatch*, TRI_voc_rid_t,
                 arangodb::velocypack::Slice const&) override;

@@ -177,7 +177,7 @@ class RocksDBVPackIndex : public RocksDBIndex {

   arangodb::aql::AstNode* specializeCondition(
       arangodb::aql::AstNode*, arangodb::aql::Variable const*) const override;

   int cleanup() override;

  private:
@@ -1,49 +0,0 @@
-////////////////////////////////////////////////////////////////////////////////
-/// DISCLAIMER
-///
-/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
-/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
-///
-/// Licensed under the Apache License, Version 2.0 (the "License");
-/// you may not use this file except in compliance with the License.
-/// You may obtain a copy of the License at
-///
-///     http://www.apache.org/licenses/LICENSE-2.0
-///
-/// Unless required by applicable law or agreed to in writing, software
-/// distributed under the License is distributed on an "AS IS" BASIS,
-/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-/// See the License for the specific language governing permissions and
-/// limitations under the License.
-///
-/// Copyright holder is ArangoDB GmbH, Cologne, Germany
-///
-/// @author Michael Hackstein
-////////////////////////////////////////////////////////////////////////////////
-
-#include "RocksDBIndexFactory.h"
-#include "Basics/Exceptions.h"
-#include "Basics/voc-errors.h"
-
-#include <velocypack/Builder.h>
-#include <velocypack/Slice.h>
-#include <velocypack/velocypack-aliases.h>
-
-using namespace arangodb;
-
-int RocksDBIndexFactory::enhanceIndexDefinition(VPackSlice const definition,
-                                                VPackBuilder& enhanced, bool isCreation) const {
-  return TRI_ERROR_NOT_IMPLEMENTED;
-}
-
-std::shared_ptr<Index> RocksDBIndexFactory::prepareIndexFromSlice(
-    VPackSlice info, bool generateKey, LogicalCollection* col,
-    bool isClusterConstructor) const {
-  THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
-}
-
-void RocksDBIndexFactory::fillSystemIndexes(
-    arangodb::LogicalCollection* col,
-    std::vector<std::shared_ptr<arangodb::Index>>& systemIndexes) const {
-  THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
-}
@@ -1,53 +0,0 @@
-////////////////////////////////////////////////////////////////////////////////
-/// DISCLAIMER
-///
-/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
-/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
-///
-/// Licensed under the Apache License, Version 2.0 (the "License");
-/// you may not use this file except in compliance with the License.
-/// You may obtain a copy of the License at
-///
-///     http://www.apache.org/licenses/LICENSE-2.0
-///
-/// Unless required by applicable law or agreed to in writing, software
-/// distributed under the License is distributed on an "AS IS" BASIS,
-/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-/// See the License for the specific language governing permissions and
-/// limitations under the License.
-///
-/// Copyright holder is ArangoDB GmbH, Cologne, Germany
-///
-/// @author Michael Hackstein
-////////////////////////////////////////////////////////////////////////////////
-
-#ifndef ARANGOD_STORAGE_ENGINE_ROCKSDB_INDEX_FACTORY_H
-#define ARANGOD_STORAGE_ENGINE_ROCKSDB_INDEX_FACTORY_H 1
-
-#include "Indexes/IndexFactory.h"
-
-namespace arangodb {
-
-class RocksDBIndexFactory : public IndexFactory {
- public:
-  RocksDBIndexFactory() : IndexFactory() {
-  }
-
-  ~RocksDBIndexFactory() override {}
-
-  int enhanceIndexDefinition(
-      arangodb::velocypack::Slice const definition,
-      arangodb::velocypack::Builder& enhanced, bool isCreation) const override;
-
-  std::shared_ptr<arangodb::Index> prepareIndexFromSlice(
-      arangodb::velocypack::Slice info, bool generateKey,
-      LogicalCollection* col, bool isClusterConstructor) const override;
-
-  void fillSystemIndexes(arangodb::LogicalCollection* col,
-                         std::vector<std::shared_ptr<arangodb::Index>>&
-                             systemIndexes) const override;
-};
-}
-
-#endif
@@ -26,6 +26,7 @@ directory = @LOCALSTATEDIR@/lib/arangodb3
 # endpoint = tcp://[fe80::21a:5df1:aede:98cf]:8529
 #
 endpoint = tcp://127.0.0.1:8529
+storage-engine = auto

 # reuse a port on restart or wait until it is freed by the operating system
 # reuse-address = false
@@ -1179,38 +1179,61 @@ actions.defineHttp({
           } catch (e) {
           }

-          followerOP = null;
-          try {
-            followerOP = ArangoClusterComm.asyncRequest('GET', 'server:' + shard.toCheck, '_system',
-              '/_api/collection/' + shard.shard + '/count', '', {}, options);
-          } catch (e) {
-          }
+          // IMHO these try...catch things should at least log something but I don't want to
+          // introduce last minute log spam before the release (this was not logging either before restructuring it)
+          let followerOps = shard.toCheck.map(follower => {
+            try {
+              return ArangoClusterComm.asyncRequest('GET', 'server:' + follower, '_system', '/_api/collection/' + shard.shard + '/count', '', {}, options);
+            } catch (e) {
+              return null;
+            }
+          });
+
+          let [minFollowerCount, maxFollowerCount] = followerOps.reduce((result, followerOp) => {
+            if (!followerOp) {
+              return result;
+            }
+
+            let followerCount = 0;
+            try {
+              followerR = ArangoClusterComm.wait(followerOp);
+              if (followerR.status !== 'BACKEND_UNAVAILABLE') {
+                try {
+                  followerBody = JSON.parse(followerR.body);
+                  followerCount = followerBody.count;
+                } catch (e) {
+                }
+              }
+            } catch (e) {
+            }
+            if (result === null) {
+              return [followerCount, followerCount];
+            } else {
+              return [Math.min(followerCount, result[0]), Math.max(followerCount, result[1])];
+            }
+          }, null);

           let leaderCount = null;

           if (leaderOP) {
             leaderR = ArangoClusterComm.wait(leaderOP);
-            leaderBody = JSON.parse(leaderR.body);
-            leaderCount = leaderBody.count;
-          }
-
-          let followerCount = null;
-          if (followerOP) {
-            followerR = ArangoClusterComm.wait(followerOP);
-
-            if (followerR.status !== 'BACKEND_UNAVAILABLE') {
-              try {
-                followerBody = JSON.parse(followerR.body);
-                followerCount = followerBody.count;
-
-                result.results[shard.collection].Plan[shard.shard].progress = {
-                  total: leaderCount,
-                  current: followerCount
-                };
-              } catch (e) {
-              }
+            try {
+              leaderBody = JSON.parse(leaderR.body);
+              leaderCount = leaderBody.count;
+            } catch (e) {
             }
           }

+          let followerCount;
+          if (minFollowerCount < leaderCount) {
+            followerCount = minFollowerCount;
+          } else {
+            followerCount = maxFollowerCount;
+          }
+          result.results[shard.collection].Plan[shard.shard].progress = {
+            total: leaderCount,
+            current: followerCount
+          };
         });

         actions.resultOk(req, res, actions.HTTP_OK, result);
@@ -93,23 +93,28 @@

   <div id="requests">
     <div class="contentDiv">
-      <div class="dashboard-row pure-u">
-        <% largeChart("requestsChart", "Requests per Second") %>
+      <div class="dashboard-row pure-u cluster-values" id="node-info" style="width: 100%; margin-top: 0; padding-right: 2px; box-sizing: border-box;">
+      </div>

-        <% tendency("Request Types", "asyncRequests", false); %>
-        <% tendency("Number of Client Connections", "clientConnections", false); %>
-      </div>
+      <% if (hideStatistics !== true) { %>
+        <div class="dashboard-row pure-u">
+          <% largeChart("requestsChart", "Requests per Second") %>

-      <div class="dashboard-row pure-u small-label-padding">
-        <% largeChart("dataTransferChart", "Transfer Size per Second") %>
-        <% smallChart("dataTransferDistribution", "Transfer Size per Second (distribution)", false) %>
-      </div>
+          <% tendency("Request Types", "asyncRequests", false); %>
+          <% tendency("Number of Client Connections", "clientConnections", false); %>
+        </div>

-      <div class="dashboard-row pure-u small-label-padding">
-        <% largeChart("totalTimeChart", "Average Request Time (seconds)") %>
-        <% smallChart("totalTimeDistribution", "Average Request Time (distribution)", false) %>
-      </div>
-    </div>
+        <div class="dashboard-row pure-u small-label-padding">
+          <% largeChart("dataTransferChart", "Transfer Size per Second") %>
+          <% smallChart("dataTransferDistribution", "Transfer Size per Second (distribution)", false) %>
+        </div>
+
+        <div class="dashboard-row pure-u small-label-padding">
+          <% largeChart("totalTimeChart", "Average Request Time (seconds)") %>
+          <% smallChart("totalTimeDistribution", "Average Request Time (distribution)", false) %>
+        </div>
+      <% } %>
+    </div>
   </div>

   <div id="system" class="tendency-box-sizing" style="display: none";>
@@ -46,11 +46,10 @@

   <div class="pure-g cluster-nodes-title pure-table pure-table-header pure-title" style="clear: both">
     <div class="pure-table-row">
-      <div class="pure-u-9-24 left">Name</div>
-      <div class="pure-u-9-24 left">Endpoint</div>
-      <div class="pure-u-2-24 mid hide-small">Since</div>
-      <div class="pure-u-2-24 mid">Info</div>
-      <div class="pure-u-2-24 mid">Status</div>
+      <div class="pure-u-10-24 left">Name</div>
+      <div class="pure-u-10-24 left">Endpoint</div>
+      <div class="pure-u-3-24 mid hide-small">Since</div>
+      <div class="pure-u-1-24 mid"></div>
     </div>
   </div>

@@ -60,24 +59,22 @@

   <div class="pure-table-row <%= disabled %>" node="<%= id %>">

-    <div class="pure-u-9-24 left">
+    <div class="pure-u-10-24 left">
       <%= node.ShortName %>
       <i class="fa fa-bar-chart"></i>
       <% if(node.Status === 'FAILED') { %>
         <i class="fa fa-trash-o"></i>
       <% } %>
     </div>
-    <div class="pure-u-9-24 left"><%= node.Endpoint %></div>
+    <div class="pure-u-10-24 left"><%= node.Endpoint %></div>

     <% var formatted = (node.LastHeartbeatAcked).substr(11, 18).slice(0, -1); %>
-    <div class="pure-u-2-24 hide-small mid"><%= formatted %></div>
-
-    <div class="pure-u-2-24 mid"><i class="fa fa-info-circle"></i></div>
+    <div class="pure-u-3-24 hide-small mid"><%= formatted %></div>

     <% if(node.Status === 'GOOD') { %>
-      <div class="pure-u-2-24 mid state"><i class="fa fa-check-circle tippy" title="<%= node.LastHeartbeatStatus %>"></i></div>
+      <div class="pure-u-1-24 mid state"><i class="fa fa-check-circle tippy" title="<%= node.LastHeartbeatStatus %>"></i></div>
     <% } else { %>
-      <div class="pure-u-2-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
+      <div class="pure-u-1-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
    <% } %>

   </div>

@@ -90,7 +87,7 @@

   <% if (Object.keys(dbs).length > 0) { %>
     <% var disabled = ''; %>
-    <% disabled = " disabled"; %>
+    <% disabled = " dbserver"; %>
     <div class="pure-u-1-1 pure-u-md-1-1 pure-u-lg-1-1 pure-u-xl-1-2">
       <div class="sectionHeader pure-g">
         <div class="pure-u-1-5">

@@ -128,11 +125,10 @@

   <div class="pure-g cluster-nodes-title pure-table pure-table-header pure-title">
     <div class="pure-table-row">
-      <div class="pure-u-9-24 left">Name</div>
-      <div class="pure-u-9-24 left">Endpoint</div>
-      <div class="pure-u-2-24 mid hide-small">Since</div>
-      <div class="pure-u-2-24 mid">Info</div>
-      <div class="pure-u-2-24 mid">Status</div>
+      <div class="pure-u-10-24 left">Name</div>
+      <div class="pure-u-10-24 left">Endpoint</div>
+      <div class="pure-u-3-24 mid hide-small">Since</div>
+      <div class="pure-u-1-24 mid"></div>
     </div>
   </div>
 <% } %>

@@ -143,18 +139,16 @@

   <div class="pure-table-row <%= disabled %>" node="<%= id %>">

-    <div class="pure-u-9-24 left"><%= node.ShortName %></div>
-    <div class="pure-u-9-24 left"><%= node.Endpoint %></div>
+    <div class="pure-u-10-24 left"><%= node.ShortName %></div>
+    <div class="pure-u-10-24 left"><%= node.Endpoint %></div>

     <% var formatted = (node.LastHeartbeatAcked).substr(11, 18).slice(0, -1); %>
-    <div class="pure-u-2-24 mid hide-small"><%= formatted %></div>
-
-    <div class="pure-u-2-24 mid"><i class="fa fa-info-circle"></i></div>
+    <div class="pure-u-3-24 mid hide-small"><%= formatted %></div>

     <% if(node.Status === 'GOOD') { %>
-      <div class="pure-u-2-24 mid state"><i class="fa fa-check-circle tippy" title="<%= node.LastHeartbeatStatus %>"></i></div>
+      <div class="pure-u-1-24 mid state"><i class="fa fa-check-circle tippy" title="<%= node.LastHeartbeatStatus %>"></i></div>
     <% } else { %>
-      <div class="pure-u-2-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
+      <div class="pure-u-1-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
     <% } %>

   </div>
@ -1,6 +1,6 @@
|
|||
/* jshint browser: true */
|
||||
/* jshint unused: false */
|
||||
/* global Backbone, $, window, arangoHelper, nv, d3, prettyBytes */
|
||||
/* global Backbone, $, window, arangoHelper, moment, nv, d3, prettyBytes */
|
||||
/* global document, console, frontendConfig, Dygraph, _,templateEngine */
|
||||
|
||||
(function () {
|
||||
|
@ -603,6 +603,131 @@
|
|||
}
|
||||
},
|
||||
|
||||
renderStatisticBox: function (name, value, title) {
|
||||
// box already rendered, just update value
|
||||
if ($('#node-info #nodeattribute-' + name).length) {
|
||||
$('#node-info #nodeattribute-' + name).html(value);
|
||||
} else {
|
||||
var elem = '';
|
||||
elem += '<div class="pure-u-1-2 pure-u-md-1-4" style="background-color: #fff">';
|
||||
elem += '<div class="valueWrapper">';
|
||||
if (title) {
|
||||
elem += '<div id="nodeattribute-' + name + '" class="value tippy" title="' + value + '">' + value + '</div>';
|
||||
} else {
|
||||
elem += '<div id="nodeattribute-' + name + '" class="value">' + value + '</div>';
|
||||
}
|
||||
elem += '<div class="graphLabel">' + name + '</div>';
|
||||
elem += '</div>';
|
||||
elem += '</div>';
|
||||
$('#node-info').append(elem);
|
||||
}
|
||||
},
|
||||
|
||||
getNodeInfo: function () {
|
||||
var self = this;
|
||||
|
||||
if (frontendConfig.isCluster) {
|
||||
// Cluster node
|
||||
if (this.serverInfo.isDBServer) {
|
||||
this.renderStatisticBox('Role', 'DBServer');
|
||||
} else {
|
||||
this.renderStatisticBox('Role', 'Coordinator');
|
||||
}
|
||||
|
||||
this.renderStatisticBox('Host', this.serverInfo.raw, this.serverInfo.raw);
|
||||
if (this.serverInfo.endpoint) {
|
||||
this.renderStatisticBox('Protocol', this.serverInfo.endpoint.substr(0, this.serverInfo.endpoint.indexOf('/') - 1));
|
||||
} else {
|
||||
this.renderStatisticBox('Protocol', 'Error');
|
||||
}
|
||||
|
||||
this.renderStatisticBox('ID', this.serverInfo.target, this.serverInfo.target);
|
||||
|
||||
// get node version + license
|
||||
$.ajax({
|
||||
type: 'GET',
|
||||
cache: false,
|
||||
url: arangoHelper.databaseUrl('/_admin/clusterNodeVersion?ServerID=' + this.serverInfo.target),
|
||||
contentType: 'application/json',
|
||||
processData: false,
|
||||
success: function (data) {
|
||||
self.renderStatisticBox('Version', frontendConfig.version.version);
|
||||
self.renderStatisticBox('License', frontendConfig.version.license);
|
||||
},
|
||||
error: function (data) {
|
||||
self.renderStatisticBox('Version', 'Error');
|
||||
self.renderStatisticBox('License', 'Error');
|
||||
}
|
||||
});
|
||||
|
||||
// get server engine
|
||||
$.ajax({
|
||||
type: 'GET',
|
||||
cache: false,
|
||||
url: arangoHelper.databaseUrl('/_admin/clusterNodeEngine?ServerID=' + this.serverInfo.target),
|
||||
contentType: 'application/json',
|
||||
processData: false,
|
||||
success: function (data) {
|
||||
self.renderStatisticBox('Engine', data.name);
|
||||
},
|
||||
error: function (data) {
|
||||
self.renderStatisticBox('Engine', 'Error');
|
||||
}
|
||||
});
|
||||
|
||||
// get server statistics
|
||||
$.ajax({
|
||||
type: 'GET',
|
||||
cache: false,
|
||||
url: arangoHelper.databaseUrl('/_admin/clusterNodeStats?ServerID=' + this.serverInfo.target),
|
||||
contentType: 'application/json',
|
||||
processData: false,
|
||||
success: function (data) {
|
||||
self.renderStatisticBox('Uptime', moment.duration(data.server.uptime, 'seconds').humanize());
|
||||
},
|
||||
error: function (data) {
|
||||
self.renderStatisticBox('Uptime', 'Error');
|
||||
}
|
||||
});
|
||||
} else {
|
||||
// Standalone
|
||||
// version + license
|
||||
this.renderStatisticBox('Version', frontendConfig.version.version);
|
||||
this.renderStatisticBox('License', frontendConfig.version.license);
|
||||
|
||||
// engine status
|
||||
$.ajax({
|
||||
type: 'GET',
|
||||
cache: false,
|
||||
url: arangoHelper.databaseUrl('/_api/engine'),
|
||||
contentType: 'application/json',
|
||||
processData: false,
|
||||
success: function (data) {
|
||||
self.renderStatisticBox('Engine', data.name);
|
||||
},
|
||||
error: function () {
|
||||
self.renderStatisticBox('Engine', 'Error');
|
||||
}
|
||||
});
|
||||
|
||||
// uptime status
|
||||
$.ajax({
|
||||
type: 'GET',
|
||||
cache: false,
|
||||
url: arangoHelper.databaseUrl('/_admin/statistics'),
|
||||
contentType: 'application/json',
|
||||
processData: false,
|
||||
success: function (data) {
|
||||
self.renderStatisticBox('Uptime', moment.duration(data.server.uptime, 'seconds').humanize());
|
||||
},
|
||||
error: function () {
|
||||
self.renderStatisticBox('Uptime', 'Error');
|
||||
}
|
||||
});
|
||||
}
|
||||
arangoHelper.createTooltips();
|
||||
},
|
||||
|
||||
getStatistics: function (callback, modalView) {
|
||||
var self = this;
|
||||
self.checkState();
|
||||
|
@ -991,78 +1116,95 @@
|
|||
template: templateEngine.createTemplate('dashboardView.ejs'),
|
||||
|
||||
render: function (modalView) {
|
||||
this.delegateEvents(this.events);
|
||||
var callback = function (enabled, modalView) {
|
||||
if (!modalView) {
|
||||
$(this.el).html(this.template.render());
|
||||
}
|
||||
|
||||
if (!enabled || frontendConfig.db !== '_system') {
|
||||
$(this.el).html('');
|
||||
if (this.server) {
|
||||
$(this.el).append(
|
||||
'<div style="color: red">Server statistics (' + this.server + ') are disabled.</div>'
|
||||
);
|
||||
} else {
|
||||
$(this.el).append(
|
||||
'<div style="color: red">Server statistics are disabled.</div>'
|
||||
);
|
||||
if (this.serverInfo === undefined) {
|
||||
this.serverInfo = {
|
||||
isDBServer: false
|
||||
};
|
||||
}
|
||||
if (this.serverInfo.isDBServer !== true) {
|
||||
this.delegateEvents(this.events);
|
||||
var callback = function (enabled, modalView) {
|
||||
if (!modalView) {
|
||||
          $(this.el).html(this.template.render({
            hideStatistics: false
          }));
          this.getNodeInfo();
        }

        if (!enabled || frontendConfig.db !== '_system') {
          $(this.el).html('');
          if (this.server) {
            $(this.el).append(
              '<div style="color: red">Server statistics (' + this.server + ') are disabled.</div>'
            );
          } else {
            $(this.el).append(
              '<div style="color: red">Server statistics are disabled.</div>'
            );
          }
          return;
        }

        this.prepareDygraphs();
        if (this.isUpdating) {
          this.prepareD3Charts();
          this.prepareResidentSize();
          this.updateTendencies();
          $(window).trigger('resize');
        }
        this.startUpdating();
        $(window).resize();
      }.bind(this);

      var errorFunction = function () {
        $(this.el).html('');
        $('.contentDiv').remove();
        $('.headerBar').remove();
        $('.dashboard-headerbar').remove();
        $('.dashboard-row').remove();
        $(this.el).append(
          '<div style="color: red">You do not have permission to view this page.</div>'
        );
        $(this.el).append(
          '<div style="color: red">You can switch to \'_system\' to see the dashboard.</div>'
        );
      }.bind(this);

      if (frontendConfig.db !== '_system') {
        errorFunction();
        return;
      }

      var callback2 = function (error, authorized) {
        if (!error) {
          if (!authorized) {
            errorFunction();
          } else {
            this.getStatistics(callback, modalView);
          }
        }
      }.bind(this);

      if (window.App.currentDB.get('name') === undefined) {
        window.setTimeout(function () {
          if (window.App.currentDB.get('name') !== '_system') {
            errorFunction();
            return;
          }
          // check if user has _system permission
          this.options.database.hasSystemAccess(callback2);
        }.bind(this), 300);
      } else {
        // check if user has _system permission
        this.options.database.hasSystemAccess(callback2);
      }
    } else {
      $(this.el).html(this.template.render({
        hideStatistics: true
      }));
      // hide menu entries
      $('.subMenuEntry').remove();
      this.getNodeInfo();
    }
  });

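In plain terms, the render path above gates the statistics dashboard behind '_system' twice: synchronously via frontendConfig.db, then asynchronously via hasSystemAccess. A minimal sketch of that asynchronous gate, with names taken from the code above and the callback signature inferred from callback2:

// sketch only: hasSystemAccess reports (error, authorized) back to its callback
this.options.database.hasSystemAccess(function (error, authorized) {
  if (!error && !authorized) {
    errorFunction();                          // no permission: render the red hints
  } else if (!error) {
    this.getStatistics(callback, modalView);  // authorized: load the dashboard data
  }
}.bind(this));
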
@ -1,6 +1,6 @@
/* jshint browser: true */
/* jshint unused: false */
/* global _, Backbone, document, templateEngine, $, arangoHelper, window */
/* global _, Backbone, frontendConfig, document, templateEngine, $, arangoHelper, window */

(function () {
  'use strict';

@ -180,6 +180,7 @@
      processData: false,
      async: true,
      success: function (data) {
        frontendConfig.version = data;
        self.showServerStatus(true);
        if (self.isOffline === true) {
          self.isOffline = false;

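For context, processData and success mark this as a jQuery $.ajax options object, so the hunk's one-line change is to make the version probe explicitly non-blocking. A sketch of the surrounding call; the url is an assumption, since the hunk starts mid-object:

$.ajax({
  url: '/_api/version',        // assumed endpoint, not shown in the hunk
  processData: false,
  async: true,                 // the added line: never block the UI thread
  success: function (data) {
    frontendConfig.version = data;
    self.showServerStatus(true);
  }
});
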
@ -67,20 +67,42 @@

  continueRender: function () {
    var self = this;
    var dashboard;

    this.dashboards[this.coordinator.get('name')] = new window.DashboardView({
      dygraphConfig: window.dygraphConfig,
      database: window.App.arangoDatabase,
      serverToShow: {
        raw: this.coordinator.get('address'),
        isDBServer: false,
        endpoint: this.coordinator.get('protocol') + '://' + this.coordinator.get('address'),
        target: this.coordinator.get('name')
      }
    });
    this.dashboards[this.coordinator.get('name')].render();
    if (this.coordinator) {
      dashboard = this.coordinator.get('name');
      // coordinator
      this.dashboards[this.coordinator.get('name')] = new window.DashboardView({
        dygraphConfig: window.dygraphConfig,
        database: window.App.arangoDatabase,
        serverToShow: {
          raw: this.coordinator.get('address'),
          isDBServer: false,
          endpoint: this.coordinator.get('protocol') + '://' + this.coordinator.get('address'),
          target: this.coordinator.get('name')
        }
      });
    } else {
      // db server
      var attributes = this.dbServer.toJSON();
      dashboard = attributes.name;
      this.dashboards[attributes.name] = new window.DashboardView({
        dygraphConfig: null,
        database: window.App.arangoDatabase,
        serverToShow: {
          raw: attributes.address,
          isDBServer: true,
          endpoint: attributes.endpoint,
          id: attributes.id,
          name: attributes.name,
          status: attributes.status,
          target: attributes.id
        }
      });
    }
    this.dashboards[dashboard].render();
    window.setTimeout(function () {
      self.dashboards[self.coordinator.get('name')].resize();
      self.dashboards[dashboard].resize();
    }, 500);
  },

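The rewritten continueRender above no longer assumes a coordinator is present; the essential dispatch, condensed into a sketch (names as in the diff):

// condensed sketch of the branch added above
var dashboard = this.coordinator
  ? this.coordinator.get('name')      // coordinator: full dygraph config
  : this.dbServer.toJSON().name;      // db server: dygraphConfig stays null
this.dashboards[dashboard].render();
window.setTimeout(function () {
  self.dashboards[dashboard].resize();  // self captured as in the code above
}, 500);
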
@ -111,8 +133,9 @@
        self.dbServer = self.dbServers[0];

        self.dbServer.each(function (model) {
          if (model.get('name') === 'DBServer001') {
            self.dbServer = model;
          var id = model.get('id');
          if (id === window.location.hash.split('/')[1]) {
            self.dbServer = self.dbServer.findWhere({id: id});
          }
        });

@ -12,7 +12,7 @@

  events: {
    'click #nodesContent .coords-nodes .pure-table-row': 'navigateToNode',
    'click #nodesContent .dbs-nodes .pure-table-row': 'navigateToInfo',
    'click #nodesContent .dbs-nodes .pure-table-row': 'navigateToNode',
    'click #nodesContent .coords-nodes .pure-table-row .fa-trash-o': 'deleteNode',
    'click #addCoord': 'addCoord',
    'click #removeCoord': 'removeCoord',

@ -129,20 +129,9 @@
      return false;
    },

    navigateToInfo: function (elem) {
      var name = $(elem.currentTarget).attr('node').slice(0, -5);
      if ($(elem.target).hasClass('fa-info-circle')) {
        window.App.navigate('#nodeInfo/' + encodeURIComponent(name), {trigger: true});
      }
    },

    navigateToNode: function (elem) {
      var name = $(elem.currentTarget).attr('node').slice(0, -5);

      if ($(elem.target).hasClass('fa-info-circle')) {
        window.App.navigate('#nodeInfo/' + encodeURIComponent(name), {trigger: true});
        return;
      }
      if ($(elem.currentTarget).hasClass('noHover')) {
        return;
      }

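Read together, the two hunks above retire navigateToInfo: DB-server rows now share navigateToNode, which keeps the info-circle shortcut as an early exit. The guard order in the merged handler is what makes this safe (as in the diff):

// sketch: order of guards in the merged navigateToNode handler
if ($(elem.target).hasClass('fa-info-circle')) {
  window.App.navigate('#nodeInfo/' + encodeURIComponent(name), {trigger: true});
  return;                                       // info icon wins over the row click
}
if ($(elem.currentTarget).hasClass('noHover')) {
  return;                                       // inactive rows stay inert
}
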
@ -2000,7 +2000,7 @@
            checkQueryStatus(data.id);
          } else {
            pushQueryResults(data);
            self.renderQueryResult(self.tmpQueryResult, counter, queryID);
            self.renderQueryResult(self.tmpQueryResult, counter, false, queryID);
            self.tmpQueryResult = null;
          }
          // SCROLL TO RESULT BOX

@ -463,6 +463,18 @@
  margin-left: 2px;
  margin-right: 0;

  .valueWrapper {
    .value {
      font-size: 18pt !important;
      font-weight: 100;
      overflow: hidden;
      padding-left: 10px;
      padding-right: 10px;
      text-overflow: ellipsis;
      white-space: nowrap;
    }
  }

  .fa-arrows-alt {
    display: none;
  }

@ -44,6 +44,10 @@ const RED = require('internal').COLORS.COLOR_RED;
const RESET = require('internal').COLORS.COLOR_RESET;
// const YELLOW = require('internal').COLORS.COLOR_YELLOW;

let didSplitBuckets = false;

// //////////////////////////////////////////////////////////////////////////////
// / @brief build a unix path
// //////////////////////////////////////////////////////////////////////////////

@ -65,6 +69,10 @@ function makePathGeneric (path) {
// //////////////////////////////////////////////////////////////////////////////

function performTests (options, testList, testname, runFn, serverOptions, startStopHandlers) {
  if (options.testBuckets && !didSplitBuckets) {
    throw new Error("You parametrized to split buckets, but this testsuite doesn't support it!!!");
  }

  if (testList.length === 0) {
    print('Testsuite is empty!');

@ -366,6 +374,7 @@ function splitBuckets (options, cases) {
    return cases;
  }

  didSplitBuckets = true;
  let m = cases.length;
  let n = options.testBuckets.split('/');
  let r = parseInt(n[0]);

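The hunk is cut off after parsing the first half of the bucket spec. For orientation only: testBuckets is a 'count/index' pair, so a continuation along these lines would slice the case list (the index variable and the slice arithmetic are assumptions, not shown in the diff):

// hypothetical continuation, for illustration only
let s = parseInt(n[1]);                    // assumed: the requested bucket index
let perBucket = Math.ceil(m / r);          // assumed: even split of m cases into r buckets
return cases.slice(perBucket * s, perBucket * (s + 1));
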
@ -725,9 +725,6 @@ function ReplicationLoggerSuite () {
////////////////////////////////////////////////////////////////////////////////

    testLoggerCreateIndexGeo1 : function () {
      if (db._engine().name === "rocksdb") {
        return;
      }
      var c = db._create(cn);

      var tick = getLastLogTick();

@ -750,9 +747,6 @@ function ReplicationLoggerSuite () {
////////////////////////////////////////////////////////////////////////////////

    testLoggerCreateIndexGeo2 : function () {
      if (db._engine().name === "rocksdb") {
        return;
      }
      var c = db._create(cn);

      var tick = getLastLogTick();

@ -775,9 +769,6 @@ function ReplicationLoggerSuite () {
////////////////////////////////////////////////////////////////////////////////

    testLoggerCreateIndexGeo3 : function () {
      if (db._engine().name === "rocksdb") {
        return;
      }
      var c = db._create(cn);

      var tick = getLastLogTick();

@ -802,9 +793,6 @@ function ReplicationLoggerSuite () {
////////////////////////////////////////////////////////////////////////////////

    testLoggerCreateIndexGeo4 : function () {
      if (db._engine().name === "rocksdb") {
        return;
      }
      var c = db._create(cn);

      var tick = getLastLogTick();

@ -829,9 +817,6 @@ function ReplicationLoggerSuite () {
////////////////////////////////////////////////////////////////////////////////

    testLoggerCreateIndexGeo5 : function () {
      if (db._engine().name === "rocksdb") {
        return;
      }
      var c = db._create(cn);

      var tick = getLastLogTick();

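All five hunks above are the same deletion repeated: each drops 3 of 9 lines, per the hunk headers, re-enabling the geo-index logger tests under RocksDB. The guard removed from every test was simply:

if (db._engine().name === "rocksdb") {
  return;   // formerly skipped these tests on the RocksDB engine
}
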
@ -185,7 +185,7 @@ function dumpTestSuite () {
      assertFalse(p.waitForSync);
      assertFalse(p.isVolatile);

      assertEqual(8, c.getIndexes().length);
      assertEqual(9, c.getIndexes().length);
      assertEqual("primary", c.getIndexes()[0].type);

      assertEqual("hash", c.getIndexes()[1].type);

@ -222,11 +222,9 @@ function dumpTestSuite () {
      assertEqual("fulltext", c.getIndexes()[7].type);
      assertEqual([ "a_f" ], c.getIndexes()[7].fields);

      if (db._engine().name !== "rocksdb") {
        assertEqual("geo2", c.getIndexes()[8].type);
        assertEqual([ "a_la", "a_lo" ], c.getIndexes()[8].fields);
        assertFalse(c.getIndexes()[8].unique);
      }
      assertEqual("geo2", c.getIndexes()[8].type);
      assertEqual([ "a_la", "a_lo" ], c.getIndexes()[8].fields);
      assertFalse(c.getIndexes()[8].unique);

      assertEqual(0, c.count());
    },

@ -100,9 +100,7 @@
  c.ensureSkiplist("a_ss1", "a_ss2", { sparse: true });
  c.ensureFulltextIndex("a_f");

  if (db._engine().name !== "rocksdb") {
    c.ensureGeoIndex("a_la", "a_lo");
  }
  c.ensureGeoIndex("a_la", "a_lo");

  // we insert data and remove it
  c = db._create("UnitTestsDumpTruncated", { isVolatile: true });

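This fixture change explains the 8-to-9 index count bump in the previous hunk: the geo index is now created on every engine, so dumps are expected to contain it unconditionally:

c.ensureGeoIndex("a_la", "a_lo");          // now runs under rocksdb as well
assertEqual(9, c.getIndexes().length);     // primary + 7 other secondaries + geo
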
@ -1,129 +0,0 @@
/*jshint globalstrict:false, strict:false */
/*global fail, assertFalse, assertTrue, assertEqual, assertUndefined */

////////////////////////////////////////////////////////////////////////////////
/// @brief test the shaped json behavior
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

var jsunity = require("jsunity");

var arangodb = require("@arangodb");
var db = arangodb.db;
var internal = require("internal");

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////

function GeoShapedJsonSuite () {
  'use strict';
  var cn = "UnitTestsCollectionShaped";
  var c;

  return {

////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////

    setUp : function () {
      db._drop(cn);
      c = db._create(cn);
      c.ensureGeoIndex("lat", "lon");

      for (var i = -3; i < 3; ++i) {
        for (var j = -3; j < 3; ++j) {
          c.save({ distance: 0, lat: 40 + 0.01 * i, lon: 40 + 0.01 * j, something: "test" });
        }
      }

      // wait until the documents are actually shaped json
      internal.wal.flush(true, true);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////

    tearDown : function () {
      db._drop(cn);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief call within function with "distance" attribute
////////////////////////////////////////////////////////////////////////////////

    testDistance : function () {
      var result = db._query(
        "FOR u IN WITHIN(" + cn + ", 40.0, 40.0, 5000000, 'distance') " +
        "SORT u.distance " +
        "RETURN { lat: u.lat, lon: u.lon, distance: u.distance }"
      ).toArray();

      // skip first result (which has a distance of 0)
      for (var i = 1; i < result.length; ++i) {
        var doc = result[i];

        assertTrue(doc.hasOwnProperty("lat"));
        assertTrue(doc.hasOwnProperty("lon"));
        assertTrue(doc.hasOwnProperty("distance"));
        assertTrue(doc.distance > 0);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief call near function with "distance" attribute
////////////////////////////////////////////////////////////////////////////////

    testNear : function () {
      var result = db._query(
        "FOR u IN NEAR(" + cn + ", 40.0, 40.0, 5, 'something') SORT u.something " +
        "RETURN { lat: u.lat, lon: u.lon, distance: u.something }")
        .toArray();

      // skip first result (which has a distance of 0)
      for (var i = 1; i < result.length; ++i) {
        var doc = result[i];

        assertTrue(doc.hasOwnProperty("lat"));
        assertTrue(doc.hasOwnProperty("lon"));
        assertTrue(doc.hasOwnProperty("distance"));
        assertTrue(doc.distance >= 0);
      }
    }

  };
}

////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////

jsunity.run(GeoShapedJsonSuite);

return jsunity.done();

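The deleted suite's two tests both rely on WITHIN and NEAR exposing the computed distance under a caller-chosen attribute name; a minimal restatement of that query shape, with the constants as in the suite:

// sketch of the AQL shape the suite exercised
var rows = db._query(
  "FOR u IN WITHIN(UnitTestsCollectionShaped, 40.0, 40.0, 5000000, 'distance') " +
  "SORT u.distance RETURN u.distance"
).toArray();
// every row after the first (the center point itself) should report a distance > 0
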
@ -38,7 +38,7 @@ var internal = require("internal");
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////

function DocumentShapedJsonSuite () {
function GeoShapedJsonSuite () {
  'use strict';
  var cn = "UnitTestsCollectionShaped";
  var c;

@ -52,15 +52,15 @@ function DocumentShapedJsonSuite () {
    setUp : function () {
      db._drop(cn);
      c = db._create(cn);
      c.ensureGeoIndex("lat", "lon");

      for (var i = 0; i < 100; ++i) {
        c.save({ _key: "test" + i,
                 value: i,
                 text: "Test" + i,
                 values: [ i ],
                 one: { two: { three: [ 1 ] } } });
      for (var i = -3; i < 3; ++i) {
        for (var j = -3; j < 3; ++j) {
          c.save({ distance: 0, lat: 40 + 0.01 * i, lon: 40 + 0.01 * j, something: "test" });
        }
      }

      // wait until the documents are actually shaped json
      internal.wal.flush(true, true);
    },

@ -74,915 +74,45 @@ function DocumentShapedJsonSuite () {
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief save a Buffer object
/// @brief call within function with "distance" attribute
////////////////////////////////////////////////////////////////////////////////

    testBuffer : function () {
      var b = new Buffer('abcdefg', 'binary');
      c.save({ _key: "buffer", value: b });
      var doc = c.document("buffer");
      assertTrue(doc.hasOwnProperty("value"));
      assertEqual(b.toJSON(), doc.value);
      assertEqual([ 97, 98, 99, 100, 101, 102, 103 ], doc.value);
    },
    testDistance : function () {
      var result = db._query(
        "FOR u IN WITHIN(" + cn + ", 40.0, 40.0, 5000000, 'distance') " +
        "SORT u.distance " +
        "RETURN { lat: u.lat, lon: u.lon, distance: u.distance }"
      ).toArray();

////////////////////////////////////////////////////////////////////////////////
/// @brief save a date object
////////////////////////////////////////////////////////////////////////////////
      // skip first result (which has a distance of 0)
      for (var i = 1; i < result.length; ++i) {
        var doc = result[i];

    testDate : function () {
      var dt = new Date();
      c.save({ _key: "date", value: dt });
      var doc = c.document("date");
      assertTrue(doc.hasOwnProperty("value"));
      assertEqual(dt.toJSON(), doc.value);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief save a regexp object
////////////////////////////////////////////////////////////////////////////////

    testRegexp : function () {
      try {
        c.save({ _key: "date", regexp : /foobar/ });
        fail();
      }
      catch (err) {
        assertEqual(internal.errors.ERROR_BAD_PARAMETER.code, err.errorNum);
        assertTrue(doc.hasOwnProperty("lat"));
        assertTrue(doc.hasOwnProperty("lon"));
        assertTrue(doc.hasOwnProperty("distance"));
        assertTrue(doc.distance > 0);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief save a function object
/// @brief call near function with "distance" attribute
////////////////////////////////////////////////////////////////////////////////

    testFunction : function () {
      try {
        c.save({ _key: "date", func : function () { } });
        fail();
      }
      catch (err) {
        assertEqual(internal.errors.ERROR_BAD_PARAMETER.code, err.errorNum);
      }
    },
    testNear : function () {
      var result = db._query(
        "FOR u IN NEAR(" + cn + ", 40.0, 40.0, 5, 'something') SORT u.something " +
        "RETURN { lat: u.lat, lon: u.lon, distance: u.something }")
        .toArray();

////////////////////////////////////////////////////////////////////////////////
/// @brief check getting keys
////////////////////////////////////////////////////////////////////////////////

    testGet : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        assertTrue(doc.hasOwnProperty("_id"));
        assertTrue(doc.hasOwnProperty("_key"));
        assertTrue(doc.hasOwnProperty("_rev"));
        assertTrue(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("values"));
        assertTrue(doc.hasOwnProperty("one"));

        assertEqual(cn + "/test" + i, doc._id);
        assertEqual("test" + i, doc._key);
        assertEqual(i, doc.value);
        assertEqual("Test" + i, doc.text);
        assertEqual([ i ], doc.values);
        assertEqual({ two: { three: [ 1 ] } }, doc.one);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check getting keys
////////////////////////////////////////////////////////////////////////////////

    testGetKeys : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        var keys = Object.keys(doc).sort();
        assertEqual([ "_id", "_key", "_rev", "one", "text", "value", "values" ], keys);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check updating of keys in shaped json
////////////////////////////////////////////////////////////////////////////////

    testUpdatePseudo : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        assertEqual(cn + "/test" + i, doc._id);
        assertEqual("test" + i, doc._key);
        assertEqual(i, doc.value);
        assertEqual("Test" + i, doc.text);
        assertEqual([ i ], doc.values);

        doc._id = "foobarbaz";
        doc._key = "meow";
        doc._rev = null;

        assertEqual("foobarbaz", doc._id);
        assertEqual("meow", doc._key);
        assertEqual(null, doc._rev);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check updating of keys in shaped json
////////////////////////////////////////////////////////////////////////////////

    testUpdateShaped1 : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        doc.value = "Tester" + i;
        doc.text = 42 + i;
        doc.values.push(i);

        assertEqual(cn + "/test" + i, doc._id);
        assertEqual("test" + i, doc._key);
        assertEqual("Tester" + i, doc.value);
        assertEqual(42 + i, doc.text);
        assertEqual([ i, i ], doc.values);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check updating of keys in shaped json
////////////////////////////////////////////////////////////////////////////////

    testUpdateShaped2 : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        assertEqual(i, doc.value);

        doc.value = 99;
        assertEqual(99, doc.value);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check updating of keys in shaped json
////////////////////////////////////////////////////////////////////////////////

    testUpdateShaped3 : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        assertEqual([ i ], doc.values);

        doc.someValue = 1; // need to do this to trigger copying
        doc.values.push(42);
        assertEqual([ i, 42 ], doc.values);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check updating of keys in shaped json
////////////////////////////////////////////////////////////////////////////////

    testUpdateShapedNested1 : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        assertEqual({ two: { three: [ 1 ] } }, doc.one);

        doc.one = "removing the nested structure";
        assertTrue(doc.hasOwnProperty("one"));
        assertEqual("removing the nested structure", doc.one);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check updating of keys in shaped json
////////////////////////////////////////////////////////////////////////////////

    testUpdateShapedNested2 : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        assertEqual({ two: { three: [ 1 ] } }, doc.one);

        doc.someValue = 1; // need to do this to trigger copying
        doc.one.two.three = "removing the nested structure";
        assertTrue(doc.hasOwnProperty("one"));
        assertTrue(doc.one.hasOwnProperty("two"));
        assertTrue(doc.one.two.hasOwnProperty("three"));
        assertEqual("removing the nested structure", doc.one.two.three);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check updating of keys in shaped json
////////////////////////////////////////////////////////////////////////////////

    testUpdateShapedNested3 : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        assertEqual({ two: { three: [ 1 ] } }, doc.one);
        doc.someValue = 1; // need to do this to trigger copying

        doc.one.two.four = 42;
        assertTrue(doc.hasOwnProperty("one"));
        assertTrue(doc.one.hasOwnProperty("two"));
        assertTrue(doc.one.two.hasOwnProperty("three"));
        assertTrue(doc.one.two.hasOwnProperty("four"));
        assertEqual([ 1 ], doc.one.two.three);
        assertEqual(42, doc.one.two.four);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check adding attributes in shaped json
////////////////////////////////////////////////////////////////////////////////

    testAddAttributes1 : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        doc.thisIsAnAttribute = 99;

        assertTrue(doc.hasOwnProperty("thisIsAnAttribute"));
        assertEqual(99, doc.thisIsAnAttribute);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check adding attributes in shaped json
////////////////////////////////////////////////////////////////////////////////

    testAddAttributes2 : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        doc["some attribute set now"] = "aha";

        assertTrue(doc.hasOwnProperty("some attribute set now"));
        assertEqual("aha", doc["some attribute set now"]);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check adding attributes in shaped json
////////////////////////////////////////////////////////////////////////////////

    testAddAttributesIndexed : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        doc[1] = "aha";

        assertTrue(doc.hasOwnProperty(1));
        assertTrue(doc.hasOwnProperty("1"));
        assertEqual("aha", doc[1]);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check adding attributes in shaped json
////////////////////////////////////////////////////////////////////////////////

    testAddAttributesNested1 : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        doc.someValue = 1; // need to do this to trigger copying
        doc.one.test = { foo: "bar" };
        assertTrue(doc.hasOwnProperty("one"));
        assertTrue(doc.one.hasOwnProperty("two"));
        assertTrue(doc.one.two.hasOwnProperty("three"));
        assertTrue(doc.one.hasOwnProperty("test"));
        assertEqual({ foo: "bar" }, doc.one.test);
        assertEqual({ three: [ 1 ] }, doc.one.two);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check adding attributes in shaped json
////////////////////////////////////////////////////////////////////////////////

    testAddAttributesNested2 : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        doc.something = { foo: "bar" };
        assertTrue(doc.hasOwnProperty("something"));
        assertTrue(doc.something.hasOwnProperty("foo"));
        assertEqual("bar", doc.something.foo);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check deletion of keys from shaped json
////////////////////////////////////////////////////////////////////////////////

    testDeletionPseudoFirst : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        // initial state
        assertTrue(doc.hasOwnProperty("_key"));
        assertTrue(doc.hasOwnProperty("_rev"));
        assertTrue(doc.hasOwnProperty("_id"));
        assertTrue(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("values"));

        // delete pseudo-attributes first
        delete doc._key;
        assertFalse(doc.hasOwnProperty("_key"));

        delete doc._rev;
        assertFalse(doc.hasOwnProperty("_rev"));

        delete doc._id;
        assertFalse(doc.hasOwnProperty("_id"));

        delete doc.value;
        assertFalse(doc.hasOwnProperty("value"));

        delete doc.text;
        assertFalse(doc.hasOwnProperty("text"));

        delete doc.values;
        assertFalse(doc.hasOwnProperty("values"));
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check deletion of special attribute _id
////////////////////////////////////////////////////////////////////////////////

    testDeletionShapedKeyId : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        // initial state
        assertTrue(doc.hasOwnProperty("_key"));
        assertTrue(doc.hasOwnProperty("_rev"));
        assertTrue(doc.hasOwnProperty("_id"));
        assertTrue(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("values"));

        // delete special attribute _id
        delete doc._id;
        assertFalse(doc.hasOwnProperty("_id"));
        assertUndefined(doc._id);
        assertTrue(doc.hasOwnProperty("_key"));
        assertTrue(doc.hasOwnProperty("_rev"));
        assertTrue(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("values"));
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check deletion of special attributes from shaped json
////////////////////////////////////////////////////////////////////////////////

    testDeletionShapedKeyRev : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        // initial state
        assertTrue(doc.hasOwnProperty("_key"));
        assertTrue(doc.hasOwnProperty("_rev"));
        assertTrue(doc.hasOwnProperty("_id"));
        assertTrue(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("values"));

        // delete special attribute _key
        delete doc._key;
        assertFalse(doc.hasOwnProperty("_key"));
        assertUndefined(doc._key);
        assertTrue(doc.hasOwnProperty("_rev"));
        assertTrue(doc.hasOwnProperty("_id"));
        assertTrue(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("values"));

        // delete special attribute _rev
        delete doc._rev;
        assertFalse(doc.hasOwnProperty("_rev"));
        assertFalse(doc.hasOwnProperty("_key"));
        assertUndefined(doc._rev);
        assertTrue(doc.hasOwnProperty("_id"));
        assertTrue(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("values"));
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check deletion of keys from shaped json
////////////////////////////////////////////////////////////////////////////////

    testDeletionShapedFirst : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        // initial state
        assertTrue(doc.hasOwnProperty("_key"));
        assertTrue(doc.hasOwnProperty("_rev"));
        assertTrue(doc.hasOwnProperty("_id"));
        assertTrue(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("values"));

        // delete shaped attributes first
        delete doc.value;
        assertFalse(doc.hasOwnProperty("value"));
        assertUndefined(doc.value);

        delete doc.text;
        assertFalse(doc.hasOwnProperty("text"));
        assertUndefined(doc.text);

        delete doc.values;
        assertFalse(doc.hasOwnProperty("values"));
        assertUndefined(doc.values);

        delete doc._key;
        assertFalse(doc.hasOwnProperty("_key"));
        assertUndefined(doc._key);

        delete doc._rev;
        assertFalse(doc.hasOwnProperty("_rev"));
        assertUndefined(doc._rev);

        delete doc._id;
        assertFalse(doc.hasOwnProperty("_id"));
        assertUndefined(doc._id);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check deletion after deletion
////////////////////////////////////////////////////////////////////////////////

    testDeletionDeletion : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        // initial state
        assertTrue(doc.hasOwnProperty("_key"));
        assertTrue(doc.hasOwnProperty("_rev"));
        assertTrue(doc.hasOwnProperty("_id"));
        assertTrue(doc.hasOwnProperty("one"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("values"));

        assertEqual([ "_id", "_key", "_rev", "one", "text", "value", "values" ], Object.keys(doc).sort());

        // delete _key
        delete doc._key;
        assertEqual([ "_id", "_rev", "one", "text", "value", "values" ], Object.keys(doc).sort());

        // delete text
        delete doc.text;
        assertEqual([ "_id", "_rev", "one", "value", "values" ], Object.keys(doc).sort());

        // delete _id
        delete doc._id;
        assertEqual([ "_rev", "one", "value", "values" ], Object.keys(doc).sort());

        // delete value
        delete doc.value;
        assertEqual([ "_rev", "one", "values" ], Object.keys(doc).sort());

        // delete _rev
        delete doc._rev;
        assertEqual([ "one", "values" ], Object.keys(doc).sort());

        // delete values
        delete doc.values;
        assertEqual([ "one" ], Object.keys(doc).sort());

        // delete one
        delete doc.one;
        assertEqual([ ], Object.keys(doc).sort());
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check deletion of keys from shaped json
////////////////////////////////////////////////////////////////////////////////

    testDeletionAfterUpdate : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        // initial state
        assertTrue(doc.hasOwnProperty("_key"));
        assertTrue(doc.hasOwnProperty("_rev"));
        assertTrue(doc.hasOwnProperty("_id"));
        assertTrue(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("values"));

        doc._key = "foobar";
        assertEqual("foobar", doc._key);
        doc._rev = 12345;
        assertEqual(12345, doc._rev);
        doc._id = "foo";
        assertEqual("foo", doc._id);

        delete doc._key;
        delete doc._rev;

        assertFalse(doc.hasOwnProperty("_rev"));
        assertFalse(doc.hasOwnProperty("_key"));
        assertTrue(doc.hasOwnProperty("_id"));
        assertEqual("foo", doc._id);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check deletion of keys from shaped json
////////////////////////////////////////////////////////////////////////////////

    testDeletionSomeAttributes : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        // initial state
        assertTrue(doc.hasOwnProperty("_key"));
        assertTrue(doc.hasOwnProperty("_rev"));
        assertTrue(doc.hasOwnProperty("_id"));
        assertTrue(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("values"));

        delete doc._key;
        delete doc.value;

        assertFalse(doc.hasOwnProperty("_key"));
        assertTrue(doc.hasOwnProperty("_rev"));
        assertTrue(doc.hasOwnProperty("_id"));
        assertFalse(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("values"));
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check deletion of keys from shaped json
////////////////////////////////////////////////////////////////////////////////

    testDeletionIndexed : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        delete doc._key;
        doc[9] = "42!";

        assertFalse(doc.hasOwnProperty("_key"));
        assertEqual("42!", doc[9]);

        delete doc[9];
        assertFalse(doc.hasOwnProperty(9));
        assertFalse(doc.hasOwnProperty("9"));
        assertUndefined(doc[9]);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check deletion of keys from shaped json
////////////////////////////////////////////////////////////////////////////////

    testDeletionNested : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        delete doc.one.two.three;

        assertTrue(doc.hasOwnProperty("one"));
        assertTrue(doc.one.hasOwnProperty("two"));
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check access after deletion of documents
////////////////////////////////////////////////////////////////////////////////

    testAccessAfterDeletion : function () {
      var docs = [ ];
      for (var i = 0; i < 100; ++i) {
        docs[i] = c.document("test" + i);
      }

      c.truncate();
      if (c.rotate) {
        c.rotate();
        internal.wait(5);
      }

      for (i = 0; i < 100; ++i) {
        assertEqual(cn + "/test" + i, docs[i]._id);
        assertEqual("test" + i, docs[i]._key);
        assertEqual("Test" + i, docs[i].text);
        assertEqual([ i ], docs[i].values);
        assertEqual({ two: { three: [ 1 ] } }, docs[i].one);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check access after dropping collection
////////////////////////////////////////////////////////////////////////////////

    testAccessAfterDropping : function () {
      var docs = [ ];
      for (var i = 0; i < 100; ++i) {
        docs[i] = c.document("test" + i);
      }

      c.drop();

      internal.wait(5);

      for (i = 0; i < 100; ++i) {
        assertEqual(cn + "/test" + i, docs[i]._id);
        assertEqual("test" + i, docs[i]._key);
        assertEqual("Test" + i, docs[i].text);
        assertEqual([ i ], docs[i].values);
        assertEqual({ two: { three: [ 1 ] } }, docs[i].one);
      }
    }

  };
}

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////

function EdgeShapedJsonSuite () {
  'use strict';
  var cn = "UnitTestsCollectionShaped";
  var c;

  return {

////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////

    setUp : function () {
      db._drop(cn);
      c = db._createEdgeCollection(cn);

      for (var i = 0; i < 100; ++i) {
        c.save(cn + "/from" + i,
               cn + "/to" + i,
               { _key: "test" + i,
                 value: i,
                 text: "Test" + i,
                 values: [ i ],
                 one: { two: { three: [ 1 ] } } });
      }

      // wait until the documents are actually shaped json
      internal.wal.flush(true, true);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////

    tearDown : function () {
      db._drop(cn);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check updating of keys in shaped json
////////////////////////////////////////////////////////////////////////////////

    testUpdatePseudo : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        assertEqual(cn + "/from" + i, doc._from);
        assertEqual(cn + "/to" + i, doc._to);

        doc._from = "foobarbaz";
        doc._to = "meow";

        assertEqual("foobarbaz", doc._from);
        assertEqual("meow", doc._to);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check adding attributes in shaped json
////////////////////////////////////////////////////////////////////////////////

    testAddAttribute : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        doc["some attribute set now"] = "aha";

        assertTrue(doc.hasOwnProperty("some attribute set now"));
        assertEqual("aha", doc["some attribute set now"]);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check deletion of keys from shaped json
////////////////////////////////////////////////////////////////////////////////

    testDeletionPseudoFirst : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        // initial state
        assertTrue(doc.hasOwnProperty("_key"));
        assertTrue(doc.hasOwnProperty("_rev"));
        assertTrue(doc.hasOwnProperty("_id"));
        assertTrue(doc.hasOwnProperty("_from"));
        assertTrue(doc.hasOwnProperty("_to"));

        // delete pseudo-attributes
        delete doc._from;
        assertFalse(doc.hasOwnProperty("_from"));

        delete doc._to;
        assertFalse(doc.hasOwnProperty("_to"));

        delete doc._key;
        assertFalse(doc.hasOwnProperty("_key"));

        delete doc._rev;
        assertFalse(doc.hasOwnProperty("_rev"));

        delete doc._id;
        assertFalse(doc.hasOwnProperty("_id"));

      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check deletion of keys from shaped json
////////////////////////////////////////////////////////////////////////////////

    testDeletionShapedFirst : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        // initial state
        assertTrue(doc.hasOwnProperty("_from"));
        assertTrue(doc.hasOwnProperty("_to"));
        assertTrue(doc.hasOwnProperty("value"));

        // delete shaped attributes first
        delete doc.value;
        assertFalse(doc.hasOwnProperty("value"));
        assertUndefined(doc.value);

        delete doc._from;
        assertFalse(doc.hasOwnProperty("_from"));
        assertUndefined(doc._from);

        delete doc._to;
        assertFalse(doc.hasOwnProperty("_to"));
        assertUndefined(doc._to);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check deletion of special attributes from shaped json
////////////////////////////////////////////////////////////////////////////////

    testDeletionShapedKeyRev : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        // initial state
        assertTrue(doc.hasOwnProperty("_from"));
        assertTrue(doc.hasOwnProperty("_to"));
        assertTrue(doc.hasOwnProperty("_key"));
        assertTrue(doc.hasOwnProperty("_rev"));
        assertTrue(doc.hasOwnProperty("_id"));
        assertTrue(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("values"));

        // delete special attribute _key
        delete doc._key;
        assertFalse(doc.hasOwnProperty("_key"));
        assertUndefined(doc._key);
        assertTrue(doc.hasOwnProperty("_from"));
        assertTrue(doc.hasOwnProperty("_to"));
        assertTrue(doc.hasOwnProperty("_rev"));
        assertTrue(doc.hasOwnProperty("_id"));
        assertTrue(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("values"));

        // delete special attribute _rev
        delete doc._rev;
        assertFalse(doc.hasOwnProperty("_rev"));
        assertFalse(doc.hasOwnProperty("_key"));
        assertUndefined(doc._rev);
        assertTrue(doc.hasOwnProperty("_from"));
        assertTrue(doc.hasOwnProperty("_to"));
        assertTrue(doc.hasOwnProperty("_id"));
        assertTrue(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("values"));
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check deletion of keys from shaped json
////////////////////////////////////////////////////////////////////////////////

    testDeletionAfterUpdate : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        // initial state
        assertTrue(doc.hasOwnProperty("_from"));
        assertTrue(doc.hasOwnProperty("_to"));

        doc._from = "foobar";
        assertEqual("foobar", doc._from);
        doc._from = 12345;
        assertEqual(12345, doc._from);
        doc._to = "foo";
        assertEqual("foo", doc._to);

        delete doc._from;
        delete doc._to;

        assertFalse(doc.hasOwnProperty("_from"));
        assertFalse(doc.hasOwnProperty("_to"));
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check deletion after deletion
////////////////////////////////////////////////////////////////////////////////

    testDeletionDeletion : function () {
      for (var i = 0; i < 100; ++i) {
        var doc = c.document("test" + i);

        // initial state
        assertTrue(doc.hasOwnProperty("_from"));
        assertTrue(doc.hasOwnProperty("_to"));
        assertTrue(doc.hasOwnProperty("_key"));
        assertTrue(doc.hasOwnProperty("_rev"));
        assertTrue(doc.hasOwnProperty("_id"));
        assertTrue(doc.hasOwnProperty("one"));
        assertTrue(doc.hasOwnProperty("text"));
        assertTrue(doc.hasOwnProperty("value"));
        assertTrue(doc.hasOwnProperty("values"));

        var keys = Object.keys(doc).sort();
        assertEqual([ "_from", "_id", "_key", "_rev", "_to", "one", "text", "value", "values" ], keys);

        // delete _from
        delete doc._from;
        assertEqual([ "_id", "_key", "_rev", "_to", "one", "text", "value", "values" ], Object.keys(doc).sort());

        // delete _to
        delete doc._to;
        assertEqual([ "_id", "_key", "_rev", "one", "text", "value", "values" ], Object.keys(doc).sort());

        // delete _key
        delete doc._key;
        assertEqual([ "_id", "_rev", "one", "text", "value", "values" ], Object.keys(doc).sort());

        // delete text
        delete doc.text;
        assertEqual([ "_id", "_rev", "one", "value", "values" ], Object.keys(doc).sort());

        // delete _id
        delete doc._id;
        assertEqual([ "_rev", "one", "value", "values" ], Object.keys(doc).sort());

        // delete value
        delete doc.value;
        assertEqual([ "_rev", "one", "values" ], Object.keys(doc).sort());

        // delete _rev
        delete doc._rev;
        assertEqual([ "one", "values" ], Object.keys(doc).sort());

        // delete values
        delete doc.values;
        assertEqual([ "one" ], Object.keys(doc).sort());
      // skip first result (which has a distance of 0)
      for (var i = 1; i < result.length; ++i) {
        var doc = result[i];

        // delete one
        delete doc.one;
        assertEqual([ ], Object.keys(doc).sort());
        assertTrue(doc.hasOwnProperty("lat"));
        assertTrue(doc.hasOwnProperty("lon"));
        assertTrue(doc.hasOwnProperty("distance"));
        assertTrue(doc.distance >= 0);
      }
    }

@ -993,8 +123,7 @@ function EdgeShapedJsonSuite () {
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////

jsunity.run(DocumentShapedJsonSuite);
jsunity.run(EdgeShapedJsonSuite);
jsunity.run(GeoShapedJsonSuite);

return jsunity.done();

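The header's line counts (-993,8 +123,7) confirm the file now holds only the geo suite: the document and edge suites and their jsunity.run calls go away, leaving a single run:

jsunity.run(GeoShapedJsonSuite);   // the only suite left in this file
return jsunity.done();
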
@ -29,7 +29,9 @@

#include "catch.hpp"

#include "RocksDBEngine/RocksDBComparator.h"
#include "RocksDBEngine/RocksDBKey.h"
#include "RocksDBEngine/RocksDBKeyBounds.h"
#include "RocksDBEngine/RocksDBTypes.h"
#include "Basics/Exceptions.h"

@ -39,7 +41,7 @@ using namespace arangodb;
// --SECTION--                                                        test suite
// -----------------------------------------------------------------------------

/// @brief setup
/// @brief test RocksDBKey class
TEST_CASE("RocksDBKeyTest", "[rocksdbkeytest]") {

/// @brief test database

@ -227,6 +229,26 @@ SECTION("test_edge_index") {
  CHECK(s1 == std::string("5\0\0\0\0\0\0\0\0a/1\0foobar\x06", 20));

}

}

/// @brief test RocksDBKeyBounds class
TEST_CASE("RocksDBKeyBoundsTest", "[rocksdbkeybounds]") {

/// @brief test geo index key and bounds consistency
SECTION("test_geo_index") {

  RocksDBComparator cmp;

  RocksDBKey k1 = RocksDBKey::GeoIndexValue(256, 128, false);
  RocksDBKeyBounds bb1 = RocksDBKeyBounds::GeoIndex(256, false);

  CHECK(cmp.Compare(k1.string(), bb1.start()) > 0);
  CHECK(cmp.Compare(k1.string(), bb1.end()) < 0);

  RocksDBKey k2 = RocksDBKey::GeoIndexValue(256, 128, true);
  RocksDBKeyBounds bb2 = RocksDBKeyBounds::GeoIndex(256, true);
  CHECK(cmp.Compare(k2.string(), bb2.start()) > 0);
  CHECK(cmp.Compare(k2.string(), bb2.end()) < 0);
}

}

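In words, the new test_geo_index section pins down one invariant: under RocksDBComparator, a GeoIndexValue key built for an index must sort strictly after that index's GeoIndex lower bound and strictly before its upper bound, and the section checks this for both boolean variants of the key/bounds pair so range scans over either sub-keyspace can neither miss nor leak keys.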