mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of https://github.com/arangodb/arangodb into devel
commit d81a68e959
@@ -0,0 +1,9 @@
#!/bin/bash

verb=$1
shift
if test "$verb" == "create"; then
  @HDIUTIL_EXECUTABLE@ $verb -megabytes @CMAKE_DMG_SIZE@ $@
else
  @HDIUTIL_EXECUTABLE@ $verb $@
fi
@@ -947,7 +947,7 @@ AqlValue Expression::executeSimpleExpressionPlus(AstNode const* node,
    return AqlValue(s.getNumber<int64_t>());
  } else if (s.isUInt()) {
    // can use uint64
    return AqlValue(s.getNumber<uint64_t>());
    return AqlValue(s.getUInt());
  }
  // fallthrough intentional
}
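The changed branch above dispatches on the slice's stored numeric type before extraction. A minimal sketch of that dispatch, using the velocypack Builder/Slice API directly:

```cpp
#include <cstdint>
#include <iostream>
#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>

int main() {
  VPackBuilder b;
  b.add(VPackValue(uint64_t(18446744073709551615ULL)));  // stored as UInt
  VPackSlice s = b.slice();

  if (s.isSmallInt() || s.isInt()) {
    std::cout << s.getNumber<int64_t>() << "\n";   // fits into int64
  } else if (s.isUInt()) {
    std::cout << s.getNumber<uint64_t>() << "\n";  // needs the uint64 path
  }
}
```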
@@ -155,22 +155,10 @@ void ServerFeature::validateOptions(std::shared_ptr<ProgramOptions>) {
}

void ServerFeature::start() {
  if (_operationMode != OperationMode::MODE_CONSOLE) {
    auto scheduler =
        ApplicationServer::getFeature<SchedulerFeature>("Scheduler");

    scheduler->buildControlCHandler();
  }

  waitForHeartbeat();

  *_result = EXIT_SUCCESS;

  // flush all log output before we go on... this is sensible because any
  // of the following options may print or prompt, and pending log entries
  // might overwrite that
  Logger::flush();

  switch (_operationMode) {
    case OperationMode::MODE_UNITTESTS:
    case OperationMode::MODE_SCRIPT:

@@ -181,6 +169,19 @@ void ServerFeature::start() {
      LOG_TOPIC(TRACE, Logger::STARTUP) << "server operation mode: SERVER";
      break;
  }

  // flush all log output before we go on... this is sensible because any
  // of the following options may print or prompt, and pending log entries
  // might overwrite that
  Logger::flush();

  if (!isConsoleMode()) {
    // install CTRL-C handlers
    server()->registerStartupCallback([]() {
      ApplicationServer::getFeature<SchedulerFeature>("Scheduler")->buildControlCHandler();
    });
  }
}

void ServerFeature::beginShutdown() {
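The rewrite above defers Ctrl-C handling to a startup callback instead of wiring it up eagerly in start(). A minimal sketch of that pattern, with hypothetical stand-ins for the feature framework's registerStartupCallback and buildControlCHandler:

```cpp
#include <csignal>
#include <functional>
#include <vector>

// hypothetical stand-ins for the application-feature framework used above
static std::vector<std::function<void()>> startupCallbacks;

void registerStartupCallback(std::function<void()> cb) {
  startupCallbacks.push_back(std::move(cb));  // runs once the server is up
}

void buildControlCHandler() {
  std::signal(SIGINT, [](int) { /* begin a clean shutdown */ });
}

int main() {
  bool consoleMode = false;
  if (!consoleMode) {
    // defer handler installation until startup completes, as in the diff
    registerStartupCallback([]() { buildControlCHandler(); });
  }
  for (auto& cb : startupCallbacks) cb();  // fired by the server at startup
}
```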
@@ -59,6 +59,10 @@ class ServerFeature final : public application_features::ApplicationFeature {
  std::vector<std::string> const& scripts() const { return _scripts; }
  std::vector<std::string> const& unitTests() const { return _unitTests; }
  uint32_t const& vppMaxSize() const { return _vppMaxSize; }

  bool isConsoleMode() const {
    return (_operationMode == OperationMode::MODE_CONSOLE);
  }

 private:
  void waitForHeartbeat();
@@ -2,6 +2,7 @@

# add sources for rocksdb engine
set(ROCKSDB_SOURCES
  RocksDBEngine/RocksDBAqlFunctions.cpp
  RocksDBEngine/RocksDBBackgroundThread.cpp
  RocksDBEngine/RocksDBCollection.cpp
  RocksDBEngine/RocksDBCollectionExport.cpp

@@ -11,6 +12,7 @@ set(ROCKSDB_SOURCES
  RocksDBEngine/RocksDBEdgeIndex.cpp
  RocksDBEngine/RocksDBEngine.cpp
  RocksDBEngine/RocksDBExportCursor.cpp
  RocksDBEngine/RocksDBFulltextIndex.cpp
  RocksDBEngine/RocksDBIndex.cpp
  RocksDBEngine/RocksDBIndexFactory.cpp
  RocksDBEngine/RocksDBHashIndex.cpp
@@ -0,0 +1,222 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////

#include "RocksDBAqlFunctions.h"

#include "Aql/Function.h"
#include "Aql/AqlFunctionFeature.h"
#include "RocksDBEngine/RocksDBFulltextIndex.h"
#include "StorageEngine/DocumentIdentifierToken.h"
#include "Utils/CollectionNameResolver.h"
#include "Transaction/Helpers.h"
#include "Transaction/Methods.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/ManagedDocumentResult.h"

#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>

using namespace arangodb;
using namespace arangodb::aql;

static ExecutionCondition const NotInCoordinator = [] {
  return !arangodb::ServerState::instance()->isRunningInCluster() ||
         !arangodb::ServerState::instance()->isCoordinator();
};

/// @brief function FULLTEXT
AqlValue RocksDBAqlFunctions::Fulltext(
    arangodb::aql::Query* query, transaction::Methods* trx,
    VPackFunctionParameters const& parameters) {
  ValidateParameters(parameters, "FULLTEXT", 3, 4);

  AqlValue collectionValue = ExtractFunctionParameterValue(trx, parameters, 0);

  if (!collectionValue.isString()) {
    THROW_ARANGO_EXCEPTION_PARAMS(
        TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH, "FULLTEXT");
  }

  std::string const collectionName(collectionValue.slice().copyString());

  AqlValue attribute = ExtractFunctionParameterValue(trx, parameters, 1);

  if (!attribute.isString()) {
    THROW_ARANGO_EXCEPTION_PARAMS(
        TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH, "FULLTEXT");
  }

  std::string attributeName(attribute.slice().copyString());

  AqlValue queryValue = ExtractFunctionParameterValue(trx, parameters, 2);

  if (!queryValue.isString()) {
    THROW_ARANGO_EXCEPTION_PARAMS(
        TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH, "FULLTEXT");
  }

  std::string queryString = queryValue.slice().copyString();

  size_t maxResults = 0;  // 0 means "all results"
  if (parameters.size() >= 4) {
    AqlValue limit = ExtractFunctionParameterValue(trx, parameters, 3);
    if (!limit.isNull(true) && !limit.isNumber()) {
      THROW_ARANGO_EXCEPTION_PARAMS(
          TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH, "FULLTEXT");
    }
    if (limit.isNumber()) {
      int64_t value = limit.toInt64(trx);
      if (value > 0) {
        maxResults = static_cast<size_t>(value);
      }
    }
  }

  auto resolver = trx->resolver();
  TRI_voc_cid_t cid = resolver->getCollectionIdLocal(collectionName);
  trx->addCollectionAtRuntime(cid, collectionName);

  LogicalCollection* collection = trx->documentCollection(cid);

  if (collection == nullptr) {
    THROW_ARANGO_EXCEPTION_FORMAT(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND,
                                  "", collectionName.c_str());
  }

  // NOTE: The shared_ptr is protected by trx lock.
  // It is safe to use the raw pointer directly.
  // We are NOT allowed to delete the index.
  arangodb::RocksDBFulltextIndex* fulltextIndex = nullptr;

  // split requested attribute name on '.' character to create a proper
  // vector of AttributeNames
  std::vector<std::vector<arangodb::basics::AttributeName>> search;
  search.emplace_back();
  for (auto const& it : basics::StringUtils::split(attributeName, '.')) {
    search.back().emplace_back(it, false);
  }

  for (auto const& idx : collection->getIndexes()) {
    if (idx->type() == arangodb::Index::TRI_IDX_TYPE_FULLTEXT_INDEX) {
      // test if index is on the correct field
      if (arangodb::basics::AttributeName::isIdentical(idx->fields(), search,
                                                       false)) {
        // match!
        fulltextIndex = static_cast<arangodb::RocksDBFulltextIndex*>(idx.get());
        break;
      }
    }
  }

  if (fulltextIndex == nullptr) {
    // fiddle collection name into error message
    THROW_ARANGO_EXCEPTION_PARAMS(TRI_ERROR_QUERY_FULLTEXT_INDEX_MISSING,
                                  collectionName.c_str());
  }

  trx->pinData(cid);
  try {
    transaction::BuilderLeaser builder(trx);
    Result res = fulltextIndex->executeQuery(queryString, *(builder.get()));
    if (!res.ok()) {
      THROW_ARANGO_EXCEPTION_MESSAGE(res.errorNumber(), res.errorMessage());
    }
    return AqlValue(builder.get());
  } catch (...) {
    THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
  }

  /*TRI_fulltext_query_t* ft =
      TRI_CreateQueryMMFilesFulltextIndex(TRI_FULLTEXT_SEARCH_MAX_WORDS, maxResults);

  if (ft == nullptr) {
    THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
  }

  bool isSubstringQuery = false;
  int res =
      TRI_ParseQueryMMFilesFulltextIndex(ft, queryString.c_str(), &isSubstringQuery);

  if (res != TRI_ERROR_NO_ERROR) {
    TRI_FreeQueryMMFilesFulltextIndex(ft);
    THROW_ARANGO_EXCEPTION(res);
  }

  // note: the following call will free "ft"!
  TRI_fulltext_result_t* queryResult =
      TRI_QueryMMFilesFulltextIndex(fulltextIndex->internals(), ft);

  if (queryResult == nullptr) {
    THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
  }

  TRI_ASSERT(trx->isPinned(cid));

  transaction::BuilderLeaser builder(trx);
  try {
    builder->openArray();

    ManagedDocumentResult mmdr;
    size_t const numResults = queryResult->_numDocuments;
    for (size_t i = 0; i < numResults; ++i) {
      if (collection->readDocument(trx, queryResult->_documents[i], mmdr)) {
        mmdr.addToBuilder(*builder.get(), true);
      }
    }
    builder->close();
    TRI_FreeResultRocksDBFulltextIndex(queryResult);
    return AqlValue(builder.get());
  } catch (...) {
    TRI_FreeResultRocksDBFulltextIndex(queryResult);
    THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
  }*/
}

/// @brief function NEAR
AqlValue RocksDBAqlFunctions::Near(arangodb::aql::Query* query,
                                   transaction::Methods* trx,
                                   VPackFunctionParameters const& parameters) {
  // TODO: obi
  THROW_ARANGO_EXCEPTION_PARAMS(TRI_ERROR_QUERY_GEO_INDEX_MISSING, "NEAR");
}

/// @brief function WITHIN
AqlValue RocksDBAqlFunctions::Within(
    arangodb::aql::Query* query, transaction::Methods* trx,
    VPackFunctionParameters const& parameters) {
  // TODO: obi
  THROW_ARANGO_EXCEPTION_PARAMS(TRI_ERROR_QUERY_GEO_INDEX_MISSING, "Within");
}

void RocksDBAqlFunctions::registerResources() {
  auto functions = AqlFunctionFeature::AQLFUNCTIONS;
  TRI_ASSERT(functions != nullptr);

  // fulltext functions
  functions->add({"FULLTEXT", "AQL_FULLTEXT", "hs,s,s|n", true, false, true,
                  false, true, &RocksDBAqlFunctions::Fulltext,
                  NotInCoordinator});
  functions->add({"NEAR", "AQL_NEAR", "hs,n,n|nz,s", true, false, true, false,
                  true, &RocksDBAqlFunctions::Near, NotInCoordinator});
  functions->add({"WITHIN", "AQL_WITHIN", "hs,n,n,n|s", true, false, true,
                  false, true, &RocksDBAqlFunctions::Within, NotInCoordinator});
}
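The FULLTEXT implementation above locates its index by splitting the requested attribute path on '.' into a vector of AttributeNames. A minimal sketch of that split, with a simplified AttributeName standing in for arangodb::basics::AttributeName and a getline-based split standing in for StringUtils::split:

```cpp
#include <sstream>
#include <string>
#include <vector>

struct AttributeName {
  std::string name;
  bool expand;  // true would mean [*] expansion; the fulltext lookup passes false
};

// split "addr.city" into [{addr,false},{city,false}], the shape the
// index-matching call (AttributeName::isIdentical) compares against
std::vector<std::vector<AttributeName>> buildSearchPath(std::string const& attributeName) {
  std::vector<std::vector<AttributeName>> search;
  search.emplace_back();
  std::stringstream ss(attributeName);
  std::string part;
  while (std::getline(ss, part, '.')) {
    search.back().push_back({part, false});
  }
  return search;
}
```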
@@ -0,0 +1,49 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGOD_ROCKSDB_ROCKSDB_AQL_FUNCTIONS_H
#define ARANGOD_ROCKSDB_ROCKSDB_AQL_FUNCTIONS_H 1

#include "Basics/Common.h"
#include "Aql/Functions.h"

namespace arangodb {
namespace aql {
struct Function;
}

struct RocksDBAqlFunctions : public aql::Functions {
  static aql::AqlValue Fulltext(arangodb::aql::Query*, transaction::Methods*,
                                aql::VPackFunctionParameters const&);

  static aql::AqlValue Near(arangodb::aql::Query*, transaction::Methods*,
                            aql::VPackFunctionParameters const&);

  static aql::AqlValue Within(arangodb::aql::Query*, transaction::Methods*,
                              aql::VPackFunctionParameters const&);

  static void registerResources();
};

}  // namespace arangodb

#endif
@@ -66,7 +66,7 @@ void RocksDBBackgroundThread::run() {
        [force, &minTick](TRI_vocbase_t* vocbase) {
          vocbase->cursorRepository()->garbageCollect(force);
          // FIXME: configurable interval tied to follower timeout
          vocbase->garbageCollectReplicationClients(60.0);
          vocbase->garbageCollectReplicationClients(120.0);
          auto clients = vocbase->getReplicationClients();
          for (auto c : clients) {
            if (std::get<2>(c) < minTick) {

@@ -74,8 +74,13 @@ void RocksDBBackgroundThread::run() {
            }
          }
        });
    _engine->pruneWalFiles(minTick);

    }

    // determine which WAL files can be pruned
    _engine->determinePrunableWalFiles(minTick);
    // and then prune them once they have expired
    _engine->pruneWalFiles();
  } catch (std::exception const& ex) {
    LOG_TOPIC(WARN, Logger::FIXME) << "caught exception in rocksdb background thread: " << ex.what();
  } catch (...) {
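The hunk above splits pruning into two phases: first mark candidate files, then delete them only after a grace period. A minimal sketch of that scheme, with nowSeconds and deleteWalFile as stand-ins for TRI_microtime and rocksdb's DB::DeleteFile:

```cpp
#include <chrono>
#include <cstdio>
#include <string>
#include <unordered_map>

static double nowSeconds() {  // stands in for TRI_microtime()
  using namespace std::chrono;
  return duration<double>(steady_clock::now().time_since_epoch()).count();
}

static bool deleteWalFile(std::string const& path) {  // stands in for DB::DeleteFile()
  return std::remove(path.c_str()) == 0;
}

static std::unordered_map<std::string, double> prunableWalFiles;
static double const pruneWaitTime = 10.0;  // cf. --rocksdb.wal-file-timeout below

// phase 1: remember when an archived WAL file becomes deletable
void markPrunable(std::string const& path) {
  if (prunableWalFiles.find(path) == prunableWalFiles.end()) {
    prunableWalFiles.emplace(path, nowSeconds() + pruneWaitTime);
  }
}

// phase 2: delete only files whose grace period has expired
void pruneExpired() {
  for (auto it = prunableWalFiles.begin(); it != prunableWalFiles.end(); /* no hoisting */) {
    if (it->second < nowSeconds() && deleteWalFile(it->first)) {
      it = prunableWalFiles.erase(it);
    } else {
      ++it;  // cannot delete yet; advance to avoid an endless loop
    }
  }
}
```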
@@ -1,8 +1,7 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.

@@ -547,7 +546,7 @@ bool RocksDBCollection::dropIndex(TRI_idx_iid_t iid) {

  if (rv == TRI_ERROR_NO_ERROR) {
    // trigger compaction before deleting the object
    cindex->compact();
    cindex->cleanup();

    _indexes.erase(_indexes.begin() + i);
    events::DropIndex("", std::to_string(iid), TRI_ERROR_NO_ERROR);

@@ -1320,6 +1319,8 @@ arangodb::Result RocksDBCollection::fillIndexes(
  rocksdb::WriteBatchWithIndex batch(db->DefaultColumnFamily()->GetComparator(),
                                     32 * 1024 * 1024);
  rocksdb::ReadOptions readOptions;
  rocksdb::WriteOptions writeOpts = state->writeOptions();
  writeOpts.disableWAL = true;

  int res = TRI_ERROR_NO_ERROR;
  auto cb = [&](DocumentIdentifierToken token) {

@@ -1344,7 +1345,7 @@ arangodb::Result RocksDBCollection::fillIndexes(
      r = Result(res);
      break;
    }
    rocksdb::Status s = db->Write(state->writeOptions(), batch.GetWriteBatch());
    rocksdb::Status s = db->Write(writeOpts, batch.GetWriteBatch());
    if (!s.ok()) {
      r = rocksutils::convertStatus(s, rocksutils::StatusHint::index);
      break;

@@ -1377,7 +1378,7 @@ arangodb::Result RocksDBCollection::fillIndexes(
    }
    // TODO: if this fails, do we have any recourse?
    // Simon: Don't think so
    db->Write(state->writeOptions(), &removeBatch);
    db->Write(writeOpts, &removeBatch);
  }

  return r;

@@ -1752,7 +1753,7 @@ void RocksDBCollection::compact() {

  for (std::shared_ptr<Index> i : _indexes) {
    RocksDBIndex* index = static_cast<RocksDBIndex*>(i.get());
    index->compact();
    index->cleanup();
  }
}

@@ -1772,7 +1773,7 @@ void RocksDBCollection::estimateSize(velocypack::Builder& builder) {

  for (std::shared_ptr<Index> i : _indexes) {
    RocksDBIndex* index = static_cast<RocksDBIndex*>(i.get());
    out = index->estimateSize();
    out = index->memory();
    builder.add(std::to_string(index->id()), VPackValue(out));
    total += out;
  }
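The fillIndexes change above hoists a WriteOptions object with disableWAL set, so bulk index builds skip the write-ahead log. A minimal sketch of that write path against the plain RocksDB API, assuming an already-open DB and placeholder keys:

```cpp
#include <rocksdb/db.h>
#include <rocksdb/options.h>
#include <rocksdb/write_batch.h>

rocksdb::Status bulkWriteWithoutWal(rocksdb::DB* db) {
  rocksdb::WriteBatch batch;
  batch.Put("index-key-1", "");  // placeholder index entries
  batch.Put("index-key-2", "");

  rocksdb::WriteOptions writeOpts;
  writeOpts.disableWAL = true;  // skip the WAL for the bulk index build
  return db->Write(writeOpts, &batch);  // caller converts !ok() to an error
}
```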
@@ -1,7 +1,7 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,8 +1,7 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
@@ -1,7 +1,7 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
@@ -80,7 +80,7 @@ bool RocksDBEdgeIndexIterator::updateBounds() {
  }
  TRI_ASSERT(fromTo.isString());
  _bounds = RocksDBKeyBounds::EdgeIndexVertex(_index->_objectId,
                                              fromTo.copyString());
                                              StringRef(fromTo));

  _iterator->Seek(_bounds.start());
  return true;

@@ -187,8 +187,12 @@ double RocksDBEdgeIndex::selectivityEstimate(

/// @brief return the memory usage for the index
size_t RocksDBEdgeIndex::memory() const {
  // TODO
  return 0;
  rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
  RocksDBKeyBounds bounds = RocksDBKeyBounds::EdgeIndex(_objectId);
  rocksdb::Range r(bounds.start(), bounds.end());
  uint64_t out;
  db->GetApproximateSizes(&r, 1, &out, true);
  return (size_t)out;
}

/// @brief return a VelocyPack representation of the index

@@ -215,8 +219,8 @@ int RocksDBEdgeIndex::insert(transaction::Methods* trx,
  VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
  VPackSlice fromTo = doc.get(_directionAttr);
  TRI_ASSERT(primaryKey.isString() && fromTo.isString());
  RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, fromTo.copyString(),
                                              primaryKey.copyString());
  RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, StringRef(fromTo),
                                              StringRef(primaryKey));

  // acquire rocksdb transaction
  RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);

@@ -242,8 +246,8 @@ int RocksDBEdgeIndex::remove(transaction::Methods* trx,
  VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
  VPackSlice fromTo = doc.get(_directionAttr);
  TRI_ASSERT(primaryKey.isString() && fromTo.isString());
  RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, fromTo.copyString(),
                                              primaryKey.copyString());
  RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, StringRef(fromTo),
                                              StringRef(primaryKey));

  // acquire rocksdb transaction
  RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);

@@ -262,8 +266,8 @@ int RocksDBEdgeIndex::removeRaw(rocksdb::WriteBatch* writeBatch,
  VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
  VPackSlice fromTo = doc.get(_directionAttr);
  TRI_ASSERT(primaryKey.isString() && fromTo.isString());
  RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, fromTo.copyString(),
                                              primaryKey.copyString());
  RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, StringRef(fromTo),
                                              StringRef(primaryKey));
  writeBatch->Delete(rocksdb::Slice(key.string()));
  return TRI_ERROR_NO_ERROR;
}

@@ -280,8 +284,8 @@ void RocksDBEdgeIndex::batchInsert(
  VPackSlice primaryKey = doc.second.get(StaticStrings::KeyString);
  VPackSlice fromTo = doc.second.get(_directionAttr);
  TRI_ASSERT(primaryKey.isString() && fromTo.isString());
  RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, fromTo.copyString(),
                                              primaryKey.copyString());
  RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, StringRef(fromTo),
                                              StringRef(primaryKey));

  rocksdb::Status status =
      rtrx->Put(rocksdb::Slice(key.string()), rocksdb::Slice());

@@ -464,19 +468,11 @@ void RocksDBEdgeIndex::handleValNode(
  }
}

void RocksDBEdgeIndex::compact() {
int RocksDBEdgeIndex::cleanup() {
  rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
  rocksdb::CompactRangeOptions opts;
  RocksDBKeyBounds bounds = RocksDBKeyBounds::EdgeIndex(_objectId);
  rocksdb::Slice b = bounds.start(), e = bounds.end();
  db->CompactRange(opts, &b, &e);
}

uint64_t RocksDBEdgeIndex::estimateSize() {
  rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
  RocksDBKeyBounds bounds = RocksDBKeyBounds::EdgeIndex(_objectId);
  rocksdb::Range r(bounds.start(), bounds.end());
  uint64_t out;
  db->GetApproximateSizes(&r, 1, &out, true);
  return out;
  return TRI_ERROR_NO_ERROR;
}
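The new memory() implementation above replaces the "return 0" stub by asking RocksDB for an approximate on-disk size of the index's key range. A minimal sketch of that call in isolation, assuming an open DB and valid bound slices:

```cpp
#include <cstdint>
#include <rocksdb/db.h>

uint64_t approximateRangeSize(rocksdb::DB* db,
                              rocksdb::Slice start, rocksdb::Slice end) {
  rocksdb::Range r(start, end);
  uint64_t size = 0;
  // the final argument asks RocksDB to include memtable data in the estimate
  db->GetApproximateSizes(&r, 1, &size, true);
  return size;
}
```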
@@ -143,9 +143,8 @@ class RocksDBEdgeIndex final : public RocksDBIndex {
  void expandInSearchValues(arangodb::velocypack::Slice const,
                            arangodb::velocypack::Builder&) const override;

  void compact() override;
  uint64_t estimateSize() override;

  int cleanup() override;

 private:
  /// @brief create the iterator
  IndexIterator* createEqIterator(transaction::Methods*, ManagedDocumentResult*,
@@ -93,13 +93,16 @@ RocksDBEngine::RocksDBEngine(application_features::ApplicationServer* server)
      _maxTransactionSize((std::numeric_limits<uint64_t>::max)()),
      _intermediateTransactionCommitSize(32 * 1024 * 1024),
      _intermediateTransactionCommitCount(100000),
      _intermediateTransactionCommitEnabled(false) {
      _intermediateTransactionCommitEnabled(false),
      _pruneWaitTime(10.0) {
  // inherits order from StorageEngine but requires RocksDBOption that are used
  // to configure this Engine and the MMFiles PersistentIndexFeature
  startsAfter("RocksDBOption");
}

RocksDBEngine::~RocksDBEngine() { delete _db; }
RocksDBEngine::~RocksDBEngine() {
  delete _db;
}

// inherited from ApplicationFeature
// ---------------------------------

@@ -114,20 +117,24 @@ void RocksDBEngine::collectOptions(
                     "transaction size limit (in bytes)",
                     new UInt64Parameter(&_maxTransactionSize));

  options->addOption("--rocksdb.intermediate-transaction-count",
  options->addHiddenOption("--rocksdb.intermediate-transaction-count",
                     "an intermediate commit will be tried when a transaction "
                     "has accumulated operations of this size (in bytes)",
                     new UInt64Parameter(&_intermediateTransactionCommitSize));

  options->addOption("--rocksdb.intermediate-transaction-count",
  options->addHiddenOption("--rocksdb.intermediate-transaction-count",
                     "an intermediate commit will be tried when this number of "
                     "operations is reached in a transaction",
                     new UInt64Parameter(&_intermediateTransactionCommitCount));
  _intermediateTransactionCommitCount = 100 * 1000;

  options->addOption(
  options->addHiddenOption(
      "--rocksdb.intermediate-transaction", "enable intermediate transactions",
      new BooleanParameter(&_intermediateTransactionCommitEnabled));

  options->addOption(
      "--rocksdb.wal-file-timeout", "timeout after which unused WAL files are deleted",
      new DoubleParameter(&_pruneWaitTime));
}

// validate the storage engine's specific options

@@ -264,6 +271,10 @@ void RocksDBEngine::unprepare() {
    _counterManager->sync(true);
  }

  // now prune all obsolete WAL files
  determinePrunableWalFiles(0);
  pruneWalFiles();

  delete _db;
  _db = nullptr;
}

@@ -963,15 +974,22 @@ std::pair<TRI_voc_tick_t, TRI_voc_cid_t> RocksDBEngine::mapObjectToCollection(
  return it->second;
}

Result RocksDBEngine::createLoggerState(TRI_vocbase_t* vocbase,
                                        VPackBuilder& builder) {
  Result res;

bool RocksDBEngine::syncWal() {
#ifdef _WIN32
  // SyncWAL always reports "not implemented" on Windows
  return true;
#else
  rocksdb::Status status = _db->GetBaseDB()->SyncWAL();
  if (!status.ok()) {
    res = rocksutils::convertStatus(status).errorNumber();
    return res;
    return false;
  }
  return true;
#endif
}

Result RocksDBEngine::createLoggerState(TRI_vocbase_t* vocbase,
                                        VPackBuilder& builder) {
  syncWal();

  builder.add(VPackValue(VPackValueType::Object));  // Base
  rocksdb::SequenceNumber lastTick = _db->GetLatestSequenceNumber();

@@ -1015,10 +1033,10 @@ Result RocksDBEngine::createLoggerState(TRI_vocbase_t* vocbase,

  builder.close();  // base

  return res;
  return Result();
}

void RocksDBEngine::pruneWalFiles(TRI_voc_tick_t minTickToKeep) {
void RocksDBEngine::determinePrunableWalFiles(TRI_voc_tick_t minTickToKeep) {
  rocksdb::VectorLogPtr files;

  auto status = _db->GetSortedWalFiles(files);

@@ -1036,20 +1054,35 @@ void RocksDBEngine::pruneWalFiles(TRI_voc_tick_t minTickToKeep) {
    }
  }

  // insert all candidate files into the map of deletable files
  if (lastLess > 0 && lastLess < files.size()) {
    for (size_t current = 0; current < lastLess; current++) {
      auto f = files[current].get();
      auto const& f = files[current].get();
      if (f->Type() == rocksdb::WalFileType::kArchivedLogFile) {
        auto s = _db->DeleteFile(f->PathName());
        if (!s.ok()) {
          // TODO: exception?
          break;
        }
        if (_prunableWalFiles.find(f->PathName()) == _prunableWalFiles.end()) {
          _prunableWalFiles.emplace(f->PathName(), TRI_microtime() + _pruneWaitTime);
        }
      }
    }
  }
}

void RocksDBEngine::pruneWalFiles() {
  // go through the map of WAL files that we already know about and check whether they have expired
  for (auto it = _prunableWalFiles.begin(); it != _prunableWalFiles.end(); /* no hoisting */) {
    // check if WAL file is expired
    if ((*it).second < TRI_microtime()) {
      auto s = _db->DeleteFile((*it).first);
      if (s.ok()) {
        it = _prunableWalFiles.erase(it);
        continue;
      }
    }
    // cannot delete this file yet... must forward the iterator to prevent an endless loop
    ++it;
  }
}

Result RocksDBEngine::dropDatabase(TRI_voc_tick_t id) {
  using namespace rocksutils;
  Result res;
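The collectOptions hunk above binds the new --rocksdb.wal-file-timeout flag to _pruneWaitTime through a DoubleParameter. A minimal sketch of that wiring, assuming the ProgramOptions include paths and namespace shown here:

```cpp
// include paths are assumptions based on the arangodb source layout
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Parameters.h"

#include <memory>

using namespace arangodb::options;

double _pruneWaitTime = 10.0;  // default grace period in seconds

void collectOptions(std::shared_ptr<ProgramOptions> options) {
  // DoubleParameter binds the parsed command-line value directly to the member
  options->addOption("--rocksdb.wal-file-timeout",
                     "timeout after which unused WAL files are deleted",
                     new DoubleParameter(&_pruneWaitTime));
}
```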
@@ -261,7 +261,8 @@ class RocksDBEngine final : public StorageEngine {

  Result createLoggerState(TRI_vocbase_t* vocbase, VPackBuilder& builder);

  void pruneWalFiles(TRI_voc_tick_t minTickToKeep);
  void determinePrunableWalFiles(TRI_voc_tick_t minTickToKeep);
  void pruneWalFiles();

 private:
  Result dropDatabase(TRI_voc_tick_t);

@@ -277,6 +278,7 @@ class RocksDBEngine final : public StorageEngine {
  static std::string const FeatureName;
  RocksDBCounterManager* counterManager() const;
  RocksDBReplicationManager* replicationManager() const;
  bool syncWal();

 private:
  /// single rocksdb database used in this storage engine

@@ -308,6 +310,12 @@ class RocksDBEngine final : public StorageEngine {

  std::unordered_map<uint64_t, std::pair<TRI_voc_tick_t, TRI_voc_cid_t>>
      _collectionMap;

  // which WAL files can be pruned when
  std::unordered_map<std::string, double> _prunableWalFiles;

  // number of seconds to wait before an obsolete WAL file is actually pruned
  double _pruneWaitTime;
};
}  // namespace arangodb
#endif
@ -0,0 +1,364 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Simon Grätzer
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "RocksDBFulltextIndex.h"
|
||||
|
||||
#include "Basics/StaticStrings.h"
|
||||
#include "Basics/StringRef.h"
|
||||
#include "Basics/Utf8Helper.h"
|
||||
#include "Basics/VelocyPackHelper.h"
|
||||
#include "Logger/Logger.h"
|
||||
#include "RocksDBEngine/RocksDBCommon.h"
|
||||
#include "RocksDBEngine/RocksDBToken.h"
|
||||
#include "RocksDBEngine/RocksDBTransactionState.h"
|
||||
#include "RocksDBEngine/RocksDBTypes.h"
|
||||
#include "StorageEngine/DocumentIdentifierToken.h"
|
||||
|
||||
#include <rocksdb/utilities/transaction_db.h>
|
||||
#include <rocksdb/utilities/write_batch_with_index.h>
|
||||
#include <velocypack/Iterator.h>
|
||||
#include <velocypack/velocypack-aliases.h>
|
||||
|
||||
using namespace arangodb;
|
||||
|
||||
TRI_voc_rid_t RocksDBFulltextIndex::fromDocumentIdentifierToken(
|
||||
DocumentIdentifierToken const& token) {
|
||||
auto tkn = static_cast<RocksDBToken const*>(&token);
|
||||
return tkn->revisionId();
|
||||
}
|
||||
|
||||
DocumentIdentifierToken RocksDBFulltextIndex::toDocumentIdentifierToken(
|
||||
TRI_voc_rid_t revisionId) {
|
||||
return RocksDBToken{revisionId};
|
||||
}
|
||||
|
||||
RocksDBFulltextIndex::RocksDBFulltextIndex(
|
||||
TRI_idx_iid_t iid, arangodb::LogicalCollection* collection,
|
||||
VPackSlice const& info)
|
||||
: RocksDBIndex(iid, collection, info),
|
||||
_minWordLength(TRI_FULLTEXT_MIN_WORD_LENGTH_DEFAULT) {
|
||||
TRI_ASSERT(iid != 0);
|
||||
|
||||
VPackSlice const value = info.get("minLength");
|
||||
|
||||
if (value.isNumber()) {
|
||||
_minWordLength = value.getNumericValue<int>();
|
||||
if (_minWordLength <= 0) {
|
||||
// The min length cannot be negative.
|
||||
_minWordLength = 1;
|
||||
}
|
||||
} else if (!value.isNone()) {
|
||||
// minLength defined but no number
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER,
|
||||
"<minLength> must be a number");
|
||||
}
|
||||
_unique = false;
|
||||
_sparse = true;
|
||||
if (_fields.size() != 1) {
|
||||
// We need exactly 1 attribute
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(
|
||||
TRI_ERROR_INTERNAL,
|
||||
"fulltext index definition should have exactly one attribute");
|
||||
}
|
||||
auto& attribute = _fields[0];
|
||||
_attr.reserve(attribute.size());
|
||||
for (auto& a : attribute) {
|
||||
_attr.emplace_back(a.name);
|
||||
}
|
||||
}
|
||||
|
||||
RocksDBFulltextIndex::~RocksDBFulltextIndex() {}
|
||||
|
||||
size_t RocksDBFulltextIndex::memory() const {
|
||||
rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
|
||||
RocksDBKeyBounds bounds =
|
||||
RocksDBKeyBounds::FulltextIndexEntries(_objectId, StringRef());
|
||||
rocksdb::Range r(bounds.start(), bounds.end());
|
||||
uint64_t out;
|
||||
db->GetApproximateSizes(&r, 1, &out, true);
|
||||
return (size_t)out;
|
||||
}
|
||||
|
||||
/// @brief return a VelocyPack representation of the index
|
||||
void RocksDBFulltextIndex::toVelocyPack(VPackBuilder& builder, bool withFigures,
|
||||
bool forPersistence) const {
|
||||
builder.openObject();
|
||||
Index::toVelocyPack(builder, withFigures, forPersistence);
|
||||
builder.add("unique", VPackValue(false));
|
||||
builder.add("sparse", VPackValue(true));
|
||||
builder.add("minLength", VPackValue(_minWordLength));
|
||||
builder.close();
|
||||
}
|
||||
|
||||
/// @brief Test if this index matches the definition
|
||||
bool RocksDBFulltextIndex::matchesDefinition(VPackSlice const& info) const {
|
||||
TRI_ASSERT(info.isObject());
|
||||
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
|
||||
VPackSlice typeSlice = info.get("type");
|
||||
TRI_ASSERT(typeSlice.isString());
|
||||
StringRef typeStr(typeSlice);
|
||||
TRI_ASSERT(typeStr == oldtypeName());
|
||||
#endif
|
||||
auto value = info.get("id");
|
||||
if (!value.isNone()) {
|
||||
// We already have an id.
|
||||
if (!value.isString()) {
|
||||
// Invalid ID
|
||||
return false;
|
||||
}
|
||||
// Short circuit. If id is correct the index is identical.
|
||||
StringRef idRef(value);
|
||||
return idRef == std::to_string(_iid);
|
||||
}
|
||||
|
||||
value = info.get("minLength");
|
||||
if (value.isNumber()) {
|
||||
int cmp = value.getNumericValue<int>();
|
||||
if (cmp <= 0) {
|
||||
if (_minWordLength != 1) {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
if (_minWordLength != cmp) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} else if (!value.isNone()) {
|
||||
// Illegal minLength
|
||||
return false;
|
||||
}
|
||||
|
||||
value = info.get("fields");
|
||||
if (!value.isArray()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
size_t const n = static_cast<size_t>(value.length());
|
||||
if (n != _fields.size()) {
|
||||
return false;
|
||||
}
|
||||
if (_unique != arangodb::basics::VelocyPackHelper::getBooleanValue(
|
||||
info, "unique", false)) {
|
||||
return false;
|
||||
}
|
||||
if (_sparse != arangodb::basics::VelocyPackHelper::getBooleanValue(
|
||||
info, "sparse", true)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// This check takes ordering of attributes into account.
|
||||
std::vector<arangodb::basics::AttributeName> translate;
|
||||
for (size_t i = 0; i < n; ++i) {
|
||||
translate.clear();
|
||||
VPackSlice f = value.at(i);
|
||||
if (!f.isString()) {
|
||||
// Invalid field definition!
|
||||
return false;
|
||||
}
|
||||
arangodb::StringRef in(f);
|
||||
TRI_ParseAttributeString(in, translate, true);
|
||||
if (!arangodb::basics::AttributeName::isIdentical(_fields[i], translate,
|
||||
false)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
int RocksDBFulltextIndex::insert(transaction::Methods* trx,
|
||||
TRI_voc_rid_t revisionId,
|
||||
VPackSlice const& doc, bool isRollback) {
|
||||
std::vector<std::string> words = wordlist(doc);
|
||||
if (words.empty()) {
|
||||
// TODO: distinguish the cases "empty wordlist" and "out of memory"
|
||||
// LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "could not build wordlist";
|
||||
return TRI_ERROR_INTERNAL;
|
||||
}
|
||||
|
||||
RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
|
||||
rocksdb::Transaction* rtrx = state->rocksTransaction();
|
||||
|
||||
// now we are going to construct the value to insert into rocksdb
|
||||
// unique indexes have a different key structure
|
||||
StringRef docKey(doc.get(StaticStrings::KeyString));
|
||||
RocksDBValue value = RocksDBValue::IndexValue();
|
||||
|
||||
int res = TRI_ERROR_NO_ERROR;
|
||||
size_t const count = words.size();
|
||||
size_t i = 0;
|
||||
for (; i < count; ++i) {
|
||||
std::string const& word = words[i];
|
||||
RocksDBKey key =
|
||||
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
|
||||
|
||||
rocksdb::Status s = rtrx->Put(key.string(), value.string());
|
||||
if (!s.ok()) {
|
||||
auto status = rocksutils::convertStatus(s, rocksutils::StatusHint::index);
|
||||
res = status.errorNumber();
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
for (size_t j = 0; j < i; ++j) {
|
||||
std::string const& word = words[j];
|
||||
RocksDBKey key =
|
||||
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
|
||||
rtrx->Delete(key.string());
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
int RocksDBFulltextIndex::insertRaw(rocksdb::WriteBatchWithIndex* batch,
|
||||
TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const& doc) {
|
||||
std::vector<std::string> words = wordlist(doc);
|
||||
if (words.empty()) {
|
||||
// TODO: distinguish the cases "empty wordlist" and "out of memory"
|
||||
// LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "could not build wordlist";
|
||||
return TRI_ERROR_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
// now we are going to construct the value to insert into rocksdb
|
||||
// unique indexes have a different key structure
|
||||
StringRef docKey(doc.get(StaticStrings::KeyString));
|
||||
RocksDBValue value = RocksDBValue::IndexValue();
|
||||
|
||||
size_t const count = words.size();
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
std::string const& word = words[i];
|
||||
RocksDBKey key =
|
||||
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
|
||||
batch->Put(key.string(), value.string());
|
||||
}
|
||||
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
int RocksDBFulltextIndex::remove(transaction::Methods* trx,
|
||||
TRI_voc_rid_t revisionId,
|
||||
VPackSlice const& doc, bool isRollback) {
|
||||
std::vector<std::string> words = wordlist(doc);
|
||||
if (words.empty()) {
|
||||
// TODO: distinguish the cases "empty wordlist" and "out of memory"
|
||||
// LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "could not build wordlist";
|
||||
return TRI_ERROR_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
|
||||
rocksdb::Transaction* rtrx = state->rocksTransaction();
|
||||
|
||||
// now we are going to construct the value to insert into rocksdb
|
||||
// unique indexes have a different key structure
|
||||
StringRef docKey(doc.get(StaticStrings::KeyString));
|
||||
int res = TRI_ERROR_NO_ERROR;
|
||||
size_t const count = words.size();
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
std::string const& word = words[i];
|
||||
RocksDBKey key =
|
||||
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
|
||||
|
||||
rocksdb::Status s = rtrx->Delete(key.string());
|
||||
if (!s.ok()) {
|
||||
auto status = rocksutils::convertStatus(s, rocksutils::StatusHint::index);
|
||||
res = status.errorNumber();
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
int RocksDBFulltextIndex::removeRaw(rocksdb::WriteBatch* batch, TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const& doc) {
|
||||
std::vector<std::string> words = wordlist(doc);
|
||||
// now we are going to construct the value to insert into rocksdb
|
||||
// unique indexes have a different key structure
|
||||
StringRef docKey(doc.get(StaticStrings::KeyString));
|
||||
size_t const count = words.size();
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
std::string const& word = words[i];
|
||||
RocksDBKey key =
|
||||
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
|
||||
batch->Delete(key.string());
|
||||
}
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
int RocksDBFulltextIndex::cleanup() {
|
||||
rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
|
||||
rocksdb::CompactRangeOptions opts;
|
||||
RocksDBKeyBounds bounds =
|
||||
RocksDBKeyBounds::FulltextIndexEntries(_objectId, StringRef());
|
||||
rocksdb::Slice b = bounds.start(), e = bounds.end();
|
||||
db->CompactRange(opts, &b, &e);
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
/// @brief walk over the attribute. Also Extract sub-attributes and elements in
|
||||
/// list.
|
||||
static void ExtractWords(std::vector<std::string>& words,
|
||||
VPackSlice const value, size_t minWordLength,
|
||||
int level) {
|
||||
if (value.isString()) {
|
||||
// extract the string value for the indexed attribute
|
||||
std::string text = value.copyString();
|
||||
|
||||
// parse the document text
|
||||
arangodb::basics::Utf8Helper::DefaultUtf8Helper.getWords(
|
||||
words, text, minWordLength, TRI_FULLTEXT_MAX_WORD_LENGTH, true);
|
||||
// We don't care for the result. If the result is false, words stays
|
||||
// unchanged and is not indexed
|
||||
} else if (value.isArray() && level == 0) {
|
||||
for (auto const& v : VPackArrayIterator(value)) {
|
||||
ExtractWords(words, v, minWordLength, level + 1);
|
||||
}
|
||||
} else if (value.isObject() && level == 0) {
|
||||
for (auto const& v : VPackObjectIterator(value)) {
|
||||
ExtractWords(words, v.value, minWordLength, level + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// @brief callback function called by the fulltext index to determine the
|
||||
/// words to index for a specific document
|
||||
std::vector<std::string> RocksDBFulltextIndex::wordlist(VPackSlice const& doc) {
|
||||
std::vector<std::string> words;
|
||||
try {
|
||||
VPackSlice const value = doc.get(_attr);
|
||||
|
||||
if (!value.isString() && !value.isArray() && !value.isObject()) {
|
||||
// Invalid Input
|
||||
return words;
|
||||
}
|
||||
|
||||
ExtractWords(words, value, _minWordLength, 0);
|
||||
} catch (...) {
|
||||
// Backwards compatibility
|
||||
// The pre-vpack impl. did just ignore all errors and returned nulltpr
|
||||
return words;
|
||||
}
|
||||
return words;
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBFulltextIndex::executeQuery(std::string const& queryString,
|
||||
VPackBuilder &builder) {
|
||||
|
||||
}
|
|
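The insert/remove paths above store one RocksDB entry per extracted word, keyed by (indexId, word, document key) with an empty value, and removal walks the same word list again. A minimal sketch of that layout, with a trivial whitespace tokenizer standing in for Utf8Helper::getWords and a std::map standing in for the RocksDB keyspace:

```cpp
#include <map>
#include <sstream>
#include <string>
#include <vector>

std::vector<std::string> wordlist(std::string const& text, size_t minWordLength) {
  std::vector<std::string> words;
  std::istringstream in(text);
  std::string w;
  while (in >> w) {
    if (w.size() >= minWordLength) words.push_back(w);  // drop too-short words
  }
  return words;
}

int main() {
  std::map<std::string, std::string> index;  // stands in for the RocksDB keyspace
  std::string docKey = "docs/1";
  for (auto const& word : wordlist("the quick brown fox", 2)) {
    index["fulltext/" + word + "/" + docKey] = "";  // one empty-valued entry per word
  }
  // removing the document deletes exactly the same set of keys again
}
```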
@@ -0,0 +1,138 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Simon Grätzer
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGOD_ROCKSDB_ENGINE_FULLTEXT_INDEX_H
#define ARANGOD_ROCKSDB_ENGINE_FULLTEXT_INDEX_H 1

#include "Basics/Common.h"
#include "Indexes/Index.h"
#include "RocksDBEngine/RocksDBIndex.h"
#include "VocBase/voc-types.h"
#include "VocBase/vocbase.h"

#include <velocypack/Builder.h>
#include <velocypack/velocypack-aliases.h>

/// @brief maximum length of an indexed word in characters
/// a character may consist of up to 4 bytes
#define TRI_FULLTEXT_MAX_WORD_LENGTH 40

/// @brief default minimum word length for a fulltext index
#define TRI_FULLTEXT_MIN_WORD_LENGTH_DEFAULT 2

namespace arangodb {
struct DocumentIdentifierToken;

struct FulltextQueryToken {
  /// @brief fulltext query match options
  /// substring is not implemented, maybe later
  enum MatchType { COMPLETE, PREFIX, SUBSTRING };
  /// @brief fulltext query logical operators
  enum Operation { AND, OR, EXCLUDE };

  std::string value;
  MatchType matchType;
  Operation operation;
};
/// A query consists of a list of tokens evaluated left to right:
/// an AND operation causes the entire result set on the left to
/// be intersected with every result containing the token.
/// Similarly, an OR triggers a union.
typedef std::vector<FulltextQueryToken> FulltextQuery;

class RocksDBFulltextIndex final : public RocksDBIndex {
 public:
  RocksDBFulltextIndex() = delete;

  RocksDBFulltextIndex(TRI_idx_iid_t, LogicalCollection*,
                       arangodb::velocypack::Slice const&);

  ~RocksDBFulltextIndex();

 public:
  IndexType type() const override { return Index::TRI_IDX_TYPE_FULLTEXT_INDEX; }

  char const* typeName() const override { return "fulltext-rocksdb"; }

  bool allowExpansion() const override { return false; }

  bool canBeDropped() const override { return true; }

  bool isSorted() const override { return true; }

  bool hasSelectivityEstimate() const override { return false; }

  size_t memory() const override;

  void toVelocyPack(VPackBuilder&, bool, bool) const override;
  // Uses default toVelocyPackFigures

  bool matchesDefinition(VPackSlice const&) const override;

  int insert(transaction::Methods*, TRI_voc_rid_t,
             arangodb::velocypack::Slice const&, bool isRollback) override;

  int remove(transaction::Methods*, TRI_voc_rid_t,
             arangodb::velocypack::Slice const&, bool isRollback) override;

  int cleanup() override;

  bool isSame(std::string const& field, int minWordLength) const {
    std::string fieldString;
    TRI_AttributeNamesToString(fields()[0], fieldString);
    return (_minWordLength == minWordLength && fieldString == field);
  }

  /// insert index elements into the specified write batch. Should be used
  /// as an optimization for the non transactional fillIndex method
  int insertRaw(rocksdb::WriteBatchWithIndex*, TRI_voc_rid_t,
                arangodb::velocypack::Slice const&) override;

  /// remove index elements and put them in the specified write batch. Should be
  /// used as an optimization for the non transactional fillIndex method
  int removeRaw(rocksdb::WriteBatch*, TRI_voc_rid_t,
                arangodb::velocypack::Slice const&) override;

  // TRI_fts_index_t* internals() { return _fulltextIndex; }

  static TRI_voc_rid_t fromDocumentIdentifierToken(
      DocumentIdentifierToken const& token);
  static DocumentIdentifierToken toDocumentIdentifierToken(
      TRI_voc_rid_t revisionId);

  arangodb::Result executeQuery(std::string const& queryString,
                                velocypack::Builder& builder);

 private:
  std::vector<std::string> wordlist(arangodb::velocypack::Slice const&);

  /// @brief the indexed attribute (path)
  std::vector<std::string> _attr;

  /// @brief minimum word length
  int _minWordLength;
};
}  // namespace arangodb

#endif
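The FulltextQueryToken model above evaluates a flat token list left to right. A minimal sketch of building such a list for a query like "quick,|brown,prefix:fo,-lazy"; the query-string syntax shown in the comment is an assumption, only the token model comes from the header:

```cpp
#include "RocksDBEngine/RocksDBFulltextIndex.h"

arangodb::FulltextQuery buildExampleQuery() {
  using T = arangodb::FulltextQueryToken;
  arangodb::FulltextQuery query;
  query.push_back({"quick", T::COMPLETE, T::AND});      // start with all "quick" matches
  query.push_back({"brown", T::COMPLETE, T::OR});       // union in "brown" matches
  query.push_back({"fo",    T::PREFIX,   T::AND});      // intersect with "fo*" matches
  query.push_back({"lazy",  T::COMPLETE, T::EXCLUDE});  // subtract "lazy" matches
  return query;
}
```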
@@ -85,9 +85,6 @@ class RocksDBIndex : public Index {
  virtual int removeRaw(rocksdb::WriteBatch*, TRI_voc_rid_t,
                        arangodb::velocypack::Slice const&) = 0;

  virtual void compact() = 0;
  virtual uint64_t estimateSize() = 0;

 protected:
  void createCache();
  void disableCache();
@@ -28,6 +28,7 @@
#include "Indexes/Index.h"
#include "RocksDBEngine/RocksDBEdgeIndex.h"
#include "RocksDBEngine/RocksDBEngine.h"
#include "RocksDBEngine/RocksDBFulltextIndex.h"
#include "RocksDBEngine/RocksDBHashIndex.h"
#include "RocksDBEngine/RocksDBPersistentIndex.h"
#include "RocksDBEngine/RocksDBPrimaryIndex.h"

@@ -210,6 +211,28 @@ static int EnhanceJsonIndexGeo2(VPackSlice const definition,
  return res;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief enhances the json of a fulltext index
////////////////////////////////////////////////////////////////////////////////

static int EnhanceJsonIndexFulltext(VPackSlice const definition,
                                    VPackBuilder& builder, bool create) {
  int res = ProcessIndexFields(definition, builder, 1, create);
  if (res == TRI_ERROR_NO_ERROR) {
    // handle "minLength" attribute
    int minWordLength = TRI_FULLTEXT_MIN_WORD_LENGTH_DEFAULT;
    VPackSlice minLength = definition.get("minLength");
    if (minLength.isNumber()) {
      minWordLength = minLength.getNumericValue<int>();
    } else if (!minLength.isNull() && !minLength.isNone()) {
      return TRI_ERROR_BAD_PARAMETER;
    }
    builder.add("minLength", VPackValue(minWordLength));
  }
  return res;
}

int RocksDBIndexFactory::enhanceIndexDefinition(VPackSlice const definition,
                                                VPackBuilder& enhanced,
                                                bool create, bool isCoordinator) const {

@@ -300,6 +323,10 @@ int RocksDBIndexFactory::enhanceIndexDefinition(VPackSlice const definition,
    case Index::TRI_IDX_TYPE_PERSISTENT_INDEX:
      res = EnhanceJsonIndexPersistent(definition, enhanced, create);
      break;

    case Index::TRI_IDX_TYPE_FULLTEXT_INDEX:
      res = EnhanceJsonIndexFulltext(definition, enhanced, create);
      break;

    case Index::TRI_IDX_TYPE_UNKNOWN:
    default: {

@@ -401,6 +428,10 @@ std::shared_ptr<Index> RocksDBIndexFactory::prepareIndexFromSlice(
      newIdx.reset(new arangodb::RocksDBPersistentIndex(iid, col, info));
      break;
    }
    case arangodb::Index::TRI_IDX_TYPE_FULLTEXT_INDEX: {
      newIdx.reset(new arangodb::RocksDBFulltextIndex(iid, col, info));
      break;
    }

    case arangodb::Index::TRI_IDX_TYPE_UNKNOWN:
    default: {

@@ -434,5 +465,5 @@ void RocksDBIndexFactory::fillSystemIndexes(

std::vector<std::string> RocksDBIndexFactory::supportedIndexes() const {
  return std::vector<std::string>{"primary", "edge", "hash", "skiplist",
                                  "persistent"};
                                  "persistent", "fulltext"};
}
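EnhanceJsonIndexFulltext above expects a definition with exactly one field and an optional numeric minLength. A minimal sketch of building such a definition slice with the velocypack Builder API; the "text" field name is just an example:

```cpp
#include <velocypack/Builder.h>
#include <velocypack/velocypack-aliases.h>

VPackBuilder makeFulltextDefinition() {
  VPackBuilder b;
  b.openObject();
  b.add("type", VPackValue("fulltext"));
  b.add("fields", VPackValue(VPackValueType::Array));
  b.add(VPackValue("text"));          // exactly one attribute path
  b.close();                          // fields
  b.add("minLength", VPackValue(3));  // optional; the default above is 2
  b.close();
  return b;
}
```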
@@ -59,8 +59,8 @@ RocksDBKey RocksDBKey::PrimaryIndexValue(uint64_t indexId,
}

RocksDBKey RocksDBKey::EdgeIndexValue(uint64_t indexId,
                                      std::string const& vertexId,
                                      std::string const& primaryKey) {
                                      arangodb::StringRef const& vertexId,
                                      arangodb::StringRef const& primaryKey) {
  return RocksDBKey(RocksDBEntryType::EdgeIndexValue, indexId, vertexId,
                    primaryKey);
}

@@ -93,6 +93,14 @@ RocksDBKey RocksDBKey::ReplicationApplierConfig(TRI_voc_tick_t databaseId) {
  return RocksDBKey(RocksDBEntryType::ReplicationApplierConfig, databaseId);
}

RocksDBKey RocksDBKey::FulltextIndexValue(uint64_t indexId,
                                          arangodb::StringRef const& word,
                                          arangodb::StringRef const& primaryKey) {
  return RocksDBKey(RocksDBEntryType::FulltextIndexValue, indexId, word, primaryKey);
}

// ========================= Member methods ===========================

RocksDBEntryType RocksDBKey::type(RocksDBKey const& key) {
  return type(key._buffer.data(), key._buffer.size());
}

@@ -295,18 +303,20 @@ RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first,
}

RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first,
                       std::string const& second, std::string const& third)
                       arangodb::StringRef const& second,
                       arangodb::StringRef const& third)
    : _type(type), _buffer() {
  switch (_type) {
    case RocksDBEntryType::FulltextIndexValue:
    case RocksDBEntryType::EdgeIndexValue: {
      size_t length = sizeof(char) + sizeof(uint64_t) + second.size() +
                      sizeof(char) + third.size() + sizeof(uint8_t);
      _buffer.reserve(length);
      _buffer.push_back(static_cast<char>(_type));
      uint64ToPersistent(_buffer, first);
      _buffer.append(second);
      _buffer.append(second.data(), second.length());
      _buffer.push_back(_stringSeparator);
      _buffer.append(third);
      _buffer.append(third.data(), third.length());
      TRI_ASSERT(third.size() <= 254);
      _buffer.push_back(static_cast<char>(third.size() & 0xff));
      break;
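The constructor above builds a composite key as [type byte][8-byte index id][vertex or word][separator][primary key][length byte]. A minimal sketch of that layout; the little-endian encoding and the zero-byte separator are assumptions standing in for uint64ToPersistent and _stringSeparator:

```cpp
#include <cstdint>
#include <string>

void appendUint64(std::string& out, uint64_t v) {
  // little-endian append; the real encoding is uint64ToPersistent (assumed)
  for (int i = 0; i < 8; ++i) out.push_back(static_cast<char>((v >> (8 * i)) & 0xff));
}

std::string buildIndexKey(char type, uint64_t indexId,
                          std::string const& second, std::string const& third) {
  std::string buffer;
  buffer.push_back(type);                  // entry type tag
  appendUint64(buffer, indexId);           // which index this entry belongs to
  buffer.append(second);                   // vertex id or fulltext word
  buffer.push_back('\0');                  // assumed value of _stringSeparator
  buffer.append(third);                    // primary key of the document
  buffer.push_back(static_cast<char>(third.size() & 0xff));  // third.size() <= 254
  return buffer;
}
```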
@@ -79,8 +79,8 @@ class RocksDBKey {
  /// for the `_to` sub-index and one for the `_from` sub-index.
  //////////////////////////////////////////////////////////////////////////////
  static RocksDBKey EdgeIndexValue(uint64_t indexId,
                                   std::string const& vertexId,
                                   std::string const& primaryKey);
                                   arangodb::StringRef const& vertexId,
                                   arangodb::StringRef const& primaryKey);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Create a fully-specified key for an entry in a user-defined,

@@ -122,6 +122,13 @@ class RocksDBKey {
  /// @brief Create a fully-specified key for a replication applier config
  //////////////////////////////////////////////////////////////////////////////
  static RocksDBKey ReplicationApplierConfig(TRI_voc_tick_t databaseId);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Create a fully-specified key for the fulltext index
  //////////////////////////////////////////////////////////////////////////////
  static RocksDBKey FulltextIndexValue(uint64_t indexId,
                                       arangodb::StringRef const& word,
                                       arangodb::StringRef const& primaryKey);

 public:
  //////////////////////////////////////////////////////////////////////////////

@@ -224,8 +231,8 @@ class RocksDBKey {
             arangodb::StringRef const& docKey, VPackSlice const& indexData);
  RocksDBKey(RocksDBEntryType type, uint64_t first,
             arangodb::StringRef const& second);
  RocksDBKey(RocksDBEntryType type, uint64_t first, std::string const& second,
             std::string const& third);
  RocksDBKey(RocksDBEntryType type, uint64_t first, arangodb::StringRef const& second,
             arangodb::StringRef const& third);

 private:
  static RocksDBEntryType type(char const* data, size_t size);
@@ -61,7 +61,7 @@ RocksDBKeyBounds RocksDBKeyBounds::EdgeIndex(uint64_t indexId) {
}

RocksDBKeyBounds RocksDBKeyBounds::EdgeIndexVertex(
    uint64_t indexId, std::string const& vertexId) {
    uint64_t indexId, arangodb::StringRef const& vertexId) {
  return RocksDBKeyBounds(RocksDBEntryType::EdgeIndexValue, indexId, vertexId);
}

@@ -94,6 +94,13 @@ RocksDBKeyBounds RocksDBKeyBounds::CounterValues() {
  return RocksDBKeyBounds(RocksDBEntryType::CounterValue);
}

RocksDBKeyBounds RocksDBKeyBounds::FulltextIndexEntries(uint64_t indexId,
                                                        arangodb::StringRef const& word) {
  return RocksDBKeyBounds(RocksDBEntryType::FulltextIndexValue, indexId, word);
}

// ============================ Member Methods ==============================

rocksdb::Slice const RocksDBKeyBounds::start() const {
  return rocksdb::Slice(_startBuffer);
}

@@ -222,22 +229,35 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first)
}

RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
                                   std::string const& second)
                                   arangodb::StringRef const& second)
    : _type(type), _startBuffer(), _endBuffer() {
  switch (_type) {
    case RocksDBEntryType::FulltextIndexValue: {
      size_t length =
          sizeof(char) + sizeof(uint64_t) + second.size() + sizeof(char);
      _startBuffer.reserve(length);
      _startBuffer.push_back(static_cast<char>(_type));
      uint64ToPersistent(_startBuffer, first);
      _startBuffer.append(second.data(), second.length());

      _endBuffer.clear();
      _endBuffer.append(_startBuffer);
      _endBuffer.push_back(0xFF);
      break;
    }

    case RocksDBEntryType::EdgeIndexValue: {
      size_t length =
          sizeof(char) + sizeof(uint64_t) + second.size() + sizeof(char);
      _startBuffer.reserve(length);
      _startBuffer.push_back(static_cast<char>(_type));
      uint64ToPersistent(_startBuffer, first);
      _startBuffer.append(second);
      _startBuffer.append(second.data(), second.length());
      _startBuffer.push_back(_stringSeparator);

      _endBuffer.clear();
      _endBuffer.append(_startBuffer);
      nextPrefix(_endBuffer);

      break;
    }
@ -26,9 +26,9 @@
#define ARANGO_ROCKSDB_ROCKSDB_KEY_BOUNDS_H 1

#include "Basics/Common.h"
#include "Basics/StringRef.h"
#include "RocksDBEngine/RocksDBTypes.h"
#include "VocBase/vocbase.h"

#include <rocksdb/slice.h>

#include <velocypack/Slice.h>

@ -73,7 +73,7 @@ class RocksDBKeyBounds {
  /// to the specified vertex
  //////////////////////////////////////////////////////////////////////////////
  static RocksDBKeyBounds EdgeIndexVertex(uint64_t indexId,
                                          std::string const& vertexId);
                                          arangodb::StringRef const& vertexId);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Bounds for all index-entries belonging to a specified non-unique index

@ -109,6 +109,12 @@ class RocksDBKeyBounds {
  /// @brief Bounds for all counter values
  //////////////////////////////////////////////////////////////////////////////
  static RocksDBKeyBounds CounterValues();

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Bounds for all entries of a fulltext index
  //////////////////////////////////////////////////////////////////////////////
  static RocksDBKeyBounds FulltextIndexEntries(uint64_t,
                                               arangodb::StringRef const&);

 public:
  //////////////////////////////////////////////////////////////////////////////

@ -140,7 +146,7 @@ class RocksDBKeyBounds {
  explicit RocksDBKeyBounds(RocksDBEntryType type);
  RocksDBKeyBounds(RocksDBEntryType type, uint64_t first);
  RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
                   std::string const& second);
                   arangodb::StringRef const& second);
  RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
                   VPackSlice const& second, VPackSlice const& third);
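Note (illustration, not part of the commit): every bounds constructor in this file follows the same recipe — the start key is the common prefix of all entries in the range, and the end key is that prefix bumped to the next possible byte sequence (appending 0xFF, or incrementing the last byte as nextPrefix presumably does; its body is not shown in this diff). A self-contained sketch of the idea:

  #include <string>
  #include <utility>

  // Returns {start, end} such that every key k with prefix `prefix`
  // satisfies start <= k < end. An empty `end` means "no upper bound".
  static std::pair<std::string, std::string> prefixBounds(std::string prefix) {
    std::string end = prefix;
    for (size_t i = end.size(); i > 0; --i) {
      auto b = static_cast<unsigned char>(end[i - 1]);
      if (b != 0xFF) {
        end[i - 1] = static_cast<char>(b + 1);
        end.resize(i);  // bytes after the bumped one can be dropped
        return {std::move(prefix), std::move(end)};
      }
    }
    // all bytes were 0xFF: no finite successor of this prefix exists
    end.clear();
    return {std::move(prefix), std::move(end)};
  }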
@ -328,7 +328,21 @@ size_t RocksDBPrimaryIndex::size() const {

/// @brief return the memory usage of the index
size_t RocksDBPrimaryIndex::memory() const {
  return 0; // TODO
  rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
  RocksDBKeyBounds bounds = RocksDBKeyBounds::PrimaryIndex(_objectId);
  rocksdb::Range r(bounds.start(), bounds.end());
  uint64_t out;
  db->GetApproximateSizes(&r, 1, &out, true);
  return (size_t)out;
}

int RocksDBPrimaryIndex::cleanup() {
  rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
  rocksdb::CompactRangeOptions opts;
  RocksDBKeyBounds bounds = RocksDBKeyBounds::PrimaryIndex(_objectId);
  rocksdb::Slice b = bounds.start(), e = bounds.end();
  db->CompactRange(opts, &b, &e);
  return TRI_ERROR_NO_ERROR;
}

/// @brief return a VelocyPack representation of the index

@ -695,20 +709,3 @@ void RocksDBPrimaryIndex::handleValNode(transaction::Methods* trx,
                                  VPackValueType::String));
  }
}

void RocksDBPrimaryIndex::compact() {
  rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
  rocksdb::CompactRangeOptions opts;
  RocksDBKeyBounds bounds = RocksDBKeyBounds::PrimaryIndex(_objectId);
  rocksdb::Slice b = bounds.start(), e = bounds.end();
  db->CompactRange(opts, &b, &e);
}

uint64_t RocksDBPrimaryIndex::estimateSize() {
  rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
  RocksDBKeyBounds bounds = RocksDBKeyBounds::PrimaryIndex(_objectId);
  rocksdb::Range r(bounds.start(), bounds.end());
  uint64_t out;
  db->GetApproximateSizes(&r, 1, &out, true);
  return out;
}
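Note (illustration, not part of the commit): memory() now asks RocksDB itself for an estimate instead of returning 0. GetApproximateSizes() sums the on-disk size of the SST blocks overlapping a key range, and the final `true` also counts memtable data. A hedged usage sketch against the plain RocksDB API of this era:

  #include <rocksdb/db.h>
  #include <cstdint>
  #include <string>

  // Estimate how many bytes the half-open key range [start, end) occupies,
  // including data still sitting in memtables.
  uint64_t approximateRangeSize(rocksdb::DB* db, std::string const& start,
                                std::string const& end) {
    rocksdb::Range r(start, end);
    uint64_t size = 0;
    // The 4-argument overload with an include_memtables flag matches the
    // RocksDB version this diff targets; newer releases use
    // GetApproximateSizes(SizeApproximationOptions, ...) instead.
    db->GetApproximateSizes(&r, 1, &size, true);
    return size;
  }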
@ -212,9 +212,8 @@ class RocksDBPrimaryIndex final : public RocksDBIndex {
                     transaction::Methods* trx,
                     std::function<bool(DocumentIdentifierToken const&)> callback) const;

  void compact() override;
  uint64_t estimateSize() override;
  int cleanup() override;

 private:
  /// @brief create the iterator, for a single attribute, IN operator
  IndexIterator* createInIterator(transaction::Methods*, ManagedDocumentResult*,
@ -73,7 +73,8 @@ class WALParser : public rocksdb::WriteBatch::Handler {
      : _vocbase(vocbase),
        _includeSystem(includeSystem),
        _onlyCollectionId(collectionId),
        _builder(builder) {}
        _builder(builder),
        _currentSequence(0) {}

  void LogData(rocksdb::Slice const& blob) override {
    RocksDBLogType type = RocksDBLogValue::type(blob);
@ -229,15 +229,7 @@ void RocksDBRestExportHandler::createCursor() {
      options, "flush", false);

  if (flush) {
    rocksdb::TransactionDB* db =
        static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->db();

    rocksdb::Status status = db->GetBaseDB()->SyncWAL();

    if (!status.ok()) {
      Result res = rocksutils::convertStatus(status);
      THROW_ARANGO_EXCEPTION(res.errorNumber());
    }
    static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();

    double flushWait =
        arangodb::basics::VelocyPackHelper::getNumericValue<double>(
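Note (illustration, not part of the commit): this hunk and the syncWal hunks below it replace four copies of the same "fetch engine, SyncWAL, convert status, throw" boilerplate with a single RocksDBEngine::syncWal() call. The wrapper's body is not shown anywhere in this diff; judging only from the removed code, a plausible shape is:

  // Hypothetical reconstruction of RocksDBEngine::syncWal(), inferred from
  // the duplicated blocks this commit removes. The member name _db and the
  // exact signature are assumptions.
  void RocksDBEngine::syncWal() {
    rocksdb::Status status = _db->GetBaseDB()->SyncWAL();
    if (!status.ok()) {
      Result res = rocksutils::convertStatus(status);
      THROW_ARANGO_EXCEPTION_MESSAGE(res.errorNumber(), res.errorMessage());
    }
  }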
@ -372,10 +372,10 @@ void RocksDBRestReplicationHandler::handleCommandBatch() {
    // int res = engine->insertCompactionBlocker(_vocbase, expires, id);

    RocksDBReplicationContext* ctx = _manager->createContext();
    RocksDBReplicationContextGuard(_manager, ctx);
    if (ctx == nullptr) {
      THROW_ARANGO_EXCEPTION(TRI_ERROR_FAILED);
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to create replication context");
    }
    RocksDBReplicationContextGuard(_manager, ctx);
    ctx->bind(_vocbase);  // create transaction+snapshot

    VPackBuilder b;

@ -423,8 +423,12 @@ void RocksDBRestReplicationHandler::handleCommandBatch() {
    RocksDBReplicationContextGuard(_manager, ctx);
    if (busy) {
      res = TRI_ERROR_CURSOR_BUSY;
      generateError(GeneralResponse::responseCode(res), res);
      return;
    } else if (ctx == nullptr) {
      res = TRI_ERROR_CURSOR_NOT_FOUND;
      generateError(GeneralResponse::responseCode(res), res);
      return;
    }

    // add client

@ -1636,17 +1640,7 @@ void RocksDBRestReplicationHandler::handleCommandSync() {
  config._useCollectionId = useCollectionId;

  // wait until all data in current logfile got synced
  // MMFilesLogfileManager::instance()->waitForSync(5.0);
  rocksdb::TransactionDB* db =
      static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->db();

  rocksdb::Status status = db->GetBaseDB()->SyncWAL();
  if (!status.ok()) {
    Result res = rocksutils::convertStatus(status).errorNumber();
    generateError(rest::ResponseCode::BAD, res.errorNumber(),
                  res.errorMessage());
    return;
  }
  static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();

  InitialSyncer syncer(_vocbase, &config, restrictCollections, restrictType,
                       verbose);
@ -140,20 +140,8 @@ void RocksDBRestWalHandler::flush() {
    if (ServerState::instance()->isCoordinator()) {
      res = flushWalOnAllDBServers(waitForSync, waitForCollector);
    } else {
      rocksdb::TransactionDB* db =
          static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->db();

      if (waitForSync) {
        rocksdb::Status status = db->GetBaseDB()->SyncWAL();
        if (!status.ok()) {
          res = rocksutils::convertStatus(status).errorNumber();
        }
      }
      if (waitForCollector) {
        // does not make sense in rocksdb
        /*rocksdb::FlushOptions flushOptions;
        flushOptions.wait = true;
        db->Flush(flushOptions);*/
      static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();
      }
    }

@ -42,7 +42,8 @@ enum class RocksDBEntryType : char {
  UniqueIndexValue = '7',
  View = '8',
  SettingsValue = '9',
  ReplicationApplierConfig = ':'
  ReplicationApplierConfig = ':',
  FulltextIndexValue = ';'
};

enum class RocksDBLogType : char {
@ -46,15 +46,7 @@ static void JS_FlushWal(v8::FunctionCallbackInfo<v8::Value> const& args) {
  TRI_V8_TRY_CATCH_BEGIN(isolate);
  v8::HandleScope scope(isolate);

  rocksdb::TransactionDB* db =
      static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->db();

  rocksdb::Status status = db->GetBaseDB()->SyncWAL();

  if (!status.ok()) {
    Result res = rocksutils::convertStatus(status);
    TRI_V8_THROW_EXCEPTION(res.errorNumber());
  }
  static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();

  TRI_V8_RETURN_TRUE();
  TRI_V8_TRY_CATCH_END
@ -181,7 +181,13 @@ RocksDBVPackIndex::RocksDBVPackIndex(TRI_idx_iid_t iid,
RocksDBVPackIndex::~RocksDBVPackIndex() {}

size_t RocksDBVPackIndex::memory() const {
  return 0; // TODO
  rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
  RocksDBKeyBounds bounds = _unique ? RocksDBKeyBounds::UniqueIndex(_objectId)
                                    : RocksDBKeyBounds::IndexEntries(_objectId);
  rocksdb::Range r(bounds.start(), bounds.end());
  uint64_t out;
  db->GetApproximateSizes(&r, 1, &out, true);
  return (size_t)out;
}

/// @brief return a VelocyPack representation of the index

@ -1402,21 +1408,12 @@ bool RocksDBVPackIndex::isDuplicateOperator(
  return duplicate;
}

void RocksDBVPackIndex::compact() {
int RocksDBVPackIndex::cleanup() {
  rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
  rocksdb::CompactRangeOptions opts;
  RocksDBKeyBounds bounds = _unique ? RocksDBKeyBounds::UniqueIndex(_objectId)
                                    : RocksDBKeyBounds::IndexEntries(_objectId);
  rocksdb::Slice b = bounds.start(), e = bounds.end();
  db->CompactRange(opts, &b, &e);
}

uint64_t RocksDBVPackIndex::estimateSize() {
  rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
  RocksDBKeyBounds bounds = _unique ? RocksDBKeyBounds::UniqueIndex(_objectId)
                                    : RocksDBKeyBounds::IndexEntries(_objectId);
  rocksdb::Range r(bounds.start(), bounds.end());
  uint64_t out;
  db->GetApproximateSizes(&r, 1, &out, true);
  return out;
  return TRI_ERROR_NO_ERROR;
}
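Note (illustration, not part of the commit): cleanup() hands the index's whole key range to RocksDB's manual compaction, which rewrites the overlapping SST files and drops tombstones — useful after bulk deletions. A sketch of the underlying call, with nothing ArangoDB-specific in it:

  #include <rocksdb/db.h>
  #include <string>

  // Manually compact one key range; reclaims space left by deleted entries.
  rocksdb::Status compactKeyRange(rocksdb::DB* db, std::string const& start,
                                  std::string const& end) {
    rocksdb::CompactRangeOptions opts;  // defaults: non-exclusive, auto level
    rocksdb::Slice b(start), e(end);
    return db->CompactRange(opts, &b, &e);
  }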
@ -178,8 +178,7 @@ class RocksDBVPackIndex : public RocksDBIndex {
  arangodb::aql::AstNode* specializeCondition(
      arangodb::aql::AstNode*, arangodb::aql::Variable const*) const override;

  void compact() override;
  uint64_t estimateSize() override;
  int cleanup() override;

 private:
  bool isDuplicateOperator(arangodb::aql::AstNode const*,
@ -21,8 +21,8 @@
////////////////////////////////////////////////////////////////////////////////

#include "Scheduler/AcceptorTcp.h"

#include "Basics/Common.h"
#include "Basics/Exceptions.h"
#include "Endpoint/EndpointIp.h"
#include "Scheduler/SocketTcp.h"

@ -79,6 +79,9 @@ void AcceptorTcp::open() {
void AcceptorTcp::asyncAccept(AcceptHandler const& handler) {
  createPeer();
  auto peer = dynamic_cast<SocketTcp*>(_peer.get());
  if (peer == nullptr) {
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected socket type");
  }
  _acceptor.async_accept(peer->_socket, peer->_peerEndpoint, handler);
}

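Note (illustration, not part of the commit): both acceptors now guard the downcast from the generic socket to the concrete peer type. dynamic_cast on a mismatched pointer yields nullptr rather than undefined behavior, so a wiring mistake surfaces as a clean exception instead of a crash. A generic sketch of the pattern:

  #include <memory>
  #include <stdexcept>

  struct Socket { virtual ~Socket() = default; };
  struct SocketTcp : Socket { /* concrete TCP state lives here */ };

  // Downcast with an explicit failure path instead of a blind static_cast:
  // if someone wires up the wrong Socket subclass, we fail loudly.
  SocketTcp& asTcpSocket(std::unique_ptr<Socket> const& peer) {
    auto* tcp = dynamic_cast<SocketTcp*>(peer.get());
    if (tcp == nullptr) {
      throw std::runtime_error("unexpected socket type");
    }
    return *tcp;
  }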
@ -21,7 +21,7 @@
////////////////////////////////////////////////////////////////////////////////

#include "Scheduler/AcceptorUnixDomain.h"

#include "Basics/Exceptions.h"
#include "Basics/FileUtils.h"
#include "Endpoint/EndpointUnixDomain.h"
#include "Scheduler/SocketUnixDomain.h"

@ -53,6 +53,9 @@ void AcceptorUnixDomain::open() {
void AcceptorUnixDomain::asyncAccept(AcceptHandler const& handler) {
  createPeer();
  auto peer = dynamic_cast<SocketUnixDomain*>(_peer.get());
  if (peer == nullptr) {
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected socket type");
  }
  _acceptor.async_accept(peer->_socket, peer->_peerEndpoint, handler);
}

@ -279,6 +279,18 @@ void SchedulerFeature::buildControlCHandler() {
  }
#else

#ifndef WIN32
  // Signal masking on POSIX platforms
  //
  // POSIX allows signals to be blocked using functions such as sigprocmask()
  // and pthread_sigmask(). For signals to be delivered, programs must ensure
  // that any signals registered using signal_set objects are unblocked in at
  // least one thread.
  sigset_t all;
  sigemptyset(&all);
  pthread_sigmask(SIG_SETMASK, &all, 0);
#endif

  auto ioService = _scheduler->managerService();
  _exitSignals = std::make_shared<boost::asio::signal_set>(*ioService, SIGINT,
                                                           SIGTERM, SIGQUIT);
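Note (illustration, not part of the commit): the empty-set pthread_sigmask(SIG_SETMASK, ...) unblocks all signals in the calling thread so that Boost.Asio's signal_set can actually receive SIGINT/SIGTERM/SIGQUIT. A minimal standalone sketch of the same wiring, using plain Boost.Asio and no ArangoDB types:

  #include <boost/asio.hpp>
  #include <csignal>
  #include <iostream>

  int main() {
    boost::asio::io_service io;

  #ifndef _WIN32
    // Clear any inherited signal mask; asio can only deliver signals that
    // are unblocked in at least one thread.
    sigset_t all;
    sigemptyset(&all);
    pthread_sigmask(SIG_SETMASK, &all, nullptr);
  #endif

    boost::asio::signal_set exitSignals(io, SIGINT, SIGTERM);
    exitSignals.async_wait([](boost::system::error_code const& ec, int signo) {
      if (!ec) {
        std::cout << "caught signal " << signo << ", shutting down\n";
      }
    });

    io.run();  // blocks until the signal handler has run
    return 0;
  }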
@ -77,24 +77,20 @@ install(
  ${PROJECT_SOURCE_DIR}/js/apps
  ${PROJECT_SOURCE_DIR}/js/contrib
  ${PROJECT_SOURCE_DIR}/js/server
  DESTINATION
  ${CMAKE_INSTALL_DATAROOTDIR_ARANGO}/js
  REGEX
  "^.*/server/tests$" EXCLUDE
  REGEX
  "^.*/aardvark/APP/node_modules$" EXCLUDE
  DESTINATION ${CMAKE_INSTALL_DATAROOTDIR_ARANGO}/js
  REGEX "^.*/server/tests$" EXCLUDE
  REGEX "^.*/aardvark/APP/node_modules$" EXCLUDE
  REGEX "^.*/aardvark/APP/test$" EXCLUDE
  REGEX "^.*/.bin" EXCLUDE
)

if (USE_ENTERPRISE)
  install(
    DIRECTORY
    ${PROJECT_SOURCE_DIR}/enterprise/js/server
    DESTINATION
    ${CMAKE_INSTALL_DATAROOTDIR_ARANGO}/js
    REGEX
    "^.*/server/tests$" EXCLUDE
    REGEX
    "^.*/aardvark/APP/node_modules$" EXCLUDE
    DIRECTORY ${PROJECT_SOURCE_DIR}/enterprise/js/server
    DESTINATION ${CMAKE_INSTALL_DATAROOTDIR_ARANGO}/js
    REGEX "^.*/server/tests$" EXCLUDE
    REGEX "^.*/aardvark/APP/node_modules$" EXCLUDE
    REGEX "^.*/aardvark/APP/test$" EXCLUDE
  )
endif ()

@ -23,16 +23,11 @@ if (USE_ENTERPRISE)
    DIRECTORY
    ${ARANGODB_SOURCE_DIR}/enterprise/js/common
    ${ARANGODB_SOURCE_DIR}/enterprise/js/client
    DESTINATION
    ${CMAKE_INSTALL_DATAROOTDIR_ARANGO}/js
    FILES_MATCHING
    PATTERN "*.js"
    REGEX
    "^.*/common/test-data$" EXCLUDE
    REGEX
    "^.*/common/tests$" EXCLUDE
    REGEX
    "^.*/client/tests$" EXCLUDE
    DESTINATION ${CMAKE_INSTALL_DATAROOTDIR_ARANGO}/js
    FILES_MATCHING PATTERN "*.js"
    REGEX "^.*/common/test-data$" EXCLUDE
    REGEX "^.*/common/tests$" EXCLUDE
    REGEX "^.*/client/tests$" EXCLUDE
  )
endif ()

@ -40,4 +35,8 @@ endif ()
install(
  DIRECTORY ${ARANGODB_SOURCE_DIR}/js/node
  DESTINATION ${CMAKE_INSTALL_DATAROOTDIR_ARANGO}/js
  REGEX "^.*/eslint" EXCLUDE
  REGEX "^.*/.npmignore" EXCLUDE
  REGEX "^.*/expect.js$" EXCLUDE
  REGEX "^.*/.bin" EXCLUDE
)

@ -1,4 +1,5 @@
message("enabling MacOSX 'Bundle' package")

if (${USE_ENTERPRISE})
  set(CPACK_PACKAGE_NAME "ArangoDB3e-CLI")
else()

@ -12,6 +13,7 @@ configure_file("${PROJECT_SOURCE_DIR}/Installation/MacOSX/Bundle/Info.plist.in"
set(CPACK_BUNDLE_PLIST "${CMAKE_CURRENT_BINARY_DIR}/Info.plist")

set(CPACK_BUNDLE_PREFIX "Contents/MacOS")
set(CPACK_BUNDLE_APPLE_CERT_APP "Developer ID Application: ArangoDB GmbH (W7UC4UQXPV)")
set(CPACK_INSTALL_PREFIX "${CPACK_PACKAGE_NAME}.app/${CPACK_BUNDLE_PREFIX}${CMAKE_INSTALL_PREFIX}")

set(INST_USR_LIBDIR "/Library/ArangoDB")

@ -29,10 +31,21 @@ to_native_path("CPACK_ARANGO_DATA_DIR")
to_native_path("CPACK_ARANGO_STATE_DIR")
to_native_path("CPACK_ARANGO_LOG_DIR")

# we wrap HDIUTIL to inject our own parameter:
find_program(HDIUTIL_EXECUTABLE hdiutil)
# for now 240MB seems to be enough:
set(CMAKE_DMG_SIZE 240)
configure_file("${PROJECT_SOURCE_DIR}/Installation/MacOSX/Bundle/hdiutilwrapper.sh.in"
  "${CMAKE_CURRENT_BINARY_DIR}/hdiutilwrapper.sh"
  @ONLY)
set(CPACK_COMMAND_HDIUTIL "${CMAKE_CURRENT_BINARY_DIR}/hdiutilwrapper.sh")


configure_file("${PROJECT_SOURCE_DIR}/Installation/MacOSX/Bundle/arangodb-cli.sh.in"
  "${CMAKE_CURRENT_BINARY_DIR}/arangodb-cli.sh"
  @ONLY)


set(CPACK_BUNDLE_STARTUP_COMMAND "${CMAKE_CURRENT_BINARY_DIR}/arangodb-cli.sh")

add_custom_target(package-arongodb-server-bundle
@ -51,7 +51,7 @@ const toArgv = require('internal').toArgv;

function exportTest (options) {
  const cluster = options.cluster ? '-cluster' : '';
  const tmpPath = fs.getTempPath();
  const tmpPath = fs.join(options.testOutputDirectory, 'export');
  const DOMParser = new xmldom.DOMParser({
    locator: {},
    errorHandler: {

@ -118,7 +118,6 @@ function exportTest (options) {
  results.exportJson.failed = results.exportJson.status ? 0 : 1;

  try {
    // const filesContent = JSON.parse(fs.read(fs.join(tmpPath, 'UnitTestsExport.json')));
    results.parseJson = {
      failed: 0,
      status: true
@ -178,12 +178,14 @@ function rubyTests (options, ssl) {
      instanceInfo.exitStatus = 'server is gone.';
      break;
    }
    const subFolder = ssl ? 'ssl_server' : 'http_server';
    const resultfn = fs.join(options.testOutputDirectory, subFolder, te + '.json');

    args = ['--color',
      '-I', fs.join('UnitTests', 'HttpInterface'),
      '--format', 'd',
      '--format', 'j',
      '--out', fs.join('out', 'UnitTests', te + '.json'),
      '--out', resultfn,
      '--require', tmpname,
      tfn
    ];

@ -201,7 +203,6 @@ function rubyTests (options, ssl) {
      status: res.status
    };

    const resultfn = fs.join('out', 'UnitTests', te + '.json');

    try {
      const jsonResult = JSON.parse(fs.read(resultfn));
@ -34,7 +34,7 @@ var db = require("internal").db;
var users = require("@arangodb/users");
var request = require('@arangodb/request');
var crypto = require('@arangodb/crypto');
var expect = require('expect.js');
const expect = require('chai').expect;

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite

@ -287,7 +287,7 @@ function AuthSuite () {

    testAuthOpen: function() {
      var res = request(baseUrl() + "/_open/auth");
      expect(res).to.be.a(request.Response);
      expect(res).to.be.an.instanceof(request.Response);
      // mop: GET is an unsupported method, but it is skipping auth
      expect(res).to.have.property('statusCode', 405);
    },

@ -297,7 +297,7 @@ function AuthSuite () {
        url: baseUrl() + "/_open/auth",
        body: JSON.stringify({"username": "root", "password": ""})
      });
      expect(res).to.be.a(request.Response);
      expect(res).to.be.an.instanceof(request.Response);
      expect(res).to.have.property('statusCode', 200);

      expect(res.body).to.be.an('string');

@ -305,7 +305,7 @@ function AuthSuite () {
      expect(obj).to.have.property('jwt');
      expect(obj).to.have.property('must_change_password');
      expect(obj.jwt).to.be.a('string');
      expect(obj.jwt.split('.').length).to.be(3);
      expect(obj.jwt.split('.').length).to.be.equal(3);
      expect(obj.must_change_password).to.be.a('boolean');
    },

@ -317,14 +317,14 @@ function AuthSuite () {
        url: baseUrl() + "/_open/auth",
        body: JSON.stringify({"username": "hackers@arangodb.com", "password": "foobar"})
      });
      expect(res).to.be.a(request.Response);
      expect(res).to.be.an.instanceof(request.Response);
      expect(res).to.have.property('statusCode', 200);
      expect(res.body).to.be.an('string');
      var obj = JSON.parse(res.body);
      expect(obj).to.have.property('jwt');
      expect(obj).to.have.property('must_change_password');
      expect(obj.jwt).to.be.a('string');
      expect(obj.jwt.split('.').length).to.be(3);
      expect(obj.jwt.split('.').length).to.be.equal(3);
      expect(obj.must_change_password).to.be.a('boolean');
    },

@ -336,7 +336,7 @@ function AuthSuite () {
        url: baseUrl() + "/_open/auth",
        body: JSON.stringify({"username": "hackers@arangodb.com", "password": "foobar"})
      });
      expect(res).to.be.a(request.Response);
      expect(res).to.be.an.instanceof(request.Response);
      expect(res).to.have.property('statusCode', 401);
    },

@ -345,7 +345,7 @@ function AuthSuite () {
        url: baseUrl() + "/_open/auth",
        body: JSON.stringify({"username": "hackers@arangodb.com", "passwordaa": "foobar"}),
      });
      expect(res).to.be.a(request.Response);
      expect(res).to.be.an.instanceof(request.Response);
      expect(res).to.have.property('statusCode', 400);
    },

@ -354,13 +354,13 @@ function AuthSuite () {
        url: baseUrl() + "/_open/auth",
        body: JSON.stringify({"usern": "hackers@arangodb.com", "password": "foobar"}),
      });
      expect(res).to.be.a(request.Response);
      expect(res).to.be.an.instanceof(request.Response);
      expect(res).to.have.property('statusCode', 400);
    },

    testAuthRequired: function() {
      var res = request.get(baseUrl() + "/_api/version");
      expect(res).to.be.a(request.Response);
      expect(res).to.be.an.instanceof(request.Response);
      expect(res).to.have.property('statusCode', 401);
    },

@ -378,7 +378,7 @@ function AuthSuite () {
        }
      });

      expect(res).to.be.a(request.Response);
      expect(res).to.be.an.instanceof(request.Response);
      expect(res).to.have.property('statusCode', 200);
    },

@ -391,7 +391,7 @@ function AuthSuite () {
          bearer: jwt,
        }
      });
      expect(res).to.be.a(request.Response);
      expect(res).to.be.an.instanceof(request.Response);
      expect(res).to.have.property('statusCode', 200);
    },

@ -404,7 +404,7 @@ function AuthSuite () {
          bearer: jwt,
        }
      });
      expect(res).to.be.a(request.Response);
      expect(res).to.be.an.instanceof(request.Response);
      expect(res).to.have.property('statusCode', 401);
    },

@ -417,7 +417,7 @@ function AuthSuite () {
          bearer: jwt,
        }
      });
      expect(res).to.be.a(request.Response);
      expect(res).to.be.an.instanceof(request.Response);
      expect(res).to.have.property('statusCode', 401);
    },

@ -430,7 +430,7 @@ function AuthSuite () {
          bearer: jwt,
        }
      });
      expect(res).to.be.a(request.Response);
      expect(res).to.be.an.instanceof(request.Response);
      expect(res).to.have.property('statusCode', 401);
    },

@ -443,7 +443,7 @@ function AuthSuite () {
          bearer: jwt,
        }
      });
      expect(res).to.be.a(request.Response);
      expect(res).to.be.an.instanceof(request.Response);
      expect(res).to.have.property('statusCode', 200);
    },

@ -456,7 +456,7 @@ function AuthSuite () {
          bearer: jwt,
        }
      });
      expect(res).to.be.a(request.Response);
      expect(res).to.be.an.instanceof(request.Response);
      expect(res).to.have.property('statusCode', 401);
    },
  };
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
var jsunity = require('jsunity');
|
||||
var expect = require('expect.js');
|
||||
var expect = require('chai').expect;
|
||||
var request = require('@arangodb/request');
|
||||
var url = require('url');
|
||||
var querystring = require('querystring');
|
||||
|
@ -61,11 +61,14 @@ function RequestSuite () {
|
|||
testDeleteMethod: function () {
|
||||
var path = '/lol';
|
||||
var res = request.delete(buildUrl(path), {timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
|
||||
expect(res.body).to.be.a('string');
|
||||
expect(Number(res.headers['content-length'])).to.equal(res.rawBody.length);
|
||||
var obj = JSON.parse(res.body);
|
||||
expect(obj.path).to.equal(path);
|
||||
|
||||
},
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -75,7 +78,7 @@ function RequestSuite () {
|
|||
testGetMethod: function () {
|
||||
var path = '/lol';
|
||||
var res = request.get(buildUrl(path), {timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
expect(res.body).to.be.a('string');
|
||||
expect(Number(res.headers['content-length'])).to.equal(res.rawBody.length);
|
||||
var obj = JSON.parse(res.body);
|
||||
|
@ -90,8 +93,8 @@ function RequestSuite () {
|
|||
testHeadMethod: function () {
|
||||
var path = '/lol';
|
||||
var res = request.head(buildUrl(path), {timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res.body).to.be.empty();
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
expect(res.body).to.be.empty;
|
||||
},
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -101,7 +104,7 @@ function RequestSuite () {
|
|||
testPostMethod: function () {
|
||||
var path = '/lol';
|
||||
var res = request.post(buildUrl(path), {timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
expect(res.body).to.be.a('string');
|
||||
expect(Number(res.headers['content-length'])).to.equal(res.rawBody.length);
|
||||
var obj = JSON.parse(res.body);
|
||||
|
@ -116,7 +119,7 @@ function RequestSuite () {
|
|||
var path = '/lol';
|
||||
var body = {hello: 'world'};
|
||||
var res = request.post(buildUrl(path), {body: body, json: true, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
expect(Number(res.headers['content-length'])).to.equal(res.rawBody.length);
|
||||
expect(res.json).to.be.an('object');
|
||||
var obj = res.json;
|
||||
|
@ -133,7 +136,7 @@ function RequestSuite () {
|
|||
var path = '/lol';
|
||||
var body = {hello: 'world'};
|
||||
var res = request.put(buildUrl(path), {body: body, json: true, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
expect(Number(res.headers['content-length'])).to.equal(res.rawBody.length);
|
||||
expect(res.json).to.be.an('object');
|
||||
var obj = res.json;
|
||||
|
@ -154,7 +157,7 @@ function RequestSuite () {
|
|||
'x-hovercraft': 'full-of-eels'
|
||||
};
|
||||
var res = request.post(buildUrl(path), {headers: headers, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
var obj = JSON.parse(res.body);
|
||||
expect(obj.path).to.equal(path);
|
||||
expect(obj).to.have.property('headers');
|
||||
|
@ -173,7 +176,7 @@ function RequestSuite () {
|
|||
hovercraft: ['full', 'of', 'eels']
|
||||
};
|
||||
var res = request.post(buildUrl(path), {qs: qstring, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
var obj = JSON.parse(res.body);
|
||||
var urlObj = url.parse(obj.url);
|
||||
var query = qs.parse(urlObj.query);
|
||||
|
@ -186,7 +189,7 @@ function RequestSuite () {
|
|||
hovercraft: ['full', 'of', 'eels']
|
||||
};
|
||||
var res = request.post(buildUrl(path), {qs: qstring, useQuerystring: true, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
var obj = JSON.parse(res.body);
|
||||
var urlObj = url.parse(obj.url);
|
||||
var query = querystring.parse(urlObj.query);
|
||||
|
@ -199,7 +202,7 @@ function RequestSuite () {
|
|||
hovercraft: ['full', 'of', 'eels']
|
||||
});
|
||||
var res = request.post(buildUrl(path), {qs: qstring, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
var obj = JSON.parse(res.body);
|
||||
var urlObj = url.parse(obj.url);
|
||||
expect(urlObj.query).to.eql(qstring);
|
||||
|
@ -212,7 +215,7 @@ function RequestSuite () {
|
|||
testUrlObject: function () {
|
||||
var path = url.parse(buildUrl('/lol'));
|
||||
var res = request.post({url: path, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
var obj = JSON.parse(res.body);
|
||||
expect(obj.url).to.equal(path.pathname);
|
||||
},
|
||||
|
@ -224,7 +227,7 @@ function RequestSuite () {
|
|||
test404: function () {
|
||||
var url = buildUrlBroken('/lol');
|
||||
var res = request.get(url, {timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
expect(res).to.have.property('message', 'Not Found');
|
||||
expect(res).to.have.property('statusCode', 404);
|
||||
expect(res).to.have.property('status', 404);
|
||||
|
@ -237,8 +240,8 @@ function RequestSuite () {
|
|||
testBadJson: function () {
|
||||
var url = buildUrl('/_admin/aardvark/index.html', false);
|
||||
var res = request.get(url, {json: true, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res.json).to.be(undefined);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
expect(res.json).to.be.equal(undefined);
|
||||
},
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -252,7 +255,7 @@ function RequestSuite () {
|
|||
password: 'bionicman'
|
||||
};
|
||||
var res = request.post(buildUrl(path), {auth: auth, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
var obj = JSON.parse(res.body);
|
||||
expect(obj.path).to.equal(path);
|
||||
expect(obj).to.have.property('headers');
|
||||
|
@ -271,7 +274,7 @@ function RequestSuite () {
|
|||
var res = request.post(buildUrl(path).replace(/^(https?:\/\/)/, function (m) {
|
||||
return m + encodeURIComponent(auth.username) + ':' + encodeURIComponent(auth.password) + '@';
|
||||
}), {timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
var obj = JSON.parse(res.body);
|
||||
expect(obj.path).to.equal(path);
|
||||
expect(obj).to.have.property('headers');
|
||||
|
@ -287,7 +290,7 @@ function RequestSuite () {
|
|||
bearer: 'full of bears'
|
||||
};
|
||||
var res = request.post(buildUrl(path), {auth: auth, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
var obj = JSON.parse(res.body);
|
||||
expect(obj.path).to.equal(path);
|
||||
expect(obj).to.have.property('headers');
|
||||
|
@ -307,7 +310,7 @@ function RequestSuite () {
|
|||
hovercraft: ['full', 'of', 'eels']
|
||||
};
|
||||
var res = request.post(buildUrl(path), {body: reqBody, json: true, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
expect(res.json).to.be.an('object');
|
||||
var obj = res.json;
|
||||
expect(obj.path).to.equal(path);
|
||||
|
@ -323,7 +326,7 @@ function RequestSuite () {
|
|||
hovercraft: ['full', 'of', 'eels']
|
||||
};
|
||||
var res = request.post(buildUrl(path), {form: reqBody, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
var obj = JSON.parse(res.body);
|
||||
expect(obj.path).to.equal(path);
|
||||
expect(obj).to.have.property('requestBody');
|
||||
|
@ -338,7 +341,7 @@ function RequestSuite () {
|
|||
hovercraft: ['full', 'of', 'eels']
|
||||
};
|
||||
var res = request.post(buildUrl(path), {form: reqBody, useQuerystring: true, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
var obj = JSON.parse(res.body);
|
||||
expect(obj.path).to.equal(path);
|
||||
expect(obj).to.have.property('requestBody');
|
||||
|
@ -353,7 +356,7 @@ function RequestSuite () {
|
|||
hovercraft: ['full', 'of', 'eels']
|
||||
};
|
||||
var res = request.post(buildUrl(path), {form: qs.stringify(reqBody), timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
var obj = JSON.parse(res.body);
|
||||
expect(obj.path).to.equal(path);
|
||||
expect(obj).to.have.property('requestBody');
|
||||
|
@ -364,7 +367,7 @@ function RequestSuite () {
|
|||
var path = '/lol';
|
||||
var reqBody = 'hello world';
|
||||
var res = request.post(buildUrl(path), {body: reqBody, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
var obj = JSON.parse(res.body);
|
||||
expect(obj.path).to.equal(path);
|
||||
expect(obj).to.have.property('requestBody');
|
||||
|
@ -376,7 +379,7 @@ function RequestSuite () {
|
|||
var reqBody = new Buffer('hello world');
|
||||
var headers = {'content-type': 'application/octet-stream'};
|
||||
var res = request.post(buildUrl(path), {body: reqBody, headers: headers, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
var obj = JSON.parse(res.body);
|
||||
expect(obj.path).to.equal(path);
|
||||
expect(obj).to.have.property('requestBody');
|
||||
|
@ -386,10 +389,9 @@ function RequestSuite () {
|
|||
testBufferResponse: function () {
|
||||
var path = '/_admin/aardvark/favicon.ico';
|
||||
var res = request.get(buildUrl(path, false), {encoding: null, timeout: 300});
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res.body).to.be.a(Buffer);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
expect(res.body).to.be.an.instanceof(Buffer);
|
||||
}
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@
|
|||
'use strict';
|
||||
|
||||
var jsunity = require('jsunity');
|
||||
var expect = require('expect.js');
|
||||
var expect = require('chai').expect;
|
||||
var request = require('@arangodb/request');
|
||||
var url = require('url');
|
||||
var querystring = require('querystring');
|
||||
|
@ -71,7 +71,7 @@ function versionJsonJson() {
|
|||
|
||||
var res = request.post(buildUrl(path, false), {headers : headers, timeout: 300});
|
||||
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
expect(res.body).to.be.a('string');
|
||||
expect(Number(res.headers['content-length'])).to.equal(res.rawBody.length);
|
||||
expect(String(res.headers['content-type'])).to.have.string("application/json");
|
||||
|
@ -82,7 +82,7 @@ function versionJsonJson() {
|
|||
expect(obj).to.have.property('version');
|
||||
expect(obj).to.have.property('license');
|
||||
|
||||
expect(obj.server).to.be('arango');
|
||||
expect(obj.server).to.be.equal('arango');
|
||||
expect(obj.version).to.match(/[0-9]+\.[0-9]+\.([0-9]+|(milestone|alpha|beta|devel|rc)[0-9]*)/);
|
||||
|
||||
expect(obj.license).to.match(/enterprise|community/g);
|
||||
|
@ -97,7 +97,7 @@ function versionVpackJson() {
|
|||
|
||||
var res = request.post(buildUrl(path, false), {headers : headers, timeout: 300});
|
||||
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
expect(res.body).to.be.a('string');
|
||||
expect(Number(res.headers['content-length'])).to.equal(res.rawBody.length);
|
||||
expect(String(res.headers['content-type'])).to.have.string("application/json");
|
||||
|
@ -108,7 +108,7 @@ function versionVpackJson() {
|
|||
expect(obj).to.have.property('version');
|
||||
expect(obj).to.have.property('license');
|
||||
|
||||
expect(obj.server).to.be('arango');
|
||||
expect(obj.server).to.be.equal('arango');
|
||||
expect(obj.version).to.match(/[0-9]+\.[0-9]+\.([0-9]+|(milestone|alpha|beta|devel|rc)[0-9]*)/);
|
||||
expect(obj.license).to.match(/enterprise|community/g);
|
||||
};
|
||||
|
@ -122,7 +122,7 @@ function versionJsonVpack () {
|
|||
|
||||
var res = request.post(buildUrl(path,false), {headers : headers, timeout: 300});
|
||||
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
expect(res.body).to.be.a('string');
|
||||
expect(Number(res.headers['content-length'])).to.equal(res.rawBody.length);
|
||||
expect(String(res.headers['content-type'])).to.have.string("application/x-velocypack");
|
||||
|
@ -133,7 +133,7 @@ function versionJsonVpack () {
|
|||
expect(obj).to.have.property('version');
|
||||
expect(obj).to.have.property('license');
|
||||
|
||||
expect(obj.server).to.be('arango');
|
||||
expect(obj.server).to.be.equal('arango');
|
||||
expect(obj.version).to.match(/[0-9]+\.[0-9]+\.([0-9]+|(milestone|alpha|beta|devel|rc)[0-9]*)/);
|
||||
expect(obj.license).to.match(/enterprise|community/g);
|
||||
};
|
||||
|
@ -147,7 +147,7 @@ function versionVpackVpack () {
|
|||
|
||||
var res = request.post(buildUrl(path,false), {headers : headers, timeout: 300});
|
||||
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
expect(res.body).to.be.a('string');
|
||||
expect(Number(res.headers['content-length'])).to.equal(res.rawBody.length);
|
||||
expect(String(res.headers['content-type'])).to.have.string("application/x-velocypack");
|
||||
|
@ -158,7 +158,7 @@ function versionVpackVpack () {
|
|||
expect(obj).to.have.property('version');
|
||||
expect(obj).to.have.property('license');
|
||||
|
||||
expect(obj.server).to.be('arango');
|
||||
expect(obj.server).to.be.equal('arango');
|
||||
expect(obj.version).to.match(/[0-9]+\.[0-9]+\.([0-9]+|(milestone|alpha|beta|devel|rc)[0-9]*)/);
|
||||
expect(obj.license).to.match(/enterprise|community/g);
|
||||
};
|
||||
|
@ -176,7 +176,7 @@ function echoVpackVpack () {
|
|||
var body = V8_TO_VPACK(obj);
|
||||
var res = request.post(buildUrl(path),{ headers : headers, body : body, timeout: 300});
|
||||
|
||||
expect(res).to.be.a(request.Response);
|
||||
expect(res).to.be.an.instanceof(request.Response);
|
||||
expect(res.body).to.be.a('string');
|
||||
expect(Number(res.headers['content-length'])).to.equal(res.rawBody.length);
|
||||
};
|
||||
|
|
|
@ -28,11 +28,12 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
var jsunity = require('jsunity');
|
||||
var expect = require('expect.js');
|
||||
const expect = require('chai').expect;
|
||||
var util = require('util');
|
||||
var Console = require('@arangodb/foxx/legacy/console').Console;
|
||||
var db = require('@arangodb').db;
|
||||
var qb = require('aqb');
|
||||
var AssertionError = require('assert').AssertionError;
|
||||
|
||||
var mountPath = '##TEST##';
|
||||
|
||||
|
@ -96,7 +97,7 @@ function ConsoleTestSuite () {
|
|||
rmrf();
|
||||
console.log('hi');
|
||||
var logs = ls();
|
||||
expect(logs.length).to.be(1);
|
||||
expect(logs.length).to.be.equal(1);
|
||||
expect(logs[0]).to.have.property('level', 'INFO');
|
||||
expect(logs[0]).to.have.property('levelNum', console._logLevels.INFO);
|
||||
expect(logs[0]).to.have.property('message', 'hi');
|
||||
|
@ -133,7 +134,7 @@ function ConsoleTestSuite () {
|
|||
expect(max).to.be.greaterThan(min); // sanity checking
|
||||
var logs = ls();
|
||||
var match = logs[0].message.match(/^([^:]+):\s+(\d+)ms$/);
|
||||
expect(match).to.be.ok();
|
||||
expect(match).to.be.ok;
|
||||
expect(match[1]).to.equal('hi');
|
||||
var elapsed = Number(match[2]);
|
||||
expect(elapsed).not.to.be.lessThan(min - start);
|
||||
|
@ -142,7 +143,7 @@ function ConsoleTestSuite () {
|
|||
testConsoleTimeThrowsForInvalidLabel: function () {
|
||||
expect(function () {
|
||||
console.timeEnd('this is a label that does not exist');
|
||||
}).to.throwError();
|
||||
}).to.throw(Error);
|
||||
},
|
||||
|
||||
testConsoleDirUsesInspect: function () {
|
||||
|
@ -165,7 +166,7 @@ function ConsoleTestSuite () {
|
|||
console.setAssertThrows(false);
|
||||
expect(function () {
|
||||
console.assert(false, 'potato');
|
||||
}).not.to.throwError();
|
||||
}).not.to.throw(Error);
|
||||
var logs = ls();
|
||||
expect(logs.length).to.equal(1);
|
||||
expect(logs[0]).to.have.property('level', 'ERROR');
|
||||
|
@ -178,10 +179,7 @@ function ConsoleTestSuite () {
|
|||
console.setAssertThrows(true);
|
||||
expect(function () {
|
||||
console.assert(false, 'potato');
|
||||
}).to.throwError(function (e) {
|
||||
expect(e.name).to.be('AssertionError');
|
||||
expect(e.message).to.be('potato');
|
||||
});
|
||||
}).to.throw(AssertionError).with.property('message', 'potato');
|
||||
var logs = ls();
|
||||
expect(logs.length).to.equal(1);
|
||||
expect(logs[0].message).to.match(/AssertionError: potato/);
|
||||
|
@ -196,7 +194,7 @@ function ConsoleTestSuite () {
|
|||
console.log.level = 'INFO';
|
||||
delete console._logLevels.POTATO;
|
||||
var logs = ls();
|
||||
expect(logs).to.be.empty();
|
||||
expect(logs).to.be.empty;
|
||||
},
|
||||
testConsoleTracingAddsInfo: function () {
|
||||
rmrf();
|
||||
|
@ -239,7 +237,7 @@ function ConsoleTestSuite () {
|
|||
console.log('sup');
|
||||
console.log('banana');
|
||||
var logs = console.logs.list();
|
||||
expect(logs.length).to.be(2);
|
||||
expect(logs.length).to.be.equal(2);
|
||||
expect(logs[0]).to.have.property('message', 'sup');
|
||||
expect(logs[1]).to.have.property('message', 'banana');
|
||||
},
|
||||
|
@ -249,7 +247,7 @@ function ConsoleTestSuite () {
|
|||
console.log('sup');
|
||||
console.log('banana');
|
||||
var logs = console.logs.list({sort: 'DESC'});
|
||||
expect(logs.length).to.be(2);
|
||||
expect(logs.length).to.be.equal(2);
|
||||
expect(logs[0]).to.have.property('message', 'banana');
|
||||
expect(logs[1]).to.have.property('message', 'sup');
|
||||
},
|
||||
|
@ -259,7 +257,7 @@ function ConsoleTestSuite () {
|
|||
console.log('sup');
|
||||
console.log('banana');
|
||||
var logs = console.logs.list({limit: 1});
|
||||
expect(logs.length).to.be(1);
|
||||
expect(logs.length).to.be.equal(1);
|
||||
expect(logs[0]).to.have.property('message', 'sup');
|
||||
},
|
||||
|
||||
|
@ -268,7 +266,7 @@ function ConsoleTestSuite () {
|
|||
console.log('sup');
|
||||
console.log('banana');
|
||||
var logs = console.logs.list({limit: 1, offset: 1});
|
||||
expect(logs.length).to.be(1);
|
||||
expect(logs.length).to.be.equal(1);
|
||||
expect(logs[0]).to.have.property('message', 'banana');
|
||||
},
|
||||
|
||||
|
@ -278,12 +276,12 @@ function ConsoleTestSuite () {
|
|||
console.debug('lol');
|
||||
console.error('hey');
|
||||
logs = console.logs.list({minLevel: 'DEBUG'});
|
||||
expect(logs.length).to.be(2);
|
||||
expect(logs.length).to.be.equal(2);
|
||||
logs = console.logs.list({minLevel: console._logLevels.DEBUG + 1});
|
||||
expect(logs.length).to.be(1);
|
||||
expect(logs.length).to.be.equal(1);
|
||||
expect(logs[0]).to.have.property('message', 'hey');
|
||||
logs = console.logs.list({minLevel: console._logLevels.ERROR + 1});
|
||||
expect(logs.length).to.be(0);
|
||||
expect(logs.length).to.be.equal(0);
|
||||
},
|
||||
|
||||
testLogsListWithLevel: function () {
|
||||
|
@ -292,7 +290,7 @@ function ConsoleTestSuite () {
|
|||
console.debug('lol');
|
||||
console.error('hey');
|
||||
logs = console.logs.list({level: 'DEBUG'});
|
||||
expect(logs.length).to.be(1);
|
||||
expect(logs.length).to.be.equal(1);
|
||||
expect(logs[0]).to.have.property('message', 'lol');
|
||||
},
|
||||
|
||||
|
@ -300,9 +298,7 @@ function ConsoleTestSuite () {
|
|||
console.setTracing(false);
|
||||
expect(function () {
|
||||
console.logs.searchByFileName('lol');
|
||||
}).to.throwError(function (e) {
|
||||
expect(e.message).to.match(/tracing/i);
|
||||
});
|
||||
}).to.throw(Error).with.property('message', 'Tracing must be enabled in order to search by filename.');
|
||||
},
|
||||
|
||||
testLogsSearchByFileName: function () {
|
||||
|
@ -329,6 +325,7 @@ function ConsoleTestSuite () {
|
|||
expect(console.logs.searchByMessage('ef')).to.have.property('length', 1);
|
||||
expect(console.logs.searchByMessage('fail')).to.have.property('length', 0);
|
||||
}
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
'use strict';
|
||||
|
||||
var sinon = require('sinon');
|
||||
var expect = require('expect.js');
|
||||
const expect = require('chai').expect;
|
||||
var FoxxRepository = require('@arangodb/foxx/legacy/repository').Repository;
|
||||
var Model = require('@arangodb/foxx/legacy/model').Model;
|
||||
|
||||
|
@ -28,38 +28,38 @@ describe('Model Events', function () {
|
|||
it('should emit beforeCreate and afterCreate events when creating the model', function () {
|
||||
addHooks(instance, 'Create');
|
||||
expect(repository.save(instance)).to.eql(instance);
|
||||
expect(instance.get('beforeCalled')).to.be(true);
|
||||
expect(instance.get('afterCalled')).to.be(true);
|
||||
expect(instance.get('beforeCalled')).to.be.true;
|
||||
expect(instance.get('afterCalled')).to.be.true;
|
||||
});
|
||||
|
||||
it('should emit beforeSave and afterSave events when creating the model', function () {
|
||||
addHooks(instance, 'Save');
|
||||
expect(repository.save(instance)).to.eql(instance);
|
||||
expect(instance.get('beforeCalled')).to.be(true);
|
||||
expect(instance.get('afterCalled')).to.be(true);
|
||||
expect(instance.get('beforeCalled')).to.be.true;
|
||||
expect(instance.get('afterCalled')).to.be.true;
|
||||
});
|
||||
|
||||
it('should emit beforeUpdate and afterUpdate events when updating the model', function () {
|
||||
var newData = { newAttribute: 'test' };
|
||||
addHooks(instance, 'Update', newData);
|
||||
expect(repository.update(instance, newData)).to.eql(instance);
|
||||
expect(instance.get('beforeCalled')).to.be(true);
|
||||
expect(instance.get('afterCalled')).to.be(true);
|
||||
expect(instance.get('beforeCalled')).to.be.true;
|
||||
expect(instance.get('afterCalled')).to.be.true;
|
||||
});
|
||||
|
||||
it('should emit beforeSave and afterSave events when updating the model', function () {
|
||||
var newData = { newAttribute: 'test' };
|
||||
addHooks(instance, 'Save', newData);
|
||||
expect(repository.update(instance, newData)).to.eql(instance);
|
||||
expect(instance.get('beforeCalled')).to.be(true);
|
||||
expect(instance.get('afterCalled')).to.be(true);
|
||||
expect(instance.get('beforeCalled')).to.be.true;
|
||||
expect(instance.get('afterCalled')).to.be.true;
|
||||
});
|
||||
|
||||
it('should emit beforeRemove and afterRemove events when removing the model', function () {
|
||||
addHooks(instance, 'Remove');
|
||||
repository.remove(instance);
|
||||
expect(instance.get('beforeCalled')).to.be(true);
|
||||
expect(instance.get('afterCalled')).to.be(true);
|
||||
expect(instance.get('beforeCalled')).to.be.true;
|
||||
expect(instance.get('afterCalled')).to.be.true;
|
||||
});
|
||||
});
|
||||
|
||||
|
@ -76,7 +76,7 @@ function addHooks (model, ev, dataToReceive) {
|
|||
expect(this).to.eql(model);
|
||||
expect(data).to.eql(dataToReceive);
|
||||
this.set('afterCalled', true);
|
||||
expect(this.get('beforeCalled')).to.be(true);
|
||||
expect(this.get('beforeCalled')).to.be.true;
|
||||
expect(this.get('random')).to.eql(random);
|
||||
});
|
||||
}
|
||||
|
|
|
@ -31,12 +31,12 @@
|
|||
'use strict';
|
||||
|
||||
var FoxxManager = require('@arangodb/foxx/manager');
|
||||
var expect = require('expect.js');
|
||||
const expect = require('chai').expect;
|
||||
|
||||
FoxxManager.update();
|
||||
|
||||
var list = FoxxManager.availableJson(false);
|
||||
expect(list).not.to.be.empty();
|
||||
expect(list).not.to.be.empty;
|
||||
|
||||
describe('Foxx Manager', function () {
|
||||
const _it = it;
|
||||
|
@ -45,11 +45,11 @@ describe('Foxx Manager', function () {
|
|||
describe(`service "${service.name}" from the store`, () => {
|
||||
it('should have proper name and author', function () {
|
||||
expect(service).to.have.property('name');
|
||||
expect(service.name).to.not.be.empty();
|
||||
expect(service.name).to.not.be.empty;
|
||||
expect(service).to.have.property('latestVersion');
|
||||
expect(service.latestVersion).to.not.be.empty();
|
||||
expect(service.latestVersion).to.not.be.empty;
|
||||
expect(service).to.have.property('description');
|
||||
expect(service.description).to.not.be.empty();
|
||||
expect(service.description).to.not.be.empty;
|
||||
});
|
||||
|
||||
if (service.name === 'hello-foxx') {
|
||||
|
|
|
@ -384,35 +384,27 @@ void ApplicationServer::setupDependencies(bool failOnMissing) {
|
|||
// first insert all features, even the inactive ones
|
||||
std::vector<ApplicationFeature*> features;
|
||||
for (auto& it : _features) {
|
||||
auto const& us = it.second;
|
||||
auto insertPosition = features.end();
|
||||
|
||||
if (!features.empty()) {
|
||||
for (size_t i = features.size(); i > 0; --i) {
|
||||
if (it.second->doesStartBefore(features[i - 1]->name())) {
|
||||
for (size_t i = features.size(); i > 0; --i) {
|
||||
auto const& other = features[i - 1];
|
||||
if (us->doesStartBefore(other->name())) {
|
||||
// we start before the other feature. so move ourselves up
|
||||
insertPosition = features.begin() + (i - 1);
|
||||
} else if (other->doesStartBefore(us->name())) {
|
||||
// the other feature starts before us. so stop moving up
|
||||
break;
|
||||
} else {
|
||||
// no dependencies between the two features
|
||||
if (us->name() < other->name()) {
|
||||
insertPosition = features.begin() + (i - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
features.insert(insertPosition, it.second);
|
||||
}
|
||||
|
||||
for (size_t i = 1; i < features.size(); ++i) {
|
||||
auto feature = features[i];
|
||||
size_t insert = i;
|
||||
for (size_t j = i; j > 0; --j) {
|
||||
if (features[j - 1]->doesStartBefore(feature->name())) {
|
||||
break;
|
||||
}
|
||||
insert = j - 1;
|
||||
}
|
||||
if (insert != i) {
|
||||
for (size_t j = i; j > insert; --j) {
|
||||
features[j] = features[j - 1];
|
||||
}
|
||||
features[insert] = feature;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
LOG_TOPIC(TRACE, Logger::STARTUP) << "ordered features:";
|
||||
|
||||
int position = 0;
|
||||
|
@ -425,7 +417,7 @@ void ApplicationServer::setupDependencies(bool failOnMissing) {
|
|||
}
|
||||
LOG_TOPIC(TRACE, Logger::STARTUP)
|
||||
<< "feature #" << ++position << ": " << feature->name()
|
||||
<< (feature->isEnabled() ? "" : " (disabled)") << " " << dependencies;
|
||||
<< (feature->isEnabled() ? "" : " (disabled)") << dependencies;
|
||||
}
|
||||
|
||||
// remove all inactive features
|
||||
|
@ -605,6 +597,10 @@ void ApplicationServer::start() {
|
|||
THROW_ARANGO_EXCEPTION_MESSAGE(res, std::string("startup aborted: ") + TRI_errno_string(res));
|
||||
}
|
||||
}
|
||||
|
||||
for (auto const& callback : _startupCallbacks) {
|
||||
callback();
|
||||
}
|
||||
}
|
||||
|
||||
void ApplicationServer::stop() {
|
||||
|
@ -701,3 +697,4 @@ void ApplicationServer::reportFeatureProgress(ServerState state,
|
|||
reporter._feature(state, name);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -220,6 +220,10 @@ class ApplicationServer {
|
|||
static ApplicationFeature* lookupFeature(std::string const&);
|
||||
|
||||
char const* getBinaryPath() { return _binaryPath;}
|
||||
|
||||
void registerStartupCallback(std::function<void()> const& callback) {
|
||||
_startupCallbacks.emplace_back(callback);
|
||||
}
|
||||
|
||||
private:
|
||||
// throws an exception that a requested feature was not found
|
||||
|
@ -305,6 +309,9 @@ class ApplicationServer {
|
|||
// reporter for progress
|
||||
std::vector<ProgressHandler> _progressReports;
|
||||
|
||||
// callbacks that are called after start
|
||||
std::vector<std::function<void()>> _startupCallbacks;
|
||||
|
||||
// help section displayed
|
||||
std::string _helpSection;
|
||||
|
||||
|
|
|
@ -220,7 +220,7 @@ SECTION("test_primary_index") {
|
|||
|
||||
/// @brief test edge index
|
||||
SECTION("test_edge_index") {
|
||||
RocksDBKey key1 = RocksDBKey::EdgeIndexValue(0, "a/1", "foobar");
|
||||
RocksDBKey key1 = RocksDBKey::EdgeIndexValue(0, StringRef("a/1"), StringRef("foobar"));
|
||||
auto const& s1 = key1.string();
|
||||
|
||||
CHECK(s1.size() == sizeof(char) + sizeof(uint64_t) + strlen("a/1") + sizeof(char) + strlen("foobar") + sizeof(char));
|
||||
|
|