mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of ssh://github.com/ArangoDB/ArangoDB into devel
commit 0a9fb050e9

@@ -51,8 +51,6 @@ void RocksDBBackgroundThread::run() {
       guard.wait(static_cast<uint64_t>(_interval * 1000000.0));
     }
 
-    _engine->counterManager()->writeSettings();
-
     if (!isStopping()) {
       _engine->counterManager()->sync(false);
     }
@@ -67,5 +65,5 @@ void RocksDBBackgroundThread::run() {
       });
     }
   }
-  _engine->counterManager()->writeSettings();  // final write on shutdown
+  _engine->counterManager()->sync(true);  // final write on shutdown
 }

@@ -1104,15 +1104,15 @@ int RocksDBCollection::saveIndex(transaction::Methods* trx,
 
 arangodb::Result RocksDBCollection::fillIndexes(
     transaction::Methods* trx, std::shared_ptr<arangodb::Index> added) {
-  ManagedDocumentResult mmr;
+  ManagedDocumentResult mmdr;
   std::unique_ptr<IndexIterator> iter(
-      primaryIndex()->allIterator(trx, &mmr, false));
+      primaryIndex()->allIterator(trx, &mmdr, false));
   int res = TRI_ERROR_NO_ERROR;
 
   auto cb = [&](DocumentIdentifierToken token) {
-    if (res == TRI_ERROR_NO_ERROR && this->readDocument(trx, token, mmr)) {
+    if (res == TRI_ERROR_NO_ERROR && this->readDocument(trx, token, mmdr)) {
       RocksDBIndex* ridx = static_cast<RocksDBIndex*>(added.get());
-      res = ridx->insert(trx, mmr.lastRevisionId(), VPackSlice(mmr.vpack()),
+      res = ridx->insert(trx, mmdr.lastRevisionId(), VPackSlice(mmdr.vpack()),
                          false);
     }
   };
@@ -1475,9 +1475,9 @@ uint64_t RocksDBCollection::recalculateCounts() {
   //update counter manager value
   res = globalRocksEngine()->counterManager()->setAbsoluteCounter(_objectId,_numberDocuments);
   if(res.ok()){
     // in case of fail the counter has never been written and hence does not
     // need correction. The value is not changed and does not need to be synced
     globalRocksEngine()->counterManager()->sync(true);
   } else {
     THROW_ARANGO_EXCEPTION(res);
   }
 
   return _numberDocuments;

@@ -141,7 +141,10 @@ arangodb::Result RocksDBCounterManager::setAbsoluteCounter(uint64_t objectId, ui
   if (it != _counters.end()) {
     it->second._count = value;
   } else {
-    res.reset(TRI_ERROR_INTERNAL, "counter value not found");
+    // nothing to do as the counter has never been written it can not be set to
+    // a value that would require correction. but we use the return value to
+    // signal that no sync is required
+    res.reset(TRI_ERROR_INTERNAL, "counter value not found - no sync required");
   }
   return res;
 }
@@ -162,10 +165,6 @@ void RocksDBCounterManager::removeCounter(uint64_t objectId) {
 
 /// Thread-Safe force sync
 Result RocksDBCounterManager::sync(bool force) {
-#if 0
-  writeSettings();
-#endif
-
   if (force) {
     while (true) {
       bool expected = false;
@@ -215,12 +214,34 @@ Result RocksDBCounterManager::sync(bool force) {
     rocksdb::Status s = rtrx->Put(key.string(), value);
     if (!s.ok()) {
       rtrx->Rollback();
       LOG_TOPIC(WARN, Logger::ENGINES) << "writing counters failed";
       return rocksutils::convertStatus(s);
     }
   }
 
+  // now write global settings
+  b.clear();
+  b.openObject();
+  b.add("tick", VPackValue(std::to_string(TRI_CurrentTickServer())));
+  b.add("hlc", VPackValue(std::to_string(TRI_HybridLogicalClock())));
+  b.close();
+
+  VPackSlice slice = b.slice();
+  LOG_TOPIC(TRACE, Logger::ENGINES) << "writing settings: " << slice.toJson();
+
+  RocksDBKey key = RocksDBKey::SettingsValue();
+  rocksdb::Slice value(slice.startAs<char>(), slice.byteSize());
+
+  rocksdb::Status s = rtrx->Put(key.string(), value);
+
+  if (!s.ok()) {
+    LOG_TOPIC(WARN, Logger::ENGINES) << "writing settings failed";
+    rtrx->Rollback();
+    return rocksutils::convertStatus(s);
+  }
+
   // we have to commit all counters in one batch
-  rocksdb::Status s = rtrx->Commit();
+  s = rtrx->Commit();
   if (s.ok()) {
     for (std::pair<uint64_t, CMValue> const& pair : copy) {
       _syncedSeqNums[pair.first] = pair.second._sequenceNum;
@@ -249,6 +270,13 @@ void RocksDBCounterManager::readSettings() {
       basics::VelocyPackHelper::stringUInt64(slice.get("tick"));
       LOG_TOPIC(TRACE, Logger::ENGINES) << "using last tick: " << lastTick;
       TRI_UpdateTickServer(lastTick);
+
+      if (slice.hasKey("hlc")) {
+        uint64_t lastHlc =
+            basics::VelocyPackHelper::stringUInt64(slice.get("hlc"));
+        LOG_TOPIC(TRACE, Logger::ENGINES) << "using last hlc: " << lastHlc;
+        TRI_HybridLogicalClock(lastHlc);
+      }
     } catch (...) {
       LOG_TOPIC(WARN, Logger::ENGINES)
           << "unable to read initial settings: invalid data";
@@ -257,27 +285,6 @@ void RocksDBCounterManager::readSettings() {
   }
 }
 
-void RocksDBCounterManager::writeSettings() {
-  RocksDBKey key = RocksDBKey::SettingsValue();
-
-  VPackBuilder builder;
-  builder.openObject();
-  builder.add("tick", VPackValue(std::to_string(TRI_CurrentTickServer())));
-  builder.close();
-
-  VPackSlice slice = builder.slice();
-  LOG_TOPIC(TRACE, Logger::ENGINES) << "writing settings: " << slice.toJson();
-
-  rocksdb::Slice value(slice.startAs<char>(), slice.byteSize());
-
-  rocksdb::Status status =
-      _db->Put(rocksdb::WriteOptions(), key.string(), value);
-
-  if (!status.ok()) {
-    LOG_TOPIC(TRACE, Logger::ENGINES) << "writing settings failed";
-  }
-}
-
 /// Parse counter values from rocksdb
 void RocksDBCounterManager::readCounterValues() {
   WRITE_LOCKER(guard, _rwLock);
@@ -308,10 +315,10 @@ struct WBReader : public rocksdb::WriteBatch::Handler {
   uint64_t _maxTick = 0;
   uint64_t _maxHLC = 0;
 
-  explicit WBReader() : currentSeqNum(0) {}
+  WBReader() : currentSeqNum(0) {}
   virtual ~WBReader() {
     // update ticks after parsing wal
-    TRI_UpdateTickServer(std::max(_maxTick, TRI_CurrentTickServer()));
+    TRI_UpdateTickServer(_maxTick);
     TRI_HybridLogicalClock(_maxHLC);
   }
 

@@ -88,7 +88,6 @@ class RocksDBCounterManager {
   arangodb::Result sync(bool force);
 
   void readSettings();
-  void writeSettings();
 
  protected:
   struct CMValue {

@@ -1210,6 +1210,36 @@ RocksDBReplicationManager* RocksDBEngine::replicationManager() const {
   TRI_ASSERT(_replicationManager);
   return _replicationManager.get();
 }
 
+void RocksDBEngine::rocksdbProperties(VPackBuilder &builder) {
+  builder.openObject();
+  // add int properties
+  auto c1 = [&](std::string const& s) {
+    std::string v;
+    if (_db->GetProperty(s, &v)) {
+      builder.add(s, VPackValue(v));
+    }
+  };
+  c1(rocksdb::DB::Properties::kNumImmutableMemTable);
+  c1(rocksdb::DB::Properties::kMemTableFlushPending);
+  c1(rocksdb::DB::Properties::kCompactionPending);
+  c1(rocksdb::DB::Properties::kBackgroundErrors);
+  c1(rocksdb::DB::Properties::kCurSizeActiveMemTable);
+  c1(rocksdb::DB::Properties::kCurSizeAllMemTables);
+  c1(rocksdb::DB::Properties::kSizeAllMemTables);
+  c1(rocksdb::DB::Properties::kNumEntriesImmMemTables);
+  c1(rocksdb::DB::Properties::kNumSnapshots);
+  c1(rocksdb::DB::Properties::kDBStats);
+  c1(rocksdb::DB::Properties::kCFStats);
+  c1(rocksdb::DB::Properties::kSSTables);
+  c1(rocksdb::DB::Properties::kNumRunningCompactions);
+  c1(rocksdb::DB::Properties::kNumRunningFlushes);
+  c1(rocksdb::DB::Properties::kIsFileDeletionsEnabled);
+  c1(rocksdb::DB::Properties::kBaseLevel);
+  c1(rocksdb::DB::Properties::kTotalSstFilesSize);
+
+  builder.close();
+}
+
 int RocksDBEngine::handleSyncKeys(arangodb::InitialSyncer& syncer,
                                   arangodb::LogicalCollection* col,

@@ -269,6 +269,8 @@ class RocksDBEngine final : public StorageEngine {
   static std::string const FeatureName;
   RocksDBCounterManager* counterManager() const;
   RocksDBReplicationManager* replicationManager() const;
 
+  void rocksdbProperties(VPackBuilder &builder);
+
  private:
   /// single rocksdb database used in this storage engine

@@ -25,21 +25,22 @@
 #include "Basics/Exceptions.h"
 #include "Basics/Result.h"
+#include "Cluster/ServerState.h"
+#include "RocksDBEngine/RocksDBCollection.h"
 #include "RocksDBEngine/RocksDBCommon.h"
 #include "RocksDBEngine/RocksDBEngine.h"
-#include "RocksDBEngine/RocksDBCollection.h"
 #include "StorageEngine/EngineSelectorFeature.h"
-#include "VocBase/LogicalCollection.h"
 #include "V8/v8-conv.h"
 #include "V8/v8-globals.h"
 #include "V8/v8-utils.h"
 #include "V8/v8-vpack.h"
 #include "V8Server/v8-externals.h"
+#include "VocBase/LogicalCollection.h"
 
 #include <v8.h>
 
 using namespace arangodb;
 
-/// this is just a stub
+/// flush the WAL
 static void JS_FlushWal(v8::FunctionCallbackInfo<v8::Value> const& args) {
   TRI_V8_TRY_CATCH_BEGIN(isolate);
   v8::HandleScope scope(isolate);
@@ -102,6 +103,28 @@ static void JS_PropertiesWal(v8::FunctionCallbackInfo<v8::Value> const& args) {
   TRI_V8_TRY_CATCH_END
 }
 
+/// return rocksdb properties
+static void JS_EngineStats(v8::FunctionCallbackInfo<v8::Value> const& args) {
+  TRI_V8_TRY_CATCH_BEGIN(isolate);
+  v8::HandleScope scope(isolate);
+
+  if (ServerState::instance()->isCoordinator()) {
+    TRI_V8_THROW_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
+  }
+
+  RocksDBEngine* engine =
+      dynamic_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
+  if (engine == nullptr) {
+    TRI_V8_THROW_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
+  }
+
+  VPackBuilder builder;
+  engine->rocksdbProperties(builder);
+  v8::Handle<v8::Value> result = TRI_VPackToV8(isolate, builder.slice());
+  TRI_V8_RETURN(result);
+  TRI_V8_TRY_CATCH_END
+}
+
 static void JS_RecalculateCounts(
     v8::FunctionCallbackInfo<v8::Value> const& args) {
   TRI_V8_TRY_CATCH_BEGIN(isolate);
@@ -145,6 +168,8 @@ void RocksDBV8Functions::registerResources() {
                                JS_PropertiesWal, true);
   TRI_AddGlobalFunctionVocbase(isolate, TRI_V8_ASCII_STRING("WAL_TRANSACTIONS"),
                                JS_TransactionsWal, true);
+  TRI_AddGlobalFunctionVocbase(isolate, TRI_V8_ASCII_STRING("ENGINE_STATS"),
+                               JS_EngineStats, true);
   TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("recalculateCount"),
                        JS_RecalculateCounts);
 }

@@ -510,9 +510,8 @@ int RocksDBVPackIndex::insert(transaction::Methods* trx,
 
   if (res == TRI_ERROR_NO_ERROR) {
     auto s = rtrx->Put(key.string(), value.string());
-
-    auto status = rocksutils::convertStatus(s, rocksutils::StatusHint::index);
-    if (!status.ok()) {
+    if (!s.ok()) {
+      auto status = rocksutils::convertStatus(s, rocksutils::StatusHint::index);
       res = status.errorNumber();
     }
   }

@@ -177,6 +177,9 @@ void SchedulerFeature::stop() {
     usleep(100000);
   }
 
+  // shutdown user jobs again, in case new ones appear
+  TRI_ShutdownV8Dispatcher();
+
   _scheduler->shutdown();
 }

@@ -259,11 +259,13 @@ V8Task::callbackFunction() {
 
     // now do the work:
     work();
 
-    if (_periodic) {
+    if (_periodic && !SchedulerFeature::SCHEDULER->isStopping()) {
       _timer->expires_from_now(_interval);
       _timer->async_wait(callbackFunction());
+    } else {
+      // in case of one-off tasks or in case of a shutdown, simply
+      // remove the task from the list
+      V8Task::unregisterTask(_id, false);
     }
   };

@@ -32,20 +32,16 @@ if(SNAPCRAFT_FOUND)
     DESTINATION "${SNAPCRAFT_SOURCE_DIR}/"
   )
 
-  set(CPACK_SNAP_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}_${CPACK_PACKAGE_VERSION}_${ARANGODB_PACKAGE_ARCHITECTURE}.snap")
-  if(NOT EXISTS ${CPACK_SNAP_PACKAGE_FILE_NAME})
-    set(CPACK_SNAP_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}_${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}.snap")
-  endif()
   add_custom_target(snap
     COMMENT "create snap-package"
     COMMAND ${SNAP_EXE} snap
-    COMMAND ${CMAKE_COMMAND} -E copy ${SNAPCRAFT_SOURCE_DIR}/${CPACK_SNAP_PACKAGE_FILE_NAME} ${PROJECT_BINARY_DIR}
+    COMMAND ${CMAKE_COMMAND} -E copy ${SNAPCRAFT_SOURCE_DIR}/${CPACK_PACKAGE_NAME}_${CPACK_PACKAGE_VERSION}*_${ARANGODB_PACKAGE_ARCHITECTURE}.snap ${PROJECT_BINARY_DIR}
     DEPENDS TGZ_package
     WORKING_DIRECTORY ${SNAPCRAFT_SOURCE_DIR}
   )
 
   add_custom_target(copy_snap_packages
-    COMMAND ${CMAKE_COMMAND} -E copy ${CPACK_SNAP_PACKAGE_FILE_NAME} ${PACKAGE_TARGET_DIR})
+    COMMAND ${CMAKE_COMMAND} -E copy ${CPACK_PACKAGE_NAME}_${CPACK_PACKAGE_VERSION}*_${ARANGODB_PACKAGE_ARCHITECTURE}.snap ${PACKAGE_TARGET_DIR})
 
   list(APPEND COPY_PACKAGES_LIST copy_snap_packages)

@@ -514,6 +514,7 @@ function runInArangosh (options, instanceInfo, file, addArgs) {
       result[0].hasOwnProperty('status')) {
     return result[0];
   } else {
+    rc.failed = rc.status ? 0 : 1;
     return rc;
   }
 }

@@ -32,7 +32,6 @@ const optionsDocumentation = [
 ];
 
 const tu = require('@arangodb/test-utils');
-const pu = require('@arangodb/process-utils');
 
 // //////////////////////////////////////////////////////////////////////////////
 // / @brief agency tests

@@ -165,7 +165,7 @@ function arangobench (options) {
     };
   }
 
-  let results = {};
+  let results = { failed: 0 };
   let continueTesting = true;
 
   for (let i = 0; i < benchTodos.length; i++) {
@@ -205,9 +205,12 @@ function arangobench (options) {
 
       results[name] = oneResult;
       results[name].total++;
+      results[name].failed = 0;
 
       if (!results[name].status) {
+        results[name].failed = 1;
         results.status = false;
+        results.failed += 1;
       }
 
       continueTesting = pu.arangod.check.instanceAlive(instanceInfo, options);

@@ -55,7 +55,7 @@ const optionsDocumentation = [
 // //////////////////////////////////////////////////////////////////////////////
 
 function arangosh (options) {
-  let ret = {};
+  let ret = { failed: 0 };
   [
     'testArangoshExitCodeNoConnect',
     'testArangoshExitCodeFail',
@@ -93,9 +93,13 @@ function arangosh (options) {
     const failSuccess = (rc.hasOwnProperty('exit') && rc.exit === expectedReturnCode);
 
     if (!failSuccess) {
+      ret.failed += 1;
+      ret[section].failed = 1;
       ret[section]['message'] =
         'didn\'t get expected return code (' + expectedReturnCode + '): \n' +
         yaml.safeDump(rc);
+    } else {
+      ret[section].failed = 0;
     }
 
     ++ret[section]['total'];
@@ -160,9 +164,13 @@ function arangosh (options) {
     echoSuccess = (rc.hasOwnProperty('exit') && rc.exit === 1);
 
     if (!echoSuccess) {
+      ret.failed += 1;
+      ret.testArangoshExitCodeEcho.failed = 1;
       ret.testArangoshExitCodeEcho['message'] =
         'didn\'t get expected return code (1): \n' +
         yaml.safeDump(rc);
+    } else {
+      ret.testArangoshExitCodeEcho.failed = 0;
     }
 
     fs.remove(execFile);
@@ -204,11 +212,14 @@ function arangosh (options) {
     shebangSuccess = (rc.hasOwnProperty('exit') && rc.exit === 0);
 
     if (!shebangSuccess) {
+      ret.failed += 1;
+      ret.testArangoshShebang.failed = 1;
       ret.testArangoshShebang['message'] =
         'didn\'t get expected return code (0): \n' +
         yaml.safeDump(rc);
+    } else {
+      ret.testArangoshShebang.failed = 0;
     }
 
     fs.remove(shebangFile);
 
     ++ret.testArangoshShebang['total'];
@@ -217,6 +228,7 @@ function arangosh (options) {
       print((shebangSuccess ? GREEN : RED) + 'Status: ' + (shebangSuccess ? 'SUCCESS' : 'FAIL') + RESET);
   } else {
     ret.testArangoshShebang['skipped'] = true;
+    ret.testArangoshShebang.failed = 0;
   }
 
   print();
@@ -225,7 +237,7 @@ function arangosh (options) {
 
 function setup (testFns, defaultFns, opts, fnDocs, optionsDoc) {
   testFns['arangosh'] = arangosh;
 
   defaultFns.push('arangosh');
 
   opts['skipShebang'] = false;

@@ -35,7 +35,6 @@ const optionsDocumentation = [
 
 const pu = require('@arangodb/process-utils');
 const tu = require('@arangodb/test-utils');
 const fs = require('fs');
 const yaml = require('js-yaml');
 
 // const BLUE = require('internal').COLORS.COLOR_BLUE;

@@ -54,7 +54,7 @@ function locateCatchTest (name) {
 }
 
 function catchRunner (options) {
-  let results = {};
+  let results = { failed: 0 };
   let rootDir = pu.UNITTESTS_DIR;
 
   const run = locateCatchTest('arangodbtests');
@@ -67,8 +67,14 @@ function catchRunner (options) {
       '-o',
       fs.join(options.testOutputDirectory, 'catch-standard.xml')];
     results.basics = pu.executeAndWait(run, argv, options, 'all-catch', rootDir);
+    results.basics.failed = results.basics.status ? 0 : 1;
+    if (!results.basics.status) {
+      results.failed += 1;
+    }
   } else {
+    results.failed += 1;
     results.basics = {
+      failed: 1,
       status: false,
       message: 'binary "basics_suite" not found'
     };
@@ -85,8 +91,14 @@ function catchRunner (options) {
     ];
     results.cache_suite = pu.executeAndWait(run, argv, options,
                                             'cache_suite', rootDir);
+    results.cache_suite.failed = results.cache_suite.status ? 0 : 1;
+    if (!results.cache_suite.status) {
+      results.failed += 1;
+    }
   } else {
+    results.failed += 1;
     results.cache_suite = {
+      failed: 1,
       status: false,
       message: 'binary "cache_suite" not found'
     };
@@ -102,8 +114,14 @@ function catchRunner (options) {
       fs.join(options.testOutputDirectory, 'catch-geo.xml')
     ];
     results.geo_suite = pu.executeAndWait(run, argv, options, 'geo_suite', rootDir);
+    results.geo_suite.failed = results.geo_suite.status ? 0 : 1;
+    if (!results.geo_suite.status) {
+      results.failed += 1;
+    }
   } else {
+    results.failed += 1;
     results.geo_suite = {
+      failed: 1,
       status: false,
       message: 'binary "geo_suite" not found'
     };

@@ -54,6 +54,7 @@ function config (options) {
   if (options.skipConfig) {
     return {
       config: {
+        failed: 0,
         status: true,
         skipped: true
       }
@@ -61,12 +62,15 @@ function config (options) {
   }
 
   let results = {
+    failed: 0,
     absolut: {
+      failed: 0,
       status: true,
      total: 0,
      duration: 0
    },
    relative: {
+      failed: 0,
      status: true,
      total: 0,
      duration: 0
@@ -108,6 +112,8 @@ function config (options) {
 
     if (!results.absolut[test].status) {
       results.absolut.status = false;
+      results.absolut.failed += 1;
+      results.failed += 1;
     }
 
     results.absolut.total++;
@@ -141,6 +147,8 @@ function config (options) {
     results.relative[test] = pu.executeAndWait(run, toArgv(args), options, test, rootDir);
 
     if (!results.relative[test].status) {
+      results.failed += 1;
+      results.relative.failed += 1;
       results.relative.status = false;
     }
 

@@ -43,11 +43,15 @@ function dfdb (options) {
   const args = ['-c', fs.join(pu.CONFIG_DIR, 'arango-dfdb.conf'), dataDir];
 
   fs.makeDirectoryRecursive(dataDir);
-  let results = {};
+  let results = { failed: 0 };
 
   results.dfdb = pu.executeAndWait(pu.ARANGOD_BIN, args, options, 'dfdb', dataDir);
 
   print();
+  results.dfdb.failed = results.dfdb.status ? 0 : 1;
+  if (!results.dfdb.status) {
+    results.failed += 1;
+  }
 
   return results;
 }

@@ -62,6 +62,7 @@ function dump (options) {
 
   if (instanceInfo === false) {
     return {
+      failed: 1,
      dump: {
        status: false,
        message: 'failed to start server!'
@@ -71,39 +72,53 @@ function dump (options) {
 
   print(CYAN + Date() + ': Setting up' + RESET);
 
-  let results = {};
+  let results = { failed: 1 };
   results.setup = tu.runInArangosh(options, instanceInfo,
     tu.makePathUnix('js/server/tests/dump/dump-setup' + cluster + '.js'));
+  results.setup.failed = 1;
 
   if (pu.arangod.check.instanceAlive(instanceInfo, options) &&
       (results.setup.status === true)) {
+    results.setup.failed = 0;
+
     print(CYAN + Date() + ': Dump and Restore - dump' + RESET);
 
     results.dump = pu.run.arangoDumpRestore(options, instanceInfo, 'dump',
       'UnitTestsDumpSrc');
 
+    results.dump.failed = 1;
     if (pu.arangod.check.instanceAlive(instanceInfo, options) &&
        (results.dump.status === true)) {
+      results.dump.failed = 0;
+
      print(CYAN + Date() + ': Dump and Restore - restore' + RESET);
 
      results.restore = pu.run.arangoDumpRestore(options, instanceInfo, 'restore',
        'UnitTestsDumpDst');
 
+      results.restore.failed = 1;
      if (pu.arangod.check.instanceAlive(instanceInfo, options) &&
          (results.restore.status === true)) {
+        results.restore.failed = 0;
+
        print(CYAN + Date() + ': Dump and Restore - dump after restore' + RESET);
 
        results.test = tu.runInArangosh(options, instanceInfo,
          tu.makePathUnix('js/server/tests/dump/dump-' + storageEngine + cluster + '.js'), {
            'server.database': 'UnitTestsDumpDst'
          });
 
+        results.test.failed = 1;
        if (pu.arangod.check.instanceAlive(instanceInfo, options) &&
            (results.test.status === true)) {
+          results.test.failed = 0;
+
          print(CYAN + Date() + ': Dump and Restore - teardown' + RESET);
 
          results.tearDown = tu.runInArangosh(options, instanceInfo,
-           tu.makePathUnix('js/server/tests/dump/dump-teardown' + cluster + '.js'));
+            tu.makePathUnix('js/server/tests/dump/dump-teardown' + cluster + '.js'));
+          results.tearDown.failed = 1;
+          if (results.tearDown.status) {
+            results.tearDown.failed = 0;
+            results.failed = 0;
+          }
        }
      }
    }
  }

@@ -75,6 +75,7 @@ function dumpAuthentication (options) {
 
   if (instanceInfo === false) {
     return {
+      failed: 1,
      'dump_authentication': {
        status: false,
        message: 'failed to start server!'
@@ -84,13 +85,16 @@ function dumpAuthentication (options) {
 
   print(CYAN + Date() + ': Setting up' + RESET);
 
-  let results = {};
+  let results = { failed: 1 };
   results.setup = tu.runInArangosh(options, instanceInfo,
     tu.makePathUnix('js/server/tests/dump/dump-authentication-setup.js'),
     auth2);
+  results.setup.failed = 1;
 
   if (pu.arangod.check.instanceAlive(instanceInfo, options) &&
       (results.setup.status === true)) {
+    results.setup.failed = 0;
+
     print(CYAN + Date() + ': Dump and Restore - dump' + RESET);
 
     let authOpts = {
@@ -102,29 +106,41 @@ function dumpAuthentication (options) {
 
     results.dump = pu.run.arangoDumpRestore(authOpts, instanceInfo, 'dump',
       'UnitTestsDumpSrc');
 
+    results.dump.failed = 1;
     if (pu.arangod.check.instanceAlive(instanceInfo, options) &&
        (results.dump.status === true)) {
+      results.dump.failed = 0;
+
      print(CYAN + Date() + ': Dump and Restore - restore' + RESET);
 
      results.restore = pu.run.arangoDumpRestore(authOpts, instanceInfo, 'restore',
        'UnitTestsDumpDst');
 
+      results.restore.failed = 1;
      if (pu.arangod.check.instanceAlive(instanceInfo, options) &&
          (results.restore.status === true)) {
+        results.restore.failed = 0;
+
        print(CYAN + Date() + ': Dump and Restore - dump after restore' + RESET);
 
        results.test = tu.runInArangosh(authOpts, instanceInfo,
          tu.makePathUnix('js/server/tests/dump/dump-authentication.js'), {
            'server.database': 'UnitTestsDumpDst'
          });
 
+        results.test.failed = 1;
        if (pu.arangod.check.instanceAlive(instanceInfo, options) &&
            (results.test.status === true)) {
+          results.test.failed = 0;
+
          print(CYAN + Date() + ': Dump and Restore - teardown' + RESET);
 
          results.tearDown = tu.runInArangosh(options, instanceInfo,
            tu.makePathUnix('js/server/tests/dump/dump-teardown.js'), auth2);
 
+          results.tearDown.failed = 1;
+          if (results.tearDown.status) {
+            results.tearDown.failed = 0;
+            results.failed = 0;
+          }
        }
      }
    }
  }

@@ -63,11 +63,13 @@ function endpoints (options) {
   };
 
   return Object.keys(endpoints).reduce((results, endpointName) => {
+    results.failed = 0;
     let testName = 'endpoint-' + endpointName;
     results[testName] = (function () {
       let endpoint = endpoints[endpointName]();
       if (endpoint === undefined || options.cluster || options.skipEndpoints) {
         return {
+          failed: 0,
           status: true,
           skipped: true
         };
@@ -77,7 +79,9 @@ function endpoints (options) {
       }, testName);
 
       if (instanceInfo === false) {
+        result.failed += 1;
         return {
+          failed: 1,
           status: false,
           message: 'failed to start server!'
         };
@@ -90,6 +94,9 @@ function endpoints (options) {
       pu.shutdownInstance(instanceInfo, Object.assign(options, {useKillExternal: true}));
       print(CYAN + 'done.' + RESET);
 
+      if (!result.status) {
+        result.failed += 1;
+      }
       return result;
     }
   }());

@@ -95,7 +95,7 @@ function exportTest (options) {
     'overwrite': true,
     'output-directory': tmpPath
   };
-  const results = {};
+  const results = {failed: 0};
 
   function shutdown () {
     print(CYAN + 'Shutting down...' + RESET);
@@ -106,17 +106,27 @@ function exportTest (options) {
   }
 
   results.setup = tu.runInArangosh(options, instanceInfo, tu.makePathUnix('js/server/tests/export/export-setup' + cluster + '.js'));
+  results.setup.failed = 0;
   if (!pu.arangod.check.instanceAlive(instanceInfo, options) || results.setup.status !== true) {
+    results.setup.failed = 1;
+    results.failed += 1;
     return shutdown();
   }
 
   print(CYAN + Date() + ': Export data (json)' + RESET);
   results.exportJson = pu.executeAndWait(pu.ARANGOEXPORT_BIN, toArgv(args), options, 'arangosh', tmpPath);
+  results.exportJson.failed = results.exportJson.status ? 0 : 1;
 
   try {
     // const filesContent = JSON.parse(fs.read(fs.join(tmpPath, 'UnitTestsExport.json')));
-    results.parseJson = { status: true };
-  } catch (e) {
+    results.parseJson = {
+      failed: 0,
+      status: true
+    };
+  } catch (e) {
+    results.failed += 1;
     results.parseJson = {
+      failed: 1,
       status: false,
       message: e
     };
@@ -125,6 +135,7 @@ function exportTest (options) {
   print(CYAN + Date() + ': Export data (jsonl)' + RESET);
   args['type'] = 'jsonl';
   results.exportJsonl = pu.executeAndWait(pu.ARANGOEXPORT_BIN, toArgv(args), options, 'arangosh', tmpPath);
+  results.exportJsonl.failed = results.exportJsonl.status ? 0 : 1;
   try {
     const filesContent = fs.read(fs.join(tmpPath, 'UnitTestsExport.jsonl')).split('\n');
     for (const line of filesContent) {
@@ -132,10 +143,13 @@ function exportTest (options) {
       JSON.parse(line);
     }
     results.parseJsonl = {
+      failed: 0,
       status: true
     };
   } catch (e) {
+    results.failed += 1;
     results.parseJsonl = {
+      failed: 1,
       status: false,
       message: e
     };
@@ -145,19 +159,26 @@ function exportTest (options) {
   args['type'] = 'xgmml';
   args['graph-name'] = 'UnitTestsExport';
   results.exportXgmml = pu.executeAndWait(pu.ARANGOEXPORT_BIN, toArgv(args), options, 'arangosh', tmpPath);
+  results.exportXgmml.failed = results.exportXgmml.status ? 0 : 1;
   try {
     const filesContent = fs.read(fs.join(tmpPath, 'UnitTestsExport.xgmml'));
     DOMParser.parseFromString(filesContent);
-    results.parseXgmml = { status: true };
+    results.parseXgmml = {
+      failed: 0,
+      status: true
+    };
 
     if (xmlErrors !== null) {
       results.parseXgmml = {
+        failed: 1,
        status: false,
        message: xmlErrors
      };
    }
   } catch (e) {
+    results.failed += 1;
     results.parseXgmml = {
+      failed: 1,
       status: false,
       message: e
     };

@@ -1,5 +1,4 @@
 /* jshint strict: false, sub: true */
-/* global print */
 'use strict';
 
 // //////////////////////////////////////////////////////////////////////////////
@@ -63,7 +62,7 @@ function fail (options) {
       }
     }
   };
-};
+}
 
 function setup (testFns, defaultFns, opts, fnDocs, optionsDoc) {
   testFns['fail'] = fail;

@@ -168,7 +168,9 @@ function importing (options) {
   }
 
   return {
+    'failed': 0,
     'importing': {
+      'failed': 0,
       'status': true,
       'message': 'skipped because of cluster',
       'skipped': true
@@ -180,20 +182,25 @@ function importing (options) {
 
   if (instanceInfo === false) {
     return {
+      'failed': 1,
       'importing': {
+        failed: 1,
         status: false,
         message: 'failed to start server!'
       }
     };
   }
 
-  let result = {};
+  let result = { failed: 0 };
 
   try {
     result.setup = tu.runInArangosh(options, instanceInfo,
       tu.makePathUnix('js/server/tests/import/import-setup.js'));
 
+    result.setup.failed = 0;
     if (result.setup.status !== true) {
+      result.setup.failed = 1;
+      result.failed += 1;
       throw new Error('cannot start import setup');
     }
 
@@ -201,17 +208,26 @@ function importing (options) {
       const impTodo = impTodos[i];
 
       result[impTodo.id] = pu.run.arangoImp(options, instanceInfo, impTodo);
+      result[impTodo.id].failed = 0;
 
       if (result[impTodo.id].status !== true && !options.force) {
+        result[impTodo.id].failed = 1;
+        result.failed += 1;
         throw new Error('cannot run import');
       }
     }
 
-    result.check = tu.runInArangosh(options, instanceInfo,
+    result.check = tu.runInArangosh(
+      options,
+      instanceInfo,
       tu.makePathUnix('js/server/tests/import/import.js'));
+    result.check.failed = result.check.success ? 0 : 1;
 
-    result.teardown = tu.runInArangosh(options, instanceInfo,
+    result.teardown = tu.runInArangosh(
+      options,
+      instanceInfo,
       tu.makePathUnix('js/server/tests/import/import-teardown.js'));
+    result.teardown.failed = result.teardown.success ? 0 : 1;
   } catch (banana) {
     print('An exceptions of the following form was caught:',
       yaml.safeDump(banana));

@@ -26,155 +26,149 @@
 // //////////////////////////////////////////////////////////////////////////////
 
 const functionsDocumentation = {
-  'ldap': 'ldap tests',
+  'ldap': 'ldap tests'
 };
 const optionsDocumentation = [
   ' - `ldapUrl : testing authentication and authentication_paramaters will be skipped.'
 ];
 
 const pu = require('@arangodb/process-utils');
 const tu = require('@arangodb/test-utils');
 const request = require('@arangodb/request');
-const arango = require("@arangodb").arango;
 const fs = require('fs');
 const yaml = require('js-yaml');
 
 // const BLUE = require('internal').COLORS.COLOR_BLUE;
 const CYAN = require('internal').COLORS.COLOR_CYAN;
 // const GREEN = require('internal').COLORS.COLOR_GREEN;
-const RED = require('internal').COLORS.COLOR_RED;
+// const RED = require('internal').COLORS.COLOR_RED;
 const RESET = require('internal').COLORS.COLOR_RESET;
 // const YELLOW = require('internal').COLORS.COLOR_YELLOW;
 
 const download = require('internal').download;
 
 // //////////////////////////////////////////////////////////////////////////////
 // / @brief TEST: ldap
 // //////////////////////////////////////////////////////////////////////////////
 
-function ldap(options) {
-
+function ldap (options) {
   print(`DAP FQDN is: ${options.ldapUrl} ${options.caCertFilePath}`);
-  const results = {};
-  const tests = [{
-    name: 'ldapBasicLDAP',
-    conf: {
-      'server.authentication': true,
-      'server.authentication-system-only':false,
-      'ldap.enabled':true,
-      'ldap.server':options.ldapUrl,
-      'ldap.port': 3890,
-      'ldap.prefix': 'uid=',
-      'ldap.suffix':',dc=example,dc=com',
-      'ldap.search-filter': 'objectClass=simpleSecurityObject',
-      'ldap.search-attribute': 'uid',
-      'ldap.permissions-attribute-name': 'description'
-    },
-    user: {
-      name: 'fermi',
-      pass: 'password'
-    },
-    result:{
-      statusCode: 200
-    }
-  },
-  {
-    name: 'ldapBindSearchAuth',
-    conf: {
-      'server.authentication': true,
-      'server.authentication-system-only':false,
-      'ldap.enabled':true,
-      'ldap.server':options.ldapUrl,
-      'ldap.port': 3890,
-      'ldap.basedn':'dc=example,dc=com',
-      'ldap.search-filter': 'objectClass=simpleSecurityObject',
-      'ldap.search-attribute': 'uid',
-      'ldap.binddn': 'cn=admin,dc=example,dc=com',
-      'ldap.bindpasswd': 'hallo',
-      'ldap.permissions-attribute-name': 'description'
-    },
-    user: {
-      name: 'albert',
-      pass: 'password'
-    },
-    result:{
-      statusCode: 200
-    }
-  },
-  {
-    name: 'ldapBindSearchAuthWrongUser',
-    conf: {
-      'server.authentication': true,
-      'server.authentication-system-only':false,
-      'ldap.enabled':true,
-      'ldap.server':options.ldapUrl,
-      'ldap.port': 3890,
-      'ldap.basedn':'dc=example,dc=com',
-      'ldap.search-filter': 'objectClass=simpleSecurityObject',
-      'ldap.search-attribute': 'uid',
-      'ldap.binddn': 'cn=admin,dc=example,dc=com',
-      'ldap.bindpasswd': 'hallo',
-      'ldap.permissions-attribute-name': 'description'
-    },
-    user: {
-      name: 'werner',
-      pass: 'password'
-    },
-    result:{
-      statusCode: 500
-    }
-  },
-  {
-    name: 'ldapUrlBindSearchAuth',
-    conf: {
-      'server.authentication': true,
-      'server.authentication-system-only':false,
-      'ldap.enabled':true,
-      'ldap.url':`ldap://${options.ldapUrl}:3890/dc=example,dc=com?uid?sub`,
-      'ldap.search-filter': 'objectClass=simpleSecurityObject',
-      'ldap.binddn': 'cn=admin,dc=example,dc=com',
-      'ldap.bindpasswd': 'hallo',
-      'ldap.permissions-attribute-name': 'description'
-    },
-    user: {
-      name: 'fermi',
-      pass: 'password'
-    },
-    result: {
-      statusCode: 200
-    }
-  },
-  {
-    name: 'ldapUrlBindSearchTlsAuth',
-    conf: {
-      'server.authentication': true,
-      'server.authentication-system-only':false,
-      'ldap.enabled':true,
-      'ldap.url':`ldap://${options.ldapUrl}:3890/dc=example,dc=com?uid?sub`,
-      'ldap.search-filter': 'objectClass=simpleSecurityObject',
-      'ldap.binddn': 'cn=admin,dc=example,dc=com',
-      'ldap.bindpasswd': 'hallo',
-      'ldap.permissions-attribute-name': 'description',
-      'ldap.tls': true,
-      'ldap.tls-cacert-file': options.caCertFilePath,
-      'ldap.tls-cert-check-strategy': 'hard'
-    },
-    user: {
-      name: 'fermi',
-      pass: 'password'
-    },
-    result: {
-      statusCode: 200
-    }
-  }];
+  const results = { failed: 0 };
+  const tests = [
+    {
+      name: 'ldapBasicLDAP',
+      conf: {
+        'server.authentication': true,
+        'server.authentication-system-only': false,
+        'ldap.enabled': true,
+        'ldap.server': options.ldapUrl,
+        'ldap.port': 3890,
+        'ldap.prefix': 'uid=',
+        'ldap.suffix': ',dc=example,dc=com',
+        'ldap.search-filter': 'objectClass=simpleSecurityObject',
+        'ldap.search-attribute': 'uid',
+        'ldap.permissions-attribute-name': 'description'
+      },
+      user: {
+        name: 'fermi',
+        pass: 'password'
+      },
+      result: {
+        statusCode: 200
+      }
+    },
+    {
+      name: 'ldapBindSearchAuth',
+      conf: {
+        'server.authentication': true,
+        'server.authentication-system-only': false,
+        'ldap.enabled': true,
+        'ldap.server': options.ldapUrl,
+        'ldap.port': 3890,
+        'ldap.basedn': 'dc=example,dc=com',
+        'ldap.search-filter': 'objectClass=simpleSecurityObject',
+        'ldap.search-attribute': 'uid',
+        'ldap.binddn': 'cn=admin,dc=example,dc=com',
+        'ldap.bindpasswd': 'hallo',
+        'ldap.permissions-attribute-name': 'description'
+      },
+      user: {
+        name: 'albert',
+        pass: 'password'
+      },
+      result: {
+        statusCode: 200
+      }
+    },
+    {
+      name: 'ldapBindSearchAuthWrongUser',
+      conf: {
+        'server.authentication': true,
+        'server.authentication-system-only': false,
+        'ldap.enabled': true,
+        'ldap.server': options.ldapUrl,
+        'ldap.port': 3890,
+        'ldap.basedn': 'dc=example,dc=com',
+        'ldap.search-filter': 'objectClass=simpleSecurityObject',
+        'ldap.search-attribute': 'uid',
+        'ldap.binddn': 'cn=admin,dc=example,dc=com',
+        'ldap.bindpasswd': 'hallo',
+        'ldap.permissions-attribute-name': 'description'
+      },
+      user: {
+        name: 'werner',
+        pass: 'password'
+      },
+      result: {
+        statusCode: 500
+      }
+    },
+    {
+      name: 'ldapUrlBindSearchAuth',
+      conf: {
+        'server.authentication': true,
+        'server.authentication-system-only': false,
+        'ldap.enabled': true,
+        'ldap.url': `ldap://${options.ldapUrl}:3890/dc=example,dc=com?uid?sub`,
+        'ldap.search-filter': 'objectClass=simpleSecurityObject',
+        'ldap.binddn': 'cn=admin,dc=example,dc=com',
+        'ldap.bindpasswd': 'hallo',
+        'ldap.permissions-attribute-name': 'description'
+      },
+      user: {
+        name: 'fermi',
+        pass: 'password'
+      },
+      result: {
+        statusCode: 200
+      }
+    },
+    {
+      name: 'ldapUrlBindSearchTlsAuth',
+      conf: {
+        'server.authentication': true,
+        'server.authentication-system-only': false,
+        'ldap.enabled': true,
+        'ldap.url': `ldap://${options.ldapUrl}:3890/dc=example,dc=com?uid?sub`,
+        'ldap.search-filter': 'objectClass=simpleSecurityObject',
+        'ldap.binddn': 'cn=admin,dc=example,dc=com',
+        'ldap.bindpasswd': 'hallo',
+        'ldap.permissions-attribute-name': 'description',
+        'ldap.tls': true,
+        'ldap.tls-cacert-file': options.caCertFilePath,
+        'ldap.tls-cert-check-strategy': 'hard'
+
+      },
+      user: {
+        name: 'fermi',
+        pass: 'password'
+      },
+      result: {
+        statusCode: 200
+      }
+    }];
 
   if (options.skipLdap === true) {
     print('skipping LDAP tests!');
     return {
+      failed: 0,
       ldap: {
+        failed: 0,
         status: true,
         skipped: true
       }
@@ -184,7 +178,9 @@ function ldap(options) {
   if (options.cluster) {
     print('skipping LDAP tests on cluster!');
     return {
+      failed: 0,
       ldap: {
+        failed: 0,
         status: true,
         skipped: true
       }
@@ -193,20 +189,39 @@ function ldap(options) {
 
   print(CYAN + 'LDAP tests...' + RESET);
 
-  for(const t of tests) {
+  for (const t of tests) {
     const adbInstance = pu.startInstance('tcp', options, t.conf, 'ldap');
     if (adbInstance === false) {
-      results[t.name] = {status: false, message: 'failed to start server!'};
+      results.failed += 1;
+      results[t.name] = {
+        failed: 1,
+        status: false,
+        message: 'failed to start server!'
+      };
      continue;
    }
 
    const res = request.post({
-      url:`${adbInstance.arangods[0].url}/_open/auth`,
-      body: JSON.stringify({username: t.user.name, password: t.user.pass})
+      url: `${adbInstance.arangods[0].url}/_open/auth`,
+      body: JSON.stringify(
+        {
+          username: t.user.name,
+          password: t.user.pass
+        })
    });
 
-    results[t.name] = { status: t.result.statusCode === res.statusCode };
-
+    if (t.result.statusCode !== res.statusCode) {
+      results.failed += 1;
+      results[t.name] = {
+        failed: 1,
+        status: false
+      };
+    } else {
+      results[t.name] = {
+        failed: 0,
+        status: true
+      };
+    }
    pu.shutdownInstance(adbInstance, options);
  }
 
@@ -214,7 +229,7 @@ function ldap(options) {
   return results;
 }
 
-exports.setup = function(testFns, defaultFns, opts, fnDocs, optionsDoc) {
+exports.setup = function (testFns, defaultFns, opts, fnDocs, optionsDoc) {
   testFns['ldap'] = ldap;
   defaultFns.push('ldap');
   opts['ldapUrl'] = '127.0.0.1';

@@ -77,9 +77,9 @@ function runArangodRecovery (instanceInfo, options, script, setup) {
   }
 
   // enable development debugging if extremeVerbosity is set
-  if (options.extremeVerbosity === true){
+  if (options.extremeVerbosity === true) {
     argv = argv.concat([
-      '--log.level', 'development=info',
+      '--log.level', 'development=info'
     ]);
   }
 
@@ -100,7 +100,7 @@ function recovery (options) {
   let results = {};
 
   if (!global.ARANGODB_CLIENT_VERSION(true)['failure-tests'] ||
-      global.ARANGODB_CLIENT_VERSION(true)['failure-tests'] === "false") {
+      global.ARANGODB_CLIENT_VERSION(true)['failure-tests'] === 'false') {
     results.recovery = {
       status: false,
       message: 'failure-tests not enabled. please recompile with -DUSE_FAILURE_TESTS=On'

@@ -33,7 +33,6 @@ const functionsDocumentation = {
 const optionsDocumentation = [
 ];
 
 const pu = require('@arangodb/process-utils');
 const tu = require('@arangodb/test-utils');
 
 // //////////////////////////////////////////////////////////////////////////////

@@ -154,6 +154,7 @@ function rubyTests (options, ssl) {
       print('RSpec test case falied: \n' + msg);
       res[tName].message += '\n' + msg;
     }
+    return status ? 0 : 1;
   };
 
   let count = 0;
@@ -196,6 +197,7 @@ function rubyTests (options, ssl) {
 
       result[te] = {
         total: 0,
+        failed: 0,
         status: res.status
       };
 
@@ -209,8 +211,9 @@ function rubyTests (options, ssl) {
       }
 
       for (let j = 0; j < jsonResult.examples.length; ++j) {
-        parseRspecJson(jsonResult.examples[j], result[te],
-          jsonResult.summary.duration);
+        result[te].failed += parseRspecJson(
+          jsonResult.examples[j], result[te],
+          jsonResult.summary.duration);
       }
 
       result[te].duration = jsonResult.summary.duration;

@@ -44,6 +44,7 @@ function upgrade (options) {
   if (options.cluster) {
     return {
       'upgrade': {
+        failed: 0,
         'status': true,
         'message': 'skipped because of cluster',
         'skipped': true
@@ -53,6 +54,7 @@ function upgrade (options) {
 
   let result = {
     upgrade: {
+      failed: 0,
       status: true,
       total: 1
     }
@@ -76,6 +78,7 @@ function upgrade (options) {
   result.upgrade.first = pu.executeAndWait(pu.ARANGOD_BIN, argv, options, 'upgrade', tmpDataDir);
 
   if (result.upgrade.first.status !== true) {
+    result.upgrade.failed = 1;
     print('not removing ' + tmpDataDir);
     return result.upgrade;
   }
@@ -85,6 +88,7 @@ function upgrade (options) {
   result.upgrade.second = pu.executeAndWait(pu.ARANGOD_BIN, argv, options, 'upgrade', tmpDataDir);
 
   if (result.upgrade.second.status !== true) {
+    result.upgrade.failed = 1;
    print('not removing ' + tmpDataDir);
    return result.upgrade;
  }