mirror of https://gitee.com/bigwinds/arangodb
Merge remote-tracking branch 'origin/devel' into feature/ldap-auth
commit c7487d44f4
@@ -163,6 +163,8 @@ v3.1.16 (2017-XX-XX)
 * fixed issue #2392
 
+* try to raise file descriptors to at least 8192, warn otherwise
+
 * ui - aql editor improvements + updated ace editor version (memory leak)
 
 * fixed lost HTTP requests
@@ -292,7 +292,7 @@ build-book:
 	make ppbook-check-directory-link
 	make book-check-images-referenced
 
-	if test -n $(NODE_MODLUES_DIR); then \
+	if test -n "$(NODE_MODLUES_DIR)"; then \
 	  cp -a $(NODE_MODLUES_DIR) ppbooks/$(NAME); \
 	else \
 	  cd ppbooks/$(NAME); gitbook install -g ; \
@@ -4,6 +4,19 @@ import os
 import json
 #import MarkdownPP
 
 
+RESET = '\033[0m'
+def make_std_color(No):
+    # defined for 1 through 7
+    return '\033[3' + No + 'm'
+def make_color(No):
+    # defined for 1 through 255
+    return '\033[38;5;' + No + 'm'
+
+WRN_COLOR = make_std_color('3')
+ERR_COLOR = make_std_color('1')
+STD_COLOR = make_color('8')
+
 ################################################################################
 ### @brief length of the swagger definition namespace
 ################################################################################
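The helpers added above build standard ANSI SGR escape sequences: '\033[3N m' selects one of the eight basic foreground colors, '\033[38;5;N m' one of the 256 extended colors, and '\033[0m' resets. A minimal sketch of the same scheme in C++ (names are illustrative, not part of the patch):

#include <iostream>
#include <string>

// Basic (1-7) and 256-color (1-255) ANSI foreground codes, plus reset.
static const std::string RESET = "\033[0m";
static std::string makeStdColor(int n) { return "\033[3" + std::to_string(n) + "m"; }
static std::string makeColor(int n) { return "\033[38;5;" + std::to_string(n) + "m"; }

int main() {
  std::cout << makeStdColor(1) << "error-style text" << RESET << "\n"
            << makeColor(8) << "status-style text" << RESET << "\n";
}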
@@ -31,8 +44,7 @@ def getReference(name, source, verb):
     try:
         ref = name['$ref'][defLen:]
     except Exception as x:
-        print >>sys.stderr, "No reference in: "
-        print >>sys.stderr, name
+        print >>sys.stderr, ERR_COLOR + "No reference in: " + name + RESET
         raise
     if not ref in swagger['definitions']:
         fn = ''
@@ -40,7 +52,7 @@ def getReference(name, source, verb):
             fn = swagger['paths'][route][verb]['x-filename']
         else:
             fn = swagger['definitions'][source]['x-filename']
-        print >> sys.stderr, json.dumps(swagger['definitions'], indent=4, separators=(', ',': '), sort_keys=True)
+        print >> sys.stderr, STD_COLOR + json.dumps(swagger['definitions'], indent=4, separators=(', ',': '), sort_keys=True) + RESET
         raise Exception("invalid reference: " + ref + " in " + fn)
     return ref
@@ -85,8 +97,8 @@ def unwrapPostJson(reference, layer):
             try:
                 subStructRef = getReference(thisParam['items'], reference, None)
             except:
-                print >>sys.stderr, "while analyzing: " + param
-                print >>sys.stderr, thisParam
+                print >>sys.stderr, ERR_COLOR + "while analyzing: " + param + RESET
+                print >>sys.stderr, WRN_COLOR + thisParam + RESET
             rc += "\n" + unwrapPostJson(subStructRef, layer + 1)
         else:
             rc += ' ' * layer + " - **" + param + "**: " + TrimThisParam(thisParam['description'], layer) + '\n'
@@ -122,8 +134,8 @@ def getRestReplyBodyParam(param):
     try:
         rc += unwrapPostJson(getReference(thisVerb['responses'][param]['schema'], route, verb), 0)
     except Exception:
-        print >>sys.stderr,"failed to search " + param + " in: "
-        print >>sys.stderr,json.dumps(thisVerb, indent=4, separators=(', ',': '), sort_keys=True)
+        print >>sys.stderr, ERR_COLOR + "failed to search " + param + " in: " + RESET
+        print >>sys.stderr, WRN_COLOR + json.dumps(thisVerb, indent=4, separators=(', ',': '), sort_keys=True) + RESET
         raise
     return rc + "\n"
@@ -273,14 +285,14 @@ def replaceCode(lines, blockName):
         (verb,route) = headerMatch.group(1).split(',')[0].split(' ')
         verb = verb.lower()
     except:
-        print >> sys.stderr, "failed to parse header from: " + headerMatch.group(1) + " while analysing " + blockName
+        print >> sys.stderr, ERR_COLOR + "failed to parse header from: " + headerMatch.group(1) + " while analysing " + blockName + RESET
         raise
 
     try:
         thisVerb = swagger['paths'][route][verb]
     except:
-        print >> sys.stderr, "failed to locate route in the swagger json: [" + verb + " " + route + "]" + " while analysing " + blockName
-        print >> sys.stderr, lines
+        print >> sys.stderr, ERR_COLOR + "failed to locate route in the swagger json: [" + verb + " " + route + "]" + " while analysing " + blockName + RESET
+        print >> sys.stderr, WRN_COLOR + lines + RESET
         raise
 
     for (oneRX, repl) in RX:
@@ -395,7 +407,7 @@ def walk_on_files(inDirPath, outDirPath):
                 mdpp.close()
                 md.close()
                 findStartCode(md, outFileFull)
-    print "Processed %d files, skipped %d" % (count, skipped)
+    print STD_COLOR + "Processed %d files, skipped %d" % (count, skipped) + RESET
 
 def findStartCode(fd,full_path):
     inFD = open(full_path, "r")
@@ -422,7 +434,7 @@ def findStartCode(fd,full_path):
     try:
         textFile = replaceCodeFullFile(textFile)
     except:
-        print >>sys.stderr, "while parsing :\n" + textFile
+        print >>sys.stderr, ERR_COLOR + "while parsing : " + full_path + RESET
         raise
     #print "9" * 80
     #print textFile
@@ -438,10 +450,10 @@ def replaceText(text, pathOfFile, searchText):
     #print '7'*80
     global dokuBlocks
     if not searchText in dokuBlocks[0]:
-        print >> sys.stderr, "Failed to locate the docublock '%s' for replacing it into the file '%s'\n have:" % (searchText, pathOfFile)
-        print >> sys.stderr, dokuBlocks[0].keys()
-        print >> sys.stderr, '*' * 80
-        print >> sys.stderr, text
+        print >> sys.stderr, "%sFailed to locate the docublock '%s' for replacing it into the file '%s'\n have:%s" % (ERR_COLOR, searchText, pathOfFile, RESET)
+        print >> sys.stderr, WRN_COLOR + dokuBlocks[0].keys() + RESET
+        print >> sys.stderr, ERR_COLOR + '*' * 80 + RESET
+        print >> sys.stderr, WRN_COLOR + text + RESET
         exit(1)
     #print '7'*80
     #print dokuBlocks[0][searchText]
@@ -453,22 +465,22 @@ def replaceTextInline(text, pathOfFile, searchText):
     ''' reads the mdpp and generates the md '''
     global dokuBlocks
     if not searchText in dokuBlocks[1]:
-        print >> sys.stderr, "Failed to locate the inline docublock '%s' for replacing it into the file '%s'\n have:" % (searchText, pathOfFile)
-        print >> sys.stderr, dokuBlocks[1].keys()
-        print >> sys.stderr, '*' * 80
-        print >> sys.stderr, text
+        print >> sys.stderr, "%sFailed to locate the inline docublock '%s' for replacing it into the file '%s'\n have: %s" % (ERR_COLOR, searchText, pathOfFile, RESET)
+        print >> sys.stderr, "%s%s%s" %(WRN_COLOR, dokuBlocks[1].keys(), RESET)
+        print >> sys.stderr, ERR_COLOR + '*' * 80 + RESET
+        print >> sys.stderr, WRN_COLOR + text + RESET
         exit(1)
     rePattern = r'(?s)\s*@startDocuBlockInline\s+'+ searchText +'\s.*?@endDocuBlock\s' + searchText
     # (?s) is equivalent to flags=re.DOTALL but works in Python 2.6
     match = re.search(rePattern, text)
 
     if (match == None):
-        print >> sys.stderr, "failed to match with '%s' for %s in file %s in: \n%s" % (rePattern, searchText, pathOfFile, text)
+        print >> sys.stderr, "%sfailed to match with '%s' for %s in file %s in: \n%s" % (ERR_COLOR, rePattern, searchText, pathOfFile, text, RESET)
         exit(1)
 
     subtext = match.group(0)
     if (len(re.findall('@startDocuBlock', subtext)) > 1):
-        print >> sys.stderr, "failed to snap with '%s' on end docublock for %s in %s our match is:\n%s" % (rePattern, searchText, pathOfFile, subtext)
+        print >> sys.stderr, "%sfailed to snap with '%s' on end docublock for %s in %s our match is:\n%s" % (ERR_COLOR, rePattern, searchText, pathOfFile, subtext, RESET)
         exit(1)
 
     return re.sub(rePattern, dokuBlocks[1][searchText], text)
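The (?s) pattern above matches one whole @startDocuBlockInline ... @endDocuBlock span, including newlines. The same kind of match can be sketched in C++ with std::regex, where the character class [\s\S] stands in for a dot that matches newlines (illustrative only, not part of the patch):

#include <iostream>
#include <regex>
#include <string>

int main() {
  std::string text = "before\n@startDocuBlockInline demo\nbody\n@endDocuBlock demo\nafter\n";
  // [\s\S] matches any character including newline, like Python's re.DOTALL.
  std::regex pattern(R"(\s*@startDocuBlockInline\s+demo[\s\S]*?@endDocuBlock\s+demo)");
  std::cout << std::regex_replace(text, pattern, "\nREPLACEMENT") << "\n";
}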
@@ -495,7 +507,7 @@ def readStartLine(line):
         try:
             thisBlockName = SEARCH_START.search(line).group(1).strip()
         except:
-            print >> sys.stderr, "failed to read startDocuBlock: [" + line + "]"
+            print >> sys.stderr, ERR_COLOR + "failed to read startDocuBlock: [" + line + "]" + RESET
             exit(1)
         dokuBlocks[thisBlockType][thisBlockName] = ""
         return STATE_SEARCH_END
@@ -525,10 +537,10 @@ def loadDokuBlocks():
 
     if blockFilter != None:
         remainBlocks= {}
-        print "filtering blocks"
+        print STD_COLOR + "filtering blocks" + RESET
        for oneBlock in dokuBlocks[0]:
            if blockFilter.match(oneBlock) != None:
-                print "found block %s" % oneBlock
+                print "%sfound block %s%s" % (STD_COLOR, oneBlock, RESET)
                #print dokuBlocks[0][oneBlock]
                remainBlocks[oneBlock] = dokuBlocks[0][oneBlock]
        dokuBlocks[0] = remainBlocks
@@ -541,14 +553,14 @@ def loadDokuBlocks():
             #print dokuBlocks[0][oneBlock]
             #print "6"*80
         except:
-            print >>sys.stderr, "while parsing :\n" + oneBlock
+            print >>sys.stderr, ERR_COLOR + "while parsing :\n" + oneBlock + RESET
             raise
 
     for oneBlock in dokuBlocks[1]:
         try:
             dokuBlocks[1][oneBlock] = replaceCode(dokuBlocks[1][oneBlock], oneBlock)
         except:
-            print >>sys.stderr, "while parsing :\n" + oneBlock
+            print >>sys.stderr, WRN_COLOR + "while parsing :\n" + oneBlock + RESET
             raise
@@ -560,15 +572,15 @@ if __name__ == '__main__':
     outDir = sys.argv[2]
     swaggerJson = sys.argv[3]
     if len(sys.argv) > 4 and sys.argv[4].strip() != '':
-        print "filtering " + sys.argv[4]
+        print STD_COLOR + "filtering " + sys.argv[4] + RESET
         fileFilter = re.compile(sys.argv[4])
     if len(sys.argv) > 5 and sys.argv[5].strip() != '':
-        print "filtering Docublocks: " + sys.argv[5]
+        print STD_COLOR + "filtering Docublocks: " + sys.argv[5] + RESET
         blockFilter = re.compile(sys.argv[5])
     f=open(swaggerJson, 'rU')
     swagger= json.load(f)
     f.close()
     loadDokuBlocks()
-    print "loaded %d / %d docu blocks" % (len(dokuBlocks[0]), len(dokuBlocks[1]))
+    print "%sloaded %d / %d docu blocks%s" % (STD_COLOR, len(dokuBlocks[0]), len(dokuBlocks[1]), RESET)
     #print dokuBlocks[0].keys()
     walk_on_files(inDir, outDir)
@@ -38,6 +38,7 @@
 #include <velocypack/Iterator.h>
 #include <velocypack/velocypack-aliases.h>
 
 using namespace arangodb;
 using namespace arangodb::basics;
+using namespace arangodb::aql;
@@ -79,19 +79,23 @@ Table::Table(uint32_t logSize)
       _size(static_cast<uint64_t>(1) << _logSize),
       _shift(32 - _logSize),
       _mask((_size - 1) << _shift),
-      _buckets(new GenericBucket[_size]),
+      _buffer(new uint8_t[(_size * BUCKET_SIZE) + Table::padding]),
+      _buckets(reinterpret_cast<GenericBucket*>(
+          reinterpret_cast<uint64_t>((_buffer.get() + 63)) &
+          ~(static_cast<uint64_t>(0x3fU)))),
       _auxiliary(nullptr),
       _bucketClearer(defaultClearer),
       _slotsTotal(_size),
-      _slotsUsed(0) {
+      _slotsUsed(static_cast<uint64_t>(0)) {
   _state.lock();
   _state.toggleFlag(State::Flag::disabled);
-  memset(_buckets.get(), 0, BUCKET_SIZE * _size);
+  memset(_buckets, 0, BUCKET_SIZE * _size);
   _state.unlock();
 }
 
 uint64_t Table::allocationSize(uint32_t logSize) {
-  return sizeof(Table) + (BUCKET_SIZE * (static_cast<uint64_t>(1) << logSize));
+  return sizeof(Table) + (BUCKET_SIZE * (static_cast<uint64_t>(1) << logSize)) +
+         Table::padding;
 }
 
 uint64_t Table::memoryUsage() const { return Table::allocationSize(_logSize); }
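The new constructor over-allocates the bucket storage by Table::padding (64) bytes and rounds the buffer start up to the next 64-byte boundary, so every GenericBucket begins on a cache-line boundary. The rounding idiom in isolation, as a standalone sketch under assumed sizes (not the patch itself):

#include <cassert>
#include <cstdint>
#include <memory>

int main() {
  constexpr uint64_t padding = 64;  // one cache line of slack
  constexpr uint64_t payload = 1024;
  std::unique_ptr<uint8_t[]> buffer(new uint8_t[payload + padding]);
  // Round the address up to the next multiple of 64: add 63, clear the low 6 bits.
  auto* aligned = reinterpret_cast<uint8_t*>(
      (reinterpret_cast<uintptr_t>(buffer.get()) + 63) & ~uintptr_t(0x3f));
  assert(reinterpret_cast<uintptr_t>(aligned) % 64 == 0);
  // The aligned payload still fits inside the padded allocation.
  assert(aligned >= buffer.get() && aligned + payload <= buffer.get() + payload + padding);
}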
@@ -108,7 +112,6 @@ std::pair<void*, std::shared_ptr<Table>> Table::fetchAndLockBucket(
   if (ok) {
     ok = !_state.isSet(State::Flag::disabled);
     if (ok) {
-      TRI_ASSERT(_buckets.get() != nullptr);
       bucket = &(_buckets[(hash & _mask) >> _shift]);
       source = shared_from_this();
       ok = bucket->lock(maxTries);
@@ -154,7 +157,6 @@ void* Table::primaryBucket(uint32_t index) {
   if (!isEnabled()) {
     return nullptr;
   }
-  TRI_ASSERT(_buckets.get() != nullptr);
   return &(_buckets[index]);
 }
@@ -43,6 +43,7 @@ class Table : public std::enable_shared_from_this<Table> {
   static const uint32_t maxLogSize;
   static constexpr uint32_t standardLogSizeAdjustment = 6;
   static constexpr int64_t triesGuarantee = -1;
+  static constexpr uint64_t padding = 64;
 
   typedef std::function<void(void*)> BucketClearer;
@@ -187,7 +188,8 @@ class Table : public std::enable_shared_from_this<Table> {
   uint64_t _size;
   uint32_t _shift;
   uint32_t _mask;
-  std::unique_ptr<GenericBucket[]> _buckets;
+  std::unique_ptr<uint8_t[]> _buffer;
+  GenericBucket* _buckets;
 
   std::shared_ptr<Table> _auxiliary;
@@ -88,7 +88,7 @@ uint32_t HLLCounter::getCount() {
   } else if (estimate > (1.0 / 30.0) * pow_2_32) {
     estimate = neg_pow_2_32 * log(1.0 - (estimate / pow_2_32));
   }
-  return estimate;
+  return (uint32_t) estimate;
 }
 
 void HLLCounter::merge(HLLCounter const& other) {
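For context, the branch shown is HyperLogLog's large-range correction, E* = -2^32 * ln(1 - E/2^32), applied when the raw estimate exceeds 2^32/30; the change only makes the double-to-uint32_t narrowing explicit. A condensed sketch of that correction (illustrative, with the constant spelled out):

#include <cmath>
#include <cstdint>
#include <iostream>

// Large-range correction for a 32-bit HyperLogLog estimate.
uint32_t correctEstimate(double estimate) {
  const double pow_2_32 = 4294967296.0;
  if (estimate > (1.0 / 30.0) * pow_2_32) {
    estimate = -pow_2_32 * std::log(1.0 - (estimate / pow_2_32));
  }
  return (uint32_t)estimate;  // explicit narrowing, as in the patch
}

int main() { std::cout << correctEstimate(2e8) << "\n"; }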
@@ -56,13 +56,13 @@ struct HITSComputation
 
   void compute(
       MessageIterator<SenderMessage<double>> const& messages) override {
-    double auth = 0.0f;
-    double hub = 0.0f;
+    double auth = 0.0;
+    double hub = 0.0;
     // we don't know our incoming neighbours in step 0, therfore we need step 0
     // as 'initialization' before actually starting to converge
     if (globalSuperstep() <= 1) {
-      auth = 1.0f;
-      hub = 1.0f;
+      auth = 1.0;
+      hub = 1.0;
     } else {
       HITSWorkerContext const* ctx = static_cast<HITSWorkerContext const*>(context());
       for (SenderMessage<double> const* message : messages) {
@@ -32,7 +32,7 @@ using namespace arangodb;
 using namespace arangodb::pregel;
 using namespace arangodb::pregel::algos;
 
-static float EPS = 0.00001;
+static float EPS = 0.00001f;
 static std::string const kConvergence = "convergence";
 
 struct PRWorkerContext : public WorkerContext {
@@ -41,9 +41,9 @@ struct PRWorkerContext : public WorkerContext {
   float commonProb = 0;
   void preGlobalSuperstep(uint64_t gss) override {
     if (gss == 0) {
-      commonProb = 1.0 / vertexCount();
+      commonProb = 1.0f / vertexCount();
     } else {
-      commonProb = 0.15 / vertexCount();
+      commonProb = 0.15f / vertexCount();
     }
   }
 };
@@ -64,11 +64,11 @@ struct PRComputation : public VertexComputation<float, float, float> {
     if (globalSuperstep() == 0) {
       *ptr = ctx->commonProb;
     } else {
-      float sum = 0.0;
+      float sum = 0.0f;
       for (const float* msg : messages) {
         sum += *msg;
       }
-      *ptr = 0.85 * sum + ctx->commonProb;
+      *ptr = 0.85f * sum + ctx->commonProb;
     }
     float diff = fabs(copy - *ptr);
     aggregate<float>(kConvergence, diff);
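The superstep body computes the standard damped PageRank update, rank = 0.85 * (sum of incoming ranks) + 0.15/N, where the worker context supplies 0.15/N as commonProb after step 0; the edits just make the float literals explicit so no double arithmetic sneaks in. One update in isolation, as a sketch with assumed values:

#include <iostream>
#include <vector>

int main() {
  const float damping = 0.85f;
  const float n = 4.0f;                        // vertex count (assumed)
  std::vector<float> incoming = {0.1f, 0.2f};  // messages from in-neighbours
  float sum = 0.0f;
  for (float msg : incoming) sum += msg;
  float rank = damping * sum + (1.0f - damping) / n;  // 0.85*sum + 0.15/N
  std::cout << rank << "\n";
}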
@@ -115,7 +115,7 @@ struct SenderMessageFormat : public MessageFormat<SenderMessage<T>> {
   SenderMessageFormat() {}
   void unwrapValue(VPackSlice s, SenderMessage<T>& senderVal) const override {
     VPackArrayIterator array(s);
-    senderVal.senderId.shard = (*array).getUInt();
+    senderVal.senderId.shard = (PregelShard) ((*array).getUInt());
     senderVal.senderId.key = (*(++array)).copyString();
     senderVal.value = (*(++array)).getNumber<T>();
   }
@@ -522,9 +522,9 @@ void Worker<V, E, M>::_finishedProcessing() {
     // async adaptive message buffering
     _messageBatchSize = _algorithm->messageBatchSize(_config, _messageStats);
   } else {
-    uint32_t tn = _config.parallelism();
-    uint32_t s = _messageStats.sendCount / tn / 2UL;
-    _messageBatchSize = s > 1000 ? s : 1000;
+    uint64_t tn = _config.parallelism();
+    uint64_t s = _messageStats.sendCount / tn / 2UL;
+    _messageBatchSize = s > 1000 ? (uint32_t)s : 1000;
   }
   _messageStats.resetTracking();
   LOG_TOPIC(DEBUG, Logger::PREGEL) << "Batch size: " << _messageBatchSize;
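Widening tn and s to uint64_t keeps the sendCount division in 64-bit arithmetic, and the value is narrowed only once, after the comparison against the floor. The clamp in isolation, with illustrative numbers:

#include <cstdint>
#include <iostream>

int main() {
  uint64_t sendCount = 9000000;  // assumed per-superstep message count
  uint64_t tn = 4;               // worker parallelism
  uint64_t s = sendCount / tn / 2UL;
  uint32_t batchSize = s > 1000 ? (uint32_t)s : 1000;  // floor of 1000
  std::cout << batchSize << "\n";                      // prints 1125000
}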
@@ -183,10 +183,12 @@ int InitialSyncer::run(std::string& errorMsg, bool incremental) {
     return res;
   }
 
-  if (_masterInfo._majorVersion == 1 ||
-      (_masterInfo._majorVersion == 2 && _masterInfo._minorVersion <= 6)) {
-    LOG_TOPIC(WARN, Logger::REPLICATION) << "incremental replication is not supported with a master < ArangoDB 2.7";
-    incremental = false;
+  if (incremental) {
+    if (_masterInfo._majorVersion == 1 ||
+        (_masterInfo._majorVersion == 2 && _masterInfo._minorVersion <= 6)) {
+      LOG_TOPIC(WARN, Logger::REPLICATION) << "incremental replication is not supported with a master < ArangoDB 2.7";
+      incremental = false;
+    }
   }
 
   if (incremental) {
@@ -605,7 +607,7 @@ int InitialSyncer::handleCollectionDump(
     appendix = "&flush=false";
   } else {
     // only flush WAL once
-    appendix = "&flush=true&flushWait=5";
+    appendix = "&flush=true&flushWait=15";
     _hasFlushed = true;
   }
@@ -33,9 +33,11 @@ using namespace arangodb::application_features;
 using namespace arangodb::basics;
 using namespace arangodb::options;
 
+uint64_t const FileDescriptorsFeature::RECOMMENDED = 8192;
+
 FileDescriptorsFeature::FileDescriptorsFeature(
     application_features::ApplicationServer* server)
-    : ApplicationFeature(server, "FileDescriptors"), _descriptorsMinimum(1024) {
+    : ApplicationFeature(server, "FileDescriptors"), _descriptorsMinimum(0) {
   setOptional(false);
   requiresElevatedPrivileges(false);
   startsAfter("Logger");
@@ -76,76 +78,84 @@ void FileDescriptorsFeature::start() {
         << StringifyLimitValue(rlim.rlim_max) << ", soft limit is "
         << StringifyLimitValue(rlim.rlim_cur);
   }
 
+  if (rlim.rlim_cur < RECOMMENDED) {
+    LOG_TOPIC(WARN, arangodb::Logger::SYSCALL)
+        << "file-descriptors limit is too low, currently "
+        << StringifyLimitValue(rlim.rlim_cur) << ", raise to at least "
+        << RECOMMENDED;
+  }
 #endif
 }
 
 void FileDescriptorsFeature::adjustFileDescriptors() {
 #ifdef TRI_HAVE_GETRLIMIT
-  if (0 < _descriptorsMinimum) {
-    struct rlimit rlim;
-    int res = getrlimit(RLIMIT_NOFILE, &rlim);
+  struct rlimit rlim;
+  int res = getrlimit(RLIMIT_NOFILE, &rlim);
 
-    if (res != 0) {
-      LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
-          << "cannot get the file descriptor limit: " << strerror(errno);
-      FATAL_ERROR_EXIT();
-    }
+  if (res != 0) {
+    LOG_TOPIC(FATAL, arangodb::Logger::SYSCALL)
+        << "cannot get the file descriptor limit: " << strerror(errno);
+    FATAL_ERROR_EXIT();
+  }
 
-    LOG_TOPIC(DEBUG, arangodb::Logger::FIXME)
-        << "file-descriptors (nofiles) hard limit is "
-        << StringifyLimitValue(rlim.rlim_max) << ", soft limit is "
-        << StringifyLimitValue(rlim.rlim_cur);
+  LOG_TOPIC(DEBUG, arangodb::Logger::SYSCALL)
+      << "file-descriptors (nofiles) hard limit is "
+      << StringifyLimitValue(rlim.rlim_max) << ", soft limit is "
+      << StringifyLimitValue(rlim.rlim_cur);
 
-    bool changed = false;
+  uint64_t recommended = RECOMMENDED;
+  uint64_t minimum = _descriptorsMinimum;
 
-    if (rlim.rlim_max < _descriptorsMinimum) {
-      LOG_TOPIC(DEBUG, arangodb::Logger::FIXME)
-          << "hard limit " << rlim.rlim_max << " is too small, trying to raise";
+  if (recommended < minimum) {
+    recommended = minimum;
+  }
 
-      rlim.rlim_max = _descriptorsMinimum;
-      rlim.rlim_cur = _descriptorsMinimum;
+  if (rlim.rlim_max < recommended) {
+    LOG_TOPIC(DEBUG, arangodb::Logger::SYSCALL)
+        << "hard limit " << rlim.rlim_max << " is too small, trying to raise";
 
-      res = setrlimit(RLIMIT_NOFILE, &rlim);
+    rlim.rlim_max = recommended;
+    rlim.rlim_cur = recommended;
 
-      if (res < 0) {
-        LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
-            << "cannot raise the file descriptor limit to "
-            << _descriptorsMinimum << ": " << strerror(errno);
-        FATAL_ERROR_EXIT();
-      }
+    res = setrlimit(RLIMIT_NOFILE, &rlim);
 
-      changed = true;
-    } else if (rlim.rlim_cur < _descriptorsMinimum) {
-      LOG_TOPIC(DEBUG, arangodb::Logger::FIXME)
-          << "soft limit " << rlim.rlim_cur << " is too small, trying to raise";
+    if (0 < minimum && minimum < recommended && res < 0) {
+      rlim.rlim_max = minimum;
+      rlim.rlim_cur = minimum;
 
-      rlim.rlim_cur = _descriptorsMinimum;
+      res = setrlimit(RLIMIT_NOFILE, &rlim);
+    }
 
-      res = setrlimit(RLIMIT_NOFILE, &rlim);
+    if (0 < minimum && res < 0) {
+      LOG_TOPIC(FATAL, arangodb::Logger::SYSCALL)
+          << "cannot raise the file descriptor limit to " << minimum << ": "
+          << strerror(errno);
+      FATAL_ERROR_EXIT();
+    }
+  } else if (rlim.rlim_cur < recommended) {
+    LOG_TOPIC(DEBUG, arangodb::Logger::SYSCALL)
+        << "soft limit " << rlim.rlim_cur << " is too small, trying to raise";
 
-      if (res < 0) {
-        LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
-            << "cannot raise the file descriptor limit to "
-            << _descriptorsMinimum << ": " << strerror(errno);
-        FATAL_ERROR_EXIT();
-      }
+    if (recommended < rlim.rlim_max) {
+      recommended = rlim.rlim_max;
+    }
 
-      changed = true;
-    }
+    rlim.rlim_cur = recommended;
 
-    if (changed) {
-      res = getrlimit(RLIMIT_NOFILE, &rlim);
+    res = setrlimit(RLIMIT_NOFILE, &rlim);
 
-      if (res != 0) {
-        LOG_TOPIC(FATAL, arangodb::Logger::SYSCALL)
-            << "cannot get the file descriptor limit: " << strerror(errno);
-        FATAL_ERROR_EXIT();
-      }
+    if (0 < minimum && minimum < recommended && res < 0) {
+      rlim.rlim_cur = minimum;
 
-      LOG_TOPIC(INFO, arangodb::Logger::SYSCALL)
-          << "file-descriptors (nofiles) new hard limit is "
-          << StringifyLimitValue(rlim.rlim_max) << ", new soft limit is "
-          << StringifyLimitValue(rlim.rlim_cur);
+      res = setrlimit(RLIMIT_NOFILE, &rlim);
+    }
+
+    if (0 < minimum && res < 0) {
+      LOG_TOPIC(FATAL, arangodb::Logger::SYSCALL)
+          << "cannot raise the file descriptor limit to " << minimum << ": "
+          << strerror(errno);
+      FATAL_ERROR_EXIT();
+    }
   }
 #endif
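The rewritten adjustFileDescriptors() follows a raise-with-fallback pattern: try to lift the nofile limit to max(RECOMMENDED, minimum), retry with just the configured minimum if that fails, and abort only when even the minimum cannot be met. A condensed POSIX sketch of the same pattern, with illustrative values:

#include <sys/resource.h>
#include <cerrno>
#include <cstdio>
#include <cstring>

int main() {
  const rlim_t recommended = 8192, minimum = 1024;  // illustrative values
  struct rlimit rlim;
  if (getrlimit(RLIMIT_NOFILE, &rlim) != 0) { perror("getrlimit"); return 1; }
  if (rlim.rlim_cur < recommended) {
    struct rlimit wanted = rlim;
    wanted.rlim_cur = recommended;
    if (wanted.rlim_max < recommended) wanted.rlim_max = recommended;  // raising the hard limit needs privilege
    if (setrlimit(RLIMIT_NOFILE, &wanted) != 0 && rlim.rlim_cur < minimum) {
      wanted = rlim;
      wanted.rlim_cur = minimum;  // second try: just the configured minimum
      if (setrlimit(RLIMIT_NOFILE, &wanted) != 0) {
        fprintf(stderr, "cannot raise nofile limit: %s\n", strerror(errno));
        return 1;
      }
    }
  }
  return 0;
}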
@@ -28,6 +28,9 @@
 
 namespace arangodb {
 class FileDescriptorsFeature : public application_features::ApplicationFeature {
+ public:
+  static uint64_t const RECOMMENDED;
+
  public:
   explicit FileDescriptorsFeature(application_features::ApplicationServer*);
@@ -34,7 +34,9 @@
 #include <velocypack/Iterator.h>
 #include <velocypack/velocypack-aliases.h>
 
+using namespace arangodb::transaction;
+using VPackHelper = arangodb::basics::VelocyPackHelper;
 using TraverserOptions = arangodb::traverser::TraverserOptions;
 
 arangodb::traverser::TraverserOptions::LookupInfo::LookupInfo()
     : expression(nullptr),
@@ -101,6 +101,8 @@ const optionsDocumentation = [
   ' - `loopSleepWhen`: sleep every nth iteration',
   ' - `loopSleepSec`: sleep seconds between iterations',
   '',
+  ' - `storageEngine`: set to `rocksdb` or `mmfiles` - defaults to `mmfiles`',
+  '',
   ' - `server`: server_url (e.g. tcp://127.0.0.1:8529) for external server',
   ' - `cluster`: if set to true the tests are run with the coordinator',
   '   of a small local cluster',
@@ -190,6 +192,7 @@ const optionsDefaults = {
   'skipShebang': false,
   'skipSsl': false,
   'skipTimeCritical': false,
+  'storageEngine': 'mmfiles',
   'test': undefined,
   'testBuckets': undefined,
   'username': 'root',
@@ -261,6 +264,25 @@ let LOGS_DIR;
 let UNITTESTS_DIR;
 let GDB_OUTPUT = "";
 
+function doOnePathInner(path) {
+  return _.filter(fs.list(makePathUnix(path)),
+    function (p) {
+      return p.substr(-3) === '.js';
+    })
+    .map(function (x) {
+      return fs.join(makePathUnix(path), x);
+    }).sort();
+}
+
+function scanTestPath(path) {
+  var community = doOnePathInner(path);
+  if (global.ARANGODB_CLIENT_VERSION(true)['enterprise-version']) {
+    return community.concat(doOnePathInner('enterprise/' + path));
+  } else {
+    return community;
+  }
+}
+
 function makeResults (testname, instanceInfo) {
   const startTime = time();
@@ -322,13 +344,17 @@ function makeArgsArangod (options, appDir, role) {
     config = "arangod-" + role + ".conf";
   }
 
-  return {
+  let args = {
     'configuration': fs.join(CONFIG_DIR, config),
     'define': 'TOP_DIR=' + TOP_DIR,
     'wal.flush-timeout': options.walFlushTimeout,
     'javascript.app-path': appDir,
     'http.trusted-origin': options.httpTrustedOrigin || 'all'
   };
+  if (options.storageEngine !== 'mmfiles') {
+    args['server.storage-engine'] = 'rocksdb';
+  }
+  return args;
 }
 
 // //////////////////////////////////////////////////////////////////////////////
@@ -545,7 +571,7 @@ function analyzeCrash (binary, arangod, options, checkStr) {
   var cp = corePattern.asciiSlice(0, corePattern.length);
 
   if (matchApport.exec(cp) != null) {
-    print(RED + "apport handles corefiles on your system. Uninstall it if you want us to get corefiles for analysis.");
+    print(RED + "apport handles corefiles on your system. Uninstall it if you want us to get corefiles for analysis." + RESET);
     return;
   }
@@ -556,7 +582,7 @@ function analyzeCrash (binary, arangod, options, checkStr) {
     options.coreDirectory = cp.replace("%e", "*").replace("%t", "*").replace("%p", arangod.pid);
   }
   else {
-    print(RED + "Don't know howto locate corefiles in your system. '" + cpf + "' contains: '" + cp + "'");
+    print(RED + "Don't know howto locate corefiles in your system. '" + cpf + "' contains: '" + cp + "'" + RESET);
     return;
   }
 }
|
|||
storeArangodPath + ' for later analysis.\n' +
|
||||
'Server shut down with :\n' +
|
||||
yaml.safeDump(arangod) +
|
||||
'marking build as crashy.');
|
||||
'marking build as crashy.' + RESET);
|
||||
|
||||
let corePath = (options.coreDirectory === '')
|
||||
? 'core'
|
||||
|
@ -826,6 +852,7 @@ function performTests (options, testList, testname, runFn) {
|
|||
|
||||
let results = {};
|
||||
let continueTesting = true;
|
||||
let count = 0;
|
||||
|
||||
for (let i = 0; i < testList.length; i++) {
|
||||
let te = testList[i];
|
||||
|
@ -834,6 +861,7 @@ function performTests (options, testList, testname, runFn) {
|
|||
if (filterTestcaseByOptions(te, options, filtered)) {
|
||||
let first = true;
|
||||
let loopCount = 0;
|
||||
count += 1;
|
||||
|
||||
while (first || options.loopEternal) {
|
||||
if (!continueTesting) {
|
||||
|
@@ -895,6 +923,15 @@ function performTests (options, testList, testname, runFn) {
     }
   }
 
+  if (count === 0) {
+    results["ALLTESTS"] = {
+      status: false,
+      skipped: true
+    };
+    results.status = false;
+    print(RED + "No testcase matched the filter." + RESET);
+  }
+
   print('Shutting down...');
   shutdownInstance(instanceInfo, options);
   print('done.');
@@ -1029,7 +1066,7 @@ function executeArangod (cmd, args, options) {
 // / @brief executes a command and wait for result
 // //////////////////////////////////////////////////////////////////////////////
 
-function executeAndWait (cmd, args, options, valgrindTest, rootDir) {
+function executeAndWait (cmd, args, options, valgrindTest, rootDir, disableCoreCheck = false) {
   if (valgrindTest && options.valgrind) {
     let valgrindOpts = {};
@@ -1072,7 +1109,8 @@ function executeAndWait (cmd, args, options, valgrindTest, rootDir) {
 
   let errorMessage = ' - ';
 
-  if (res.hasOwnProperty('signal') &&
+  if (!disableCoreCheck &&
+      res.hasOwnProperty('signal') &&
       ((res.signal === 11) ||
       (res.signal === 6) ||
       // Windows sometimes has random numbers in signal...
@@ -1793,11 +1831,13 @@ function rubyTests (options, ssl) {
     }
   };
 
+  let count = 0;
   for (let i = 0; i < files.length; i++) {
     const te = files[i];
 
     if (te.substr(0, 4) === 'api-' && te.substr(-3) === '.rb') {
       if (filterTestcaseByOptions(te, options, filtered)) {
+        count += 1;
         if (!continueTesting) {
           print('Skipping ' + te + ' server is gone.');
@@ -1870,6 +1910,15 @@ function rubyTests (options, ssl) {
 
   print('Shutting down...');
 
+  if (count === 0) {
+    result["ALLTESTS"] = {
+      status: false,
+      skipped: true
+    };
+    result.status = false;
+    print(RED + "No testcase matched the filter." + RESET);
+  }
+
   fs.remove(tmpname);
   shutdownInstance(instanceInfo, options);
   print('done.');
@@ -1886,56 +1935,37 @@ let testsCases = {
 };
 
 function findTests () {
-  function doOnePathInner(path) {
-    return _.filter(fs.list(makePathUnix(path)),
-      function (p) {
-        return p.substr(-3) === '.js';
-      })
-      .map(function (x) {
-        return fs.join(makePathUnix(path), x);
-      }).sort();
-  }
-
-  function doOnePath(path) {
-    var community = doOnePathInner(path);
-    if (global.ARANGODB_CLIENT_VERSION(true)['enterprise-version']) {
-      return community.concat(doOnePathInner('enterprise/' + path));
-    } else {
-      return community;
-    }
-  }
-
-
   if (testsCases.setup) {
     return;
   }
 
-  testsCases.common = doOnePath('js/common/tests/shell');
+  testsCases.common = scanTestPath('js/common/tests/shell');
 
-  testsCases.server_only = doOnePath('js/server/tests/shell');
+  testsCases.server_only = scanTestPath('js/server/tests/shell');
 
-  testsCases.client_only = doOnePath('js/client/tests/shell');
+  testsCases.client_only = scanTestPath('js/client/tests/shell');
 
-  testsCases.server_aql = doOnePath('js/server/tests/aql');
+  testsCases.server_aql = scanTestPath('js/server/tests/aql');
   testsCases.server_aql = _.filter(testsCases.server_aql,
     function(p) { return p.indexOf('ranges-combined') === -1; });
 
-  testsCases.server_aql_extended = doOnePath('js/server/tests/aql');
+  testsCases.server_aql_extended = scanTestPath('js/server/tests/aql');
   testsCases.server_aql_extended = _.filter(testsCases.server_aql_extended,
     function(p) { return p.indexOf('ranges-combined') !== -1; });
 
-  testsCases.server_aql_performance = doOnePath('js/server/perftests');
+  testsCases.server_aql_performance = scanTestPath('js/server/perftests');
 
-  testsCases.server_http = doOnePath('js/common/tests/http');
+  testsCases.server_http = scanTestPath('js/common/tests/http');
 
-  testsCases.replication = doOnePath('js/common/tests/replication');
+  testsCases.replication = scanTestPath('js/common/tests/replication');
 
-  testsCases.agency = doOnePath('js/client/tests/agency');
+  testsCases.agency = scanTestPath('js/client/tests/agency');
 
-  testsCases.resilience = doOnePath('js/server/tests/resilience');
+  testsCases.resilience = scanTestPath('js/server/tests/resilience');
 
-  testsCases.client_resilience = doOnePath('js/client/tests/resilience');
-  testsCases.cluster_sync = doOnePath('js/server/tests/cluster-sync');
+  testsCases.client_resilience = scanTestPath('js/client/tests/resilience');
+  testsCases.cluster_sync = scanTestPath('js/server/tests/cluster-sync');
 
   testsCases.server = testsCases.common.concat(testsCases.server_only);
   testsCases.client = testsCases.common.concat(testsCases.client_only);
@@ -1950,7 +1980,7 @@ function findTests () {
 function filterTestcaseByOptions (testname, options, whichFilter) {
   if (options.hasOwnProperty('test') && (typeof (options.test) !== 'undefined')) {
     whichFilter.filter = 'testcase';
-    return testname === options.test;
+    return testname.search(options.test) >= 0;
   }
 
   if (options.replication) {
@@ -2016,6 +2046,14 @@ function filterTestcaseByOptions (testname, options, whichFilter) {
     return false;
   }
 
+  if ((testname.indexOf('-mmfiles') !== -1) && options.storageEngine === "rocksdb") {
+    whichFilter.filter = 'skip when running as rocksdb';
+    return false;
+  }
+  if ((testname.indexOf('-rocksdb') !== -1) && options.storageEngine === "mmfiles") {
+    whichFilter.filter = 'skip when running as mmfiles';
+    return false;
+  }
   return true;
 }
@@ -3466,8 +3504,7 @@ function runArangodRecovery (instanceInfo, options, script, setup) {
   }
 
   argv = argv.concat([
-    '--javascript.script',
-    fs.join('.', 'js', 'server', 'tests', 'recovery', script + '.js')
+    '--javascript.script', script
   ]);
 
   let binary = ARANGOD_BIN;
@@ -3476,94 +3513,9 @@ function runArangodRecovery (instanceInfo, options, script, setup) {
     argv.unshift(ARANGOD_BIN);
   }
 
-  instanceInfo.pid = executeAndWait(binary, argv, options, "recovery", instanceInfo.rootDir);
+  instanceInfo.pid = executeAndWait(binary, argv, options, "recovery", instanceInfo.rootDir, setup);
 }
 
-const recoveryTests = [
-  'insert-update-replace',
-  'die-during-collector',
-  'disk-full-logfile',
-  'disk-full-logfile-data',
-  'disk-full-datafile',
-  'collection-drop-recreate',
-  'collection-duplicate-name',
-  'create-with-temp',
-  'create-with-temp-old',
-  'create-collection-fail',
-  'create-collection-tmpfile',
-  'create-database-existing',
-  'create-database-fail',
-  'empty-datafiles',
-  'flush-drop-database-and-fail',
-  'drop-database-flush-and-fail',
-  'drop-database-only-tmp',
-  'create-databases',
-  'recreate-databases',
-  'drop-databases',
-  'create-and-drop-databases',
-  'drop-database-and-fail',
-  'flush-drop-database-and-fail',
-  'collection-rename-recreate',
-  'collection-rename-recreate-flush',
-  'collection-unload',
-  'resume-recovery-multi-flush',
-  'resume-recovery-simple',
-  'resume-recovery-all',
-  'resume-recovery-other',
-  'resume-recovery',
-  'foxx-directories',
-  'collection-duplicate',
-  'collection-rename',
-  'collection-properties',
-  'empty-logfiles',
-  'many-logs',
-  'multiple-logs',
-  'collection-recreate',
-  'drop-index',
-  'drop-index-shutdown',
-  'drop-indexes',
-  'create-indexes',
-  'create-collections',
-  'recreate-collection',
-  'drop-single-collection',
-  'drop-collections',
-  'collections-reuse',
-  'collections-different-attributes',
-  'indexes-after-flush',
-  'indexes-hash',
-  'indexes-rocksdb',
-  'indexes-rocksdb-nosync',
-  'indexes-rocksdb-restore',
-  'indexes-sparse-hash',
-  'indexes-skiplist',
-  'indexes-sparse-skiplist',
-  'indexes-geo',
-  'edges',
-  'indexes',
-  'many-inserts',
-  'many-updates',
-  'wait-for-sync',
-  'attributes',
-  'no-journal',
-  'write-throttling',
-  'collector-oom',
-  'transaction-no-abort',
-  'transaction-no-commit',
-  'transaction-just-committed',
-  'multi-database-durability',
-  'disk-full-no-collection-journal',
-  'no-shutdown-info-with-flush',
-  'no-shutdown-info-no-flush',
-  'no-shutdown-info-multiple-logs',
-  'insert-update-remove',
-  'insert-update-remove-distance',
-  'big-transaction-durability',
-  'transaction-durability',
-  'transaction-durability-multiple',
-  'corrupt-wal-marker-multiple',
-  'corrupt-wal-marker-single'
-];
-
 testFuncs.recovery = function (options) {
   let results = {};
@@ -3577,11 +3529,16 @@ testFuncs.recovery = function (options) {
 
   let status = true;
 
+  let recoveryTests = scanTestPath('js/server/tests/recovery');
+  let count = 0;
+
   for (let i = 0; i < recoveryTests.length; ++i) {
     let test = recoveryTests[i];
+    let filtered = {};
 
-    if (options.test === undefined || options.test === test) {
+    if (filterTestcaseByOptions (test, options, filtered )) {
       let instanceInfo = {};
+      count += 1;
 
       runArangodRecovery(instanceInfo, options, test, true);
@@ -3597,13 +3554,20 @@ testFuncs.recovery = function (options) {
         status = false;
       }
     } else {
       results[test] = {
         status: true,
         skipped: true
       };
+      if (options.extremeVerbosity) {
+        print('Skipped ' + test + ' because of ' + filtered.filter);
+      }
     }
   }
 
+  if (count === 0) {
+    results["ALLTESTS"] = {
+      status: false,
+      skipped: true
+    };
+    status = false;
+    print(RED + "No testcase matched the filter." + RESET);
+  }
   results.status = status;
 
   return {
@@ -0,0 +1,100 @@
+/* global describe, it */
+'use strict';
+const expect = require('chai').expect;
+const sinon = require('sinon');
+const statuses = require('statuses');
+const path = require('path');
+const fs = require('fs');
+const internal = require('internal');
+const crypto = require('@arangodb/crypto');
+const SyntheticResponse = require('@arangodb/foxx/router/response');
+
+describe('SyntheticResponse', function () {
+  describe('cookie', function () {
+    it('adds a cookie', function () {
+      require("console").log('adds a cookie');
+      const rawRes = {};
+      const res = new SyntheticResponse(rawRes, {});
+      res.cookie('hello', 'banana');
+      expect(rawRes.cookies).to.eql([
+        {name: 'hello', value: 'banana'}
+      ]);
+    });
+    it('optionally adds a TTL', function () {
+      require("console").log('optionally adds a TTL');
+      const rawRes = {};
+      const res = new SyntheticResponse(rawRes, {});
+      res.cookie('hello', 'banana', {ttl: 22});
+      expect(rawRes.cookies).to.eql([
+        {name: 'hello', value: 'banana', lifeTime: 22}
+      ]);
+    });
+    it('optionally adds some metadata', function () {
+      require("console").log('optionally adds some metadata');
+      const rawRes = {};
+      require("console").log("1");
+      const res = new SyntheticResponse(rawRes, {});
+      require("console").log("2");
+      res.cookie('hello', 'banana', {
+        path: '/path',
+        domain: 'cats.example',
+        secure: true,
+        httpOnly: true
+      });
+      require("console").log("3");
+      expect(rawRes.cookies).to.eql([
+        {
+          name: 'hello',
+          value: 'banana',
+          path: '/path',
+          domain: 'cats.example',
+          secure: true,
+          httpOnly: true
+        }
+      ]);
+      require("console").log("4");
+    });
+    it('supports signed cookies when a secret is provided', function () {
+      require("console").log('supports signed cookies when a secret is provided');
+      const rawRes = {};
+      const res = new SyntheticResponse(rawRes, {});
+      res.cookie('hello', 'banana', {secret: 'potato'});
+      expect(rawRes.cookies).to.eql([
+        {name: 'hello', value: 'banana'},
+        {name: 'hello.sig', value: crypto.hmac('potato', 'banana')}
+      ]);
+    });
+    it('supports signed cookies with different algorithms', function () {
+      require("console").log('supports signed cookies with different algorithms');
+      const rawRes = {};
+      const res = new SyntheticResponse(rawRes, {});
+      res.cookie('hello', 'banana', {
+        secret: 'potato',
+        algorithm: 'sha512'
+      });
+      expect(rawRes.cookies).to.eql([
+        {name: 'hello', value: 'banana'},
+        {name: 'hello.sig', value: crypto.hmac('potato', 'banana', 'sha512')}
+      ]);
+    });
+    it('treats options string as a secret', function () {
+      require("console").log('treats options string as a secret');
+      const rawRes = {};
+      const res = new SyntheticResponse(rawRes, {});
+      res.cookie('hello', 'banana', 'potato');
+      expect(rawRes.cookies).to.eql([
+        {name: 'hello', value: 'banana'},
+        {name: 'hello.sig', value: crypto.hmac('potato', 'banana')}
+      ]);
+    });
+    it('treats options number as a TTL value', function () {
+      require("console").log('treats options number as a TTL value');
+      const rawRes = {};
+      const res = new SyntheticResponse(rawRes, {});
+      res.cookie('hello', 'banana', 22);
+      expect(rawRes.cookies).to.eql([
+        {name: 'hello', value: 'banana', lifeTime: 22}
+      ]);
+    });
+  });
+});
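The signed-cookie tests above expect a companion '<name>.sig' cookie whose value is an HMAC of the cookie value under the given secret (sha256 by default, sha512 when requested). A C++ sketch of that signature using OpenSSL's HMAC, assumed available (not part of the patch):

#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <cstdio>
#include <string>

// HMAC the cookie value under a secret, as the 'hello.sig' cookie expects.
std::string signCookie(const std::string& secret, const std::string& value) {
  unsigned char digest[EVP_MAX_MD_SIZE];
  unsigned int len = 0;
  HMAC(EVP_sha256(), secret.data(), (int)secret.size(),
       (const unsigned char*)value.data(), value.size(), digest, &len);
  static const char hex[] = "0123456789abcdef";
  std::string out;
  for (unsigned int i = 0; i < len; i++) {
    out += hex[digest[i] >> 4];
    out += hex[digest[i] & 0xf];
  }
  return out;  // hex-encoded signature
}

int main() { printf("%s\n", signCookie("potato", "banana").c_str()); }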
@@ -694,91 +694,4 @@ describe('SyntheticResponse', function () {
       expect(res.headers).to.have.a.property('vary', '*');
     });
   });
-  describe('cookie', function () {
-    it('adds a cookie', function () {
-      require("console").log('adds a cookie');
-      const rawRes = {};
-      const res = new SyntheticResponse(rawRes, {});
-      res.cookie('hello', 'banana');
-      expect(rawRes.cookies).to.eql([
-        {name: 'hello', value: 'banana'}
-      ]);
-    });
-    it('optionally adds a TTL', function () {
-      require("console").log('optionally adds a TTL');
-      const rawRes = {};
-      const res = new SyntheticResponse(rawRes, {});
-      res.cookie('hello', 'banana', {ttl: 22});
-      expect(rawRes.cookies).to.eql([
-        {name: 'hello', value: 'banana', lifeTime: 22}
-      ]);
-    });
-    it('optionally adds some metadata', function () {
-      require("console").log('optionally adds some metadata');
-      const rawRes = {};
-      require("console").log("1");
-      const res = new SyntheticResponse(rawRes, {});
-      require("console").log("2");
-      res.cookie('hello', 'banana', {
-        path: '/path',
-        domain: 'cats.example',
-        secure: true,
-        httpOnly: true
-      });
-      require("console").log("3");
-      expect(rawRes.cookies).to.eql([
-        {
-          name: 'hello',
-          value: 'banana',
-          path: '/path',
-          domain: 'cats.example',
-          secure: true,
-          httpOnly: true
-        }
-      ]);
-      require("console").log("4");
-    });
-    it('supports signed cookies when a secret is provided', function () {
-      require("console").log('supports signed cookies when a secret is provided');
-      const rawRes = {};
-      const res = new SyntheticResponse(rawRes, {});
-      res.cookie('hello', 'banana', {secret: 'potato'});
-      expect(rawRes.cookies).to.eql([
-        {name: 'hello', value: 'banana'},
-        {name: 'hello.sig', value: crypto.hmac('potato', 'banana')}
-      ]);
-    });
-    it('supports signed cookies with different algorithms', function () {
-      require("console").log('supports signed cookies with different algorithms');
-      const rawRes = {};
-      const res = new SyntheticResponse(rawRes, {});
-      res.cookie('hello', 'banana', {
-        secret: 'potato',
-        algorithm: 'sha512'
-      });
-      expect(rawRes.cookies).to.eql([
-        {name: 'hello', value: 'banana'},
-        {name: 'hello.sig', value: crypto.hmac('potato', 'banana', 'sha512')}
-      ]);
-    });
-    it('treats options string as a secret', function () {
-      require("console").log('treats options string as a secret');
-      const rawRes = {};
-      const res = new SyntheticResponse(rawRes, {});
-      res.cookie('hello', 'banana', 'potato');
-      expect(rawRes.cookies).to.eql([
-        {name: 'hello', value: 'banana'},
-        {name: 'hello.sig', value: crypto.hmac('potato', 'banana')}
-      ]);
-    });
-    it('treats options number as a TTL value', function () {
-      require("console").log('treats options number as a TTL value');
-      const rawRes = {};
-      const res = new SyntheticResponse(rawRes, {});
-      res.cookie('hello', 'banana', 22);
-      expect(rawRes.cookies).to.eql([
-        {name: 'hello', value: 'banana', lifeTime: 22}
-      ]);
-    });
-  });
 });
@@ -61,7 +61,7 @@ LogTopic Logger::REQUESTS("requests", LogLevel::FATAL); // suppress
 LogTopic Logger::SSL("ssl", LogLevel::WARN);
 LogTopic Logger::STARTUP("startup", LogLevel::INFO);
 LogTopic Logger::SUPERVISION("supervision", LogLevel::INFO);
-LogTopic Logger::SYSCALL("syscall", LogLevel::WARN);
+LogTopic Logger::SYSCALL("syscall", LogLevel::INFO);
 LogTopic Logger::THREADS("threads", LogLevel::WARN);
 LogTopic Logger::TRANSACTIONS("trx", LogLevel::WARN);
 LogTopic Logger::V8("v8", LogLevel::WARN);
@@ -10,7 +10,7 @@ else
   PS='/'
 fi;
 
-ulimit -n 2048
+ulimit -n 8192
 
 export PORT=`expr 1024 + $RANDOM`
@@ -40,7 +40,8 @@ using namespace arangodb::cache;
 TEST_CASE("cache::Table", "[cache]") {
   SECTION("test static allocation size method") {
     for (uint32_t i = Table::minLogSize; i <= Table::maxLogSize; i++) {
-      REQUIRE(Table::allocationSize(i) == (sizeof(Table) + (BUCKET_SIZE << i)));
+      REQUIRE(Table::allocationSize(i) ==
+              (sizeof(Table) + (BUCKET_SIZE << i) + Table::padding));
     }
   }
@@ -48,7 +49,8 @@ TEST_CASE("cache::Table", "[cache]") {
     for (uint32_t i = Table::minLogSize; i <= 20; i++) {
       auto table = std::make_shared<Table>(i);
       REQUIRE(table.get() != nullptr);
-      REQUIRE(table->memoryUsage() == (sizeof(Table) + (BUCKET_SIZE << i)));
+      REQUIRE(table->memoryUsage() ==
+              (sizeof(Table) + (BUCKET_SIZE << i) + Table::padding));
       REQUIRE(table->logSize() == i);
      REQUIRE(table->size() == (static_cast<uint64_t>(1) << i));
     }