mirror of https://gitee.com/bigwinds/arangodb

commit 12d1b2d318
Merge branch 'devel' of https://github.com/arangodb/arangodb into feature/storage-format-refactoring

 CHANGELOG | 14 ++++++++++++++
@@ -847,6 +847,20 @@ v3.1.1 (2016-11-15)
 v3.1.0 (2016-10-29)
 -------------------
 
+* AQL breaking change in cluster:
+
+  from ArangoDB 3.1 onwards `WITH` is required for traversals in a
+  clustered environment in order to avoid deadlocks.
+
+  Note that for queries that access only a single collection, or that have all
+  collection names specified somewhere else in the query string, there is no
+  need to use *WITH*. *WITH* is only needed when the AQL query parser cannot
+  automatically figure out which collections are going to be used by the query,
+  i.e. for queries that dynamically access collections, e.g. via traversals,
+  shortest path operations or the *DOCUMENT()* function.
+
+  More info can be found [here](https://github.com/arangodb/arangodb/blob/devel/Documentation/Books/AQL/Operations/With.md)
+
+* added AQL function `DISTANCE` to calculate the distance between two arbitrary
+  coordinates (haversine formula)
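A minimal AQL sketch of the `WITH` requirement described above (the collection names `users` and `relations` are hypothetical, not taken from this commit):

    // declare up front every collection the traversal may touch
    WITH users, relations
    FOR v, e, p IN 1..3 OUTBOUND 'users/john' relations
      RETURN v

For reference, the haversine formula mentioned for `DISTANCE` gives the great-circle distance between two points (latitude/longitude in radians) on a sphere of radius r; the radius used by the implementation is not shown in this diff:

    d = 2r \arcsin\sqrt{\sin^2\left(\frac{\varphi_2-\varphi_1}{2}\right) + \cos\varphi_1 \cos\varphi_2 \sin^2\left(\frac{\lambda_2-\lambda_1}{2}\right)}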
@@ -345,6 +345,17 @@ option value for individual queries when running an AQL query.
 The default value is *0*, meaning that there is no memory limit.
 
+
+### Turning AQL warnings into errors
+
+`--query.fail-on-warning value`
+
+When set to *true*, AQL queries that produce warnings will instantly abort and
+throw an exception. This option can be set to catch obvious issues with AQL
+queries early. When set to *false*, AQL queries that produce warnings will not
+abort, and will return the warnings along with the query results.
+The option can also be overridden for each individual AQL query.
+
+
 ### Enable/disable AQL query tracking
 
 `--query.tracking flag`
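Both options can also be set in the server configuration file; a sketch, assuming the usual mapping of `--query.X` command line options to a `[query]` section:

    [query]
    fail-on-warning = true
    tracking = true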
@@ -595,18 +595,10 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
       result.add("part", VPackValue("dependent"));
     }
 
-    result.add("options", VPackValue(VPackValueType::Object));
-    result.add("optimizer", VPackValue(VPackValueType::Object));
-    result.add("rules", VPackValue(VPackValueType::Array));
-    result.add(VPackValue("-all"));
-    result.close();  // options.optimizer.rules
-    result.close();  // options.optimizer
-    int64_t tracing = query->queryOptions().tracing;
-    result.add("tracing", VPackValue(tracing));
-    double satelliteSyncWait = query->queryOptions().satelliteSyncWait;
-    result.add("satelliteSyncWait", VPackValue(satelliteSyncWait));
-    result.close();  // options
+    result.add(VPackValue("options"));
+    // the toVelocyPack will open & close the "options" object
+    query->queryOptions().toVelocyPack(result, true);
 
     result.close();
 
     TRI_ASSERT(result.isClosed());
@@ -1239,7 +1231,7 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
       auto inst =
           std::make_unique<CoordinatorInstanciator>(query, queryRegistry);
       // optionally restrict query to certain shards
-      inst->includedShards(query->queryOptions().includedShards);
+      inst->includedShards(query->queryOptions().shardIds);
 
       try {
         plan->root()->walk(inst.get());  // if this throws, we need to
@@ -167,12 +167,55 @@ void QueryOptions::fromVelocyPack(VPackSlice const& slice) {
     while (it.valid()) {
       VPackSlice value = it.value();
       if (value.isString()) {
-        includedShards.emplace(value.copyString());
+        shardIds.emplace(value.copyString());
       }
       it.next();
     }
   }
 }
 
-void QueryOptions::toVelocyPack(VPackBuilder& builder) {
+void QueryOptions::toVelocyPack(VPackBuilder& builder, bool disableOptimizerRules) const {
   builder.openObject();
 
+  builder.add("memoryLimit", VPackValue(memoryLimit));
+  builder.add("maxNumberOfPlans", VPackValue(maxNumberOfPlans));
+  builder.add("maxWarningCount", VPackValue(maxWarningCount));
+  builder.add("literalSizeThreshold", VPackValue(literalSizeThreshold));
+  builder.add("tracing", VPackValue(tracing));
+  builder.add("satelliteSyncWait", VPackValue(satelliteSyncWait));
+  builder.add("profile", VPackValue(profile));
+  builder.add("allPlans", VPackValue(allPlans));
+  builder.add("verbosePlans", VPackValue(verbosePlans));
+  builder.add("silent", VPackValue(silent));
+  builder.add("failOnWarning", VPackValue(failOnWarning));
+  builder.add("cache", VPackValue(cache));
+  builder.add("fullCount", VPackValue(fullCount));
+  builder.add("count", VPackValue(count));
+  builder.add("verboseErrors", VPackValue(verboseErrors));
+
+  builder.add("optimizer", VPackValue(VPackValueType::Object));
+  builder.add("inspectSimplePlans", VPackValue(inspectSimplePlans));
+  if (!optimizerRules.empty() || disableOptimizerRules) {
+    builder.add("rules", VPackValue(VPackValueType::Array));
+    if (disableOptimizerRules) {
+      // turn off all optimizer rules
+      builder.add(VPackValue("-all"));
+    } else {
+      for (auto const& it : optimizerRules) {
+        builder.add(VPackValue(it));
+      }
+    }
+    builder.close();  // optimizer.rules
+  }
+  builder.close();  // optimizer
+
+  if (!shardIds.empty()) {
+    builder.add("shardIds", VPackValue(VPackValueType::Array));
+    for (auto const& it : shardIds) {
+      builder.add(VPackValue(it));
+    }
+    builder.close();  // shardIds
+  }
 
   builder.close();
 }
@@ -38,7 +38,7 @@ struct QueryOptions {
   QueryOptions();
 
   void fromVelocyPack(arangodb::velocypack::Slice const&);
-  void toVelocyPack(arangodb::velocypack::Builder&);
+  void toVelocyPack(arangodb::velocypack::Builder&, bool disableOptimizerRules) const;
 
   size_t memoryLimit;
   size_t maxNumberOfPlans;
 
@@ -57,7 +57,7 @@ struct QueryOptions {
   bool verboseErrors;
   bool inspectSimplePlans;
   std::vector<std::string> optimizerRules;
-  std::unordered_set<std::string> includedShards;
+  std::unordered_set<std::string> shardIds;
 };
 
 }
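The "-all" pseudo-rule written above is what the coordinator uses to switch off every optimizer rule on the receiving server. A minimal standalone sketch (not part of the commit) of the optimizer sub-object that `toVelocyPack(builder, true)` emits, using the same velocypack calls as the diff:

    #include <velocypack/Builder.h>
    #include <velocypack/Slice.h>
    #include <velocypack/velocypack-aliases.h>
    #include <iostream>

    int main() {
      // build {"optimizer":{"rules":["-all"]}} by hand, mirroring the
      // disableOptimizerRules branch of QueryOptions::toVelocyPack
      VPackBuilder builder;
      builder.openObject();
      builder.add("optimizer", VPackValue(VPackValueType::Object));
      builder.add("rules", VPackValue(VPackValueType::Array));
      builder.add(VPackValue("-all"));  // "-all" = turn off all optimizer rules
      builder.close();                  // rules
      builder.close();                  // optimizer
      builder.close();
      std::cout << builder.slice().toJson() << std::endl;
      return 0;
    }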
@@ -78,6 +78,47 @@ class Methods;
 }
 namespace rocksutils {
 
+template <typename T>
+typename std::enable_if<std::is_integral<T>::value, void>::type
+toPersistent(T in, char* out) {
+  using TT = typename std::decay<T>::type;
+  std::memcpy(out, &in, sizeof(TT));
+  out += sizeof(TT);
+}
+
+template <typename T,
+          typename std::enable_if<std::is_integral<typename std::remove_reference<T>::type>::value, int>::type = 0>
+typename std::decay<T>::type fromPersistent(char const* in) {
+  using TT = typename std::decay<T>::type;
+  TT out;
+  std::memcpy(&out, in, sizeof(TT));
+  in += sizeof(TT);
+  return out;
+}
+
+template <typename T, typename StringLike,
+          typename std::enable_if<std::is_integral<typename std::remove_reference<T>::type>::value, int>::type = 2>
+typename std::decay<T>::type fromPersistent(StringLike& in) {
+  using TT = typename std::decay<T>::type;
+  TT out;
+  std::memcpy(&out, in.data(), sizeof(TT));
+  return out;
+}
+
+inline uint64_t doubleToInt(double d) {
+  uint64_t i;
+  std::memcpy(&i, &d, sizeof(i));
+  return i;
+}
+
+inline double intToDouble(uint64_t i) {
+  double d;
+  std::memcpy(&d, &i, sizeof(i));
+  return d;
+}
+
 uint64_t uint64FromPersistent(char const* p);
 void uint64ToPersistent(char* p, uint64_t value);
 void uint64ToPersistent(std::string& out, uint64_t value);
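A minimal round-trip sketch for these helpers (a hypothetical standalone test, assuming this header is on the include path as in the TypeConversionTest added at the end of this commit). Note that callers advance the buffer cursor themselves; the pointer increments inside the helpers only affect local copies:

    #include <cassert>
    #include <cstdint>

    #include "RocksDBEngine/RocksDBCommon.h"

    int main() {
      using namespace arangodb::rocksutils;

      // integral round trip: value -> raw bytes -> value
      uint64_t original = 1234567890123456789ULL;
      char buffer[sizeof(uint64_t)];
      toPersistent(original, &buffer[0]);
      assert(fromPersistent<uint64_t>(&buffer[0]) == original);

      // doubles are bit-cast to uint64_t so they can reuse the integral path
      double d = 3.141592653589793;
      assert(intToDouble(doubleToInt(d)) == d);
      return 0;
    }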
@@ -33,6 +33,7 @@
 
 #include <velocypack/Builder.h>
 #include <velocypack/velocypack-aliases.h>
+#include <type_traits>
 
 namespace arangodb {
 
@@ -99,14 +99,14 @@ typedef struct {
   /* only used for a leaf pot. */
 /* =================================================== */
 typedef struct {
-  int LorLeaf;
-  int RorPoints;
+  int32_t LorLeaf;
+  int32_t RorPoints;
   GeoString middle;
   GeoFix maxdist[GeoIndexFIXEDPOINTS];
   GeoString start;
   GeoString end;
-  int level;
-  int points[GeoIndexPOTSIZE];
+  int32_t level;
+  int32_t points[GeoIndexPOTSIZE];
 } GeoPot;
 /* =================================================== */
 /* GeoIx structure */
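The int to int32_t change here (like the GeoString change further below) pins the struct to fixed-width types: GeoPot is persisted field by field, and plain `int` has no guaranteed size across platforms. A small illustration of the guarantee being relied on (assumption: writer and reader must agree on the byte layout):

    #include <cstdint>

    // Fixed-width types have the same size everywhere, which is what a
    // persisted struct such as GeoPot relies on.
    static_assert(sizeof(std::int32_t) == 4, "int32_t is exactly 4 bytes");
    static_assert(sizeof(std::uint64_t) == 8, "uint64_t is exactly 8 bytes");
    // No such guarantee exists for plain `int` or `std::uint_fast64_t`.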
@@ -266,8 +266,98 @@ typedef struct {
 #include <StorageEngine/EngineSelectorFeature.h>
 
 namespace arangodb { namespace rocksdbengine {
 
+GeoCoordinate& fromPersistent(char const* in, GeoCoordinate& out) {
+  const char* start = in;
+
+  // convert latitude and longitude to uint64 for network transfer / storage
+  uint64_t fromStorage = rocksutils::fromPersistent<uint64_t>(start);
+  start += sizeof(uint64_t);
+  out.latitude = rocksutils::intToDouble(fromStorage);
+
+  fromStorage = rocksutils::fromPersistent<uint64_t>(start);
+  start += sizeof(uint64_t);
+  out.longitude = rocksutils::intToDouble(fromStorage);
+
+  out.data = rocksutils::fromPersistent<uint64_t>(start);
+  start += sizeof(uint64_t);
+
+  return out;
+}
+
+void toPersistent(GeoCoordinate& in, char* out) {
+  char* start = out;
+
+  uint64_t toStorage = rocksutils::doubleToInt(in.latitude);
+  rocksutils::toPersistent(toStorage, start);
+  start += sizeof(in.latitude);
+
+  toStorage = rocksutils::doubleToInt(in.longitude);
+  rocksutils::toPersistent(toStorage, start);
+  start += sizeof(in.longitude);
+
+  rocksutils::toPersistent(in.data, start);
+  start += sizeof(in.data);
+}
+
+GeoPot& fromPersistent(char const* in, GeoPot& out) {
+  const char* start = in;
+
+  out.LorLeaf = rocksutils::fromPersistent<int32_t>(start);
+  start += sizeof(int32_t);
+  out.RorPoints = rocksutils::fromPersistent<int32_t>(start);
+  start += sizeof(int32_t);
+  out.middle = rocksutils::fromPersistent<GeoString>(start);
+  start += sizeof(GeoString);
+
+  for (std::size_t i = 0; i < GeoIndexFIXEDPOINTS; i++) {
+    out.maxdist[i] = rocksutils::fromPersistent<GeoFix>(start);
+    start += sizeof(GeoFix);
+  }
+
+  out.start = rocksutils::fromPersistent<GeoString>(start);
+  start += sizeof(GeoString);
+  out.end = rocksutils::fromPersistent<GeoString>(start);
+  start += sizeof(GeoString);
+  out.level = rocksutils::fromPersistent<int32_t>(start);
+  start += sizeof(int32_t);
+
+  for (std::size_t i = 0; i < GeoIndexFIXEDPOINTS; i++) {
+    out.points[i] = rocksutils::fromPersistent<int32_t>(start);
+    start += sizeof(int32_t);
+  }
+
+  return out;
+}
+
+void toPersistent(GeoPot const& in, char* out) {
+  char* start = out;
+
+  rocksutils::toPersistent(in.LorLeaf, start);
+  start += sizeof(int32_t);
+  rocksutils::toPersistent(in.RorPoints, start);
+  start += sizeof(int32_t);
+  rocksutils::toPersistent(in.middle, start);
+  start += sizeof(GeoString);
+
+  for (std::size_t i = 0; i < GeoIndexFIXEDPOINTS; i++) {
+    rocksutils::toPersistent(in.maxdist[i], start);
+    start += sizeof(GeoFix);
+  }
+
+  rocksutils::toPersistent(in.start, start);
+  start += sizeof(GeoString);
+  rocksutils::toPersistent(in.end, start);
+  start += sizeof(GeoString);
+  rocksutils::toPersistent(in.level, start);
+  start += sizeof(int32_t);
+
+  for (std::size_t i = 0; i < GeoIndexFIXEDPOINTS; i++) {
+    rocksutils::toPersistent(in.points[i], start);
+    start += sizeof(int32_t);
+  }
+}
+
 /* CRUD interface */
 
 void GeoIndex_setRocksMethods(GeoIdx* gi, RocksDBMethods* trx) {
@@ -279,14 +369,14 @@ void GeoIndex_clearRocks(GeoIdx* gi) {
   GeoIx* gix = (GeoIx*)gi;
   gix->rocksMethods = nullptr;
 }
 
 inline void RocksRead(GeoIx* gix, RocksDBKey const& key, std::string* val) {
   arangodb::Result r = gix->rocksMethods->Get(RocksDBColumnFamily::geo(), key, val);
   if (!r.ok()) {
     THROW_ARANGO_EXCEPTION_MESSAGE(r.errorNumber(), r.errorMessage());
   }
 }
 
 inline void RocksWrite(GeoIx* gix,
                        RocksDBKey const& key,
                        rocksdb::Slice const& slice) {
@@ -322,13 +412,23 @@ void SlotRead(GeoIx* gix, int slot, GeoCoordinate* gc /*out param*/)
   RocksDBKey key = RocksDBKey::GeoIndexValue(gix->objectId, slot, true);
   std::string slotValue;
   RocksRead(gix, key, &slotValue);
-  memcpy(gc, slotValue.data(), slotValue.size());
+  fromPersistent(slotValue.data(), *gc);
+  //memcpy(gc, slotValue.data(), slotValue.size());
 }
 void SlotWrite(GeoIx* gix, int slot, GeoCoordinate* gc)
 {
   RocksDBKey key = RocksDBKey::GeoIndexValue(gix->objectId, slot, true);
-  RocksWrite(gix, key, rocksdb::Slice((char*)gc,
-                                      sizeof(GeoCoordinate)));
+  char data[sizeof(GeoCoordinate)];
+  toPersistent(*gc, &data[0]);
+  RocksWrite(gix, key, rocksdb::Slice(&data[0], sizeof(GeoCoordinate)));
+
+  GeoCoordinate test;
+  fromPersistent(&data[0], test);
+  // RocksWrite(gix, key, rocksdb::Slice((char*)gc, sizeof(GeoCoordinate)));
+
+  TRI_ASSERT(test.longitude == gc->longitude);
+  TRI_ASSERT(test.latitude == gc->latitude);
+  TRI_ASSERT(test.data == gc->data);
 }
 
 void PotRead(GeoIx* gix, int pot, GeoPot* gp)
@@ -336,12 +436,21 @@ void PotRead(GeoIx* gix, int pot, GeoPot* gp)
   RocksDBKey key = RocksDBKey::GeoIndexValue(gix->objectId, pot, false);
   std::string potValue;
   RocksRead(gix, key, &potValue);
-  memcpy(gp, potValue.data(), potValue.size());
+  TRI_ASSERT(potValue.size() == sizeof(GeoPot));
+  fromPersistent(potValue.data(), *gp);
+  //memcpy(gp, potValue.data(), potValue.size());
 }
 
 void PotWrite(GeoIx* gix, int pot, GeoPot* gp) {
   RocksDBKey key = RocksDBKey::GeoIndexValue(gix->objectId, pot, false);
-  RocksWrite(gix, key, rocksdb::Slice((char*)gp, sizeof(GeoPot)));
+  char data[sizeof(GeoPot)];
+  toPersistent(*gp, &data[0]);
+  RocksWrite(gix, key, rocksdb::Slice(&data[0], sizeof(GeoPot)));
+  //RocksWrite(gix, key, rocksdb::Slice((char*)gp, sizeof(GeoPot)));
+
+  GeoPot test;
+  fromPersistent(&data[0], test);
+  TRI_ASSERT(test.level == gp->level);
 }
 
 /* =================================================== */
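Why the whole-struct memcpy is being replaced: the raw in-memory image of a struct includes compiler-chosen padding and platform-dependent field sizes, so it is not a stable storage format, whereas the field-wise helpers above write a fixed byte sequence. A hypothetical illustration (not from the commit):

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // The compiler will usually insert 7 bytes of padding between `tag` and
    // `value`, so memcpy-ing the whole struct writes layout-dependent bytes.
    struct Sample {
      char tag;
      std::uint64_t value;
    };

    int main() {
      std::cout << "sizeof(Sample) = " << sizeof(Sample) << "\n";  // typically 16, not 9

      // Field-wise serialization writes exactly 9 meaningful bytes in a fixed
      // order, independent of the in-memory layout.
      Sample s{'a', 42};
      char out[sizeof(s.tag) + sizeof(s.value)];
      std::memcpy(out, &s.tag, sizeof(s.tag));
      std::memcpy(out + sizeof(s.tag), &s.value, sizeof(s.value));
      return 0;
    }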
@@ -37,7 +37,7 @@ namespace rocksdbengine {
 /* first the things that a user might want to change */
 
 /* a GeoString - an unsigned type of exactly 64 bits */
-typedef std::uint_fast64_t GeoString;
+typedef std::uint64_t GeoString;
 
 /* percentage growth of slot or slotslot tables */
 #define GeoIndexGROW 50
@@ -208,14 +208,14 @@ if (MSVC AND NOT(SKIP_PACKAGING))
   include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/InstallMacros.cmake)
   set(CMAKE_INSTALL_FULL_SBINDIR "${CMAKE_INSTALL_FULL_BINDIR}")
 
   install_readme(README.windows README.windows.txt)
 
   # install the visual studio runtime:
   set(CMAKE_INSTALL_UCRT_LIBRARIES 1)
   set(CMAKE_INSTALL_SYSTEM_RUNTIME_DESTINATION ${CMAKE_INSTALL_BINDIR})
   include(InstallRequiredSystemLibraries)
-  INSTALL(FILES ${CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS} DESTINATION ${CMAKE_INSTALL_SBINDIR} COMPONENT Libraries)
-  INSTALL(FILES ${CMAKE_INSTALL_SYSTEM_RUNTIME_COMPONENT} DESTINATION ${CMAKE_INSTALL_SBINDIR} COMPONENT Libraries)
+  INSTALL(FILES ${CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS} DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT Libraries)
+  INSTALL(FILES ${CMAKE_INSTALL_SYSTEM_RUNTIME_COMPONENT} DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT Libraries)
 
   # install openssl
   if (NOT LIB_EAY_RELEASE_DLL OR NOT SSL_EAY_RELEASE_DLL)
@@ -1,5 +1,5 @@
 set(W_INSTALL_FILES "${PROJECT_SOURCE_DIR}/Installation/Windows/")
 
 set(CPACK_MONOLITHIC_INSTALL 1)
 set(CPACK_NSIS_DISPLAY_NAME, ${ARANGODB_DISPLAY_NAME})
 set(CPACK_NSIS_HELP_LINK ${ARANGODB_HELP_LINK})
 set(CPACK_NSIS_URL_INFO_ABOUT ${ARANGODB_URL_INFO_ABOUT})
@@ -0,0 +1,110 @@
/*jshint globalstrict:false, strict:false, maxlen: 500 */
/*global assertEqual, assertTrue, AQL_EXECUTE */

////////////////////////////////////////////////////////////////////////////////
/// @brief tests for query language, limit optimizations
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

var jsunity = require("jsunity");
var internal = require("internal");
var db = internal.db;

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////

function ahuacatlShardIdsTestSuite () {
  var collection = null;
  var cn = "UnitTestsShardIds";

  return {

////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////

    setUp : function () {
      internal.db._drop(cn);
      collection = internal.db._create(cn, { numberOfShards: 4 });

      for (var i = 0; i < 100; ++i) {
        collection.save({ "value" : i });
      }

      var result = collection.count(true), sum = 0, shards = Object.keys(result);
      assertEqual(4, shards.length);
      shards.forEach(function(key) {
        sum += result[key];
      });
      assertEqual(100, sum);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////

    tearDown : function () {
      internal.db._drop(cn);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief no restriction to a shard
////////////////////////////////////////////////////////////////////////////////

    testQueryUnrestricted : function () {
      var query = "FOR doc IN " + cn + " RETURN 1";

      var actual = AQL_EXECUTE(query);
      assertEqual(100, actual.json.length);
    },

    testQueryRestrictedToShards : function () {
      var count = collection.count(true);
      var shards = Object.keys(count);
      var query = "FOR doc IN " + cn + " RETURN 1";

      assertTrue(shards.length > 1);
      var sum = 0;
      shards.forEach(function(s) {
        var actual = AQL_EXECUTE(query, null, { shardIds: [ s ] });
        assertEqual(count[s], actual.json.length);
        sum += actual.json.length;
      });

      assertEqual(100, sum);
    }
  };
}

////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////

jsunity.run(ahuacatlShardIdsTestSuite);

return jsunity.done();
@@ -25,7 +25,11 @@
 #define ARANGODB_BASICS_COMMON_H 1
 
 #ifdef _WIN32
 
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN 1
+#endif
+
 // debug malloc for Windows (only used when DEBUG is set)
 #define _CRTDBG_MAP_ALLOC
 #include <stdlib.h>
@@ -36,7 +36,7 @@ suite_all(){
   local tests=""
   case $count in
     num)
-      echo "6"
+      echo "11"
       return
       ;;
     name)
@@ -48,21 +48,33 @@ suite_all(){
       return
      ;;
     1)
-      tests="shell_server shell_client"
+      tests="shell_server"
       ;;
     2)
-      tests="http_server server_http"
+      tests="shell_client"
+      ;;
+    3)
+      tests="shell_server_aql"
+      ;;
+    4)
+      tests="http_server"
+      ;;
+    5)
+      tests="server_http"
+      ;;
+    6)
+      tests="dump importing"
       ;;
-    3)
-      tests="dump importing export arangobench upgrade"
+    7)
+      tests="export arangobench upgrade"
       ;;
-    4)
-      tests="replication_sync replication_static replication_ongoing http_replication shell_replication"
+    8)
+      tests="replication_sync replication_static"
+      ;;
+    9)
+      tests="replication_ongoing http_replication shell_replication"
+      ;;
+    10)
+      tests="agency cluster_sync"
       ;;
     *)
@@ -55,6 +55,7 @@ add_executable(
   Geo/georeg.cpp
   Pregel/typedbuffer.cpp
   RocksDBEngine/IndexEstimatorTest.cpp
+  RocksDBEngine/TypeConversionTest.cpp
   main.cpp
 )
@@ -0,0 +1,81 @@
////////////////////////////////////////////////////////////////////////////////
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2004-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Jan Christoph Uhde
/// @author Copyright 2017, ArangoDB GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////


#include "catch.hpp"
#include "RocksDBEngine/RocksDBCommon.h"

#include <vector>
#include <limits>

using namespace arangodb;

// -----------------------------------------------------------------------------
// --SECTION--                                                        test suite
// -----------------------------------------------------------------------------

using namespace arangodb::rocksutils;
// @brief setup

void doFromToTest(double num) {
  CHECK((num == intToDouble(doubleToInt(num))));
}

template <typename T>
void doFromToTest(T num) {
  T x = num, y;
  char s[sizeof(x)];
  toPersistent(x, &s[0]);
  y = fromPersistent<decltype(y)>(&s[0]);
  CHECK((x == y));
}

TEST_CASE("TypeConversion", "[type_conv]") {

// @brief Test fixme

SECTION("test_from_to_persist_uint64") {
  doFromToTest(std::numeric_limits<uint64_t>::min());
  doFromToTest(std::numeric_limits<uint64_t>::max() / 2);
  doFromToTest(std::numeric_limits<uint64_t>::max());
}

SECTION("test_from_to_persist_int32") {
  doFromToTest(std::numeric_limits<int32_t>::min());
  doFromToTest(std::numeric_limits<int32_t>::lowest());
  doFromToTest(std::numeric_limits<int32_t>::max() / 2);
  doFromToTest(std::numeric_limits<int32_t>::max());
}

// @brief generate tests
SECTION("test_from_to_double") {
  doFromToTest(std::numeric_limits<double>::min());
  doFromToTest(std::numeric_limits<double>::lowest());
  doFromToTest(std::numeric_limits<double>::max() / 2);
  doFromToTest(std::numeric_limits<double>::max());
}
}