
Merge branch 'devel' of github.com:arangodb/arangodb into devel

Michael Hackstein 2016-10-10 09:45:55 +02:00
commit 02f854beeb
11 changed files with 151 additions and 68 deletions


@@ -8,40 +8,42 @@ SWAGGER=1
EXAMPLES=1
LINT=1
if [ "$1" == "--no-lint" ]; then
LINT=0
shift
fi
while [ "$#" -gt 1 ]; do
if [ "$1" == "--no-lint" ]; then
LINT=0
shift
fi
if [ "$1" == "--no-build" ]; then
BUILD=0
shift
fi
if [ "$1" == "--no-build" ]; then
BUILD=0
shift
fi
if [ "$1" == "--recycle-build" ]; then
BUILD=2
shift
fi
if [ "$1" == "--recycle-build" ]; then
BUILD=2
shift
fi
if [ "$1" == "--no-swagger" ]; then
SWAGGER=0
shift
fi
if [ "$1" == "--no-swagger" ]; then
SWAGGER=0
shift
fi
if [ "$1" == "--no-examples" ]; then
EXAMPLES=0
shift
fi
if [ "$1" == "--no-examples" ]; then
EXAMPLES=0
shift
fi
if [ "$1" == "--no-commit" ]; then
TAG=0
shift
fi
if [ "$1" == "--no-commit" ]; then
TAG=0
shift
fi
if [ "$1" == "--no-book" ]; then
BOOK=0
shift
fi
if [ "$1" == "--no-book" ]; then
BOOK=0
shift
fi
done
if [ "$#" -ne 1 ]; then
echo "usage: $0 <major>.<minor>.<revision>"
@@ -85,8 +87,14 @@ if [ `uname` == "Darwin" ]; then
CMAKE_CONFIGURE="${CMAKE_CONFIGURE} -DOPENSSL_ROOT_DIR=/usr/local/opt/openssl -DCMAKE_OSX_DEPLOYMENT_TARGET=10.11"
fi
ENTERPRISE=0
if [ -d enterprise ]; then
ENTERPRISE=1
fi
if [ "$BUILD" != "0" ]; then
echo "COMPILING"
echo "COMPILING COMMUNITY"
if [ "$BUILD" == "1" ]; then
rm -rf build && mkdir build
@@ -97,6 +105,20 @@ if [ "$BUILD" != "0" ]; then
cmake .. ${CMAKE_CONFIGURE}
make -j 8
)
if [ "$ENTERPRISE" == "1" ]; then
echo "COMPILING ENTERPRISE"
if [ "$BUILD" == "1" ]; then
rm -rf build-enterprise && mkdir build-enterprise
fi
(
cd build-enterprise
cmake .. ${CMAKE_CONFIGURE} -DUSE_ENTERPRISE=ON
make -j 8
)
fi
fi
if [ "$LINT" == "1" ]; then
@@ -158,6 +180,17 @@ if [ "$TAG" == "1" ]; then
git tag "v$VERSION"
git push --tags
if [ "$ENTERPRISE" == "1" ]; then
(
cd enterprise
git commit -m "release version $VERSION enterprise" -a
git push
git tag "v$VERSION"
git push --tags
)
fi
echo
echo "--------------------------------------------------"
echo "Remember to update the VERSION in 'devel' as well."


@@ -565,7 +565,7 @@ AstNode* Ast::createNodeCollection(char const* name,
_query->collections()->add(name, accessType);
if (ServerState::instance()->isRunningInCluster()) {
if (ServerState::instance()->isCoordinator()) {
auto ci = ClusterInfo::instance();
// We want to tolerate that a collection name is given here
// which does not exist, if only for some unit tests:
@@ -1004,7 +1004,7 @@ AstNode* Ast::createNodeWithCollections (AstNode const* collections) {
if (c->isStringValue()) {
std::string name = c->getString();
_query->collections()->add(name, TRI_TRANSACTION_READ);
if (ServerState::instance()->isRunningInCluster()) {
if (ServerState::instance()->isCoordinator()) {
auto ci = ClusterInfo::instance();
// We want to tolerate that a collection name is given here
// which does not exist, if only for some unit tests:
@@ -1040,7 +1040,7 @@ AstNode* Ast::createNodeCollectionList(AstNode const* edgeCollections) {
auto doTheAdd = [&](std::string name) {
_query->collections()->add(name, TRI_TRANSACTION_READ);
if (ss->isRunningInCluster()) {
if (ss->isCoordinator()) {
try {
auto c = ci->getCollection(_query->vocbase()->name(), name);
auto names = c->realNames();
@@ -1490,7 +1490,7 @@ void Ast::injectBindParameters(BindParameters& parameters) {
for (const auto& n : eColls) {
_query->collections()->add(n, TRI_TRANSACTION_READ);
}
if (ServerState::instance()->isRunningInCluster()) {
if (ServerState::instance()->isCoordinator()) {
auto ci = ClusterInfo::instance();
for (const auto& n : eColls) {
try {
@@ -1519,7 +1519,7 @@ void Ast::injectBindParameters(BindParameters& parameters) {
for (const auto& n : eColls) {
_query->collections()->add(n, TRI_TRANSACTION_READ);
}
if (ServerState::instance()->isRunningInCluster()) {
if (ServerState::instance()->isCoordinator()) {
auto ci = ClusterInfo::instance();
for (const auto& n : eColls) {
try {
@@ -1545,7 +1545,7 @@ void Ast::injectBindParameters(BindParameters& parameters) {
if (it->type == NODE_TYPE_COLLECTION) {
std::string name = it->getString();
_query->collections()->add(name, TRI_TRANSACTION_WRITE);
if (ServerState::instance()->isRunningInCluster()) {
if (ServerState::instance()->isCoordinator()) {
auto ci = ClusterInfo::instance();
// We want to tolerate that a collection name is given here
// which does not exist, if only for some unit tests:
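
The hunks above narrow the guard around the ClusterInfo lookups from isRunningInCluster() to isCoordinator(), presumably because only coordinators should consult the cluster-wide collection catalog here. A minimal, self-contained sketch of the distinction, using a simplified role enum instead of ServerState and a stubbed lookup:

#include <iostream>

enum class Role { Single, Coordinator, DbServer };             // simplified stand-in for ServerState roles

bool isRunningInCluster(Role r) { return r != Role::Single; }  // true on coordinators AND DB servers
bool isCoordinator(Role r) { return r == Role::Coordinator; }  // true only on coordinators

void maybeLookUpCollection(Role r) {
  // after this change the catalog lookup runs only on coordinators
  if (isCoordinator(r)) {
    std::cout << "consulting ClusterInfo catalog\n";
  }
}

int main() {
  maybeLookUpCollection(Role::DbServer);      // matched the old isRunningInCluster() check, now skipped
  maybeLookUpCollection(Role::Coordinator);   // still performs the lookup
}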


@@ -251,6 +251,17 @@ class DistributeNode : public ExecutionNode {
/// @brief set collection
void setCollection(Collection* coll) { _collection = coll; }
/// @brief set varId
void setVarId(VariableId varId) { _varId = varId; }
/// @brief set alternativeVarId
void setAlternativeVarId(VariableId alternativeVarId) {
_alternativeVarId = alternativeVarId;
}
/// @brief set createKeys
void setCreateKeys(bool b) { _createKeys = b; }
private:
/// @brief the underlying database
TRI_vocbase_t* _vocbase;
@@ -259,14 +270,14 @@ class DistributeNode : public ExecutionNode {
Collection const* _collection;
/// @brief the variable we must inspect to know where to distribute
VariableId const _varId;
VariableId _varId;
/// @brief an optional second variable we must inspect to know where to
/// distribute
VariableId const _alternativeVarId;
VariableId _alternativeVarId;
/// @brief the node is responsible for creating document keys
bool const _createKeys;
bool _createKeys;
/// @brief allow conversion of key to object
bool const _allowKeyConversionToObject;
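
This hunk adds setters and drops const from the corresponding members, presumably so that an already constructed DistributeNode can be re-pointed later (e.g. by an optimizer rule). A self-contained sketch of that pattern with a simplified stand-in type, not the real DistributeNode:

#include <iostream>

using VariableId = unsigned int;

class DistributeNodeSketch {
 public:
  explicit DistributeNodeSketch(VariableId varId) : _varId(varId) {}

  // setter is only possible because _varId is no longer declared const
  void setVarId(VariableId varId) { _varId = varId; }
  VariableId varId() const { return _varId; }

 private:
  VariableId _varId;   // was 'VariableId const' before this change
};

int main() {
  DistributeNodeSketch node(1);
  node.setVarId(2);    // caller rewires the node to inspect another variable
  std::cout << "distributing on variable " << node.varId() << "\n";
}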


@@ -432,12 +432,17 @@ AqlItemBlock* InsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
// value is no object
errorCode = TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID;
} else {
OperationResult opRes = _trx->insert(_collection->name, a.slice(), options);
errorCode = opRes.code;
if (!ep->_options.consultAqlWriteFilter ||
!_collection->getCollection()->skipForAqlWrite(a.slice())) {
OperationResult opRes = _trx->insert(_collection->name, a.slice(), options);
errorCode = opRes.code;
if (producesOutput && errorCode == TRI_ERROR_NO_ERROR) {
// return $NEW
result->setValue(dstRow, _outRegNew, AqlValue(opRes.slice().get("new")));
if (producesOutput && errorCode == TRI_ERROR_NO_ERROR) {
// return $NEW
result->setValue(dstRow, _outRegNew, AqlValue(opRes.slice().get("new")));
}
} else {
errorCode = TRI_ERROR_NO_ERROR;
}
}
@@ -454,32 +459,45 @@ AqlItemBlock* InsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
// only copy 1st row of registers inherited from previous frame(s)
inheritRegisters(res, result.get(), i, dstRow);
// TODO This may be optimized with externals
babyBuilder.add(a.slice());
if (!ep->_options.consultAqlWriteFilter ||
!_collection->getCollection()->skipForAqlWrite(a.slice())) {
babyBuilder.add(a.slice());
}
++dstRow;
}
babyBuilder.close();
VPackSlice toSend = babyBuilder.slice();
OperationResult opRes =
_trx->insert(_collection->name, toSend, options);
OperationResult opRes;
if (toSend.length() > 0) {
opRes = _trx->insert(_collection->name, toSend, options);
if (producesOutput) {
// Reset dstRow
dstRow -= n;
VPackSlice resultList = opRes.slice();
TRI_ASSERT(resultList.isArray());
for (auto const& elm: VPackArrayIterator(resultList)) {
bool wasError = arangodb::basics::VelocyPackHelper::getBooleanValue(
elm, "error", false);
if (!wasError) {
// return $NEW
result->setValue(dstRow, _outRegNew, AqlValue(elm.get("new")));
if (producesOutput) {
// Reset dstRow
dstRow -= n;
VPackSlice resultList = opRes.slice();
TRI_ASSERT(resultList.isArray());
auto iter = VPackArrayIterator(resultList);
for (size_t i = 0; i < n; ++i) {
AqlValue a = res->getValue(i, registerId);
if (!ep->_options.consultAqlWriteFilter ||
!_collection->getCollection()->skipForAqlWrite(a.slice())) {
TRI_ASSERT(iter.valid());
auto elm = iter.value();
bool wasError = arangodb::basics::VelocyPackHelper::getBooleanValue(
elm, "error", false);
if (!wasError) {
// return $NEW
result->setValue(dstRow, _outRegNew, AqlValue(elm.get("new")));
}
++iter;
}
++dstRow;
}
++dstRow;
}
}
handleBabyResult(opRes.countErrorCodes, static_cast<size_t>(toSend.length()),
ep->_options.ignoreErrors);
handleBabyResult(opRes.countErrorCodes, static_cast<size_t>(toSend.length()),
ep->_options.ignoreErrors);
}
}
// now free it already
(*it) = nullptr;
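
The second hunk pairs the (possibly shorter) server result back to the original input rows: rows skipped by the write filter never enter the request, so the result iterator only advances for rows that were actually sent, while the output row advances for every input row. A self-contained sketch of that bookkeeping with simplified stand-in types, not the AQL block classes:

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct Row { std::string doc; bool filteredOut; };            // filteredOut mimics skipForAqlWrite()
struct InsertResult { bool error; std::string newDoc; };

int main() {
  std::vector<Row> rows = {{"d1", false}, {"d2", true}, {"d3", false}};

  // build the batch, leaving out filtered rows (the consultAqlWriteFilter path)
  std::vector<std::string> toSend;
  for (auto const& r : rows) {
    if (!r.filteredOut) { toSend.push_back(r.doc); }
  }

  // pretend the server answered with exactly one result per document sent
  std::vector<InsertResult> results;
  for (auto const& d : toSend) { results.push_back({false, d + "-new"}); }

  // pair results back to input rows: advance the result iterator only for
  // rows that were part of the request; the output row advances either way
  auto it = results.begin();
  for (std::size_t dstRow = 0; dstRow < rows.size(); ++dstRow) {
    if (!rows[dstRow].filteredOut) {
      if (!it->error) {
        std::cout << "row " << dstRow << " -> $NEW = " << it->newDoc << "\n";
      }
      ++it;
    }
  }
}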


@@ -42,6 +42,8 @@ ModificationOptions::ModificationOptions(VPackSlice const& slice) {
basics::VelocyPackHelper::getBooleanValue(obj, "readCompleteInput", true);
useIsRestore =
basics::VelocyPackHelper::getBooleanValue(obj, "useIsRestore", false);
consultAqlWriteFilter =
basics::VelocyPackHelper::getBooleanValue(obj, "consultAqlWriteFilter", false);
}
void ModificationOptions::toVelocyPack(VPackBuilder& builder) const {
@@ -53,4 +55,5 @@ void ModificationOptions::toVelocyPack(VPackBuilder& builder) const {
builder.add("ignoreDocumentNotFound", VPackValue(ignoreDocumentNotFound));
builder.add("readCompleteInput", VPackValue(readCompleteInput));
builder.add("useIsRestore", VPackValue(useIsRestore));
builder.add("consultAqlWriteFilter", VPackValue(consultAqlWriteFilter));
}
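
The new consultAqlWriteFilter attribute is read with a default of false, so options serialized before this change still deserialize to the old behaviour. A dependency-free sketch of that default-when-absent pattern; a plain map stands in for the VelocyPack object and the helper name mirrors, but is not, VelocyPackHelper::getBooleanValue:

#include <iostream>
#include <map>
#include <string>

// return the stored value, or the supplied default when the attribute is missing
bool getBooleanValueSketch(std::map<std::string, bool> const& obj,
                           std::string const& name, bool defaultValue) {
  auto it = obj.find(name);
  return it == obj.end() ? defaultValue : it->second;
}

int main() {
  std::map<std::string, bool> oldOptions = {{"useIsRestore", false}};  // no consultAqlWriteFilter yet
  std::cout << getBooleanValueSketch(oldOptions, "consultAqlWriteFilter", false) << "\n";  // 0: stays off
}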


@@ -44,7 +44,8 @@ struct ModificationOptions {
mergeObjects(true),
ignoreDocumentNotFound(false),
readCompleteInput(true),
useIsRestore(false) {}
useIsRestore(false),
consultAqlWriteFilter(false) {}
void toVelocyPack(arangodb::velocypack::Builder&) const;
@@ -55,6 +56,7 @@ struct ModificationOptions {
bool ignoreDocumentNotFound;
bool readCompleteInput;
bool useIsRestore;
bool consultAqlWriteFilter;
};
} // namespace arangodb::aql


@@ -2373,7 +2373,7 @@ int ClusterInfo::getResponsibleShard(LogicalCollection* collInfo,
std::shared_ptr<std::vector<std::string>> shardKeysPtr;
std::shared_ptr<std::vector<ShardID>> shards;
bool found = false;
CollectionID collectionId = collInfo->planId();
CollectionID collectionId = std::to_string(collInfo->planId());
while (true) {
{


@@ -1692,6 +1692,8 @@ int RestReplicationHandler::processRestoreCollectionCoordinator(
std::string&& newId = StringUtils::itoa(newIdTick);
toMerge.openObject();
toMerge.add("id", VPackValue(newId));
toMerge.add("cid", VPackValue(newId));
toMerge.add("planId", VPackValue(newId));
// shard keys
VPackSlice const shardKeys = parameters.get("shardKeys");


@@ -877,9 +877,11 @@ bool LogicalCollection::allowUserKeys() const {
return _allowUserKeys;
}
#ifndef USE_ENTERPRISE
bool LogicalCollection::usesDefaultShardKeys() const {
return (_shardKeys.size() == 1 && _shardKeys[0] == StaticStrings::KeyString);
}
#endif
std::vector<std::string> const& LogicalCollection::shardKeys() const {
return _shardKeys;
@@ -3400,3 +3402,11 @@ void LogicalCollection::newObjectForRemove(
builder.add(StaticStrings::RevString, VPackValue(rev));
builder.close();
}
/// @brief a method to skip certain documents in AQL write operations,
/// this is only used in the enterprise edition for smart graphs
#ifndef USE_ENTERPRISE
bool LogicalCollection::skipForAqlWrite(arangodb::velocypack::Slice document) const {
return false;
}
#endif
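
skipForAqlWrite() is introduced here as a virtual hook: the community build always answers false, and only the enterprise edition overrides it (for smart graphs). A self-contained sketch of the hook with simplified stand-in types; the override shown is purely hypothetical, not the enterprise implementation:

#include <iostream>
#include <string>
#include <vector>

struct Doc { std::string key; bool belongsHere; };   // stand-in for a velocypack::Slice

class CollectionSketch {
 public:
  virtual ~CollectionSketch() = default;
  // community default added in this commit: never skip a document
  virtual bool skipForAqlWrite(Doc const& /*document*/) const { return false; }
};

class SmartCollectionSketch : public CollectionSketch {
 public:
  // hypothetical enterprise-style override: drop documents that do not
  // belong to this part of the graph
  bool skipForAqlWrite(Doc const& document) const override {
    return !document.belongsHere;
  }
};

int main() {
  SmartCollectionSketch coll;
  std::vector<Doc> docs = {{"a", true}, {"b", false}};
  for (auto const& d : docs) {
    if (coll.skipForAqlWrite(d)) { continue; }   // the AQL write blocks consult this hook
    std::cout << "writing " << d.key << "\n";
  }
}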


@@ -236,6 +236,10 @@ class LogicalCollection {
std::shared_ptr<ShardMap> shardIds() const;
void setShardMap(std::shared_ptr<ShardMap>& map);
/// @brief a method to skip certain documents in AQL write operations,
/// this is only used in the enterprise edition for smart graphs
virtual bool skipForAqlWrite(arangodb::velocypack::Slice document) const;
// SECTION: Modification Functions
int rename(std::string const&);
virtual void drop();
@@ -474,7 +478,7 @@ class LogicalCollection {
void increaseInternalVersion();
void toVelocyPackInObject(VPackBuilder& result) const;
void toVelocyPackInObject(arangodb::velocypack::Builder& result) const;
protected:
// SECTION: Private variables


@@ -1,5 +1,5 @@
#!/bin/bash
scripts/unittest shell_server --test js/common/tests/shell/shell-quickie.js
scripts/unittest shell_server --test js/common/tests/shell/shell-quickie.js --cluster true --agencySupervision false
scripts/unittest shell_client --test js/common/tests/shell/shell-quickie.js
scripts/unittest shell_client --test js/common/tests/shell/shell-quickie.js --cluster true --agencySize 1
scripts/unittest shell_server --test js/common/tests/shell/shell-quickie.js "$@"
scripts/unittest shell_server --test js/common/tests/shell/shell-quickie.js --cluster true --agencySupervision false "$@"
scripts/unittest shell_client --test js/common/tests/shell/shell-quickie.js "$@"
scripts/unittest shell_client --test js/common/tests/shell/shell-quickie.js --cluster true --agencySize 1 "$@"