From a80d208590b624fef1a83bbe479540fadc9458fb Mon Sep 17 00:00:00 2001 From: Andreas Streichardt Date: Wed, 4 May 2016 14:33:32 +0200 Subject: [PATCH 01/31] Use core2 as the absolute minimum architecture --- CMakeLists.txt | 8 +++++--- scripts/build-deb.sh | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e7327ac1b1..6eb5df9840 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -292,10 +292,12 @@ else () option(USE_OPTIMIZE_FOR_ARCHITECTURE "try to determine CPU architecture" ON) - if (USE_OPTIMIZE_FOR_ARCHITECTURE) - include(OptimizeForArchitecture) - OptimizeForArchitecture() + if (NOT USE_OPTIMIZE_FOR_ARCHITECTURE) + # mop: core2 (merom) is our absolute minimum! + SET(TARGET_ARCHITECTURE "merom") endif () + include(OptimizeForArchitecture) + OptimizeForArchitecture() set(BASE_FLAGS "${Vc_ARCHITECTURE_FLAGS} ${BASE_FLAGS}") endif () diff --git a/scripts/build-deb.sh b/scripts/build-deb.sh index 16c9923c58..42b40dfbfc 100755 --- a/scripts/build-deb.sh +++ b/scripts/build-deb.sh @@ -4,7 +4,7 @@ set -e mkdir -p build-debian cd build-debian -cmake -DASM_OPTIMIZATIONS=Off -DETCDIR=/etc -DCMAKE_INSTALL_PREFIX=/usr -DVARDIR=/var .. +cmake -DCMAKE_BUILD_TYPE=Release -DUSE_OPTIMIZE_FOR_ARCHITECTURE=Off -DETCDIR=/etc -DCMAKE_INSTALL_PREFIX=/usr -DVARDIR=/var .. make -j12 cpack -G DEB --verbose cd .. From 17abab2695157871dda1850c679d2bb7c2e6c663 Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Fri, 6 May 2016 09:02:58 +0200 Subject: [PATCH 02/31] Improve functionality of scripts/dumpAgency.sh You can now say for example: scripts/dumpAgency.sh .[0].arango.Plan.Collections with the obvious meaning. 
--- scripts/dumpAgency.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/dumpAgency.sh b/scripts/dumpAgency.sh index a035c6059f..87518b260b 100755 --- a/scripts/dumpAgency.sh +++ b/scripts/dumpAgency.sh @@ -1,2 +1,6 @@ #!/bin/bash -curl -X POST http://localhost:4001/_api/agency/read -d '[["/"]]' | jq . +if [ "$*" == "" ] ; then + curl -s X POST http://localhost:4001/_api/agency/read -d '[["/"]]' | jq . +else + curl -s -X POST http://localhost:4001/_api/agency/read -d '[["/"]]' | jq $* +fi From 7232432dceef5e924732e26e6970c20ea59268b3 Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Fri, 6 May 2016 09:15:33 +0200 Subject: [PATCH 03/31] Fix usage of getValues2 in FollowerInfo class. --- arangod/Cluster/ClusterInfo.cpp | 60 +++++++++++++++------------------ 1 file changed, 28 insertions(+), 32 deletions(-) diff --git a/arangod/Cluster/ClusterInfo.cpp b/arangod/Cluster/ClusterInfo.cpp index 62d3071373..4725586491 100644 --- a/arangod/Cluster/ClusterInfo.cpp +++ b/arangod/Cluster/ClusterInfo.cpp @@ -2786,30 +2786,28 @@ void FollowerInfo::add(ServerID const& sid) { if (res.successful()) { - velocypack::Slice currentShards = + velocypack::Slice currentEntry = res.slice()[0].get(std::vector( {AgencyComm::prefixStripped(), "Current", "Collections", _docColl->_vocbase->_name, std::to_string(_docColl->_info.planId()), _docColl->_info.name()})); - VPackObjectIterator curShardsIt(currentShards); - if (curShardsIt.size() == 0) { - for (auto const& shard : curShardsIt) { - VPackSlice oldValue = shard.value; - auto newValue = newShardEntry(oldValue, sid, true); - AgencyCommResult res2 = - ac.casValue(path, oldValue, newValue.slice(), 0, 0); - if (res2.successful()) { - success = true; - break; // - } else { - LOG(WARN) << "FollowerInfo::add, could not cas key " << path; - } + if (!currentEntry.isObject()) { + LOG(ERR) << "FollowerInfo::add, did not find object in " << path; + if (!currentEntry.isNone()) { + LOG(ERR) << "Found: " << 
currentEntry.toJson(); } } else { - LOG(ERR) << "FollowerInfo::add, did not find key " << path - << " in agency."; + auto newValue = newShardEntry(currentEntry, sid, true); + AgencyCommResult res2 = + ac.casValue(path, currentEntry, newValue.slice(), 0, 0); + if (res2.successful()) { + success = true; + break; // + } else { + LOG(WARN) << "FollowerInfo::add, could not cas key " << path; + } } } else { LOG(ERR) << "FollowerInfo::add, could not read " << path << " in agency."; @@ -2857,30 +2855,28 @@ void FollowerInfo::remove(ServerID const& sid) { AgencyCommResult res = ac.getValues2(path); if (res.successful()) { - velocypack::Slice currentShards = + velocypack::Slice currentEntry = res.slice()[0].get(std::vector( {AgencyComm::prefixStripped(), "Current", "Collections", _docColl->_vocbase->_name, std::to_string(_docColl->_info.planId()), _docColl->_info.name()})); - VPackObjectIterator curShardsIt(currentShards); - if (curShardsIt.size() == 0) { - for (auto const& shard : curShardsIt) { - VPackSlice oldValue = shard.value; - auto newValue = newShardEntry(oldValue, sid, false); - AgencyCommResult res2 = - ac.casValue(path, oldValue, newValue.slice(), 0, 0); - if (res2.successful()) { - success = true; - break; // - } else { - LOG(WARN) << "FollowerInfo::remove, could not cas key " << path; - } + if (!currentEntry.isObject()) { + LOG(ERR) << "FollowerInfo::remove, did not find object in " << path; + if (!currentEntry.isNone()) { + LOG(ERR) << "Found: " << currentEntry.toJson(); } } else { - LOG(ERR) << "FollowerInfo::remove, did not find key " << path - << " in agency."; + auto newValue = newShardEntry(currentEntry, sid, false); + AgencyCommResult res2 = + ac.casValue(path, currentEntry, newValue.slice(), 0, 0); + if (res2.successful()) { + success = true; + break; // + } else { + LOG(WARN) << "FollowerInfo::remove, could not cas key " << path; + } } } else { LOG(ERR) << "FollowerInfo::remove, could not read " << path From 0256193a278e548c91340967175dfc04772598fb Mon 
Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Fri, 6 May 2016 09:22:18 +0200 Subject: [PATCH 04/31] Handle int case for HeartbeatThreadMs in agency gracefully. --- arangod/Cluster/ClusterFeature.cpp | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/arangod/Cluster/ClusterFeature.cpp b/arangod/Cluster/ClusterFeature.cpp index e111e31e53..9d35069768 100644 --- a/arangod/Cluster/ClusterFeature.cpp +++ b/arangod/Cluster/ClusterFeature.cpp @@ -371,11 +371,16 @@ void ClusterFeature::start() { result.slice()[0].get(std::vector( {AgencyComm::prefixStripped(), "Sync", "HeartbeatIntervalMs"})); - if (HeartbeatIntervalMs.isUInt()) { - _heartbeatInterval = HeartbeatIntervalMs.getUInt(); + if (HeartbeatIntervalMs.isInteger()) { + try { + _heartbeatInterval = HeartbeatIntervalMs.getUInt(); + LOG(INFO) << "using heartbeat interval value '" << _heartbeatInterval + << " ms' from agency"; + } + catch (...) { + // Ignore if it is not a small int or uint + } - LOG(INFO) << "using heartbeat interval value '" << _heartbeatInterval - << " ms' from agency"; } } From 5690c0410c3ed5297397d8fa196698e5dd4a5d0b Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Fri, 6 May 2016 09:51:54 +0200 Subject: [PATCH 05/31] Remove option replicationQuorum which was unused as of now. 
--- arangod/Cluster/ClusterInfo.h | 15 --------------- arangod/Cluster/v8-cluster.cpp | 2 -- arangod/V8Server/v8-collection.cpp | 3 --- arangod/V8Server/v8-vocindex.cpp | 10 ---------- js/actions/_api/collection/app.js | 4 ---- .../aardvark/APP/frontend/build/arangoes6.js | 2 +- .../modules/client/@arangodb/arango-database.js | 2 +- js/client/modules/@arangodb/arango-database.js | 2 +- 8 files changed, 3 insertions(+), 37 deletions(-) diff --git a/arangod/Cluster/ClusterInfo.h b/arangod/Cluster/ClusterInfo.h index f95dcc5024..1019d8b6e2 100644 --- a/arangod/Cluster/ClusterInfo.h +++ b/arangod/Cluster/ClusterInfo.h @@ -82,21 +82,6 @@ class CollectionInfo { return 1; } - ////////////////////////////////////////////////////////////////////////////// - /// @brief returns the replication quorum - ////////////////////////////////////////////////////////////////////////////// - - int replicationQuorum () const { - TRI_json_t* const node - = arangodb::basics::JsonHelper::getObjectElement(_json, - "replicationQuorum"); - - if (TRI_IsNumberJson(node)) { - return (int) (node->_value._number); - } - return 1; - } - ////////////////////////////////////////////////////////////////////////////// /// @brief checks whether there is no info contained ////////////////////////////////////////////////////////////////////////////// diff --git a/arangod/Cluster/v8-cluster.cpp b/arangod/Cluster/v8-cluster.cpp index 09f3b22098..16458dc98b 100644 --- a/arangod/Cluster/v8-cluster.cpp +++ b/arangod/Cluster/v8-cluster.cpp @@ -770,8 +770,6 @@ static void JS_GetCollectionInfoClusterInfo( v8::Number::New(isolate, ci->journalSize())); result->Set(TRI_V8_ASCII_STRING("replicationFactor"), v8::Number::New(isolate, ci->replicationFactor())); - result->Set(TRI_V8_ASCII_STRING("replicationQuorum"), - v8::Number::New(isolate, ci->replicationQuorum())); std::vector const& sks = ci->shardKeys(); v8::Handle shardKeys = v8::Array::New(isolate, (int)sks.size()); diff --git 
a/arangod/V8Server/v8-collection.cpp b/arangod/V8Server/v8-collection.cpp index 052e87d625..9b5af59b9a 100644 --- a/arangod/V8Server/v8-collection.cpp +++ b/arangod/V8Server/v8-collection.cpp @@ -1305,9 +1305,6 @@ static void JS_PropertiesVocbaseCol( result->Set( TRI_V8_ASCII_STRING("replicationFactor"), v8::Number::New(isolate, static_cast(c->replicationFactor()))); - result->Set( - TRI_V8_ASCII_STRING("replicationQuorum"), - v8::Number::New(isolate, static_cast(c->replicationQuorum()))); TRI_V8_RETURN(result); } diff --git a/arangod/V8Server/v8-vocindex.cpp b/arangod/V8Server/v8-vocindex.cpp index 445be6475f..2ddcb758d6 100644 --- a/arangod/V8Server/v8-vocindex.cpp +++ b/arangod/V8Server/v8-vocindex.cpp @@ -871,7 +871,6 @@ static void CreateCollectionCoordinator( uint64_t numberOfShards = 1; std::vector shardKeys; uint64_t replicationFactor = 1; - uint64_t replicationQuorum = 1; // default shard key shardKeys.push_back("_key"); @@ -950,10 +949,6 @@ static void CreateCollectionCoordinator( if (p->Has(TRI_V8_ASCII_STRING("replicationFactor"))) { replicationFactor = TRI_ObjectToUInt64(p->Get(TRI_V8_ASCII_STRING("replicationFactor")), false); } - - if (p->Has(TRI_V8_ASCII_STRING("replicationQuorum"))) { - replicationQuorum = TRI_ObjectToUInt64(p->Get(TRI_V8_ASCII_STRING("replicationQuorum")), false); - } } if (numberOfShards == 0 || numberOfShards > 1000) { @@ -964,10 +959,6 @@ static void CreateCollectionCoordinator( TRI_V8_THROW_EXCEPTION_PARAMETER("invalid replicationFactor"); } - if (replicationQuorum == 0 || replicationQuorum > replicationFactor) { - TRI_V8_THROW_EXCEPTION_PARAMETER("invalid replicationQuorum"); - } - if (shardKeys.empty() || shardKeys.size() > 8) { TRI_V8_THROW_EXCEPTION_PARAMETER("invalid number of shard keys"); } @@ -1062,7 +1053,6 @@ static void CreateCollectionCoordinator( ("journalSize", Value(parameters.maximalSize())) ("indexBuckets", Value(parameters.indexBuckets())) ("replicationFactor", Value(replicationFactor)) - 
("replicationQuorum", Value(replicationQuorum)) ("keyOptions", Value(ValueType::Object)) ("type", Value("traditional")) ("allowUserKeys", Value(allowUserKeys)) diff --git a/js/actions/_api/collection/app.js b/js/actions/_api/collection/app.js index db9f8aed81..69d2664fb4 100644 --- a/js/actions/_api/collection/app.js +++ b/js/actions/_api/collection/app.js @@ -167,10 +167,6 @@ function parseBodyForCreateCollection (req, res) { if (body.hasOwnProperty("replicationFactor")) { r.parameters.replicationFactor = body.replicationFactor || ""; } - - if (body.hasOwnProperty("replicationQuorum")) { - r.parameters.replicationQuorum = body.replicationQuorum || ""; - } } return r; diff --git a/js/apps/system/_admin/aardvark/APP/frontend/build/arangoes6.js b/js/apps/system/_admin/aardvark/APP/frontend/build/arangoes6.js index 5b9f0da3b8..0b39af5afb 100644 --- a/js/apps/system/_admin/aardvark/APP/frontend/build/arangoes6.js +++ b/js/apps/system/_admin/aardvark/APP/frontend/build/arangoes6.js @@ -4739,7 +4739,7 @@ ArangoDatabase.prototype._create = function (name, properties, type) { [ "waitForSync", "journalSize", "isSystem", "isVolatile", "doCompact", "keyOptions", "shardKeys", "numberOfShards", "distributeShardsLike", "indexBuckets", "id", - "replicationFactor", "replicationQuorum" ].forEach(function(p) { + "replicationFactor" ].forEach(function(p) { if (properties.hasOwnProperty(p)) { body[p] = properties[p]; } diff --git a/js/apps/system/_admin/aardvark/APP/frontend/js/modules/client/@arangodb/arango-database.js b/js/apps/system/_admin/aardvark/APP/frontend/js/modules/client/@arangodb/arango-database.js index 6de24908d3..f69180fe4b 100644 --- a/js/apps/system/_admin/aardvark/APP/frontend/js/modules/client/@arangodb/arango-database.js +++ b/js/apps/system/_admin/aardvark/APP/frontend/js/modules/client/@arangodb/arango-database.js @@ -304,7 +304,7 @@ ArangoDatabase.prototype._create = function (name, properties, type) { [ "waitForSync", "journalSize", "isSystem", "isVolatile", 
"doCompact", "keyOptions", "shardKeys", "numberOfShards", "distributeShardsLike", "indexBuckets", "id", - "replicationFactor", "replicationQuorum" ].forEach(function(p) { + "replicationFactor" ].forEach(function(p) { if (properties.hasOwnProperty(p)) { body[p] = properties[p]; } diff --git a/js/client/modules/@arangodb/arango-database.js b/js/client/modules/@arangodb/arango-database.js index 2b9d98aeae..a8b94002b5 100644 --- a/js/client/modules/@arangodb/arango-database.js +++ b/js/client/modules/@arangodb/arango-database.js @@ -315,7 +315,7 @@ ArangoDatabase.prototype._create = function (name, properties, type) { [ "waitForSync", "journalSize", "isSystem", "isVolatile", "doCompact", "keyOptions", "shardKeys", "numberOfShards", "distributeShardsLike", "indexBuckets", "id", - "replicationFactor", "replicationQuorum" ].forEach(function(p) { + "replicationFactor" ].forEach(function(p) { if (properties.hasOwnProperty(p)) { body[p] = properties[p]; } From 1a4a95a2ad4e12b28c1101d10fc13cc76a8f6f8b Mon Sep 17 00:00:00 2001 From: jsteemann Date: Fri, 6 May 2016 10:20:00 +0200 Subject: [PATCH 06/31] notify queries about shutdown --- arangod/Aql/QueryList.cpp | 26 +++++++++++++++++ arangod/Aql/QueryList.h | 3 ++ arangod/RestServer/BootstrapFeature.cpp | 37 ++++++++++++++++++++++++- arangod/RestServer/BootstrapFeature.h | 1 + arangod/VocBase/server.cpp | 4 +-- arangod/VocBase/server.h | 4 +-- 6 files changed, 70 insertions(+), 5 deletions(-) diff --git a/arangod/Aql/QueryList.cpp b/arangod/Aql/QueryList.cpp index b04cdd8695..e39277e5d4 100644 --- a/arangod/Aql/QueryList.cpp +++ b/arangod/Aql/QueryList.cpp @@ -210,6 +210,32 @@ int QueryList::kill(TRI_voc_tick_t id) { return TRI_ERROR_NO_ERROR; } + +/// @brief kills all currently running queries +uint64_t QueryList::killAll(bool silent) { + uint64_t killed = 0; + + WRITE_LOCKER(writeLocker, _lock); + + for (auto& it : _current) { + auto entry = it.second; + const_cast(entry->query)->killed(true); + + ++killed; + + std::string 
queryString(entry->query->queryString(), + entry->query->queryLength()); + + + if (silent) { + LOG(TRACE) << "killing AQL query " << entry->query->id() << " '" << queryString << "'"; + } else { + LOG(WARN) << "killing AQL query " << entry->query->id() << " '" << queryString << "'"; + } + } + + return killed; +} /// @brief get the list of currently running queries std::vector QueryList::listCurrent() { diff --git a/arangod/Aql/QueryList.h b/arangod/Aql/QueryList.h index 786560d03f..a933683123 100644 --- a/arangod/Aql/QueryList.h +++ b/arangod/Aql/QueryList.h @@ -150,6 +150,9 @@ class QueryList { /// @brief kills a query int kill(TRI_voc_tick_t); + + /// @brief kills all currently running queries + uint64_t killAll(bool silent); /// @brief return the list of running queries std::vector listCurrent(); diff --git a/arangod/RestServer/BootstrapFeature.cpp b/arangod/RestServer/BootstrapFeature.cpp index 6757e3f388..c03c74fcfd 100644 --- a/arangod/RestServer/BootstrapFeature.cpp +++ b/arangod/RestServer/BootstrapFeature.cpp @@ -22,15 +22,18 @@ #include "RestServer/BootstrapFeature.h" -#include "Logger/Logger.h" +#include "Aql/QueryList.h" #include "Cluster/AgencyComm.h" #include "Cluster/ClusterInfo.h" #include "Cluster/ServerState.h" #include "HttpServer/HttpHandlerFactory.h" +#include "Logger/Logger.h" #include "Rest/GeneralResponse.h" #include "Rest/Version.h" #include "RestServer/DatabaseFeature.h" +#include "RestServer/DatabaseServerFeature.h" #include "V8Server/V8DealerFeature.h" +#include "VocBase/server.h" using namespace arangodb; using namespace arangodb::application_features; @@ -148,3 +151,35 @@ void BootstrapFeature::start() { << ") is ready for business. 
Have fun!"; } +void BootstrapFeature::stop() { + auto server = ApplicationServer::getFeature("DatabaseServer"); + + TRI_server_t* s = server->SERVER; + + // notify all currently running queries about the shutdown + if (ServerState::instance()->isCoordinator()) { + std::vector ids = TRI_GetIdsCoordinatorDatabaseServer(s, true); + for (auto& id : ids) { + TRI_vocbase_t* vocbase = TRI_UseByIdCoordinatorDatabaseServer(s, id); + + if (vocbase != nullptr) { + vocbase->_queries->killAll(true); + TRI_ReleaseVocBase(vocbase); + } + } + } else { + std::vector names; + int res = TRI_GetDatabaseNamesServer(s, names); + if (res == TRI_ERROR_NO_ERROR) { + for (auto& name : names) { + TRI_vocbase_t* vocbase = TRI_UseDatabaseServer(s, name.c_str()); + + if (vocbase != nullptr) { + vocbase->_queries->killAll(true); + TRI_ReleaseVocBase(vocbase); + } + } + } + } +} + diff --git a/arangod/RestServer/BootstrapFeature.h b/arangod/RestServer/BootstrapFeature.h index 17c6c5a7ed..73a4132f55 100644 --- a/arangod/RestServer/BootstrapFeature.h +++ b/arangod/RestServer/BootstrapFeature.h @@ -32,6 +32,7 @@ class BootstrapFeature final : public application_features::ApplicationFeature { public: void start() override final; + void stop() override final; }; } diff --git a/arangod/VocBase/server.cpp b/arangod/VocBase/server.cpp index 55ae46c66e..bf870c3fe1 100644 --- a/arangod/VocBase/server.cpp +++ b/arangod/VocBase/server.cpp @@ -1712,7 +1712,7 @@ void TRI_EnableDeadlockDetectionDatabasesServer(TRI_server_t* server) { //////////////////////////////////////////////////////////////////////////////// std::vector TRI_GetIdsCoordinatorDatabaseServer( - TRI_server_t* server) { + TRI_server_t* server, bool includeSystem) { std::vector v; { auto unuser(server->_databasesProtector.use()); @@ -1722,7 +1722,7 @@ std::vector TRI_GetIdsCoordinatorDatabaseServer( TRI_vocbase_t* vocbase = p.second; TRI_ASSERT(vocbase != nullptr); - if (!TRI_EqualString(vocbase->_name, TRI_VOC_SYSTEM_DATABASE)) { + if 
(includeSystem || !TRI_EqualString(vocbase->_name, TRI_VOC_SYSTEM_DATABASE)) { v.emplace_back(vocbase->_id); } } diff --git a/arangod/VocBase/server.h b/arangod/VocBase/server.h index ffb3ae9a69..390ffde642 100644 --- a/arangod/VocBase/server.h +++ b/arangod/VocBase/server.h @@ -160,10 +160,10 @@ void TRI_EnableDeadlockDetectionDatabasesServer(TRI_server_t*); //////////////////////////////////////////////////////////////////////////////// /// @brief get the ids of all local coordinator databases -/// the caller is responsible for freeing the result //////////////////////////////////////////////////////////////////////////////// -std::vector TRI_GetIdsCoordinatorDatabaseServer(TRI_server_t*); +std::vector TRI_GetIdsCoordinatorDatabaseServer(TRI_server_t*, + bool includeSystem = false); //////////////////////////////////////////////////////////////////////////////// /// @brief drops an existing coordinator database From 838e3dc866413d97139fbfbec6882d20dec7f7e6 Mon Sep 17 00:00:00 2001 From: Jan Steemann Date: Fri, 6 May 2016 11:52:06 +0200 Subject: [PATCH 07/31] fixed handling of externals in some cases --- arangod/Aql/AqlValue.cpp | 28 +++++++++------------- arangod/Aql/AqlValue.h | 14 +++++++++-- js/server/tests/aql/aql-vpack-externals.js | 16 +++++++++++++ 3 files changed, 39 insertions(+), 19 deletions(-) diff --git a/arangod/Aql/AqlValue.cpp b/arangod/Aql/AqlValue.cpp index 37b784f57c..4fc9474861 100644 --- a/arangod/Aql/AqlValue.cpp +++ b/arangod/Aql/AqlValue.cpp @@ -37,12 +37,6 @@ using namespace arangodb; using namespace arangodb::aql; -/// @brief construct a document -AqlValue::AqlValue(TRI_doc_mptr_t const* mptr) { - _data.pointer = mptr->vpack(); - setType(AqlValueType::VPACK_SLICE_POINTER); -} - /// @brief hashes the value uint64_t AqlValue::hash(arangodb::AqlTransaction* trx, uint64_t seed) const { switch (type()) { @@ -170,7 +164,7 @@ AqlValue AqlValue::at(int64_t position, bool& mustDestroy, position = n + position; } if (position >= 0 && position < n) 
{ - if (doCopy || s.byteSize() < sizeof(_data.internal)) { + if (doCopy) { mustDestroy = true; return AqlValue(s.at(position)); } @@ -242,7 +236,7 @@ AqlValue AqlValue::getKeyAttribute(arangodb::AqlTransaction* trx, if (s.isObject()) { VPackSlice found = Transaction::extractKeyFromDocument(s); if (!found.isNone()) { - if (doCopy || found.byteSize() < sizeof(_data.internal)) { + if (doCopy) { mustDestroy = true; return AqlValue(found); } @@ -278,7 +272,7 @@ AqlValue AqlValue::getFromAttribute(arangodb::AqlTransaction* trx, if (s.isObject()) { VPackSlice found = Transaction::extractFromFromDocument(s); if (!found.isNone()) { - if (doCopy || found.byteSize() < sizeof(_data.internal)) { + if (doCopy) { mustDestroy = true; return AqlValue(found); } @@ -314,7 +308,7 @@ AqlValue AqlValue::getToAttribute(arangodb::AqlTransaction* trx, if (s.isObject()) { VPackSlice found = Transaction::extractToFromDocument(s); if (!found.isNone()) { - if (doCopy || found.byteSize() < sizeof(_data.internal)) { + if (doCopy) { mustDestroy = true; return AqlValue(found); } @@ -349,14 +343,14 @@ AqlValue AqlValue::get(arangodb::AqlTransaction* trx, case VPACK_MANAGED: { VPackSlice s(slice()); if (s.isObject()) { - VPackSlice found(s.resolveExternal().get(name)); + VPackSlice found(s.get(name)); if (found.isCustom()) { // _id needs special treatment mustDestroy = true; return AqlValue(trx->extractIdString(s)); } if (!found.isNone()) { - if (doCopy || found.byteSize() < sizeof(_data.internal)) { + if (doCopy) { mustDestroy = true; return AqlValue(found); } @@ -391,14 +385,14 @@ AqlValue AqlValue::get(arangodb::AqlTransaction* trx, case VPACK_MANAGED: { VPackSlice s(slice()); if (s.isObject()) { - VPackSlice found(s.resolveExternal().get(names, true)); + VPackSlice found(s.get(names, true)); if (found.isCustom()) { // _id needs special treatment mustDestroy = true; return AqlValue(trx->extractIdString(s)); } if (!found.isNone()) { - if (doCopy || found.byteSize() < sizeof(_data.internal)) { + if 
(doCopy) { mustDestroy = true; return AqlValue(found); } @@ -427,7 +421,7 @@ bool AqlValue::hasKey(arangodb::AqlTransaction* trx, case VPACK_SLICE_POINTER: case VPACK_INLINE: case VPACK_MANAGED: { - VPackSlice s(slice().resolveExternal()); + VPackSlice s(slice()); return (s.isObject() && s.hasKey(name)); } case DOCVEC: @@ -841,14 +835,14 @@ VPackSlice AqlValue::slice() const { case VPACK_INLINE: { VPackSlice s(&_data.internal[0]); if (s.isExternal()) { - s = VPackSlice(s.getExternal()); + s = s.resolveExternal(); } return s; } case VPACK_MANAGED: { VPackSlice s(_data.buffer->data()); if (s.isExternal()) { - s = VPackSlice(s.getExternal()); + s = s.resolveExternal(); } return s; } diff --git a/arangod/Aql/AqlValue.h b/arangod/Aql/AqlValue.h index 430597fa95..4349b9f759 100644 --- a/arangod/Aql/AqlValue.h +++ b/arangod/Aql/AqlValue.h @@ -29,6 +29,7 @@ #include "Aql/Range.h" #include "Aql/types.h" #include "Basics/VelocyPackHelper.h" +#include "VocBase/document-collection.h" #include #include @@ -94,12 +95,21 @@ struct AqlValue final { } // construct from document - explicit AqlValue(TRI_doc_mptr_t const* mptr); + explicit AqlValue(TRI_doc_mptr_t const* mptr) { + _data.pointer = mptr->vpack(); + setType(AqlValueType::VPACK_SLICE_POINTER); + TRI_ASSERT(VPackSlice(_data.pointer).isObject()); + TRI_ASSERT(!VPackSlice(_data.pointer).isExternal()); + } // construct from pointer explicit AqlValue(uint8_t const* pointer) { - _data.pointer = pointer; + // we must get rid of Externals first here, because all + // methods that use VPACK_SLICE_POINTER expect its contents + // to be non-Externals + _data.pointer = VPackSlice(pointer).resolveExternals().begin(); setType(AqlValueType::VPACK_SLICE_POINTER); + TRI_ASSERT(!VPackSlice(_data.pointer).isExternal()); } // construct from docvec, taking over its ownership diff --git a/js/server/tests/aql/aql-vpack-externals.js b/js/server/tests/aql/aql-vpack-externals.js index bccc27cb21..0bf50d0e42 100644 --- 
a/js/server/tests/aql/aql-vpack-externals.js +++ b/js/server/tests/aql/aql-vpack-externals.js @@ -136,7 +136,23 @@ function aqlVPackExternalsTestSuite () { } }, + testExternalAttributeAccess: function () { + let coll = db._collection(collName); + let ecoll = db._collection(edgeColl); + coll.truncate(); + ecoll.truncate(); + coll.insert({ _key: "a", w: 1}); + coll.insert({ _key: "b", w: 2}); + coll.insert({ _key: "c", w: 3}); + ecoll.insert({ _key: "a", _from: coll.name() + "/a", _to: coll.name() + "/b", w: 1}); + ecoll.insert({ _key: "b", _from: coll.name() + "/b", _to: coll.name() + "/c", w: 2}); + const query = `FOR x,y,p IN 1..10 OUTBOUND '${collName}/a' ${edgeColl} SORT x._key, y._key RETURN p.vertices[*].w`; + const cursor = db._query(query); + + assertEqual([ 1, 2 ], cursor.next()); + assertEqual([ 1, 2, 3 ], cursor.next()); + } }; } From 6b4e9fba3816df84a5eb17da35f2d430c351204b Mon Sep 17 00:00:00 2001 From: Jan Steemann Date: Fri, 6 May 2016 12:12:12 +0200 Subject: [PATCH 08/31] quicker access to _id --- arangod/Aql/AqlValue.cpp | 41 +++++++++++++++++++++++++++++++ arangod/Aql/AqlValue.h | 3 +++ arangod/Aql/AttributeAccessor.cpp | 4 +++ arangod/Aql/AttributeAccessor.h | 1 + arangod/Utils/Transaction.cpp | 34 +++++++++++++++++++++++++ arangod/Utils/Transaction.h | 9 +++++++ 6 files changed, 92 insertions(+) diff --git a/arangod/Aql/AqlValue.cpp b/arangod/Aql/AqlValue.cpp index 4fc9474861..8a780f4be9 100644 --- a/arangod/Aql/AqlValue.cpp +++ b/arangod/Aql/AqlValue.cpp @@ -258,6 +258,47 @@ AqlValue AqlValue::getKeyAttribute(arangodb::AqlTransaction* trx, return AqlValue(arangodb::basics::VelocyPackHelper::NullValue()); } +/// @brief get the _id attribute from an object/document +AqlValue AqlValue::getIdAttribute(arangodb::AqlTransaction* trx, + bool& mustDestroy, bool doCopy) const { + mustDestroy = false; + switch (type()) { + case VPACK_SLICE_POINTER: + case VPACK_INLINE: + doCopy = false; + // fall-through intentional + case VPACK_MANAGED: { + VPackSlice 
s(slice()); + if (s.isObject()) { + VPackSlice found = Transaction::extractIdFromDocument(s); + if (found.isCustom()) { + // _id as a custom type needs special treatment + mustDestroy = true; + return AqlValue(trx->extractIdString(trx->resolver(), found, s)); + } + if (!found.isNone()) { + if (doCopy) { + mustDestroy = true; + return AqlValue(found); + } + // return a reference to an existing slice + return AqlValue(found.begin()); + } + } + // fall-through intentional + break; + } + case DOCVEC: + case RANGE: { + // will return null + break; + } + } + + // default is to return null + return AqlValue(arangodb::basics::VelocyPackHelper::NullValue()); +} + /// @brief get the _from attribute from an object/document AqlValue AqlValue::getFromAttribute(arangodb::AqlTransaction* trx, bool& mustDestroy, bool doCopy) const { diff --git a/arangod/Aql/AqlValue.h b/arangod/Aql/AqlValue.h index 4349b9f759..e06292accc 100644 --- a/arangod/Aql/AqlValue.h +++ b/arangod/Aql/AqlValue.h @@ -223,6 +223,9 @@ struct AqlValue final { /// @brief get the _key attribute from an object/document AqlValue getKeyAttribute(arangodb::AqlTransaction* trx, bool& mustDestroy, bool copy) const; + /// @brief get the _id attribute from an object/document + AqlValue getIdAttribute(arangodb::AqlTransaction* trx, + bool& mustDestroy, bool copy) const; /// @brief get the _from attribute from an object/document AqlValue getFromAttribute(arangodb::AqlTransaction* trx, bool& mustDestroy, bool copy) const; diff --git a/arangod/Aql/AttributeAccessor.cpp b/arangod/Aql/AttributeAccessor.cpp index 137ab9989f..12085489e4 100644 --- a/arangod/Aql/AttributeAccessor.cpp +++ b/arangod/Aql/AttributeAccessor.cpp @@ -43,6 +43,8 @@ AttributeAccessor::AttributeAccessor( if (_attributeParts.size() == 1) { if (attributeParts[0] == StaticStrings::KeyString) { _type = EXTRACT_KEY; + } else if (attributeParts[0] == StaticStrings::IdString) { + _type = EXTRACT_ID; } else if (attributeParts[0] == StaticStrings::FromString) { 
_type = EXTRACT_FROM; } else if (attributeParts[0] == StaticStrings::ToString) { @@ -76,6 +78,8 @@ AqlValue AttributeAccessor::get(arangodb::AqlTransaction* trx, switch (_type) { case EXTRACT_KEY: return argv->getValueReference(startPos, regs[i]).getKeyAttribute(trx, mustDestroy, true); + case EXTRACT_ID: + return argv->getValueReference(startPos, regs[i]).getIdAttribute(trx, mustDestroy, true); case EXTRACT_FROM: return argv->getValueReference(startPos, regs[i]).getFromAttribute(trx, mustDestroy, true); case EXTRACT_TO: diff --git a/arangod/Aql/AttributeAccessor.h b/arangod/Aql/AttributeAccessor.h index 517f857fde..ff9c10367d 100644 --- a/arangod/Aql/AttributeAccessor.h +++ b/arangod/Aql/AttributeAccessor.h @@ -52,6 +52,7 @@ class AttributeAccessor { private: enum AccessorType { EXTRACT_KEY, + EXTRACT_ID, EXTRACT_FROM, EXTRACT_TO, EXTRACT_SINGLE, diff --git a/arangod/Utils/Transaction.cpp b/arangod/Utils/Transaction.cpp index 68d25334ef..012de4eeb0 100644 --- a/arangod/Utils/Transaction.cpp +++ b/arangod/Utils/Transaction.cpp @@ -764,6 +764,40 @@ VPackSlice Transaction::extractKeyFromDocument(VPackSlice slice) { return slice.get(StaticStrings::KeyString); } +////////////////////////////////////////////////////////////////////////////// +/// @brief quick access to the _id attribute in a database document +/// the document must have at least two attributes, and _id is supposed to +/// be the second one +/// note that this may return a Slice of type Custom! +////////////////////////////////////////////////////////////////////////////// + +VPackSlice Transaction::extractIdFromDocument(VPackSlice slice) { + if (slice.isExternal()) { + slice = slice.resolveExternal(); + } + TRI_ASSERT(slice.isObject()); + // a regular document must have at least the three attributes + // _key, _id and _rev (in this order). 
_id must be the second attribute + TRI_ASSERT(slice.length() >= 2); + + uint8_t const* p = slice.begin() + slice.findDataOffset(slice.head()); + + if (*p == basics::VelocyPackHelper::KeyAttribute) { + // skip over _key + ++p; + // skip over _key value + p += VPackSlice(p).byteSize(); + if (*p == basics::VelocyPackHelper::IdAttribute) { + // the + 1 is required so that we can skip over the attribute name + // and point to the attribute value + return VPackSlice(p + 1); + } + } + + // fall back to the regular lookup method + return slice.get(StaticStrings::IdString); +} + ////////////////////////////////////////////////////////////////////////////// /// @brief quick access to the _from attribute in a database document /// the document must have at least five attributes: _key, _id, _from, _to diff --git a/arangod/Utils/Transaction.h b/arangod/Utils/Transaction.h index 3ab8b2190b..544f54068b 100644 --- a/arangod/Utils/Transaction.h +++ b/arangod/Utils/Transaction.h @@ -308,6 +308,15 @@ class Transaction { ////////////////////////////////////////////////////////////////////////////// static VPackSlice extractKeyFromDocument(VPackSlice); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief quick access to the _id attribute in a database document + /// the document must have at least two attributes, and _id is supposed to + /// be the second one + /// note that this may return a Slice of type Custom! 
+ ////////////////////////////////////////////////////////////////////////////// + + static VPackSlice extractIdFromDocument(VPackSlice); ////////////////////////////////////////////////////////////////////////////// /// @brief quick access to the _from attribute in a database document From 49c39f6659b9eb61930cecf22e9ece8f6f85ccc1 Mon Sep 17 00:00:00 2001 From: Jan Steemann Date: Fri, 6 May 2016 12:30:04 +0200 Subject: [PATCH 09/31] removed unused functions --- UnitTests/Basics/json-test.cpp | 123 --------------------------------- lib/Basics/json.cpp | 16 ----- lib/Basics/json.h | 13 ---- lib/JsonParser/json-parser.ll | 70 ------------------- 4 files changed, 222 deletions(-) diff --git a/UnitTests/Basics/json-test.cpp b/UnitTests/Basics/json-test.cpp index ac991cc4a4..e134bb5468 100644 --- a/UnitTests/Basics/json-test.cpp +++ b/UnitTests/Basics/json-test.cpp @@ -263,129 +263,6 @@ BOOST_AUTO_TEST_CASE (tst_json_string2) { FREE_BUFFER } -//////////////////////////////////////////////////////////////////////////////// -/// @brief test string reference value -//////////////////////////////////////////////////////////////////////////////// - -BOOST_AUTO_TEST_CASE (tst_json_string_reference) { - INIT_BUFFER - - const char* data = "The Quick Brown Fox"; - char copy[64]; - - memset(copy, 0, sizeof(copy)); - memcpy(copy, data, strlen(data)); - - TRI_json_t* json = TRI_CreateStringReferenceJson(TRI_UNKNOWN_MEM_ZONE, copy, strlen(copy)); - BOOST_CHECK_EQUAL(true, TRI_IsStringJson(json)); - - STRINGIFY - BOOST_CHECK_EQUAL("\"The Quick Brown Fox\"", STRING_VALUE); - FREE_BUFFER - - FREE_JSON - - // freeing JSON should not affect our string - BOOST_CHECK_EQUAL("The Quick Brown Fox", copy); - - json = TRI_CreateStringReferenceJson(TRI_UNKNOWN_MEM_ZONE, copy, strlen(copy)); - BOOST_CHECK_EQUAL(true, TRI_IsStringJson(json)); - - // modify the string we're referring to - copy[0] = '*'; - copy[1] = '/'; - copy[2] = '+'; - copy[strlen(copy) - 1] = '!'; - - sb = 
TRI_CreateStringBuffer(TRI_UNKNOWN_MEM_ZONE); - STRINGIFY - BOOST_CHECK_EQUAL("\"*/+ Quick Brown Fo!\"", STRING_VALUE); - FREE_BUFFER - - BOOST_CHECK_EQUAL("*/+ Quick Brown Fo!", copy); - - FREE_JSON -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief test string reference value -//////////////////////////////////////////////////////////////////////////////// - -BOOST_AUTO_TEST_CASE (tst_json_string_reference2) { - INIT_BUFFER - - const char* data1 = "The first Brown Fox"; - const char* data2 = "The second Brown Fox"; - char copy1[64]; - char copy2[64]; - TRI_json_t* json; - size_t len1 = strlen(data1); - size_t len2 = strlen(data2); - - memset(copy1, 0, sizeof(copy1)); - memcpy(copy1, data1, len1); - - memset(copy2, 0, sizeof(copy2)); - memcpy(copy2, data2, len2); - - json = TRI_CreateObjectJson(TRI_UNKNOWN_MEM_ZONE); - - TRI_Insert3ObjectJson(TRI_UNKNOWN_MEM_ZONE, json, "first", - TRI_CreateStringReferenceJson(TRI_UNKNOWN_MEM_ZONE, copy1, strlen(copy1))); - - TRI_Insert3ObjectJson(TRI_UNKNOWN_MEM_ZONE, json, "second", - TRI_CreateStringReferenceJson(TRI_UNKNOWN_MEM_ZONE, copy2, len2)); - - BOOST_CHECK_EQUAL(true, TRI_IsObjectJson(json)); - - STRINGIFY - BOOST_CHECK_EQUAL("{\"first\":\"The first Brown Fox\",\"second\":\"The second Brown Fox\"}", STRING_VALUE); - FREE_BUFFER - - FREE_JSON - - // freeing JSON should not affect our string - BOOST_CHECK_EQUAL("The first Brown Fox", copy1); - BOOST_CHECK_EQUAL("The second Brown Fox", copy2); - - json = TRI_CreateObjectJson(TRI_UNKNOWN_MEM_ZONE); - - TRI_Insert3ObjectJson(TRI_UNKNOWN_MEM_ZONE, json, "first", - TRI_CreateStringReferenceJson(TRI_UNKNOWN_MEM_ZONE, copy1, strlen(copy1))); - - TRI_Insert3ObjectJson(TRI_UNKNOWN_MEM_ZONE, json, "second", - TRI_CreateStringReferenceJson(TRI_UNKNOWN_MEM_ZONE, copy2, len2)); - - BOOST_CHECK_EQUAL(true, TRI_IsObjectJson(json)); - - // modify the string we're referring to - copy1[0] = '*'; - copy1[1] = '/'; - copy1[2] = '+'; - 
copy1[len1 - 1] = '!'; - - copy2[0] = '*'; - copy2[1] = '/'; - copy2[2] = '+'; - copy2[len2 - 1] = '!'; - - BOOST_CHECK_EQUAL("*/+ first Brown Fo!", copy1); - BOOST_CHECK_EQUAL("*/+ second Brown Fo!", copy2); - - sb = TRI_CreateStringBuffer(TRI_UNKNOWN_MEM_ZONE); - STRINGIFY - BOOST_CHECK_EQUAL("{\"first\":\"*/+ first Brown Fo!\",\"second\":\"*/+ second Brown Fo!\"}", STRING_VALUE); - - FREE_BUFFER - - // freeing JSON should not affect our string - BOOST_CHECK_EQUAL("*/+ first Brown Fo!", copy1); - BOOST_CHECK_EQUAL("*/+ second Brown Fo!", copy2); - - FREE_JSON -} - - //////////////////////////////////////////////////////////////////////////////// /// @brief test string value (escaped) //////////////////////////////////////////////////////////////////////////////// diff --git a/lib/Basics/json.cpp b/lib/Basics/json.cpp index 3e77b930b0..eea4d81ae2 100644 --- a/lib/Basics/json.cpp +++ b/lib/Basics/json.cpp @@ -445,22 +445,6 @@ int TRI_InitStringCopyJson(TRI_memory_zone_t* zone, TRI_json_t* result, return TRI_ERROR_NO_ERROR; } -//////////////////////////////////////////////////////////////////////////////// -/// @brief creates a string reference object with given length -//////////////////////////////////////////////////////////////////////////////// - -TRI_json_t* TRI_CreateStringReferenceJson(TRI_memory_zone_t* zone, - char const* value, size_t length) { - TRI_json_t* result = - static_cast(TRI_Allocate(zone, sizeof(TRI_json_t), false)); - - if (result != nullptr) { - InitStringReference(result, value, length); - } - - return result; -} - //////////////////////////////////////////////////////////////////////////////// /// @brief initializes a string reference object //////////////////////////////////////////////////////////////////////////////// diff --git a/lib/Basics/json.h b/lib/Basics/json.h index 251d7512c9..9b3242fad5 100644 --- a/lib/Basics/json.h +++ b/lib/Basics/json.h @@ -121,13 +121,6 @@ void TRI_InitStringJson(TRI_json_t*, char*, size_t); int 
TRI_InitStringCopyJson(TRI_memory_zone_t*, TRI_json_t*, char const*, size_t); -//////////////////////////////////////////////////////////////////////////////// -/// @brief creates a string reference object with given length -//////////////////////////////////////////////////////////////////////////////// - -TRI_json_t* TRI_CreateStringReferenceJson(TRI_memory_zone_t*, char const* value, - size_t length); - //////////////////////////////////////////////////////////////////////////////// /// @brief creates a string reference object //////////////////////////////////////////////////////////////////////////////// @@ -306,12 +299,6 @@ TRI_json_t* TRI_CopyJson(TRI_memory_zone_t*, TRI_json_t const*); TRI_json_t* TRI_JsonString(TRI_memory_zone_t*, char const* text); -//////////////////////////////////////////////////////////////////////////////// -/// @brief parses a json file and returns error message -//////////////////////////////////////////////////////////////////////////////// - -TRI_json_t* TRI_JsonFile(TRI_memory_zone_t*, char const* path, char** error); - //////////////////////////////////////////////////////////////////////////////// /// @brief default deleter for TRI_json_t /// this can be used to put a TRI_json_t with TRI_UNKNOWN_MEM_ZONE into an diff --git a/lib/JsonParser/json-parser.ll b/lib/JsonParser/json-parser.ll index ef8fc72128..80062c9683 100644 --- a/lib/JsonParser/json-parser.ll +++ b/lib/JsonParser/json-parser.ll @@ -542,73 +542,3 @@ TRI_json_t* TRI_JsonString (TRI_memory_zone_t* zone, char const* text) { return object; } -//////////////////////////////////////////////////////////////////////////////// -/// @brief parses a json file -//////////////////////////////////////////////////////////////////////////////// - -TRI_json_t* TRI_JsonFile (TRI_memory_zone_t* zone, char const* path, char** error) { - FILE* in; - TRI_json_t* value; - int c; - struct yyguts_t * yyg; - yyscan_t scanner; - - value = static_cast(TRI_Allocate(zone, sizeof(TRI_json_t), 
false)); - - if (value == nullptr) { - // out of memory - return nullptr; - } - - in = fopen(path, "rb"); - - if (in == nullptr) { - TRI_Free(zone, value); - - return nullptr; - } - - // init as a JSON null object so the memory in value is initialized - TRI_InitNullJson(value); - - yylex_init(&scanner); - yyg = (struct yyguts_t*) scanner; - - yyextra._memoryZone = zone; - yyin = in; - - c = yylex(scanner); - if (! ParseValue(scanner, value, c)) { - TRI_FreeJson(zone, value); - value = nullptr; - } - else { - c = yylex(scanner); - - if (c != END_OF_FILE) { - TRI_FreeJson(zone, value); - value = nullptr; - } - } - - if (error != nullptr) { - if (yyextra._message != nullptr) { - *error = TRI_DuplicateString(yyextra._message); - } - else { - *error = nullptr; - } - } - - yylex_destroy(scanner); - - fclose(in); - - return value; -} - -// Local Variables: -// mode: C -// mode: outline-minor -// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|// --SECTION--\\|/// @\\}\\)" -// End: From 94c2f4c9b4937cbd97d40c04c635277d0210017c Mon Sep 17 00:00:00 2001 From: Jan Steemann Date: Fri, 6 May 2016 13:44:33 +0200 Subject: [PATCH 10/31] fixed crash --- arangod/Aql/AqlValue.cpp | 14 +++++++------- arangod/Aql/AqlValue.h | 10 +++++----- arangod/Aql/EnumerateListBlock.cpp | 2 +- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/arangod/Aql/AqlValue.cpp b/arangod/Aql/AqlValue.cpp index 8a780f4be9..e2f89a1daf 100644 --- a/arangod/Aql/AqlValue.cpp +++ b/arangod/Aql/AqlValue.cpp @@ -152,8 +152,8 @@ AqlValue AqlValue::at(int64_t position, bool& mustDestroy, mustDestroy = false; switch (type()) { case VPACK_SLICE_POINTER: - case VPACK_INLINE: doCopy = false; + case VPACK_INLINE: // fall-through intentional case VPACK_MANAGED: { VPackSlice s(slice()); @@ -228,8 +228,8 @@ AqlValue AqlValue::getKeyAttribute(arangodb::AqlTransaction* trx, mustDestroy = false; switch (type()) { case VPACK_SLICE_POINTER: - case VPACK_INLINE: doCopy = false; + case 
VPACK_INLINE: // fall-through intentional case VPACK_MANAGED: { VPackSlice s(slice()); @@ -264,8 +264,8 @@ AqlValue AqlValue::getIdAttribute(arangodb::AqlTransaction* trx, mustDestroy = false; switch (type()) { case VPACK_SLICE_POINTER: - case VPACK_INLINE: doCopy = false; + case VPACK_INLINE: // fall-through intentional case VPACK_MANAGED: { VPackSlice s(slice()); @@ -305,8 +305,8 @@ AqlValue AqlValue::getFromAttribute(arangodb::AqlTransaction* trx, mustDestroy = false; switch (type()) { case VPACK_SLICE_POINTER: - case VPACK_INLINE: doCopy = false; + case VPACK_INLINE: // fall-through intentional case VPACK_MANAGED: { VPackSlice s(slice()); @@ -341,8 +341,8 @@ AqlValue AqlValue::getToAttribute(arangodb::AqlTransaction* trx, mustDestroy = false; switch (type()) { case VPACK_SLICE_POINTER: - case VPACK_INLINE: doCopy = false; + case VPACK_INLINE: // fall-through intentional case VPACK_MANAGED: { VPackSlice s(slice()); @@ -378,8 +378,8 @@ AqlValue AqlValue::get(arangodb::AqlTransaction* trx, mustDestroy = false; switch (type()) { case VPACK_SLICE_POINTER: - case VPACK_INLINE: doCopy = false; + case VPACK_INLINE: // fall-through intentional case VPACK_MANAGED: { VPackSlice s(slice()); @@ -420,8 +420,8 @@ AqlValue AqlValue::get(arangodb::AqlTransaction* trx, mustDestroy = false; switch (type()) { case VPACK_SLICE_POINTER: - case VPACK_INLINE: doCopy = false; + case VPACK_INLINE: // fall-through intentional case VPACK_MANAGED: { VPackSlice s(slice()); diff --git a/arangod/Aql/AqlValue.h b/arangod/Aql/AqlValue.h index e06292accc..3ecb8f405e 100644 --- a/arangod/Aql/AqlValue.h +++ b/arangod/Aql/AqlValue.h @@ -120,7 +120,9 @@ struct AqlValue final { // construct boolean value type explicit AqlValue(bool value) { - initFromSlice(value ? arangodb::basics::VelocyPackHelper::TrueValue() : arangodb::basics::VelocyPackHelper::FalseValue()); + VPackSlice slice(value ? 
arangodb::basics::VelocyPackHelper::TrueValue() : arangodb::basics::VelocyPackHelper::FalseValue()); + memcpy(_data.internal, slice.begin(), slice.byteSize()); + setType(AqlValueType::VPACK_INLINE); } // construct from std::string @@ -323,12 +325,10 @@ struct AqlValue final { } /// @brief initializes value from a slice - void initFromSlice(arangodb::velocypack::Slice const& slice) { + void initFromSlice(arangodb::velocypack::Slice slice) { if (slice.isExternal()) { // recursively resolve externals - _data.pointer = slice.resolveExternals().start(); - setType(AqlValueType::VPACK_SLICE_POINTER); - return; + slice = slice.resolveExternals(); } arangodb::velocypack::ValueLength length = slice.byteSize(); if (length < sizeof(_data.internal)) { diff --git a/arangod/Aql/EnumerateListBlock.cpp b/arangod/Aql/EnumerateListBlock.cpp index 9497399d0d..838c39495e 100644 --- a/arangod/Aql/EnumerateListBlock.cpp +++ b/arangod/Aql/EnumerateListBlock.cpp @@ -130,7 +130,7 @@ AqlItemBlock* EnumerateListBlock::getSome(size_t, size_t atMost) { for (size_t j = 0; j < toSend; j++) { if (j > 0) { - // re-use already copied aqlvalues + // re-use already copied AqlValues for (RegisterId i = 0; i < cur->getNrRegs(); i++) { res->setValue(j, i, res->getValueReference(0, i)); // Note that if this throws, all values will be From fb723c3c2435cf9de22189fc4a35adc10531df6b Mon Sep 17 00:00:00 2001 From: Jan Steemann Date: Fri, 6 May 2016 13:59:24 +0200 Subject: [PATCH 11/31] fixed crash --- arangod/Aql/CalculationBlock.cpp | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/arangod/Aql/CalculationBlock.cpp b/arangod/Aql/CalculationBlock.cpp index 3b6047418c..d8bcbcc2b7 100644 --- a/arangod/Aql/CalculationBlock.cpp +++ b/arangod/Aql/CalculationBlock.cpp @@ -90,15 +90,10 @@ void CalculationBlock::fillBlockWithReference(AqlItemBlock* result) { // care of correct freeing: auto a = result->getValueReference(i, _inRegs[0]); - try { - 
TRI_IF_FAILURE("CalculationBlock::fillBlockWithReference") { - THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG); - } - result->setValue(i, _outReg, a); - } catch (...) { - a.destroy(); - throw; + TRI_IF_FAILURE("CalculationBlock::fillBlockWithReference") { + THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG); } + result->setValue(i, _outReg, a); } } From 316f2bd1fbbfb80955a7f894bb9e7a10b11d36e1 Mon Sep 17 00:00:00 2001 From: Jan Steemann Date: Fri, 6 May 2016 14:00:00 +0200 Subject: [PATCH 12/31] updated changes --- .../Books/AQL/Functions/TypeCast.mdpp | 18 ++--- Documentation/Books/AQL/Operators.mdpp | 31 ++++----- .../ReleaseNotes/UpgradingChanges30.mdpp | 66 ++++++++++++++++++- 3 files changed, 88 insertions(+), 27 deletions(-) diff --git a/Documentation/Books/AQL/Functions/TypeCast.mdpp b/Documentation/Books/AQL/Functions/TypeCast.mdpp index 4daf21c3ca..08fc57a7a6 100644 --- a/Documentation/Books/AQL/Functions/TypeCast.mdpp +++ b/Documentation/Books/AQL/Functions/TypeCast.mdpp @@ -44,30 +44,30 @@ not not 1 // true - Strings are converted to their numeric equivalent if the string contains a valid representation of a number. Whitespace at the start and end of the string is allowed. String values that do not contain any valid representation of a number - will be converted to *null*. + will be converted to the number *0*. - An empty array is converted to *0*, an array with one member is converted into the result of `TO_NUMBER()` for its sole member. An array with two or more members is - converted to *null*. - - An object / document is converted to *null*. + converted to the number *0*. + - An object / document is converted to the number *0*. 
An unary plus will also try to cast to a number, but `TO_NUMBER()` is the preferred way: ```js +'5' // 5 +[8] // 8 -+[8,9] // null -+{} // null ++[8,9] // 0 ++{} // 0 ``` - An unary minus works likewise, except that a numeric value is also negated: + A unary minus works likewise, except that a numeric value is also negated: ```js -'5' // -5 -[8] // -8 --[8,9] // null --{} // null +-[8,9] // 0 +-{} // 0 ``` - *TO_STRING(value)*: Takes an input *value* of any type and converts it into a string value as follows: - - *null* is converted to the string *"null"* + - *null* is converted to the empty string *""* - *false* is converted to the string *"false"*, *true* to the string *"true"* - Numbers are converted to their string representations. This can also be a scientific notation: `TO_STRING(0.0000002) // "2e-7"` diff --git a/Documentation/Books/AQL/Operators.mdpp b/Documentation/Books/AQL/Operators.mdpp index 5f4ea966e2..ba306cf734 100644 --- a/Documentation/Books/AQL/Operators.mdpp +++ b/Documentation/Books/AQL/Operators.mdpp @@ -198,42 +198,39 @@ Some example arithmetic operations: -15 +9.99 -The arithmetic operators accept operands of any type. This behavior has changed in -ArangoDB 2.3. Passing non-numeric values to an arithmetic operator is now allow. -Any non-numeric operands will be casted to numbers implicitly by the operator, -without making the query abort. +The arithmetic operators accept operands of any type. 
Passing non-numeric values to an +arithmetic operator will cast the operands to numbers using the type casting rules +applied by the `TO_NUMBER` function: -The *conversion to a numeric value* works as follows: - `null` will be converted to `0` - `false` will be converted to `0`, true will be converted to `1` -- a valid numeric value remains unchanged, but NaN and Infinity will be converted to `null` +- a valid numeric value remains unchanged, but NaN and Infinity will be converted to `0` - string values are converted to a number if they contain a valid string representation of a number. Any whitespace at the start or the end of the string is ignored. Strings - with any other contents are converted to `null` + with any other contents are converted to the number `0` - an empty array is converted to `0`, an array with one member is converted to the numeric - representation of its sole member. Arrays with more members are converted - to `null` -- objects / documents are converted to `null` + representation of its sole member. Arrays with more members are converted to the number + `0`. +- objects / documents are converted to the number `0`. -If the conversion to a number produces a value of `null` for one of the operands, -the result of the whole arithmetic operation will also be `null`. An arithmetic operation -that produces an invalid value, such as `1 / 0` will also produce a value of `null`. +An arithmetic operation that produces an invalid value, such as `1 / 0` will also produce +a result value of `0`. 
Here are a few examples: - 1 + "a" // null + 1 + "a" // 1 1 + "99" // 100 1 + null // 1 null + 1 // 1 3 + [ ] // 3 24 + [ 2 ] // 26 - 24 + [ 2, 4 ] // null + 24 + [ 2, 4 ] // 0 25 - null // 25 17 - true // 16 - 23 * { } // null + 23 * { } // 0 5 * [ 7 ] // 35 24 / "12" // 2 - 1 / 0 // null + 1 / 0 // 0 !SUBSUBSECTION Ternary operator diff --git a/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp b/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp index 021c2898e7..af7c977907 100644 --- a/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp +++ b/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp @@ -39,10 +39,74 @@ by the drop operation now. !SECTION AQL +!SUBSECTION Typecasting functions + +The type casting applied by the `TO_NUMBER()` AQL function has changed as follows: +- string values that do not contain a valid numeric value are now converted to the number + `0`. In previous versions of ArangoDB such string values were converted to the value + `null`. +- array values with more than 1 member are now converted to the number `0`. In previous + versions of ArangoDB such arrays were converted to the value `null`. +- objects / documents are now converted to the number `0`. In previous versions of ArangoDB + objects / documents were converted to the value `null`. + +Additionally, the `TO_STRING()` AQL function now converts `null` values into an empty string +(`""`) instead of the string `"null"`. + + +!SUBSECTION Arithmetic operators + +As the arithmetic operations in AQL implicitly convert their operands to numeric values using +`TO_NUMBER()`, their casting behavior has also changed as described above. + +Some examples of the changed behavior: + +- `"foo" + 1` produces `1` now. In previous versions this produced `null`. +- `[ 1, 2 ] + 1` produces `1`. In previous versions this produced `null`. +- `1 + "foo" + 1` produces `2` now. In previous versions this produced `1`. 
+ +!SUBSECTION Keywords + `LIKE` is now a keyword in AQL. Using `LIKE` in either case as an attribute or collection name in AQL queries now requires quoting. -The AQL optimizer rule "merge-traversal-filter" was renamed to "optimize-traversals". +!SUBSECTION Subqueries + +Queries that contain subqueries that contain data-modification operations such as `INSERT`, +`UPDATE`, `REPLACE`, `UPSERT` or `REMOVE` will now refuse to execute if the collection +affected by the subquery's data-modification operation is read-accessed in an outer scope +of the query. + +For example, the following query will refuse to execute as the collection `myCollection` +is modified in the subquery but also read-accessed in the outer scope: + +``` +FOR doc IN myCollection + LET changes = ( + FOR what IN myCollection + FILTER what.value == 1 + REMOVE what IN myCollection + ) + RETURN doc +``` + +It is still possible to write to collections from which data is read in the same query, +e.g. + +``` +FOR doc IN myCollection + FILTER doc.value == 1 + REMOVE doc IN myCollection +``` + +and to modify data in a different collection via subqueries. + + +!SUBSECTION Other changes + +The AQL optimizer rule "merge-traversal-filter" that already existed in 3.0 was renamed to +"optimize-traversals". This should be of no relevance to client applications except if +they programmatically look for applied optimizer rules in the explain output of AQL queries. 
!SECTION Command-line options From 04ba5156e50a4d4018083293d16bc9b28e539649 Mon Sep 17 00:00:00 2001 From: Andreas Streichardt Date: Fri, 6 May 2016 17:03:13 +0200 Subject: [PATCH 13/31] Rework planned collection loading --- arangod/Cluster/ClusterInfo.cpp | 389 +++++++----------- arangod/Cluster/ClusterInfo.h | 181 ++++---- arangod/Cluster/v8-cluster.cpp | 2 +- arangod/Utils/Transaction.cpp | 8 +- arangod/V8Server/v8-vocindex.cpp | 6 +- arangod/VocBase/collection.cpp | 11 +- .../modules/@arangodb/arango-statement.js | 7 +- scripts/startLocalCluster.sh | 5 +- 8 files changed, 251 insertions(+), 358 deletions(-) diff --git a/arangod/Cluster/ClusterInfo.cpp b/arangod/Cluster/ClusterInfo.cpp index b1191826dc..25b4d29250 100644 --- a/arangod/Cluster/ClusterInfo.cpp +++ b/arangod/Cluster/ClusterInfo.cpp @@ -99,34 +99,25 @@ static std::string extractErrorMessage(std::string const& shardId, } //////////////////////////////////////////////////////////////////////////////// -/// @brief creates an empty collection info object +/// @brief creates an empty collection info object //////////////////////////////////////////////////////////////////////////////// - -CollectionInfo::CollectionInfo() : _json(nullptr) {} +CollectionInfo::CollectionInfo() + : _vpack(std::make_shared()) { +} //////////////////////////////////////////////////////////////////////////////// /// @brief creates a collection info object from json //////////////////////////////////////////////////////////////////////////////// -CollectionInfo::CollectionInfo(TRI_json_t* json) : _json(json) {} +CollectionInfo::CollectionInfo(std::shared_ptr vpack, VPackSlice slice) + : _vpack(vpack), _slice(slice) {} //////////////////////////////////////////////////////////////////////////////// /// @brief creates a collection info object from another //////////////////////////////////////////////////////////////////////////////// CollectionInfo::CollectionInfo(CollectionInfo const& other) - : _json(other._json) { - if 
(other._json != nullptr) { - _json = TRI_CopyJson(TRI_UNKNOWN_MEM_ZONE, other._json); - } -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief move constructs a collection info object from another -//////////////////////////////////////////////////////////////////////////////// - -CollectionInfo::CollectionInfo(CollectionInfo&& other) : _json(other._json) { - other._json = nullptr; + : _vpack(other._vpack), _slice(other._slice) { } //////////////////////////////////////////////////////////////////////////////// @@ -134,29 +125,8 @@ CollectionInfo::CollectionInfo(CollectionInfo&& other) : _json(other._json) { //////////////////////////////////////////////////////////////////////////////// CollectionInfo& CollectionInfo::operator=(CollectionInfo const& other) { - if (other._json != nullptr && this != &other) { - _json = TRI_CopyJson(TRI_UNKNOWN_MEM_ZONE, other._json); - } else { - _json = nullptr; - } - - return *this; -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief move assigns a collection info object from another one -//////////////////////////////////////////////////////////////////////////////// - -CollectionInfo& CollectionInfo::operator=(CollectionInfo&& other) { - if (this == &other) { - return *this; - } - - if (_json != nullptr) { - TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, _json); - } - _json = other._json; - other._json = nullptr; + _vpack = other._vpack; + _slice = other._slice; return *this; } @@ -166,9 +136,6 @@ CollectionInfo& CollectionInfo::operator=(CollectionInfo&& other) { //////////////////////////////////////////////////////////////////////////////// CollectionInfo::~CollectionInfo() { - if (_json != nullptr) { - TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, _json); - } } //////////////////////////////////////////////////////////////////////////////// @@ -339,7 +306,6 @@ void ClusterInfo::flush() { loadCurrentCoordinators(); loadPlan(); loadCurrentDatabases(); - 
loadPlannedCollections(); loadCurrentCollections(); } @@ -483,9 +449,15 @@ void ClusterInfo::loadPlan() { if (planSlice.isObject()) { decltype(_plannedDatabases) newDatabases; + decltype(_plannedCollections) newCollections; + decltype(_shards) newShards; + decltype(_shardKeys) newShardKeys; + bool swapDatabases = false; - - auto databasesSlice = planSlice.get("Databases"); + bool swapCollections = false; + + VPackSlice databasesSlice; + databasesSlice = planSlice.get("Databases"); if (databasesSlice.isObject()) { for (auto const& database : VPackObjectIterator(databasesSlice)) { std::string const& name = database.key.copyString(); @@ -495,11 +467,64 @@ void ClusterInfo::loadPlan() { swapDatabases = true; } + // mop: immediate children of collections are DATABASES, followed by their collections + databasesSlice = planSlice.get("Collections"); + if (databasesSlice.isObject()) { + for (auto const& databasePairSlice : VPackObjectIterator(databasesSlice)) { + VPackSlice const& collectionsSlice = databasePairSlice.value; + if (!collectionsSlice.isObject()) { + continue; + } + DatabaseCollections databaseCollections; + std::string const databaseName = databasePairSlice.key.copyString(); + + for (auto const& collectionPairSlice: VPackObjectIterator(collectionsSlice)) { + VPackSlice const& collectionSlice = collectionPairSlice.value; + if (!collectionSlice.isObject()) { + continue; + } + + std::string const collectionId = collectionPairSlice.key.copyString(); + auto newCollection = std::make_shared(planBuilder, collectionSlice); + std::string const collectionName = newCollection->name(); + + // mop: register with name as well as with id + databaseCollections.emplace( + std::make_pair(collectionName, newCollection)); + databaseCollections.emplace(std::make_pair(collectionId, newCollection)); + + auto shardKeys = std::make_shared>( + newCollection->shardKeys()); + newShardKeys.insert(make_pair(collectionId, shardKeys)); + + auto shardIDs = newCollection->shardIds(); + auto 
shards = std::make_shared>(); + for (auto const& p : *shardIDs) { + shards->push_back(p.first); + } + // Sort by the number in the shard ID ("s0000001" for example): + std::sort(shards->begin(), shards->end(), + [](std::string const& a, std::string const& b) -> bool { + return std::strtol(a.c_str() + 1, nullptr, 10) < + std::strtol(b.c_str() + 1, nullptr, 10); + }); + newShards.emplace(std::make_pair(collectionId, shards)); + } + newCollections.emplace(std::make_pair(databaseName, databaseCollections)); + swapCollections = true; + } + } + WRITE_LOCKER(writeLocker, _planProt.lock); _plan = planBuilder; if (swapDatabases) { _plannedDatabases.swap(newDatabases); } + if (swapCollections) { + _plannedCollections.swap(newCollections); + _shards.swap(newShards); + _shardKeys.swap(newShardKeys); + } _planProt.version++; // such that others notice our change _planProt.isValid = true; // will never be reset to false return; @@ -617,114 +642,6 @@ void ClusterInfo::loadCurrentDatabases() { << " body: " << result.body(); } -//////////////////////////////////////////////////////////////////////////////// -/// @brief (re-)load the information about collections from the agency -/// Usually one does not have to call this directly. 
-//////////////////////////////////////////////////////////////////////////////// - -static std::string const prefixPlannedCollections = "Plan/Collections"; - -void ClusterInfo::loadPlannedCollections() { - uint64_t storedVersion = _plannedCollectionsProt.version; - MUTEX_LOCKER(mutexLocker, _plannedCollectionsProt.mutex); - if (_plannedCollectionsProt.version > storedVersion) { - // Somebody else did, what we intended to do, so just return - return; - } - - // Now contact the agency: - AgencyCommResult result; - { - AgencyCommLocker locker("Plan", "READ"); - - if (locker.successful()) { - result = _agency.getValues2(prefixPlannedCollections, true); - } else { - LOG(ERR) << "Error while locking " << prefixPlannedCollections; - return; - } - } - - if (result.successful()) { - - velocypack::Slice databases = - result._vpack->slice()[0].get(std::vector( - {AgencyComm::prefixStripped(), "Plan", "Collections"})); - - if (!databases.isNone()) { - decltype(_plannedCollections) newCollections; - decltype(_shards) newShards; - decltype(_shardKeys) newShardKeys; - - for (auto const& db : VPackObjectIterator(databases)) { - std::string const database = db.key.copyString(); - - for (auto const& col : VPackObjectIterator(db.value)) { - std::string const collection = col.key.copyString(); - - // check whether we have created an entry for the database already - AllCollections::iterator it2 = newCollections.find(database); - - if (it2 == newCollections.end()) { - // not yet, so create an entry for the database - DatabaseCollections empty; - newCollections.emplace(std::make_pair(database, empty)); - it2 = newCollections.find(database); - } - - // TODO: The Collection info has to store VPack instead of JSON - TRI_json_t* json = arangodb::basics::VelocyPackHelper::velocyPackToJson( - col.value); - - auto collectionData = std::make_shared(json); - auto shardKeys = std::make_shared>( - collectionData->shardKeys()); - newShardKeys.insert(make_pair(collection, shardKeys)); - auto 
shardIDs = collectionData->shardIds(); - auto shards = std::make_shared>(); - for (auto const& p : *shardIDs) { - shards->push_back(p.first); - } - // Sort by the number in the shard ID ("s0000001" for example): - std::sort(shards->begin(), shards->end(), - [](std::string const& a, std::string const& b) -> bool { - return std::strtol(a.c_str() + 1, nullptr, 10) < - std::strtol(b.c_str() + 1, nullptr, 10); - }); - newShards.emplace(std::make_pair(collection, shards)); - - // insert the collection into the existing map, insert it under its - // ID as well as under its name, so that a lookup can be done with - // either of the two. - - (*it2).second.emplace(std::make_pair(collection, collectionData)); - (*it2).second.emplace( - std::make_pair(collectionData->name(), collectionData)); - - } - } - - // Now set the new value: - { - WRITE_LOCKER(writeLocker, _plannedCollectionsProt.lock); - - _plannedCollections.swap(newCollections); - _shards.swap(newShards); - _shardKeys.swap(newShardKeys); - _plannedCollectionsProt.version++; // such that others notice our change - _plannedCollectionsProt.isValid = true; // will never be reset to false - } - return; - } - } - - LOG(ERR) << "Error while loading " << prefixPlannedCollections - << " httpCode: " << result.httpCode() - << " errorCode: " << result.errorCode() - << " errorMessage: " << result.errorMessage() - << " body: " << result.body(); -} - //////////////////////////////////////////////////////////////////////////////// /// @brief ask about a collection /// If it is not found in the cache, the cache is reloaded once @@ -734,14 +651,14 @@ std::shared_ptr ClusterInfo::getCollection( DatabaseID const& databaseID, CollectionID const& collectionID) { int tries = 0; - if (!_plannedCollectionsProt.isValid) { - loadPlannedCollections(); + if (!_planProt.isValid) { + loadPlan(); ++tries; } while (true) { // left by break { - READ_LOCKER(readLocker, _plannedCollectionsProt.lock); + READ_LOCKER(readLocker, _planProt.lock); // look 
up database by id AllCollections::const_iterator it = _plannedCollections.find(databaseID); @@ -760,7 +677,7 @@ std::shared_ptr ClusterInfo::getCollection( } // must load collections outside the lock - loadPlannedCollections(); + loadPlan(); } return std::make_shared(); @@ -795,9 +712,9 @@ std::vector> const ClusterInfo::getCollections( std::vector> result; // always reload - loadPlannedCollections(); + loadPlan(); - READ_LOCKER(readLocker, _plannedCollectionsProt.lock); + READ_LOCKER(readLocker, _planProt.lock); // look up database by id AllCollections::const_iterator it = _plannedCollections.find(databaseID); @@ -1155,7 +1072,6 @@ int ClusterInfo::dropDatabaseCoordinator(std::string const& name, // Load our own caches: loadPlan(); - loadPlannedCollections(); // Now wait for it to appear and be complete: res.clear(); @@ -1201,9 +1117,9 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName, { // check if a collection with the same name is already planned - loadPlannedCollections(); + loadPlan(); - READ_LOCKER(readLocker, _plannedCollectionsProt.lock); + READ_LOCKER(readLocker, _planProt.lock); AllCollections::const_iterator it = _plannedCollections.find(databaseName); if (it != _plannedCollections.end()) { std::string const name = @@ -1299,7 +1215,7 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName, } // Update our cache: - loadPlannedCollections(); + loadPlan(); while (TRI_microtime() <= endTime) { agencyCallback->executeByCallbackOrTimeout(interval); @@ -1391,7 +1307,7 @@ int ClusterInfo::dropCollectionCoordinator(std::string const& databaseName, } // Update our own cache: - loadPlannedCollections(); + loadPlan(); while (TRI_microtime() <= endTime) { agencyCallback->executeByCallbackOrTimeout(interval); @@ -1475,7 +1391,7 @@ int ClusterInfo::setCollectionPropertiesCoordinator( } if (res.successful()) { - loadPlannedCollections(); + loadPlan(); return TRI_ERROR_NO_ERROR; } @@ -1554,7 +1470,7 @@ int 
ClusterInfo::setCollectionStatusCoordinator( } if (res.successful()) { - loadPlannedCollections(); + loadPlan(); return TRI_ERROR_NO_ERROR; } @@ -1711,7 +1627,7 @@ int ClusterInfo::ensureIndexCoordinator( } velocypack::Slice collection = database.get(collectionID); - loadPlannedCollections(); + loadPlan(); // It is possible that between the fetching of the planned collections // and the write lock we acquire below something has changed. Therefore // we first get the previous value and then do a compare and swap operation. @@ -1722,7 +1638,7 @@ int ClusterInfo::ensureIndexCoordinator( return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_LOCK_PLAN, errorMsg); } - std::shared_ptr collectionBuilder; + auto collectionBuilder = std::make_shared(); { std::shared_ptr c = getCollection(databaseName, collectionID); @@ -1732,14 +1648,14 @@ int ClusterInfo::ensureIndexCoordinator( // that getCollection fetches the read lock and releases it before // we get it again. // - READ_LOCKER(readLocker, _plannedCollectionsProt.lock); + READ_LOCKER(readLocker, _planProt.lock); if (c->empty()) { return setErrormsg(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND, errorMsg); } - std::shared_ptr tmp = - arangodb::basics::JsonHelper::toVelocyPack(c->getIndexes()); + std::shared_ptr tmp = std::make_shared(); + tmp->add(c->getIndexes()); MUTEX_LOCKER(guard, numberOfShardsMutex); { numberOfShards = c->numberOfShards(); @@ -1785,8 +1701,7 @@ int ClusterInfo::ensureIndexCoordinator( } // now create a new index - collectionBuilder = - arangodb::basics::JsonHelper::toVelocyPack(c->getJson()); + collectionBuilder->add(c->getSlice()); } VPackSlice const collectionSlice = collectionBuilder->slice(); @@ -1843,7 +1758,7 @@ int ClusterInfo::ensureIndexCoordinator( } // reload our own cache: - loadPlannedCollections(); + loadPlan(); TRI_ASSERT(numberOfShards > 0); @@ -1956,7 +1871,7 @@ int ClusterInfo::dropIndexCoordinator(std::string const& databaseName, _agencyCallbackRegistry->registerCallback(agencyCallback); 
TRI_DEFER(_agencyCallbackRegistry->unregisterCallback(agencyCallback)); - loadPlannedCollections(); + loadPlan(); // It is possible that between the fetching of the planned collections // and the write lock we acquire below something has changed. Therefore // we first get the previous value and then do a compare and swap operation. @@ -1967,93 +1882,71 @@ int ClusterInfo::dropIndexCoordinator(std::string const& databaseName, return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_LOCK_PLAN, errorMsg); } - TRI_json_t* collectionJson = nullptr; - TRI_json_t const* indexes = nullptr; - + VPackSlice indexes; { std::shared_ptr c = getCollection(databaseName, collectionID); - READ_LOCKER(readLocker, _plannedCollectionsProt.lock); + READ_LOCKER(readLocker, _planProt.lock); if (c->empty()) { return setErrormsg(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND, errorMsg); } indexes = c->getIndexes(); - if (!TRI_IsArrayJson(indexes)) { + if (!indexes.isArray()) { // no indexes present, so we can't delete our index return setErrormsg(TRI_ERROR_ARANGO_INDEX_NOT_FOUND, errorMsg); } - collectionJson = TRI_CopyJson(TRI_UNKNOWN_MEM_ZONE, c->getJson()); MUTEX_LOCKER(guard, numberOfShardsMutex); numberOfShards = c->numberOfShards(); } - if (collectionJson == nullptr) { - return setErrormsg(TRI_ERROR_OUT_OF_MEMORY, errorMsg); - } - - TRI_ASSERT(TRI_IsArrayJson(indexes)); - - // delete previous indexes entry - TRI_DeleteObjectJson(TRI_UNKNOWN_MEM_ZONE, collectionJson, "indexes"); - - TRI_json_t* copy = TRI_CreateArrayJson(TRI_UNKNOWN_MEM_ZONE); - - if (copy == nullptr) { - TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, collectionJson); - return setErrormsg(TRI_ERROR_OUT_OF_MEMORY, errorMsg); - } - bool found = false; + VPackBuilder newIndexes; + { + VPackArrayBuilder newIndexesArrayBuilder(&newIndexes); + // mop: eh....do we need a flag to mark it invalid until cache is renewed? 
+ // TRI_DeleteObjectJson(TRI_UNKNOWN_MEM_ZONE, collectionJson, "indexes"); + for (auto const& indexSlice: VPackArrayIterator(indexes)) { + VPackSlice id = indexSlice.get("id"); + VPackSlice type = indexSlice.get("type"); - // copy remaining indexes back into collection - size_t const n = TRI_LengthArrayJson(indexes); - for (size_t i = 0; i < n; ++i) { - TRI_json_t const* v = TRI_LookupArrayJson(indexes, i); - TRI_json_t const* id = TRI_LookupObjectJson(v, "id"); - TRI_json_t const* type = TRI_LookupObjectJson(v, "type"); - - if (!TRI_IsStringJson(id) || !TRI_IsStringJson(type)) { - continue; - } - - if (idString == std::string(id->_value._string.data)) { - // found our index, ignore it when copying - found = true; - - std::string const typeString(type->_value._string.data); - if (typeString == "primary" || typeString == "edge") { - // must not delete these index types - TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, copy); - TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, collectionJson); - return setErrormsg(TRI_ERROR_FORBIDDEN, errorMsg); + if (!id.isString() || !type.isString()) { + continue; } + if (idString == id.copyString()) { + // found our index, ignore it when copying + found = true; - continue; + std::string const typeString = type.copyString(); + if (typeString == "primary" || typeString == "edge") { + return setErrormsg(TRI_ERROR_FORBIDDEN, errorMsg); + } + continue; + } + newIndexes.add(indexSlice); } - - TRI_PushBack3ArrayJson(TRI_UNKNOWN_MEM_ZONE, copy, - TRI_CopyJson(TRI_UNKNOWN_MEM_ZONE, v)); } - - TRI_Insert3ObjectJson(TRI_UNKNOWN_MEM_ZONE, collectionJson, "indexes", - copy); - if (!found) { - // did not find the sought index - TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, collectionJson); return setErrormsg(TRI_ERROR_ARANGO_INDEX_NOT_FOUND, errorMsg); } - - auto tmp = arangodb::basics::JsonHelper::toVelocyPack(collectionJson); + + VPackBuilder newCollectionBuilder; + { + VPackObjectBuilder newCollectionObjectBuilder(&newCollectionBuilder); + for (auto const& property: 
VPackObjectIterator(previous)) { + if (property.key.copyString() == "indexes") { + newCollectionBuilder.add(property.key.copyString(), newIndexes.slice()); + } else { + newCollectionBuilder.add(property.key.copyString(), property.value); + } + } + } AgencyCommResult result = - ac.casValue(key, previous, tmp->slice(), 0.0, 0.0); - - TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, collectionJson); + ac.casValue(key, previous, newCollectionBuilder.slice(), 0.0, 0.0); if (!result.successful()) { return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION_IN_PLAN, @@ -2062,7 +1955,7 @@ int ClusterInfo::dropIndexCoordinator(std::string const& databaseName, } // load our own cache: - loadPlannedCollections(); + loadPlan(); { MUTEX_LOCKER(guard, numberOfShardsMutex); @@ -2455,15 +2348,15 @@ std::shared_ptr> ClusterInfo::getResponsibleServer( std::shared_ptr> ClusterInfo::getShardList( CollectionID const& collectionID) { - if (!_plannedCollectionsProt.isValid) { - loadPlannedCollections(); + if (!_planProt.isValid) { + loadPlan(); } int tries = 0; while (true) { { // Get the sharding keys and the number of shards: - READ_LOCKER(readLocker, _plannedCollectionsProt.lock); + READ_LOCKER(readLocker, _planProt.lock); // _shards is a map-type >> auto it = _shards.find(collectionID); @@ -2474,7 +2367,7 @@ std::shared_ptr> ClusterInfo::getShardList( if (++tries >= 2) { return std::make_shared>(); } - loadPlannedCollections(); + loadPlan(); } } @@ -2504,8 +2397,8 @@ int ClusterInfo::getResponsibleShard(CollectionID const& collectionID, // Note that currently we take the number of shards and the shardKeys // from Plan, since they are immutable. Later we will have to switch // this to Current, when we allow to add and remove shards. 
- if (!_plannedCollectionsProt.isValid) { - loadPlannedCollections(); + if (!_planProt.isValid) { + loadPlan(); } int tries = 0; @@ -2516,7 +2409,7 @@ int ClusterInfo::getResponsibleShard(CollectionID const& collectionID, while (true) { { // Get the sharding keys and the number of shards: - READ_LOCKER(readLocker, _plannedCollectionsProt.lock); + READ_LOCKER(readLocker, _planProt.lock); // _shards is a map-type >> auto it = _shards.find(collectionID); @@ -2537,7 +2430,7 @@ int ClusterInfo::getResponsibleShard(CollectionID const& collectionID, if (++tries >= 2) { break; } - loadPlannedCollections(); + loadPlan(); } if (!found) { @@ -2582,8 +2475,8 @@ int ClusterInfo::getResponsibleShard(CollectionID const& collectionID, // Note that currently we take the number of shards and the shardKeys // from Plan, since they are immutable. Later we will have to switch // this to Current, when we allow to add and remove shards. - if (!_plannedCollectionsProt.isValid) { - loadPlannedCollections(); + if (!_planProt.isValid) { + loadPlan(); } int tries = 0; @@ -2595,7 +2488,7 @@ int ClusterInfo::getResponsibleShard(CollectionID const& collectionID, while (true) { { // Get the sharding keys and the number of shards: - READ_LOCKER(readLocker, _plannedCollectionsProt.lock); + READ_LOCKER(readLocker, _planProt.lock); // _shards is a map-type >> auto it = _shards.find(collectionID); @@ -2621,7 +2514,7 @@ int ClusterInfo::getResponsibleShard(CollectionID const& collectionID, if (++tries >= 2) { break; } - loadPlannedCollections(); + loadPlan(); } if (!found) { @@ -2689,8 +2582,8 @@ void ClusterInfo::invalidatePlan() { _planProt.isValid = false; } { - WRITE_LOCKER(writeLocker, _plannedCollectionsProt.lock); - _plannedCollectionsProt.isValid = false; + WRITE_LOCKER(writeLocker, _planProt.lock); + _planProt.isValid = false; } } diff --git a/arangod/Cluster/ClusterInfo.h b/arangod/Cluster/ClusterInfo.h index 0bdbe45c9c..3328662c72 100644 --- a/arangod/Cluster/ClusterInfo.h +++ 
b/arangod/Cluster/ClusterInfo.h @@ -27,6 +27,7 @@ #include "Basics/Common.h" #include "Basics/JsonHelper.h" +#include "Basics/VelocyPackHelper.h" #include "Basics/Mutex.h" #include "Basics/ReadWriteLock.h" #include "Cluster/AgencyComm.h" @@ -35,6 +36,8 @@ #include "VocBase/vocbase.h" #include +#include +#include struct TRI_json_t; @@ -55,7 +58,7 @@ class CollectionInfo { public: CollectionInfo(); - explicit CollectionInfo(struct TRI_json_t*); + CollectionInfo(std::shared_ptr, VPackSlice); CollectionInfo(CollectionInfo const&); @@ -72,14 +75,8 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// int replicationFactor () const { - TRI_json_t* const node - = arangodb::basics::JsonHelper::getObjectElement(_json, - "replicationFactor"); - - if (TRI_IsNumberJson(node)) { - return (int) (node->_value._number); - } - return 1; + return arangodb::basics::VelocyPackHelper::getNumericValue( + _slice, "replicationFactor", 1); } ////////////////////////////////////////////////////////////////////////////// @@ -87,14 +84,8 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// int replicationQuorum () const { - TRI_json_t* const node - = arangodb::basics::JsonHelper::getObjectElement(_json, - "replicationQuorum"); - - if (TRI_IsNumberJson(node)) { - return (int) (node->_value._number); - } - return 1; + return arangodb::basics::VelocyPackHelper::getNumericValue( + _slice, "replicationQuorum", 1); } ////////////////////////////////////////////////////////////////////////////// @@ -102,7 +93,7 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// bool empty() const { - return (nullptr == _json); //|| (id() == 0); + return _slice.isNone(); } ////////////////////////////////////////////////////////////////////////////// @@ -110,7 +101,14 @@ class CollectionInfo { 
////////////////////////////////////////////////////////////////////////////// TRI_voc_cid_t id() const { - return arangodb::basics::JsonHelper::stringUInt64(_json, "id"); + if (!_slice.isObject()) { + return 0; + } + VPackSlice idSlice = _slice.get("id"); + if (idSlice.isString()) { + return arangodb::basics::VelocyPackHelper::stringUInt64(idSlice); + } + return 0; } ////////////////////////////////////////////////////////////////////////////// @@ -118,7 +116,7 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// std::string id_as_string() const { - return arangodb::basics::JsonHelper::getStringValue(_json, "id", ""); + return arangodb::basics::VelocyPackHelper::getStringValue(_slice, "id", ""); } ////////////////////////////////////////////////////////////////////////////// @@ -126,7 +124,7 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// std::string name() const { - return arangodb::basics::JsonHelper::getStringValue(_json, "name", ""); + return arangodb::basics::VelocyPackHelper::getStringValue(_slice, "name", ""); } ////////////////////////////////////////////////////////////////////////////// @@ -134,8 +132,8 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// TRI_col_type_e type() const { - return (TRI_col_type_e)arangodb::basics::JsonHelper::getNumericValue( - _json, "type", (int)TRI_COL_TYPE_UNKNOWN); + return (TRI_col_type_e)arangodb::basics::VelocyPackHelper::getNumericValue( + _slice, "type", (int)TRI_COL_TYPE_UNKNOWN); } ////////////////////////////////////////////////////////////////////////////// @@ -144,8 +142,8 @@ class CollectionInfo { TRI_vocbase_col_status_e status() const { return (TRI_vocbase_col_status_e) - arangodb::basics::JsonHelper::getNumericValue( - _json, "status", (int)TRI_VOC_COL_STATUS_CORRUPTED); + arangodb::basics::VelocyPackHelper::getNumericValue( + _slice, "status", 
(int)TRI_VOC_COL_STATUS_CORRUPTED); } ////////////////////////////////////////////////////////////////////////////// @@ -161,7 +159,7 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// bool deleted() const { - return arangodb::basics::JsonHelper::getBooleanValue(_json, "deleted", + return arangodb::basics::VelocyPackHelper::getBooleanValue(_slice, "deleted", false); } @@ -170,7 +168,7 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// bool doCompact() const { - return arangodb::basics::JsonHelper::getBooleanValue(_json, "doCompact", + return arangodb::basics::VelocyPackHelper::getBooleanValue(_slice, "doCompact", false); } @@ -179,7 +177,7 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// bool isSystem() const { - return arangodb::basics::JsonHelper::getBooleanValue(_json, "isSystem", + return arangodb::basics::VelocyPackHelper::getBooleanValue(_slice, "isSystem", false); } @@ -188,7 +186,7 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// bool isVolatile() const { - return arangodb::basics::JsonHelper::getBooleanValue(_json, "isVolatile", + return arangodb::basics::VelocyPackHelper::getBooleanValue(_slice, "isVolatile", false); } @@ -196,24 +194,22 @@ class CollectionInfo { /// @brief returns the indexes ////////////////////////////////////////////////////////////////////////////// - TRI_json_t const* getIndexes() const { - return arangodb::basics::JsonHelper::getObjectElement(_json, "indexes"); + VPackSlice const getIndexes() const { + if (_slice.isNone()) { + return VPackSlice(); + } + return _slice.get("indexes"); } ////////////////////////////////////////////////////////////////////////////// /// @brief returns a copy of the key options - /// the caller is responsible for freeing it 
////////////////////////////////////////////////////////////////////////////// - TRI_json_t* keyOptions() const { - TRI_json_t const* keyOptions = - arangodb::basics::JsonHelper::getObjectElement(_json, "keyOptions"); - - if (keyOptions != nullptr) { - return TRI_CopyJson(TRI_UNKNOWN_MEM_ZONE, keyOptions); + VPackSlice const keyOptions() const { + if (_slice.isNone()) { + return VPackSlice(); } - - return nullptr; + return _slice.get("keyOptions"); } ////////////////////////////////////////////////////////////////////////////// @@ -221,12 +217,10 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// bool allowUserKeys() const { - TRI_json_t const* keyOptions = - arangodb::basics::JsonHelper::getObjectElement(_json, "keyOptions"); - - if (keyOptions != nullptr) { - return arangodb::basics::JsonHelper::getBooleanValue( - keyOptions, "allowUserKeys", true); + VPackSlice keyOptionsSlice = keyOptions(); + if (!keyOptionsSlice.isNone()) { + return arangodb::basics::VelocyPackHelper::getBooleanValue( + keyOptionsSlice, "allowUserKeys", true); } return true; // the default value @@ -237,7 +231,7 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// bool waitForSync() const { - return arangodb::basics::JsonHelper::getBooleanValue(_json, "waitForSync", + return arangodb::basics::VelocyPackHelper::getBooleanValue(_slice, "waitForSync", false); } @@ -246,8 +240,8 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// TRI_voc_size_t journalSize() const { - return arangodb::basics::JsonHelper::getNumericValue( - _json, "journalSize", 0); + return arangodb::basics::VelocyPackHelper::getNumericValue( + _slice, "journalSize", 0); } ////////////////////////////////////////////////////////////////////////////// @@ -255,8 +249,8 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// 
uint32_t indexBuckets() const { - return arangodb::basics::JsonHelper::getNumericValue( - _json, "indexBuckets", 1); + return arangodb::basics::VelocyPackHelper::getNumericValue( + _slice, "indexBuckets", 1); } ////////////////////////////////////////////////////////////////////////////// @@ -264,9 +258,19 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// std::vector shardKeys() const { - TRI_json_t* const node = - arangodb::basics::JsonHelper::getObjectElement(_json, "shardKeys"); - return arangodb::basics::JsonHelper::stringArray(node); + std::vector shardKeys; + + if (_slice.isNone()) { + return shardKeys; + } + auto shardKeysSlice = _slice.get("shardKeys"); + if (shardKeysSlice.isArray()) { + for (auto const& shardKey: VPackArrayIterator(shardKeysSlice)) { + shardKeys.push_back(shardKey.copyString()); + } + } + + return shardKeys; } ////////////////////////////////////////////////////////////////////////////// @@ -274,15 +278,18 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// bool usesDefaultShardKeys() const { - TRI_json_t* const node = - arangodb::basics::JsonHelper::getObjectElement(_json, "shardKeys"); - if (TRI_LengthArrayJson(node) != 1) { + if (_slice.isNone()) { return false; } - TRI_json_t* firstKey = TRI_LookupArrayJson(node, 0); - TRI_ASSERT(TRI_IsStringJson(firstKey)); + auto shardKeysSlice = _slice.get("shardKeys"); + if (!shardKeysSlice.isArray() || shardKeysSlice.length() != 1) { + return false; + } + + auto firstElement = shardKeysSlice.get(0); + TRI_ASSERT(firstElement.isString()); std::string shardKey = - arangodb::basics::JsonHelper::getStringValue(firstKey, ""); + arangodb::basics::VelocyPackHelper::getStringValue(firstElement, ""); return shardKey == TRI_VOC_ATTRIBUTE_KEY; } @@ -302,22 +309,18 @@ class CollectionInfo { return res; } res.reset(new ShardMap()); - TRI_json_t* const node = - 
arangodb::basics::JsonHelper::getObjectElement(_json, "shards"); - if (node != nullptr && TRI_IsObjectJson(node)) { - size_t len = TRI_LengthVector(&node->_value._objects); - for (size_t i = 0; i < len; i += 2) { - auto key = - static_cast(TRI_AtVector(&node->_value._objects, i)); - auto value = static_cast( - TRI_AtVector(&node->_value._objects, i + 1)); - if (TRI_IsStringJson(key) && TRI_IsArrayJson(value)) { - ShardID shard = arangodb::basics::JsonHelper::getStringValue(key, ""); - std::vector servers = - arangodb::basics::JsonHelper::stringArray(value); - if (shard != "") { - (*res).insert(make_pair(shard, servers)); + + auto shardsSlice = _slice.get("shards"); + if (shardsSlice.isObject()) { + for (auto const& shardSlice: VPackObjectIterator(shardsSlice)) { + if (shardSlice.key.isString() && shardSlice.value.isArray()) { + ShardID shard = shardSlice.key.copyString(); + + std::vector servers; + for (auto const& serverSlice: VPackArrayIterator(shardSlice.value)) { + servers.push_back(serverSlice.copyString()); } + (*res).insert(make_pair(shardSlice.key.copyString(), servers)); } } } @@ -333,11 +336,13 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// int numberOfShards() const { - TRI_json_t* const node = - arangodb::basics::JsonHelper::getObjectElement(_json, "shards"); + if (_slice.isNone()) { + return 0; + } + auto shardsSlice = _slice.get("shards"); - if (TRI_IsObjectJson(node)) { - return (int)(TRI_LengthVector(&node->_value._objects) / 2); + if (shardsSlice.isObject()) { + return shardsSlice.length(); } return 0; } @@ -346,10 +351,16 @@ class CollectionInfo { /// @brief returns the json ////////////////////////////////////////////////////////////////////////////// - TRI_json_t const* getJson() const { return _json; } + std::shared_ptr const getVPack() const { return _vpack; } + + ////////////////////////////////////////////////////////////////////////////// + /// @brief returns the slice + 
////////////////////////////////////////////////////////////////////////////// + VPackSlice const getSlice() const { return _slice; } private: - TRI_json_t* _json; + std::shared_ptr _vpack; + VPackSlice _slice; // Only to protect the cache: mutable Mutex _mutex; @@ -583,13 +594,6 @@ class ClusterInfo { std::vector listDatabases(bool = false); - ////////////////////////////////////////////////////////////////////////////// - /// @brief (re-)load the information about planned collections from the agency - /// Usually one does not have to call this directly. - ////////////////////////////////////////////////////////////////////////////// - - void loadPlannedCollections(); - ////////////////////////////////////////////////////////////////////////////// /// @brief (re-)load the information about our plan /// Usually one does not have to call this directly. @@ -933,7 +937,6 @@ class ClusterInfo { // The Plan state: AllCollections _plannedCollections; // from Plan/Collections/ - ProtectionData _plannedCollectionsProt; std::unordered_map>> _shards; // from Plan/Collections/ diff --git a/arangod/Cluster/v8-cluster.cpp b/arangod/Cluster/v8-cluster.cpp index 16d781024a..a36153c1b8 100644 --- a/arangod/Cluster/v8-cluster.cpp +++ b/arangod/Cluster/v8-cluster.cpp @@ -824,7 +824,7 @@ static void JS_GetCollectionInfoClusterInfo( } result->Set(TRI_V8_ASCII_STRING("shards"), shardIds); - v8::Handle indexes = TRI_ObjectJson(isolate, ci->getIndexes()); + v8::Handle indexes = TRI_VPackToV8(isolate, ci->getIndexes()); result->Set(TRI_V8_ASCII_STRING("indexes"), indexes); TRI_V8_RETURN(result); diff --git a/arangod/Utils/Transaction.cpp b/arangod/Utils/Transaction.cpp index 4aec7c52b7..7475f6f16c 100644 --- a/arangod/Utils/Transaction.cpp +++ b/arangod/Utils/Transaction.cpp @@ -2996,9 +2996,7 @@ std::shared_ptr Transaction::indexForCollectionCoordinator( name.c_str(), _vocbase->_name); } - TRI_json_t const* json = (*collectionInfo).getIndexes(); - auto indexBuilder = 
arangodb::basics::JsonHelper::toVelocyPack(json); - VPackSlice const slice = indexBuilder->slice(); + VPackSlice const slice = (*collectionInfo).getIndexes(); if (slice.isArray()) { for (auto const& v : VPackArrayIterator(slice)) { @@ -3056,9 +3054,7 @@ std::vector> Transaction::indexesForCollectionCoordinator name.c_str(), _vocbase->_name); } - TRI_json_t const* json = collectionInfo->getIndexes(); - auto indexBuilder = arangodb::basics::JsonHelper::toVelocyPack(json); - VPackSlice const slice = indexBuilder->slice(); + VPackSlice const slice = collectionInfo->getIndexes(); if (slice.isArray()) { size_t const n = static_cast(slice.length()); diff --git a/arangod/V8Server/v8-vocindex.cpp b/arangod/V8Server/v8-vocindex.cpp index 7765f1f2a9..024520331d 100644 --- a/arangod/V8Server/v8-vocindex.cpp +++ b/arangod/V8Server/v8-vocindex.cpp @@ -1072,7 +1072,7 @@ static void CreateCollectionCoordinator( if (myerrno != TRI_ERROR_NO_ERROR) { TRI_V8_THROW_EXCEPTION_MESSAGE(myerrno, errorMsg); } - ci->loadPlannedCollections(); + ci->loadPlan(); std::shared_ptr c = ci->getCollection(databaseName, cid); TRI_vocbase_col_t* newcoll = CoordinatorCollection(vocbase, *c); @@ -1256,9 +1256,7 @@ static void GetIndexesCoordinator( v8::Handle ret = v8::Array::New(isolate); - std::shared_ptr tmp = - arangodb::basics::JsonHelper::toVelocyPack(c->getIndexes()); - VPackSlice slice = tmp->slice(); + VPackSlice slice = c->getIndexes(); if (slice.isArray()) { uint32_t j = 0; diff --git a/arangod/VocBase/collection.cpp b/arangod/VocBase/collection.cpp index 1973e5fdae..46dcb55393 100644 --- a/arangod/VocBase/collection.cpp +++ b/arangod/VocBase/collection.cpp @@ -894,12 +894,13 @@ VocbaseCollectionInfo::VocbaseCollectionInfo(CollectionInfo const& other) std::string const name = other.name(); memset(_name, 0, sizeof(_name)); memcpy(_name, name.c_str(), name.size()); + + VPackSlice keyOptionsSlice(other.keyOptions()); - std::unique_ptr otherOpts(other.keyOptions()); - if (otherOpts != nullptr) { - 
std::shared_ptr builder = - arangodb::basics::JsonHelper::toVelocyPack(otherOpts.get()); - _keyOptions = builder->steal(); + if (!keyOptionsSlice.isNone()) { + VPackBuilder builder; + builder.add(keyOptionsSlice); + _keyOptions = builder.steal(); } } diff --git a/js/server/modules/@arangodb/arango-statement.js b/js/server/modules/@arangodb/arango-statement.js index ca07babd1d..c62cfef782 100644 --- a/js/server/modules/@arangodb/arango-statement.js +++ b/js/server/modules/@arangodb/arango-statement.js @@ -79,8 +79,13 @@ ArangoStatement.prototype.execute = function () { opts.cache = this._cache; } } - + + try { var result = AQL_EXECUTE(this._query, this._bindVars, opts); + } catch (e) { + console.log("HASSHASSHASSHASS", this._query, e); + throw e; + } return new GeneralArrayCursor(result.json, 0, null, result); }; diff --git a/scripts/startLocalCluster.sh b/scripts/startLocalCluster.sh index 2bfb69d495..d56efcd97c 100755 --- a/scripts/startLocalCluster.sh +++ b/scripts/startLocalCluster.sh @@ -1,9 +1,6 @@ #!/bin/bash if [ -z "$XTERM" ] ; then - XTERM=x-terminal-emulator -fi -if [ -z "$XTERMOPTIONS" ] ; then - XTERMOPTIONS="--geometry=80x43" + XTERM=xterm fi From 1b9f10bfa8cdde093b2f2839988391f16d0c0bd5 Mon Sep 17 00:00:00 2001 From: Jan Steemann Date: Fri, 6 May 2016 17:14:11 +0200 Subject: [PATCH 14/31] updated API changes documentation --- .../ReleaseNotes/UpgradingChanges30.mdpp | 215 +++++++++++++++++- .../JSF_general_graph_create_http_examples.md | 8 +- .../JSF_general_graph_drop_http_examples.md | 7 +- ...graph_edge_definition_add_http_examples.md | 7 +- ...ph_edge_definition_modify_http_examples.md | 4 +- ...ph_edge_definition_remove_http_examples.md | 7 +- ...eneral_graph_edge_replace_http_examples.md | 4 +- ...aph_vertex_collection_add_http_examples.md | 7 +- ..._vertex_collection_remove_http_examples.md | 5 +- arangod/Utils/Transaction.cpp | 1 + 10 files changed, 249 insertions(+), 16 deletions(-) diff --git 
a/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp b/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp index af7c977907..89c481c6fa 100644 --- a/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp +++ b/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp @@ -212,15 +212,220 @@ syslog targets. !SECTION HTTP API changes +!SUBSECTION CRUD operations + +!SUBSUBSECTION General + +The HTTP insert operations for single documents and edges (POST `/_api/document`) do +not support the URL parameter "createCollection" anymore. In previous versions of +ArangoDB this parameter could be used to automatically create a collection upon +insertion of the first document. It is now required that the target collection already +exists when using this API, otherwise it will return an HTTP 404 error. + +Collections can still be created easily via a separate call to POST `/_api/collection` +as before. + +The "location" HTTP header returned by ArangoDB when inserting a new document or edge +now always contains the database name. This was also the default behavior in previous +versions of ArangoDB, but it could be overridden by clients sending the HTTP header +`x-arangodb-version: 1.4` in the request. Clients can continue to send this header to +ArangoDB 3.0, but the header will not influence the location response headers produced +by ArangoDB 3.0 anymore. + +Additionally the CRUD operations APIs do not return an attribute "error" in the +response body with an attribute value of "false" in case an operation succeeded. + +!SUBSUBSECTION Revision id handling + +The operations for updating, replacing and removing documents can optionally check the +revision number of the document to be updated, replaced or removed so the caller can +ensure the operation works on a specific version of the document and there are no +lost updates. 
+ +Previous versions of ArangoDB allowed passing the revision id of the previous document +either in the HTTP header `If-Match` or in the URL parameter `rev`. For example, +removing a document with a specific revision id could be achieved as follows: + +``` +curl -X DELETE \ + "http://127.0.0.1:8529/_api/document/myCollection/myKey?rev=123" +``` + +ArangoDB 3.0 does not support passing the revision id via the "rev" URL parameter +anymore. Instead the previous revision id must be passed in the HTTP header `If-Match`, +e.g. + +``` +curl -X DELETE \ + --header "If-Match: '123'" \ + "http://127.0.0.1:8529/_api/document/myCollection/myKey" +``` + +The URL parameter "policy" was also usable in previous versions of ArangoDB to +control revision handling. Using it was redundant to specifying the expected revision +id via the "rev" parameter or "If-Match" HTTP header and therefore support for the "policy" +parameter was removed in 3.0. + +In order to check for a previous revision id when updating, replacing or removing +documents please use the `If-Match` HTTP header as described above. When no revision +check if required the HTTP header can be omitted, and the operations will work on the +current revision of the document, regardless of its revision id. + + +!SUBSECTION All documents API + +The HTTP API for retrieving the ids, keys or URLs of all documents from a collection +was previously located at GET `/_api/document?collection=...`. This API was moved to +PUT `/_api/simple/all-keys` and is now executed as an AQL query. +The name of the collection must now be passed in the HTTP request body instead of in +the request URL. The same is true for the "type" parameter, which controls the type of +the result to be created. 
+ +Calls to the previous API can be translated as follows: + +- old: GET `/_api/document?collection=&type=` without HTTP request body +- 3.0: PUT `/_api/simple/all-keys` with HTTP request body `{"name":"","type":"id"}` + +The result format of this API has also changed slightly. In previous versions calls to +the API returned a JSON object with a `documents` attribute. As the functionality is +based on AQL internally in 3.0, the API now returns a JSON object with a `result` attribute: + +!SUBSECTION Edges API + +!SUBSUBSECTION CRUD operations + +The API for documents and edges have been unified in ArangoDB 3.0. The CRUD operations +for documents and edges are now handled by the same endpoint at `/_api/document`. For +CRUD operations there is no distinction anymore between documents and edges API-wise. + +That means CRUD operations concerning edges need to be sent to the HTTP endpoint +`/_api/document` instead of `/_api/edge`. Sending requests to `/_api/edge` will +result in an HTTP 404 error in 3.0. The following methods are available at +`/_api/document` for documents and edge: + +- HTTP POST: insert new document or edge +- HTTP GET: fetch an existing document or edge +- HTTP PUT: replace an existing document or edge +- HTTP PATCH: partially update an existing document or edge +- HTTP DELETE: remove an existing document or edge + +When completely replacing an edge via HTTP PUT please note that the replacing edge +data now needs to contain the `_from` and `_to` attributes for the edge. Previous +versions of ArangoDB did not require sending `_from` and `_to` when replacing edges, +as `_from` and `_to` values were immutable for existing edges. 
+

The `_from` and `_to` attributes of edges now also need to be present inside the
edges objects sent to the server:

```
curl -X POST \
  --data '{"value":1,"_from":"myVertexCollection/1","_to":"myVertexCollection/2"}' \
  "http://127.0.0.1:8529/_api/document?collection=myEdgeCollection"
```

Previous versions of ArangoDB required the `_from` and `_to` attributes of edges to be
sent separately in the URL parameters `from` and `to`:

```
curl -X POST \
  --data '{"value":1}' \
  "http://127.0.0.1:8529/_api/edge?collection=e&from=myVertexCollection/1&to=myVertexCollection/2"
```

!SUBSUBSECTION Querying connected edges

The REST API for querying connected edges at GET `/_api/edges/<collection>` will now
make the edge ids unique before returning the connected edges. This is probably desired anyway
as results will now be returned only once per distinct input edge id. However, it may break
client applications that rely on the old behavior.

!SUBSUBSECTION Graph API

Some data-modification operations in the named graphs API at `/_api/gharial` now return either
HTTP 202 (Accepted) or HTTP 201 (Created) if the operation succeeds. Which status code is returned
depends on the `waitForSync` attribute of the affected collection. In previous versions some
of these operations returned HTTP 200 regardless of the `waitForSync` value.

!SUBSECTION Simple queries API

The REST routes PUT `/_api/simple/first` and `/_api/simple/last` have been removed
entirely. These APIs were responsible for returning the first-inserted and
last-inserted documents in a collection. This feature was built on cap constraints
internally, which have been removed in 3.0.

Calling one of these endpoints in 3.0 will result in an HTTP 404 error.

!SUBSECTION Indexes API

It is not supported in 3.0 to create an index with type `cap` (cap constraint),
as the cap constraints feature has been removed. 
Calling the index creation
+endpoint HTTP API POST `/_api/index?collection=...` with an index type `cap` will
+therefore result in an HTTP 400 error.
+
+!SUBSECTION Log entries API
+
 The REST route HTTP GET `/_admin/log` is now accessible from within all databases. In
 previous versions of ArangoDB, this route was accessible from within the `_system`
 database only, and an HTTP 403 (Forbidden) was thrown by the server for any access
 from within another database.
 
-The undocumented HTTP REST endpoints `/_open/cerberus` and `/_system/cerberus` have
-been removed. These endpoints have been used by some ArangoDB-internal applications
-and were not part of ArangoDB's public API.
+!SUBSECTION Figures API
 
+The REST route HTTP GET `/_api/collection/<collection-name>/figures` will not return the
+following result attributes as they became meaningless in 3.0:
+
+- shapefiles.count
+- shapes.fileSize
+- shapes.count
+- shapes.size
+- attributes.count
+- attributes.size
+
+!SUBSECTION Databases and Collections APIs
+
+When creating a database via the API POST `/_api/database`, ArangoDB will now always
+return the HTTP status code 201 (created) if the operation succeeds. Previous versions
+of ArangoDB returned HTTP 201 as well, but this behavior was changeable by sending an
+HTTP header `x-arangodb-version: 1.4`. When sending this header, previous versions of
+ArangoDB returned an HTTP status code 200 (ok). Clients can still send this header to
+ArangoDB 3.0 but this will not influence the HTTP status code produced by ArangoDB.
+
+The "location" header produced by ArangoDB 3.0 will now always contain the database
+name. This was also the default in previous versions of ArangoDB, but the behavior
+could be overridden by sending the HTTP header `x-arangodb-version: 1.4`. Clients can
+still send the header, but this will not make the database name in the "location"
+response header disappear. 
+

!SUBSECTION Replication APIs

The URL parameter "failOnUnknown" was removed from the REST API GET `/_api/replication/dump`.
This parameter controlled whether dumping or replicating edges should fail if one
of the vertex collections linked in the edge's `_from` or `_to` attributes was not
present anymore. In this case the `_from` and `_to` values could not be translated into
meaningful ids anymore.

There were two ways for handling this:
- setting `failOnUnknown` to `true` caused the HTTP request to fail, leaving error
  handling to the user
- setting `failOnUnknown` to `false` caused the HTTP request to continue, translating
  the collection name part in the `_from` or `_to` value to `_unknown`.

In ArangoDB 3.0 this parameter is obsolete, as `_from` and `_to` are stored as self-contained
string values all the time, so they cannot get invalid when referenced collections are
dropped.

!SUBSECTION Undocumented APIs

The following undocumented HTTP REST endpoints have been removed from ArangoDB's REST
API:

- `/_open/cerberus` and `/_system/cerberus`: these endpoints were intended for some
  ArangoDB-internal applications only
- PUT `/_api/simple/by-example-hash`, PUT `/_api/simple/by-example-skiplist` and
  PUT `/_api/simple/by-condition-skiplist`: these methods were documented in early
  versions of ArangoDB but have been marked as not intended to be called by end
  users since ArangoDB version 2.3. These methods should not have been part of any
  ArangoDB manual since version 2.4.

!SECTION ArangoShell and client tools

@@ -252,6 +457,6 @@ been renamed to `arangobench` in 3.0.

!SECTION Miscellaneous changes

The checksum calculation algorithm for the `collection.checksum()` method and its
-corresponding REST API has changed in 3.0. Checksums calculated in 3.0 will differ
-from checksums calculated with 2.8 or before. 
+corresponding REST API GET `/_api/collection/ Date: Fri, 6 May 2016 17:59:06 +0200 Subject: [PATCH 15/31] removed unused functions --- .../ReleaseNotes/UpgradingChanges30.mdpp | 285 ++++++++++++------ .../modules/@arangodb/arango-collection.js | 2 - .../modules/@arangodb/arango-collection.js | 96 ------ 3 files changed, 195 insertions(+), 188 deletions(-) diff --git a/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp b/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp index 89c481c6fa..46c18f82f4 100644 --- a/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp +++ b/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp @@ -15,7 +15,6 @@ id component at the end. The new pattern is `collection--`, where `< is the collection id and `` is a random number. Previous versions of ArangoDB used a pattern `collection-` without the random number. - !SECTION Edges and edges attributes In ArangoDB prior to 3.0 the attributes `_from` and `_to` of edges were treated @@ -36,7 +35,6 @@ referenced by `_from` and `_to` values may be dropped and re-created later. Any `_from` and `_to` values of edges pointing to such dropped collection are unaffected by the drop operation now. - !SECTION AQL !SUBSECTION Typecasting functions @@ -53,7 +51,6 @@ The type casting applied by the `TO_NUMBER()` AQL function has changed as follow Additionally, the `TO_STRING()` AQL function now converts `null` values into an empty string (`""`) instead of the string `"null"`. - !SUBSECTION Arithmetic operators As the arithmetic operations in AQL implicitly convert their operands to numeric values using @@ -101,119 +98,125 @@ FOR doc IN myCollection and to modify data in different collection via subqueries. - !SUBSECTION Other changes The AQL optimizer rule "merge-traversal-filter" that already existed in 3.0 was renamed to "optimize-traversals". 
This should be of no relevance to client applications except if they
programmatically look for applied optimizer rules in the explain output of AQL queries.

+!SECTION JavaScript API changes

-!SECTION Command-line options
+The following incompatible changes have been made to the JavaScript API in ArangoDB 3.0:

-Quite a few startup options in ArangoDB 2 were double negations (like
-`--server.disable-authentication false`). In ArangoDB 3 these are now expressed as
-positives (e. g. `--server.authentication`). Also the options between the ArangoDB
-server and its client tools have being unified. For example, the logger options are
-now the same for the server and the client tools. Additionally many options have
-been moved into more appropriate topic sections.
+!SUBSECTION Edges API

-!SUBSECTION Renamed options
+When completely replacing an edge via a collection's `replace()` function the replacing
+edge data now needs to contain the `_from` and `_to` attributes for the new edge. Previous
+versions of ArangoDB did not require the edge data to contain `_from` and `_to` attributes
+when replacing an edge, since `_from` and `_to` values were immutable for existing edges.

-The following options have been available before 3.0 and have changed their name
-in 3.0:
+For example, the following call worked in ArangoDB 2.8 but will fail in 3.0:

-- `--server.disable-authentication` was renamed to `--server.authentication`.
-  Note that the meaning of the option `--server.authentication` is the opposite of
-  the previous `--server.disable-authentication`.
-- `--server.disable-authentication-unix-sockets` was renamed to
-  `--server.authentication-unix-sockets`. Note that the meaning of the option
-  `--server.authentication-unix-sockets` is the opposite of the previous
-  `--server.disable-authentication-unix-sockets`.
-- `--server.disable-statistics` was renamed to `--server.statistics`. 
Note that the - meaning of the option `--server.statistics` is the opposite of the previous - `--server.disable-statistics`. -- `--server.keyfile` was renamed to `--ssl.keyfile`. The meaning of the option is - unchanged. -- `--server.foxx-queues` was renamed to `--foxx.queues`. The meaning of the option - is unchanged. -- `--server.foxx-queues-poll-interval` was renamed to `--foxx.queues-poll-interval`. - The meaning of the option is unchanged. -- `--log.tty` was renamed to `--log.foreground-tty`. The meaning of the option is - unchanged. -- `--upgrade` has been renamed to `--database.auto-upgrade`. In contrast to 2.8 this - option now requires a boolean parameter. To actually perform an automatic database - upgrade at startup use `--database.auto-upgrade true`. To not perform it, use - `--database.auto-upgrade false`. -- `--check-version` has been renamed to `--database.check-version`. -- `--temp-path` has been renamed to `--temp.path`. - -!SUBSECTION Log verbosity, topics and output files - -Logging now supports log topics. You can control these by specifying a log -topic in front of a log level or an output. For example - -``` - --log.level startup=trace --log.level info +```js +db.edgeCollection.replace("myKey", { value: "test" }); ``` -will log messages concerning startup at trace level, everything else at info -level. `--log.level` can be specified multiple times at startup, for as many -topics as needed. 
+To make this work in ArangoDB 3.0, `_from` and `_to` need to be added to the replacement +data: -Some relevant log topics available in 3.0 are: +```js +db.edgeCollection.replace("myKey", { _from: "myVertexCollection/1", _to: "myVertexCollection/2", value: "test" }); +``` -- *collector*: information about the WAL collector's state -- *compactor*: information about the collection datafile compactor -- *datafiles*: datafile-related operations -- *mmap*: information about memory-mapping operations -- *performance*: some performance-related information -- *queries*: executed AQL queries -- *replication*: replication-related info -- *requests*: HTTP requests -- *startup*: information about server startup and shutdown -- *threads*: information about threads +Note that this only affects the `replace()` function but not `update()`, which will +only update the specified attributes of the edge and leave all others intact. -The new log option `--log.output ` allows directing the global -or per-topic log output to different outputs. The output definition "" -can be one of +Additionally, the functions `edges()`, `outEdges()` and `inEdges()` with an array of edge +ids will now make the edge ids unique before returning the connected edges. This is probably +desired anyway, as results will be returned only once per distinct input edge id. However, +it may break client applications that rely on the old behavior. -- "-" for stdin -- "+" for stderr -- "syslog://" -- "syslog:///" -- "file://" +!SUBSECTION Collection API -The option can be specified multiple times in order to configure the output -for different log topics. To set up a per-topic output configuration, use -`--log.output =`, e.g. +!SUBSUBSECTION Example matching - queries=file://queries.txt +The collection function `byExampleHash()` and `byExampleSkiplist()` have been removed in 3.0. +Their functionality is provided by collection's `byExample()` function, which will automatically +use a suitable index if present. 
-logs all queries to the file "queries.txt". +The collection function `byConditionSkiplist()` has been removed in 3.0. The same functionality +can be achieved by issuing an AQL query with the target condition, which will automatically use +a suitable index if present. -The old option `--log.file` is still available in 3.0 for convenience reasons. In -3.0 it is a shortcut for the more general option `--log.output file://filename`. - -The old option `--log.requests-file` is still available in 3.0. It is now a shortcut -for the more general option `--log.output requests=file://...`. +!SUBSUBSECTION Revision id handling -The old option `--log.performance` is still available in 3.0. It is now a shortcut -for the more general option `--log.level performance=trace`. +The `exists()` method of a collection now throws an exception when the specified document +exists but its revision id does not match the revision id specified. Previous versions of +ArangoDB simply returned `false` if either no document existed with the specified key or +when the revision id did not match. It was therefore impossible to distinguish these two +cases from the return value alone. 3.0 corrects this. Additionally, `exists()` in previous +versions always returned a boolean if only the document key was given. 3.0 now returns the +document's meta-data, which includes the document's current revision id. -!SUBSECTION Removed options for logging +Given there is a document with key `test` in collection `myCollection`, then the behavior +of 3.0 is as follows: -The options `--log.content-filter` and `--log.source-filter` have been removed. They -have most been used during ArangoDB's internal development. +```js +/* test if document exists. this returned true in 2.8 */ +db.myCollection.exists("test"); +{ + "_key" : "test", + "_id" : "myCollection/test", + "_rev" : "9758059" +} -The syslog-related options `--log.application` and `--log.facility` have been removed. 
-They are superseded by the more general `--log.output` option which can also handle -syslog targets. +/* test if document exists. this returned true in 2.8 */ +db.myCollection.exists({ _key: "test" }); +{ + "_key" : "test", + "_id" : "myCollection/test", + "_rev" : "9758059" +} + +/* test if document exists. this also returned false in 2.8 */ +db.myCollection.exists("foo"); +false + +/* test if document with a given revision id exists. this returned true in 2.8 */ +db.myCollection.exists({ _key: "test", _rev: "9758059" }); +{ + "_key" : "test", + "_id" : "myCollection/test", + "_rev" : "9758059" +} + +/* test if document with a given revision id exists. this returned false in 2.8 */ +db.myCollection.exists({ _key: "test", _rev: "1234" }); +JavaScript exception: ArangoError 1200: conflict +``` + +!SUBSUBSECTION Cap constraints + +The cap constraints feature has been removed. This change has led to the removal of the +collection operations `first()` and `last()`, which were internally based on data from +cap constraints. + +As cap constraints have been removed in ArangoDB 3.0 it is not possible to create an +index of type "cap" with a collection's `ensureIndex()` function. The dedicated function +`ensureCapConstraint()` has also been removed from the collection API. + +!SUBSUBSECTION Undocumented APIs + +The undocumented functions `BY_EXAMPLE_HASH()` and `BY_EXAMPLE_SKIPLIST()` and +`BY_CONDITION_SKIPLIST` have been removed. These functions were always hidden and not +intended to be part of the public JavaScript API for collections. !SECTION HTTP API changes !SUBSECTION CRUD operations +The following incompatible changes have been made to the HTTP API in ArangoDB 3.0: + !SUBSUBSECTION General The HTTP insert operations for single documents and edges (POST `/_api/document`) do @@ -271,7 +274,6 @@ documents please use the `If-Match` HTTP header as described above. 
When no revi check if required the HTTP header can be omitted, and the operations will work on the current revision of the document, regardless of its revision id. - !SUBSECTION All documents API The HTTP API for retrieving the ids, keys or URLs of all documents from a collection @@ -426,6 +428,109 @@ API: versions of ArangoDB but have been marked as not intended to be called by end users since ArangoDB version 2.3. These methods should not have been part of any ArangoDB manual since version 2.4. +- `/_api/structure`: an unfinished API for data format and type checks, superseded + by Foxx. + +!SECTION Command-line options + +Quite a few startup options in ArangoDB 2 were double negations (like +`--server.disable-authentication false`). In ArangoDB 3 these are now expressed as +positives (e. g. `--server.authentication`). Also the options between the ArangoDB +server and its client tools have being unified. For example, the logger options are +now the same for the server and the client tools. Additionally many options have +been moved into more appropriate topic sections. + +!SUBSECTION Renamed options + +The following options have been available before 3.0 and have changed their name +in 3.0: + +- `--server.disable-authentication` was renamed to `--server.authentication`. + Note that the meaning of the option `--server.authentication` is the opposite of + the previous `--server.disable-authentication`. +- `--server.disable-authentication-unix-sockets` was renamed to + `--server.authentication-unix-sockets`. Note that the meaning of the option + `--server.authentication-unix-sockets` is the opposite of the previous + `--server.disable-authentication-unix-sockets`. +- `--server.disable-statistics` was renamed to `--server.statistics`. Note that the + meaning of the option `--server.statistics` is the opposite of the previous + `--server.disable-statistics`. +- `--server.keyfile` was renamed to `--ssl.keyfile`. The meaning of the option is + unchanged. 
+- `--server.foxx-queues` was renamed to `--foxx.queues`. The meaning of the option + is unchanged. +- `--server.foxx-queues-poll-interval` was renamed to `--foxx.queues-poll-interval`. + The meaning of the option is unchanged. +- `--log.tty` was renamed to `--log.foreground-tty`. The meaning of the option is + unchanged. +- `--upgrade` has been renamed to `--database.auto-upgrade`. In contrast to 2.8 this + option now requires a boolean parameter. To actually perform an automatic database + upgrade at startup use `--database.auto-upgrade true`. To not perform it, use + `--database.auto-upgrade false`. +- `--check-version` has been renamed to `--database.check-version`. +- `--temp-path` has been renamed to `--temp.path`. + +!SUBSECTION Log verbosity, topics and output files + +Logging now supports log topics. You can control these by specifying a log +topic in front of a log level or an output. For example + +``` + --log.level startup=trace --log.level info +``` + +will log messages concerning startup at trace level, everything else at info +level. `--log.level` can be specified multiple times at startup, for as many +topics as needed. + +Some relevant log topics available in 3.0 are: + +- *collector*: information about the WAL collector's state +- *compactor*: information about the collection datafile compactor +- *datafiles*: datafile-related operations +- *mmap*: information about memory-mapping operations +- *performance*: some performance-related information +- *queries*: executed AQL queries +- *replication*: replication-related info +- *requests*: HTTP requests +- *startup*: information about server startup and shutdown +- *threads*: information about threads + +The new log option `--log.output ` allows directing the global +or per-topic log output to different outputs. 
The output definition "" +can be one of + +- "-" for stdin +- "+" for stderr +- "syslog://" +- "syslog:///" +- "file://" + +The option can be specified multiple times in order to configure the output +for different log topics. To set up a per-topic output configuration, use +`--log.output =`, e.g. + + queries=file://queries.txt + +logs all queries to the file "queries.txt". + +The old option `--log.file` is still available in 3.0 for convenience reasons. In +3.0 it is a shortcut for the more general option `--log.output file://filename`. + +The old option `--log.requests-file` is still available in 3.0. It is now a shortcut +for the more general option `--log.output requests=file://...`. + +The old option `--log.performance` is still available in 3.0. It is now a shortcut +for the more general option `--log.level performance=trace`. + +!SUBSECTION Removed options for logging + +The options `--log.content-filter` and `--log.source-filter` have been removed. They +have most been used during ArangoDB's internal development. + +The syslog-related options `--log.application` and `--log.facility` have been removed. +They are superseded by the more general `--log.output` option which can also handle +syslog targets. !SECTION ArangoShell and client tools @@ -437,7 +542,6 @@ and all client tools uses these APIs. In order to connect to earlier versions of ArangoDB with the client tools, an older version of the client tools needs to be kept installed. - !SUBSECTION Command-line options changed For all client tools, the option `--server.disable-authentication` was renamed to @@ -447,16 +551,17 @@ is the opposite of the previous `--server.disable-authentication`. The command-line option `--quiet` was removed from all client tools except arangosh because it had no effect in those tools. - !SUBSECTION Arangobench In order to make its purpose more apparent, the former `arangob` client tool has been renamed to `arangobench` in 3.0. 
- !SECTION Miscellaneous changes The checksum calculation algorithm for the `collection.checksum()` method and its corresponding REST API GET `/_api/collection/) ' + "\n" + ' remove() delete document ' + "\n" + ' exists() checks whether a document exists ' + "\n" + - ' first() first inserted/updated document ' + "\n" + - ' last() last inserted/updated document ' + "\n" + ' ' + "\n" + 'Attributes: ' + "\n" + ' _database database object ' + "\n" + diff --git a/js/server/modules/@arangodb/arango-collection.js b/js/server/modules/@arangodb/arango-collection.js index cbea774e65..22407a56d6 100644 --- a/js/server/modules/@arangodb/arango-collection.js +++ b/js/server/modules/@arangodb/arango-collection.js @@ -288,102 +288,6 @@ ArangoCollection.prototype.any = function () { return this.ANY(); }; -//////////////////////////////////////////////////////////////////////////////// -/// @brief was docuBlock documentsCollectionFirst -//////////////////////////////////////////////////////////////////////////////// - -ArangoCollection.prototype.first = function (count) { - var cluster = require("@arangodb/cluster"); - - if (cluster.isCoordinator()) { - var dbName = require("internal").db._name(); - var shards = cluster.shardList(dbName, this.name()); - - if (shards.length !== 1) { - var err = new ArangoError(); - err.errorNum = internal.errors.ERROR_CLUSTER_UNSUPPORTED.code; - err.errorMessage = "operation is not supported in sharded collections"; - - throw err; - } - - var coord = { coordTransactionID: ArangoClusterInfo.uniqid() }; - var options = { coordTransactionID: coord.coordTransactionID, timeout: 360 }; - var shard = shards[0]; - - ArangoClusterComm.asyncRequest("put", - "shard:" + shard, - dbName, - "/_api/simple/first", - JSON.stringify({ - collection: shard, - count: count - }), - { }, - options); - - var results = cluster.wait(coord, shards); - - if (results.length) { - var body = JSON.parse(results[0].body); - return body.result || null; - } - } - else { - return 
this.FIRST(count); - } - - return null; -}; - -//////////////////////////////////////////////////////////////////////////////// -/// @brief was docuBlock documentsCollectionLast -//////////////////////////////////////////////////////////////////////////////// - -ArangoCollection.prototype.last = function (count) { - var cluster = require("@arangodb/cluster"); - - if (cluster.isCoordinator()) { - var dbName = require("internal").db._name(); - var shards = cluster.shardList(dbName, this.name()); - - if (shards.length !== 1) { - var err = new ArangoError(); - err.errorNum = internal.errors.ERROR_CLUSTER_UNSUPPORTED.code; - err.errorMessage = "operation is not supported in sharded collections"; - - throw err; - } - - var coord = { coordTransactionID: ArangoClusterInfo.uniqid() }; - var options = { coordTransactionID: coord.coordTransactionID, timeout: 360 }; - var shard = shards[0]; - - ArangoClusterComm.asyncRequest("put", - "shard:" + shard, - dbName, - "/_api/simple/last", - JSON.stringify({ - collection: shard, - count: count - }), - { }, - options); - - var results = cluster.wait(coord, shards); - - if (results.length) { - var body = JSON.parse(results[0].body); - return body.result || null; - } - } - else { - return this.LAST(count); - } - - return null; -}; - //////////////////////////////////////////////////////////////////////////////// /// @brief was docuBlock collectionFirstExample //////////////////////////////////////////////////////////////////////////////// From a121aff1a060125f7ff03fd5be798df19aee5477 Mon Sep 17 00:00:00 2001 From: Andreas Streichardt Date: Fri, 6 May 2016 18:10:45 +0200 Subject: [PATCH 16/31] Implement proper (database) current loading --- arangod/Cluster/ClusterInfo.cpp | 117 +++++++++++++------------------- arangod/Cluster/ClusterInfo.h | 15 ++-- 2 files changed, 58 insertions(+), 74 deletions(-) diff --git a/arangod/Cluster/ClusterInfo.cpp b/arangod/Cluster/ClusterInfo.cpp index 847fef33c3..85523b4cbf 100644 --- 
a/arangod/Cluster/ClusterInfo.cpp +++ b/arangod/Cluster/ClusterInfo.cpp @@ -260,7 +260,6 @@ ClusterInfo::ClusterInfo(AgencyCallbackRegistry* agencyCallbackRegistry) //////////////////////////////////////////////////////////////////////////////// ClusterInfo::~ClusterInfo() { - clearCurrentDatabases(_currentDatabases); } //////////////////////////////////////////////////////////////////////////////// @@ -305,7 +304,7 @@ void ClusterInfo::flush() { loadCurrentDBServers(); loadCurrentCoordinators(); loadPlan(); - loadCurrentDatabases(); + loadCurrent(); loadCurrentCollections(); } @@ -317,9 +316,9 @@ bool ClusterInfo::doesDatabaseExist(DatabaseID const& databaseID, bool reload) { int tries = 0; if (reload || !_planProt.isValid || - !_currentDatabasesProt.isValid || !_DBServersProt.isValid) { + !_currentProt.isValid || !_DBServersProt.isValid) { loadPlan(); - loadCurrentDatabases(); + loadCurrent(); loadCurrentDBServers(); ++tries; // no need to reload if the database is not found } @@ -343,7 +342,7 @@ bool ClusterInfo::doesDatabaseExist(DatabaseID const& databaseID, bool reload) { if (it != _plannedDatabases.end()) { // found the database in Plan - READ_LOCKER(readLocker, _currentDatabasesProt.lock); + READ_LOCKER(readLocker, _currentProt.lock); // _currentDatabases is // a map-type> auto it2 = _currentDatabases.find(databaseID); @@ -361,7 +360,7 @@ bool ClusterInfo::doesDatabaseExist(DatabaseID const& databaseID, bool reload) { } loadPlan(); - loadCurrentDatabases(); + loadCurrent(); loadCurrentDBServers(); } @@ -376,9 +375,9 @@ std::vector ClusterInfo::listDatabases(bool reload) { std::vector result; if (reload || !_planProt.isValid || - !_currentDatabasesProt.isValid || !_DBServersProt.isValid) { + !_currentProt.isValid || !_DBServersProt.isValid) { loadPlan(); - loadCurrentDatabases(); + loadCurrent(); loadCurrentDBServers(); } @@ -393,7 +392,7 @@ std::vector ClusterInfo::listDatabases(bool reload) { { READ_LOCKER(readLockerPlanned, _planProt.lock); - 
READ_LOCKER(readLockerCurrent, _currentDatabasesProt.lock); + READ_LOCKER(readLockerCurrent, _currentProt.lock); // _plannedDatabases is a map-type auto it = _plannedDatabases.begin(); @@ -540,32 +539,6 @@ void ClusterInfo::loadPlan() { << " body: " << result.body(); } -//////////////////////////////////////////////////////////////////////////////// -/// @brief deletes a list of current databases -//////////////////////////////////////////////////////////////////////////////// - -void ClusterInfo::clearCurrentDatabases( - std::unordered_map>& - databases) { - auto it = databases.begin(); - while (it != databases.end()) { - auto it2 = (*it).second.begin(); - - while (it2 != (*it).second.end()) { - TRI_json_t* json = (*it2).second; - - if (json != nullptr) { - TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json); - } - - ++it2; - } - ++it; - } - - databases.clear(); -} - //////////////////////////////////////////////////////////////////////////////// /// @brief (re-)load the information about current databases /// Usually one does not have to call this directly. 
@@ -573,10 +546,10 @@ void ClusterInfo::clearCurrentDatabases( static std::string const prefixCurrentDatabases = "Current/Databases"; -void ClusterInfo::loadCurrentDatabases() { - uint64_t storedVersion = _currentDatabasesProt.version; - MUTEX_LOCKER(mutexLocker, _currentDatabasesProt.mutex); - if (_currentDatabasesProt.version > storedVersion) { +void ClusterInfo::loadCurrent() { + uint64_t storedVersion = _currentProt.version; + MUTEX_LOCKER(mutexLocker, _currentProt.mutex); + if (_currentProt.version > storedVersion) { // Somebody else did, what we intended to do, so just return return; } @@ -591,48 +564,52 @@ void ClusterInfo::loadCurrentDatabases() { } if (result.successful()) { - - velocypack::Slice databases = - result.slice()[0].get(std::vector( - {AgencyComm::prefixStripped(), "Current", "Databases"})); - if (!databases.isNone()) { + velocypack::Slice slice = + result.slice()[0].get(std::vector( + {AgencyComm::prefixStripped(), "Current"})); + + auto currentBuilder = std::make_shared(); + currentBuilder->add(slice); + + VPackSlice currentSlice = currentBuilder->slice(); + if (currentSlice.isObject()) { decltype(_currentDatabases) newDatabases; - for (auto const& dbase : VPackObjectIterator(databases)) { + bool swapDatabases = false; - std::string const database = dbase.key.copyString(); + VPackSlice databasesSlice; + databasesSlice = currentSlice.get("Databases"); + if (databasesSlice.isObject()) { + for (auto const& databaseSlicePair : VPackObjectIterator(databasesSlice)) { + std::string const database = databaseSlicePair.key.copyString(); - // _currentDatabases is - // a map-type> - auto it2 = newDatabases.find(database); + if (!databaseSlicePair.value.isObject()) { + continue; + } - if (it2 == newDatabases.end()) { - // insert an empty list for this database - decltype(it2->second) empty; - it2 = newDatabases.insert(std::make_pair(database, empty)).first; + std::unordered_map serverList; + for (auto const& serverSlicePair : 
VPackObjectIterator(databaseSlicePair.value)) { + serverList.insert(std::make_pair(serverSlicePair.key.copyString(), serverSlicePair.value)); + } + + newDatabases.insert(std::make_pair(database, serverList)); } - - // TODO: _plannedDatabases need to be moved to velocypack - // Then this can be merged to swap - for (auto const& server : VPackObjectIterator(dbase.value)) { - TRI_json_t* json = arangodb::basics::VelocyPackHelper::velocyPackToJson( - server.value); - (*it2).second.insert(std::make_pair(server.key.copyString(), json)); - } - + swapDatabases = true; } // Now set the new value: { - WRITE_LOCKER(writeLocker, _currentDatabasesProt.lock); + WRITE_LOCKER(writeLocker, _currentProt.lock); + _current = currentBuilder; _currentDatabases.swap(newDatabases); - _currentDatabasesProt.version++; // such that others notice our change - _currentDatabasesProt.isValid = true; // will never be reset to false + _currentProt.version++; // such that others notice our change + _currentProt.isValid = true; // will never be reset to false } - clearCurrentDatabases(newDatabases); // delete the old stuff - return; + } else { + LOG(ERR) << "Current is not an object!"; } + return; } LOG(DEBUG) << "Error while loading " << prefixCurrentDatabases @@ -947,7 +924,7 @@ int ClusterInfo::createDatabaseCoordinator(std::string const& name, dbServerResult = TRI_ERROR_CLUSTER_COULD_NOT_CREATE_DATABASE; return true; } - loadCurrentDatabases(); // update our cache + loadCurrent(); // update our cache dbServerResult = setErrormsg(TRI_ERROR_NO_ERROR, errorMsg); } return true; @@ -2577,8 +2554,8 @@ void ClusterInfo::invalidateCurrent() { _coordinatorsProt.isValid = false; } { - WRITE_LOCKER(writeLocker, _currentDatabasesProt.lock); - _currentDatabasesProt.isValid = false; + WRITE_LOCKER(writeLocker, _currentProt.lock); + _currentProt.isValid = false; } { WRITE_LOCKER(writeLocker, _currentCollectionsProt.lock); diff --git a/arangod/Cluster/ClusterInfo.h b/arangod/Cluster/ClusterInfo.h index 
6a44342469..f93458a662 100644 --- a/arangod/Cluster/ClusterInfo.h +++ b/arangod/Cluster/ClusterInfo.h @@ -116,6 +116,9 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// std::string id_as_string() const { + if (!_slice.isObject()) { + return std::string(""); + } return arangodb::basics::VelocyPackHelper::getStringValue(_slice, "id", ""); } @@ -124,6 +127,9 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// std::string name() const { + if (!_slice.isObject()) { + return std::string(""); + } return arangodb::basics::VelocyPackHelper::getStringValue(_slice, "name", ""); } @@ -602,11 +608,11 @@ class ClusterInfo { void loadPlan(); ////////////////////////////////////////////////////////////////////////////// - /// @brief (re-)load the information about current databases + /// @brief (re-)load the information about current state /// Usually one does not have to call this directly. ////////////////////////////////////////////////////////////////////////////// - void loadCurrentDatabases(); + void loadCurrent(); ////////////////////////////////////////////////////////////////////////////// /// @brief ask about a collection @@ -910,15 +916,16 @@ class ClusterInfo { ProtectionData _coordinatorsProt; std::shared_ptr _plan; + std::shared_ptr _current; std::unordered_map _plannedDatabases; // from Plan/Databases ProtectionData _planProt; std::unordered_map> + std::unordered_map> _currentDatabases; // from Current/Databases - ProtectionData _currentDatabasesProt; + ProtectionData _currentProt; // We need information about collections, again we have // data from Plan and from Current. 
From 0a14a56cb97d03d27733a616aed9ee5bb1ec4d95 Mon Sep 17 00:00:00 2001 From: Andreas Streichardt Date: Fri, 6 May 2016 18:29:33 +0200 Subject: [PATCH 17/31] mop trying to do c++ => fail --- arangod/Cluster/ClusterInfo.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arangod/Cluster/ClusterInfo.h b/arangod/Cluster/ClusterInfo.h index f93458a662..b9b5177d9e 100644 --- a/arangod/Cluster/ClusterInfo.h +++ b/arangod/Cluster/ClusterInfo.h @@ -292,7 +292,7 @@ class CollectionInfo { return false; } - auto firstElement = shardKeysSlice.get(0); + auto firstElement = shardKeysSlice.at(0); TRI_ASSERT(firstElement.isString()); std::string shardKey = arangodb::basics::VelocyPackHelper::getStringValue(firstElement, ""); From 1233186eba08b1fb362e6c959f57e4b9378e5dcb Mon Sep 17 00:00:00 2001 From: Andreas Streichardt Date: Fri, 6 May 2016 18:32:55 +0200 Subject: [PATCH 18/31] actually use swapDatabases variable --- arangod/Cluster/ClusterInfo.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arangod/Cluster/ClusterInfo.cpp b/arangod/Cluster/ClusterInfo.cpp index 85523b4cbf..ef53dd0fa4 100644 --- a/arangod/Cluster/ClusterInfo.cpp +++ b/arangod/Cluster/ClusterInfo.cpp @@ -602,7 +602,9 @@ void ClusterInfo::loadCurrent() { { WRITE_LOCKER(writeLocker, _currentProt.lock); _current = currentBuilder; - _currentDatabases.swap(newDatabases); + if (swapDatabases) { + _currentDatabases.swap(newDatabases); + } _currentProt.version++; // such that others notice our change _currentProt.isValid = true; // will never be reset to false } From 0bd61cf9282d153489ed9c882ed68afc7e21564c Mon Sep 17 00:00:00 2001 From: Jan Steemann Date: Fri, 6 May 2016 18:38:37 +0200 Subject: [PATCH 19/31] removed option `--server.default-api-compatibility` --- .../Administration/Configuration/Arangod.mdpp | 24 - .../ReleaseNotes/UpgradingChanges30.mdpp | 19 +- .../HttpInterface/api-compatibility-spec.rb | 137 -- UnitTests/HttpInterface/api-database-spec.rb | 22 - 
.../HttpInterface/api-document-create-spec.rb | 18 +- arangod/HttpServer/HttpCommTask.cpp | 64 +- arangod/HttpServer/HttpCommTask.h | 10 +- arangod/HttpServer/HttpHandler.cpp | 13 +- arangod/HttpServer/HttpHandlerFactory.cpp | 5 +- arangod/HttpServer/HttpHandlerFactory.h | 10 +- arangod/Replication/Syncer.cpp | 4 +- arangod/RestHandler/RestBatchHandler.cpp | 2 +- arangod/RestServer/RestServerFeature.cpp | 16 +- arangod/RestServer/RestServerFeature.h | 1 - arangod/Utils/WorkMonitorArangod.cpp | 3 +- arangod/V8Server/v8-actions.cpp | 19 +- js/actions/api-structure.js | 1915 ----------------- lib/Rest/GeneralRequest.cpp | 2 - lib/Rest/GeneralRequest.h | 10 +- lib/Rest/GeneralResponse.cpp | 5 +- lib/Rest/GeneralResponse.h | 3 +- lib/Rest/HttpRequest.cpp | 66 +- lib/Rest/HttpRequest.h | 5 +- lib/Rest/HttpResponse.cpp | 6 +- lib/Rest/HttpResponse.h | 2 +- 25 files changed, 71 insertions(+), 2310 deletions(-) delete mode 100644 UnitTests/HttpInterface/api-compatibility-spec.rb delete mode 100644 js/actions/api-structure.js diff --git a/Documentation/Books/Users/Administration/Configuration/Arangod.mdpp b/Documentation/Books/Users/Administration/Configuration/Arangod.mdpp index e1c35c7551..b745ed8438 100644 --- a/Documentation/Books/Users/Administration/Configuration/Arangod.mdpp +++ b/Documentation/Books/Users/Administration/Configuration/Arangod.mdpp @@ -78,30 +78,6 @@ The default is *false*. @startDocuBlock keep_alive_timeout -!SUBSECTION Default API compatibility - - -default API compatibility -`--server.default-api-compatibility` - -This option can be used to determine the API compatibility of the ArangoDB -server. It expects an ArangoDB version number as an integer, calculated as -follows: - -*10000 \* major + 100 \* minor (example: *10400* for ArangoDB 1.4)* - -The value of this option will have an influence on some API return values -when the HTTP client used does not send any compatibility information. 
- -In most cases it will be sufficient to not set this option explicitly but to -keep the default value. However, in case an "old" ArangoDB client is used -that does not send any compatibility information and that cannot handle the -responses of the current version of ArangoDB, it might be reasonable to set -the option to an old version number to improve compatibility with older -clients. - - - !SUBSECTION Hide Product header diff --git a/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp b/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp index 46c18f82f4..94e7313708 100644 --- a/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp +++ b/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp @@ -231,7 +231,7 @@ as before. The "location" HTTP header returned by ArangoDB when inserting a new document or edge now always contains the database name. This was also the default behavior in previous versions of ArangoDB, but it could be overridden by clients sending the HTTP header -`x-arangodb-version: 1.4` in the request. Clients can continue to send this header to +`x-arango-version: 1.4` in the request. Clients can continue to send this header to ArangoDB 3.0, but the header will not influence the location response headers produced by ArangoDB 3.0 anymore. @@ -388,13 +388,13 @@ following result attributes as they became meaningless in 3.0: When creating a database via the API POST `/_api/database`, ArangoDB will now always return the HTTP status code 202 (created) if the operation succeeds. Previous versions of ArangoDB returned HTTP 202 as well, but this behavior was changable by sending an -HTTP header `x-arangodb-version: 1.4`. When sending this header, previous versions of +HTTP header `x-arango-version: 1.4`. When sending this header, previous versions of ArangoDB returned an HTTP status code 200 (ok). Clients can still send this header to ArangoDB 3.0 but this will not influence the HTTP status code produced by ArangoDB. 
The "location" header produced by ArangoDB 3.0 will now always contain the database name. This was also the default in previous versions of ArangoDB, but the behaviour -could be overriden by sending the HTTP header `x-arangodb-version: 1.4`. Clients can +could be overridden by sending the HTTP header `x-arango-version: 1.4`. Clients can still send the header, but this will not make the database name in the "location" response header disappear. @@ -532,6 +532,19 @@ The syslog-related options `--log.application` and `--log.facility` have been re They are superseded by the more general `--log.output` option which can also handle syslog targets. +!SUBSECTION Removed other options + +The option `--server.default-api-compatibility` was present in earlier version of +ArangoDB to control various aspects of the server behavior, e.g. HTTP return codes +or the format of HTTP "location" headers. Client applications could send an HTTP +header "x-arango-version" with a version number to request the server behavior of +a certain ArangoDB version. + +This option was only honored in a handful of cases (described above) and was removed +in 3.0 because the changes in server behavior controlled by this option were changed +even before ArangoDB 2.0. This should have left enough time for client applications +to adapt to the new behavior, making the option superfluous in 3.0. 
+ !SECTION ArangoShell and client tools The ArangoShell (arangosh) and the other client tools bundled with ArangoDB can only diff --git a/UnitTests/HttpInterface/api-compatibility-spec.rb b/UnitTests/HttpInterface/api-compatibility-spec.rb deleted file mode 100644 index dee6edcd12..0000000000 --- a/UnitTests/HttpInterface/api-compatibility-spec.rb +++ /dev/null @@ -1,137 +0,0 @@ -# coding: utf-8 - -require 'rspec' -require 'arangodb.rb' - -describe ArangoDB do - -################################################################################ -## general tests -################################################################################ - - context "checking compatibility features:" do - it "tests the compatibility value when no header is set" do - doc = ArangoDB.get("/_admin/echo", :headers => { }) - - doc.code.should eq(200) - compatibility = doc.parsed_response['compatibility'] - compatibility.should be_kind_of(Integer) - compatibility.should eq(30000) - end - - it "tests the compatibility value when a broken header is set" do - versions = [ "1", "1.", "-1.3", "-1.3.", "x.4", "xx", "", " ", ".", "foobar", "foobar1.3", "xx1.4" ] - - versions.each do|value| - doc = ArangoDB.get("/_admin/echo", :headers => { "x-arango-version" => value }) - - doc.code.should eq(200) - compatibility = doc.parsed_response['compatibility'] - compatibility.should be_kind_of(Integer) - compatibility.should eq(30000) - end - end - - it "tests the compatibility value when a valid header is set" do - versions = [ "1.3.0", "1.3", "1.3-devel", "1.3.1", "1.3.99", "10300", "10303" ] - - versions.each do|value| - doc = ArangoDB.get("/_admin/echo", :headers => { "x-arango-version" => value }) - - doc.code.should eq(200) - compatibility = doc.parsed_response['compatibility'] - compatibility.should be_kind_of(Integer) - compatibility.should eq(10300) - end - end - - it "tests the compatibility value when a valid header is set" do - versions = [ "1.4.0", "1.4.1", "1.4.2", "1.4.0-devel", 
"1.4.0-beta2", " 1.4", "1.4 ", " 1.4.0", " 1.4.0 ", "10400", "10401", "10499" ] - - versions.each do|value| - doc = ArangoDB.get("/_admin/echo", :headers => { "x-arango-version" => value }) - - doc.code.should eq(200) - compatibility = doc.parsed_response['compatibility'] - compatibility.should be_kind_of(Integer) - compatibility.should eq(10400) - end - end - - it "tests the compatibility value when a valid header is set" do - versions = [ "1.5.0", "1.5.1", "1.5.2", "1.5.0-devel", "1.5.0-beta2", " 1.5", "1.5 ", " 1.5.0", " 1.5.0 ", "10500", "10501", "10599" ] - - versions.each do|value| - doc = ArangoDB.get("/_admin/echo", :headers => { "x-arango-version" => value }) - - doc.code.should eq(200) - compatibility = doc.parsed_response['compatibility'] - compatibility.should be_kind_of(Integer) - compatibility.should eq(10500) - end - end - - it "tests the compatibility value when a valid header is set" do - versions = [ "2.0.0", "2.0.0-devel", "2.0.0-alpha", "2.0", " 2.0", "2.0 ", " 2.0.0", " 2.0.0 ", "20000", "20000 ", "20099" ] - - versions.each do|value| - doc = ArangoDB.get("/_admin/echo", :headers => { "x-arango-version" => value }) - - doc.code.should eq(200) - compatibility = doc.parsed_response['compatibility'] - compatibility.should be_kind_of(Integer) - compatibility.should eq(20000) - end - end - - it "tests the compatibility value when a valid header is set" do - versions = [ "2.1.0", "2.1.0-devel", "2.1.0-alpha", "2.1", " 2.1", "2.1 ", " 2.1.0", " 2.1.0 ", "20100", "20100 ", "20199" ] - - versions.each do|value| - doc = ArangoDB.get("/_admin/echo", :headers => { "x-arango-version" => value }) - - doc.code.should eq(200) - compatibility = doc.parsed_response['compatibility'] - compatibility.should be_kind_of(Integer) - compatibility.should eq(20100) - end - end - - it "tests the compatibility value when a valid header is set" do - versions = [ "2.2.0", "2.2.0-devel", "2.2.0-alpha", "2.2", " 2.2", "2.2 ", " 2.2.0", " 2.2.0 ", "20200", "20200 ", "20299" ] - 
- versions.each do|value| - doc = ArangoDB.get("/_admin/echo", :headers => { "x-arango-version" => value }) - - doc.code.should eq(200) - compatibility = doc.parsed_response['compatibility'] - compatibility.should be_kind_of(Integer) - compatibility.should eq(20200) - end - end - - it "tests the compatibility value when a too low version is set" do - versions = [ "0.0", "0.1", "0.2", "0.9", "1.0", "1.1", "1.2" ] - - versions.each do|value| - doc = ArangoDB.get("/_admin/echo", :headers => { "x-arango-version" => value }) - - doc.code.should eq(200) - compatibility = doc.parsed_response['compatibility'] - compatibility.should be_kind_of(Integer) - compatibility.should eq(10300) - end - end - - it "tests the compatibility value when a too high version is set" do - doc = ArangoDB.get("/_admin/echo", :headers => { "x-arango-version" => "2.4" }) - - doc.code.should eq(200) - compatibility = doc.parsed_response['compatibility'] - compatibility.should be_kind_of(Integer) - compatibility.should eq(20400) - end - - end - -end diff --git a/UnitTests/HttpInterface/api-database-spec.rb b/UnitTests/HttpInterface/api-database-spec.rb index be5552fd4f..2801118b86 100644 --- a/UnitTests/HttpInterface/api-database-spec.rb +++ b/UnitTests/HttpInterface/api-database-spec.rb @@ -67,28 +67,6 @@ describe ArangoDB do ArangoDB.delete(api + "/#{name}") end - it "creates a new database, old return code" do - body = "{\"name\" : \"#{name}\" }" - doc = ArangoDB.log_post("#{prefix}-create", api, :body => body, :headers => { "X-Arango-Version" => "1.4" }) - - doc.code.should eq(200) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - response = doc.parsed_response - response["result"].should eq(true) - response["error"].should eq(false) - end - - it "creates a new database, new return code" do - body = "{\"name\" : \"#{name}\" }" - doc = ArangoDB.log_post("#{prefix}-create", api, :body => body, :headers => { "X-Arango-Version" => "1.5" }) - - doc.code.should eq(201) - 
doc.headers['content-type'].should eq("application/json; charset=utf-8") - response = doc.parsed_response - response["result"].should eq(true) - response["error"].should eq(false) - end - it "creates a new database" do body = "{\"name\" : \"#{name}\" }" doc = ArangoDB.log_post("#{prefix}-create", api, :body => body) diff --git a/UnitTests/HttpInterface/api-document-create-spec.rb b/UnitTests/HttpInterface/api-document-create-spec.rb index 3858ddf736..7e8d247139 100644 --- a/UnitTests/HttpInterface/api-document-create-spec.rb +++ b/UnitTests/HttpInterface/api-document-create-spec.rb @@ -159,7 +159,7 @@ describe ArangoDB do it "creating a new document, setting compatibility header" do cmd = "/_api/document?collection=#{@cn}" body = "{ \"Hallo\" : \"World\" }" - doc = ArangoDB.log_post("#{prefix}", cmd, :body => body, :headers => { "x-arango-version" => "1.4" }) + doc = ArangoDB.log_post("#{prefix}", cmd, :body => body) doc.code.should eq(201) doc.headers['content-type'].should eq("application/json; charset=utf-8") @@ -230,7 +230,7 @@ describe ArangoDB do it "creating a new document complex body, setting compatibility header " do cmd = "/_api/document?collection=#{@cn}" body = "{ \"Hallo\" : \"Wo\\\"rld\" }" - doc = ArangoDB.log_post("#{prefix}", cmd, :body => body, :headers => { "x-arango-version" => "1.4" }) + doc = ArangoDB.log_post("#{prefix}", cmd, :body => body) doc.code.should eq(201) doc.headers['content-type'].should eq("application/json; charset=utf-8") @@ -314,7 +314,7 @@ describe ArangoDB do it "creating a new umlaut document, setting compatibility header" do cmd = "/_api/document?collection=#{@cn}" body = "{ \"Hallo\" : \"รถรครผร–ร„รœรŸใ‚ๅฏฟๅธ\" }" - doc = ArangoDB.log_post("#{prefix}-umlaut", cmd, :body => body, :headers => { "x-arango-version" => "1.4" }) + doc = ArangoDB.log_post("#{prefix}-umlaut", cmd, :body => body) doc.code.should eq(201) doc.headers['content-type'].should eq("application/json; charset=utf-8") @@ -404,7 +404,7 @@ describe 
ArangoDB do it "creating a new not normalized umlaut document, setting compatibility header" do cmd = "/_api/document?collection=#{@cn}" body = "{ \"Hallo\" : \"GrรผรŸ Gott.\" }" - doc = ArangoDB.log_post("#{prefix}-umlaut", cmd, :body => body, :headers => { "x-arango-version" => "1.4" }) + doc = ArangoDB.log_post("#{prefix}-umlaut", cmd, :body => body) doc.code.should eq(201) doc.headers['content-type'].should eq("application/json; charset=utf-8") @@ -487,7 +487,7 @@ describe ArangoDB do cmd = "/_api/document?collection=#{@cn}" body = "{ \"some stuff\" : \"goes here\", \"_key\" : \"#{@key}\" }" - doc = ArangoDB.log_post("#{prefix}-existing-id", cmd, :body => body, :headers => { "x-arango-version" => "1.4" }) + doc = ArangoDB.log_post("#{prefix}-existing-id", cmd, :body => body) doc.code.should eq(201) doc.headers['content-type'].should eq("application/json; charset=utf-8") @@ -585,7 +585,7 @@ describe ArangoDB do it "creating a new document, setting compatibility header" do cmd = "/_api/document?collection=#{@cn}" body = "{ \"Hallo\" : \"World\" }" - doc = ArangoDB.log_post("#{prefix}-accept", cmd, :body => body, :headers => { "x-arango-version" => "1.4" }) + doc = ArangoDB.log_post("#{prefix}-accept", cmd, :body => body) doc.code.should eq(202) doc.headers['content-type'].should eq("application/json; charset=utf-8") @@ -649,7 +649,7 @@ describe ArangoDB do it "creating a new document, waitForSync URL param = false, setting compatibility header" do cmd = "/_api/document?collection=#{@cn}&waitForSync=false" body = "{ \"Hallo\" : \"World\" }" - doc = ArangoDB.log_post("#{prefix}-accept-sync-false", cmd, :body => body, :headers => { "x-arango-version" => "1.4" }) + doc = ArangoDB.log_post("#{prefix}-accept-sync-false", cmd, :body => body) doc.code.should eq(202) doc.headers['content-type'].should eq("application/json; charset=utf-8") @@ -713,7 +713,7 @@ describe ArangoDB do it "creating a new document, waitForSync URL param = true, setting compatibility header" do 
cmd = "/_api/document?collection=#{@cn}&waitForSync=true" body = "{ \"Hallo\" : \"World\" }" - doc = ArangoDB.log_post("#{prefix}-accept-sync-true", cmd, :body => body, :headers => { "x-arango-version" => "1.4" }) + doc = ArangoDB.log_post("#{prefix}-accept-sync-true", cmd, :body => body) doc.code.should eq(201) doc.headers['content-type'].should eq("application/json; charset=utf-8") @@ -792,7 +792,7 @@ describe ArangoDB do it "creating a new document, setting compatibility header" do cmd = "/_api/document?collection=#{@cn}" body = "{ \"Hallo\" : \"World\" }" - doc = ArangoDB.log_post("#{prefix}-named-collection", cmd, :body => body, :headers => { "x-arango-version" => "1.4" }) + doc = ArangoDB.log_post("#{prefix}-named-collection", cmd, :body => body) doc.code.should eq(201) doc.headers['content-type'].should eq("application/json; charset=utf-8") diff --git a/arangod/HttpServer/HttpCommTask.cpp b/arangod/HttpServer/HttpCommTask.cpp index 1cccec444f..1f9ceaf6f8 100644 --- a/arangod/HttpServer/HttpCommTask.cpp +++ b/arangod/HttpServer/HttpCommTask.cpp @@ -193,8 +193,7 @@ bool HttpCommTask::processRead() { // header is too large HttpResponse response( - GeneralResponse::ResponseCode::REQUEST_HEADER_FIELDS_TOO_LARGE, - getCompatibility()); + GeneralResponse::ResponseCode::REQUEST_HEADER_FIELDS_TOO_LARGE); // we need to close the connection, because there is no way we // know what to remove and then continue @@ -223,8 +222,7 @@ bool HttpCommTask::processRead() { LOG(ERR) << "cannot generate request"; // internal server error - HttpResponse response(GeneralResponse::ResponseCode::SERVER_ERROR, - getCompatibility()); + HttpResponse response(GeneralResponse::ResponseCode::SERVER_ERROR); // we need to close the connection, because there is no way we // know how to remove the body and then continue @@ -242,8 +240,7 @@ bool HttpCommTask::processRead() { if (_httpVersion != GeneralRequest::ProtocolVersion::HTTP_1_0 && _httpVersion != GeneralRequest::ProtocolVersion::HTTP_1_1) 
{ HttpResponse response( - GeneralResponse::ResponseCode::HTTP_VERSION_NOT_SUPPORTED, - getCompatibility()); + GeneralResponse::ResponseCode::HTTP_VERSION_NOT_SUPPORTED); // we need to close the connection, because there is no way we // know what to remove and then continue @@ -258,8 +255,7 @@ bool HttpCommTask::processRead() { if (_fullUrl.size() > 16384) { HttpResponse response( - GeneralResponse::ResponseCode::REQUEST_URI_TOO_LONG, - getCompatibility()); + GeneralResponse::ResponseCode::REQUEST_URI_TOO_LONG); // we need to close the connection, because there is no way we // know what to remove and then continue @@ -343,8 +339,7 @@ bool HttpCommTask::processRead() { // bad request, method not allowed HttpResponse response( - GeneralResponse::ResponseCode::METHOD_NOT_ALLOWED, - getCompatibility()); + GeneralResponse::ResponseCode::METHOD_NOT_ALLOWED); // we need to close the connection, because there is no way we // know what to remove and then continue @@ -377,8 +372,7 @@ bool HttpCommTask::processRead() { LOG(TRACE) << "cannot serve request - server is inactive"; HttpResponse response( - GeneralResponse::ResponseCode::SERVICE_UNAVAILABLE, - getCompatibility()); + GeneralResponse::ResponseCode::SERVICE_UNAVAILABLE); // we need to close the connection, because there is no way we // know what to remove and then continue @@ -489,8 +483,6 @@ bool HttpCommTask::processRead() { // authenticate // ............................................................................. 
- auto const compatibility = _request->compatibility(); - GeneralResponse::ResponseCode authResult = _server->handlerFactory()->authenticateRequest(_request); @@ -499,15 +491,15 @@ bool HttpCommTask::processRead() { if (authResult == GeneralResponse::ResponseCode::OK || isOptionsRequest) { // handle HTTP OPTIONS requests directly if (isOptionsRequest) { - processCorsOptions(compatibility); + processCorsOptions(); } else { - processRequest(compatibility); + processRequest(); } } // not found else if (authResult == GeneralResponse::ResponseCode::NOT_FOUND) { - HttpResponse response(authResult, compatibility); + HttpResponse response(authResult); response.setContentType(StaticStrings::MimeTypeJson); response.body() @@ -525,7 +517,7 @@ bool HttpCommTask::processRead() { // forbidden else if (authResult == GeneralResponse::ResponseCode::FORBIDDEN) { - HttpResponse response(authResult, compatibility); + HttpResponse response(authResult); response.setContentType(StaticStrings::MimeTypeJson); response.body() @@ -542,8 +534,7 @@ bool HttpCommTask::processRead() { // not authenticated else { - HttpResponse response(GeneralResponse::ResponseCode::UNAUTHORIZED, - compatibility); + HttpResponse response(GeneralResponse::ResponseCode::UNAUTHORIZED); if (sendWwwAuthenticateHeader()) { std::string realm = "basic realm=\"" + @@ -712,8 +703,7 @@ bool HttpCommTask::checkContentLength(bool expectContentLength) { if (bodyLength < 0) { // bad request, body length is < 0. 
this is a client error - HttpResponse response(GeneralResponse::ResponseCode::LENGTH_REQUIRED, - getCompatibility()); + HttpResponse response(GeneralResponse::ResponseCode::LENGTH_REQUIRED); resetState(true); handleResponse(&response); @@ -735,8 +725,7 @@ bool HttpCommTask::checkContentLength(bool expectContentLength) { // request entity too large HttpResponse response( - GeneralResponse::ResponseCode::REQUEST_ENTITY_TOO_LARGE, - getCompatibility()); + GeneralResponse::ResponseCode::REQUEST_ENTITY_TOO_LARGE); resetState(true); handleResponse(&response); @@ -779,8 +768,8 @@ void HttpCommTask::fillWriteBuffer() { /// @brief handles CORS options //////////////////////////////////////////////////////////////////////////////// -void HttpCommTask::processCorsOptions(uint32_t compatibility) { - HttpResponse response(GeneralResponse::ResponseCode::OK, compatibility); +void HttpCommTask::processCorsOptions() { + HttpResponse response(GeneralResponse::ResponseCode::OK); response.setHeaderNC(StaticStrings::Allow, StaticStrings::CorsMethods); @@ -817,7 +806,7 @@ void HttpCommTask::processCorsOptions(uint32_t compatibility) { /// @brief processes a request //////////////////////////////////////////////////////////////////////////////// -void HttpCommTask::processRequest(uint32_t compatibility) { +void HttpCommTask::processRequest() { // check for deflate bool found; std::string const& acceptEncoding = @@ -846,8 +835,7 @@ void HttpCommTask::processRequest(uint32_t compatibility) { if (handler == nullptr) { LOG(TRACE) << "no handler is known, giving up"; - HttpResponse response(GeneralResponse::ResponseCode::NOT_FOUND, - compatibility); + HttpResponse response(GeneralResponse::ResponseCode::NOT_FOUND); clearRequest(); handleResponse(&response); @@ -886,8 +874,7 @@ void HttpCommTask::processRequest(uint32_t compatibility) { } if (ok) { - HttpResponse response(GeneralResponse::ResponseCode::ACCEPTED, - compatibility); + HttpResponse 
response(GeneralResponse::ResponseCode::ACCEPTED); if (jobId > 0) { // return the job id we just created @@ -906,8 +893,7 @@ void HttpCommTask::processRequest(uint32_t compatibility) { } if (!ok) { - HttpResponse response(GeneralResponse::ResponseCode::SERVER_ERROR, - compatibility); + HttpResponse response(GeneralResponse::ResponseCode::SERVER_ERROR); handleResponse(&response); } } @@ -983,18 +969,6 @@ bool HttpCommTask::sendWwwAuthenticateHeader() const { return !found; } -//////////////////////////////////////////////////////////////////////////////// -/// @brief get request compatibility -//////////////////////////////////////////////////////////////////////////////// - -int32_t HttpCommTask::getCompatibility() const { - if (_request != nullptr) { - return _request->compatibility(); - } - - return GeneralRequest::MIN_COMPATIBILITY; -} - bool HttpCommTask::setup(Scheduler* scheduler, EventLoop loop) { bool ok = SocketTask::setup(scheduler, loop); diff --git a/arangod/HttpServer/HttpCommTask.h b/arangod/HttpServer/HttpCommTask.h index afd65a4189..9b53d47f04 100644 --- a/arangod/HttpServer/HttpCommTask.h +++ b/arangod/HttpServer/HttpCommTask.h @@ -124,13 +124,13 @@ class HttpCommTask : public SocketTask, public RequestStatisticsAgent { /// @brief handles CORS options ////////////////////////////////////////////////////////////////////////////// - void processCorsOptions(uint32_t compatibility); + void processCorsOptions(); ////////////////////////////////////////////////////////////////////////////// /// @brief processes a request ////////////////////////////////////////////////////////////////////////////// - void processRequest(uint32_t compatibility); + void processRequest(); ////////////////////////////////////////////////////////////////////////////// /// @brief clears the request object @@ -154,12 +154,6 @@ class HttpCommTask : public SocketTask, public RequestStatisticsAgent { bool sendWwwAuthenticateHeader() const; - 
////////////////////////////////////////////////////////////////////////////// - /// @brief get request compatibility - ////////////////////////////////////////////////////////////////////////////// - - int32_t getCompatibility() const; - protected: bool setup(Scheduler* scheduler, EventLoop loop) override; diff --git a/arangod/HttpServer/HttpHandler.cpp b/arangod/HttpServer/HttpHandler.cpp index 68c5b4be89..2d81786817 100644 --- a/arangod/HttpServer/HttpHandler.cpp +++ b/arangod/HttpServer/HttpHandler.cpp @@ -185,8 +185,7 @@ HttpHandler::status_t HttpHandler::executeFull() { } if (status._status != HANDLER_ASYNC && _response == nullptr) { - _response = new HttpResponse(GeneralResponse::ResponseCode::SERVER_ERROR, - GeneralRequest::MIN_COMPATIBILITY); + _response = new HttpResponse(GeneralResponse::ResponseCode::SERVER_ERROR); } requestStatisticsAgentSetRequestEnd(); @@ -245,14 +244,6 @@ void HttpHandler::createResponse(GeneralResponse::ResponseCode code) { delete _response; _response = nullptr; - int32_t apiCompatibility; - - if (_request != nullptr) { - apiCompatibility = _request->compatibility(); - } else { - apiCompatibility = GeneralRequest::MIN_COMPATIBILITY; - } - // create a "standard" (standalone) Http response - _response = new HttpResponse(code, apiCompatibility); + _response = new HttpResponse(code); } diff --git a/arangod/HttpServer/HttpHandlerFactory.cpp b/arangod/HttpServer/HttpHandlerFactory.cpp index 8584a108ff..7f9e206a92 100644 --- a/arangod/HttpServer/HttpHandlerFactory.cpp +++ b/arangod/HttpServer/HttpHandlerFactory.cpp @@ -61,12 +61,10 @@ class MaintenanceHandler : public HttpHandler { //////////////////////////////////////////////////////////////////////////////// HttpHandlerFactory::HttpHandlerFactory(std::string const& authenticationRealm, - int32_t minCompatibility, bool allowMethodOverride, context_fptr setContext, void* setContextData) : _authenticationRealm(authenticationRealm), - _minCompatibility(minCompatibility), 
_allowMethodOverride(allowMethodOverride), _setContext(setContext), _setContextData(setContextData), @@ -141,8 +139,7 @@ std::string HttpHandlerFactory::authenticationRealm( HttpRequest* HttpHandlerFactory::createRequest(ConnectionInfo const& info, char const* ptr, size_t length) { - HttpRequest* request = new HttpRequest(info, ptr, length, _minCompatibility, - _allowMethodOverride); + HttpRequest* request = new HttpRequest(info, ptr, length, _allowMethodOverride); if (request != nullptr) { setRequestContext(request); diff --git a/arangod/HttpServer/HttpHandlerFactory.h b/arangod/HttpServer/HttpHandlerFactory.h index 9bba758007..319c38ce63 100644 --- a/arangod/HttpServer/HttpHandlerFactory.h +++ b/arangod/HttpServer/HttpHandlerFactory.h @@ -79,7 +79,7 @@ class HttpHandlerFactory { /// @brief constructs a new handler factory ////////////////////////////////////////////////////////////////////////////// - HttpHandlerFactory(std::string const&, int32_t, bool, context_fptr, void*); + HttpHandlerFactory(std::string const&, bool, context_fptr, void*); HttpHandlerFactory(HttpHandlerFactory const&) = delete; HttpHandlerFactory& operator=(HttpHandlerFactory const&) = delete; @@ -149,14 +149,6 @@ class HttpHandlerFactory { std::string _authenticationRealm; - ////////////////////////////////////////////////////////////////////////////// - /// @brief minimum compatibility - /// the value is an ArangoDB version number in the following format: - /// 10000 * major + 100 * minor (e.g. 
10400 for ArangoDB 1.4) - ////////////////////////////////////////////////////////////////////////////// - - int32_t _minCompatibility; - ////////////////////////////////////////////////////////////////////////////// /// @brief allow overriding HTTP request method with custom headers ////////////////////////////////////////////////////////////////////////////// diff --git a/arangod/Replication/Syncer.cpp b/arangod/Replication/Syncer.cpp index 7ae178ea40..4bbe1ddce5 100644 --- a/arangod/Replication/Syncer.cpp +++ b/arangod/Replication/Syncer.cpp @@ -516,7 +516,7 @@ int Syncer::createIndex(VPackSlice const& slice) { std::string cnameString = getCName(slice); // TODO - // Backwards compatibiltiy. old check to nullptr, new is empty string + // Backwards compatibility. old check to nullptr, new is empty string // Other api does not know yet. char const* cname = nullptr; if (!cnameString.empty()) { @@ -575,7 +575,7 @@ int Syncer::dropIndex(arangodb::velocypack::Slice const& slice) { std::string cnameString = getCName(slice); // TODO - // Backwards compatibiltiy. old check to nullptr, new is empty string + // Backwards compatibility. old check to nullptr, new is empty string // Other api does not know yet. 
char const* cname = nullptr; if (!cnameString.empty()) { diff --git a/arangod/RestHandler/RestBatchHandler.cpp b/arangod/RestHandler/RestBatchHandler.cpp index 4831e576e6..1439af23b8 100644 --- a/arangod/RestHandler/RestBatchHandler.cpp +++ b/arangod/RestHandler/RestBatchHandler.cpp @@ -132,7 +132,7 @@ HttpHandler::status_t RestBatchHandler::execute() { LOG(TRACE) << "part header is: " << std::string(headerStart, headerLength); HttpRequest* request = new HttpRequest(_request->connectionInfo(), headerStart, headerLength, - _request->compatibility(), false); + false); if (request == nullptr) { generateError(GeneralResponse::ResponseCode::SERVER_ERROR, diff --git a/arangod/RestServer/RestServerFeature.cpp b/arangod/RestServer/RestServerFeature.cpp index df3472ce83..4fc33f6c61 100644 --- a/arangod/RestServer/RestServerFeature.cpp +++ b/arangod/RestServer/RestServerFeature.cpp @@ -79,7 +79,6 @@ RestServerFeature::RestServerFeature( : ApplicationFeature(server, "RestServer"), _keepAliveTimeout(300.0), _authenticationRealm(authenticationRealm), - _defaultApiCompatibility(Version::getNumericServerVersion()), _allowMethodOverride(false), _authentication(true), _authenticationUnixSockets(true), @@ -104,10 +103,6 @@ void RestServerFeature::collectOptions( std::shared_ptr options) { options->addSection("server", "Server features"); - options->addHiddenOption("--server.default-api-compatibility", - "default API compatibility version", - new Int32Parameter(&_defaultApiCompatibility)); - options->addOption("--server.authentication", "enable or disable authentication for ALL client requests", new BooleanParameter(&_authentication)); @@ -140,12 +135,6 @@ void RestServerFeature::collectOptions( } void RestServerFeature::validateOptions(std::shared_ptr) { - if (_defaultApiCompatibility < HttpRequest::MIN_COMPATIBILITY) { - LOG(FATAL) << "invalid value for --server.default-api-compatibility. 
" - "minimum allowed value is " - << HttpRequest::MIN_COMPATIBILITY; - FATAL_ERROR_EXIT(); - } } static TRI_vocbase_t* LookupDatabaseFromRequest(HttpRequest* request, @@ -197,15 +186,12 @@ void RestServerFeature::prepare() { } void RestServerFeature::start() { - LOG(DEBUG) << "using default API compatibility: " - << (long int)_defaultApiCompatibility; - _jobManager.reset(new AsyncJobManager(ClusterCommRestCallback)); _httpOptions._vocbase = DatabaseFeature::DATABASE->vocbase(); _handlerFactory.reset(new HttpHandlerFactory( - _authenticationRealm, _defaultApiCompatibility, _allowMethodOverride, + _authenticationRealm, _allowMethodOverride, &SetRequestContext, DatabaseServerFeature::SERVER)); defineHandlers(); diff --git a/arangod/RestServer/RestServerFeature.h b/arangod/RestServer/RestServerFeature.h index f06010f9bc..1fd5a0ee4a 100644 --- a/arangod/RestServer/RestServerFeature.h +++ b/arangod/RestServer/RestServerFeature.h @@ -52,7 +52,6 @@ class RestServerFeature final private: double _keepAliveTimeout; std::string const _authenticationRealm; - int32_t _defaultApiCompatibility; bool _allowMethodOverride; bool _authentication; bool _authenticationUnixSockets; diff --git a/arangod/Utils/WorkMonitorArangod.cpp b/arangod/Utils/WorkMonitorArangod.cpp index b9059c40c3..2a58d0f4a0 100644 --- a/arangod/Utils/WorkMonitorArangod.cpp +++ b/arangod/Utils/WorkMonitorArangod.cpp @@ -163,8 +163,7 @@ void WorkMonitor::vpackHandler(VPackBuilder* b, WorkDescription* desc) { //////////////////////////////////////////////////////////////////////////////// void WorkMonitor::sendWorkOverview(uint64_t taskId, std::string const& data) { - auto response = std::make_unique(GeneralResponse::ResponseCode::OK, - GeneralRequest::MIN_COMPATIBILITY); + auto response = std::make_unique(GeneralResponse::ResponseCode::OK); response->setContentType(StaticStrings::MimeTypeJson); TRI_AppendString2StringBuffer(response->body().stringBuffer(), data.c_str(), diff --git a/arangod/V8Server/v8-actions.cpp 
b/arangod/V8Server/v8-actions.cpp index 940763fabd..9c4a3064cd 100644 --- a/arangod/V8Server/v8-actions.cpp +++ b/arangod/V8Server/v8-actions.cpp @@ -128,7 +128,7 @@ class v8_action_t : public TRI_action_t { result.isValid = true; result.response = - new HttpResponse(GeneralResponse::ResponseCode::NOT_FOUND, request->compatibility()); + new HttpResponse(GeneralResponse::ResponseCode::NOT_FOUND); return result; } @@ -488,11 +488,6 @@ static v8::Handle RequestCppToV8(v8::Isolate* isolate, TRI_GET_GLOBAL_STRING(CookiesKey); req->ForceSet(CookiesKey, cookiesObject); - // determine API compatibility version - int32_t compatibility = request->compatibility(); - TRI_GET_GLOBAL_STRING(CompatibilityKey); - req->ForceSet(CompatibilityKey, v8::Integer::New(isolate, compatibility)); - return req; } @@ -502,8 +497,7 @@ static v8::Handle RequestCppToV8(v8::Isolate* isolate, static HttpResponse* ResponseV8ToCpp(v8::Isolate* isolate, TRI_v8_global_t const* v8g, - v8::Handle const res, - uint32_t compatibility) { + v8::Handle const res) { GeneralResponse::ResponseCode code = GeneralResponse::ResponseCode::OK; TRI_GET_GLOBAL_STRING(ResponseCodeKey); @@ -513,7 +507,7 @@ static HttpResponse* ResponseV8ToCpp(v8::Isolate* isolate, (int)(TRI_ObjectToDouble(res->Get(ResponseCodeKey)))); } - auto response = std::make_unique(code, compatibility); + auto response = std::make_unique(code); TRI_GET_GLOBAL_STRING(ContentTypeKey); if (res->Has(ContentTypeKey)) { @@ -722,7 +716,7 @@ static TRI_action_result_t ExecuteActionVocbase( result.canceled = false; HttpResponse* response = - new HttpResponse(GeneralResponse::ResponseCode::SERVER_ERROR, request->compatibility()); + new HttpResponse(GeneralResponse::ResponseCode::SERVER_ERROR); if (errorMessage.empty()) { errorMessage = TRI_errno_string(errorCode); } @@ -736,8 +730,7 @@ static TRI_action_result_t ExecuteActionVocbase( else if (tryCatch.HasCaught()) { if (tryCatch.CanContinue()) { - HttpResponse* response = new 
HttpResponse(GeneralResponse::ResponseCode::SERVER_ERROR, - request->compatibility()); + HttpResponse* response = new HttpResponse(GeneralResponse::ResponseCode::SERVER_ERROR); response->body().appendText(TRI_StringifyV8Exception(isolate, &tryCatch)); result.response = response; @@ -750,7 +743,7 @@ static TRI_action_result_t ExecuteActionVocbase( else { result.response = - ResponseV8ToCpp(isolate, v8g, res, request->compatibility()); + ResponseV8ToCpp(isolate, v8g, res); } return result; diff --git a/js/actions/api-structure.js b/js/actions/api-structure.js deleted file mode 100644 index 5efde75e8a..0000000000 --- a/js/actions/api-structure.js +++ /dev/null @@ -1,1915 +0,0 @@ -/*jshint strict: false */ - -//////////////////////////////////////////////////////////////////////////////// -/// @brief querying and managing structures -/// -/// @file -/// -/// DISCLAIMER -/// -/// Copyright 2014 ArangoDB GmbH, Cologne, Germany -/// -/// Licensed under the Apache License, Version 2.0 (the "License"); -/// you may not use this file except in compliance with the License. -/// You may obtain a copy of the License at -/// -/// http://www.apache.org/licenses/LICENSE-2.0 -/// -/// Unless required by applicable law or agreed to in writing, software -/// distributed under the License is distributed on an "AS IS" BASIS, -/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -/// See the License for the specific language governing permissions and -/// limitations under the License. -/// -/// Copyright holder is ArangoDB GmbH, Cologne, Germany -/// -/// @author Dr. 
Frank Celler -/// @author Copyright 2014, ArangoDB GmbH, Cologne, Germany -/// @author Copyright 2013, triAGENS GmbH, Cologne, Germany -//////////////////////////////////////////////////////////////////////////////// - -var arangodb = require("@arangodb"); -var console = require("console"); -var actions = require("@arangodb/actions"); -var arangodb = require("@arangodb"); -var db = arangodb.db; - -var DEFAULT_KEY = "default"; -var API = "_api/structure"; -var checkedIndex = false; - - -/* -THIS MODULE IS UNOFFICIAL AND DEPRECATED - - -Configuration example document: - -{ - "_key" : "a_collection_name", <- name of the collection with structure - - "attributes": { <- List of all attributes - "number": { <- Name of the attribute - "type": "number", <- Type of the attribute - "formatter": { <- Output formatter configuration - "default": { - "args": { - "decPlaces": 4, - "decSeparator": ".", - "thouSeparator": "," - }, - "module": "@arangodb/formatter", - "do": "formatFloat" - }, - "de": { - "args": { - "decPlaces": 4, - "decSeparator": ",", - "thouSeparator": "." - }, - "module": "@arangodb/formatter", - "do": "formatFloat" - } - }, - "parser": { <- Input parser configuration - "default": { - "args": { - "decPlaces": 4, - "decSeparator": ".", - "thouSeparator": "," - }, - "module": "@arangodb/formatter", - "do": "parseFloat" - }, - "de": { - "args": { - "decPlaces": 4, - "decSeparator": ",", - "thouSeparator": "." 
- }, - "module": "@arangodb/formatter", - "do": "parseFloat" - } - }, - "validators": <- List of input validators - [ { "module": "@arangodb/formatter", "do": "validateNotNull" } ] - }, - "string": { <- Name of the attribute - "type": "string", <- Type of the attribute - "formatter": {}, - "validators": [] - }, - "zahlen": { <- Name of the attribute - "type": "number_list_type" <- Type of the attribute - }, - "number2": { <- Name of the attribute - "type": "number", <- Type of the attribute - "formatter": { - "default": { - "args": { - "decPlaces": 0, - "decSeparator": ".", - "thouSeparator": "," - }, - "module": "@arangodb/formatter", - "do": "formatFloat" - } - }, - "validators": [] - }, - "no_structure": { <- Name of the attribute - "type": "mixed" <- Type of the attribute - }, - "timestamp": { - "type": "number", - "formatter": { - "default": { - "module": "@arangodb/formatter", - "do": "formatDatetime", - "args": { - "lang": "en", - "timezone": "GMT", - "pattern": "yyyy-MM-dd'T'HH:mm:ssZ" - } - }, - "de": { - "module": "@arangodb/formatter", - "do": "formatDatetime", - "args": { - "lang": "de", - "timezone": "Europe/Berlin", - "pattern": "yyyy.MM.dd HH:mm:ss zzz" - } - } - }, - "parser": { - "default": { - "module": "@arangodb/formatter", - "do": "parseDatetime", - "args": { - "lang": "en", - "timezone": "GMT", - "pattern": "yyyy-MM-dd'T'HH:mm:ssZ" - } - }, - "de": { - "module": "@arangodb/formatter", - "do": "parseDatetime", - "args": { - "lang": "de", - "timezone": "Europe/Berlin", - "pattern": "yyyy.MM.dd HH:mm:ss zzz" - } - } - }, - "validators": [] - }, - "object1": { <- Name of the attribute - "type": "complex_type1" <- Type of the attribute - } - }, - - - "arrayTypes": { <- Array type definitions - "number_list_type": { <- Name of type - "type": "number", - "formatter": { - "default": { - "args": { - "decPlaces": 2, - "decSeparator": ".", - "thouSeparator": "," - }, - "module": "@arangodb/formatter", - "do": "formatFloat" - }, - "de": { - "args": { - 
"decPlaces": 2, - "decSeparator": ",", - "thouSeparator": "." - }, - "module": "@arangodb/formatter", - "do": "formatFloat" - } - }, - "parser": { - "default": { - "args": { - "decPlaces": 2, - "decSeparator": ".", - "thouSeparator": "," - }, - "module": "@arangodb/formatter", - "do": "parseFloat" - }, - "de": { - "args": { - "decPlaces": 2, - "decSeparator": ",", - "thouSeparator": "." - }, - "module": "@arangodb/formatter", - "do": "parseFloat" - } - } - } - }, - - - "objectTypes": { <- Object type definitions - "complex_type1": { <- Name of type - "attributes": { <- Attributes of the object type - "aNumber": { - "type": "number" - }, - "aList": { - "type": "number_list_type" <- reference to array type - } - } - } - } -} - -*/ - -//////////////////////////////////////////////////////////////////////////////// -/// @brief get structures collection -//////////////////////////////////////////////////////////////////////////////// - -function getCollection () { - var c; - - c = db._collection('_structures'); - if (c === null) { - c = db._create('_structures', { isSystem : true }); - } - - if (c !== null && ! 
checkedIndex) { - c.ensureUniqueConstraint('collection', { sparse: true }); - checkedIndex = true; - } - - return c; -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief convert a string to boolean -//////////////////////////////////////////////////////////////////////////////// - -function stringToBoolean (string){ - if (undefined === string || null === string) { - return false; - } - - switch(string.toLowerCase()){ - case "true": case "yes": case "1": return true; - case "false": case "no": case "0": case null: return false; - default: return Boolean(string); - } -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief returns a (OK) result -//////////////////////////////////////////////////////////////////////////////// - -function resultOk (req, res, httpReturnCode, keyvals, headers) { - 'use strict'; - - res.responseCode = httpReturnCode; - res.contentType = "application/json; charset=utf-8"; - - if (undefined !== keyvals) { - res.body = JSON.stringify(keyvals); - } - - if (headers !== undefined && headers !== null) { - res.headers = headers; - } -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief returns a (error) result -//////////////////////////////////////////////////////////////////////////////// - -function resultError (req, res, httpReturnCode, errorNum, errorMessage, keyvals, headers) { - 'use strict'; - - var i; - - res.responseCode = httpReturnCode; - res.contentType = "application/json; charset=utf-8"; - - var result = {}; - - if (keyvals !== undefined) { - for (i in keyvals) { - if (keyvals.hasOwnProperty(i)) { - result[i] = keyvals[i]; - } - } - } - - result.error = true; - result.code = httpReturnCode; - if (undefined !== errorMessage.errorNum) { - result.errorNum = errorMessage.errorNum; - } - else { - result.errorNum = errorNum; - } - if (undefined !== errorMessage.errorMessage) { - result.errorMessage = 
errorMessage.errorMessage; - } - else { - result.errorMessage = errorMessage; - } - - - res.body = JSON.stringify(result); - - if (headers !== undefined && headers !== null) { - res.headers = headers; - } -} - - -//////////////////////////////////////////////////////////////////////////////// -/// @brief returns true if a "if-match" or "if-none-match" errer happens -//////////////////////////////////////////////////////////////////////////////// - -function matchError (req, res, doc) { - - if (req.headers["if-none-match"] !== undefined) { - if (doc._rev === req.headers["if-none-match"]) { - // error - res.responseCode = actions.HTTP_NOT_MODIFIED; - res.contentType = "application/json; charset=utf-8"; - res.body = ''; - res.headers = {}; - return true; - } - } - - if (req.headers["if-match"] !== undefined) { - if (doc._rev !== req.headers["if-match"]) { - // error - resultError(req, res, actions.HTTP_PRECONDITION_FAILED, - arangodb.ERROR_ARANGO_CONFLICT, "wrong revision", - {'_id': doc._id, '_rev': doc._rev, '_key': doc._key}); - return true; - } - } - - var rev = req.parameters.rev; - if (rev !== undefined) { - if (doc._rev !== rev) { - // error - resultError(req, res, actions.HTTP_PRECONDITION_FAILED, - arangodb.ERROR_ARANGO_CONFLICT, "wrong revision", - {'_id': doc._id, '_rev': doc._rev, '_key': doc._key}); - return true; - } - } - - return false; -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief returns the collection -//////////////////////////////////////////////////////////////////////////////// - -function getCollectionByRequest(req, res) { - - if (req.suffix.length === 0) { - // GET /_api/structure (missing collection) - resultError(req, res, actions.HTTP_BAD, - arangodb.ERROR_ARANGO_COLLECTION_NOT_FOUND, "collection not found"); - return; - } - - // TODO: check parameter createCollection=true and create collection - - return db._collection(req.suffix[0]); -} - 
-//////////////////////////////////////////////////////////////////////////////// -/// @brief returns the overwite policy -//////////////////////////////////////////////////////////////////////////////// - -function getOverwritePolicy(req) { - - var policy = req.parameters.policy; - - if (undefined !== policy && "error" === policy) { - return false; - } - - return true; -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief returns the overwite policy -//////////////////////////////////////////////////////////////////////////////// - -function getKeepNull(req) { - return stringToBoolean(req.parameters.keepNull); -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief returns wait for sync -//////////////////////////////////////////////////////////////////////////////// - -function getWaitForSync(req, collection) { - if (collection.properties().waitForSync) { - return true; - } - - return stringToBoolean(req.parameters.waitForSync); -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief save a document -//////////////////////////////////////////////////////////////////////////////// - -function saveDocument(req, res, collection, document) { - var doc; - var waitForSync = getWaitForSync(req, collection); - - try { - doc = collection.save(document, waitForSync); - } - catch(err) { - resultError(req, res, actions.HTTP_BAD, - arangodb.ERROR_FAILED, - err); - return; - } - - var headers = { - "Etag" : doc._rev, - "location" : "/_api/structure/" + doc._id - }; - - if (req.hasOwnProperty('compatibility') && req.compatibility >= 10400) { - // 1.4+ style location header - headers.location = "/_db/" + encodeURIComponent(arangodb.db._name()) + headers.location; - } - - var returnCode = waitForSync ? 
actions.HTTP_CREATED : actions.HTTP_ACCEPTED; - - doc.error = false; - - resultOk(req, res, returnCode, doc, headers); -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief replace a document -//////////////////////////////////////////////////////////////////////////////// - -function replaceDocument(req, res, collection, oldDocument, newDocument) { - var doc; - var waitForSync = getWaitForSync(req, collection); - var overwrite = getOverwritePolicy(req); - - if (! overwrite && - undefined !== newDocument._rev && - oldDocument._rev !== newDocument._rev) { - resultError(req, res, actions.HTTP_BAD, - arangodb.ERROR_FAILED, - "wrong version"); - return; - } - - try { - doc = collection.replace(oldDocument, newDocument, true, waitForSync); - } - catch(err) { - resultError(req, res, actions.HTTP_BAD, - arangodb.ERROR_FAILED, - err); - return; - } - - var headers = { - "Etag" : doc._rev - }; - - var returnCode = waitForSync ? actions.HTTP_CREATED : actions.HTTP_ACCEPTED; - - resultOk(req, res, returnCode, doc, headers); -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief update a document -//////////////////////////////////////////////////////////////////////////////// - -function patchDocument(req, res, collection, oldDocument, newDocument) { - var doc; - var waitForSync = getWaitForSync(req, collection); - var overwrite = getOverwritePolicy(req); - var keepNull = getKeepNull(req); - - if (!overwrite && - undefined !== newDocument._rev && - oldDocument._rev !== newDocument._rev) { - resultError(req, res, actions.HTTP_BAD, - arangodb.ERROR_FAILED, - "wrong version"); - return; - } - - try { - doc = collection.update(oldDocument, newDocument, true, keepNull, waitForSync); - } - catch(err) { - resultError(req, res, actions.HTTP_BAD, - arangodb.ERROR_FAILED, - err); - return; - } - - var headers = { - "Etag" : doc._rev - }; - - var returnCode = waitForSync ? 
actions.HTTP_CREATED : actions.HTTP_ACCEPTED; - - resultOk(req, res, returnCode, doc, headers); -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief returns the document -//////////////////////////////////////////////////////////////////////////////// - -function getDocumentByRequest(req, res, collection) { - - if (req.suffix.length < 2) { - resultError(req, res, actions.HTTP_BAD, - arangodb.ERROR_ARANGO_DOCUMENT_HANDLE_BAD, - "expecting GET /_api/structure/"); - return; - } - - try { - return collection.document(req.suffix[1]); - } - catch (err) { - resultError(req, res, actions.HTTP_NOT_FOUND, - arangodb.ERROR_ARANGO_DOCUMENT_NOT_FOUND, - "document /_api/structure/" + req.suffix[0] + "/" + req.suffix[1] + - " not found"); - } -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief returns the types -//////////////////////////////////////////////////////////////////////////////// - -function getTypes (structure) { - var predefinedTypes = { - "boolean" : { - }, - "string" : { - }, - "number" : { - }, - "mixed" : { - } - }; - - var types = { - "predefinedTypes" : predefinedTypes, - "arrayTypes" : {}, - "objectTypes" : {} - }; - - if (undefined !== structure.arrayTypes) { - types.arrayTypes = structure.arrayTypes; - } - - if (undefined !== structure.objectTypes) { - types.objectTypes = structure.objectTypes; - } - return types; -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief returns the request language -//////////////////////////////////////////////////////////////////////////////// - -function getLang (req) { - if (undefined !== req.parameters.lang) { - return req.parameters.lang; - } - - return null; -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief returns formatter -//////////////////////////////////////////////////////////////////////////////// - -function selectFormatter 
(formatter1, formatter2, lang) { - var formatter = formatter1; - if (undefined === formatter1 || JSON.stringify(formatter1) === "{}") { - formatter = formatter2; - } - - if (undefined !== formatter) { - if (undefined === lang) { - return formatter[DEFAULT_KEY]; - } - - if (undefined === formatter[lang]) { - return formatter[DEFAULT_KEY]; - } - return formatter[lang]; - } -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief returns the parser -//////////////////////////////////////////////////////////////////////////////// - -function selectParser (parser1, parser2, lang) { - var parser = parser1; - if (undefined === parser1 || JSON.stringify(parser1) === "{}") { - parser = parser2; - } - - if (undefined !== parser) { - if (undefined === lang) { - return parser[DEFAULT_KEY]; - } - - if (undefined === parser[lang]) { - return parser[DEFAULT_KEY]; - } - return parser[lang]; - } -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief call a module function -//////////////////////////////////////////////////////////////////////////////// - -function callModuleFunction(value, moduleName, functionName, functionArgs) { - if (undefined === moduleName) { - return value; - } - - try { - var formatModule = require(moduleName); - if (formatModule.hasOwnProperty(functionName)) { - // call the function - return formatModule[functionName].call(null, value, functionArgs); - } - } - catch (err) { - // could not load module - console.warn("module error for module: " + moduleName + " error: " + err); - return value; - } - - // function not found - console.warn("module function '" + functionName + "' of module '" + moduleName - + "' not found."); - - return value; -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief returns the formatted value -//////////////////////////////////////////////////////////////////////////////// - -function formatValue 
(value, structure, types, lang) { - var result; - var key; - var section; - - try { - var type = types.predefinedTypes[structure.type]; - if (type) { - //console.warn("predefined type found: " + structure.type); - - section = selectFormatter(structure.formatter, - types.predefinedTypes[structure.type].formatter, lang); - - if (undefined === section) { - return value; - } - - return callModuleFunction(value, - section.module, - section['do'], - section.args); - } - - // array types - type = types.arrayTypes[structure.type]; - if (type) { - //console.warn("array type found: " + structure.type); - - // check for array formatter - section = selectFormatter(structure.formatter, undefined, lang); - if (undefined !== section) { - return callModuleFunction(value, - section.module, - section['do'], - section.args); - } - - // format each element - result = []; - - if(value instanceof Array) { - for (key = 0; key < value.length; ++key) { - result[key] = formatValue(value[key], type, types, lang); - } - } - return result; - } - - // object types - type = types.objectTypes[structure.type]; - if (type) { - //console.warn("object type found: " + structure.type); - - // TODO check type of value - - // check for object formatter - section = selectFormatter(structure.formatter, undefined, lang); - if (undefined !== section) { - return callModuleFunction(value, - section.module, - section['do'], - section.args); - } - - var attributes = type.attributes; - if (undefined === attributes) { - // no attributes - return null; - } - - // TODO check type of attribute - - // format each property - result = {}; - for (key in attributes) { - if (attributes.hasOwnProperty(key)) { - if (value.hasOwnProperty(key)) { - var subStructure = attributes[key]; - if (undefined === subStructure) { - result[key] = value[key]; - } - else { - result[key] = formatValue(value[key], subStructure, types, lang); - } - } - else { - result[key] = null; - } - } - } - return result; - } - } - catch (err) { - 
//console.warn("error = " + err); - } - - return value; -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief returns the parsed value -//////////////////////////////////////////////////////////////////////////////// - -function parseValue (value, structure, types, lang) { - var result; - var key; - var section; - - // console.warn("in parseValue"); - - try { - var type = types.predefinedTypes[structure.type]; - if (type) { - // console.warn("predefined type found: " + structure.type); - - // TODO check type of value - - section = selectParser(structure.parser, - types.predefinedTypes[structure.type].parser, lang); - - if (undefined === section) { - //console.warn("section is undefined"); - return value; - } - - var x = callModuleFunction(value, - section.module, - section['do'], - section.args); - - // console.warn("parsing " + value + " to " + x ); - - return x; - } - - // array types - type = types.arrayTypes[structure.type]; - if (type) { - //console.warn("array type found: " + structure.type); - - // check for array formatter - section = selectParser(structure.parser, undefined, lang); - if (undefined !== section) { - return callModuleFunction(value, - section.module, - section['do'], - section.args); - } - - // parse each element - result = []; - if(value instanceof Array) { - for (key = 0; key < value.length; ++key) { - result[key] = parseValue(value[key], type, types, lang); - } - } - return result; - } - - // object types - type = types.objectTypes[structure.type]; - if (type) { - //console.warn("object type found: " + structure.type); - - // TODO check type of value - - // check for object parser - section = selectParser(structure.parser, undefined, lang); - if (undefined !== section) { - return callModuleFunction(value, - section.module, - section['do'], - section.args); - } - - var attributes = type.attributes; - if (undefined === attributes) { - // no attributes - return null; - } - - // TODO check type of 
attribute - - // parse each property - result = {}; - for (key in attributes) { - if (attributes.hasOwnProperty(key)) { - if (value.hasOwnProperty(key)) { - - var subStructure = attributes[key]; - if (undefined === subStructure) { - result[key] = value[key]; - } - else { - result[key] = parseValue(value[key], subStructure, types, lang); - } - - } - else { - result[key] = null; - } - } - } - return result; - } - } - catch (err) { - //console.warn("error = " + err); - } - - return value; -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief returns true if the value is valid -//////////////////////////////////////////////////////////////////////////////// - -function validateValue (value, structure, types, lang) { - var result; - var key; - var validators; - var v; - - //console.warn("in validateValue(): " + structure.type); - - try { - var type = types.predefinedTypes[structure.type]; - if (type) { - //console.warn("predefined type found: " + structure.type); - - // TODO check type of value - - validators = structure.validators; - if (undefined !== validators) { - for (key = 0; key < validators.length; ++key) { - - //console.warn("call function: " + validators[key]['do']); - - result = callModuleFunction(value, - validators[key].module, - validators[key]['do'], - validators[key].args); - - if (!result) { - return false; - } - } - } - - validators = types.predefinedTypes[structure.type].validators; - if (undefined !== validators) { - for (key = 0; key < validators.length; ++key) { - - //console.warn("call function: " + validators[key]['do']); - - result = callModuleFunction(value, - validators[key].module, - validators[key]['do'], - validators[key].args); - - if (!result) { - return false; - } - } - } - - return true; - } - - // array types - type = types.arrayTypes[structure.type]; - if (type) { - //console.warn("array type found: " + structure.type); - - // TODO check type of value - - // check for array validator - 
validators = structure.validators; - if (undefined !== validators) { - for (key = 0; key < validators.length; ++key) { - - result = callModuleFunction(value, - validators[key].module, - validators[key]['do'], - validators[key].args); - - if (!result) { - return false; - } - } - return true; - } - - // validate each element - for (key = 0; key < value.length; ++key) { - var valid = validateValue(value[key], type, types); - if (!valid) { - return false; - } - } - return true; - } - - // object types - type = types.objectTypes[structure.type]; - if (type) { - //console.warn("object type found: " + structure.type); - - // TODO check type of value - - // check for object validator - validators = structure.validators; - if (undefined !== validators) { - for (key = 0; key < validators.length; ++key) { - - result = callModuleFunction(value, - validators[key].module, - validators[key]['do'], - validators[key].args); - - if (!result) { - return false; - } - } - } - - var attributes = type.attributes; - if (undefined === attributes) { - // no attributes - return true; - } - - // validate each property - for (key in attributes) { - if (attributes.hasOwnProperty(key)) { - - if (value.hasOwnProperty(key)) { - v = value[key]; - } - else { - v = null; - } - - var subStructure = attributes[key]; - - if (undefined !== subStructure) { - if (!validateValue(v, subStructure, types, lang)) { - return false; - } - } - } - } - - return true; - } - } - catch (err) { - //console.warn("error = " + err); - } - - return false; -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief returns the structured document -//////////////////////////////////////////////////////////////////////////////// - -function resultStructure (req, res, doc, structure, headers) { - var result = {}; - - var key; - var types = getTypes(structure); - var lang = getLang(req); - var format = true; - - if (undefined !== req.parameters.format) { - format = 
stringToBoolean(req.parameters.format); - } - - if (structure.attributes !== undefined) { - for (key in structure.attributes) { - if (structure.attributes.hasOwnProperty(key)) { - var value = doc[key]; - - // format value - if (format) { - result[key] = formatValue(value, structure.attributes[key], types, lang); - } - else { - result[key] = value; - } - } - } - } - - result._id = doc._id; - result._rev = doc._rev; - result._key = doc._key; - - resultOk(req, res, actions.HTTP_OK, result, headers); -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief parse body and return the parsed document -/// @throws exception -//////////////////////////////////////////////////////////////////////////////// - -function parseDocumentByStructure(req, res, structure, body, isPatch) { - var document = {}; - - var key; - var value; - var types = getTypes(structure); - var lang = getLang(req); - var format = true; - - if (undefined !== req.parameters.format) { - format = stringToBoolean(req.parameters.format); - } - - for (key in structure.attributes) { - if (structure.attributes.hasOwnProperty(key)) { - value = body[key]; - - if (!isPatch || undefined !== value) { - if (format) { - value = parseValue(value, structure.attributes[key], types, lang); - } - - //console.warn("validate key: " + key); - if (validateValue(value, structure.attributes[key], types)) { - document[key] = value; - } - else { - throw("value of attribute '" + key + "' is not valid."); - } - } - } - } - - if (undefined !== body._id) { - document._id = body._id; - } - if (undefined !== body._rev) { - document._rev = body._rev; - } - if (undefined !== body._key) { - document._key = body._key; - } - - return document; -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief get the parsed document -//////////////////////////////////////////////////////////////////////////////// - -function saveDocumentByStructure(req, res, collection, 
structure, body) { - try { - var document = parseDocumentByStructure(req, res, structure, body, false); - saveDocument(req, res, collection, document); - } - catch(err) { - resultError(req, res, actions.HTTP_BAD, - arangodb.ERROR_FAILED, - err); - return; - } -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief replace the parsed document -//////////////////////////////////////////////////////////////////////////////// - -function replaceDocumentByStructure(req, res, collection, structure, oldDocument, body) { - try { - var document = parseDocumentByStructure(req, res, structure, body, false); - replaceDocument(req, res, collection, oldDocument, document); - } - catch(err) { - resultError(req, res, actions.HTTP_BAD, - arangodb.ERROR_FAILED, - err); - return; - } -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief patch the parsed document -//////////////////////////////////////////////////////////////////////////////// - -function patchDocumentByStructure(req, res, collection, structure, oldDocument, body) { - try { - var document = parseDocumentByStructure(req, res, structure, body, true); - patchDocument(req, res, collection, oldDocument, document); - } - catch(err) { - resultError(req, res, actions.HTTP_BAD, - arangodb.ERROR_FAILED, - err); - return; - } -} - - -//////////////////////////////////////////////////////////////////////////////// -/// @start DocuBlock JSF_read_single_document -/// @brief reads a single document -/// -/// @RESTHEADER{GET /_api/structure/{document-handle},reads a document} -/// -/// @RESTURLPARAMETERS -/// -/// @RESTURLPARAM{document-handle,string,required} -/// The Handle of the Document. -/// -/// @RESTQUERYPARAM{rev,string,optional} -/// You can conditionally select a document based on a target revision id by -/// using the `rev` query parameter. -/// -/// @RESTQUERYPARAM{lang,string,optional} -/// Language of the data. 
-/// -/// @RESTQUERYPARAM{format,boolean,optional} -/// False for unformated values (default: true). -/// -/// @RESTHEADERPARAMETERS -/// -/// @RESTHEADERPARAM{If-None-Match,string,optional} -/// If the "If-None-Match" header is given, then it must contain exactly one -/// etag. The document is returned, if it has a different revision than the -/// given etag. Otherwise a `HTTP 304` is returned. -/// -/// @RESTHEADERPARAM{If-Match,string,optional} -/// If the "If-Match" header is given, then it must contain exactly one -/// etag. The document is returned, if it has the same revision ad the -/// given etag. Otherwise a `HTTP 412` is returned. As an alternative -/// you can supply the etag in an attribute `rev` in the URL. -/// -/// @RESTDESCRIPTION -/// Returns the document identified by `document-handle`. The returned -/// document contains two special attributes: `_id` containing the document -/// handle and `_rev` containing the revision. -/// -/// @RESTRETURNCODES -/// -/// @RESTRETURNCODE{200} -/// is returned if the document was found -/// -/// @RESTRETURNCODE{404} -/// is returned if the document or collection was not found -/// -/// @RESTRETURNCODE{304} -/// is returned if the "If-None-Match" header is given and the document has -/// same version -/// -/// @RESTRETURNCODE{412} -/// is returned if a "If-Match" header or `rev` is given and the found -/// document has a different version -/// @end DocuBlock -//////////////////////////////////////////////////////////////////////////////// - -function get_api_structure(req, res) { - var structure; - - var collection = getCollectionByRequest(req, res); - if (undefined === collection) { - return; - } - - var doc = getDocumentByRequest(req, res, collection); - if (undefined === doc) { - return; - } - - if (matchError(req, res, doc)) { - return; - } - - var headers = { - "Etag" : doc._rev - }; - - try { - structure = getCollection().document(collection.name()); - } - catch (err) { - // return the doc - resultOk(req, 
res, actions.HTTP_OK, doc, headers); - return; - } - - resultStructure(req, res, doc, structure, headers); -} - -//////////////////////////////////////////////////////////////////////////////// -/// @start DocuBlock JSF_read_single_document_head -/// @brief reads a single document head -/// -/// @RESTHEADER{HEAD /_api/structure/{document-handle},reads a document header} -/// -/// @RESTURLPARAMETERS -/// -/// @RESTURLPARAM{document-handle,string,required} -/// The Handle of the Document. -/// -/// @RESTQUERYPARAMETERS -/// -/// @RESTQUERYPARAM{rev,string,optional} -/// You can conditionally select a document based on a target revision id by -/// using the `rev` query parameter. -/// -/// @RESTHEADERPARAMETERS -/// -/// @RESTHEADERPARAM{If-Match,string,optional} -/// You can conditionally get a document based on a target revision id by -/// using the `if-match` HTTP header. -/// -/// @RESTDESCRIPTION -/// Like `GET`, but only returns the header fields and not the body. You -/// can use this call to get the current revision of a document or check if -/// the document was deleted. 
-/// -/// @RESTRETURNCODES -/// -/// @RESTRETURNCODE{200} -/// is returned if the document was found -/// -/// @RESTRETURNCODE{404} -/// is returned if the document or collection was not found -/// -/// @RESTRETURNCODE{304} -/// is returned if the "If-None-Match" header is given and the document has -/// same version -/// -/// @RESTRETURNCODE{412} -/// is returned if a "If-Match" header or `rev` is given and the found -/// document has a different version -/// @end DocuBlock -//////////////////////////////////////////////////////////////////////////////// - -function head_api_structure(req, res) { - var collection = getCollectionByRequest(req, res); - if (undefined === collection) { - return; - } - - var doc = getDocumentByRequest(req, res, collection); - if (undefined === doc) { - return; - } - - if (matchError(req, res, doc)) { - return; - } - - var headers = { - "Etag" : doc._rev - }; - - resultOk(req, res, actions.HTTP_OK, undefined, headers); -} - -//////////////////////////////////////////////////////////////////////////////// -/// @start DocuBlock JSF_delete_single_document -/// @brief deletes a document -/// -/// @RESTHEADER{DELETE /_api/structure/{document-handle},deletes a document} -/// -/// @RESTURLPARAMETERS -/// -/// @RESTURLPARAM{document-handle,string,required} -/// Deletes the document identified by `document-handle`. -/// -/// @RESTQUERYPARAMETERS -/// -/// @RESTQUERYPARAM{rev,string,optional} -/// You can conditionally delete a document based on a target revision id by -/// using the `rev` query parameter. -/// -/// @RESTQUERYPARAM{policy,string,optional} -/// To control the update behavior in case there is a revision mismatch, you -/// can use the `policy` parameter. This is the same as when replacing -/// documents (see replacing documents for more details). -/// -/// @RESTQUERYPARAM{waitForSync,boolean,optional} -/// Wait until document has been sync to disk. 
-/// -/// @RESTHEADERPARAMETERS -/// -/// @RESTHEADERPARAM{If-Match,string,optional} -/// You can conditionally delete a document based on a target revision id by -/// using the `if-match` HTTP header. -/// -/// @RESTDESCRIPTION -/// The body of the response contains a JSON object with the information about -/// the handle and the revision. The attribute `_id` contains the known -/// `document-handle` of the updated document, the attribute `_rev` -/// contains the known document revision. -/// -/// If the `waitForSync` parameter is not specified or set to -/// `false`, then the collection's default `waitForSync` behavior is -/// applied. The `waitForSync` query parameter cannot be used to disable -/// synchronization for collections that have a default `waitForSync` value -/// of `true`. -/// -/// @RESTRETURNCODES -/// -/// @RESTRETURNCODE{200} -/// is returned if the document was deleted successfully and `waitForSync` was -/// `true`. -/// -/// @RESTRETURNCODE{202} -/// is returned if the document was deleted successfully and `waitForSync` was -/// `false`. -/// -/// @RESTRETURNCODE{404} -/// is returned if the collection or the document was not found. -/// The response body contains an error document in this case. -/// -/// @RESTRETURNCODE{412} -/// is returned if a "If-Match" header or `rev` is given and the current -/// document has a different version -/// @end DocuBlock -//////////////////////////////////////////////////////////////////////////////// - -function delete_api_structure (req, res) { - - var collection = getCollectionByRequest(req, res); - if (undefined === collection) { - return; - } - - var doc = getDocumentByRequest(req, res, collection); - if (undefined === doc) { - return; - } - - if (matchError(req, res, doc)) { - return; - } - - var waitForSync = getWaitForSync(req, collection); - - try { - collection.remove( doc, true, waitForSync); - resultOk(req, res, - waitForSync ? 
actions.HTTP_OK : actions.HTTP_ACCEPTED, - { "deleted" : true }); - } - catch(err) { - resultError(req, res, actions.HTTP_BAD, - arangodb.ERROR_FAILED, - err); - return; - } -} - -//////////////////////////////////////////////////////////////////////////////// -/// @start DocuBlock JSF_update_single_document -/// @brief updates a document -/// -/// @RESTHEADER{PATCH /_api/structure/{document-handle},patches a document} -/// -/// @RESTURLPARAMETERS -/// -/// @RESTURLPARAM{document-handle,string,required} -/// The Handle of the Document. -/// -/// @RESTQUERYPARAMETERS -/// -/// @RESTQUERYPARAM{keepNull,boolean,optional} -/// If the intention is to delete existing attributes with the patch command, -/// the URL query parameter `keepNull` can be used with a value of `false`. -/// This will modify the behavior of the patch command to remove any attributes -/// from the existing document that are contained in the patch document with an -/// attribute value of `null`. -/// -/// @RESTQUERYPARAM{waitForSync,boolean,optional} -/// Wait until document has been sync to disk. -/// -/// @RESTQUERYPARAM{rev,string,optional} -/// You can conditionally patch a document based on a target revision id by -/// using the `rev` query parameter. -/// -/// @RESTQUERYPARAM{policy,string,optional} -/// To control the update behavior in case there is a revision mismatch, you -/// can use the `policy` parameter. -/// -/// @RESTQUERYPARAM{lang,string,optional} -/// Language of the data. -/// -/// @RESTQUERYPARAM{format,boolean,optional} -/// False for unformated values (default: true). -/// -/// @RESTHEADERPARAMETERS -/// -/// @RESTHEADERPARAM{If-Match,string,optional} -/// You can conditionally delete a document based on a target revision id by -/// using the `if-match` HTTP header. -/// -/// @RESTDESCRIPTION -/// Partially updates the document identified by `document-handle`. -/// The body of the request must contain a JSON document with the attributes -/// to patch (the patch document). 
All attributes from the patch document will -/// be added to the existing document if they do not yet exist, and overwritten -/// in the existing document if they do exist there. -/// -/// Setting an attribute value to `null` in the patch document will cause a -/// value of `null` be saved for the attribute by default. -/// -/// Optionally, the query parameter `waitForSync` can be used to force -/// synchronization of the document update operation to disk even in case -/// that the `waitForSync` flag had been disabled for the entire collection. -/// Thus, the `waitForSync` query parameter can be used to force synchronization -/// of just specific operations. To use this, set the `waitForSync` parameter -/// to `true`. If the `waitForSync` parameter is not specified or set to -/// `false`, then the collection's default `waitForSync` behavior is -/// applied. The `waitForSync` query parameter cannot be used to disable -/// synchronization for collections that have a default `waitForSync` value -/// of `true`. -/// -/// The body of the response contains a JSON object with the information about -/// the handle and the revision. The attribute `_id` contains the known -/// `document-handle` of the updated document, the attribute `_rev` -/// contains the new document revision. -/// -/// If the document does not exist, then a `HTTP 404` is returned and the -/// body of the response contains an error document. -/// -/// You can conditionally update a document based on a target revision id by -/// using either the `rev` query parameter or the `if-match` HTTP header. -/// To control the update behavior in case there is a revision mismatch, you -/// can use the `policy` parameter. This is the same as when replacing -/// documents (see replacing documents for details). -/// -/// @RESTRETURNCODES -/// -/// @RESTRETURNCODE{201} -/// is returned if the document was created successfully and `waitForSync` was -/// `true`. 
-/// -/// @RESTRETURNCODE{202} -/// is returned if the document was created successfully and `waitForSync` was -/// `false`. -/// -/// @RESTRETURNCODE{400} -/// is returned if the body does not contain a valid JSON representation of a -/// document. The response body contains an error document in this case. -/// -/// @RESTRETURNCODE{404} -/// is returned if collection or the document was not found -/// -/// @RESTRETURNCODE{412} -/// is returned if a "If-Match" header or `rev` is given and the found -/// document has a different version -/// @end DocuBlock -//////////////////////////////////////////////////////////////////////////////// - -function patch_api_structure (req, res) { - var body; - var structure; - - var collection = getCollectionByRequest(req, res); - if (undefined === collection) { - return; - } - - var doc = getDocumentByRequest(req, res, collection); - if (undefined === doc) { - return; - } - - if (matchError(req, res, doc)) { - return; - } - - body = actions.getJsonBody(req, res); - - if (body === undefined) { - resultError(req, res, actions.HTTP_BAD, - arangodb.ERROR_FAILED, "no body data"); - return; - } - - try { - structure = getCollection().document(collection.name()); - patchDocumentByStructure(req, res, collection, structure, doc, body); - } - catch (err) { - patchDocument(req, res, collection, doc, body); - } -} - -//////////////////////////////////////////////////////////////////////////////// -/// @start DocuBlock JSF_replace_single_document -/// @brief replaces a document -/// -/// @RESTHEADER{PUT /_api/structure/{document-handle},replaces a document} -/// -/// @RESTURLPARAMETERS -/// -/// @RESTURLPARAM{document-handle,string,required} -/// The Handle of the Document. -/// -/// @RESTQUERYPARAMETERS -/// -/// @RESTQUERYPARAM{waitForSync,boolean,optional} -/// Wait until document has been sync to disk. 
-/// -/// @RESTQUERYPARAM{rev,string,optional} -/// You can conditionally replace a document based on a target revision id by -/// using the `rev` query parameter. -/// -/// @RESTQUERYPARAM{policy,string,optional} -/// To control the update behavior in case there is a revision mismatch, you -/// can use the `policy` parameter. This is the same as when replacing -/// documents (see replacing documents for more details). -/// -/// @RESTQUERYPARAM{lang,string,optional} -/// Language of the data. -/// -/// @RESTQUERYPARAM{format,boolean,optional} -/// False for unformated values (default: true). -/// -/// @RESTHEADERPARAMETERS -/// -/// @RESTHEADERPARAM{If-Match,string,optional} -/// You can conditionally replace a document based on a target revision id by -/// using the `if-match` HTTP header. -/// -/// @RESTDESCRIPTION -/// Completely updates (i.e. replaces) the document identified by `document-handle`. -/// If the document exists and can be updated, then a `HTTP 201` is returned -/// and the "ETag" header field contains the new revision of the document. -/// -/// If the new document passed in the body of the request contains the -/// `document-handle` in the attribute `_id` and the revision in `_rev`, -/// these attributes will be ignored. Only the URI and the "ETag" header are -/// relevant in order to avoid confusion when using proxies. -/// -/// Optionally, the query parameter `waitForSync` can be used to force -/// synchronization of the document replacement operation to disk even in case -/// that the `waitForSync` flag had been disabled for the entire collection. -/// Thus, the `waitForSync` query parameter can be used to force synchronization -/// of just specific operations. To use this, set the `waitForSync` parameter -/// to `true`. If the `waitForSync` parameter is not specified or set to -/// `false`, then the collection's default `waitForSync` behavior is -/// applied. 
The `waitForSync` query parameter cannot be used to disable -/// synchronization for collections that have a default `waitForSync` value -/// of `true`. -/// -/// The body of the response contains a JSON object with the information about -/// the handle and the revision. The attribute `_id` contains the known -/// `document-handle` of the updated document, the attribute `_rev` -/// contains the new document revision. -/// -/// If the document does not exist, then a `HTTP 404` is returned and the -/// body of the response contains an error document. -/// -/// There are two ways for specifying the targeted document revision id for -/// conditional replacements (i.e. replacements that will only be executed if -/// the revision id found in the database matches the document revision id specified -/// in the request): -/// - specifying the target revision in the `rev` URL query parameter -/// - specifying the target revision in the `if-match` HTTP header -/// -/// Specifying a target revision is optional, however, if done, only one of the -/// described mechanisms must be used (either the `rev` query parameter or the -/// `if-match` HTTP header). -/// Regardless which mechanism is used, the parameter needs to contain the target -/// document revision id as returned in the `_rev` attribute of a document or -/// by an HTTP `etag` header. -/// -/// For example, to conditionally replace a document based on a specific revision -/// id, you the following request: -/// -/// - PUT /_api/document/`document-handle`?rev=`etag` -/// -/// If a target revision id is provided in the request (e.g. via the `etag` value -/// in the `rev` URL query parameter above), ArangoDB will check that -/// the revision id of the document found in the database is equal to the target -/// revision id provided in the request. If there is a mismatch between the revision -/// id, then by default a `HTTP 412` conflict is returned and no replacement is -/// performed. 
-/// -/// The conditional update behavior can be overridden with the `policy` URL query parameter: -/// -/// - PUT /_api/document/`document-handle`?policy=`policy` -/// -/// If `policy` is set to `error`, then the behavior is as before: replacements -/// will fail if the revision id found in the database does not match the target -/// revision id specified in the request. -/// -/// If `policy` is set to `last`, then the replacement will succeed, even if the -/// revision id found in the database does not match the target revision id specified -/// in the request. You can use the `last` `policy` to force replacements. -/// -/// @RESTRETURNCODES -/// -/// @RESTRETURNCODE{201} -/// is returned if the document was created successfully and `waitForSync` was -/// `true`. -/// -/// @RESTRETURNCODE{202} -/// is returned if the document was created successfully and `waitForSync` was -/// `false`. -/// -/// @RESTRETURNCODE{400} -/// is returned if the body does not contain a valid JSON representation of a -/// document. The response body contains an error document in this case. 
-/// -/// @RESTRETURNCODE{404} -/// is returned if collection or the document was not found -/// -/// @RESTRETURNCODE{412} -/// is returned if a "If-Match" header or `rev` is given and the found -/// document has a different version -/// @end DocuBlock -//////////////////////////////////////////////////////////////////////////////// - -function put_api_structure (req, res) { - var body; - var structure; - - var collection = getCollectionByRequest(req, res); - if (undefined === collection) { - return; - } - - var doc = getDocumentByRequest(req, res, collection); - if (undefined === doc) { - return; - } - - if (matchError(req, res, doc)) { - return; - } - - body = actions.getJsonBody(req, res); - - if (body === undefined) { - resultError(req, res, actions.HTTP_BAD, - arangodb.ERROR_FAILED, "no body data"); - return; - } - - try { - structure = getCollection().document(collection.name()); - replaceDocumentByStructure(req, res, collection, structure, doc, body); - } - catch (err) { - replaceDocument(req, res, collection, doc, body); - } -} - -//////////////////////////////////////////////////////////////////////////////// -/// @start DocuBlock JSF_create_single_document -/// @brief creates a document -/// -/// @RESTHEADER{POST /_api/structure,creates a document} -/// -/// @RESTALLBODYPARAM{document,json,required} -/// A JSON representation of document. -/// -/// @RESTQUERYPARAMETERS -/// -/// @RESTQUERYPARAM{collection,string,required} -/// The collection name. -/// -/// @RESTQUERYPARAM{createCollection,boolean,optional} -/// If this parameter has a value of `true` or `yes`, then the collection is -/// created if it does not yet exist. Other values will be ignored so the -/// collection must be present for the operation to succeed. -/// -/// @RESTQUERYPARAM{waitForSync,boolean,optional} -/// Wait until document has been sync to disk. -/// -/// @RESTQUERYPARAM{lang,string,optional} -/// Language of the send data. 
-/// -/// @RESTQUERYPARAM{format,boolean,optional} -/// True, if the document contains formatted values (default: true). -/// -/// @RESTDESCRIPTION -/// Creates a new document in the collection named `collection`. A JSON -/// representation of the document must be passed as the body of the POST -/// request. -/// -/// If the document was created successfully, then the "Location" header -/// contains the path to the newly created document. The "ETag" header field -/// contains the revision of the document. -/// -/// The body of the response contains a JSON object with the following -/// attributes: -/// -/// - `_id` contains the document handle of the newly created document -/// - `_key` contains the document key -/// - `_rev` contains the document revision -/// -/// If the collection parameter `waitForSync` is `false`, then the call returns -/// as soon as the document has been accepted. It will not wait, until the -/// documents has been sync to disk. -/// -/// Optionally, the query parameter `waitForSync` can be used to force -/// synchronization of the document creation operation to disk even in case that -/// the `waitForSync` flag had been disabled for the entire collection. Thus, -/// the `waitForSync` query parameter can be used to force synchronization of just -/// this specific operations. To use this, set the `waitForSync` parameter to -/// `true`. If the `waitForSync` parameter is not specified or set to `false`, -/// then the collection's default `waitForSync` behavior is applied. The -/// `waitForSync` query parameter cannot be used to disable synchronization for -/// collections that have a default `waitForSync` value of `true`. -/// -/// @RESTRETURNCODES -/// -/// @RESTRETURNCODE{201} -/// is returned if the document was created successfully and `waitForSync` was -/// `true`. -/// -/// @RESTRETURNCODE{202} -/// is returned if the document was created successfully and `waitForSync` was -/// `false`. 
-/// -/// @RESTRETURNCODE{400} -/// is returned if the body does not contain a valid JSON representation of a -/// document. The response body contains an error document in this case. -/// -/// @RESTRETURNCODE{404} -/// is returned if the collection specified by `collection` is unknown. The -/// response body contains an error document in this case. -/// @end DocuBlock -//////////////////////////////////////////////////////////////////////////////// - -function post_api_structure (req, res) { - // POST /_api/structure - var body; - var structure; - var collection; - - var collectionName = req.parameters.collection; - if (undefined === collectionName) { - resultError(req, res, actions.HTTP_NOT_FOUND, - arangodb.ERROR_ARANGO_COLLECTION_NOT_FOUND, "collection not found"); - return; - } - - try { - collection = db._collection(collectionName); - } - catch (err) { - } - - if (null === collection) { - var createCollection = stringToBoolean(req.parameters.createCollection); - if (createCollection) { - try { - db._create(collectionName); - collection = db._collection(collectionName); - } - catch(err2) { - resultError(req, res, actions.HTTP_NOT_FOUND, - arangodb.ERROR_ARANGO_COLLECTION_NOT_FOUND, err2); - return; - } - } - } - - if (undefined === collection || null === collection) { - resultError(req, res, actions.HTTP_NOT_FOUND, - arangodb.ERROR_ARANGO_COLLECTION_NOT_FOUND, "collection not found"); - return; - } - - body = actions.getJsonBody(req, res); - - if (body === undefined) { - resultError(req, res, actions.HTTP_BAD, - arangodb.ERROR_FAILED, "no body data"); - return; - } - - try { - structure = getCollection().document(collection.name()); - saveDocumentByStructure(req, res, collection, structure, body); - } - catch (err3) { - saveDocument(req, res, collection, body); - } -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief handles a structure request 
-//////////////////////////////////////////////////////////////////////////////// - -actions.defineHttp({ - url : API, - - callback : function (req, res) { - try { - if (req.requestType === actions.DELETE) { - delete_api_structure(req, res); - } - else if (req.requestType === actions.GET) { - get_api_structure(req, res); - } - else if (req.requestType === actions.HEAD) { - head_api_structure(req, res); - } - else if (req.requestType === actions.PATCH) { - patch_api_structure(req, res); - } - else if (req.requestType === actions.POST) { - post_api_structure(req, res); - } - else if (req.requestType === actions.PUT) { - put_api_structure(req, res); - } - else { - actions.resultUnsupported(req, res); - } - } - catch (err) { - actions.resultException(req, res, err, undefined, false); - } - } -}); - - diff --git a/lib/Rest/GeneralRequest.cpp b/lib/Rest/GeneralRequest.cpp index 4c9b6944a0..97326abe12 100644 --- a/lib/Rest/GeneralRequest.cpp +++ b/lib/Rest/GeneralRequest.cpp @@ -32,8 +32,6 @@ using namespace arangodb; using namespace arangodb::basics; -int32_t const GeneralRequest::MIN_COMPATIBILITY = 10300L; - static std::string const EMPTY_STR = ""; std::string GeneralRequest::translateVersion(ProtocolVersion version) { diff --git a/lib/Rest/GeneralRequest.h b/lib/Rest/GeneralRequest.h index e27223bfdc..f7591a121f 100644 --- a/lib/Rest/GeneralRequest.h +++ b/lib/Rest/GeneralRequest.h @@ -38,9 +38,6 @@ class GeneralRequest { GeneralRequest(GeneralRequest const&) = delete; GeneralRequest& operator=(GeneralRequest const&) = delete; - public: - static int32_t const MIN_COMPATIBILITY; - public: // VSTREAM_CRED: This method is used for sending Authentication // request,i.e; username and password. 
@@ -83,10 +80,8 @@ class GeneralRequest { static RequestType findRequestType(char const*, size_t const); public: - GeneralRequest(ConnectionInfo const& connectionInfo, - int32_t defaultApiCompatibility) + GeneralRequest(ConnectionInfo const& connectionInfo) : _version(ProtocolVersion::UNKNOWN), - _defaultApiCompatibility(defaultApiCompatibility), _connectionInfo(connectionInfo), _clientTaskId(0), _requestContext(nullptr), @@ -102,8 +97,6 @@ class GeneralRequest { std::string const& protocol() const { return _protocol; } void setProtocol(std::string const& protocol) { _protocol = protocol; } - virtual int32_t compatibility() = 0; - ConnectionInfo const& connectionInfo() const { return _connectionInfo; } void setConnectionInfo(ConnectionInfo const& connectionInfo) { _connectionInfo = connectionInfo; @@ -172,7 +165,6 @@ class GeneralRequest { protected: ProtocolVersion _version; std::string _protocol; - int32_t _defaultApiCompatibility; // connection info ConnectionInfo _connectionInfo; diff --git a/lib/Rest/GeneralResponse.cpp b/lib/Rest/GeneralResponse.cpp index 3dfdb10c47..3b651d8144 100644 --- a/lib/Rest/GeneralResponse.cpp +++ b/lib/Rest/GeneralResponse.cpp @@ -415,9 +415,8 @@ GeneralResponse::ResponseCode GeneralResponse::responseCode(int code) { } } -GeneralResponse::GeneralResponse(ResponseCode responseCode, - uint32_t compatibility) - : _responseCode(responseCode), _apiCompatibility(compatibility) {} +GeneralResponse::GeneralResponse(ResponseCode responseCode) + : _responseCode(responseCode) {} std::string const& GeneralResponse::header(std::string const& key) const { std::string k = StringUtils::tolower(key); diff --git a/lib/Rest/GeneralResponse.h b/lib/Rest/GeneralResponse.h index e26ec5dd51..58bcf0df3c 100644 --- a/lib/Rest/GeneralResponse.h +++ b/lib/Rest/GeneralResponse.h @@ -100,7 +100,7 @@ class GeneralResponse { static ResponseCode responseCode(int); public: - GeneralResponse(ResponseCode, uint32_t); + explicit GeneralResponse(ResponseCode); virtual 
~GeneralResponse() {} public: @@ -132,7 +132,6 @@ class GeneralResponse { protected: ResponseCode _responseCode; - uint32_t const _apiCompatibility; std::unordered_map _headers; }; } diff --git a/lib/Rest/HttpRequest.cpp b/lib/Rest/HttpRequest.cpp index 79ca86b949..39417fe6d4 100644 --- a/lib/Rest/HttpRequest.cpp +++ b/lib/Rest/HttpRequest.cpp @@ -47,10 +47,8 @@ std::string const HttpRequest::MULTI_PART_CONTENT_TYPE = "multipart/form-data"; HttpRequest::HttpRequest(ConnectionInfo const& connectionInfo, char const* header, size_t length, - int32_t defaultApiCompatibility, bool allowMethodOverride) - : GeneralRequest(connectionInfo, defaultApiCompatibility), - + : GeneralRequest(connectionInfo), _contentLength(0), _header(nullptr), _allowMethodOverride(allowMethodOverride) { @@ -66,68 +64,6 @@ HttpRequest::~HttpRequest() { delete[] _header; } -int32_t HttpRequest::compatibility() { - int32_t result = _defaultApiCompatibility; - - bool found; - std::string const& apiVersion = header("x-arango-version", found); - - if (!found) { - return result; - } - - char const* a = apiVersion.c_str(); - char const* p = a; - char const* e = a + apiVersion.size(); - - // read major version - uint32_t major = 0; - - while (p < e && *p >= '0' && *p <= '9') { - major = major * 10 + (*p - '0'); - ++p; - } - - if (p != a && (*p == '.' || *p == '-' || p == e)) { - if (major >= 10000) { - // version specified as "10400" - if (*p == '\0') { - result = major; - - if (result < MIN_COMPATIBILITY) { - result = MIN_COMPATIBILITY; - } else { - // set patch-level to 0 - result /= 100L; - result *= 100L; - } - - return result; - } - } - - a = ++p; - - // read minor version - uint32_t minor = 0; - - while (p < e && *p >= '0' && *p <= '9') { - minor = minor * 10 + (*p - '0'); - ++p; - } - - if (p != a && (*p == '.' 
|| *p == '-' || p == e)) { - result = (int32_t)(minor * 100L + major * 10000L); - } - } - - if (result < MIN_COMPATIBILITY) { - result = MIN_COMPATIBILITY; - } - - return result; -} - void HttpRequest::parseHeader(size_t length) { char* start = _header; char* end = start + length; diff --git a/lib/Rest/HttpRequest.h b/lib/Rest/HttpRequest.h index 89d8c7002b..940cb42a58 100644 --- a/lib/Rest/HttpRequest.h +++ b/lib/Rest/HttpRequest.h @@ -45,12 +45,9 @@ class HttpRequest : public GeneralRequest { static std::string const MULTI_PART_CONTENT_TYPE; public: - HttpRequest(ConnectionInfo const&, char const*, size_t, int32_t, bool); + HttpRequest(ConnectionInfo const&, char const*, size_t, bool); ~HttpRequest(); - public: - int32_t compatibility() override; - public: // HTTP protocol version is 1.0 bool isHttp10() const { return _version == ProtocolVersion::HTTP_1_0; } diff --git a/lib/Rest/HttpResponse.cpp b/lib/Rest/HttpResponse.cpp index da9cf7ee5d..a9f33e00ff 100644 --- a/lib/Rest/HttpResponse.cpp +++ b/lib/Rest/HttpResponse.cpp @@ -34,8 +34,8 @@ using namespace arangodb::basics; std::string const HttpResponse::BATCH_ERROR_HEADER = "x-arango-errors"; bool HttpResponse::HIDE_PRODUCT_HEADER = false; -HttpResponse::HttpResponse(ResponseCode code, uint32_t compatibility) - : GeneralResponse(code, compatibility), +HttpResponse::HttpResponse(ResponseCode code) + : GeneralResponse(code), _isHeadResponse(false), _isChunked(false), _body(TRI_UNKNOWN_MEM_ZONE, false), @@ -118,7 +118,7 @@ size_t HttpResponse::bodySize() const { } void HttpResponse::writeHeader(StringBuffer* output) { - bool const capitalizeHeaders = (_apiCompatibility >= 20100); + bool const capitalizeHeaders = true; output->appendText(TRI_CHAR_LENGTH_PAIR("HTTP/1.1 ")); output->appendText(responseString(_responseCode)); diff --git a/lib/Rest/HttpResponse.h b/lib/Rest/HttpResponse.h index 3473d3efc5..7ac284dd31 100644 --- a/lib/Rest/HttpResponse.h +++ b/lib/Rest/HttpResponse.h @@ -36,7 +36,7 @@ class HttpResponse 
: public GeneralResponse { static std::string const BATCH_ERROR_HEADER; public: - HttpResponse(ResponseCode code, uint32_t compatibility); + explicit HttpResponse(ResponseCode code); public: bool isHeadResponse() const { return _isHeadResponse; } From 016b79f4c7c72dacd9bce63ed79020cc4fa62a4b Mon Sep 17 00:00:00 2001 From: Andreas Streichardt Date: Fri, 6 May 2016 18:58:58 +0200 Subject: [PATCH 20/31] Fix object requirement --- arangod/Cluster/ClusterInfo.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arangod/Cluster/ClusterInfo.h b/arangod/Cluster/ClusterInfo.h index cfef3a1da5..48e31bf1c3 100644 --- a/arangod/Cluster/ClusterInfo.h +++ b/arangod/Cluster/ClusterInfo.h @@ -75,6 +75,9 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// int replicationFactor () const { + if (!_slice.isObject()) { + return 1; + } return arangodb::basics::VelocyPackHelper::getNumericValue( _slice, "replicationFactor", 1); } @@ -129,6 +132,9 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// TRI_col_type_e type() const { + if (!_slice.isObject()) { + return TRI_COL_TYPE_UNKNOWN; + } return (TRI_col_type_e)arangodb::basics::VelocyPackHelper::getNumericValue( _slice, "type", (int)TRI_COL_TYPE_UNKNOWN); } @@ -138,6 +144,9 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// TRI_vocbase_col_status_e status() const { + if (!_slice.isObject()) { + return TRI_VOC_COL_STATUS_CORRUPTED; + } return (TRI_vocbase_col_status_e) arangodb::basics::VelocyPackHelper::getNumericValue( _slice, "status", (int)TRI_VOC_COL_STATUS_CORRUPTED); @@ -246,6 +255,9 @@ class CollectionInfo { ////////////////////////////////////////////////////////////////////////////// uint32_t indexBuckets() const { + if (!_slice.isObject()) { + return 1; + } return arangodb::basics::VelocyPackHelper::getNumericValue( _slice, "indexBuckets", 1); } From 
28b61725808cf1e226ad3c7e19d4cdded62a30b7 Mon Sep 17 00:00:00 2001 From: Jan Steemann Date: Fri, 6 May 2016 19:00:16 +0200 Subject: [PATCH 21/31] updated documentation --- .../ReleaseNotes/UpgradingChanges30.mdpp | 57 ++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp b/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp index 94e7313708..f5a79959da 100644 --- a/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp +++ b/Documentation/Books/Users/ReleaseNotes/UpgradingChanges30.mdpp @@ -14,7 +14,7 @@ The pattern for collection directory names was changed in 3.0 to include a rando id component at the end. The new pattern is `collection--`, where `` is the collection id and `` is a random number. Previous versions of ArangoDB used a pattern `collection-` without the random number. - + !SECTION Edges and edges attributes In ArangoDB prior to 3.0 the attributes `_from` and `_to` of edges were treated @@ -35,8 +35,36 @@ referenced by `_from` and `_to` values may be dropped and re-created later. Any `_from` and `_to` values of edges pointing to such dropped collection are unaffected by the drop operation now. +!SECTION Documents + +Documents (in contrast to edges) cannot contain the attributes `_from` or `_to` on the +main level in ArangoDB 3.0. These attributes will be automatically removed when saving +documents (i.e. non-edges). `_from` and `_to` can be still used in sub-objects inside +documents. + +The `_from` and `_to` attributes will of course be preserved and are still required when +saving edges. + + !SECTION AQL +!SUBSECTION Edges handling + +When updating or replacing edges via AQL, any modifications to the `_from` and `_to` +attributes of edges were ignored by previous versions of ArangoDB, without signaling +any errors. This was due to the `_from` and `_to` attributes being immutable in earlier +versions of ArangoDB. 
+ +From 3.0 on, the `_from` and `_to` attributes of edges are mutable, so any AQL queries that +modify the `_from` or `_to` attribute values of edges will attempt to actually change these +attributes. Clients should be aware of this change and should review their queries that +modify edges to rule out unintended side-effects. + +Additionally, when completely replacing the data of existing edges via the AQL `REPLACE` +operation, it is now required to specify values for the `_from` and `_to` attributes, +as `REPLACE` requires the entire new document to be specified. If either `_from` or `_to` +are missing from the replacement document, an `REPLACE` operation will fail. + !SUBSECTION Typecasting functions The type casting applied by the `TO_NUMBER()` AQL function has changed as follows: @@ -51,6 +79,33 @@ The type casting applied by the `TO_NUMBER()` AQL function has changed as follow Additionally, the `TO_STRING()` AQL function now converts `null` values into an empty string (`""`) instead of the string `"null"`. +!SUBSECTION Attribute names and parameters + +Previous versions of ArangoDB had some trouble with attribute names that contained the dot +symbol (`.`). Some code parts in AQL used the dot symbol to split an attribute name into +sub-components, so an attribute named `a.b` was not completely distinguishable from an +attribute `a` with a sub-attribute `b`. This inconsistent behavior sometimes allowed "hacks" +to work such as passing sub-attributes in a bind parameter as follows: + +``` +FOR doc IN collection + FILTER doc.@name == 1 + RETURN doc +``` + +If the bind parameter `@name` contained the dot symbol (e.g. `@bind` = `a.b`, it was unclear +whether this should trigger sub-attribute access (i.e. `doc.a.b`) or a access to an attribute +with exactly the specified name (i.e. `doc."a.b"`). 
+ +ArangoDB 3.0 now handles attribute names containing the dot symbol properly, and sending a +bind parameter `@name` = `a.b` will now always trigger an access to the attribute `doc."a.b"`, +not the sub-attribute `b` of `a` in `doc`. + +For users that used the "hack" of passing bind parameters containing dot symbol to access +sub-attributes, ArangoDB 3.0 allows specifying the attribute name parts as an array of strings, +e.g. `@name` = `[ "a", "b" ]`, which will be resolved to the sub-attribute access `doc.a.b` +when the query is executed. + !SUBSECTION Arithmetic operators As the arithmetic operations in AQL implicitly convert their operands to numeric values using From c14e9d20c8d6a3198c55abef4cb93734007956b4 Mon Sep 17 00:00:00 2001 From: Jan Steemann Date: Fri, 6 May 2016 19:14:55 +0200 Subject: [PATCH 22/31] removed obsolete tests --- UnitTests/HttpInterface/api-structures.rb | 504 ---------------------- 1 file changed, 504 deletions(-) delete mode 100644 UnitTests/HttpInterface/api-structures.rb diff --git a/UnitTests/HttpInterface/api-structures.rb b/UnitTests/HttpInterface/api-structures.rb deleted file mode 100644 index b0fbf18693..0000000000 --- a/UnitTests/HttpInterface/api-structures.rb +++ /dev/null @@ -1,504 +0,0 @@ -# coding: utf-8 - -require 'rspec' -require 'arangodb.rb' - -describe ArangoDB do - api = "/_api/structure" - prefix = "structures" - - context "dealing with structured documents" do - - def create_structure (prefix, body) - cmd = "/_api/document?collection=_structures" - doc = ArangoDB.log_post(prefix, cmd, :body => body) - return doc - end - - def insert_structure1 (prefix) - # one numeric attribute "number" - structure = '{ "_key" :"UnitTestsCollectionDocuments", "attributes": { ' + - '"number": { ' + - ' "type": "number", ' + - ' "formatter": { ' + - ' "default": { "args": { "decPlaces": 4, "decSeparator": ".", "thouSeparator": "," }, "module": "@arangodb/formatter", "do": "formatFloat" }, ' + - ' "de": { "args": { "decPlaces": 4, 
"decSeparator": ",", "thouSeparator": "." }, "module": "@arangodb/formatter", "do": "formatFloat" } ' + - ' }, ' + - ' "parser": { ' + - ' "default": { "args": {"decPlaces": 4, "decSeparator": ".", "thouSeparator": "," }, "module": "@arangodb/formatter", "do": "parseFloat" }, ' + - ' "de": { "args": {"decPlaces": 4,"decSeparator": ",","thouSeparator": "." },"module": "@arangodb/formatter","do": "parseFloat" } ' + - ' }, ' + - ' "validators": [ { "module": "@arangodb/formatter", "do": "validateNotNull" } ]' + - ' }, ' + - '"aString": { ' + - ' "type": "string" ' + - '}' + - '}' + - '}' - - return create_structure(prefix, structure); - end - - def insert_structure2 (prefix) - # one numeric array attribute "numbers" - structure = '{ "_key" :"UnitTestsCollectionDocuments", "attributes": { ' + - '"numbers": { ' + - ' "type": "number_list_type" ' + - '}},' + - '"arrayTypes": { ' + - ' "number_list_type": { ' + - ' "type": "number", ' + - ' "formatter": { ' + - ' "default": { "args": { "decPlaces": 4, "decSeparator": ".", "thouSeparator": "," }, "module": "@arangodb/formatter", "do": "formatFloat" }, ' + - ' "de": { "args": { "decPlaces": 4, "decSeparator": ",", "thouSeparator": "." }, "module": "@arangodb/formatter", "do": "formatFloat" } ' + - ' }, ' + - ' "parser": { ' + - ' "default": { "args": {"decPlaces": 4, "decSeparator": ".", "thouSeparator": "," }, "module": "@arangodb/formatter", "do": "parseFloat" }, ' + - ' "de": { "args": {"decPlaces": 4,"decSeparator": ",","thouSeparator": "." 
},"module": "@arangodb/formatter","do": "parseFloat" } ' + - ' }, ' + - ' "validators": [ { "module": "@arangodb/formatter", "do": "validateNotNull" } ]' + - ' } ' + - '}' + - '}' - - return create_structure(prefix, structure); - end - - - def insert_structure3 (prefix) - # one object attribute "myObject" - structure = '{ "_key" :"UnitTestsCollectionDocuments", "attributes": { ' + - '"myObject": { ' + - ' "type": "object_type", ' + - ' "validators": [ { "module": "@arangodb/formatter", "do": "validateNotNull" } ]' + - '}},' + - '"objectTypes": { ' + - ' "object_type": { ' + - ' "attributes": { ' + - ' "aNumber": { ' + - ' "type": "number", ' + - ' "formatter": { ' + - ' "default": { "args": { "decPlaces": 4, "decSeparator": ".", "thouSeparator": "," }, "module": "@arangodb/formatter", "do": "formatFloat" }, ' + - ' "de": { "args": { "decPlaces": 4, "decSeparator": ",", "thouSeparator": "." }, "module": "@arangodb/formatter", "do": "formatFloat" } ' + - ' }, ' + - ' "parser": { ' + - ' "default": { "args": {"decPlaces": 4, "decSeparator": ".", "thouSeparator": "," }, "module": "@arangodb/formatter", "do": "parseFloat" }, ' + - ' "de": { "args": {"decPlaces": 4,"decSeparator": ",","thouSeparator": "." 
},"module": "@arangodb/formatter","do": "parseFloat" } ' + - ' }, ' + - ' "validators": [ { "module": "@arangodb/formatter", "do": "validateNotNull" } ]' + - ' }, ' + - ' "aString": { ' + - ' "type": "string"' + - ' }' + - ' }' + - ' }' + - '}' + - '}' - - return create_structure(prefix, structure); - end - - def insert_structured_doc (prefix, api, collection, doc, lang, waitForSync, format) - cmd = api + "?collection=" + collection + "&lang=" + lang + "&waitForSync=" + waitForSync + "&format=" + format; - return ArangoDB.log_post(prefix, cmd, :body => doc) - end - - def replace_structured_doc (prefix, api, id, doc, lang, waitForSync, format, args = {}) - cmd = api + "/" + id + "?lang=" + lang + "&waitForSync=" + waitForSync + "&format=" + format; - return ArangoDB.log_put(prefix, cmd, :body => doc, :headers => args[:headers]) - end - - def update_structured_doc (prefix, api, id, doc, lang, waitForSync, format, args = {}) - cmd = api + "/" + id + "?lang=" + lang + "&waitForSync=" + waitForSync + "&format=" + format; - return ArangoDB.log_patch(prefix, cmd, :body => doc, :headers => args[:headers]) - end - - def get_doc (prefix, api, id, lang, format, args = {}) - cmd = api + "/" + id + "?lang=" + lang + "&format=" + format; - return ArangoDB.log_get(prefix, cmd, args) - end - - def delete_doc (prefix, api, id, args = {}) - cmd = api + "/" + id; - return ArangoDB.log_delete(prefix, cmd, args) - end - - def head_doc (prefix, api, id, args = {}) - cmd = api + "/" + id; - return ArangoDB.log_head(prefix, cmd, args) - end - - before do - @cn = "UnitTestsCollectionDocuments" - - ArangoDB.drop_collection(@cn) - @cid = ArangoDB.create_collection(@cn, false) - - cmd = "/_api/document/_structures/" + @cn - ArangoDB.delete(cmd) - end - - after do - ArangoDB.drop_collection(@cn) - end - -################################################################################ -## creates documents with invalid types 
-################################################################################ - - it "insert a document" do - p = "#{prefix}-create-1" - insert_structure1(p); - - body = '{ "number" : "1234.5" }'; - doc = insert_structured_doc(p, api, @cn, body, "en", "false", "false") - - doc.code.should eq(202) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - doc.parsed_response['error'].should eq(false) - - body = '{ "_key" : "a_key", "number" : "99.5" }'; - doc = insert_structured_doc(p, api, @cn, body, "en", "true", "true") - - doc.code.should eq(201) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - doc.parsed_response['error'].should eq(false) - doc.parsed_response['_key'].should eq("a_key") - - # insert same key (error) - body = '{ "_key" : "a_key", "number" : "99.5" }'; - doc = insert_structured_doc(p, api, @cn, body, "en", "true", "true") - doc.code.should eq(400) - - end - - it "insert not valid document" do - p = "#{prefix}-create-2" - insert_structure1(p); - - body = '{ }'; - doc = insert_structured_doc(p, api, @cn, body, "en", "true", "true") - - doc.code.should eq(400) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - - doc.parsed_response['error'].should eq(true) - doc.parsed_response['code'].should eq(400) - doc.parsed_response['errorNum'].should eq(1) - end - - it "insert document in unknown collection" do - p = "#{prefix}-create-3" - insert_structure1(p); - - body = '{ }'; - - cmd = api + "?collection=egal&lang=en&waitForSync=true&format=true"; - doc = ArangoDB.log_post(p, cmd, :body => body) - - doc.code.should eq(404) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - - doc.parsed_response['error'].should eq(true) - doc.parsed_response['code'].should eq(404) - doc.parsed_response['errorNum'].should eq(1203) - end - -################################################################################ -## create and get objects 
-################################################################################ - - it "insert a document with other language" do - p = "#{prefix}-get-1" - insert_structure1(p); - - body = '{ "number" : "1.234,50" }'; - doc = insert_structured_doc(p, api, @cn, body, "de", "false", "true") - - doc.code.should eq(202) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - doc.parsed_response['error'].should eq(false) - id = doc.parsed_response['_id'] - - doc3 = get_doc(p, api, id, "en", "false") - doc3.code.should eq(200) - doc3.headers['content-type'].should eq("application/json; charset=utf-8") - doc3.parsed_response['_id'].should eq(id) - doc3.parsed_response['number'].should eq(1234.5) - - doc2 = get_doc(p, api, id, "en", "true") - doc2.code.should eq(200) - doc2.headers['content-type'].should eq("application/json; charset=utf-8") - doc2.parsed_response['_id'].should eq(id) - doc2.parsed_response['number'].should eq("1,234.5000") - end - - it "insert a document with an array attribute" do - p = "#{prefix}-get-2" - insert_structure2(p); - - body = '{ "numbers" : [ "1.234,50", "99,99" ] }'; - doc = insert_structured_doc(p, api, @cn, body, "de", "false", "true") - - doc.code.should eq(202) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - doc.parsed_response['error'].should eq(false) - id = doc.parsed_response['_id'] - - doc3 = get_doc(p, api, id, "en", "false") - doc3.code.should eq(200) - doc3.headers['content-type'].should eq("application/json; charset=utf-8") - doc3.parsed_response['_id'].should eq(id) - doc3.parsed_response['numbers'][0].should eq(1234.5) - doc3.parsed_response['numbers'][1].should eq(99.99) - - doc2 = get_doc(p, api, id, "en", "true") - doc2.code.should eq(200) - doc2.headers['content-type'].should eq("application/json; charset=utf-8") - doc2.parsed_response['_id'].should eq(id) - doc2.parsed_response['numbers'][0].should eq("1,234.5000") - doc2.parsed_response['numbers'][1].should 
eq("99.9900") - end - - it "insert a document with an object attribute" do - p = "#{prefix}-get-3" - insert_structure3(p); - - body = '{ "myObject" : { "aNumber":"1.234,50", "aString":"str" } }'; - doc = insert_structured_doc(p, api, @cn, body, "de", "false", "true") - - doc.code.should eq(202) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - doc.parsed_response['error'].should eq(false) - id = doc.parsed_response['_id'] - - doc3 = get_doc(p, api, id, "en", "false") - doc3.code.should eq(200) - doc3.headers['content-type'].should eq("application/json; charset=utf-8") - doc3.parsed_response['_id'].should eq(id) - doc3.parsed_response['myObject']['aNumber'].should eq(1234.5) - doc3.parsed_response['myObject']['aString'].should eq("str") - - doc2 = get_doc(p, api, id, "en", "true") - doc2.code.should eq(200) - doc2.headers['content-type'].should eq("application/json; charset=utf-8") - doc2.parsed_response['_id'].should eq(id) - doc2.parsed_response['myObject']['aNumber'].should eq("1,234.5000") - doc2.parsed_response['myObject']['aString'].should eq("str") - end - -################################################################################ -## get objects with If-None-Match and If-Match -################################################################################ - - it "get a document with If-None-Match" do - p = "#{prefix}-get2-1" - insert_structure1(p); - - body = '{ "number" : "1.234,50" }'; - doc = insert_structured_doc(p, api, @cn, body, "de", "false", "true") - - doc.code.should eq(202) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - doc.parsed_response['error'].should eq(false) - id = doc.parsed_response['_id'] - rev = doc.parsed_response['_rev'] - - match = {} - match['If-None-Match'] = '007'; - - doc = get_doc(p, api, id, "en", "false", :headers => match) - doc.code.should eq(200) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - doc.parsed_response['_id'].should 
eq(id) - doc.parsed_response['number'].should eq(1234.5) - - match = {} - match['If-None-Match'] = rev; - - doc = get_doc(p, api, id, "en", "false", :headers => match) - doc.code.should eq(304) - end - - it "get a document with If-Match" do - p = "#{prefix}-get2-2" - insert_structure1(p); - - body = '{ "number" : "1.234,50" }'; - doc = insert_structured_doc(p, api, @cn, body, "de", "false", "true") - - doc.code.should eq(202) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - doc.parsed_response['error'].should eq(false) - id = doc.parsed_response['_id'] - rev = doc.parsed_response['_rev'] - - match = {} - match['If-Match'] = '007'; - - doc = get_doc(p, api, id, "en", "false", :headers => match) - doc.code.should eq(412) - - match = {} - match['If-Match'] = rev; - - doc = get_doc(p, api, id, "en", "false", :headers => match) - doc.code.should eq(200) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - doc.parsed_response['_id'].should eq(id) - doc.parsed_response['number'].should eq(1234.5) - end - -################################################################################ -## get objects header with If-None-Match and If-Match -################################################################################ - - it "get a document header with If-None-Match" do - p = "#{prefix}-head-1" - insert_structure1(p); - - body = '{ "number" : "1.234,50" }'; - doc = insert_structured_doc(p, api, @cn, body, "de", "false", "true") - - doc.code.should eq(202) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - doc.parsed_response['error'].should eq(false) - id = doc.parsed_response['_id'] - rev = doc.parsed_response['_rev'] - - match = {} - match['If-None-Match'] = '007'; - - doc = head_doc(p, api, id, :headers => match) - doc.code.should eq(200) - - match = {} - match['If-None-Match'] = rev; - - doc = head_doc(p, api, id, :headers => match) - doc.code.should eq(304) - end - - it "get a document 
header with If-Match" do - p = "#{prefix}-head-2" - insert_structure1(p); - - body = '{ "number" : "1.234,50" }'; - doc = insert_structured_doc(p, api, @cn, body, "de", "false", "true") - - doc.code.should eq(202) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - doc.parsed_response['error'].should eq(false) - id = doc.parsed_response['_id'] - rev = doc.parsed_response['_rev'] - - match = {} - match['If-Match'] = '007'; - - doc = head_doc(p, api, id, :headers => match) - doc.code.should eq(412) - - match = {} - match['If-Match'] = rev; - - doc = head_doc(p, api, id, :headers => match) - doc.code.should eq(200) - end - -################################################################################ -## replace documents -################################################################################ - - it "replace a document" do - p = "#{prefix}-put-1" - insert_structure1(p); - - body = '{ "number" : "1.234,50" }'; - doc = insert_structured_doc(p, api, @cn, body, "de", "false", "true") - - doc.code.should eq(202) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - doc.parsed_response['error'].should eq(false) - id = doc.parsed_response['_id'] - key = doc.parsed_response['_key'] - - # replace - doc = replace_structured_doc(p, api, id, body, "de", "false", "true") - doc.code.should eq(202) - id = doc.parsed_response['_id'] - rev = doc.parsed_response['_rev'] - - # replace with wrong _rev - body = '{ "_key":"' + key + '", "_id":"' + id + '", "_rev":"error", "number" : "234,50" }'; - doc = replace_structured_doc(p, api, id, body, "de", "false", "true&policy=error") - doc.code.should eq(400) - - # replace with last _rev - body = '{ "_key":"' + key + '", "_id":"' + id + '", "_rev":"' + rev + '", "number" : "234,50" }'; - doc = replace_structured_doc(p, api, id, body, "de", "true", "true&policy=error") - doc.code.should eq(201) - - end - -################################################################################ 
-## patch documents -################################################################################ - - it "patch a document" do - p = "#{prefix}-patch-1" - insert_structure1(p); - - body = '{ "number" : "1.234,50", "aString":"str" }'; - doc = insert_structured_doc(p, api, @cn, body, "de", "false", "true") - - doc.code.should eq(202) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - doc.parsed_response['error'].should eq(false) - id = doc.parsed_response['_id'] - key = doc.parsed_response['_key'] - - # patch string - body = '{ "aString":"new string" }'; - doc = update_structured_doc(p, api, id, body, "de", "false", "true") - doc.code.should eq(202) - id = doc.parsed_response['_id'] - rev = doc.parsed_response['_rev'] - - doc3 = get_doc(p, api, id, "en", "true") - doc3.code.should eq(200) - doc3.headers['content-type'].should eq("application/json; charset=utf-8") - doc3.parsed_response['_id'].should eq(id) - doc3.parsed_response['number'].should eq("1,234.5000") - doc3.parsed_response['aString'].should eq("new string") - - # patch number to null (error) - body = '{ "number" : null }'; - doc = update_structured_doc(p, api, id, body, "de", "false", "true") - doc.code.should eq(400) - - end - -################################################################################ -## delete documents -################################################################################ - - it "delete a document" do - p = "#{prefix}-delete-1" - insert_structure1(p); - - body = '{ "number" : "1.234,50", "aString":"str" }'; - doc = insert_structured_doc(p, api, @cn, body, "de", "false", "true") - - doc.code.should eq(202) - doc.headers['content-type'].should eq("application/json; charset=utf-8") - doc.parsed_response['error'].should eq(false) - id = doc.parsed_response['_id'] - key = doc.parsed_response['_key'] - - # delete - doc = delete_doc(p, api, id) - doc.code.should eq(202) - end - - end -end From 0fbc48b83e390685c25bbd4ea14f19e53846ebad Mon Sep 17 
00:00:00 2001 From: Max Neunhoeffer Date: Fri, 6 May 2016 22:30:27 +0200 Subject: [PATCH 23/31] Improve cluster in various ways. Finish implementation of AgencyReadTransaction. Use AgencyReadTransaction in HeartbeatThread of coordinator (less requests). Repair ClusterInfo::uniqid. Repair AgencyComm::uniqid. Remove x-etcd-index header. Remove _index in AgencyCommResult. Streamline HeartbeatThread of coordinator. Remove lastCommandIndex from AgencyComm. Fix HeartbeatThread::handleStateChange. --- arangod/Cluster/AgencyCallback.cpp | 2 +- arangod/Cluster/AgencyComm.cpp | 150 ++++++++++++++----------- arangod/Cluster/AgencyComm.h | 37 +++++-- arangod/Cluster/ClusterInfo.cpp | 43 +++++--- arangod/Cluster/HeartbeatThread.cpp | 165 +++++++++------------------- arangod/Cluster/HeartbeatThread.h | 8 +- arangod/Cluster/v8-cluster.cpp | 24 ++-- 7 files changed, 200 insertions(+), 229 deletions(-) diff --git a/arangod/Cluster/AgencyCallback.cpp b/arangod/Cluster/AgencyCallback.cpp index 0dfd4902a6..569365033e 100644 --- a/arangod/Cluster/AgencyCallback.cpp +++ b/arangod/Cluster/AgencyCallback.cpp @@ -137,8 +137,8 @@ void AgencyCallback::executeByCallbackOrTimeout(double maxTimeout) { compareBuilder = _lastData; } - _useCv = true; CONDITION_LOCKER(locker, _cv); + _useCv = true; locker.wait(static_cast(maxTimeout * 1000000.0)); _useCv = false; diff --git a/arangod/Cluster/AgencyComm.cpp b/arangod/Cluster/AgencyComm.cpp index b4af5fa013..0c9d0ca8ce 100644 --- a/arangod/Cluster/AgencyComm.cpp +++ b/arangod/Cluster/AgencyComm.cpp @@ -144,10 +144,7 @@ void AgencyPrecondition::toVelocyPack(VPackBuilder& builder) const { std::string AgencyWriteTransaction::toJson() const { VPackBuilder builder; - { - VPackArrayBuilder guard(&builder); - toVelocyPack(builder); - } + toVelocyPack(builder); return builder.toJson(); } @@ -177,10 +174,7 @@ void AgencyWriteTransaction::toVelocyPack(VPackBuilder& builder) const { std::string AgencyReadTransaction::toJson() const { VPackBuilder builder; - { - 
VPackArrayBuilder guard(&builder); - toVelocyPack(builder); - } + toVelocyPack(builder); return builder.toJson(); } @@ -189,12 +183,9 @@ std::string AgencyReadTransaction::toJson() const { ////////////////////////////////////////////////////////////////////////////// void AgencyReadTransaction::toVelocyPack(VPackBuilder& builder) const { - VPackArrayBuilder guard(&builder); - { - VPackArrayBuilder guard2(&builder); - for (std::string const& key: keys) { - builder.add(VPackValue(key)); - } + VPackArrayBuilder guard2(&builder); + for (std::string const& key: keys) { + builder.add(VPackValue(key)); } } @@ -225,7 +216,6 @@ AgencyCommResult::AgencyCommResult() _message(), _body(), _values(), - _index(0), _statusCode(0), _connected(false) {} @@ -328,7 +318,6 @@ void AgencyCommResult::clear() { _location = ""; _message = ""; _body = ""; - _index = 0; _statusCode = 0; } @@ -658,7 +647,7 @@ bool AgencyComm::tryInitializeStructure() { builder.add(VPackValue("Sync")); { VPackObjectBuilder c(&builder); - builder.add("LatestID", VPackValue("1")); + builder.add("LatestID", VPackValue(1)); addEmptyVPackObject("Problems", builder); builder.add("UserVersion", VPackValue(1)); addEmptyVPackObject("ServerStates", builder); @@ -1566,76 +1555,69 @@ bool AgencyComm::unlockWrite(std::string const& key, double timeout) { /// @brief get unique id //////////////////////////////////////////////////////////////////////////////// -AgencyCommResult AgencyComm::uniqid(std::string const& key, uint64_t count, - double timeout) { - static int const maxTries = 10; +uint64_t AgencyComm::uniqid(uint64_t count, double timeout) { + static int const maxTries = 1000000; + // this is pretty much forever, but we simply cannot continue at all + // if we do not get a unique id from the agency. 
int tries = 0; AgencyCommResult result; - while (tries++ < maxTries) { - result.clear(); - result = getValues(key, false); + uint64_t oldValue = 0; - if (result.httpCode() == - (int)arangodb::GeneralResponse::ResponseCode::NOT_FOUND) { + while (tries++ < maxTries) { + result = getValues2("Sync/LatestID"); + if (!result.successful()) { + usleep(500000); + continue; + } + + VPackSlice oldSlice = result.slice()[0].get(std::vector( + {prefixStripped(), "Sync", "LatestID"})); + + if (!(oldSlice.isSmallInt() || oldSlice.isUInt())) { + LOG(WARN) << "Sync/LatestID in agency is not an unsigned integer, fixing..."; try { VPackBuilder builder; builder.add(VPackValue(0)); // create the key on the fly - setValue(key, builder.slice(), 0.0); - tries--; + setValue("Sync/LatestID", builder.slice(), 0.0); - continue; } catch (...) { // Could not build local key. Try again } + continue; } - if (!result.successful()) { - return result; - } - - result.parse("", false); - - std::shared_ptr oldBuilder; - - std::map::iterator it = - result._values.begin(); - + // If we get here, slice is pointing to an unsigned integer, which + // is the value in the agency. + oldValue = 0; try { - if (it != result._values.end()) { - // steal the velocypack - oldBuilder.swap((*it).second._vpack); - } else { - oldBuilder->add(VPackValue(0)); - } - } catch (...) { - return AgencyCommResult(); + oldValue = oldSlice.getUInt(); + } + catch (...) { } - - VPackSlice oldSlice = oldBuilder->slice(); - uint64_t const oldValue = arangodb::basics::VelocyPackHelper::stringUInt64(oldSlice) + count; uint64_t const newValue = oldValue + count; VPackBuilder newBuilder; try { newBuilder.add(VPackValue(newValue)); } catch (...) 
{ - return AgencyCommResult(); + usleep(500000); + continue; } - result.clear(); - result = casValue(key, oldSlice, newBuilder.slice(), 0.0, timeout); + result = casValue("Sync/LatestID", oldSlice, newBuilder.slice(), + 0.0, timeout); if (result.successful()) { - result._index = oldValue + 1; break; } + // The cas did not work, simply try again! } - return result; + return oldValue; } //////////////////////////////////////////////////////////////////////////////// @@ -1841,11 +1823,56 @@ AgencyCommResult AgencyComm::sendTransactionWithFailover( std::string url(buildUrl()); - url += "/write"; + url += transaction.isWriteTransaction() ? "/write" : "/read"; - return sendWithFailover(arangodb::GeneralRequest::RequestType::POST, + VPackBuilder builder; + { + VPackArrayBuilder guard(&builder); + transaction.toVelocyPack(builder); + } + + AgencyCommResult result = sendWithFailover( + arangodb::GeneralRequest::RequestType::POST, timeout == 0.0 ? _globalConnectionOptions._requestTimeout : timeout, url, - transaction.toJson(), false); + builder.slice().toJson(), false); + + try { + result.setVPack(VPackParser::fromJson(result.body().c_str())); + + if (transaction.isWriteTransaction()) { + if (!result.slice().isObject() || + !result.slice().get("results").isArray()) { + result._statusCode = 500; + return result; + } + + if (result.slice().get("results").length() != 1) { + result._statusCode = 500; + return result; + } + } else { + if (!result.slice().isArray()) { + result._statusCode = 500; + return result; + } + + if (result.slice().length() != 1) { + result._statusCode = 500; + return result; + } + } + + result._body.clear(); + result._statusCode = 200; + + } catch(std::exception &e) { + LOG(ERR) << "Error transforming result. " << e.what(); + result.clear(); + } catch(...) { + LOG(ERR) << "Error transforming result. 
Out of memory"; + result.clear(); + } + return result; } //////////////////////////////////////////////////////////////////////////////// @@ -2061,15 +2088,8 @@ AgencyCommResult AgencyComm::send( result._message = response->getHttpReturnMessage(); basics::StringBuffer& sb = response->getBody(); result._body = std::string(sb.c_str(), sb.length()); - result._index = 0; result._statusCode = response->getHttpReturnCode(); - bool found = false; - std::string lastIndex = response->getHeaderField("x-etcd-index", found); - if (found) { - result._index = arangodb::basics::StringUtils::uint64(lastIndex); - } - LOG(TRACE) << "request to agency returned status code " << result._statusCode << ", message: '" << result._message << "', body: '" << result._body << "'"; diff --git a/arangod/Cluster/AgencyComm.h b/arangod/Cluster/AgencyComm.h index fe281ca792..807d838547 100644 --- a/arangod/Cluster/AgencyComm.h +++ b/arangod/Cluster/AgencyComm.h @@ -215,8 +215,10 @@ private: struct AgencyTransaction { virtual std::string toJson() const = 0; + virtual void toVelocyPack(arangodb::velocypack::Builder& builder) const = 0; virtual ~AgencyTransaction() { } + virtual bool isWriteTransaction() const = 0; }; struct AgencyWriteTransaction : public AgencyTransaction { @@ -237,7 +239,7 @@ struct AgencyWriteTransaction : public AgencyTransaction { /// @brief converts the transaction to velocypack ////////////////////////////////////////////////////////////////////////////// - void toVelocyPack(arangodb::velocypack::Builder& builder) const; + void toVelocyPack(arangodb::velocypack::Builder& builder) const override final; ////////////////////////////////////////////////////////////////////////////// /// @brief converts the transaction to json @@ -270,6 +272,13 @@ struct AgencyWriteTransaction : public AgencyTransaction { AgencyWriteTransaction() = default; +////////////////////////////////////////////////////////////////////////////// +/// @brief return type of transaction 
+////////////////////////////////////////////////////////////////////////////// + + bool isWriteTransaction() const override final { + return true; + } }; struct AgencyReadTransaction : public AgencyTransaction { @@ -284,7 +293,7 @@ struct AgencyReadTransaction : public AgencyTransaction { /// @brief converts the transaction to velocypack ////////////////////////////////////////////////////////////////////////////// - void toVelocyPack(arangodb::velocypack::Builder& builder) const; + void toVelocyPack(arangodb::velocypack::Builder& builder) const override final; ////////////////////////////////////////////////////////////////////////////// /// @brief converts the transaction to json @@ -300,12 +309,27 @@ struct AgencyReadTransaction : public AgencyTransaction { keys.push_back(key); } +////////////////////////////////////////////////////////////////////////////// +/// @brief shortcut to create a transaction with more than one operation +////////////////////////////////////////////////////////////////////////////// + + explicit AgencyReadTransaction(std::vector&& k) + : keys(k) { + } + ////////////////////////////////////////////////////////////////////////////// /// @brief default constructor ////////////////////////////////////////////////////////////////////////////// AgencyReadTransaction() = default; +////////////////////////////////////////////////////////////////////////////// +/// @brief return type of transaction +////////////////////////////////////////////////////////////////////////////// + + bool isWriteTransaction() const override final { + return false; + } }; struct AgencyCommResult { @@ -342,12 +366,6 @@ struct AgencyCommResult { int httpCode() const; - ////////////////////////////////////////////////////////////////////////////// - /// @brief extract the "index" attribute from the result - ////////////////////////////////////////////////////////////////////////////// - - uint64_t index() const { return _index; } - 
////////////////////////////////////////////////////////////////////////////// /// @brief extract the error code from the result ////////////////////////////////////////////////////////////////////////////// @@ -418,7 +436,6 @@ struct AgencyCommResult { std::string _realBody; std::map _values; - uint64_t _index; int _statusCode; bool _connected; }; @@ -635,7 +652,7 @@ class AgencyComm { /// @brief get unique id ////////////////////////////////////////////////////////////////////////////// - AgencyCommResult uniqid(std::string const&, uint64_t, double); + uint64_t uniqid(uint64_t, double); ////////////////////////////////////////////////////////////////////////////// /// @brief registers a callback on a key diff --git a/arangod/Cluster/ClusterInfo.cpp b/arangod/Cluster/ClusterInfo.cpp index 4725586491..b388582460 100644 --- a/arangod/Cluster/ClusterInfo.cpp +++ b/arangod/Cluster/ClusterInfo.cpp @@ -302,31 +302,44 @@ ClusterInfo::~ClusterInfo() { //////////////////////////////////////////////////////////////////////////////// uint64_t ClusterInfo::uniqid(uint64_t count) { - MUTEX_LOCKER(mutexLocker, _idLock); + while (true) { + uint64_t oldValue; + { + // The quick path, we have enough in our private reserve: + MUTEX_LOCKER(mutexLocker, _idLock); - if (_uniqid._currentValue + count - 1 >= _uniqid._upperValue) { + if (_uniqid._currentValue + count - 1 <= _uniqid._upperValue) { + uint64_t result = _uniqid._currentValue; + _uniqid._currentValue += count; + + return result; + } + oldValue = _uniqid._currentValue; + } + + // We need to fetch from the agency + uint64_t fetch = count; if (fetch < MinIdsPerBatch) { fetch = MinIdsPerBatch; } - AgencyCommResult result = _agency.uniqid("Sync/LatestID", fetch, 0.0); + uint64_t result = _agency.uniqid(fetch, 0.0); - if (!result.successful() || result._index == 0) { - return 0; + { + MUTEX_LOCKER(mutexLocker, _idLock); + + if (oldValue == _uniqid._currentValue) { + _uniqid._currentValue = result + count; + _uniqid._upperValue = 
result + fetch - 1; + + return result; + } + // If we get here, somebody else succeeded in doing the same, + // so we just try again. } - - _uniqid._currentValue = result._index + count; - _uniqid._upperValue = _uniqid._currentValue + fetch - 1; - - return result._index; } - - uint64_t result = _uniqid._currentValue; - _uniqid._currentValue += count; - - return result; } //////////////////////////////////////////////////////////////////////////////// diff --git a/arangod/Cluster/HeartbeatThread.cpp b/arangod/Cluster/HeartbeatThread.cpp index 3d88b34068..cf0b4a5e83 100644 --- a/arangod/Cluster/HeartbeatThread.cpp +++ b/arangod/Cluster/HeartbeatThread.cpp @@ -113,9 +113,6 @@ void HeartbeatThread::runDBServer() { // convert timeout to seconds double const interval = (double)_interval / 1000.0 / 1000.0; - // value of Sync/Commands/my-id at startup - uint64_t lastCommandIndex = getLastCommandIndex(); - std::function updatePlan = [&]( VPackSlice const& result) { if (!result.isNumber()) { @@ -170,23 +167,23 @@ void HeartbeatThread::runDBServer() { break; } - { - // send an initial GET request to Sync/Commands/my-id - - AgencyCommResult result = - _agency.getValues("Sync/Commands/" + _myId, false); - - if (result.successful()) { - handleStateChange(result, lastCommandIndex); - } - } - - if (isStopping()) { - break; - } - if (--currentCount == 0) { currentCount = currentCountStart; + + // send an initial GET request to Sync/Commands/my-id + LOG(TRACE) << "Looking at Sync/Commands/" + _myId; + + AgencyCommResult result = + _agency.getValues2("Sync/Commands/" + _myId); + + if (result.successful()) { + handleStateChange(result); + } + + if (isStopping()) { + break; + } + LOG(TRACE) << "Refetching Current/Version..."; AgencyCommResult res = _agency.getValues2("Current/Version"); if (!res.successful()) { @@ -286,9 +283,6 @@ void HeartbeatThread::runCoordinator() { // last value of current which we have noticed: uint64_t lastCurrentVersionNoticed = 0; - // value of 
Sync/Commands/my-id at startup - uint64_t lastCommandIndex = getLastCommandIndex(); - setReady(); while (!isStopping()) { @@ -303,28 +297,20 @@ void HeartbeatThread::runCoordinator() { break; } - { - - // send an initial GET request to Sync/Commands/my-id - AgencyCommResult result = - _agency.getValues("Sync/Commands/" + _myId, false); + AgencyReadTransaction trx(std::vector({ + _agency.prefix() + "Plan/Version", + _agency.prefix() + "Current/Version", + _agency.prefix() + "Sync/Commands/" + _myId, + _agency.prefix() + "Sync/UserVersion"})); + AgencyCommResult result = _agency.sendTransactionWithFailover(trx); - if (result.successful()) { - handleStateChange(result, lastCommandIndex); - } - } + if (!result.successful()) { + LOG(WARN) << "Heartbeat: Could not read from agency!"; + } else { + LOG(TRACE) << "Looking at Sync/Commands/" + _myId; - if (isStopping()) { - break; - } + handleStateChange(result); - bool shouldSleep = true; - - // get the current version of the Plan - - AgencyCommResult result = _agency.getValues2("Plan/Version"); - - if (result.successful()) { VPackSlice versionSlice = result.slice()[0].get(std::vector( {_agency.prefixStripped(), "Plan", "Version"})); @@ -345,15 +331,8 @@ void HeartbeatThread::runCoordinator() { } } } - } - result.clear(); - - - result = _agency.getValues2("Sync/UserVersion"); - if (result.successful()) { - - velocypack::Slice slice = + VPackSlice slice = result.slice()[0].get(std::vector( {_agency.prefixStripped(), "Sync", "UserVersion"})); @@ -395,14 +374,9 @@ void HeartbeatThread::runCoordinator() { } } } - } - result = _agency.getValues2("Current/Version"); - if (result.successful()) { - - VPackSlice versionSlice - = result.slice()[0].get(std::vector( - {_agency.prefixStripped(), "Plan", "Version"})); + versionSlice = result.slice()[0].get(std::vector( + {_agency.prefixStripped(), "Plan", "Version"})); if (versionSlice.isInteger()) { uint64_t currentVersion = 0; @@ -419,19 +393,19 @@ void 
HeartbeatThread::runCoordinator() { } } - if (shouldSleep) { - double remain = interval - (TRI_microtime() - start); + double remain = interval - (TRI_microtime() - start); - // sleep for a while if appropriate, on some systems usleep does not - // like arguments greater than 1000000 - while (remain > 0.0) { - if (remain >= 0.5) { - usleep(500000); - remain -= 0.5; - } else { - usleep((unsigned long)(remain * 1000.0 * 1000.0)); - remain = 0.0; - } + LOG(INFO) << "HeartbeatThread: remain is " << remain; + + // sleep for a while if appropriate, on some systems usleep does not + // like arguments greater than 1000000 + while (remain > 0.0) { + if (remain >= 0.5) { + usleep(500000); + remain -= 0.5; + } else { + usleep((unsigned long)(remain * 1000.0 * 1000.0)); + remain = 0.0; } } @@ -477,39 +451,6 @@ void HeartbeatThread::removeDispatchedJob(ServerJobResult result) { _condition.signal(); } -//////////////////////////////////////////////////////////////////////////////// -/// @brief fetch the index id of the value of Sync/Commands/my-id from the -/// agency this index value is determined initially and it is passed to the -/// watch command (we're waiting for an entry with a higher id) -//////////////////////////////////////////////////////////////////////////////// - -uint64_t HeartbeatThread::getLastCommandIndex() { - // get the initial command state - - AgencyCommResult result = _agency.getValues("Sync/Commands/" + _myId, false); - - if (result.successful()) { - result.parse("Sync/Commands/", false); - - std::map::iterator it = - result._values.find(_myId); - - if (it != result._values.end()) { - // found something - LOG(TRACE) << "last command index was: '" << (*it).second._index << "'"; - return (*it).second._index; - } - } - - if (result._index > 0) { - // use the value returned in header X-Etcd-Index - return result._index; - } - - // nothing found. 
this is not an error - return 0; -} - //////////////////////////////////////////////////////////////////////////////// /// @brief handles a plan version change, coordinator case /// this is triggered if the heartbeat thread finds a new plan version number @@ -648,7 +589,8 @@ bool HeartbeatThread::handlePlanChangeCoordinator(uint64_t currentPlanVersion) { //////////////////////////////////////////////////////////////////////////////// /// @brief handles a plan version change, DBServer case -/// this is triggered if the heartbeat thread finds a new plan version number +/// this is triggered if the heartbeat thread finds a new plan version number, +/// and every few heartbeats if the Current/Version has changed. //////////////////////////////////////////////////////////////////////////////// bool HeartbeatThread::syncDBServerStatusQuo() { @@ -716,21 +658,12 @@ bool HeartbeatThread::syncDBServerStatusQuo() { /// notified about this particular change again). //////////////////////////////////////////////////////////////////////////////// -bool HeartbeatThread::handleStateChange(AgencyCommResult& result, - uint64_t& lastCommandIndex) { - result.parse("Sync/Commands/", false); - - std::map::const_iterator it = - result._values.find(_myId); - - if (it != result._values.end()) { - lastCommandIndex = (*it).second._index; - - std::string command = ""; - VPackSlice const slice = it->second._vpack->slice(); - if (slice.isString()) { - command = slice.copyString(); - } +bool HeartbeatThread::handleStateChange(AgencyCommResult& result) { + VPackSlice const slice = result.slice()[0].get( + std::vector({ AgencyComm::prefixStripped(), "Sync", + "Commands", _myId })); + if (slice.isString()) { + std::string command = slice.copyString(); ServerState::StateEnum newState = ServerState::stringToState(command); if (newState != ServerState::STATE_UNDEFINED) { diff --git a/arangod/Cluster/HeartbeatThread.h b/arangod/Cluster/HeartbeatThread.h index c844527fbf..4d185e546c 100644 --- 
a/arangod/Cluster/HeartbeatThread.h +++ b/arangod/Cluster/HeartbeatThread.h @@ -128,13 +128,7 @@ class HeartbeatThread : public Thread { /// @brief handles a state change ////////////////////////////////////////////////////////////////////////////// - bool handleStateChange(AgencyCommResult&, uint64_t&); - - ////////////////////////////////////////////////////////////////////////////// - /// @brief fetch the last value of Sync/Commands/my-id from the agency - ////////////////////////////////////////////////////////////////////////////// - - uint64_t getLastCommandIndex(); + bool handleStateChange(AgencyCommResult&); ////////////////////////////////////////////////////////////////////////////// /// @brief sends the current server's state to the agency diff --git a/arangod/Cluster/v8-cluster.cpp b/arangod/Cluster/v8-cluster.cpp index 16458dc98b..61dc302dab 100644 --- a/arangod/Cluster/v8-cluster.cpp +++ b/arangod/Cluster/v8-cluster.cpp @@ -608,15 +608,13 @@ static void JS_UniqidAgency(v8::FunctionCallbackInfo const& args) { TRI_V8_TRY_CATCH_BEGIN(isolate); v8::HandleScope scope(isolate); - if (args.Length() < 1 || args.Length() > 3) { - TRI_V8_THROW_EXCEPTION_USAGE("uniqid(, , )"); + if (args.Length() > 2) { + TRI_V8_THROW_EXCEPTION_USAGE("uniqid(, )"); } - std::string const key = TRI_ObjectToString(args[0]); - uint64_t count = 1; - if (args.Length() > 1) { - count = TRI_ObjectToUInt64(args[1], true); + if (args.Length() > 0) { + count = TRI_ObjectToUInt64(args[0], true); } if (count < 1 || count > 10000000) { @@ -624,18 +622,14 @@ static void JS_UniqidAgency(v8::FunctionCallbackInfo const& args) { } double timeout = 0.0; - if (args.Length() > 2) { - timeout = TRI_ObjectToDouble(args[2]); + if (args.Length() > 1) { + timeout = TRI_ObjectToDouble(args[1]); } AgencyComm comm; - AgencyCommResult result = comm.uniqid(key, count, timeout); + uint64_t result = comm.uniqid(count, timeout); - if (!result.successful() || result._index == 0) { - THROW_AGENCY_EXCEPTION(result); 
- } - - std::string const value = StringUtils::itoa(result._index); + std::string const value = StringUtils::itoa(result); TRI_V8_RETURN_STD_STRING(value); TRI_V8_TRY_CATCH_END @@ -1070,7 +1064,7 @@ static void JS_UniqidClusterInfo( TRI_V8_THROW_EXCEPTION_PARAMETER(" is invalid"); } - uint64_t value = ClusterInfo::instance()->uniqid(); + uint64_t value = ClusterInfo::instance()->uniqid(count); if (value == 0) { TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, From 66f1fce32806ec2e629507139ff426c766c6c4cd Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Fri, 6 May 2016 22:46:23 +0200 Subject: [PATCH 24/31] Repair startLocalCluster.sh script w.r.t. XTERM and XTERMOPTIONS. --- scripts/startLocalCluster.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/startLocalCluster.sh b/scripts/startLocalCluster.sh index d56efcd97c..0767c2d230 100755 --- a/scripts/startLocalCluster.sh +++ b/scripts/startLocalCluster.sh @@ -1,8 +1,10 @@ #!/bin/bash if [ -z "$XTERM" ] ; then - XTERM=xterm + XTERM=x-terminal-emulator +fi +if [ -z "$XTERMOPTIONS" ] ; then + XTERMOPTIONS="--geometry=80x43" fi - if [ ! -d arangod ] || [ ! -d arangosh ] || [ ! -d UnitTests ] ; then echo Must be started in the main ArangoDB source directory. From 1d8c7e4515df558ec1ac87edfcfebe980b5f3b09 Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Fri, 6 May 2016 22:46:40 +0200 Subject: [PATCH 25/31] Take out a debug message. 
--- arangod/Cluster/HeartbeatThread.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/arangod/Cluster/HeartbeatThread.cpp b/arangod/Cluster/HeartbeatThread.cpp index cf0b4a5e83..33033073a2 100644 --- a/arangod/Cluster/HeartbeatThread.cpp +++ b/arangod/Cluster/HeartbeatThread.cpp @@ -395,8 +395,6 @@ void HeartbeatThread::runCoordinator() { double remain = interval - (TRI_microtime() - start); - LOG(INFO) << "HeartbeatThread: remain is " << remain; - // sleep for a while if appropriate, on some systems usleep does not // like arguments greater than 1000000 while (remain > 0.0) { From b5c87fba33762ddfa6d117ceb96dd05376800cdd Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Fri, 6 May 2016 22:47:17 +0200 Subject: [PATCH 26/31] Add a trivial quickie test and a convenience script. --- js/common/tests/shell/shell-quickie.js | 150 +++++++++++++++++++++++++ scripts/quickieTest.sh | 5 + 2 files changed, 155 insertions(+) create mode 100644 js/common/tests/shell/shell-quickie.js create mode 100755 scripts/quickieTest.sh diff --git a/js/common/tests/shell/shell-quickie.js b/js/common/tests/shell/shell-quickie.js new file mode 100644 index 0000000000..a08194a7fb --- /dev/null +++ b/js/common/tests/shell/shell-quickie.js @@ -0,0 +1,150 @@ +/*jshint globalstrict:false, strict:false, sub: true */ +/*global fail, assertTrue, assertEqual */ + +//////////////////////////////////////////////////////////////////////////////// +/// @brief very quick test for basic functionality +/// +/// @file +/// +/// DISCLAIMER +/// +/// Copyright 2016-2016 ArangoDB GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. 
+/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Max Neunhoeffer +/// @author Copyright 2016, ArangoDB GmbH, Cologne, Germany +//////////////////////////////////////////////////////////////////////////////// + +var jsunity = require("jsunity"); +var arangodb = require("@arangodb"); +var ERRORS = arangodb.errors; +var db = arangodb.db; + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test attributes +//////////////////////////////////////////////////////////////////////////////// + +function QuickieSuite () { + 'use strict'; + + return { + +//////////////////////////////////////////////////////////////////////////////// +/// @brief set up +//////////////////////////////////////////////////////////////////////////////// + + setUp : function () { + }, + +//////////////////////////////////////////////////////////////////////////////// +/// @brief tear down +//////////////////////////////////////////////////////////////////////////////// + + tearDown : function () { + }, + +//////////////////////////////////////////////////////////////////////////////// +/// @brief quickly create a collection and do some operations: +//////////////////////////////////////////////////////////////////////////////// + + testACollection: function () { + + try { + db._drop("UnitTestCollection"); + } + catch (e) { + } + + // Create a collection: + var c = db._create("UnitTestCollection", {numberOfShards:2}); + + // Do a bunch of operations: + var r = c.insert({"Hallo":12}); + var d = 
c.document(r._key); + assertEqual(12, d.Hallo); + c.replace(r._key, {"Hallo":13}); + d = c.document(r._key); + assertEqual(13, d.Hallo); + c.update(r._key, {"Hallo":14}); + d = c.document(r._key); + assertEqual(14, d.Hallo); + c.remove(r._key); + try { + d = c.document(r._key); + fail(); + } + catch (e) { + assertEqual(ERRORS.ERROR_ARANGO_DOCUMENT_NOT_FOUND.code, e.errorNum); + } + + // Drop the collection again: + c.drop(); + }, + +//////////////////////////////////////////////////////////////////////////////// +/// @brief quickly create a database and a collection and do some operations: +//////////////////////////////////////////////////////////////////////////////// + + testADatabase: function () { + try { + db._dropDatabase("UnitTestDatabase"); + } + catch (e) { + } + + db._createDatabase("UnitTestDatabase"); + db._useDatabase("UnitTestDatabase"); + + // Create a collection: + var c = db._create("UnitTestCollection", {numberOfShards:2}); + + // Do a bunch of operations: + var r = c.insert({"Hallo":12}); + var d = c.document(r._key); + assertEqual(12, d.Hallo); + c.replace(r._key, {"Hallo":13}); + d = c.document(r._key); + assertEqual(13, d.Hallo); + c.update(r._key, {"Hallo":14}); + d = c.document(r._key); + assertEqual(14, d.Hallo); + c.remove(r._key); + try { + d = c.document(r._key); + fail(); + } + catch (e) { + assertEqual(ERRORS.ERROR_ARANGO_DOCUMENT_NOT_FOUND.code, e.errorNum); + } + + // Drop the collection again: + c.drop(); + + // Drop the database again: + db._useDatabase("_system"); + db._dropDatabase("UnitTestDatabase"); + } + }; +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief executes the test suite +//////////////////////////////////////////////////////////////////////////////// + +jsunity.run(QuickieSuite); + +return jsunity.done(); + diff --git a/scripts/quickieTest.sh b/scripts/quickieTest.sh new file mode 100755 index 0000000000..0a31203e01 --- /dev/null +++ b/scripts/quickieTest.sh @@ -0,0 
+1,5 @@ +#!/bin/bash +scripts/unittest shell_server --test js/common/tests/shell/shell-quickie.js +scripts/unittest shell_server --test js/common/tests/shell/shell-quickie.js --cluster true +scripts/unittest shell_client --test js/common/tests/shell/shell-quickie.js +scripts/unittest shell_client --test js/common/tests/shell/shell-quickie.js --cluster true From b37d8fff48755fdf1f47f9115c40d0ae0e6d0d1b Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Fri, 6 May 2016 23:06:10 +0200 Subject: [PATCH 27/31] Better protection against multi-threading. --- arangod/Cluster/AgencyCallback.cpp | 4 ++-- arangod/Cluster/ClusterInfo.cpp | 31 ++++++++++++++++++++++++------ 2 files changed, 27 insertions(+), 8 deletions(-) diff --git a/arangod/Cluster/AgencyCallback.cpp b/arangod/Cluster/AgencyCallback.cpp index 569365033e..8d6be9e175 100644 --- a/arangod/Cluster/AgencyCallback.cpp +++ b/arangod/Cluster/AgencyCallback.cpp @@ -92,8 +92,8 @@ bool AgencyCallback::executeEmpty() { result = _cb(VPackSlice::noneSlice()); } + CONDITION_LOCKER(locker, _cv); if (_useCv) { - CONDITION_LOCKER(locker, _cv); _cv.signal(); } return result; @@ -107,8 +107,8 @@ bool AgencyCallback::execute(std::shared_ptr newData) { result = _cb(newData->slice()); } + CONDITION_LOCKER(locker, _cv); if (_useCv) { - CONDITION_LOCKER(locker, _cv); _cv.signal(); } return result; diff --git a/arangod/Cluster/ClusterInfo.cpp b/arangod/Cluster/ClusterInfo.cpp index 6ebd40604c..398e2d7ed3 100644 --- a/arangod/Cluster/ClusterInfo.cpp +++ b/arangod/Cluster/ClusterInfo.cpp @@ -901,6 +901,7 @@ int ClusterInfo::createDatabaseCoordinator(std::string const& name, std::vector DBServers = getCurrentDBServers(); int dbServerResult = -1; + std::function dbServerChanged = [&](VPackSlice const& result) { size_t numDbServers; @@ -908,7 +909,11 @@ int ClusterInfo::createDatabaseCoordinator(std::string const& name, MUTEX_LOCKER(guard, dbServersMutex); numDbServers = DBServers.size(); } - if (result.isObject() && result.length() == 
numDbServers) { + if (result.isObject() && result.length() >= numDbServers) { + // We use >= here since the number of DBservers could have increased + // during the creation of the database and we might not yet have + // the latest list. Thus there could be more reports than we know + // servers. VPackObjectIterator dbs(result); std::string tmpMsg = ""; @@ -934,16 +939,24 @@ int ClusterInfo::createDatabaseCoordinator(std::string const& name, } } if (tmpHaveError) { + MUTEX_LOCKER(guard, dbServersMutex); errorMsg = "Error in creation of database:" + tmpMsg; dbServerResult = TRI_ERROR_CLUSTER_COULD_NOT_CREATE_DATABASE; return true; } loadCurrent(); // update our cache - dbServerResult = setErrormsg(TRI_ERROR_NO_ERROR, errorMsg); + { + MUTEX_LOCKER(guard, dbServersMutex); + dbServerResult = setErrormsg(TRI_ERROR_NO_ERROR, errorMsg); + } } return true; }; + // ATTENTION: The following callback calls the above closure in a + // different thread. Nevertheless, the closure accesses some of our + // local variables. Therefore we have to protect all accesses to them + // by the above mutex. 
auto agencyCallback = std::make_shared( ac, "Current/Databases/" + name, dbServerChanged, true, false); _agencyCallbackRegistry->registerCallback(agencyCallback); @@ -974,8 +987,11 @@ int ClusterInfo::createDatabaseCoordinator(std::string const& name, int count = 0; // this counts, when we have to reload the DBServers while (TRI_microtime() <= endTime) { agencyCallback->executeByCallbackOrTimeout(getReloadServerListTimeout() / interval); - if (dbServerResult >= 0) { - break; + { + MUTEX_LOCKER(guard, dbServersMutex); + if (dbServerResult >= 0) { + break; + } } if (++count >= static_cast(getReloadServerListTimeout() / interval)) { @@ -991,8 +1007,11 @@ int ClusterInfo::createDatabaseCoordinator(std::string const& name, count = 0; } } - if (dbServerResult >= 0) { - return dbServerResult; + { + MUTEX_LOCKER(guard, dbServersMutex); + if (dbServerResult >= 0) { + return dbServerResult; + } } return setErrormsg(TRI_ERROR_CLUSTER_TIMEOUT, errorMsg); } From 7ed306c2e77372a606ac79430c968fb7af6f402b Mon Sep 17 00:00:00 2001 From: Max Neunhoeffer Date: Fri, 6 May 2016 23:25:19 +0200 Subject: [PATCH 28/31] Fix double locking of mutex of condition variable. --- arangod/Cluster/AgencyCallback.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arangod/Cluster/AgencyCallback.cpp b/arangod/Cluster/AgencyCallback.cpp index 8d6be9e175..a60f204bad 100644 --- a/arangod/Cluster/AgencyCallback.cpp +++ b/arangod/Cluster/AgencyCallback.cpp @@ -137,10 +137,12 @@ void AgencyCallback::executeByCallbackOrTimeout(double maxTimeout) { compareBuilder = _lastData; } - CONDITION_LOCKER(locker, _cv); - _useCv = true; - locker.wait(static_cast(maxTimeout * 1000000.0)); - _useCv = false; + { + CONDITION_LOCKER(locker, _cv); + _useCv = true; + locker.wait(static_cast(maxTimeout * 1000000.0)); + _useCv = false; + } if (!_lastData || _lastData->slice().equals(compareBuilder->slice())) { LOG(DEBUG) << "Waiting done and nothing happended. 
Refetching to be sure"; From 57bc554d9afbd41c017a523350ac2e985366c2f2 Mon Sep 17 00:00:00 2001 From: jsteemann Date: Sat, 7 May 2016 07:55:57 +0200 Subject: [PATCH 29/31] cppcheck --- lib/Rest/GeneralRequest.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Rest/GeneralRequest.h b/lib/Rest/GeneralRequest.h index f7591a121f..f37983a056 100644 --- a/lib/Rest/GeneralRequest.h +++ b/lib/Rest/GeneralRequest.h @@ -80,7 +80,7 @@ class GeneralRequest { static RequestType findRequestType(char const*, size_t const); public: - GeneralRequest(ConnectionInfo const& connectionInfo) + explicit GeneralRequest(ConnectionInfo const& connectionInfo) : _version(ProtocolVersion::UNKNOWN), _connectionInfo(connectionInfo), _clientTaskId(0), From e1f874ad4a7d312de11ba0837614e67eef390940 Mon Sep 17 00:00:00 2001 From: jsteemann Date: Sat, 7 May 2016 07:56:16 +0200 Subject: [PATCH 30/31] jslint --- js/common/tests/shell/shell-quickie.js | 2 +- js/server/modules/@arangodb/arango-statement.js | 6 ------ 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/js/common/tests/shell/shell-quickie.js b/js/common/tests/shell/shell-quickie.js index a08194a7fb..4bbc36cd49 100644 --- a/js/common/tests/shell/shell-quickie.js +++ b/js/common/tests/shell/shell-quickie.js @@ -1,5 +1,5 @@ /*jshint globalstrict:false, strict:false, sub: true */ -/*global fail, assertTrue, assertEqual */ +/*global fail, assertEqual */ //////////////////////////////////////////////////////////////////////////////// /// @brief very quick test for basic functionality diff --git a/js/server/modules/@arangodb/arango-statement.js b/js/server/modules/@arangodb/arango-statement.js index c62cfef782..89a86233df 100644 --- a/js/server/modules/@arangodb/arango-statement.js +++ b/js/server/modules/@arangodb/arango-statement.js @@ -80,13 +80,7 @@ ArangoStatement.prototype.execute = function () { } } - try { var result = AQL_EXECUTE(this._query, this._bindVars, opts); - } catch (e) { - 
console.log("HASSHASSHASSHASS", this._query, e); - throw e; - } - return new GeneralArrayCursor(result.json, 0, null, result); }; From 4691a0868d0622a16d74ade9e4bb7528b43d31eb Mon Sep 17 00:00:00 2001 From: jsteemann Date: Sat, 7 May 2016 08:06:26 +0200 Subject: [PATCH 31/31] removed unnecessary iostream includes --- arangod/Agency/Node.cpp | 2 +- arangod/Agency/RestAgencyPrivHandler.cpp | 2 +- arangod/Agency/Store.cpp | 2 -- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/arangod/Agency/Node.cpp b/arangod/Agency/Node.cpp index f75cf61084..7ad6c97ee1 100644 --- a/arangod/Agency/Node.cpp +++ b/arangod/Agency/Node.cpp @@ -133,7 +133,7 @@ Node& Node::operator= (Node const& rhs) { _children = rhs._children; return *this; } -#include + // Comparison with slice bool Node::operator== (VPackSlice const& rhs) const { if (rhs.isObject()) { diff --git a/arangod/Agency/RestAgencyPrivHandler.cpp b/arangod/Agency/RestAgencyPrivHandler.cpp index 3f5bf798fb..960f267cdc 100644 --- a/arangod/Agency/RestAgencyPrivHandler.cpp +++ b/arangod/Agency/RestAgencyPrivHandler.cpp @@ -70,7 +70,7 @@ inline HttpHandler::status_t RestAgencyPrivHandler::reportMethodNotAllowed() { generateError(GeneralResponse::ResponseCode::METHOD_NOT_ALLOWED, 405); return HttpHandler::status_t(HANDLER_DONE); } -#include + HttpHandler::status_t RestAgencyPrivHandler::execute() { try { VPackBuilder result; diff --git a/arangod/Agency/Store.cpp b/arangod/Agency/Store.cpp index 05119c2608..62d4b0327a 100644 --- a/arangod/Agency/Store.cpp +++ b/arangod/Agency/Store.cpp @@ -29,7 +29,6 @@ #include "Basics/StringUtils.h" #include "Basics/VelocyPackHelper.h" - #include #include #include @@ -37,7 +36,6 @@ #include #include -#include using namespace arangodb::consensus; using namespace arangodb::basics;