diff --git a/Documentation/Books/AQL/Invocation/WithArangosh.md b/Documentation/Books/AQL/Invocation/WithArangosh.md
index 7f9e1349bf..3a21a58ea3 100644
--- a/Documentation/Books/AQL/Invocation/WithArangosh.md
+++ b/Documentation/Books/AQL/Invocation/WithArangosh.md
@@ -337,7 +337,7 @@ a client.
### Using cursors to obtain additional information on internal timings
Cursors can also optionally provide statistics of the internal execution phases. By default, they do not.
-To get to know how long parsing, otpimisation, instanciation and execution took,
+To get to know how long parsing, optimization, instantiation and execution took,
make the server return that by setting the *profile* attribute to
*true* when creating a statement:
@@ -359,3 +359,16 @@ produced statistics:
c.getExtra();
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock 06_workWithAQL_statements12
+
+Query validation
+----------------
+
+The *_parse* method of the *db* object can be used to parse and validate a
+query syntactically, without actually executing it.
+
+ @startDocuBlockInline 06_workWithAQL_statements13
+ @EXAMPLE_ARANGOSH_OUTPUT{06_workWithAQL_statements13}
+ db._parse( "FOR i IN [ 1, 2 ] RETURN i" );
+ @END_EXAMPLE_ARANGOSH_OUTPUT
+ @endDocuBlock 06_workWithAQL_statements13
+
diff --git a/Documentation/Books/Manual/Appendix/References/CollectionObject.md b/Documentation/Books/Manual/Appendix/References/CollectionObject.md
index a316cb29fc..57e866d230 100644
--- a/Documentation/Books/Manual/Appendix/References/CollectionObject.md
+++ b/Documentation/Books/Manual/Appendix/References/CollectionObject.md
@@ -10,7 +10,7 @@ The following methods exist on the collection object (returned by *db.name*):
* [collection.drop()](../../DataModeling/Collections/CollectionMethods.md#drop)
* [collection.figures()](../../DataModeling/Collections/CollectionMethods.md#figures)
* [collection.load()](../../DataModeling/Collections/CollectionMethods.md#load)
-* [collection.properties()](../../DataModeling/Collections/CollectionMethods.md#properties)
+* [collection.properties(options)](../../DataModeling/Collections/CollectionMethods.md#properties)
* [collection.revision()](../../DataModeling/Collections/CollectionMethods.md#revision)
* [collection.rotate()](../../DataModeling/Collections/CollectionMethods.md#rotate)
* [collection.toArray()](../../DataModeling/Documents/DocumentMethods.md#toarray)
@@ -29,6 +29,7 @@ The following methods exist on the collection object (returned by *db.name*):
* [collection.all()](../../DataModeling/Documents/DocumentMethods.md#all)
* [collection.any()](../../DataModeling/Documents/DocumentMethods.md#any)
+* [collection.byExample(example)](../../DataModeling/Documents/DocumentMethods.md#query-by-example)
* [collection.closedRange(attribute, left, right)](../../DataModeling/Documents/DocumentMethods.md#closed-range)
* [collection.document(object)](../../DataModeling/Documents/DocumentMethods.md#document)
* [collection.documents(keys)](../../DataModeling/Documents/DocumentMethods.md#lookup-by-keys)
@@ -40,9 +41,9 @@ The following methods exist on the collection object (returned by *db.name*):
* [collection.edges(vertices)](../../DataModeling/Documents/DocumentMethods.md#edges)
* [collection.iterate(iterator,options)](../../DataModeling/Documents/DocumentMethods.md#misc)
* [collection.outEdges(vertex-id)](../../DataModeling/Documents/DocumentMethods.md#edges)
-* [collection.queryByExample(example)](../../DataModeling/Documents/DocumentMethods.md#query-by-example)
* [collection.range(attribute, left, right)](../../DataModeling/Documents/DocumentMethods.md#range)
* [collection.remove(selector)](../../DataModeling/Documents/DocumentMethods.md#remove)
+* [collection.removeByExample(example)](../../DataModeling/Documents/DocumentMethods.md#remove-by-example)
* [collection.removeByKeys(keys)](../../DataModeling/Documents/DocumentMethods.md#remove-by-keys)
* [collection.rename()](../../DataModeling/Collections/CollectionMethods.md#rename)
* [collection.replace(selector, data)](../../DataModeling/Documents/DocumentMethods.md#replace)
diff --git a/Documentation/Books/Manual/Appendix/References/DBObject.md b/Documentation/Books/Manual/Appendix/References/DBObject.md
index 1ab80987e6..669ac153cb 100644
--- a/Documentation/Books/Manual/Appendix/References/DBObject.md
+++ b/Documentation/Books/Manual/Appendix/References/DBObject.md
@@ -31,6 +31,7 @@ The following methods exists on the *_db* object:
*Collection*
* [db._collection(name)](../../DataModeling/Collections/DatabaseMethods.md#collection)
+* [db._collections()](../../DataModeling/Collections/DatabaseMethods.md#all-collections)
* [db._create(name)](../../DataModeling/Collections/DatabaseMethods.md#create)
* [db._drop(name)](../../DataModeling/Collections/DatabaseMethods.md#drop)
* [db._truncate(name)](../../DataModeling/Collections/DatabaseMethods.md#truncate)
@@ -40,6 +41,7 @@ The following methods exists on the *_db* object:
* [db._createStatement(query)](../../../AQL/Invocation/WithArangosh.html#with-createstatement-arangostatement)
* [db._query(query)](../../../AQL/Invocation/WithArangosh.html#with-dbquery)
* [db._explain(query)](../../ReleaseNotes/NewFeatures28.md#miscellaneous-improvements)
+* [db._parse(query)](../../../AQL/Invocation/WithArangosh.html#query-validation)
*Document*
@@ -48,3 +50,16 @@ The following methods exists on the *_db* object:
* [db._remove(selector)](../../DataModeling/Documents/DatabaseMethods.md#remove)
* [db._replace(selector,data)](../../DataModeling/Documents/DatabaseMethods.md#replace)
* [db._update(selector,data)](../../DataModeling/Documents/DatabaseMethods.md#update)
+
+*Views*
+
+* [db._view(name)](../../DataModeling/Views/DatabaseMethods.md#view)
+* [db._views()](../../DataModeling/Views/DatabaseMethods.md#all-views)
+* [db._createView(name, type, properties)](../../DataModeling/Views/DatabaseMethods.md#create)
+* [db._dropView(name)](../../DataModeling/Views/DatabaseMethods.md#drop)
+
+*Global*
+
+* [db._engine()](../../DataModeling/Databases/WorkingWith.md#engine)
+* [db._engineStats()](../../DataModeling/Databases/WorkingWith.md#engine-statistics)
+* [db._executeTransaction()](../../Transactions/TransactionInvocation.md)
diff --git a/Documentation/Books/Manual/DataModeling/Collections/README.md b/Documentation/Books/Manual/DataModeling/Collections/README.md
index 13c2b8a1bc..a22402980b 100644
--- a/Documentation/Books/Manual/DataModeling/Collections/README.md
+++ b/Documentation/Books/Manual/DataModeling/Collections/README.md
@@ -58,7 +58,7 @@ altogether k copies of each shard are kept in the cluster on k different
servers, and are kept in sync. That is, every write operation is automatically
replicated on all copies.
-This is organised using a leader/follower model. At all times, one of the
+This is organized using a leader/follower model. At all times, one of the
servers holding replicas for a shard is "the leader" and all others
are "followers", this configuration is held in the Agency (see
[Scalability](../../Scalability/README.md) for details of the ArangoDB
diff --git a/Documentation/Books/Manual/DataModeling/Databases/WorkingWith.md b/Documentation/Books/Manual/DataModeling/Databases/WorkingWith.md
index 1fe5ede9a7..09c651ff7c 100644
--- a/Documentation/Books/Manual/DataModeling/Databases/WorkingWith.md
+++ b/Documentation/Books/Manual/DataModeling/Databases/WorkingWith.md
@@ -185,12 +185,21 @@ database. The *_system* database itself cannot be dropped.
Databases are dropped asynchronously, and will be physically removed if
all clients have disconnected and references have been garbage-collected.
+### Engine
+
+retrieve the storage engine type used by the server
+`db._engine()`
+
+Returns the name of the storage engine in use (`mmfiles` or `rocksdb`), as well
+as a list of supported features (types of indexes and
+[dfdb](../../Troubleshooting/DatafileDebugger.md)).
+
### Engine statistics
-retrieve statistics related to the storage engine-rocksdb
+retrieve statistics related to the storage engine (rocksdb)
`db._engineStats()`
-Returns some statistics related to storage engine activity, including figures
+Returns some statistics related to the storage engine activity, including figures
about data size, cache usage, etc.
**Note**: Currently this only produces useful output for the RocksDB engine.
diff --git a/Documentation/Books/Manual/Graphs/README.md b/Documentation/Books/Manual/Graphs/README.md
index b229dfbc51..2826b7059b 100644
--- a/Documentation/Books/Manual/Graphs/README.md
+++ b/Documentation/Books/Manual/Graphs/README.md
@@ -291,7 +291,7 @@ The above referenced chapters describe the various APIs of ArangoDBs graph engin
- [Traversing a graph in full depth](../../Cookbook/Graph/FulldepthTraversal.html)
- [Using an example vertex with the java driver](../../Cookbook/Graph/JavaDriverGraphExampleVertex.html)
- - [Retrieving documents from ArangoDB without knowing the structure](https://docs.arangodb.com/cookbook/Graph/JavaDriverBaseDocument.html)
+ - [Retrieving documents from ArangoDB without knowing the structure](../../Cookbook/UseCases/JavaDriverBaseDocument.html)
- [Using a custom visitor from node.js](../../Cookbook/Graph/CustomVisitorFromNodeJs.html)
- [AQL Example Queries on an Actors and Movies Database](../../Cookbook/Graph/ExampleActorsAndMovies.html)
diff --git a/Documentation/Books/Manual/README.md b/Documentation/Books/Manual/README.md
index 02867e7264..4c391346ba 100644
--- a/Documentation/Books/Manual/README.md
+++ b/Documentation/Books/Manual/README.md
@@ -25,8 +25,8 @@ The documentation is organized in four handbooks:
Features are illustrated with interactive usage examples; you can cut'n'paste them
into [arangosh](Administration/Arangosh/README.md) to try them out. The HTTP
[REST-API](../HTTP/index.html) for driver developers is demonstrated with cut'n'paste
-recepies intended to be used with the [cURL](http://curl.haxx.se). Drivers may provide
-their own examples based on these .js based examples to improve understandeability
+recipes intended to be used with [cURL](http://curl.haxx.se). Drivers may provide
+their own examples based on these .js based examples to improve understandability
for their respective users, i.e. for the [java driver](https://github.com/arangodb/arangodb-java-driver#learn-more)
some of the samples are re-implemented.
diff --git a/Documentation/Books/Manual/ReleaseNotes/KnownIssues32.md b/Documentation/Books/Manual/ReleaseNotes/KnownIssues32.md
index 2ebad471ac..67e7e4dbff 100644
--- a/Documentation/Books/Manual/ReleaseNotes/KnownIssues32.md
+++ b/Documentation/Books/Manual/ReleaseNotes/KnownIssues32.md
@@ -102,5 +102,5 @@ Mac OS X
OpenSSL 1.1
-----------
- * ArangoDB has been tested with OpenSSL 1.0 only and won't build against 1.1 when compiling on your own. See [here](../../cookbook/Compiling/OpenSSL.html)
+ * ArangoDB has been tested with OpenSSL 1.0 only and won't build against 1.1 when compiling on your own. See [here](../../Cookbook/Compiling/OpenSSL.html)
for how to compile on systems that ship OpenSSL 1.1 by default.
diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures24.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures24.md
index da0d2e9b4a..1d98ec5ad7 100644
--- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures24.md
+++ b/Documentation/Books/Manual/ReleaseNotes/NewFeatures24.md
@@ -64,7 +64,7 @@ is here:
* [part 2](https://www.arangodb.com/2014/12/02/building-hypermedia-apis-design)
* [part 3](https://www.arangodb.com/2014/12/08/building-hypermedia-apis-foxxgenerator)
-A cookbook recipe for getting started with FoxxGenerator is [here](https://docs.arangodb.com/2.8/cookbook/FoxxGeneratorFirstSteps.html).
+A cookbook recipe for getting started with FoxxGenerator is [here](https://docs.arangodb.com/2.8/Cookbook/FoxxGeneratorFirstSteps.html).
AQL improvements
----------------
diff --git a/Documentation/Books/Manual/ReleaseNotes/NewFeatures26.md b/Documentation/Books/Manual/ReleaseNotes/NewFeatures26.md
index 0ddac788ce..52597728f0 100644
--- a/Documentation/Books/Manual/ReleaseNotes/NewFeatures26.md
+++ b/Documentation/Books/Manual/ReleaseNotes/NewFeatures26.md
@@ -212,7 +212,7 @@ You can now write tests for your Foxx apps using the Mocha testing framework:
https://www.arangodb.com/2015/04/testing-foxx-mocha/
A recipe for writing tests for your Foxx apps can be found in the cookbook:
-https://docs.arangodb.com/2.8/cookbook/FoxxTesting.html
+https://docs.arangodb.com/2.8/Cookbook/FoxxTesting.html
### API Documentation
diff --git a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges27.md b/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges27.md
index 7c21b33757..8d5207f7f0 100644
--- a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges27.md
+++ b/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges27.md
@@ -64,7 +64,7 @@ The properties `setup` and `teardown` have been moved into the `scripts` propert
### Foxx Queues
-Function-based Foxx Queue job types are no longer supported. To learn about how you can use the new script-based job types [follow the updated recipe in the cookbook](https://docs.arangodb.com/2.8/cookbook/FoxxQueues.html).
+Function-based Foxx Queue job types are no longer supported. To learn about how you can use the new script-based job types [follow the updated recipe in the cookbook](https://docs.arangodb.com/2.8/Cookbook/FoxxQueues.html).
### Foxx Sessions
diff --git a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges30.md b/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges30.md
index a2488a5636..8552b9eb72 100644
--- a/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges30.md
+++ b/Documentation/Books/Manual/ReleaseNotes/UpgradingChanges30.md
@@ -149,7 +149,7 @@ The functions:
* GRAPH_PATHS
* GRAPH_VERTICES
-are covered in [Migrating GRAPH_* Functions from 2.8 or earlier to 3.0](https://docs.arangodb.com/cookbook/AQL/MigratingGraphFunctionsTo3.html)
+are covered in [Migrating GRAPH_* Functions from 2.8 or earlier to 3.0](../../Cookbook/AQL/MigratingGraphFunctionsTo3.html)
* GRAPH_ABSOLUTE_BETWEENNESS
* GRAPH_ABSOLUTE_CLOSENESS
@@ -160,7 +160,7 @@ are covered in [Migrating GRAPH_* Functions from 2.8 or earlier to 3.0](https://
* GRAPH_ECCENTRICITY
* GRAPH_RADIUS
-are covered in [Migrating GRAPH_* Measurements from 2.8 or earlier to 3.0](https://docs.arangodb.com/cookbook/AQL/MigratingMeasurementsTo3.html)
+are covered in [Migrating GRAPH_* Measurements from 2.8 or earlier to 3.0](../../Cookbook/AQL/MigratingMeasurementsTo3.html)
* EDGES
* NEIGHBORS
@@ -168,7 +168,7 @@ are covered in [Migrating GRAPH_* Measurements from 2.8 or earlier to 3.0](https
* TRAVERSAL
* TRAVERSAL_TREE
-are covered in [Migrating anonymous graph functions from 2.8 or earlier to 3.0](https://docs.arangodb.com/3/cookbook/AQL/MigratingEdgeFunctionsTo3.html)
+are covered in [Migrating anonymous graph functions from 2.8 or earlier to 3.0](../../Cookbook/AQL/MigratingEdgeFunctionsTo3.html)
### Typecasting functions
diff --git a/Documentation/Examples/06_workWithAQL_statements13.generated b/Documentation/Examples/06_workWithAQL_statements13.generated
new file mode 100644
index 0000000000..b5b11b78d3
--- /dev/null
+++ b/Documentation/Examples/06_workWithAQL_statements13.generated
@@ -0,0 +1,47 @@
+arangosh> db._parse( "FOR i IN [ 1, 2 ] RETURN i" );
+{
+ "code" : 200,
+ "parsed" : true,
+ "collections" : [ ],
+ "bindVars" : [ ],
+ "ast" : [
+ {
+ "type" : "root",
+ "subNodes" : [
+ {
+ "type" : "for",
+ "subNodes" : [
+ {
+ "type" : "variable",
+ "name" : "i",
+ "id" : 0
+ },
+ {
+ "type" : "array",
+ "subNodes" : [
+ {
+ "type" : "value",
+ "value" : 1
+ },
+ {
+ "type" : "value",
+ "value" : 2
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "type" : "return",
+ "subNodes" : [
+ {
+ "type" : "reference",
+ "name" : "i",
+ "id" : 0
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
diff --git a/README_maintainers.md b/README_maintainers.md
index 9cb5601915..60a9668635 100644
--- a/README_maintainers.md
+++ b/README_maintainers.md
@@ -298,6 +298,8 @@ syntax --option value --sub:option value. Using Valgrind could look like this:
--extraArgs:scheduler.threads 1 \
--extraArgs:javascript.gc-frequency 1000000 \
--extraArgs:javascript.gc-interval 65536 \
+ --extraArgs:log.level debug \
+ --extraArgs:log.force-direct true \
--javascript.v8-contexts 2 \
--valgrind /usr/bin/valgrind \
--valgrindargs:log-file /tmp/valgrindlog.%p
@@ -306,6 +308,9 @@ syntax --option value --sub:option value. Using Valgrind could look like this:
- we specify some arangod arguments via --extraArgs which increase the server performance
- we specify to run using valgrind (this is supported by all facilities)
- we specify some valgrind commandline arguments
+ - we set the loglevel to debug
+ - we force the logging not to happen asynchronously
+ - if needed, you may still add temporary `console.log()` statements to tests you debug.
Running a single unittestsuite
------------------------------
diff --git a/arangod/Aql/Functions.cpp b/arangod/Aql/Functions.cpp
index dca19befa5..304a0624b3 100644
--- a/arangod/Aql/Functions.cpp
+++ b/arangod/Aql/Functions.cpp
@@ -317,10 +317,10 @@ AqlValue Functions::AddOrSubtractUnitFromTimestamp(Query* query,
tp_sys_clock_ms resTime;
if (isSubtract) {
resTime = tp_sys_clock_ms{sys_days(ymd) + day_time.to_duration() -
- std::chrono::duration_cast(ms)};
+                                 std::chrono::duration_cast<std::chrono::milliseconds>(ms)};
} else {
resTime = tp_sys_clock_ms{sys_days(ymd) + day_time.to_duration() +
- std::chrono::duration_cast(ms)};
+                                 std::chrono::duration_cast<std::chrono::milliseconds>(ms)};
}
return TimeAqlValue(resTime);
}
@@ -2409,7 +2409,7 @@ AqlValue Functions::RegexReplace(arangodb::aql::Query* query,
AqlValue Functions::DateNow(arangodb::aql::Query*, transaction::Methods*,
VPackFunctionParameters const&) {
auto millis =
- duration_cast(system_clock::now().time_since_epoch());
+      std::chrono::duration_cast<std::chrono::milliseconds>(system_clock::now().time_since_epoch());
uint64_t dur = millis.count();
return AqlValue(AqlValueHintUInt(dur));
}
@@ -2462,12 +2462,14 @@ AqlValue Functions::DateFromParameters(
funcName = "DATE_ISO8601";
}
tp_sys_clock_ms tp;
+  duration<int64_t, std::milli> time;
if (parameters.size() == 1) {
if (!ParameterToTimePoint(query, trx, parameters, tp, funcName.c_str(),
0)) {
return AqlValue(AqlValueHintNull());
}
+ time = tp.time_since_epoch();
} else {
if (parameters.size() < 3 || parameters.size() > 7) {
// YMD is a must
@@ -2557,12 +2559,16 @@ AqlValue Functions::DateFromParameters(
}
}
- tp = sys_days(ymd) + h + min + s + ms;
+ time = sys_days(ymd).time_since_epoch();
+ time += h;
+ time += min;
+ time += s;
+ time += ms;
+ tp = tp_sys_clock_ms(time);
}
if (asTimestamp) {
- auto millis = duration_cast(tp.time_since_epoch());
- return AqlValue(AqlValueHintInt(millis.count()));
+ return AqlValue(AqlValueHintInt(time.count()));
} else {
return TimeAqlValue(tp);
}
diff --git a/arangod/Aql/RestAqlHandler.h b/arangod/Aql/RestAqlHandler.h
index 6217eb8d68..604ab0b3d5 100644
--- a/arangod/Aql/RestAqlHandler.h
+++ b/arangod/Aql/RestAqlHandler.h
@@ -158,9 +158,6 @@ class RestAqlHandler : public RestVocbaseBaseHandler {
// dig out vocbase from context and query from ID, handle errors
bool findQuery(std::string const& idString, Query*& query);
- // name of the queue
- static std::string const QUEUE_NAME;
-
// our query registry
QueryRegistry* _queryRegistry;
diff --git a/js/client/modules/@arangodb/arango-collection.js b/js/client/modules/@arangodb/arango-collection.js
index 3c14754a58..9c4cb7b9b4 100644
--- a/js/client/modules/@arangodb/arango-collection.js
+++ b/js/client/modules/@arangodb/arango-collection.js
@@ -260,6 +260,7 @@ var helpArangoCollection = arangosh.createHelpHeadline('ArangoCollection help')
' type() type of the collection ' + '\n' +
' truncate() delete all documents ' + '\n' +
' properties() show collection properties ' + '\n' +
+  '    properties(<data>)           change collection properties ' + '\n' +
' drop() delete a collection ' + '\n' +
' load() load a collection ' + '\n' +
' unload() unload a collection ' + '\n' +
diff --git a/js/client/modules/@arangodb/arango-database.js b/js/client/modules/@arangodb/arango-database.js
index 41f9603528..0af3e0702c 100644
--- a/js/client/modules/@arangodb/arango-database.js
+++ b/js/client/modules/@arangodb/arango-database.js
@@ -252,10 +252,11 @@ var helpArangoDatabase = arangosh.createHelpHeadline('ArangoDatabase (db) help')
' _createStatement() create and return AQL query ' + '\n' +
' ' + '\n' +
'View Functions: ' + '\n' +
-  '   _views()                         list all views             ' + '\n' +
-  '   _view(<name>)                    get view by name           ' + '\n' +
-  '   _createView(<name>, <type>, <properties>) creates a new view ' + '\n' +
-  '   _dropView(<name>)                delete a view              ';
+  '   _views()                         list all views             ' + '\n' +
+  '   _view(<name>)                    get view by name           ' + '\n' +
+  '   _createView(<name>, <type>,      creates a new view         ' + '\n' +
+  '               <properties>)                                   ' + '\n' +
+  '   _dropView(<name>)                delete a view              ';
ArangoDatabase.prototype._help = function () {
internal.print(helpArangoDatabase);
@@ -283,7 +284,7 @@ ArangoDatabase.prototype._collections = function () {
var result = [];
var i;
- // add all collentions to object
+ // add all collections to object
for (i = 0; i < collections.length; ++i) {
var collection = new this._collectionConstructor(this, collections[i]);
this._registerCollection(collection._name, collection);
diff --git a/js/server/tests/aql/aql-optimizer-geoindex.js b/js/server/tests/aql/aql-optimizer-geoindex.js
index 9bf5678610..75933bc81e 100644
--- a/js/server/tests/aql/aql-optimizer-geoindex.js
+++ b/js/server/tests/aql/aql-optimizer-geoindex.js
@@ -284,7 +284,7 @@ function geoVariationsTestSuite() {
{"_key":"1232","_id":"test/1232","_rev":"_WjFgKfC---","location":[0,0]},
{"_key":"1173","_id":"test/1173","_rev":"_WjFfvBC---","location":[10,10]},
{"_key":"1197","_id":"test/1197","_rev":"_WjFf9AC---","location":[0,50]},
- {"_key":"1256","_id":"test/1256","_rev":"_WjFgVtC---","location":[10,10]}
+ {"_key":"1256","_id":"test/1256","_rev":"_WjFgVtC---","location":[10,10.1]}
];
geocol.insert(documents);
},
diff --git a/js/server/tests/replication/replication-ongoing-global-spec.js b/js/server/tests/replication/replication-ongoing-global-spec.js
index 463be7a11b..cc731ffb1b 100644
--- a/js/server/tests/replication/replication-ongoing-global-spec.js
+++ b/js/server/tests/replication/replication-ongoing-global-spec.js
@@ -82,6 +82,25 @@ const compareTicks = function(l, r) {
};
+const compareIndexes = function(l, r, eq) {
+ // This can modify l and r and remove id and selectivityEstimate
+ expect(l).to.be.an("array");
+ expect(r).to.be.an("array");
+ for (let x of l) {
+ delete x.id;
+ delete x.selectivityEstimate;
+ }
+ for (let x of r) {
+ delete x.id;
+ delete x.selectivityEstimate;
+ }
+ if (eq) {
+ expect(l).to.eql(r, JSON.stringify(l) + " vs. " + JSON.stringify(r));
+ } else {
+ expect(l).to.not.eql(r, JSON.stringify(l) + " vs. " + JSON.stringify(r));
+ }
+};
+
const waitForReplication = function() {
const wasOnMaster = onMaster;
@@ -254,7 +273,7 @@ describe('Global Replication on a fresh boot', function () {
let scol = db._collection(docColName);
expect(scol.type()).to.equal(2);
expect(scol.properties()).to.deep.equal(mProps);
- expect(scol.getIndexes()).to.deep.equal(mIdxs);
+ compareIndexes(scol.getIndexes(), mIdxs, true);
connectToMaster();
// Second Part Drop it again
@@ -281,7 +300,7 @@ describe('Global Replication on a fresh boot', function () {
let scol = db._collection(edgeColName);
expect(scol.type()).to.equal(3);
expect(scol.properties()).to.deep.equal(mProps);
- expect(scol.getIndexes()).to.deep.equal(mIdxs);
+ compareIndexes(scol.getIndexes(), mIdxs, true);
connectToMaster();
// Second Part Drop it again
@@ -440,8 +459,8 @@ describe('Global Replication on a fresh boot', function () {
connectToSlave();
let sIdx = db._collection(docColName).getIndexes();
- expect(sIdx).to.deep.equal(mIdx);
- expect(sIdx).to.not.deep.equal(oIdx);
+ compareIndexes(sIdx, mIdx, true);
+ compareIndexes(sIdx, oIdx, false);
});
});
});
@@ -493,7 +512,7 @@ describe('Global Replication on a fresh boot', function () {
let scol = db._collection(docColName);
expect(scol.type()).to.equal(2);
expect(scol.properties()).to.deep.equal(mProps);
- expect(scol.getIndexes()).to.deep.equal(mIdxs);
+ compareIndexes(scol.getIndexes(), mIdxs, true);
connectToMaster();
db._useDatabase(dbName);
@@ -524,7 +543,7 @@ describe('Global Replication on a fresh boot', function () {
let scol = db._collection(edgeColName);
expect(scol.type()).to.equal(3);
expect(scol.properties()).to.deep.equal(mProps);
- expect(scol.getIndexes()).to.deep.equal(mIdxs);
+ compareIndexes(scol.getIndexes(), mIdxs, true);
connectToMaster();
db._useDatabase(dbName);
@@ -664,22 +683,16 @@ describe('Global Replication on a fresh boot', function () {
db._collection(docColName).ensureHashIndex("value");
- let mIdx = db._collection(docColName).getIndexes().map(function(idx) {
- delete idx.selectivityEstimate;
- return idx;
- });
+ let mIdx = db._collection(docColName).getIndexes();
waitForReplication();
connectToSlave();
db._useDatabase(dbName);
- let sIdx = db._collection(docColName).getIndexes().map(function(idx) {
- delete idx.selectivityEstimate;
- return idx;
- });
+ let sIdx = db._collection(docColName).getIndexes();
- expect(sIdx).to.deep.equal(mIdx);
- expect(sIdx).to.not.deep.equal(oIdx);
+ compareIndexes(sIdx, mIdx, true);
+ compareIndexes(sIdx, oIdx, false);
});
});
@@ -752,7 +765,7 @@ describe('Setup global replication on empty slave and master has some data', fun
let scol = db._collection(docColName);
expect(scol.type()).to.equal(2);
expect(scol.properties()).to.deep.equal(mProps);
- expect(scol.getIndexes()).to.deep.equal(mIdxs);
+ compareIndexes(scol.getIndexes(), mIdxs, true);
});
it("should have synced the edge collection", function () {
@@ -769,7 +782,7 @@ describe('Setup global replication on empty slave and master has some data', fun
let scol = db._collection(edgeColName);
expect(scol.type()).to.equal(3);
expect(scol.properties()).to.deep.equal(mProps);
- expect(scol.getIndexes()).to.deep.equal(mIdxs);
+ compareIndexes(scol.getIndexes(), mIdxs, true);
});
it("should have synced the database", function () {
@@ -804,7 +817,7 @@ describe('Setup global replication on empty slave and master has some data', fun
let scol = db._collection(docColName);
expect(scol.type()).to.equal(2);
expect(scol.properties()).to.deep.equal(mProps);
- expect(scol.getIndexes()).to.deep.equal(mIdxs);
+ compareIndexes(scol.getIndexes(), mIdxs, true);
});
it("should have synced the edge collection", function () {
@@ -824,7 +837,7 @@ describe('Setup global replication on empty slave and master has some data', fun
let scol = db._collection(edgeColName);
expect(scol.type()).to.equal(3);
expect(scol.properties()).to.deep.equal(mProps);
- expect(scol.getIndexes()).to.deep.equal(mIdxs);
+ compareIndexes(scol.getIndexes(), mIdxs, true);
});
describe("content of an existing collection", function () {
@@ -900,7 +913,7 @@ describe('Test switch off and restart replication', function() {
let scol = db._collection(col);
expect(scol.type()).to.equal(2);
expect(scol.properties()).to.deep.equal(mProps);
- expect(scol.getIndexes()).to.deep.equal(mIdxs);
+ compareIndexes(scol.getIndexes(), mIdxs, true);
// Second part. Delete collection
@@ -938,8 +951,8 @@ describe('Test switch off and restart replication', function() {
connectToSlave();
let scol = db._collection(col);
let sidxs = scol.getIndexes();
- expect(sidxs).to.deep.equal(midxs);
- expect(sidxs).to.not.deep.equal(omidx);
+ compareIndexes(sidxs, midxs, true);
+ compareIndexes(sidxs, omidx, false);
connectToMaster();
db._drop(col);
diff --git a/lib/Basics/datetime.cpp b/lib/Basics/datetime.cpp
index 2e80c9b0a5..473737ab58 100644
--- a/lib/Basics/datetime.cpp
+++ b/lib/Basics/datetime.cpp
@@ -39,9 +39,22 @@ bool arangodb::basics::parse_dateTime(
boost::algorithm::trim(dateTime);
std::regex iso8601_regex(
- "(\\+|\\-)?\\d+(\\-\\d{1,2}(\\-\\d{1,2})?)?(((\\ "
- "|T)\\d\\d\\:\\d\\d(\\:\\d\\d(\\.\\d{1,3})?)?(z|Z|(\\+|\\-)\\d\\d\\:"
- "\\d\\d)?)?|(z|Z)?)?");
+ "(\\+|\\-)?\\d+(\\-\\d{1,2}(\\-\\d{1,2})?)?" // YY[YY]-MM-DD
+ "("
+ "("
+ // Time is optional
+ "(\\ |T)" // T or blank separates date and time
+ "\\d\\d\\:\\d\\d" // time: hh:mm
+ "(\\:\\d\\d(\\.\\d{1,3})?)?" // Optional: :ss.mmms
+ "("
+ "z|Z|" // trailing Z or start of timezone
+ "(\\+|\\-)"
+ "\\d\\d\\:\\d\\d" // timezone hh:mm
+ ")?"
+ ")|"
+ "(z|Z)" // Z
+ ")?"
+ );
if (!std::regex_match(dateTime, iso8601_regex)) {
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME)
@@ -207,4 +220,4 @@ bool arangodb::basics::regex_isoDuration(std::string const& isoDuration, std::sm
return false;
}
return true;
-}
\ No newline at end of file
+}
diff --git a/utils/generateSwagger.py b/utils/generateSwagger.py
index 744fef477c..d1d7964f16 100755
--- a/utils/generateSwagger.py
+++ b/utils/generateSwagger.py
@@ -482,8 +482,6 @@ def generic_handler_desc(cargo, r, message, op, para, name):
line = Typography(line)
para[name] += line + '\n'
- para[name] = removeTrailingBR.sub("", para[name])
-
def start_docublock(cargo, r=Regexen()):
global currentDocuBlock
(fp, last) = cargo
@@ -521,7 +519,7 @@ def restheader(cargo, r=Regexen()):
temp = parameters(last).split(',')
if temp == "":
- raise Exception("Invalid restheader value. got empty string. Maybe missing closing bracket? " + path)
+ raise Exception("Invalid restheader value. got empty string. Maybe missing closing bracket? " + last)
(ucmethod, path) = temp[0].split()