mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of https://github.com/arangodb/arangodb into feature/remove-view-implementation
commit d98469b09d
@@ -337,7 +337,7 @@ a client.
### Using cursors to obtain additional information on internal timings

Cursors can also optionally provide statistics of the internal execution phases. By default, they do not.

-To get to know how long parsing, otpimisation, instanciation and execution took,
+To get to know how long parsing, optimization, instantiation and execution took,
make the server return that by setting the *profile* attribute to
*true* when creating a statement:

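The hunk above documents the *profile* flag; as a reader aid, here is a minimal arangosh sketch of what that looks like in practice (the query and variable names are illustrative, not from this commit):

```js
// A minimal sketch: ask the server to record internal timings for a query.
// The query and variable names are illustrative.
var stmt = db._createStatement({
  query: "FOR i IN [ 1, 2 ] RETURN i",
  options: { profile: true }
});
var cursor = stmt.execute();

// The extra attribute should now include profile timings for parsing,
// optimization, instantiation and execution.
cursor.getExtra();
```
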
@@ -359,3 +359,16 @@ produced statistics:
c.getExtra();
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock 06_workWithAQL_statements12
+
+Query validation
+----------------
+
+The *_parse* method of the *db* object can be used to parse and validate a
+query syntactically, without actually executing it.
+
+@startDocuBlockInline 06_workWithAQL_statements13
+@EXAMPLE_ARANGOSH_OUTPUT{06_workWithAQL_statements13}
+db._parse( "FOR i IN [ 1, 2 ] RETURN i" );
+@END_EXAMPLE_ARANGOSH_OUTPUT
+@endDocuBlock 06_workWithAQL_statements13
+

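Since *_parse* throws on a malformed query, validation is typically wrapped in a try/catch; a hedged sketch (the broken query and the printed messages are illustrative):

```js
// A minimal sketch: _parse throws an ArangoError on a syntax error,
// so validation amounts to a try/catch. The broken query is illustrative.
try {
  db._parse("FOR i IN RETURN i"); // missing the source expression
  print("query is syntactically valid");
} catch (err) {
  print("parse error: " + err.errorMessage);
}
```
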
@@ -10,7 +10,7 @@ The following methods exist on the collection object (returned by *db.name*):
* [collection.drop()](../../DataModeling/Collections/CollectionMethods.md#drop)
* [collection.figures()](../../DataModeling/Collections/CollectionMethods.md#figures)
* [collection.load()](../../DataModeling/Collections/CollectionMethods.md#load)
-* [collection.properties()](../../DataModeling/Collections/CollectionMethods.md#properties)
+* [collection.properties(options)](../../DataModeling/Collections/CollectionMethods.md#properties)
* [collection.revision()](../../DataModeling/Collections/CollectionMethods.md#revision)
* [collection.rotate()](../../DataModeling/Collections/CollectionMethods.md#rotate)
* [collection.toArray()](../../DataModeling/Documents/DocumentMethods.md#toarray)

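The renamed link reflects that *properties* also accepts an options object; a small hedged sketch (the collection name and the chosen option are illustrative):

```js
// A minimal sketch: read, then change, collection properties.
// The collection name and the chosen option are illustrative.
var coll = db._create("demo");
coll.properties();                      // read the current properties
coll.properties({ waitForSync: true }); // change a writable property
db._drop("demo");
```
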
@@ -29,6 +29,7 @@ The following methods exist on the collection object (returned by *db.name*):

* [collection.all()](../../DataModeling/Documents/DocumentMethods.md#all)
* [collection.any()](../../DataModeling/Documents/DocumentMethods.md#any)
* [collection.byExample(example)](../../DataModeling/Documents/DocumentMethods.md#query-by-example)
* [collection.closedRange(attribute, left, right)](../../DataModeling/Documents/DocumentMethods.md#closed-range)
* [collection.document(object)](../../DataModeling/Documents/DocumentMethods.md#document)
* [collection.documents(keys)](../../DataModeling/Documents/DocumentMethods.md#lookup-by-keys)

@@ -40,9 +41,9 @@ The following methods exist on the collection object (returned by *db.name*):
* [collection.edges(vertices)](../../DataModeling/Documents/DocumentMethods.md#edges)
* [collection.iterate(iterator,options)](../../DataModeling/Documents/DocumentMethods.md#misc)
* [collection.outEdges(vertex-id)](../../DataModeling/Documents/DocumentMethods.md#edges)
* [collection.queryByExample(example)](../../DataModeling/Documents/DocumentMethods.md#query-by-example)
* [collection.range(attribute, left, right)](../../DataModeling/Documents/DocumentMethods.md#range)
* [collection.remove(selector)](../../DataModeling/Documents/DocumentMethods.md#remove)
* [collection.removeByExample(example)](../../DataModeling/Documents/DocumentMethods.md#remove-by-example)
* [collection.removeByKeys(keys)](../../DataModeling/Documents/DocumentMethods.md#remove-by-keys)
* [collection.rename()](../../DataModeling/Collections/CollectionMethods.md#rename)
* [collection.replace(selector, data)](../../DataModeling/Documents/DocumentMethods.md#replace)

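A hedged arangosh sketch exercising a few of the listed document methods (the collection name and documents are illustrative):

```js
// A minimal sketch exercising byExample() and removeByKeys() from the list.
// Collection name and documents are illustrative.
var coll = db._create("demo");
coll.insert({ _key: "a", value: 1 });
coll.insert({ _key: "b", value: 2 });

coll.byExample({ value: 1 }).toArray(); // -> [ the document with _key "a" ]
coll.removeByKeys([ "a", "b" ]);        // -> { removed: 2, ignored: 0 }
db._drop("demo");
```
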
@@ -31,6 +31,7 @@ The following methods exists on the *_db* object:
*Collection*

* [db._collection(name)](../../DataModeling/Collections/DatabaseMethods.md#collection)
* [db._collections()](../../DataModeling/Collections/DatabaseMethods.md#all-collections)
* [db._create(name)](../../DataModeling/Collections/DatabaseMethods.md#create)
* [db._drop(name)](../../DataModeling/Collections/DatabaseMethods.md#drop)
* [db._truncate(name)](../../DataModeling/Collections/DatabaseMethods.md#truncate)

@@ -40,6 +41,7 @@ The following methods exists on the *_db* object:
* [db._createStatement(query)](../../../AQL/Invocation/WithArangosh.html#with-createstatement-arangostatement)
* [db._query(query)](../../../AQL/Invocation/WithArangosh.html#with-dbquery)
* [db._explain(query)](../../ReleaseNotes/NewFeatures28.md#miscellaneous-improvements)
+* [db._parse(query)](../../../AQL/Invocation/WithArangosh.html#query-validation)

*Document*

@@ -48,3 +50,16 @@ The following methods exists on the *_db* object:
* [db._remove(selector)](../../DataModeling/Documents/DatabaseMethods.md#remove)
* [db._replace(selector,data)](../../DataModeling/Documents/DatabaseMethods.md#replace)
* [db._update(selector,data)](../../DataModeling/Documents/DatabaseMethods.md#update)
+
+*Views*
+
+* [db._view(name)](../../DataModeling/Views/DatabaseMethods.md#view)
+* [db._views()](../../DataModeling/Views/DatabaseMethods.md#all-views)
+* [db._createView(name, type, properties)](../../DataModeling/Views/DatabaseMethods.md#create)
+* [db._dropView(name)](../../DataModeling/Views/DatabaseMethods.md#drop)
+
+*Global*
+
+* [db._engine()](../../DataModeling/Databases/WorkingWith.md#engine)
+* [db._engineStats()](../../DataModeling/Databases/WorkingWith.md#engine-statistics)
+* [db._executeTransaction()](../../Transactions/TransactionInvocation.md)

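The *Views* block added here is new API surface; a hedged lifecycle sketch (the view name, the `arangosearch` type, and the empty properties object are assumptions about the build, not taken from this commit):

```js
// A hedged sketch of the view lifecycle; "demoView" and the "arangosearch"
// type are assumptions about this build, not taken from the commit.
var view = db._createView("demoView", "arangosearch", {});
db._views();              // list all views, should include "demoView"
db._view("demoView");     // look a single view up by name
db._dropView("demoView");
```
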
@@ -58,7 +58,7 @@ altogether k copies of each shard are kept in the cluster on k different
servers, and are kept in sync. That is, every write operation is automatically
replicated on all copies.

-This is organised using a leader/follower model. At all times, one of the
+This is organized using a leader/follower model. At all times, one of the
servers holding replicas for a shard is "the leader" and all others
are "followers", this configuration is held in the Agency (see
[Scalability](../../Scalability/README.md) for details of the ArangoDB

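The k-copies model described above is requested per collection; a hedged sketch (the collection name and both numbers are illustrative):

```js
// A hedged sketch: in a cluster, keep k = 2 synchronized copies of each of
// the 3 shards. The collection name and both numbers are illustrative.
db._create("demo", { numberOfShards: 3, replicationFactor: 2 });
```
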
@@ -185,12 +185,21 @@ database. The *_system* database itself cannot be dropped.
Databases are dropped asynchronously, and will be physically removed if
all clients have disconnected and references have been garbage-collected.

+### Engine
+
+retrieve the storage engine type used by the server
+`db._engine()`
+
+Returns the name of the storage engine in use (`mmfiles` or `rocksdb`), as well
+as a list of supported features (types of indexes and
+[dfdb](../../Troubleshooting/DatafileDebugger.md)).
+
### Engine statistics

-retrieve statistics related to the storage engine-rocksdb
+retrieve statistics related to the storage engine (rocksdb)
`db._engineStats()`

-Returns some statistics related to storage engine activity, including figures
+Returns some statistics related to the storage engine activity, including figures
about data size, cache usage, etc.

**Note**: Currently this only produces useful output for the RocksDB engine.

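A minimal arangosh sketch of the two calls documented above (the indicated result shape is abridged):

```js
// A minimal sketch of the two calls; the indicated results are abridged.
db._engine();      // e.g. { "name" : "rocksdb", "supports" : { ... } }
db._engineStats(); // engine activity figures; only useful with RocksDB
```
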
@@ -291,7 +291,7 @@ The above referenced chapters describe the various APIs of ArangoDBs graph engin

- [Traversing a graph in full depth](../../Cookbook/Graph/FulldepthTraversal.html)
- [Using an example vertex with the java driver](../../Cookbook/Graph/JavaDriverGraphExampleVertex.html)
-- [Retrieving documents from ArangoDB without knowing the structure](https://docs.arangodb.com/cookbook/Graph/JavaDriverBaseDocument.html)
+- [Retrieving documents from ArangoDB without knowing the structure](../../Cookbook/UseCases/JavaDriverBaseDocument.html)
- [Using a custom visitor from node.js](../../Cookbook/Graph/CustomVisitorFromNodeJs.html)
- [AQL Example Queries on an Actors and Movies Database](../../Cookbook/Graph/ExampleActorsAndMovies.html)

@@ -25,8 +25,8 @@ The documentation is organized in four handbooks:
Features are illustrated with interactive usage examples; you can cut'n'paste them
into [arangosh](Administration/Arangosh/README.md) to try them out. The HTTP
[REST-API](../HTTP/index.html) for driver developers is demonstrated with cut'n'paste
-recepies intended to be used with the [cURL](http://curl.haxx.se). Drivers may provide
-their own examples based on these .js based examples to improve understandeability
+recipes intended to be used with the [cURL](http://curl.haxx.se). Drivers may provide
+their own examples based on these .js based examples to improve understandability
for their respective users, i.e. for the [java driver](https://github.com/arangodb/arangodb-java-driver#learn-more)
some of the samples are re-implemented.

@@ -102,5 +102,5 @@ Mac OS X
OpenSSL 1.1
-----------

-* ArangoDB has been tested with OpenSSL 1.0 only and won't build against 1.1 when compiling on your own. See [here](../../cookbook/Compiling/OpenSSL.html)
+* ArangoDB has been tested with OpenSSL 1.0 only and won't build against 1.1 when compiling on your own. See [here](../../Cookbook/Compiling/OpenSSL.html)
  for how to compile on systems that ship OpenSSL 1.1 by default.

@@ -64,7 +64,7 @@ is here:
* [part 2](https://www.arangodb.com/2014/12/02/building-hypermedia-apis-design)
* [part 3](https://www.arangodb.com/2014/12/08/building-hypermedia-apis-foxxgenerator)

-A cookbook recipe for getting started with FoxxGenerator is [here](https://docs.arangodb.com/2.8/cookbook/FoxxGeneratorFirstSteps.html).
+A cookbook recipe for getting started with FoxxGenerator is [here](https://docs.arangodb.com/2.8/Cookbook/FoxxGeneratorFirstSteps.html).

AQL improvements
----------------

@@ -212,7 +212,7 @@ You can now write tests for your Foxx apps using the Mocha testing framework:
https://www.arangodb.com/2015/04/testing-foxx-mocha/

A recipe for writing tests for your Foxx apps can be found in the cookbook:
-https://docs.arangodb.com/2.8/cookbook/FoxxTesting.html
+https://docs.arangodb.com/2.8/Cookbook/FoxxTesting.html

### API Documentation

@@ -64,7 +64,7 @@ The properties `setup` and `teardown` have been moved into the `scripts` propert

### Foxx Queues

-Function-based Foxx Queue job types are no longer supported. To learn about how you can use the new script-based job types [follow the updated recipe in the cookbook](https://docs.arangodb.com/2.8/cookbook/FoxxQueues.html).
+Function-based Foxx Queue job types are no longer supported. To learn about how you can use the new script-based job types [follow the updated recipe in the cookbook](https://docs.arangodb.com/2.8/Cookbook/FoxxQueues.html).

### Foxx Sessions

@@ -149,7 +149,7 @@ The functions:
* GRAPH_PATHS
* GRAPH_VERTICES

-are covered in [Migrating GRAPH_* Functions from 2.8 or earlier to 3.0](https://docs.arangodb.com/cookbook/AQL/MigratingGraphFunctionsTo3.html)
+are covered in [Migrating GRAPH_* Functions from 2.8 or earlier to 3.0](../../Cookbook/AQL/MigratingGraphFunctionsTo3.html)

* GRAPH_ABSOLUTE_BETWEENNESS
* GRAPH_ABSOLUTE_CLOSENESS

@@ -160,7 +160,7 @@ are covered in [Migrating GRAPH_* Functions from 2.8 or earlier to 3.0](https://
* GRAPH_ECCENTRICITY
* GRAPH_RADIUS

-are covered in [Migrating GRAPH_* Measurements from 2.8 or earlier to 3.0](https://docs.arangodb.com/cookbook/AQL/MigratingMeasurementsTo3.html)
+are covered in [Migrating GRAPH_* Measurements from 2.8 or earlier to 3.0](../../Cookbook/AQL/MigratingMeasurementsTo3.html)

* EDGES
* NEIGHBORS

@@ -168,7 +168,7 @@ are covered in [Migrating GRAPH_* Measurements from 2.8 or earlier to 3.0](https
* TRAVERSAL
* TRAVERSAL_TREE

-are covered in [Migrating anonymous graph functions from 2.8 or earlier to 3.0](https://docs.arangodb.com/3/cookbook/AQL/MigratingEdgeFunctionsTo3.html)
+are covered in [Migrating anonymous graph functions from 2.8 or earlier to 3.0](../../Cookbook/AQL/MigratingEdgeFunctionsTo3.html)

### Typecasting functions

@@ -0,0 +1,47 @@
arangosh> db._parse( "FOR i IN [ 1, 2 ] RETURN i" );
{
  "code" : 200,
  "parsed" : true,
  "collections" : [ ],
  "bindVars" : [ ],
  "ast" : [
    {
      "type" : "root",
      "subNodes" : [
        {
          "type" : "for",
          "subNodes" : [
            {
              "type" : "variable",
              "name" : "i",
              "id" : 0
            },
            {
              "type" : "array",
              "subNodes" : [
                {
                  "type" : "value",
                  "value" : 1
                },
                {
                  "type" : "value",
                  "value" : 2
                }
              ]
            }
          ]
        },
        {
          "type" : "return",
          "subNodes" : [
            {
              "type" : "reference",
              "name" : "i",
              "id" : 0
            }
          ]
        }
      ]
    }
  ]
}

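Because the parse result shown above is a plain object, the AST can be inspected programmatically; a minimal sketch built on that output:

```js
// A minimal sketch: walk the AST root's subNodes from the output above.
var res = db._parse("FOR i IN [ 1, 2 ] RETURN i");
res.ast[0].subNodes.map(function (n) { return n.type; }); // [ "for", "return" ]
```
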
@@ -298,6 +298,8 @@ syntax --option value --sub:option value. Using Valgrind could look like this:
  --extraArgs:scheduler.threads 1 \
  --extraArgs:javascript.gc-frequency 1000000 \
  --extraArgs:javascript.gc-interval 65536 \
+ --extraArgs:log.level debug \
+ --extraArgs:log.force-direct true \
  --javascript.v8-contexts 2 \
  --valgrind /usr/bin/valgrind \
  --valgrindargs:log-file /tmp/valgrindlog.%p

@@ -306,6 +308,9 @@ syntax --option value --sub:option value. Using Valgrind could look like this:
- we specify some arangod arguments via --extraArgs which increase the server performance
- we specify to run using valgrind (this is supported by all facilities)
- we specify some valgrind commandline arguments
+- we set the loglevel to debug
+- we force the logging not to happen asynchroneous
+- eventually you may still add temporary `console.log()` statements to tests you debug.

Running a single unittestsuite
------------------------------

@@ -317,10 +317,10 @@ AqlValue Functions::AddOrSubtractUnitFromTimestamp(Query* query,
  tp_sys_clock_ms resTime;
  if (isSubtract) {
    resTime = tp_sys_clock_ms{sys_days(ymd) + day_time.to_duration() -
-                             std::chrono::duration_cast<milliseconds>(ms)};
+                             std::chrono::duration_cast<duration<int64_t, std::milli>>(ms)};
  } else {
    resTime = tp_sys_clock_ms{sys_days(ymd) + day_time.to_duration() +
-                             std::chrono::duration_cast<milliseconds>(ms)};
+                             std::chrono::duration_cast<duration<int64_t, std::milli>>(ms)};
  }
  return TimeAqlValue(resTime);
}

@@ -2409,7 +2409,7 @@ AqlValue Functions::RegexReplace(arangodb::aql::Query* query,
AqlValue Functions::DateNow(arangodb::aql::Query*, transaction::Methods*,
                            VPackFunctionParameters const&) {
  auto millis =
-      duration_cast<milliseconds>(system_clock::now().time_since_epoch());
+      std::chrono::duration_cast<duration<int64_t, std::milli>>(system_clock::now().time_since_epoch());
  uint64_t dur = millis.count();
  return AqlValue(AqlValueHintUInt(dur));
}

@@ -2462,12 +2462,14 @@ AqlValue Functions::DateFromParameters(
    funcName = "DATE_ISO8601";
  }
  tp_sys_clock_ms tp;
+ duration<int64_t, std::milli> time;

  if (parameters.size() == 1) {
    if (!ParameterToTimePoint(query, trx, parameters, tp, funcName.c_str(),
                              0)) {
      return AqlValue(AqlValueHintNull());
    }
+   time = tp.time_since_epoch();
  } else {
    if (parameters.size() < 3 || parameters.size() > 7) {
      // YMD is a must

@@ -2557,12 +2559,16 @@ AqlValue Functions::DateFromParameters(
      }
    }

-   tp = sys_days(ymd) + h + min + s + ms;
+   time = sys_days(ymd).time_since_epoch();
+   time += h;
+   time += min;
+   time += s;
+   time += ms;
+   tp = tp_sys_clock_ms(time);
  }

  if (asTimestamp) {
-   auto millis = duration_cast<milliseconds>(tp.time_since_epoch());
-   return AqlValue(AqlValueHintInt(millis.count()));
+   return AqlValue(AqlValueHintInt(time.count()));
  } else {
    return TimeAqlValue(tp);
  }

@@ -158,9 +158,6 @@ class RestAqlHandler : public RestVocbaseBaseHandler {
  // dig out vocbase from context and query from ID, handle errors
  bool findQuery(std::string const& idString, Query*& query);

- // name of the queue
- static std::string const QUEUE_NAME;
-
  // our query registry
  QueryRegistry* _queryRegistry;

@@ -260,6 +260,7 @@ var helpArangoCollection = arangosh.createHelpHeadline('ArangoCollection help')
  '  type()                          type of the collection       ' + '\n' +
  '  truncate()                      delete all documents         ' + '\n' +
  '  properties()                    show collection properties   ' + '\n' +
+ '  properties(<data>)              change collection properties ' + '\n' +
  '  drop()                          delete a collection          ' + '\n' +
  '  load()                          load a collection            ' + '\n' +
  '  unload()                        unload a collection          ' + '\n' +

@@ -252,10 +252,11 @@ var helpArangoDatabase = arangosh.createHelpHeadline('ArangoDatabase (db) help')
  '  _createStatement(<data>)                  create and return AQL query ' + '\n' +
  '                                                                        ' + '\n' +
  'View Functions:                                                         ' + '\n' +
- '  _views()                                   list all views             ' + '\n' +
- '  _view(<name>)                              get view by name           ' + '\n' +
- '  _createView(<name>, <type>, <properties>)  creates a new view         ' + '\n' +
- '  _dropView(<name>)                          delete a view              ';
+ '  _views()                                   list all views             ' + '\n' +
+ '  _view(<name>)                              get view by name           ' + '\n' +
+ '  _createView(<name>, <type>,                creates a new view         ' + '\n' +
+ '    <properties>)                                                       ' + '\n' +
+ '  _dropView(<name>)                          delete a view              ';

ArangoDatabase.prototype._help = function () {
  internal.print(helpArangoDatabase);

@@ -283,7 +284,7 @@ ArangoDatabase.prototype._collections = function () {
  var result = [];
  var i;

- // add all collentions to object
+ // add all collections to object
  for (i = 0; i < collections.length; ++i) {
    var collection = new this._collectionConstructor(this, collections[i]);
    this._registerCollection(collection._name, collection);

@@ -284,7 +284,7 @@ function geoVariationsTestSuite() {
        {"_key":"1232","_id":"test/1232","_rev":"_WjFgKfC---","location":[0,0]},
        {"_key":"1173","_id":"test/1173","_rev":"_WjFfvBC---","location":[10,10]},
        {"_key":"1197","_id":"test/1197","_rev":"_WjFf9AC---","location":[0,50]},
-       {"_key":"1256","_id":"test/1256","_rev":"_WjFgVtC---","location":[10,10]}
+       {"_key":"1256","_id":"test/1256","_rev":"_WjFgVtC---","location":[10,10.1]}
      ];
      geocol.insert(documents);
    },

@@ -82,6 +82,25 @@ const compareTicks = function(l, r) {
};


+const compareIndexes = function(l, r, eq) {
+  // This can modify l and r and remove id and selectivityEstimate
+  expect(l).to.be.an("array");
+  expect(r).to.be.an("array");
+  for (let x of l) {
+    delete x.id;
+    delete x.selectivityEstimate;
+  }
+  for (let x of r) {
+    delete x.id;
+    delete x.selectivityEstimate;
+  }
+  if (eq) {
+    expect(l).to.eql(r, JSON.stringify(l) + " vs. " + JSON.stringify(r));
+  } else {
+    expect(l).to.not.eql(r, JSON.stringify(l) + " vs. " + JSON.stringify(r));
+  }
+};
+
const waitForReplication = function() {
  const wasOnMaster = onMaster;

@@ -254,7 +273,7 @@ describe('Global Replication on a fresh boot', function () {
      let scol = db._collection(docColName);
      expect(scol.type()).to.equal(2);
      expect(scol.properties()).to.deep.equal(mProps);
-     expect(scol.getIndexes()).to.deep.equal(mIdxs);
+     compareIndexes(scol.getIndexes(), mIdxs, true);

      connectToMaster();
      // Second Part Drop it again

@@ -281,7 +300,7 @@ describe('Global Replication on a fresh boot', function () {
      let scol = db._collection(edgeColName);
      expect(scol.type()).to.equal(3);
      expect(scol.properties()).to.deep.equal(mProps);
-     expect(scol.getIndexes()).to.deep.equal(mIdxs);
+     compareIndexes(scol.getIndexes(), mIdxs, true);

      connectToMaster();
      // Second Part Drop it again

@@ -440,8 +459,8 @@ describe('Global Replication on a fresh boot', function () {
      connectToSlave();

      let sIdx = db._collection(docColName).getIndexes();
-     expect(sIdx).to.deep.equal(mIdx);
-     expect(sIdx).to.not.deep.equal(oIdx);
+     compareIndexes(sIdx, mIdx, true);
+     compareIndexes(sIdx, oIdx, false);
    });
  });
});

@@ -493,7 +512,7 @@ describe('Global Replication on a fresh boot', function () {
      let scol = db._collection(docColName);
      expect(scol.type()).to.equal(2);
      expect(scol.properties()).to.deep.equal(mProps);
-     expect(scol.getIndexes()).to.deep.equal(mIdxs);
+     compareIndexes(scol.getIndexes(), mIdxs, true);

      connectToMaster();
      db._useDatabase(dbName);

@@ -524,7 +543,7 @@ describe('Global Replication on a fresh boot', function () {
      let scol = db._collection(edgeColName);
      expect(scol.type()).to.equal(3);
      expect(scol.properties()).to.deep.equal(mProps);
-     expect(scol.getIndexes()).to.deep.equal(mIdxs);
+     compareIndexes(scol.getIndexes(), mIdxs, true);

      connectToMaster();
      db._useDatabase(dbName);

@@ -664,22 +683,16 @@ describe('Global Replication on a fresh boot', function () {

      db._collection(docColName).ensureHashIndex("value");

-     let mIdx = db._collection(docColName).getIndexes().map(function(idx) {
-       delete idx.selectivityEstimate;
-       return idx;
-     });
+     let mIdx = db._collection(docColName).getIndexes();

      waitForReplication();
      connectToSlave();
      db._useDatabase(dbName);

-     let sIdx = db._collection(docColName).getIndexes().map(function(idx) {
-       delete idx.selectivityEstimate;
-       return idx;
-     });
+     let sIdx = db._collection(docColName).getIndexes();

-     expect(sIdx).to.deep.equal(mIdx);
-     expect(sIdx).to.not.deep.equal(oIdx);
+     compareIndexes(sIdx, mIdx, true);
+     compareIndexes(sIdx, oIdx, false);
    });
  });

@@ -752,7 +765,7 @@ describe('Setup global replication on empty slave and master has some data', fun
      let scol = db._collection(docColName);
      expect(scol.type()).to.equal(2);
      expect(scol.properties()).to.deep.equal(mProps);
-     expect(scol.getIndexes()).to.deep.equal(mIdxs);
+     compareIndexes(scol.getIndexes(), mIdxs, true);
    });

    it("should have synced the edge collection", function () {

@@ -769,7 +782,7 @@ describe('Setup global replication on empty slave and master has some data', fun
      let scol = db._collection(edgeColName);
      expect(scol.type()).to.equal(3);
      expect(scol.properties()).to.deep.equal(mProps);
-     expect(scol.getIndexes()).to.deep.equal(mIdxs);
+     compareIndexes(scol.getIndexes(), mIdxs, true);
    });

    it("should have synced the database", function () {

@@ -804,7 +817,7 @@ describe('Setup global replication on empty slave and master has some data', fun
      let scol = db._collection(docColName);
      expect(scol.type()).to.equal(2);
      expect(scol.properties()).to.deep.equal(mProps);
-     expect(scol.getIndexes()).to.deep.equal(mIdxs);
+     compareIndexes(scol.getIndexes(), mIdxs, true);
    });

    it("should have synced the edge collection", function () {

@@ -824,7 +837,7 @@ describe('Setup global replication on empty slave and master has some data', fun
      let scol = db._collection(edgeColName);
      expect(scol.type()).to.equal(3);
      expect(scol.properties()).to.deep.equal(mProps);
-     expect(scol.getIndexes()).to.deep.equal(mIdxs);
+     compareIndexes(scol.getIndexes(), mIdxs, true);
    });

    describe("content of an existing collection", function () {

@@ -900,7 +913,7 @@ describe('Test switch off and restart replication', function() {
      let scol = db._collection(col);
      expect(scol.type()).to.equal(2);
      expect(scol.properties()).to.deep.equal(mProps);
-     expect(scol.getIndexes()).to.deep.equal(mIdxs);
+     compareIndexes(scol.getIndexes(), mIdxs, true);

      // Second part. Delete collection

@@ -938,8 +951,8 @@ describe('Test switch off and restart replication', function() {
      connectToSlave();
      let scol = db._collection(col);
      let sidxs = scol.getIndexes();
-     expect(sidxs).to.deep.equal(midxs);
-     expect(sidxs).to.not.deep.equal(omidx);
+     compareIndexes(sidxs, midxs, true);
+     compareIndexes(sidxs, omidx, false);

      connectToMaster();
      db._drop(col);

@@ -39,9 +39,22 @@ bool arangodb::basics::parse_dateTime(
  boost::algorithm::trim(dateTime);

  std::regex iso8601_regex(
-     "(\\+|\\-)?\\d+(\\-\\d{1,2}(\\-\\d{1,2})?)?(((\\ "
-     "|T)\\d\\d\\:\\d\\d(\\:\\d\\d(\\.\\d{1,3})?)?(z|Z|(\\+|\\-)\\d\\d\\:"
-     "\\d\\d)?)?|(z|Z)?)?");
+     "(\\+|\\-)?\\d+(\\-\\d{1,2}(\\-\\d{1,2})?)?"  // YY[YY]-MM-DD
+     "("
+       "("
+         // Time is optional
+         "(\\ |T)"                    // T or blank separates date and time
+         "\\d\\d\\:\\d\\d"            // time: hh:mm
+         "(\\:\\d\\d(\\.\\d{1,3})?)?" // Optional: :ss.mmms
+         "("
+           "z|Z|"                     // trailing Z or start of timezone
+           "(\\+|\\-)"
+           "\\d\\d\\:\\d\\d"          // timezone hh:mm
+         ")?"
+       ")|"
+       "(z|Z)"                        // Z
+     ")?"
+  );

  if (!std::regex_match(dateTime, iso8601_regex)) {
    LOG_TOPIC(DEBUG, arangodb::Logger::FIXME)

@@ -207,4 +220,4 @@ bool arangodb::basics::regex_isoDuration(std::string const& isoDuration, std::sm
    return false;
  }
  return true;
}
}

@@ -482,8 +482,6 @@ def generic_handler_desc(cargo, r, message, op, para, name):
        line = Typography(line)
        para[name] += line + '\n'

-   para[name] = removeTrailingBR.sub("", para[name])
-
def start_docublock(cargo, r=Regexen()):
    global currentDocuBlock
    (fp, last) = cargo

@@ -521,7 +519,7 @@ def restheader(cargo, r=Regexen()):

    temp = parameters(last).split(',')
    if temp == "":
-       raise Exception("Invalid restheader value. got empty string. Maybe missing closing bracket? " + path)
+       raise Exception("Invalid restheader value. got empty string. Maybe missing closing bracket? " + last)

    (ucmethod, path) = temp[0].split()