mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of github.com:arangodb/arangodb into devel
Commit bca08d012f
@ -7,8 +7,9 @@ newVersionNumber = $(shell cat ../../VERSION)
|
|||
################################################################################
|
||||
# per book targets
|
||||
check-summary:
|
||||
@echo "##### checking summary for $(NAME)"
|
||||
@find ppbooks/$(NAME) -name \*.md |sed -e "s;ppbooks/$(NAME)/;;" |grep -vf SummaryBlacklist.txt |sort > /tmp/is_md.txt
|
||||
@cat $(NAME)/SUMMARY.md |grep '(' |sed -e "s;.*(;;" -e "s;).*;;" |sort |grep -v '# Summary' > /tmp/is_summary.txt
|
||||
@cat $(NAME)/SUMMARY.md |grep -v '^ *# '|grep '(' |sed -e "s;.*(;;" -e "s;).*;;" |sort > /tmp/is_summary.txt
|
||||
@if test "`comm -3 /tmp/is_md.txt /tmp/is_summary.txt|wc -l`" -ne 0; then \
|
||||
echo "not all files are mapped to the summary!"; \
|
||||
echo " files found | files in summary"; \
|
||||
|
@ -17,6 +18,7 @@ check-summary:
|
|||
fi
|
||||
|
||||
book-check-leftover-docublocks:
|
||||
@echo "##### checking for left over docublocks in $(NAME)"
|
||||
@if test "`grep -r \"@startDocuBlock\" --include \"*.md\" ppbooks/$(NAME) | wc -l`" -ne 0; then \
|
||||
echo ; \
|
||||
echo "@startDocuBlock markers still found in generated output files:"; \
|
||||
|
@ -25,6 +27,7 @@ book-check-leftover-docublocks:
|
|||
fi
|
||||
|
||||
book-check-restheader-leftovers:
|
||||
@echo "##### checking for restheader leftovers in $(NAME)"
|
||||
@if test "`find ppbooks/$(NAME) -name '*.md' -exec grep -- '^@[A-Z]*' {} \; -print | wc -l`" -gt 0; then \
|
||||
echo "found these unconverted Swagger Restapi tags: "; \
|
||||
find ppbooks/$(NAME) -name '*.md' -exec grep '^@[A-Z]*' {} \; -print; \
|
||||
|
@ -33,6 +36,7 @@ book-check-restheader-leftovers:
|
|||
|
||||
# Check for all lines starting with exclamation marks, except image links, which are an exclamation mark + bracket
|
||||
book-check-mdpp-leftovers:
|
||||
@echo "##### checking for mdpp leftovers for $(NAME)"
|
||||
@if test "`find ppbooks/$(NAME) -name '*.md' -exec grep -- '^![^\[]' {} \; -print | wc -l`" -gt 0; then \
|
||||
echo "found these unconverted Markdown-PP tags: "; \
|
||||
find ppbooks/$(NAME) -name '*.md' -exec grep '^![^\[]' {} \; -print; \
|
||||
|
@ -40,6 +44,7 @@ book-check-mdpp-leftovers:
|
|||
fi
|
||||
|
||||
ppbook-precheck-bad-code-sections:
|
||||
@echo "##### checking for bad code sections in $(NAME)"
|
||||
@if grep -qR '^``` *.* ' $(NAME); then \
|
||||
echo "tripple tics with blanks afterwards found: "; \
|
||||
grep -R '^``` *.* ' $(NAME); \
|
||||
|
@ -47,6 +52,7 @@ ppbook-precheck-bad-code-sections:
|
|||
fi
|
||||
|
||||
ppbook-check-html-link:
|
||||
@echo "##### checking for invalid HTML links in $(NAME)"
|
||||
@if test "`egrep -r '\[.*\]\(.*\)' ppbooks/$(NAME) |grep '\.md:' |grep html |grep -v http://|grep -v https:// |grep -v header.css |wc -l`" -gt 0; then \
|
||||
echo "Found links to .html files inside of the document! use <foo>.md instead!"; \
|
||||
echo; \
|
||||
|
@ -55,6 +61,7 @@ ppbook-check-html-link:
|
|||
fi
|
||||
|
||||
ppbook-check-directory-link:
|
||||
@echo "##### checking for invalid md links in $(NAME)"
|
||||
@if test "`egrep -r '\[.*\]\(.*\)' ppbooks/$(NAME) | grep '\.md:' |grep -v html |grep -v http://|grep -v https:// |grep -v header.css |grep -v node_modules |grep -v node_modules | grep -v '\.md' | wc -l`" -gt 0; then \
|
||||
echo "Found director links! use ../<directory>/README.md instead!"; \
|
||||
echo; \
|
||||
|
@ -63,6 +70,7 @@ ppbook-check-directory-link:
|
|||
fi
|
||||
|
||||
book-check-markdown-leftovers:
|
||||
@echo "##### checking for remaining markdown snippets in the HTML output of $(NAME)"
|
||||
@if test "`find books/$(NAME) -name '*.html' -exec grep -- '##' {} \; -print | wc -l`" -gt 0; then \
|
||||
echo "found these unconverted markdown titles: "; \
|
||||
find books/$(NAME) -name '*.html' -exec grep '##' {} \; -print; \
|
||||
|
@ -80,6 +88,7 @@ book-check-markdown-leftovers:
|
|||
fi
|
||||
|
||||
book-check-dangling-anchors:
|
||||
@echo "##### checking for dangling anchors in $(NAME)"
|
||||
@grep -R "a href.*#" books/$(NAME) | \
|
||||
egrep -v "(styles/header\.js|/app\.js|class=\"navigation|https*://|href=\"#\")" | \
|
||||
sed 's;\(.*\.html\):.*a href="\(.*\)#\(.*\)">.*</a>.*;\1,\2,\3;' | grep -v " " > /tmp/anchorlist.txt
|
||||
|
@ -110,7 +119,7 @@ book-check-dangling-anchors:
|
|||
fi
|
||||
|
||||
build-book-symlinks:
|
||||
echo "generate backwards compatibility symlinks:"
|
||||
echo "##### generate backwards compatibility symlinks for $(NAME)"
|
||||
cd books/$(NAME); pwd; \
|
||||
find . -name "README.mdpp" |\
|
||||
sed -e 's:README\.mdpp$$::' |\
|
||||
|
@ -118,6 +127,7 @@ build-book-symlinks:
|
|||
bash
|
||||
|
||||
build-book:
|
||||
@echo "##### Generating book $(NAME)"
|
||||
make ppbook-precheck-bad-code-sections $(NAME)
|
||||
if test ! -d ppbooks/$(NAME); then \
|
||||
mkdir -p ppbooks/$(NAME); \
|
||||
|
@ -147,18 +157,17 @@ build-book:
|
|||
|
||||
test -d books/$(NAME) || mkdir -p books/$(NAME)
|
||||
|
||||
#make check-summary
|
||||
#make book-check-leftover-docublocks
|
||||
#make book-check-restheader-leftovers
|
||||
#make book-check-mdpp-leftovers
|
||||
#make ppbook-check-html-link
|
||||
#make ppbook-check-directory-link
|
||||
make check-summary
|
||||
make book-check-leftover-docublocks
|
||||
make book-check-restheader-leftovers
|
||||
make book-check-mdpp-leftovers
|
||||
make ppbook-check-directory-link
|
||||
|
||||
cd ppbooks/$(NAME) && gitbook install
|
||||
cd ppbooks/$(NAME) && gitbook build ./ ./../../books/$(NAME)
|
||||
python ../Scripts/deprecated.py
|
||||
|
||||
#make book-check-markdown-leftovers
|
||||
make book-check-markdown-leftovers
|
||||
|
||||
clean-book:
|
||||
@rm -rvf books/$(NAME)
|
||||
|
@ -197,7 +206,7 @@ check-docublocks:
|
|||
>> /tmp/rawindoc.txt
|
||||
cat /tmp/rawindoc.txt | sed -e "s;.*ck ;;" -e "s;.*ne ;;" |sort -u > /tmp/indoc.txt
|
||||
grep -R '^/// @startDocuBlock' ../DocuBlocks --include "*.md" --include "*.mdpp" |grep -v aardvark > /tmp/rawinprog.txt
|
||||
# searching the Inline docublocks needs some more blacklisting:
|
||||
# searching the Inline docublocks needs some more blacklisting:
|
||||
grep -R '@startDocuBlockInline' --include "*.h" --include "*.cpp" --include "*.js" --include "*.mdpp" . |\
|
||||
grep -v ppbook |\
|
||||
grep -v allComments.txt |\
|
||||
|
@ -254,4 +263,10 @@ build-books:
|
|||
make build-books-keep-md NAME=Users
|
||||
make build-books-keep-md NAME=AQL
|
||||
make build-books-keep-md NAME=HTTP
|
||||
make check-docublocks
|
||||
|
||||
make ppbook-check-html-link NAME=Users
|
||||
make ppbook-check-html-link NAME=AQL
|
||||
make ppbook-check-html-link NAME=HTTP
|
||||
|
||||
make check-docublocks
|
||||
|
||||
|
|
|
@ -1,3 +1,48 @@
|
|||
!CHAPTER Static file assets
|
||||
|
||||
TODO
|
||||
The most flexible way to serve files in your Foxx service is to simply pass them through in your router using [the context object's `fileName` method][CONTEXT] and [the response object's `sendFile` method][RESPONSE]:
|
||||
|
||||
```js
|
||||
router.get('/some/filename.png', function (req, res) {
|
||||
const filePath = module.context.fileName('some-local-filename.png');
|
||||
res.sendFile(filePath);
|
||||
});
|
||||
```
|
||||
|
||||
While this approach allows for greater control over how the file is sent to the client and who is able to access it, doing this for all your static assets can get tedious.
|
||||
|
||||
Alternatively you can specify file assets that should be served by your Foxx service directly in the [service manifest][MANIFEST] using the `files` attribute:
|
||||
|
||||
```json
|
||||
"files": {
|
||||
"/some/filename.png": {
|
||||
"path": "some-local-filename.png",
|
||||
"type": "image/png",
|
||||
"gzip": false
|
||||
},
|
||||
"/favicon.ico": "bookmark.ico",
|
||||
"/static": "my-assets-folder"
|
||||
}
|
||||
```
|
||||
|
||||
Each entry in the `files` attribute can represent either a single file or a directory. When serving entire directories, the key acts as a prefix and requests to that prefix will be resolved within the given directory.
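
With the manifest above, a request to `/static/style.css` would for example be resolved to the file `style.css` inside `my-assets-folder`.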
|
||||
|
||||
**Options**
|
||||
|
||||
* **path**: `string`
|
||||
|
||||
The relative path of the file or folder within the service.
|
||||
|
||||
* **type**: `string` (optional)
|
||||
|
||||
The MIME content type of the file. Defaults to an intelligent guess based on the filename's extension.
|
||||
|
||||
* **gzip**: `boolean` (Default: `false`)
|
||||
|
||||
If set to `true` the file will be served with gzip-encoding if supported by the client. This can be useful when serving text files like client-side JavaScript, CSS or HTML.
|
||||
|
||||
If a string is provided instead of an object, it will be interpreted as the *path* option.
|
||||
|
||||
[CONTEXT]: './Context.md'
|
||||
[MANIFEST]: './Manifest.md'
|
||||
[RESPONSE]: './Response.md'
|
||||
|
|
|
@ -106,7 +106,54 @@ router.get('/hello/:name', function (req, res) {
|
|||
|
||||
The first line imports the [`joi` module from npm][JOI] which comes bundled with ArangoDB. Joi is a validation library that is used throughout Foxx to define schemas and parameter types.
|
||||
|
||||
**Note**: You can bundle your own modules from npm by installing them in your service folder and making sure the `node_modules` folder is included in your zip archive. For more information see [the chapter on modules][MODULES].
|
||||
**Note**: You can bundle your own modules from npm by installing them in your service folder and making sure the `node_modules` folder is included in your zip archive. For more information see [the section on module dependencies in the chapter on dependencies][DEPENDENCIES].
|
||||
|
||||
The `pathParam` method allows us to specify parameters we are expecting in the path. The first argument corresponds to the parameter name in the path, the second argument is a joi schema the parameter is expected to match and the final argument serves to describe the parameter in the API documentation.
|
||||
|
||||
The path parameters are accessible from the `pathParams` property of the request object. We're using a template string to generate the server's response containing the parameter's value.
|
||||
|
||||
Note that routes with path parameters that fail to validate for the request URL will be skipped as if they didn't exist. This allows you to define multiple routes that are only distinguished by the schemas of their path parameters (e.g. a route taking only numeric parameters and one taking any string as a fallback).
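
As a quick illustration, such a fallback pair could look like the following sketch (the route paths and parameter names are made up for this example; it assumes the same `router` and `joi` objects as above):

```js
// Handles /user/42 and anything else that validates as a number.
router.get('/user/:age', function (req, res) {
  res.send(`Age: ${req.pathParams.age}`);
})
.pathParam('age', joi.number().required(), 'Age of the user.');

// Fallback: handles /user/alice and any other string in that position.
router.get('/user/:name', function (req, res) {
  res.send(`Name: ${req.pathParams.name}`);
})
.pathParam('name', joi.string().required(), 'Name of the user.');
```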
|
||||
|
||||
![Screenshot of the API docs after a request to /hello/world]()
|
||||
|
||||
Let's take this further and create a route that takes a JSON request body:
|
||||
|
||||
```js
|
||||
const requestSchema = joi.object({
|
||||
values: joi.array().items(joi.number().required()).required()
|
||||
}).required();
|
||||
const responseSchema = joi.object({
|
||||
result: joi.number().required()
|
||||
}).required();
|
||||
router.post('/sum', function (req, res) {
|
||||
const values = req.body.values;
|
||||
res.send({
|
||||
result: values.reduce(function (a, b) {
|
||||
return a + b;
|
||||
}, 0)
|
||||
});
|
||||
})
|
||||
.body(requestSchema, 'Values to add together.')
|
||||
.response(responseSchema, 'Sum of the input values.')
|
||||
.summary('Add up numbers')
|
||||
.description('Calculates the sum of an array of number values.');
|
||||
```
|
||||
|
||||
Note that we used `post` to define this route instead of `get` (which does not support request bodies). Trying to send a GET request to this route's URL (in the absence of a `get` route for the same path) will result in Foxx responding with an appropriate error response, indicating the supported HTTP methods.
|
||||
|
||||
As this route not only expects a JSON object as input but also responds with a JSON object as output, we need to define two schemas. We don't strictly need a response schema, but it helps document what the route is expected to respond with and will show up in the API documentation.
|
||||
|
||||
Because we're passing a schema to the `response` method, we don't need to explicitly tell Foxx we are sending a JSON response. The presence of a schema in the absence of a content type always implies we want JSON, though we could add `["application/json"]` as an additional argument after the schema if we wanted to make this more explicit.
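
For instance, the response definition from the example above could spell this out as follows (a sketch, assuming the content types array is accepted directly after the schema as described):

```js
.response(responseSchema, ["application/json"], 'Sum of the input values.')
```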
|
||||
|
||||
The `body` method works the same way as the `response` method except the schema will be used to validate the request body. If the request body can't be parsed as JSON or doesn't match the schema, Foxx will reject the request with an appropriate error response.
|
||||
|
||||
![Screenshot of the API docs after a request with an array of numbers]()
|
||||
|
||||
!SECTION Using the database
|
||||
|
||||
TODO
|
||||
|
||||
!SECTION Next steps
|
||||
|
||||
TODO
|
||||
|
||||
|
@ -117,4 +164,4 @@ TODO
|
|||
[REQUEST]: ./Router/Request.md
|
||||
[RESPONSE]: ./Router/Response.md
|
||||
[ROUTES]: ./Router/Endpoints.md
|
||||
[MODULES]: ./Modules.md
|
||||
[DEPENDENCIES]: ./Dependencies.md
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
!CHAPTER Other Foxx modules
|
||||
!CHAPTER Related modules
|
||||
|
||||
TODO
|
||||
|
|
|
@ -16,6 +16,16 @@ While Foxx is primarily designed to be used to access the database itself, Arang
|
|||
|
||||
Finally [Scripts][SCRIPTS] can be used to perform one-off tasks, which can also be scheduled to be performed asynchronously using the built-in job queue.
|
||||
|
||||
!SECTION Development mode
|
||||
|
||||
TODO
|
||||
|
||||
!SECTION Foxx store
|
||||
|
||||
TODO
|
||||
|
||||
!SECTION Cluster-Foxx
|
||||
|
||||
TODO
|
||||
|
||||
[CONTEXT]: ./Context.md
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
!CHAPTER Router Module
|
||||
!CHAPTER Routers
|
||||
|
||||
`const createRouter = require('@arangodb/foxx/router');`
|
||||
|
||||
|
@ -6,6 +6,74 @@ TODO
|
|||
|
||||
Routers need to be mounted to expose their HTTP routes. See [service context][CONTEXT].
|
||||
|
||||
!SECTION Creating a router
|
||||
|
||||
`createRouter(): Router`
|
||||
|
||||
This returns a new, clean Router object that has not yet been mounted in the service and can be exported like any other object.
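
A minimal sketch of creating, using and mounting a router (assuming this lives in your service's main file):

```js
'use strict';
const createRouter = require('@arangodb/foxx/router');
const router = createRouter();

// Define routes on the router as usual.
router.get('/hello', function (req, res) {
  res.send({hello: 'world'});
});

// Mounting the router exposes its routes; see the service context chapter.
module.context.use(router);
```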
|
||||
|
||||
!SECTION Request handlers
|
||||
|
||||
`router.get([path], handler, [name]): Endpoint`
|
||||
|
||||
`router.post([path], handler, [name]): Endpoint`
|
||||
|
||||
`router.put([path], handler, [name]): Endpoint`
|
||||
|
||||
`router.patch([path], handler, [name]): Endpoint`
|
||||
|
||||
`router.delete([path], handler, [name]): Endpoint`
|
||||
|
||||
`router.all([path], handler, [name]): Endpoint`
|
||||
|
||||
TODO
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **path**: `string` (Default: `"/"`)
|
||||
|
||||
The path of the request handler relative to the base path the Router is mounted at. If omitted, the request handler will handle requests to the base path of the Router. For information on defining dynamic routes see the section on path parameters in the [chapter on router endpoints][ENDPOINTS].
|
||||
|
||||
* **handler**: `Function`
|
||||
|
||||
A function that takes the following arguments:
|
||||
|
||||
* **req**: `Request`
|
||||
|
||||
An incoming server request object.
|
||||
|
||||
* **res**: `Response`
|
||||
|
||||
An outgoing server response.
|
||||
|
||||
* **name**: `string` (optional)
|
||||
|
||||
A name that can be used to generate URLs for the endpoint. For more information see the `reverse` method of the [request object][REQUEST].
|
||||
|
||||
TODO
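
As a sketch (the paths and the endpoint name are made up for illustration, assuming a router created with `createRouter()` as above), a named endpoint and a route that generates a URL for it might look like this:

```js
// A named endpoint: the third argument is the endpoint's name.
router.get('/users/:id', function (req, res) {
  res.send({id: req.pathParams.id});
}, 'user-details');

// Elsewhere, the name can be used to generate a URL for that endpoint
// via the reverse method of the request object.
router.get('/users/:id/link', function (req, res) {
  res.send({url: req.reverse('user-details', {id: req.pathParams.id})});
});
```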
|
||||
|
||||
!SECTION Mounting child routers and middleware
|
||||
|
||||
`router.use([path], handler, [name]): Endpoint`
|
||||
|
||||
TODO
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **path**: `string` (optional)
|
||||
|
||||
TODO
|
||||
|
||||
* **handler**: `Router | Middleware`
|
||||
|
||||
TODO
|
||||
|
||||
* **name**: `string` (optional)
|
||||
|
||||
A name that can be used to generate URLs for endpoints of this router. For more information see the `reverse` method of the [request object][REQUEST]. Has no effect if *handler* is a Middleware.
|
||||
|
||||
TODO
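
A brief sketch of mounting a child router under a path prefix (the names here are illustrative):

```js
const createRouter = require('@arangodb/foxx/router');
const router = createRouter();
const usersRouter = createRouter();

// Routes defined on the child router...
usersRouter.get('/', function (req, res) {
  res.send(['alice', 'bob']);
});

// ...become available under the /users prefix of the parent router.
router.use('/users', usersRouter);

module.context.use(router);
```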
|
||||
|
||||
[CONTEXT]: ../Context.md
|
||||
[ENDPOINTS]: ./Endpoints.md
|
||||
[REQUEST]: ./Request.md
|
||||
|
|
|
@ -1,76 +0,0 @@
|
|||
!CHAPTER Routers
|
||||
|
||||
`const createRouter = require('@arangodb/foxx/router');`
|
||||
|
||||
TODO
|
||||
|
||||
!SECTION Creating a router
|
||||
|
||||
`createRouter(): Router`
|
||||
|
||||
This returns a new, clean Router object that has not yet been mounted in the service and can be exported like any other object.
|
||||
|
||||
!SECTION Request handlers
|
||||
|
||||
`router.get([path], handler, [name]): Endpoint`
|
||||
|
||||
`router.post([path], handler, [name]): Endpoint`
|
||||
|
||||
`router.put([path], handler, [name]): Endpoint`
|
||||
|
||||
`router.patch([path], handler, [name]): Endpoint`
|
||||
|
||||
`router.delete([path], handler, [name]): Endpoint`
|
||||
|
||||
`router.all([path], handler, [name]): Endpoint`
|
||||
|
||||
TODO
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **path**: `string` (Default: `"/"`)
|
||||
|
||||
The path of the request handler relative to the base path the Router is mounted at. If omitted, the request handler will handle requests to the base path of the Router. For information on defining dynamic routes see the section on path parameters in the [chapter on router endpoints][ENDPOINTS].
|
||||
|
||||
* **handler**: `Function`
|
||||
|
||||
A function that takes the following arguments:
|
||||
|
||||
* **req**: `Request`
|
||||
|
||||
An incoming server request object.
|
||||
|
||||
* **res**: `Response`
|
||||
|
||||
An outgoing server response.
|
||||
|
||||
* **name**: `string` (optional)
|
||||
|
||||
A name that can be used to generate URLs for the endpoint. For more information see the `reverse` method of the [request object][REQUEST].
|
||||
|
||||
TODO
|
||||
|
||||
!SECTION Mounting child routers and middleware
|
||||
|
||||
`router.use([path], handler, [name]): Endpoint`
|
||||
|
||||
TODO
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **path**: `string` (optional)
|
||||
|
||||
TODO
|
||||
|
||||
* **handler**: `Router | Middleware`
|
||||
|
||||
TODO
|
||||
|
||||
* **name**: `string` (optional)
|
||||
|
||||
A name that can be used to generate URLs for endpoints of this router. For more information see the `reverse` method of the [request object][REQUEST]. Has no effect if *handler* is a Middleware.
|
||||
|
||||
TODO
|
||||
|
||||
[ENDPOINTS]: ./Endpoints.md
|
||||
[REQUEST]: ./Request.md
|
|
@ -4,26 +4,57 @@
|
|||
|
||||
The JWT session storage converts sessions to and from [JSON Web Tokens](https://jwt.io/).
|
||||
|
||||
**Examples**
|
||||
|
||||
```js
|
||||
// Pass in a secure secret from the Foxx configuration
|
||||
const secret = module.context.configuration.jwtSecret;
|
||||
const sessions = sessionsMiddleware({
|
||||
storage: jwtStorage(secret),
|
||||
transport: 'header'
|
||||
});
|
||||
module.context.use(sessions);
|
||||
```
|
||||
|
||||
!SECTION Creating a storage
|
||||
|
||||
`jwtStorage(options): Storage`
|
||||
|
||||
Creates a [Storage][STORAGES] that can be used in the sessions middleware.
|
||||
|
||||
**Note:** While the "none" algorithm (i.e. no signature) is supported, it provides no security and allows clients to make arbitrary modifications to the payload. It should not be used unless you are certain you specifically need it.
|
||||
|
||||
**Arguments**
|
||||
|
||||
* **options**: `Object`
|
||||
|
||||
An object with the following properties:
|
||||
|
||||
* **algorithm**: `string`
|
||||
* **algorithm**: `string` (Default: `"HS512"`)
|
||||
|
||||
TODO
|
||||
The algorithm to use for signing the token.
|
||||
|
||||
Supported values:
|
||||
|
||||
* `"HS256"` (HMAC-SHA256)
|
||||
* `"HS384"` (HMAC-SHA384)
|
||||
* `"HS512"` (HMAC-SHA512)
|
||||
* `"none"` (no signature)
|
||||
|
||||
* **secret**: `string`
|
||||
|
||||
TODO
|
||||
The secret to use for signing the token.
|
||||
|
||||
TODO
|
||||
This field is forbidden when using the "none" algorithm but required otherwise.
|
||||
|
||||
* **ttl**: `number` (Default: `3600`)
|
||||
|
||||
The maximum lifetime of the token in seconds. You may want to keep this short, as a new token is generated on every request, allowing clients to refresh tokens automatically.
|
||||
|
||||
* **verify**: `boolean` (Default: `true`)
|
||||
|
||||
If set to `false`, the signature will not be verified but will still be generated (unless the "none" algorithm is used).
|
||||
|
||||
If a string is passed instead of an options object, it will be interpreted as the *secret* option.
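
So, as a sketch, the example at the top of this page could also be written using this shorthand (assuming the default HS512 algorithm is acceptable):

```js
const sessions = sessionsMiddleware({
  storage: jwtStorage(module.context.configuration.jwtSecret),
  transport: 'header'
});
module.context.use(sessions);
```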
|
||||
|
||||
[STORAGES]: ./README.md
|
||||
|
|
|
@ -38,7 +38,7 @@ new() {
|
|||
|
||||
`storage.fromClient(sid): Session | null`
|
||||
|
||||
TODO
|
||||
Resolves or deserializes a session identifier to a session object.
|
||||
|
||||
**Arguments**
|
||||
|
||||
|
@ -60,7 +60,7 @@ fromClient(sid) {
|
|||
|
||||
`storage.forClient(session): string | null`
|
||||
|
||||
TODO
|
||||
Derives a session identifier from the given session object.
|
||||
|
||||
**Arguments**
|
||||
|
||||
|
|
|
@ -2,7 +2,24 @@
|
|||
|
||||
`const cookieTransport = require('@arangodb/foxx/sessions/transports/cookie');`
|
||||
|
||||
TODO
|
||||
The cookie transport stores session identifiers in cookies on the request and response object.
|
||||
|
||||
**Examples**
|
||||
|
||||
```js
|
||||
// Pass in a secure secret from the Foxx configuration
|
||||
const secret = module.context.configuration.cookieSecret;
|
||||
const sessions = sessionsMiddleware({
|
||||
storage: module.context.collection('sessions'),
|
||||
transport: cookieTransport({
|
||||
name: 'FOXXSESSID',
|
||||
ttl: 60 * 60 * 24 * 7, // one week in seconds
|
||||
algorithm: 'sha256',
|
||||
secret: secret
|
||||
})
|
||||
});
|
||||
module.context.use(sessions);
|
||||
```
|
||||
|
||||
!SECTION Creating a transport
|
||||
|
||||
|
@ -18,20 +35,21 @@ Creates a [Transport][TRANSPORT] that can be used in the sessions middleware.
|
|||
|
||||
* **name**: `string` (Default: `"sid"`)
|
||||
|
||||
TODO
|
||||
The name of the cookie.
|
||||
|
||||
* **ttl**: `number` (optional)
|
||||
|
||||
TODO
|
||||
|
||||
* **secret**: `string` (optional)
|
||||
|
||||
TODO
|
||||
Cookie lifetime in seconds.
|
||||
|
||||
* **algorithm**: `string` (optional)
|
||||
|
||||
TODO
|
||||
The algorithm used to sign and verify the cookie. If no algorithm is specified, the cookie will not be signed or verified. See the [cookie method on the response object][RESPONSE].
|
||||
|
||||
* **secret**: `string` (optional)
|
||||
|
||||
Secret to use for the signed cookie. Will be ignored if no algorithm is provided.
|
||||
|
||||
If a string is passed instead of an options object, it will be interpreted as the *name* option.
|
||||
|
||||
[RESPONSE]: ../../Router/Response.md
|
||||
[TRANSPORT]: ./README.md
|
||||
|
|
|
@ -2,7 +2,17 @@
|
|||
|
||||
`const headerTransport = require('@arangodb/foxx/sessions/transports/header');`
|
||||
|
||||
TODO
|
||||
The header transport stores session identifiers in headers on the request and response objects.
|
||||
|
||||
**Examples**
|
||||
|
||||
```js
|
||||
const sessions = sessionsMiddleware({
|
||||
storage: module.context.collection('sessions'),
|
||||
transport: headerTransport('X-FOXXSESSID')
|
||||
});
|
||||
module.context.use(sessions);
|
||||
```
|
||||
|
||||
!SECTION Creating a transport
|
||||
|
||||
|
@ -18,7 +28,7 @@ Creates a [Transport][TRANSPORT] that can be used in the sessions middleware.
|
|||
|
||||
* **name**: `string` (Default: `X-Session-Id`)
|
||||
|
||||
TODO
|
||||
Name of the header that contains the session identifier (not case sensitive).
|
||||
|
||||
If a string is passed instead of an options object, it will be interpreted as the *name* option.
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@ select your operating system and download ArangoDB. You may also follow
|
|||
the instructions on how to install with a package manager, if available.
|
||||
|
||||
Start up the server by running `arangod`.
|
||||
!TODO explain how to do that on all major platforms in the most simple way
|
||||
!TODO explain how to do that on all major platforms in the most simple way
|
||||
|
||||
For startup parameters, installation in a cluster and so on, see
|
||||
[Installing](Installing/README.md).
|
||||
|
|
|
@ -5,9 +5,9 @@ upgrading to ArangoDB 3.0, and adjust any client programs if necessary.
|
|||
|
||||
!SECTION Build system
|
||||
|
||||
Building ArangoDB 3.0 from source now requires CMake.
|
||||
Building ArangoDB 3.0 from source now requires CMake.
|
||||
|
||||
The pre-3.0 build system used a configure-based approach. The steps to build
|
||||
The pre-3.0 build system used a configure-based approach. The steps to build
|
||||
ArangoDB from source code in 2.8 were:
|
||||
|
||||
```
|
||||
|
@ -16,7 +16,7 @@ make setup
|
|||
make
|
||||
```
|
||||
|
||||
These steps will not work anymore, as ArangoDB 3.0 does not come with a
|
||||
These steps will not work anymore, as ArangoDB 3.0 does not come with a
|
||||
configure script.
|
||||
|
||||
To build 3.0 on Linux, create a separate build directory first:
|
||||
|
@ -31,7 +31,7 @@ and then create the initial build scripts once using CMake:
|
|||
(cd build && cmake <options> ..)
|
||||
```
|
||||
|
||||
The above command will configure the build and check for the required
|
||||
The above command will configure the build and check for the required
|
||||
dependencies. If everything works well the actual build can be started with
|
||||
|
||||
```
|
||||
|
@ -60,7 +60,7 @@ used a pattern `collection-<id>` without the random number.
|
|||
|
||||
!SECTION Edges and edges attributes
|
||||
|
||||
In ArangoDB prior to 3.0 the attributes `_from` and `_to` of edges were treated
|
||||
In ArangoDB prior to 3.0 the attributes `_from` and `_to` of edges were treated
|
||||
specially when loading or storing edges. That special handling led to these attributes
|
||||
being not as flexible as regular document attributes. For example, the `_from` and
|
||||
`_to` attribute values of an existing edge could not be updated once the edge was
|
||||
|
@ -156,7 +156,7 @@ with exactly the specified name (i.e. `doc["a.b"]`).
|
|||
|
||||
ArangoDB 3.0 now handles attribute names containing the dot symbol properly, and sending a
|
||||
bind parameter `@name` = `a.b` will now always trigger an access to the attribute `doc["a.b"]`,
|
||||
not the sub-attribute `b` of `a` in `doc`.
|
||||
not the sub-attribute `b` of `a` in `doc`.
|
||||
|
||||
For users that used the "hack" of passing bind parameters containing dot symbol to access
|
||||
sub-attributes, ArangoDB 3.0 allows specifying the attribute name parts as an array of strings,
|
||||
|
@ -181,8 +181,8 @@ is modified in the subquery but also read-accessed in the outer scope:
|
|||
```
|
||||
FOR doc IN myCollection
|
||||
LET changes = (
|
||||
FOR what IN myCollection
|
||||
FILTER what.value == 1
|
||||
FOR what IN myCollection
|
||||
FILTER what.value == 1
|
||||
REMOVE what IN myCollection
|
||||
)
|
||||
RETURN doc
|
||||
|
@ -193,7 +193,7 @@ e.g.
|
|||
|
||||
```
|
||||
FOR doc IN myCollection
|
||||
FILTER doc.value == 1
|
||||
FILTER doc.value == 1
|
||||
REMOVE doc IN myCollection
|
||||
```
|
||||
|
||||
|
@ -216,7 +216,7 @@ these functions unless the `sort` parameter is specified (for the `ATTRIBUTES()`
|
|||
The V8 engine that is used inside ArangoDB to execute JavaScript code has been upgraded from
|
||||
version 4.3.61 to 5.0.71.39. The new version should be mostly compatible to the old version,
|
||||
but there may be subtle differences, including changes of error message texts thrown by the
|
||||
engine.
|
||||
engine.
|
||||
Furthermore, some V8 startup parameters have changed their meaning or have been removed in
|
||||
the new version. This is only relevant when ArangoDB or ArangoShell are started with a custom
|
||||
value for the `--javascript.v8-options` startup option.
|
||||
|
@ -245,8 +245,8 @@ Among others, the following V8 options change in the new version of ArangoDB:
|
|||
value from false to true
|
||||
|
||||
- `--harmony_unicode_regexps`: this options means `enable "harmony unicode regexps"` and changes
|
||||
its default value from false to true
|
||||
|
||||
its default value from false to true
|
||||
|
||||
- `--harmony_arrays`, `--harmony_array_includes`, `--harmony_computed_property_names`,
|
||||
`--harmony_arrow_functions`, `--harmony_rest_parameters`, `--harmony_classes`,
|
||||
`--harmony_object_literals`, `--harmony_numeric_literals`, `--harmony_unicode`:
|
||||
|
@ -271,18 +271,18 @@ Modules shipped with ArangoDB can now be required using the pattern `@arangodb/<
|
|||
instead of `org/arangodb/<module>`, e.g.
|
||||
|
||||
```js
|
||||
var cluster = require("@arangodb/cluster");
|
||||
var cluster = require("@arangodb/cluster");
|
||||
```
|
||||
|
||||
The old format can still be used for compatibility:
|
||||
|
||||
```js
|
||||
var cluster = require("org/arangodb/cluster");
|
||||
var cluster = require("org/arangodb/cluster");
|
||||
```
|
||||
|
||||
ArangoDB prior to version 3.0 allowed a transparent use of CoffeeScript
|
||||
source files with the `require()` function. Files with a file name extension
|
||||
of `coffee` were automatically sent through a CoffeeScript parser and
|
||||
of `coffee` were automatically sent through a CoffeeScript parser and
|
||||
transpiled into JavaScript on-the-fly. This support is gone with ArangoDB
|
||||
3.0. To run any CoffeeScript source files, they must be converted to JavaScript
|
||||
by the client application.
|
||||
|
@ -355,18 +355,18 @@ of 3.0 is as follows:
|
|||
```js
|
||||
/* test if document exists. this returned true in 2.8 */
|
||||
db.myCollection.exists("test");
|
||||
{
|
||||
"_key" : "test",
|
||||
"_id" : "myCollection/test",
|
||||
"_rev" : "9758059"
|
||||
{
|
||||
"_key" : "test",
|
||||
"_id" : "myCollection/test",
|
||||
"_rev" : "9758059"
|
||||
}
|
||||
|
||||
/* test if document exists. this returned true in 2.8 */
|
||||
db.myCollection.exists({ _key: "test" });
|
||||
{
|
||||
"_key" : "test",
|
||||
"_id" : "myCollection/test",
|
||||
"_rev" : "9758059"
|
||||
{
|
||||
"_key" : "test",
|
||||
"_id" : "myCollection/test",
|
||||
"_rev" : "9758059"
|
||||
}
|
||||
|
||||
/* test if document exists. this also returned false in 2.8 */
|
||||
|
@ -375,10 +375,10 @@ false
|
|||
|
||||
/* test if document with a given revision id exists. this returned true in 2.8 */
|
||||
db.myCollection.exists({ _key: "test", _rev: "9758059" });
|
||||
{
|
||||
"_key" : "test",
|
||||
"_id" : "myCollection/test",
|
||||
"_rev" : "9758059"
|
||||
{
|
||||
"_key" : "test",
|
||||
"_id" : "myCollection/test",
|
||||
"_rev" : "9758059"
|
||||
}
|
||||
|
||||
/* test if document with a given revision id exists. this returned false in 2.8 */
|
||||
|
@ -398,7 +398,7 @@ index of type "cap" with a collection's `ensureIndex()` function. The dedicated
|
|||
|
||||
!SUBSUBSECTION Undocumented APIs
|
||||
|
||||
The undocumented functions `BY_EXAMPLE_HASH()` and `BY_EXAMPLE_SKIPLIST()` and
|
||||
The undocumented functions `BY_EXAMPLE_HASH()` and `BY_EXAMPLE_SKIPLIST()` and
|
||||
`BY_CONDITION_SKIPLIST` have been removed. These functions were always hidden and not
|
||||
intended to be part of the public JavaScript API for collections.
|
||||
|
||||
|
@ -412,7 +412,7 @@ The following incompatible changes have been made to the HTTP API in ArangoDB 3.
|
|||
|
||||
The HTTP insert operations for single documents and edges (POST `/_api/document`) do
|
||||
not support the URL parameter "createCollection" anymore. In previous versions of
|
||||
ArangoDB this parameter could be used to automatically create a collection upon
|
||||
ArangoDB this parameter could be used to automatically create a collection upon
|
||||
insertion of the first document. It is now required that the target collection already
|
||||
exists when using this API, otherwise it will return an HTTP 404 error.
|
||||
The same is true for the import API at POST `/_api/import`.
|
||||
|
@ -443,7 +443,7 @@ removing a document with a specific revision id could be achieved as follows:
|
|||
|
||||
```
|
||||
curl -X DELETE \
|
||||
"http://127.0.0.1:8529/_api/document/myCollection/myKey?rev=123"
|
||||
"http://127.0.0.1:8529/_api/document/myCollection/myKey?rev=123"
|
||||
```
|
||||
|
||||
ArangoDB 3.0 does not support passing the revision id via the "rev" URL parameter
|
||||
|
@ -453,7 +453,7 @@ e.g.
|
|||
```
|
||||
curl -X DELETE \
|
||||
--header "If-Match: '123'" \
|
||||
"http://127.0.0.1:8529/_api/document/myCollection/myKey"
|
||||
"http://127.0.0.1:8529/_api/document/myCollection/myKey"
|
||||
```
|
||||
|
||||
The URL parameter "policy" was also usable in previous versions of ArangoDB to
|
||||
|
@ -470,7 +470,7 @@ current revision of the document, regardless of its revision id.
|
|||
|
||||
The HTTP API for retrieving the ids, keys or URLs of all documents from a collection
|
||||
was previously located at GET `/_api/document?collection=...`. This API was moved to
|
||||
PUT `/_api/simple/all-keys` and is now executed as an AQL query.
|
||||
PUT `/_api/simple/all-keys` and is now executed as an AQL query.
|
||||
The name of the collection must now be passed in the HTTP request body instead of in
|
||||
the request URL. The same is true for the "type" parameter, which controls the type of
|
||||
the result to be created.
|
||||
|
@ -494,7 +494,7 @@ CRUD operations there is no distinction anymore between documents and edges API-
|
|||
|
||||
That means CRUD operations concerning edges need to be sent to the HTTP endpoint
|
||||
`/_api/document` instead of `/_api/edge`. Sending requests to `/_api/edge` will
|
||||
result in an HTTP 404 error in 3.0. The following methods are available at
|
||||
result in an HTTP 404 error in 3.0. The following methods are available at
|
||||
`/_api/document` for documents and edge:
|
||||
|
||||
- HTTP POST: insert new document or edge
|
||||
|
@ -514,7 +514,7 @@ edges objects sent to the server:
|
|||
```
|
||||
curl -X POST \
|
||||
--data '{"value":1,"_from":"myVertexCollection/1","_to":"myVertexCollection/2"}' \
|
||||
"http://127.0.0.1:8529/_api/document?collection=myEdgeCollection"
|
||||
"http://127.0.0.1:8529/_api/document?collection=myEdgeCollection"
|
||||
```
|
||||
|
||||
Previous versions of ArangoDB required the `_from` and `_to` attributes of edges be
|
||||
|
@ -543,7 +543,7 @@ of these operations return HTTP 200 regardless of the `waitForSync` value.
|
|||
!SUBSECTION Simple queries API
|
||||
|
||||
The REST routes PUT `/_api/simple/first` and `/_api/simple/last` have been removed
|
||||
entirely. These APIs were responsible for returning the first-inserted and
|
||||
entirely. These APIs were responsible for returning the first-inserted and
|
||||
least-inserted documents in a collection. This feature was built on cap constraints
|
||||
internally, which have been removed in 3.0.
|
||||
|
||||
|
@ -591,16 +591,16 @@ still send the header, but this will not make the database name in the "location
|
|||
response header disappear.
|
||||
|
||||
The result format for querying all collections via the API GET `/_api/collection`
|
||||
has been changed.
|
||||
has been changed.
|
||||
|
||||
Previous versions of ArangoDB returned an object with an attribute named `collections`
|
||||
and an attribute named `names`. Both contained all available collections, but
|
||||
`collections` contained the collections as an array, and `names` contained the
|
||||
collections again, contained in an object in which the attribute names were the
|
||||
and an attribute named `names`. Both contained all available collections, but
|
||||
`collections` contained the collections as an array, and `names` contained the
|
||||
collections again, contained in an object in which the attribute names were the
|
||||
collection names, e.g.
|
||||
|
||||
```
|
||||
{
|
||||
{
|
||||
"collections": [
|
||||
{"id":"5874437","name":"test","isSystem":false,"status":3,"type":2},
|
||||
{"id":"17343237","name":"something","isSystem":false,"status":3,"type":2},
|
||||
|
@ -613,7 +613,6 @@ collection names, e.g.
|
|||
}
|
||||
}
|
||||
```
|
||||
|
||||
This result structure was redundant, and therefore has been simplified to just
|
||||
|
||||
```
|
||||
|
@ -624,6 +623,7 @@ This result structure was redundant, and therefore has been simplified to just
|
|||
...
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
in ArangoDB 3.0.
|
||||
|
||||
|
@ -633,7 +633,7 @@ The URL parameter "failOnUnknown" was removed from the REST API GET `/_api/repli
|
|||
This parameter controlled whether dumping or replicating edges should fail if one
|
||||
of the vertex collections linked in the edge's `_from` or `_to` attributes was not
|
||||
present anymore. In this case the `_from` and `_to` values could not be translated into
|
||||
meaningful ids anymore.
|
||||
meaningful ids anymore.
|
||||
|
||||
There were two ways for handling this:
|
||||
- setting `failOnUnknown` to `true` caused the HTTP request to fail, leaving error
|
||||
|
@ -674,12 +674,12 @@ been moved into more appropriate topic sections.
|
|||
The following options have been available before 3.0 and have changed their name
|
||||
in 3.0:
|
||||
|
||||
- `--server.disable-authentication` was renamed to `--server.authentication`.
|
||||
- `--server.disable-authentication` was renamed to `--server.authentication`.
|
||||
Note that the meaning of the option `--server.authentication` is the opposite of
|
||||
the previous `--server.disable-authentication`.
|
||||
- `--server.disable-authentication-unix-sockets` was renamed to
|
||||
`--server.authentication-unix-sockets`. Note that the meaning of the option
|
||||
`--server.authentication-unix-sockets` is the opposite of the previous
|
||||
- `--server.disable-authentication-unix-sockets` was renamed to
|
||||
`--server.authentication-unix-sockets`. Note that the meaning of the option
|
||||
`--server.authentication-unix-sockets` is the opposite of the previous
|
||||
`--server.disable-authentication-unix-sockets`.
|
||||
- `--server.authenticate-system-only` was renamed to `--server.authentication-system-only`.
|
||||
The meaning of the option is unchanged.
|
||||
|
@ -720,7 +720,7 @@ in 3.0:
|
|||
- `--database.query-cache-max-results` was renamed to `--query.cache-entries`. The
|
||||
meaning of the option is unchanged.
|
||||
- `--database.disable-query-tracking` was renamed to `--query.tracking`. The meaning
|
||||
of the option `--query.tracking` is the opposite of the previous
|
||||
of the option `--query.tracking` is the opposite of the previous
|
||||
`--database.disable-query-tracking`.
|
||||
- `--log.tty` was renamed to `--log.foreground-tty`. The meaning of the option is
|
||||
unchanged.
|
||||
|
@ -741,7 +741,7 @@ topic in front of a log level or an output. For example
|
|||
```
|
||||
|
||||
will log messages concerning startup at trace level, everything else at info
|
||||
level. `--log.level` can be specified multiple times at startup, for as many
|
||||
level. `--log.level` can be specified multiple times at startup, for as many
|
||||
topics as needed.
|
||||
|
||||
Some relevant log topics available in 3.0 are:
|
||||
|
@ -777,7 +777,7 @@ logs all queries to the file "queries.txt".
|
|||
|
||||
The old option `--log.file` is still available in 3.0 for convenience reasons. In
|
||||
3.0 it is a shortcut for the more general option `--log.output file://filename`.
|
||||
|
||||
|
||||
The old option `--log.requests-file` is still available in 3.0. It is now a shortcut
|
||||
for the more general option `--log.output requests=file://...`.
|
||||
|
||||
|
@ -791,7 +791,7 @@ have most been used during ArangoDB's internal development.
|
|||
|
||||
The syslog-related options `--log.application` and `--log.facility` have been removed.
|
||||
They are superseded by the more general `--log.output` option which can also handle
|
||||
syslog targets.
|
||||
syslog targets.
|
||||
|
||||
!SUBSECTION Removed other options
|
||||
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
#
|
||||
# Summary
|
||||
#
|
||||
#* [First Steps](FirstSteps/README.md)
|
||||
# * [Getting Familiar](FirstSteps/GettingFamiliar.md)
|
||||
# * [First Steps](FirstSteps/README.md) #TODO
|
||||
# * [Getting Familiar](FirstSteps/GettingFamiliar.md) #TODO
|
||||
* [Getting Started](GettingStarted/README.md)
|
||||
# move to administration (command line options)?
|
||||
#* [Install and run the server](FirstSteps/Arangod.md)
|
||||
# * [Install and run the server](FirstSteps/Arangod.md) #TODO
|
||||
* [Installing](GettingStarted/Installing/README.md)
|
||||
* [Linux](GettingStarted/Installing/Linux.md)
|
||||
* [Mac OS X](GettingStarted/Installing/MacOSX.md)
|
||||
|
@ -15,13 +15,13 @@
|
|||
* [Cluster setup](GettingStarted/Installing/Cluster.md)
|
||||
* [Using the Web Interface](GettingStarted/WebInterface.md)
|
||||
* [Coming from SQL](GettingStarted/ComingFromSql.md)
|
||||
#* [Coming from MongoDB](GettingStarted/ComingFromMongoDb.md)
|
||||
# * [Coming from MongoDB](GettingStarted/ComingFromMongoDb.md) #TODO
|
||||
#
|
||||
* [Scalability](Scalability/README.md)
|
||||
# * [Joins](Scalability/Joins.md)
|
||||
#
|
||||
* [Data model & modeling](DataModeling/README.md)
|
||||
#* [Collections](FirstSteps/CollectionsAndDocuments.md)
|
||||
# * [Collections](FirstSteps/CollectionsAndDocuments.md) #TODO
|
||||
* [Concepts](DataModeling/Concepts.md)
|
||||
* [Databases](DataModeling/Databases/README.md)
|
||||
* [Working with Databases](DataModeling/Databases/WorkingWith.md)
|
||||
|
@ -67,8 +67,7 @@
|
|||
* [Service context](Foxx/Context.md)
|
||||
* [Configuration](Foxx/Configuration.md)
|
||||
* [Dependencies](Foxx/Dependencies.md)
|
||||
* [Routing](Foxx/Router/README.md)
|
||||
* [Routers](Foxx/Router/Routers.md)
|
||||
* [Routers](Foxx/Router/README.md)
|
||||
* [Endpoints](Foxx/Router/Endpoints.md)
|
||||
* [Middleware](Foxx/Router/Middleware.md)
|
||||
* [Request](Foxx/Router/Request.md)
|
||||
|
@ -84,6 +83,7 @@
|
|||
* [Writing tests](Foxx/Testing.md)
|
||||
* [Scripts and queued jobs](Foxx/Scripts.md)
|
||||
* [Legacy mode for 2.x](Foxx/LegacyMode.md)
|
||||
* [User management](Foxx/Users.md)
|
||||
* [Related modules](Foxx/Modules.md)
|
||||
* [Authentication](Foxx/Auth.md)
|
||||
* [OAuth 2.0](Foxx/OAuth2.md)
|
||||
|
|
|
@ -195,9 +195,9 @@ Illegal document
|
|||
@END_EXAMPLE_ARANGOSH_RUN
|
||||
|
||||
Insert multiple documents:
|
||||
TODO, make this example work.
|
||||
###!TODO, make this example work.
|
||||
|
||||
@ EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostMulti}
|
||||
EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostMulti}
|
||||
var cn = "products";
|
||||
db._drop(cn);
|
||||
|
||||
|
@ -209,12 +209,12 @@ TODO, make this example work.
|
|||
assert(response.code === 200);
|
||||
|
||||
logJsonResponse(response);
|
||||
@ END_EXAMPLE_ARANGOSH_RUN
|
||||
END_EXAMPLE_ARANGOSH_RUN
|
||||
|
||||
Use of returnNew:
|
||||
TODO, make this example work.
|
||||
###!TODO, make this example work.
|
||||
|
||||
@ EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostMulti}
|
||||
EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostMulti}
|
||||
var cn = "products";
|
||||
db._drop(cn);
|
||||
|
||||
|
@ -226,6 +226,6 @@ TODO, make this example work.
|
|||
assert(response.code === 200);
|
||||
|
||||
logJsonResponse(response);
|
||||
@ END_EXAMPLE_ARANGOSH_RUN
|
||||
END_EXAMPLE_ARANGOSH_RUN
|
||||
@endDocuBlock
|
||||
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -124,6 +124,12 @@
|
|||
initFinished: false,
|
||||
|
||||
initialize: function () {
|
||||
|
||||
//check frontend config for global conf settings
|
||||
if (frontendConfig.isCluster === true) {
|
||||
this.isCluster = true;
|
||||
}
|
||||
|
||||
// This should be the only global object
|
||||
window.modalView = new window.ModalView();
|
||||
|
||||
|
@ -143,7 +149,6 @@
|
|||
var callback = function(error, isCoordinator) {
|
||||
self = this;
|
||||
if (isCoordinator === true) {
|
||||
self.isCluster = true;
|
||||
|
||||
self.coordinatorCollection.fetch({
|
||||
success: function() {
|
||||
|
@ -151,9 +156,6 @@
|
|||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
self.isCluster = false;
|
||||
}
|
||||
}.bind(this);
|
||||
|
||||
window.isCoordinator(callback);
|
||||
|
|
|
@ -34,6 +34,7 @@ module.exports = function jwtStorage(cfg) {
|
|||
}
|
||||
assert(cfg.algorithm === 'none' || cfg.secret, `Must pass a JWT secret for "${cfg.algorithm}" algorithm`);
|
||||
assert(cfg.algorithm !== 'none' || !cfg.secret, 'Must NOT pass a JWT secret for "none" algorithm');
|
||||
const algorithm = cfg.algorithm || 'HS512';
|
||||
const ttl = (cfg.ttl || 60 * 60) * 1000;
|
||||
return {
|
||||
fromClient(sid) {
|
||||
|
@ -54,7 +55,7 @@ module.exports = function jwtStorage(cfg) {
|
|||
payload: session.data,
|
||||
exp: Date.now() + ttl
|
||||
};
|
||||
return crypto.jwtEncode(cfg.secret, token, cfg.algorithm);
|
||||
return crypto.jwtEncode(cfg.secret, token, algorithm);
|
||||
},
|
||||
new() {
|
||||
return {
|
||||
|
|