
Merge branch 'devel' of github.com:triAGENS/ArangoDB into devel

a-brandt 2013-05-17 17:26:45 +02:00
commit 623c3af246
250 changed files with 8946 additions and 4958 deletions

.gitignore

@ -5,6 +5,7 @@ mr-*.h
*.o
*.a
*~
*.pyc
.libev-build-64
.v8-build-64
@ -48,12 +49,14 @@ config/stamp-h1
configure
core
TAGS
tags
Doxygen/*.doxy
Doxygen/html/
Doxygen/latex/
Doxygen/js/
Documentation/arango.template
Documentation/Examples/*.generated
UnitTests/HttpInterface/logs/
UnitTests/basics_suite
UnitTests/geo_suite


@ -1,5 +1,65 @@
v1.3.0-alpha2 (XXXX-XX-XX)
--------------------------
v1.4
------
* fixed usage of --temp-path in arangod and arangosh
* issue #526: Unable to escape when an erroneous command is entered into the js shell
* issue #523: Graph and vertex methods for the javascript api
* issue #517: Foxx: Route parameters with capital letters fail
* issue #512: Bind parameters for LIMIT
v1.3.1 (2013-XX-XX)
-------------------
* issue #535: Problem with AQL user functions javascript API
* set --javascript.app-path for test execution to prevent startup error
* issue #532: Graph _edgesCache returns invalid data?
* issue #531: Arangod errors
* issue #529: Really weird transaction issue
* fixed usage of --temp-path in arangod and arangosh
v1.3.0 (2013-05-10)
-------------------
* fixed problem on restart ("datafile-xxx is not sealed") when server was killed
during a compaction run
* fixed leak when using cursors with very small batchSize
* issue #508: `unregistergroup` function not mentioned in http interface docs
* issue #507: GET /_api/aqlfunction returns code inside parentheses
* fixed issue #489: Bug in aal.install
* fixed issue #505: statistics not populated on MacOS
v1.3.0-rc1 (2013-04-24)
-----------------------
* updated documentation for 1.3.0
* added node modules and npm packages
* changed compaction to only compact datafiles with at least 10% of dead
documents (byte size-wise)
* issue #498: fixed reload of authentication info when using
`require("org/arangodb/users").reload()`
* issue #495: Passing an empty array to create a document results in a
"phantom" document
* added more precision for request statistics figures


@ -1,89 +0,0 @@
unix> curl --dump - http://localhost:8529/_admin/statistics
HTTP/1.1 200 OK
content-type: application/json; charset=utf-8
{
"system" : {
"minorPageFaults" : 18666,
"majorPageFaults" : 57,
"userTime" : 1.162991,
"systemTime" : 0.464635,
"numberThreads" : 0,
"residentSize" : 34525184,
"virtualSize" : 0
},
"client" : {
"httpConnections" : 1,
"connectionTime" : {
"count" : 0,
"counts" : [
0,
0,
0,
0
]
},
"totalTime" : {
"count" : 54,
"counts" : [
51,
0,
0,
0,
0,
0,
3
]
},
"requestTime" : {
"count" : 54,
"counts" : [
51,
0,
0,
0,
0,
0,
3
]
},
"queueTime" : {
"count" : 53,
"counts" : [
53,
0,
0,
0,
0,
0,
0
]
},
"bytesSent" : {
"count" : 54,
"counts" : [
13,
27,
13,
1,
0,
0
]
},
"bytesReceived" : {
"count" : 54,
"counts" : [
52,
2,
0,
0,
0,
0
]
}
},
"error" : false,
"code" : 200
}


@ -1,179 +0,0 @@
unix> curl --dump - http://localhost:8529/_admin/statistics-description
HTTP/1.1 200 OK
content-type: application/json; charset=utf-8
{
"groups" : [
{
"group" : "system",
"name" : "Process Statistics",
"description" : "Statistics about the ArangoDB process"
},
{
"group" : "client",
"name" : "Client Statistics",
"description" : "Statistics about the clients connecting to the server."
}
],
"figures" : [
{
"group" : "system",
"identifier" : "userTime",
"name" : "User Time",
"description" : "Amount of time that this process has been scheduled in user mode, measured in clock ticks divided by sysconf(_SC_CLK_TCK) aka seconds.",
"type" : "accumulated",
"units" : "seconds"
},
{
"group" : "system",
"identifier" : "systemTime",
"name" : "System Time",
"description" : "Amount of time that this process has been scheduled in kernel mode, measured in clock ticks divided by sysconf(_SC_CLK_TCK) aka seconds.",
"type" : "accumulated",
"units" : "seconds"
},
{
"group" : "system",
"identifier" : "numberOfThreads",
"name" : "Number of Threads",
"description" : "Number of threads in this process.",
"type" : "current",
"units" : "number"
},
{
"group" : "system",
"identifier" : "residentSize",
"name" : "Resident Set Size",
"description" : "The number of pages the process has in real memory. This is just the pages which count toward text, data, or stack space. This does not include pages which have not been demand-loaded in, or which are swapped out.",
"type" : "current",
"units" : "bytes"
},
{
"group" : "system",
"identifier" : "virtualSize",
"name" : "Virtual Memory Size",
"description" : "The size of the virtual memory the process is using.",
"type" : "current",
"units" : "bytes"
},
{
"group" : "system",
"identifier" : "minorPageFaults",
"name" : "Minor Page Faults",
"description" : "The number of minor faults the process has made which have not required loading a memory page from disk.",
"type" : "accumulated",
"units" : "number"
},
{
"group" : "system",
"identifier" : "majorPageFaults",
"name" : "Major Page Faults",
"description" : "The number of major faults the process has made which have required loading a memory page from disk.",
"type" : "accumulated",
"units" : "number"
},
{
"group" : "client",
"identifier" : "httpConnections",
"name" : "HTTP Client Connections",
"description" : "The number of http connections that are currently open.",
"type" : "current",
"units" : "number"
},
{
"group" : "client",
"identifier" : "totalTime",
"name" : "Total Time",
"description" : "Total time needed to answer a request.",
"type" : "distribution",
"cuts" : [
0.01,
0.05,
0.1,
0.2,
0.5,
1
],
"units" : "seconds"
},
{
"group" : "client",
"identifier" : "requestTime",
"name" : "Request Time",
"description" : "Request time needed to answer a request.",
"type" : "distribution",
"cuts" : [
0.01,
0.05,
0.1,
0.2,
0.5,
1
],
"units" : "seconds"
},
{
"group" : "client",
"identifier" : "queueTime",
"name" : "Queue Time",
"description" : "Queue time needed to answer a request.",
"type" : "distribution",
"cuts" : [
0.01,
0.05,
0.1,
0.2,
0.5,
1
],
"units" : "seconds"
},
{
"group" : "client",
"identifier" : "bytesSent",
"name" : "Bytes Sent",
"description" : "Bytes sents for a request.",
"type" : "distribution",
"cuts" : [
250,
1000,
2000,
5000,
10000
],
"units" : "bytes"
},
{
"group" : "client",
"identifier" : "bytesReceived",
"name" : "Bytes Received",
"description" : "Bytes receiveds for a request.",
"type" : "distribution",
"cuts" : [
250,
1000,
2000,
5000,
10000
],
"units" : "bytes"
},
{
"group" : "client",
"identifier" : "connectionTime",
"name" : "Connection Time",
"description" : "Total connection time of a client.",
"type" : "distribution",
"cuts" : [
0.1,
1,
60
],
"units" : "seconds"
}
],
"error" : false,
"code" : 200
}


@ -1,8 +0,0 @@
{
"_id" : "_users/1589671",
"_rev" : "1589671",
"_key" : "1589671",
"active" : true,
"user" : "root",
"password" : "$1$e0d13aed$328469fc501398de5c3626dcc0a5a567e01b4bafbc06d89c0a5cf2b09872ca14"
}


@ -1,16 +0,0 @@
{
"firstName" : "Hugo",
"lastName" : "Schlonz",
"address" : {
"city" : "Hier",
"street" : "Strasse 1"
},
"hobbies" : [
"swimming",
"biking",
"programming"
],
"_id" : "demo/schlonz",
"_rev" : "12468647",
"_key" : "schlonz"
}


@ -1,15 +0,0 @@
unix> curl -X POST --data @- --dump - http://localhost:8529/_api/document?collection=products
{ "Hello": "World" }
HTTP/1.1 202 Accepted
content-type: application/json; charset=utf-8
etag: "21316007"
location: /_api/document/products/21316007
{
"error" : false,
"_id" : "products/21316007",
"_rev" : "21316007",
"_key" : "21316007"
}


@ -1,13 +0,0 @@
unix> curl -X POST --data @- --dump - http://localhost:8529/_api/document?collection=products
{ 1: "World" }
HTTP/1.1 400 Bad Request
content-type: application/json; charset=utf-8
{
"error" : true,
"errorMessage" : "expecting attribute name",
"code" : 400,
"errorNum" : 600
}


@ -1,15 +0,0 @@
unix> curl -X POST --data @- --dump - http://localhost:8529/_api/document?collection=products
{ "Hello": "World" }
HTTP/1.1 201 Created
content-type: application/json; charset=utf-8
etag: "17973671"
location: /_api/document/products/17973671
{
"error" : false,
"_id" : "products/17973671",
"_rev" : "17973671",
"_key" : "17973671"
}


@ -1,15 +0,0 @@
unix> curl -X POST --data @- --dump - http://localhost:8529/_api/document?collection=products&createCollection=true
{ "Hello": "World" }
HTTP/1.1 202 Accepted
content-type: application/json; charset=utf-8
etag: "29049255"
location: /_api/document/products/29049255
{
"error" : false,
"_id" : "products/29049255",
"_rev" : "29049255",
"_key" : "29049255"
}


@ -1,13 +0,0 @@
unix> curl -X POST --data @- --dump - http://localhost:8529/_api/document?collection=products
{ "Hello": "World" }
HTTP/1.1 404 Not Found
content-type: application/json; charset=utf-8
{
"error" : true,
"errorMessage" : "collection /_api/collection/products not found",
"code" : 404,
"errorNum" : 1203
}


@ -1,15 +0,0 @@
unix> curl -X POST --data @- --dump - http://localhost:8529/_api/document?collection=products&waitForSync=true
{ "Hello": "World" }
HTTP/1.1 201 Created
content-type: application/json; charset=utf-8
etag: "19743143"
location: /_api/document/products/19743143
{
"error" : false,
"_id" : "products/19743143",
"_rev" : "19743143",
"_key" : "19743143"
}


@ -1,13 +0,0 @@
unix> curl --dump - http://localhost:8529/_api/document/products/24265127
HTTP/1.1 200 OK
content-type: application/json; charset=utf-8
etag: "24265127"
{
"hallo" : "world",
"_id" : "products/24265127",
"_rev" : "24265127",
"_key" : "24265127"
}


@ -1,13 +0,0 @@
unix> curl --dump - http://localhost:8529/_api/document/?collection=products
HTTP/1.1 200 OK
content-type: application/json; charset=utf-8
{
"documents" : [
"/_api/document/products/31408551",
"/_api/document/products/31146407",
"/_api/document/products/30622119"
]
}


@ -1,6 +0,0 @@
unix> curl -X HEAD --dump - http://localhost:8529/_api/document/products/37306791
HTTP/1.1 200 OK
content-type: application/json; charset=utf-8
etag: "37306791"


@ -1,2 +0,0 @@
unix> curl --header 'if-none-match: "34292135"' --dump - http://localhost:8529/_api/document/products/34292135


@ -1,12 +0,0 @@
unix> curl --dump - http://localhost:8529/_api/document/products/unknownhandle
HTTP/1.1 404 Not Found
content-type: application/json; charset=utf-8
{
"error" : true,
"errorMessage" : "document /_api/document/products/unknownhandle not found",
"code" : 404,
"errorNum" : 1202
}


@ -1,3 +0,0 @@
unix> curl -X PUT --data @- --dump - http://localhost:8529/_api/document/products/32653735
{}


@ -1,3 +0,0 @@
unix> curl -X PUT --data @- --dump - http://localhost:8529/_api/document/products/27410855
{}


@ -1,3 +0,0 @@
unix> curl -X PUT --data @- --dump - http://localhost:8529/_api/document/products/22757799
{}


@ -1,3 +0,0 @@
unix> curl -X PUT --data @- --dump - http://localhost:8529/_api/document/products/25903527
{}


@ -1,3 +0,0 @@
unix> curl -X PUT --data @- --dump - http://localhost:8529/_api/document/products/35799463
{}


@ -5,5 +5,5 @@ content-type: application/json; charset=utf-8
{
"server": "arango",
"version": "1.3.devel"
"version": "1.4.devel"
}


@ -5,14 +5,14 @@ content-type: application/json; charset=utf-8
{
"server": "arango",
"version": "1.3.devel"
"version": "1.4.devel"
"details": {
"build-date": "Mar 8 2013 22:06:49",
"build-date": "May 13 2013 09:07:23",
"configure": "'./configure' '--enable-maintainer-mode' '--enable-relative' '--enable-all-in-one-icu' '--enable-all-in-one-v8'","icu-version":"49.1.2",
"libev-version": "4.11",
"openssl-version": "OpenSSL 1.0.1 14 Mar 2012",
"repository-version": "heads/devel-0-gdd446b51dd713656d0f3dc50e94e3ea88cd72a86",
"server-version": "1.3.devel",
"server-version": "1.4.devel",
"v8-version":"3.16.14.1"
}
}


@ -1,8 +1,10 @@
arango> require("internal").processStat();
{ minorPageFaults : 2683,
{
minorPageFaults : 2683,
majorPageFaults : 0,
userTime : 26,
systemTime : 7,
numberThreads : 4,
numberOfThreads : 4,
residentSize : 2288,
virtualSize : 55861248 }
virtualSize : 55861248
}


@ -14,7 +14,8 @@ DOXYGEN_TOC = \
Documentation/InstallationManual/InstallationManual.md \
Documentation/Manual/Home.md \
Documentation/RefManual/RefManual.md \
Documentation/UserManual/UserManual.md
Documentation/UserManual/UserManual.md \
Documentation/Manual/Upgrading.md
################################################################################
### @brief install man pages
@ -82,6 +83,7 @@ WIKI = \
DbaManualEmergencyConsole \
ExtendingAql \
FirstStepsArangoDB \
Graphs \
Glossary \
HandlingCollections \
HandlingDocuments \
@ -126,14 +128,17 @@ WIKI = \
NamingConventions \
NewFeatures11 \
NewFeatures12 \
NewFeatures13 \
RefManual \
RefManualArangoErrors \
RestDocument \
RestEdge \
SimpleQueries \
Transactions \
Upgrading \
Upgrading11 \
Upgrading12 \
Upgrading13 \
UserManual \
UserManualActions \
UserManualArangosh \
@ -315,6 +320,9 @@ swagger:
python @srcdir@/Documentation/Scripts/generateSwagger.py \
< @srcdir@/js/actions/api-graph.js > @srcdir@/html/admin/api-docs/graph
python @srcdir@/Documentation/Scripts/generateSwagger.py \
< @srcdir@/js/actions/api-edges.js > @srcdir@/html/admin/api-docs/edges
python @srcdir@/Documentation/Scripts/generateSwagger.py \
< @srcdir@/js/actions/api-user.js > @srcdir@/html/admin/api-docs/user


@ -22,16 +22,13 @@ for various languages including the Big-P (PHP, Python, Perl) and Ruby.
Please contact @S_EXTREF_S{http://www.arangodb.org/connect,us} if you have any
questions.
Upgrading to ArangoDB 1.2 {#ArangoDBUpgrading12}
Upgrading to ArangoDB 1.3 {#ArangoDBUpgrading13}
================================================
- @BOOK_REF{NewFeatures12}
- @BOOK_REF{Upgrading12}
- @BOOK_REF{NewFeatures13}
- @BOOK_REF{Upgrading13}
Older Releases:
- @BOOK_REF{NewFeatures11}
- @BOOK_REF{Upgrading11}
See @ref Upgrading for upgrading from older releases.
ArangoDB's User Manuals {#ArangoDBUserManual}
=============================================


@ -0,0 +1,286 @@
New Features in ArangoDB 1.3 {#NewFeatures13}
=============================================
@NAVIGATE_NewFeatures13
@EMBEDTOC{NewFeatures13TOC}
Features and Improvements {#NewFeatures13Introduction}
======================================================
The following list shows in detail which features have been added or improved in
ArangoDB 1.3. ArangoDB 1.3 also contains several bugfixes that are not listed
here.
Changes to the Datafile Structure{#NewFeatures13Datafile}
---------------------------------------------------------
As the datafile structure has changed, please read the
@ref Upgrading13 "upgrade manual" carefully.
Rapid API Development with FOXX{#NewFeatures13Foxx}
---------------------------------------------------
A preview of the forthcoming Foxx is contained in 1.3. Please note that this is
not the final version; Foxx is still experimental.
Foxx is a lightweight JavaScript "micro framework" which allows you to build
applications directly on top of ArangoDB and therefore skip the middleman
(Rails, Django, Symfony or whatever your favorite web framework is). Inspired by
frameworks like Sinatra, Foxx is designed with simplicity and the specific use
case of modern client-side MVC frameworks in mind.
The screencast at
<a href="http://foxx.arangodb.org">http://foxx.arangodb.org</a>
explains how to use Foxx.
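As a first impression of the API, here is a minimal route handler, modelled on
the example in the Foxx chapter of this manual. This is only a sketch; as Foxx
is still a preview, the response API may change:

    var Foxx = require("org/arangodb/foxx");
    var app = new Foxx.Application();

    /* answer GET /hello with a plain-text response */
    app.get("/hello", function (req, res) {
      res.set("Content-Type", "text/plain");
      res.body = "Hello from Foxx!";
    });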
Transactions{#NewFeatures13Transactions}
----------------------------------------
ArangoDB provides server-side transactions that allow executing multi-document
and even multi-collection operations with ACID guarantees.
Transactions in ArangoDB are defined by providing a JavaScript object which
needs to contain the transaction code, and some declarations about the
collections involved in the transaction.
The transaction code will be executed by the server en bloc. If execution of any
statement in the transaction code fails for whatever reason, the entire
transaction will be aborted and rolled back.
Data modifications done by transactions become visible to following transactions
only when a transaction succeeds. Data modifications that are performed by a
still-ongoing transaction are not exposed to other parallel transactions. In
fact, transactions on the same collection will be executed serially.
The following example will atomically transfer money from one user account to
another:
db._create("accounts");
db.accounts.save({ _key: "john", amount: 423 });
db.accounts.save({ _key: "fred", amount: 197 });
db._executeTransaction({
collections: {
write: "accounts"
},
params: {
user1: "fred",
user2: "john",
amount: 10
},
action: function (params) {
var db = require("internal").db;
var account1 = db.accounts.document(params['user1']);
var account2 = db.accounts.document(params['user2']);
var amount = params['amount'];
if (account1.amount < amount) {
throw "account of user '" + user1 + "' does not have enough money!";
}
db.accounts.update(account1, { amount : account1.amount - amount });
db.accounts.update(account2, { amount : account2.amount + amount });
/* will commit the transaction and return the value true */
return true;
}
});
Please refer to @ref Transactions for more details and examples on transaction
usage in ArangoDB.
New Administration Interface{#NewFeatures13Admin}
-------------------------------------------------
ArangoDB 1.3 comes with a new administration front-end. The front-end is now
based on Backbone and uses repl.it, which for instance allows line editing when
using the browser-based ArangoDB shell.
Please note that the "Application" tab belongs to the forthcoming @ref
NewFeatures13Foxx Foxx. The functionality below this tab is neither stable nor
complete. It has been shipped as a feature preview.
New Server Statistics{#NewFeatures13Statistics}
-----------------------------------------------
The server statistics provided by ArangoDB have been changed in 1.3.
Before version 1.3, the server provided a multi-level history of request and
connection statistics. Values for each incoming request and connection were kept
individually and mapped to the chronological period they appeared in. The server
then provided aggregated values for different periods, which was implemented
using a constant recalculation of the aggregation values.
To lower ArangoDB's CPU usage, the constant recalculation has been removed in
1.3. Instead, the server will now only keep aggregate values per figure
reported, but will not provide any chronological values.
Request and connection statistics values are 0 at server start, and will be
increased with each incoming request or connection. Clients querying the
statistics will see the accumulated values only. They can calculate the values
for a period of time by querying the statistics twice and calculating the
difference between the values themselves.
The REST APIs for the statistics in ArangoDB 1.3 can be found at:
/_admin/statistics
/_admin/statistics-description
The `/_admin/statistics-description` API can be used by clients to get
descriptions for the figures reported by `/_admin/statistics`. The description
will contain a textual description, the unit used for the value(s) and the
boundary of slot values used.
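The following arangosh sketch illustrates the query-twice-and-diff approach
described above. It assumes that arangosh's `arango.GET` helper returns the
parsed JSON response body:

    arangosh> var before = arango.GET("/_admin/statistics");
    /* ... let some requests happen ... */
    arangosh> var after = arango.GET("/_admin/statistics");
    /* number of requests answered between the two samples */
    arangosh> after.client.totalTime.count - before.client.totalTime.count;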
The previously available APIs
/_admin/request-statistics
/_admin/connection-statistics
are not available in ArangoDB 1.3 anymore.
AQL extensions{#NewFeatures13AQL}
---------------------------------
It is now possible to extend AQL with user-defined functions. These functions
need to be written in JavaScript and registered before they can be used in an
AQL query.
arangosh> var aqlfunctions = require("org/arangodb/aql/functions");
arangosh> aqlfunctions.register("myfunctions:double", function (value) { return value * 2; }, true);
false
arangosh> db._query("RETURN myfunctions:double(4)").toArray();
[ 8 ]
Please refer to @ref ExtendingAql for more details on this.
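The same module can also be used to remove a user-defined function again. A
small sketch, assuming the function registered above:

    arangosh> aqlfunctions.unregister("myfunctions:double");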
There have been the following additional changes to AQL in ArangoDB 1.3 (see the
sketch after this list):
* added AQL statistical functions `VARIANCE_POPULATION`, `VARIANCE_SAMPLE`,
`STDDEV_POPULATION`, `STDDEV_SAMPLE`, `AVERAGE`, `MEDIAN`. These functions
work on lists.
* added AQL numeric function `SQRT` to calculate square-roots.
* added AQL string functions `TRIM`, `LEFT` and `RIGHT` for easier string and
substring handling.
* the AQL functions `REVERSE` and `LENGTH` now work on string values, too.
Previously they were allowed for lists only.
* made "limit" an optional parameter in the `NEAR` function. The "limit" parameter
can now be either omitted completely, or set to 0. If so, an internal
default value (currently 100) will be applied for the limit.
Please refer to @ref AqlFunctions for detailed information on the AQL functions.
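For example, some of the new functions can be tried out directly from arangosh,
using literal lists only:

    arangosh> db._query("RETURN [ AVERAGE([ 1, 2, 3 ]), MEDIAN([ 1, 2, 3, 4 ]), SQRT(16) ]").toArray();
    [ [ 2, 2.5, 4 ] ]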
Node Modules and Packages{#NewFeatures13Node}
---------------------------------------------
ArangoDB 1.3 supports some of the @ref JSModulesNode "modules" and @ref JSModulesNPM
"packages" from node. Maybe the most important module is the Buffer support,
which allows handling binary data in JavaScript.
arangosh> var Buffer = require("buffer").Buffer;
arangosh> a = new Buffer("414243", "hex");
ABC
arangosh> a = new Buffer("414243", "ascii");
414243
arangosh> a = new Buffer([48, 49, 50]);
012
Supplying the Buffer class makes it possible to use other interesting modules
like punycode. It enables us to support some of the NPM packages available, for
instance CoffeeScript.
arangosh> var cs = require("coffee-script");
arangosh> cs.compile("a = 1");
(function() {
var a;
a = 1;
}).call(this);
arangosh> cs.compile("square = x -> x * x", { bare: true });
var square;
square = x(function() {
return x * x;
});
"underscore" is also preinstalled.
arangosh> var _ = require("underscore");
arangosh> _.map([1,2,3], function(x) {return x*x;});
[
1,
4,
9
]
The node packages can be installed using npm in the "share/npm" directory. If
you find that a node package also works under ArangoDB, please share your
findings with us and other users.
Miscellaneous changes{#NewFeatures13Misc}
-----------------------------------------
* Added server startup option `--database.force-sync-properties` to force syncing of
collection properties on collection creation, deletion and on collection properties
change.
The default value is `true` to mimic the behavior of previous versions of ArangoDB.
If set to `false`, collection properties are still written to disk but no immediate
system call to sync() is made.
Setting the `--database.force-sync-properties` to `false` may speed up running
test suites on systems where sync() is expensive, but is discouraged for regular
use cases.
* ArangoDB will now reject saving documents with an invalid "type".
Previous versions of ArangoDB didn't reject documents that were just scalar values
without any attribute names.
Starting with version 1.3, each document saved in ArangoDB must be a JSON object
consisting of attribute name / attribute value pairs.
Storing the following types of documents will be rejected by the server:
[ "foo", "bar" ]
1.23
"test"
Of course such values can be stored inside valid documents, e.g.
{ "data" : [ "foo", "bar" ] }
{ "number" : 1.23 }
{ "value" : "test" }
User-defined document attribute names must also start with a letter or a number.
It is disallowed to use user-defined attribute names starting with an underscore,
because names starting with an underscore are reserved for ArangoDB's
internal use.
* Changed return value of REST API method `/_admin/log`:
Previously, the log messages returned by the API in the `text` attribute also
contained the date and log level, which was redundant.
In ArangoDB 1.3, the values in the `text` attribute contain only the mere log
message, and no date and log level. Dates and log levels for the individual
messages are still available in the separate `timestamp` and `level` attributes.
* Extended output of server version and components for REST APIs `/_admin/version`
and `/_api/version`:
To retrieve the extended information, the REST APIs can be called with the URL
parameter `details=true`. This will provide a list of server version details in
the `details` attribute of the result (see the sketch after this list).
* Extended output for REST API `/_api/collection/<name>/figures`:
The result will now contain an attribute `attributes` with a sub-attribute `count`.
This value provides the number of different attributes that are or have been used
in the collection.
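A short arangosh sketch of the last two items, assuming a collection named
`products` exists and using arangosh's plain HTTP helper `arango.GET` (the
exact nesting of the figures result may differ):

    arangosh> arango.GET("/_api/version?details=true").details;
    arangosh> arango.GET("/_api/collection/products/figures").figures;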


@ -0,0 +1,12 @@
TOC {#NewFeatures13TOC}
=======================
- @ref NewFeatures13Introduction
- @ref NewFeatures13Datafile
- @ref NewFeatures13Foxx
- @ref NewFeatures13Transactions
- @ref NewFeatures13Admin
- @ref NewFeatures13Statistics
- @ref NewFeatures13AQL
- @ref NewFeatures13Node
- @ref NewFeatures13Misc


@ -0,0 +1,13 @@
Upgrading ArangoDB{#Upgrading}
==============================
- @ref NewFeatures13
- @ref Upgrading13
Older Releases:
- @BOOK_REF{NewFeatures12}
- @BOOK_REF{Upgrading12}
- @BOOK_REF{NewFeatures11}
- @BOOK_REF{Upgrading11}


@ -430,7 +430,7 @@ Removed Functionality {#Upgrading12RemovedFunctionality}
The global `edges` variable has been removed in `arangosh`. In ArangoDB 1.1 and
before this variable could be used to create and access edge collections.
Since ArangoDb 1.1, all collections can be accessed using the `db` variable, and
Since ArangoDB 1.1, all collections can be accessed using the `db` variable, and
there was no need to keep the `edges` variable any longer.
### arangoimp


@ -0,0 +1,186 @@
Upgrading to ArangoDB 1.3 {#Upgrading13}
========================================
@NAVIGATE_Upgrading13
@EMBEDTOC{Upgrading13TOC}
Upgrading {#Upgrading13Introduction}
====================================
ArangoDB 1.3 provides a lot of new features and APIs when compared to ArangoDB
1.2, the most important one being true multi-collection transaction support.
The following list contains changes in ArangoDB 1.3 that are not 100%
downwards-compatible to ArangoDB 1.2.
Existing users of ArangoDB 1.2 should read the list carefully and make sure they
have undertaken all necessary steps and precautions before upgrading from
ArangoDB 1.2 to ArangoDB 1.3. Please also check @ref Upgrading13Troubleshooting.
Database Directory Version Check and Upgrade {#Upgrading13VersionCheck}
-----------------------------------------------------------------------
Starting with ArangoDB 1.1, _arangod_ will perform a database version check at
startup. This has not changed in ArangoDB 1.3.
The version check will look for a file named _VERSION_ in its database
directory. If the file is not present, _arangod_ in version 1.3 will perform an
auto-upgrade (which can also be considered a database "initialisation"). This
auto-upgrade will create the system collections necessary to run ArangoDB, and
it will also create the VERSION file with a version number like `1.3` inside.
If the _VERSION_ file is present but is from a non-matching version of ArangoDB
(e.g. ArangoDB 1.2 if you upgrade), _arangod_ will refuse to start. Instead, it
will ask you to start the server with the option `--upgrade`. Using the
`--upgrade` option will make the server trigger any required upgrade tasks to
migrate data from ArangoDB 1.2 to ArangoDB 1.3.
This manual invocation of an upgrade shall ensure that users have full control
over when they perform any updates/upgrades of their data, and do not risk
running an incompatible combination of server and database versions.
If you start an ArangoDB 1.3 server on a database directory created by an earlier
version of ArangoDB without invoking the upgrade procedure, the output of
ArangoDB will look like this:
> bin/arangod --server.endpoint tcp://127.0.0.1:8529 --database.directory /tmp/12
...
2013-05-10T08:35:59Z [9017] ERROR Database directory version (1.2) is lower than server version (1.3).
2013-05-10T08:35:59Z [9017] ERROR It seems like you have upgraded the ArangoDB binary. If this is what you wanted to do, please restart with the --upgrade option to upgrade the data in the database directory.
2013-05-10T08:35:59Z [9017] FATAL Database version check failed. Please start the server with the --upgrade option
...
So you really do need to invoke the upgrade procedure explicitly. Please
create a backup of your database directory before upgrading. Please also make
sure that the database directory and all subdirectories and files in it are
writable for the user you run ArangoDB with.
As mentioned, invoking the upgrade procedure can be done by specifying the
additional command line option `--upgrade` as follows:
> bin/arangod --server.endpoint tcp://127.0.0.1:8529 --database.directory /tmp/12 --upgrade
...
2013-05-10T08:38:41Z [9039] INFO Starting upgrade from version 1.2 to 1.3
2013-05-10T08:38:41Z [9039] INFO Found 20 defined task(s), 8 task(s) to run
...
2013-05-10T08:38:43Z [9039] INFO Upgrade successfully finished
2013-05-10T08:38:43Z [9039] INFO database version check passed
...
The upgrade procedure will execute the tasks required to run _arangod_ with all
new features and data formats. It should normally run without problems and
indicate success at its end. If it detects a problem that it cannot fix, it will
halt on the first error and warn you.
Re-starting arangod with the `--upgrade` option will execute only the previously
failed and not yet executed tasks.
Upgrading from ArangoDB 1.2 to ArangoDB 1.3 will rewrite data in the collections'
datafiles. The upgrade procedure will create a backup of all files it processes,
in case something goes wrong.
In case the upgrade did not produce any problems and ArangoDB works well for you
after the upgrade, you may want to remove these backup files manually.
All collection datafiles will be backed up in the original collection
directories in the database directory. You can easily detect the backup files
because they have a filename ending in `.old`.
Upgrade a binary package {#Upgrading13BinaryPackage}
---------------------------------------------------
Linux:
- Upgrade ArangoDB via your package manager (for example `zypper update arangodb`)
- Check the configuration file: `/etc/arangodb/arangod.conf`
- Upgrade the database files with `/etc/init.d/arangodb upgrade`
Mac OS X binary package
- You can find the new Mac OS X packages here: `http://www.arangodb.org/repositories/MacOSX`
- Check the configuration file: `/etc/arangodb/arangod.conf`
- Upgrade the database files with `/usr/sbin/arangod --upgrade`
Mac OS X with homebrew
- Upgrade ArangoDB with `brew upgrade arangodb`
- Check the configuration file: `/usr/local/Cellar/1.X.Y/etc/arangodb/arangod.conf`
- Upgrade the database files with `/usr/local/sbin/arangod --upgrade`
In case you upgrade from a previous version of ArangoDB, please make sure you
perform the changes to the configuration file as described in @ref Upgrading13Options.
Otherwise ArangoDB 1.3 will not start properly.
New and Changed Command-Line Options{#Upgrading13Options}
---------------------------------------------------------
In order to support node modules and packages, a new command-line option was
introduced:
--javascript.package-path <directory>
must be used to specify the directory containing the NPM packages. This option
is present in the pre-defined configuration files. In case you created your own
configuration, you need to add this option and also make sure that
--javascript.modules-path <directories>
contains the new `node` directory.
Example values for `--javascript.modules-path` and `--javascript.package-path` are:
--javascript.modules-path = DIR/js/server/modules;DIR/js/common/modules;DIR/js/node
--javascript.package-path = DIR/js/npm
where `DIR` is the directory that contains the shared data installed by ArangoDB
during installation. It might be `/usr/local/share/arangodb`, but the actual value is
system-dependent.
Not adding the `node` directory to `--javascript.modules-path` or not setting
`--javascript.package-path` will result in server startup errors.
The configuration options `--scheduler.report-intervall` and `--dispatcher.report-intervall`
have been renamed to `--scheduler.report-interval` and `--dispatcher.report-interval`.
These are rarely used debugging options that are not contained in any of the configuration
files shipped with ArangoDB, so the name change should not affect end users.
Removed Features{#Upgrading13RemovedFeatures}
---------------------------------------------
The configure options `--enable-zone-debug` and `--enable-arangob` have been removed.
These should not have been used by end users anyway, so this change should not have
an effect.
Troubleshooting{#Upgrading13Troubleshooting}
============================================
If the server does not start after an upgrade, please check that your configuration
contains the `node` directory in the `--javascript.modules-path`, and that the
parameter `--javascript.package-path` is set.
On systems with rlimit, ArangoDB 1.3 will also require a minimum of 256 usable file
descriptors. If the limit is set to a lower value, ArangoDB will
try to increase the limit to 256. If raising the limit fails, ArangoDB will refuse
to start and fail with an error message like
2013-05-10T09:00:40Z [11492] FATAL cannot raise the file descriptor limit to 256, got 'Operation not permitted'
In this case the number of file descriptors should be increased. Please note that 256
is a minimum value and that you should allow ArangoDB to use many more file descriptors
if you can afford it.
To avoid the file descriptor check on startup in an emergency case, you can use the
startup option
--server.descriptors-minimum 0
Please also check the logfile written by ArangoDB for further error messages.


@ -0,0 +1,8 @@
TOC {#Upgrading13TOC}
=====================
- @ref Upgrading13Introduction
- @ref Upgrading13VersionCheck
- @ref Upgrading13BinaryPackage
- @ref Upgrading13Options
- @ref Upgrading13Troubleshooting


@ -7,7 +7,7 @@
###
### find files in
### arangod/RestHandler/*.cpp
### js/actions/system/api-*.js
### js/actions/api-*.js
### @usage generateSwagger.py < RestXXXX.cpp > restSwagger.json
###
### @file
@ -41,7 +41,7 @@
### Copyright holder is triAGENS GmbH, Cologne, Germany
###
### @author Thomas Richter
### @author Copyright 2013, triagens GmbH, Cologne, Germany
### @author Copyright 2013, triAGENS GmbH, Cologne, Germany
################################################################################
import sys, re, json, string
@ -135,7 +135,7 @@ class StateMachine:
class Regexen:
def __init__(self):
self.brief = re.compile('.*@brief')
self.RESTHEADER = re.compile('.*@RESTHEADER')
self.RESTHEADER = re.compile('.*@RESTHEADER{')
self.RESTURLPARAMETERS = re.compile('.*@RESTURLPARAMETERS')
self.RESTQUERYPARAMETERS = re.compile('.*@RESTQUERYPARAMETERS')
self.RESTHEADERPARAMETERS = re.compile('.*@RESTHEADERPARAMETERS')
@ -161,8 +161,11 @@ def resturlparameters(cargo, r=Regexen()):
if not line: return eof, (fp, line)
elif r.read_through.match(line): return read_through, (fp, line)
elif r.RESTURLPARAM.match(line): return resturlparam, (fp, line)
elif r.RESTQUERYPARAMETERS.match(line): return restqueryparameters, (fp, line)
elif r.RESTHEADERPARAMETERS.match(line): return restheaderparameters, (fp, line)
elif r.RESTBODYPARAM.match(line): return restbodyparam, (fp, line)
elif r.RESTDESCRIPTION.match(line): return restdescription, (fp, line)
else: continue
else: continue
def resturlparam(cargo, r=Regexen()):
fp, last = cargo
@ -192,9 +195,12 @@ def restqueryparameters(cargo, r=Regexen()):
line = fp.readline()
if not line: return eof, (fp, line)
elif r.read_through.match(line): return read_through, (fp, line)
elif r.RESTQUERYPARAM.match(line): return restqueryparam, (fp, line)
elif r.RESTURLPARAMETERS.match(line): return resturlparameters, (fp, line)
elif r.RESTHEADERPARAMETERS.match(line): return restheaderparameters, (fp, line)
elif r.RESTBODYPARAM.match(line): return restbodyparam, (fp, line)
elif r.RESTDESCRIPTION.match(line): return restdescription, (fp, line)
else: continue
elif r.RESTQUERYPARAM.match(line): return restqueryparam, (fp, line)
else: continue
def restheaderparameters(cargo, r=Regexen()):
fp, last = cargo
@ -203,45 +209,53 @@ def restheaderparameters(cargo, r=Regexen()):
if not line: return eof, (fp, line)
elif r.read_through.match(line): return read_through, (fp, line)
elif r.RESTHEADERPARAM.match(line): return restheaderparam, (fp, line)
elif r.RESTQUERYPARAMETERS.match(line): return restqueryparameters, (fp, line)
elif r.RESTURLPARAMETERS.match(line): return resturlparameters, (fp, line)
elif r.RESTBODYPARAM.match(line): return restbodyparam, (fp, line)
elif r.RESTDESCRIPTION.match(line): return restdescription, (fp, line)
else: continue
else: continue
def restheaderparam(cargo, r=Regexen()):
# TODO
fp, last = cargo
parametersList = parameters(last).split(',')
para = {}
para['paramType'] = 'header'
para['dataType'] = parametersList[1].capitalize()
if parametersList[2] == 'required':
para['required'] = 'true'
else:
para['required'] = 'false'
para['name'] = parametersList[0]
para['description']=''
while 1:
line = fp.readline()
if not line: return eof, (fp, line)
elif r.read_through.match(line): return read_through, (fp, line)
elif r.RESTQUERYPARAMETERS.match(line): return restqueryparameters, (fp, line)
elif r.RESTBODYPARAM.match(line): return restbodyparam, (fp, line)
elif r.RESTDESCRIPTION.match(line): return restdescription, (fp, line)
elif r.EMPTY_COMMENT.match(line):
operation['parameters'].append(para)
return restqueryparameters, (fp, line)
return restheaderparameters, (fp, line)
else:
para['description'] += Typography(line[4:-1]) + ' '
def restbodyparam(cargo, r=Regexen()):
# TODO see POST processing in comment till PUT
fp, last = cargo
parametersList = parameters(last).split(',')
para = {}
para['paramType'] = 'body'
para['dataType'] = parametersList[1].capitalize()
if parametersList[2] == 'required':
para['required'] = 'true'
para['name'] = parametersList[0]
para['description']=''
while 1:
line = fp.readline()
if not line: return eof, (fp, line)
elif r.read_through.match(line): return read_through, (fp, line)
elif r.RESTQUERYPARAM.match(line): return restqueryparam, (fp, line)
elif r.RESTURLPARAMETERS.match(line): return resturlparameters, (fp, line)
elif r.RESTHEADERPARAMETERS.match(line): return restheaderparameters, (fp, line)
elif r.RESTQUERYPARAMETERS.match(line): return restqueryparameters, (fp, line)
elif r.RESTDESCRIPTION.match(line): return restdescription, (fp, line)
else: continue
elif r.EMPTY_COMMENT.match(line):
operation['parameters'].append(para)
return comment, (fp, line)
else:
para['description'] += Typography(line[4:-1]) + ' '
def restqueryparam(cargo, r=Regexen()):
fp, last = cargo
@ -250,9 +264,7 @@ def restqueryparam(cargo, r=Regexen()):
para['paramType'] = 'query'
para['dataType'] = parametersList[1].capitalize()
if parametersList[2] == 'required':
para['required'] = 'true'
else:
para['required'] = 'false'
para['required'] = 'True'
para['name'] = parametersList[0]
para['description']=''
while 1:
@ -277,9 +289,9 @@ def restdescription(cargo, r=Regexen()):
operation['notes'] += '<br><br>'
elif r.DESCRIPTION_LI.match(line): operation['notes'] += Typography(line[4:-1]) + '<br>'
elif r.read_through.match(line): return read_through, (fp, line)
elif r.EXAMPLES.match(line):
return examples, (fp, line)
elif r.RESTRETURNCODES.match(line): return restreturncodes, (fp, line)
elif r.EXAMPLES.match(line): return examples, (fp, line)
elif len(line) >= 4 and line[:4] == "////": continue
elif r.RESTRETURNCODES.match(line): return restreturncodes, (fp, line)
else:
operation['notes'] += Typography(line[4:-1]) + ' '
last = line
@ -360,7 +372,6 @@ def comment(cargo, r=Regexen()):
while 1:
line = fp.readline()
if not line: return eof, (fp, line)
# elif r.brief.match(line): print line[4:-1]
elif r.RESTHEADER.match(line):
temp = parameters(line).split(',')
method, path = temp[0].split()
@ -373,14 +384,6 @@ def comment(cargo, r=Regexen()):
_operation = { 'httpMethod': None, 'nickname': None, 'parameters': [],
'summary': None, 'notes': '', 'examples': '', 'errorResponses':[]}
_operation['httpMethod'] = method
if method == 'POST' or method == 'PUT' or method == 'PATCH':
parameter = {}
parameter['paramType'] = 'body'
parameter['name'] = 'body'
parameter['description'] = 'A valid json document for your data, for instance {"hello": "world"}.'
parameter['dataType'] = 'String'
parameter['required'] = 'false'
_operation['parameters'] = [parameter]
summaryList = summary.split()
_operation['nickname'] = summaryList[0] + ''.join([word.capitalize() for word in summaryList[1:]])
_operation['summary'] = summary
@ -389,6 +392,7 @@ def comment(cargo, r=Regexen()):
operation = _operation
elif r.RESTURLPARAMETERS.match(line): return resturlparameters, (fp, line)
elif r.RESTHEADERPARAMETERS.match(line): return restheaderparameters, (fp, line)
elif r.RESTBODYPARAM.match(line): return restbodyparam, (fp, line)
elif r.RESTQUERYPARAMETERS.match(line): return restqueryparameters, (fp, line)
elif r.RESTDESCRIPTION.match(line): return restdescription, (fp, line)
elif len(line) >= 4 and line[:4] == "////": continue
@ -404,7 +408,6 @@ def read_through(cargo):
elif len(line) >= 3 and line[:3] == "///": return comment, (fp, line)
else: continue
if __name__ == "__main__":
automat = StateMachine()
automat.add_state(read_through)
@ -425,3 +428,5 @@ if __name__ == "__main__":
automat.add_state(error, end_state=1)
automat.set_start(read_through)
automat.run((sys.stdin, ''))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4


@ -997,6 +997,16 @@ AQL supports the following functions to operate on list values:
are ignored. If the list is empty or only `null` values are contained in the list,
the function will return `null`.
- @FN{STDDEV_POPULATION(@FA{list})}: returns the population standard deviation of the
values in @FA{list}. This requires the elements in @FA{list} to be numbers. `null`
values are ignored. If the list is empty or only `null` values are contained in the list,
the function will return `null`.
- @FN{STDDEV_SAMPLE(@FA{list})}: returns the sample standard deviation of the values in
@FA{list}. This requires the elements in @FA{list} to be numbers. `null` values
are ignored. If the list is empty or only `null` values are contained in the list,
the function will return `null`.
- @FN{REVERSE(@FA{list})}: returns the elements in @FA{list} in reversed order.
- @FN{FIRST(@FA{list})}: returns the first element in @FA{list} or `null` if the


@ -42,6 +42,9 @@ User functions can take any number of input arguments and should
provide one result. They should be kept purely functional and thus free of
side effects and state.
In particular, it is not supported to modify any global variables or to change
data of a collection from inside an AQL user function.
User function code is late-bound, and may thus not rely on any variables
that existed at the time of declaration. If user function code requires
access to any external data, it must take care to set up the data by


@ -14,9 +14,9 @@ So given you want to build an application that sends a plain-text response "Work
First, create a directory `my_app` and save a file called `app.js` in this directory. Write the following content to this file:
Foxx = require("org/arangodb/foxx");
var Foxx = require("org/arangodb/foxx");
app = new Foxx.Application();
var app = new Foxx.Application();
app.get("/wiese", function(req, res) {
res.set("Content-Type", "text/plain");
@ -28,16 +28,26 @@ First, create a directory `my_app` and save a file called `app.js` in this direc
This is your application. Now we need to mount it to the path `/my`. In order to achieve that, we create a file called `manifest.json` in our `my_app` directory with the following content:
{
"name": "my_app",
"version": "0.0.1",
"apps": {
"/my": "app.js"
"/": "app.js"
}
}
You **must** specify a name and a version number for your application, otherwise it won't be loaded into ArangoDB.
Now your application is done. Start ArangoDB as follows:
arangod --javascript.dev-app-path my_app /tmp/fancy_db
$ arangod --javascript.dev-app-path my_app /tmp/fancy_db
Now point your browser to `/my/wiese` and you should see "Worked!". After this short overview, let's get into the details.
To include it in the list of apps running on your ArangoDB instance, start the ArangoDB shell and add your new application:
$ arangosh
arangosh> aal = require('org/arangodb/aal');
arangosh> aal.installDevApp('my_app', '/my');
Now point your browser to `http://localhost:8529/my/wiese` and you should see "Worked!". After this short overview, let's get into the details.
## Details on Foxx.Application


@ -0,0 +1,6 @@
Graphs {#Graphs}
================
@NAVIGATE_Graphs
@EMBEDTOC{JSModuleGraphTOC}


@ -0,0 +1,4 @@
TOC {#GraphsTOC}
================
@EMBEDTOC{JSModuleGraphTOC}


@ -79,7 +79,7 @@ attribute of the object passed to the `_executeTransaction` function. The
db._executeTransaction({
collections: {
write: [ "users", "logins" ],
read: [ "recommendations" ],
read: [ "recommendations" ]
},
...
});
@ -93,7 +93,7 @@ single collection name (as a string):
db._executeTransaction({
collections: {
write: "users",
read: "recommendations",
read: "recommendations"
},
...
});
@ -114,7 +114,7 @@ attribute:
db._executeTransaction({
collections: {
write: "users",
write: "users"
},
action: function () {
/* all operations go here */
@ -128,7 +128,7 @@ of a Javascript function:
db._executeTransaction({
collections: {
write: "users",
write: "users"
},
action: "function () { /* all operations go here */ }"
});
@ -141,7 +141,7 @@ Instead, any variables used inside `action` should be defined inside `action` it
db._executeTransaction({
collections: {
write: "users",
write: "users"
},
action: function () {
var db = require(...).db;
@ -159,7 +159,7 @@ be thrown and not caught inside the transaction:
db._executeTransaction({
collections: {
write: "users",
write: "users"
},
action: function () {
var db = require("internal").db;
@ -178,7 +178,7 @@ case, the user can return any legal Javascript value from the function:
db._executeTransaction({
collections: {
write: "users",
write: "users"
},
action: function () {
var db = require("internal").db;
from inside a running transaction, the server will throw error `1651 (nested
transactions detected)`.
It is also disallowed to execute user transaction on some of ArangoDB's own system
collections. This shouldn't be problem for regular usage as system collections will
collections. This shouldn't be a problem for regular usage as system collections will
not contain user data and there is no need to access them from within a user
transaction.


@ -10,6 +10,7 @@ ArangoDB's User Manual (@VERSION) {#UserManual}
@CHAPTER_REF{HandlingDocuments}
@CHAPTER_REF{HandlingEdges}
@CHAPTER_REF{SimpleQueries}
@CHAPTER_REF{Graphs}
@CHAPTER_REF{Aql}
@CHAPTER_REF{ExtendingAql}
@CHAPTER_REF{AqlExamples}


@ -217,15 +217,16 @@ ALIASES += \
ALIASES += \
"RESTHEADER{2}=@latexonly\restheader{@endlatexonly@htmlonly<div class=\"restheader\">@endhtmlonly@xmlonly<computeroutput>@endxmlonly\1@latexonly,@endlatexonly@htmlonly<div class=\"restheaderremark\">(@endhtmlonly@xmlonly</computeroutput> - @endxmlonly\2@latexonly}@endlatexonly@htmlonly)</div></div>@endhtmlonly@xmlonly@endxmlonly" \
"REST{1}=@latexonly\restcall{@endlatexonly@htmlonly<div class=\"restcall\">@endhtmlonly@xmlonly<computeroutput>@endxmlonly\1@latexonly}@endlatexonly@htmlonly</div>@endhtmlonly@xmlonly</computeroutput>@endxmlonly" \
"RESTURLPARAMETERS=@htmlonly<div class=\"resturlparameters\">URL parameters</div>@endhtmlonly" \
"RESTURLPARAM{3}=@htmlonly<div class=\"resturlparam\">@endhtmlonly\1 (\2,\3)@htmlonly</div>@endhtmlonly" \
"RESTQUERYPARAMETERS=@htmlonly<div class=\"restqueryparameters\">Query Parameters</div>@endhtmlonly" \
"RESTQUERYPARAM{3}=@htmlonly<div class=\"restqueryparam\">@endhtmlonly\1 (\2,\3)@htmlonly</div>@endhtmlonly" \
"RESTHEADERPARAMETERS=@htmlonly<div class=\"restheaderparameters\">HTTP header parameters</div>@endhtmlonly" \
"RESTHEADERPARAM{3}=@htmlonly<div class=\"restheaderparam\">@endhtmlonly\1 (\2,\3)@htmlonly</div>@endhtmlonly" \
"RESTBODYPARAM{3}=@htmlonly<div class=\"restbodyparam\">@endhtmlonly\1 (\2,\3)@htmlonly</div>@endhtmlonly" \
"RESTRETURNCODES=@htmlonly<div class=\"restreturncodes\">Return Codes</div>@endhtmlonly" \
"RESTRETURNCODE{1}=@htmlonly<div class=\"restreturncode\">HTTP \1</div>@endhtmlonly" \
"RESTDESCRIPTION=@htmlonly<div class=\"restdescription\">Description</div>@endhtmlonly" \
"RESTURLPARAMETERS=@htmlonly<div class=\"resturlparameters\">URL parameters</div>@endhtmlonly" \
"RESTURLPARAM{3}=@htmlonly<div class=\"resturlparam\">URL parameter</div>@endhtmlonly" \
"RESTHEADERPARAMETERS=@htmlonly<div class=\"restheaderparameters\">HTTP header parameters</div>@endhtmlonly" \
"RESTHEADERPARAM{2}=@htmlonly<div class=\"restheaderparam\">HTTP header</div>@endhtmlonly"
# navigation
ALIASES += \


@ -123,12 +123,32 @@
margin-bottom: 14px;
}
#content div.arangodb div.restheaderremark {
#content div.arangodb div.restbodyparam {
font-size: 18px;
float: right;
font-family: "Helvetica",sans-serif;
}
#content div.arangodb div.resturlparameters {
font-weight: bold;
margin-top: 14px;
margin-bottom: 14px;
}
#content div.arangodb div.resturlparam {
font-style: italic;
}
#content div.arangodb div.restheaderparameters {
font-weight: bold;
margin-top: 14px;
margin-bottom: 14px;
}
#content div.arangodb div.restheaderparam {
font-style: italic;
}
#content div.arangodb div.restqueryparameters {
font-weight: bold;
margin-top: 14px;


@ -10,7 +10,7 @@ OUTPUT_FOLDER=Doxygen/manuals/$(PACKAGE_VERSION)
.PHONY: publish publish-wiki publish-html publish-pdf publish-copy
publish: publish-wiki publish-html publish-pdf
publish: examples publish-wiki publish-html publish-pdf
make publish-copy
publish-copy:
@ -27,7 +27,7 @@ publish-html:
@for w in $(WIKI); do cp Doxygen/website/$$w.html $(OUTPUT_FOLDER); done
@for i in `ls Doxygen/website/images/*.png`; do cp $$i $(OUTPUT_FOLDER)/images/; done
@cp Doxygen/website/arangodb.css $(OUTPUT_FOLDER)
@cp Documentation/Manual/arangodb_1.2_shell_reference_card.pdf $(OUTPUT_FOLDER)
@cp Documentation/Manual/arangodb_$(MAJOR_MINOR)_shell_reference_card.pdf $(OUTPUT_FOLDER)
publish-pdf: $(OUTPUT_FOLDER)
$(MAKE) latex
@ -38,4 +38,3 @@ publish-pdf: $(OUTPUT_FOLDER)
cp Doxygen/latex/ref-manual.pdf $(OUTPUT_FOLDER)
cp Doxygen/latex/imp-manual.pdf $(OUTPUT_FOLDER)
cp Documentation/Manual/arangodb_1.2_shell_reference_card.pdf $(OUTPUT_FOLDER)


@ -1,5 +1,14 @@
# -*- mode: Makefile; -*-
## -----------------------------------------------------------------------------
## --SECTION-- COMMON DEFINES
## -----------------------------------------------------------------------------
comma := ,
empty :=
space := $(empty) $(empty)
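# derive MAJOR.MINOR from VERSION, e.g. VERSION = 1.3.0-rc1 gives MAJOR_MINOR = 1.3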
MAJOR_MINOR := $(subst $(space),.,$(wordlist 1,2,$(subst ., ,$(VERSION))))
## -----------------------------------------------------------------------------
## --SECTION-- FILES
## -----------------------------------------------------------------------------


@ -135,7 +135,7 @@ describe ArangoDB do
rescue HTTParty::RedirectionTooDeep => e
# check response code
e.response.code.should eq("301")
e.response.header['location'].should eq("/_admin/html/index.html")
e.response.header['location'].should =~ /^\/_admin\/html\/index.html$/
end
end
@ -146,7 +146,7 @@ describe ArangoDB do
rescue HTTParty::RedirectionTooDeep => e
# check response code
e.response.code.should eq("301")
e.response.header['location'].should eq("/_admin/html/index.html")
e.response.header['location'].should =~ /^\/_admin\/html\/index.html$/
end
end
@ -157,7 +157,7 @@ describe ArangoDB do
rescue HTTParty::RedirectionTooDeep => e
# check response code
e.response.code.should eq("301")
e.response.header['location'].should eq("/_admin/html/index.html")
e.response.header['location'].should =~ /^https?:\/\/.*\/_admin\/html\/index.html$/
end
end
end


@ -96,7 +96,7 @@ describe ArangoDB do
## adding and deleting functions
################################################################################
context "adding functions" do
context "adding and deleting functions" do
before do
ArangoDB.delete("/_api/aqlfunction/UnitTests%3Amytest")
end
@ -153,36 +153,87 @@ describe ArangoDB do
doc.parsed_response['code'].should eq(404)
doc.parsed_response['errorNum'].should eq(1582)
end
it "add function, delete multiple" do
body = "{ \"name\" : \"UnitTests:mytest:one\", \"code\": \"function () { return 1; }\" }"
doc = ArangoDB.log_post("#{prefix}-add-function4", api, :body => body)
doc.code.should eq(201)
body = "{ \"name\" : \"UnitTests:mytest:two\", \"code\": \"function () { return 1; }\" }"
doc = ArangoDB.log_post("#{prefix}-add-function4", api, :body => body)
doc.code.should eq(201)
body = "{ \"name\" : \"UnitTests:foo\", \"code\": \"function () { return 1; }\" }"
doc = ArangoDB.log_post("#{prefix}-add-function4", api, :body => body)
doc.code.should eq(201)
doc = ArangoDB.log_delete("#{prefix}-add-function4", api + "/UnitTests%3Amytest?group=true")
doc.code.should eq(200)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(false)
doc.parsed_response['code'].should eq(200)
doc = ArangoDB.log_delete("#{prefix}-add-function4", api + "/UnitTests%3Amytest%3Aone")
doc.code.should eq(404)
doc = ArangoDB.log_delete("#{prefix}-add-function4", api + "/UnitTests%3Amytest%3Atwo")
doc.code.should eq(404)
doc = ArangoDB.log_delete("#{prefix}-add-function4", api + "/UnitTests%3Afoo")
doc.code.should eq(200)
end
end
################################################################################
## retrieving the list of functions
################################################################################
context "adding functions" do
context "retrieving functions" do
before do
ArangoDB.delete("/_api/aqlfunction/UnitTests%3Amytest")
ArangoDB.delete("/_api/aqlfunction/UnitTests?group=true")
end
after do
ArangoDB.delete("/_api/aqlfunction/UnitTests%3Amytest")
ArangoDB.delete("/_api/aqlfunction/UnitTests?group=true")
end
it "add function and retrieve the list" do
body = "{ \"name\" : \"UnitTests:mytest\", \"code\": \"function () { return 1; }\" }"
doc = ArangoDB.log_post("#{prefix}-list-functions", api, :body => body)
doc = ArangoDB.log_post("#{prefix}-list-functions1", api, :body => body)
doc.code.should eq(201)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(false)
doc.parsed_response['code'].should eq(201)
doc = ArangoDB.log_get("#{prefix}-list-functions", api + "?prefix=UnitTests")
doc = ArangoDB.log_get("#{prefix}-list-functions1", api + "?prefix=UnitTests")
doc.code.should eq(200)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response.length.should eq(1)
doc.parsed_response[0]['name'].should eq("UnitTests:mytest")
doc.parsed_response[0]['code'].should eq("function () { return 1; }")
end
it "add functions and retrieve the list" do
body = "{ \"name\" : \"UnitTests:mytest1\", \"code\": \"function () { return 1; }\" }"
doc = ArangoDB.log_post("#{prefix}-list-functions2", api, :body => body)
doc.code.should eq(201)
doc = ArangoDB.log_get("#{prefix}-list-functions2", api + "?prefix=UnitTests")
doc.code.should eq(200)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response.length.should eq(1)
doc.parsed_response[0]['name'].should eq("UnitTests:mytest1")
doc.parsed_response[0]['code'].should eq("function () { return 1; }")
body = "{ \"name\" : \"UnitTests:mytest1\", \"code\": \"( function () { return 3 * 5; } ) \" }"
doc = ArangoDB.log_post("#{prefix}-list-functions2", api, :body => body)
doc.code.should eq(200)
doc = ArangoDB.log_get("#{prefix}-list-functions2", api + "?prefix=UnitTests")
doc.code.should eq(200)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response.length.should eq(1)
doc.parsed_response[0]['name'].should eq("UnitTests:mytest1")
doc.parsed_response[0]['code'].should eq("( function () { return 3 * 5; } ) ")
end
end


@ -0,0 +1,191 @@
# coding: utf-8
require 'rspec'
require './arangodb.rb'
describe ArangoDB do
api = "/_api/document"
prefix = "documents"
context "dealing with documents" do
before do
@cn = "UnitTestsCollectionDocuments"
ArangoDB.drop_collection(@cn)
@cid = ArangoDB.create_collection(@cn)
end
after do
ArangoDB.drop_collection(@cn)
end
################################################################################
## creates documents with invalid types
################################################################################
it "creates a document with an invalid type" do
cmd = api + "?collection=" + @cn
body = "[ ]";
doc = ArangoDB.log_post("#{prefix}-create-list1", cmd, :body => body)
doc.code.should eq(400)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(true)
doc.parsed_response['code'].should eq(400)
doc.parsed_response['errorNum'].should eq(1227)
end
it "creates a document with an invalid type" do
cmd = api + "?collection=" + @cn
body = "\"test\"";
doc = ArangoDB.log_post("#{prefix}-create-list2", cmd, :body => body)
doc.code.should eq(400)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(true)
doc.parsed_response['code'].should eq(400)
doc.parsed_response['errorNum'].should eq(1227)
end
################################################################################
## updates documents with invalid types
################################################################################
it "updates a document with an invalid type" do
cmd = api + "/#{@cn}/test"
body = "[ ]";
doc = ArangoDB.log_patch("#{prefix}-update-object1", cmd, :body => body)
doc.code.should eq(400)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(true)
doc.parsed_response['code'].should eq(400)
doc.parsed_response['errorNum'].should eq(1227)
end
it "updates a document with an invalid type" do
cmd = api + "/#{@cn}/test"
body = "\"test\"";
doc = ArangoDB.log_patch("#{prefix}-update-object2", cmd, :body => body)
doc.code.should eq(400)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(true)
doc.parsed_response['code'].should eq(400)
doc.parsed_response['errorNum'].should eq(1227)
end
################################################################################
## replaces documents with invalid types
################################################################################
it "replaces a document with an invalid type" do
cmd = api + "/#{@cn}/test"
body = "[ ]";
doc = ArangoDB.log_put("#{prefix}-replace-object1", cmd, :body => body)
doc.code.should eq(400)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(true)
doc.parsed_response['code'].should eq(400)
doc.parsed_response['errorNum'].should eq(1227)
end
it "replaces a document with an invalid type" do
cmd = api + "/#{@cn}/test"
body = "\"test\"";
doc = ArangoDB.log_put("#{prefix}-replace-object2", cmd, :body => body)
doc.code.should eq(400)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(true)
doc.parsed_response['code'].should eq(400)
doc.parsed_response['errorNum'].should eq(1227)
end
################################################################################
## updates documents by example with invalid type
################################################################################
it "updates documents by example with an invalid type" do
cmd = "/_api/simple/update-by-example"
body = "{ \"collection\" : \"#{@cn}\", \"example\" : [ ], \"newValue\" : { } }";
doc = ArangoDB.log_put("#{prefix}-update-by-example1", cmd, :body => body)
doc.code.should eq(400)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(true)
doc.parsed_response['code'].should eq(400)
doc.parsed_response['errorNum'].should eq(400)
end
it "updates documents by example with an invalid type" do
cmd = "/_api/simple/update-by-example"
body = "{ \"collection\" : \"#{@cn}\", \"example\" : { }, \"newValue\" : [ ] }";
doc = ArangoDB.log_put("#{prefix}-update-by-example2", cmd, :body => body)
doc.code.should eq(400)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(true)
doc.parsed_response['code'].should eq(400)
doc.parsed_response['errorNum'].should eq(400)
end
################################################################################
## replaces documents by example with invalid type
################################################################################
it "replaces documents by example with an invalid type" do
cmd = "/_api/simple/replace-by-example"
body = "{ \"collection\" : \"#{@cn}\", \"example\" : [ ], \"newValue\" : { } }";
doc = ArangoDB.log_put("#{prefix}-replace-by-example1", cmd, :body => body)
doc.code.should eq(400)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(true)
doc.parsed_response['code'].should eq(400)
doc.parsed_response['errorNum'].should eq(400)
end
it "replaces documents by example with an invalid type" do
cmd = "/_api/simple/replace-by-example"
body = "{ \"collection\" : \"#{@cn}\", \"example\" : { }, \"newValue\" : [ ] }";
doc = ArangoDB.log_put("#{prefix}-replace-by-example2", cmd, :body => body)
doc.code.should eq(400)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(true)
doc.parsed_response['code'].should eq(400)
doc.parsed_response['errorNum'].should eq(400)
end
################################################################################
## removes documents by example with invalid type
################################################################################
it "removes a document with an invalid type" do
cmd = "/_api/simple/remove-by-example"
body = "{ \"collection\" : \"#{@cn}\", \"example\" : [ ] }";
doc = ArangoDB.log_put("#{prefix}-remove-by-example", cmd, :body => body)
doc.code.should eq(400)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(true)
doc.parsed_response['code'].should eq(400)
doc.parsed_response['errorNum'].should eq(400)
end
end
end
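
A note on the error code these specs assert: errorNum 1227 is TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID. In the JSON library of this codebase, TRI_JSON_ARRAY denotes a JSON object (attribute/value pairs) and TRI_JSON_LIST denotes a JSON array, which is why the handler diffs further down reject any request body whose type is not TRI_JSON_ARRAY. A minimal sketch of that guard (simplified; the real handlers call generateTransactionError):

    /* sketch of the body-type check these specs exercise; error plumbing simplified */
    static bool IsDocumentBody (TRI_json_t const* json) {
      if (json == NULL || json->_type != TRI_JSON_ARRAY) {
        /* not a JSON object: rejected with error 1227 (invalid document type) */
        return false;
      }
      return true;
    }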

View File

@ -394,6 +394,77 @@ describe ArangoDB do
end
end
################################################################################
## by-example query with skip / limit
################################################################################
context "by-example query with skip:" do
before do
@cn = "UnitTestsCollectionByExample"
ArangoDB.drop_collection(@cn)
@cid = ArangoDB.create_collection(@cn, false)
end
after do
ArangoDB.drop_collection(@cn)
end
it "finds the examples" do
body = "{ \"someAttribute\" : \"someValue\", \"someOtherAttribute\" : \"someOtherValue\" }"
doc = ArangoDB.post("/_api/document?collection=#{@cn}", :body => body)
doc.code.should eq(202)
body = "{ \"someAttribute\" : \"someValue\", \"someOtherAttribute2\" : \"someOtherValue2\" }"
doc = ArangoDB.post("/_api/document?collection=#{@cn}", :body => body)
doc.code.should eq(202)
cmd = api + "/by-example"
body = "{ \"collection\" : \"#{@cn}\", \"example\" : { \"someAttribute\" : \"someValue\" } }"
doc = ArangoDB.log_put("#{prefix}-by-example-skip", cmd, :body => body)
doc.code.should eq(201)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(false)
doc.parsed_response['code'].should eq(201)
doc.parsed_response['hasMore'].should eq(false)
doc.parsed_response['result'].length.should eq(2)
doc.parsed_response['count'].should eq(2)
body = "{ \"collection\" : \"#{@cn}\", \"example\" : { \"someAttribute\" : \"someValue\" }, \"skip\" : 1 }"
doc = ArangoDB.log_put("#{prefix}-by-example-skip", cmd, :body => body)
doc.code.should eq(201)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(false)
doc.parsed_response['code'].should eq(201)
doc.parsed_response['hasMore'].should eq(false)
doc.parsed_response['result'].length.should eq(1)
doc.parsed_response['count'].should eq(1)
body = "{ \"collection\" : \"#{@cn}\", \"example\" : { \"someAttribute\" : \"someValue\" }, \"skip\" : 1, \"limit\" : 1 }"
doc = ArangoDB.log_put("#{prefix}-by-example-skip", cmd, :body => body)
doc.code.should eq(201)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(false)
doc.parsed_response['code'].should eq(201)
doc.parsed_response['hasMore'].should eq(false)
doc.parsed_response['result'].length.should eq(1)
doc.parsed_response['count'].should eq(1)
body = "{ \"collection\" : \"#{@cn}\", \"example\" : { \"someAttribute\" : \"someValue\" }, \"skip\" : 2 }"
doc = ArangoDB.log_put("#{prefix}-by-example-skip", cmd, :body => body)
doc.code.should eq(201)
doc.headers['content-type'].should eq("application/json; charset=utf-8")
doc.parsed_response['error'].should eq(false)
doc.parsed_response['code'].should eq(201)
doc.parsed_response['hasMore'].should eq(false)
doc.parsed_response['result'].length.should eq(0)
doc.parsed_response['count'].should eq(0)
end
end
################################################################################
## remove-by-example query
################################################################################

View File

@ -8,6 +8,7 @@ rspec --color --format d \
api-attributes-spec.rb \
api-batch-spec.rb \
api-collection-spec.rb \
api-documents-spec.rb \
api-graph-spec.rb \
rest-key-spec.rb \
rest-create-document-spec.rb \

View File

@ -28,6 +28,7 @@ unittests: all unittests-verbose unittests-brief
unittests-brief: \
unittests-make \
unittests-codebase-static \
unittests-boost \
unittests-shell-server \
unittests-shell-server-ahuacatl \
@ -79,6 +80,7 @@ SERVER_OPT := \
--database.force-sync-shapes false \
--database.force-sync-properties false \
--javascript.action-directory @top_srcdir@/js/actions \
--javascript.app-path @top_srcdir@/js/apps \
--javascript.gc-interval 1 \
--javascript.modules-path @top_srcdir@/js/server/modules\;@top_srcdir@/js/common/modules\;@top_srcdir@/js/node \
--javascript.package-path @top_srcdir@/js/npm\;@top_srcdir@/js/common/test-data/modules \
@ -139,9 +141,10 @@ unittests-make:
################################################################################
unittests-codebase-static:
@rm -f duplicates
@(find lib arangosh arangod arangoirb -regex ".*/.*\.\(c\|h\|cpp\)" -printf "%f\n" | sort | uniq -c | grep -v "^ \+1 \+" > duplicates) || true
@if [ "`grep " " duplicates`" != "" ]; then echo ; echo "Duplicate filenames found. These should be fixed to allow compilation with Visual Studio:"; cat duplicates; false; fi
@rm -f duplicates.test
@(find lib arangosh arangod arangoirb -regex ".*/.*\.\(c\|h\|cpp\)" -printf "%f\n" | sort | uniq -c | grep -v "^ \+1 \+" > duplicates.test) || true
@if [ "`grep " " duplicates.test`" != "" ]; then echo ; echo "Duplicate filenames found. These should be fixed to allow compilation with Visual Studio:"; cat duplicates.test; rm -f duplicates.test; false; fi
@rm -f duplicates.test
################################################################################
### @brief BOOST TESTS

View File

@ -34,10 +34,10 @@
// -----------------------------------------------------------------------------
#define TEST_STRING(str) \
TRI_EscapeUtf8String(str, strlen(str), true, &outLength);
TRI_EscapeUtf8String(str, strlen(str), true, &outLength, true);
#define TEST_STRING_L(str, len) \
TRI_EscapeUtf8String(str, len, true, &outLength);
TRI_EscapeUtf8String(str, len, true, &outLength, true);
// -----------------------------------------------------------------------------
// --SECTION-- private constants

View File

@ -210,7 +210,7 @@ TRI_aql_node_t* TRI_CreateNodeReturnEmptyAql (void) {
list = (TRI_aql_node_t*) TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_aql_node_t), false);
if (list == NULL) {
TRI_Free(TRI_UNKNOWN_MEM_ZONE, list);
TRI_Free(TRI_UNKNOWN_MEM_ZONE, node);
return NULL;
}

View File

@ -161,6 +161,16 @@ static inline bool OutputString (TRI_string_buffer_t* const buffer,
return (TRI_AppendStringStringBuffer(buffer, value) == TRI_ERROR_NO_ERROR);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief append a string to the buffer
////////////////////////////////////////////////////////////////////////////////
static inline bool OutputString2 (TRI_string_buffer_t* const buffer,
const char* const value,
size_t length) {
return (TRI_AppendString2StringBuffer(buffer, value, length) == TRI_ERROR_NO_ERROR);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief append a single character to the buffer
////////////////////////////////////////////////////////////////////////////////
@ -259,9 +269,10 @@ static inline void ScopeOutputQuoted2 (TRI_aql_codegen_js_t* const generator,
generator->_errorCode = TRI_ERROR_OUT_OF_MEMORY;
}
escaped = TRI_EscapeUtf8StringZ(TRI_UNKNOWN_MEM_ZONE, value, strlen(value), false, &outLength);
if (escaped) {
if (! OutputString(scope->_buffer, escaped)) {
escaped = TRI_EscapeUtf8StringZ(TRI_UNKNOWN_MEM_ZONE, value, strlen(value), false, &outLength, false);
if (escaped != NULL) {
if (! OutputString2(scope->_buffer, escaped, outLength)) {
generator->_errorCode = TRI_ERROR_OUT_OF_MEMORY;
}
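
The pattern in this hunk, escape once and remember the returned length, lets the append go through OutputString2 and TRI_AppendString2StringBuffer instead of re-scanning the escaped string with strlen. A generic standalone illustration of the idea (plain C, not the ArangoDB buffer API):

    #include <stdlib.h>
    #include <string.h>

    /* sketch: append 'escaped' of known length to a growable buffer without strlen */
    static char* append_known_length (char* dst, size_t* dst_len,
                                      const char* escaped, size_t escaped_len) {
      char* out = realloc(dst, *dst_len + escaped_len + 1);
      if (out == NULL) {
        return NULL;
      }
      memcpy(out + *dst_len, escaped, escaped_len);
      *dst_len += escaped_len;
      out[*dst_len] = '\0';
      return out;
    }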

View File

@ -119,7 +119,7 @@ static bool AppendListValues (TRI_string_buffer_t* const buffer,
n = node->_members._length;
for (i = 0; i < n; ++i) {
if (i > 0) {
if (TRI_AppendStringStringBuffer(buffer, ", ") != TRI_ERROR_NO_ERROR) {
if (TRI_AppendString2StringBuffer(buffer, ", ", 2) != TRI_ERROR_NO_ERROR) {
return false;
}
}
@ -327,20 +327,30 @@ bool TRI_ValueJavascriptAql (TRI_string_buffer_t* const buffer,
const TRI_aql_value_t* const value,
const TRI_aql_value_type_e type) {
switch (type) {
case TRI_AQL_TYPE_FAIL:
return (TRI_AppendStringStringBuffer(buffer, "fail") == TRI_ERROR_NO_ERROR);
case TRI_AQL_TYPE_FAIL: {
return (TRI_AppendString2StringBuffer(buffer, "fail", 4) == TRI_ERROR_NO_ERROR);
}
case TRI_AQL_TYPE_NULL:
return (TRI_AppendStringStringBuffer(buffer, "null") == TRI_ERROR_NO_ERROR);
case TRI_AQL_TYPE_NULL: {
return (TRI_AppendString2StringBuffer(buffer, "null", 4) == TRI_ERROR_NO_ERROR);
}
case TRI_AQL_TYPE_BOOL:
return (TRI_AppendStringStringBuffer(buffer, value->_value._bool ? "true" : "false") == TRI_ERROR_NO_ERROR);
case TRI_AQL_TYPE_BOOL: {
if (value->_value._bool) {
return (TRI_AppendString2StringBuffer(buffer, "true", 4) == TRI_ERROR_NO_ERROR);
}
else {
return (TRI_AppendString2StringBuffer(buffer, "false", 5) == TRI_ERROR_NO_ERROR);
}
}
case TRI_AQL_TYPE_INT:
case TRI_AQL_TYPE_INT: {
return (TRI_AppendInt64StringBuffer(buffer, value->_value._int) == TRI_ERROR_NO_ERROR);
}
case TRI_AQL_TYPE_DOUBLE:
case TRI_AQL_TYPE_DOUBLE: {
return (TRI_AppendDoubleStringBuffer(buffer, value->_value._double) == TRI_ERROR_NO_ERROR);
}
case TRI_AQL_TYPE_STRING: {
char* escapedString;
@ -354,12 +364,13 @@ bool TRI_ValueJavascriptAql (TRI_string_buffer_t* const buffer,
value->_value._string,
strlen(value->_value._string),
false,
&outLength);
&outLength,
false);
if (escapedString == NULL) {
return false;
}
if (TRI_AppendStringStringBuffer(buffer, escapedString) != TRI_ERROR_NO_ERROR) {
if (TRI_AppendString2StringBuffer(buffer, escapedString, outLength) != TRI_ERROR_NO_ERROR) {
TRI_Free(TRI_UNKNOWN_MEM_ZONE, escapedString);
return false;
@ -426,20 +437,30 @@ bool TRI_ValueStringAql (TRI_string_buffer_t* const buffer,
const TRI_aql_value_t* const value,
const TRI_aql_value_type_e type) {
switch (type) {
case TRI_AQL_TYPE_FAIL:
return (TRI_AppendStringStringBuffer(buffer, "fail") == TRI_ERROR_NO_ERROR);
case TRI_AQL_TYPE_FAIL: {
return (TRI_AppendString2StringBuffer(buffer, "fail", 4) == TRI_ERROR_NO_ERROR);
}
case TRI_AQL_TYPE_NULL:
return (TRI_AppendStringStringBuffer(buffer, "null") == TRI_ERROR_NO_ERROR);
case TRI_AQL_TYPE_NULL: {
return (TRI_AppendString2StringBuffer(buffer, "null", 4) == TRI_ERROR_NO_ERROR);
}
case TRI_AQL_TYPE_BOOL:
return (TRI_AppendStringStringBuffer(buffer, value->_value._bool ? "true" : "false") == TRI_ERROR_NO_ERROR);
case TRI_AQL_TYPE_BOOL: {
if (value->_value._bool) {
return (TRI_AppendString2StringBuffer(buffer, "true", 4) == TRI_ERROR_NO_ERROR);
}
else {
return (TRI_AppendString2StringBuffer(buffer, "false", 5) == TRI_ERROR_NO_ERROR);
}
}
case TRI_AQL_TYPE_INT:
case TRI_AQL_TYPE_INT: {
return (TRI_AppendInt64StringBuffer(buffer, value->_value._int) == TRI_ERROR_NO_ERROR);
}
case TRI_AQL_TYPE_DOUBLE:
case TRI_AQL_TYPE_DOUBLE: {
return (TRI_AppendDoubleStringBuffer(buffer, value->_value._double) == TRI_ERROR_NO_ERROR);
}
case TRI_AQL_TYPE_STRING: {
if (TRI_AppendCharStringBuffer(buffer, '"') != TRI_ERROR_NO_ERROR) {
@ -473,7 +494,7 @@ bool TRI_NodeStringAql (TRI_string_buffer_t* const buffer,
return false;
}
if (TRI_AppendStringStringBuffer(buffer, " : ") != TRI_ERROR_NO_ERROR) {
if (TRI_AppendString2StringBuffer(buffer, " : ", 3) != TRI_ERROR_NO_ERROR) {
return false;
}
@ -481,7 +502,7 @@ bool TRI_NodeStringAql (TRI_string_buffer_t* const buffer,
}
case TRI_AQL_NODE_LIST: {
if (TRI_AppendStringStringBuffer(buffer, "[ ") != TRI_ERROR_NO_ERROR) {
if (TRI_AppendString2StringBuffer(buffer, "[ ", 2) != TRI_ERROR_NO_ERROR) {
return false;
}
@ -489,11 +510,11 @@ bool TRI_NodeStringAql (TRI_string_buffer_t* const buffer,
return false;
}
return (TRI_AppendStringStringBuffer(buffer, " ]") == TRI_ERROR_NO_ERROR);
return (TRI_AppendString2StringBuffer(buffer, " ]", 2) == TRI_ERROR_NO_ERROR);
}
case TRI_AQL_NODE_ARRAY: {
if (TRI_AppendStringStringBuffer(buffer, "{ ") != TRI_ERROR_NO_ERROR) {
if (TRI_AppendString2StringBuffer(buffer, "{ ", 2) != TRI_ERROR_NO_ERROR) {
return false;
}
@ -501,7 +522,7 @@ bool TRI_NodeStringAql (TRI_string_buffer_t* const buffer,
return false;
}
return (TRI_AppendStringStringBuffer(buffer, " }") == TRI_ERROR_NO_ERROR);
return (TRI_AppendString2StringBuffer(buffer, " }", 2) == TRI_ERROR_NO_ERROR);
}
case TRI_AQL_NODE_OPERATOR_UNARY_PLUS:
@ -542,7 +563,7 @@ bool TRI_NodeStringAql (TRI_string_buffer_t* const buffer,
return false;
}
if (TRI_AppendStringStringBuffer(buffer, " ? ") != TRI_ERROR_NO_ERROR) {
if (TRI_AppendString2StringBuffer(buffer, " ? ", 3) != TRI_ERROR_NO_ERROR) {
return false;
}
@ -550,7 +571,7 @@ bool TRI_NodeStringAql (TRI_string_buffer_t* const buffer,
return false;
}
if (TRI_AppendStringStringBuffer(buffer, " : ") != TRI_ERROR_NO_ERROR) {
if (TRI_AppendString2StringBuffer(buffer, " : ", 3) != TRI_ERROR_NO_ERROR) {
return false;
}
@ -562,7 +583,7 @@ bool TRI_NodeStringAql (TRI_string_buffer_t* const buffer,
return false;
}
if (TRI_AppendStringStringBuffer(buffer, ".") != TRI_ERROR_NO_ERROR) {
if (TRI_AppendCharStringBuffer(buffer, '.') != TRI_ERROR_NO_ERROR) {
return false;
}
@ -574,7 +595,7 @@ bool TRI_NodeStringAql (TRI_string_buffer_t* const buffer,
return false;
}
if (TRI_AppendStringStringBuffer(buffer, "[") != TRI_ERROR_NO_ERROR) {
if (TRI_AppendCharStringBuffer(buffer, '[') != TRI_ERROR_NO_ERROR) {
return false;
}
@ -582,7 +603,7 @@ bool TRI_NodeStringAql (TRI_string_buffer_t* const buffer,
return false;
}
return TRI_AppendStringStringBuffer(buffer, "]") == TRI_ERROR_NO_ERROR;
return TRI_AppendCharStringBuffer(buffer, ']') == TRI_ERROR_NO_ERROR;
}
case TRI_AQL_NODE_FCALL: {
@ -592,7 +613,7 @@ bool TRI_NodeStringAql (TRI_string_buffer_t* const buffer,
return false;
}
if (TRI_AppendStringStringBuffer(buffer, "(") != TRI_ERROR_NO_ERROR) {
if (TRI_AppendCharStringBuffer(buffer, '(') != TRI_ERROR_NO_ERROR) {
return false;
}
@ -600,7 +621,7 @@ bool TRI_NodeStringAql (TRI_string_buffer_t* const buffer,
return false;
}
return TRI_AppendStringStringBuffer(buffer, ")") == TRI_ERROR_NO_ERROR;
return TRI_AppendCharStringBuffer(buffer, ')') == TRI_ERROR_NO_ERROR;
}
case TRI_AQL_NODE_FCALL_USER: {
@ -608,7 +629,7 @@ bool TRI_NodeStringAql (TRI_string_buffer_t* const buffer,
return false;
}
if (TRI_AppendStringStringBuffer(buffer, "(") != TRI_ERROR_NO_ERROR) {
if (TRI_AppendCharStringBuffer(buffer, '(') != TRI_ERROR_NO_ERROR) {
return false;
}
@ -616,7 +637,7 @@ bool TRI_NodeStringAql (TRI_string_buffer_t* const buffer,
return false;
}
return TRI_AppendStringStringBuffer(buffer, ")") == TRI_ERROR_NO_ERROR;
return TRI_AppendCharStringBuffer(buffer, ')') == TRI_ERROR_NO_ERROR;
}
case TRI_AQL_NODE_EXPAND: {
@ -654,7 +675,7 @@ bool TRI_NodeStringAql (TRI_string_buffer_t* const buffer,
return false;
}
if (TRI_AppendStringStringBuffer(buffer, " = ") != TRI_ERROR_NO_ERROR) {
if (TRI_AppendString2StringBuffer(buffer, " = ", 3) != TRI_ERROR_NO_ERROR) {
return false;
}
@ -662,7 +683,7 @@ bool TRI_NodeStringAql (TRI_string_buffer_t* const buffer,
}
default: {
// nadata
// nada
}
}
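
Throughout this file the change replaces TRI_AppendStringStringBuffer, which must strlen its argument, with TRI_AppendString2StringBuffer plus a hard-coded length, and single-character literals with TRI_AppendCharStringBuffer. For string literals the length is known at compile time, so a wrapper macro could supply it automatically; a hypothetical sketch (the macro is invented here, not part of the codebase):

    /* hypothetical convenience macro: derive a literal's length via sizeof */
    #define APPEND_LITERAL(buffer, lit) \
        TRI_AppendString2StringBuffer((buffer), (lit), sizeof(lit) - 1)

    /* usage: APPEND_LITERAL(buffer, " : ") instead of counting characters by hand */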

View File

@ -176,7 +176,7 @@ static inline TRI_json_t* GetRowProtoType (TRI_aql_explain_t* const explain,
/// @brief create an explain structure
////////////////////////////////////////////////////////////////////////////////
static TRI_aql_explain_t* CreateExplain (void) {
static TRI_aql_explain_t* CreateExplain (TRI_aql_context_t* context) {
TRI_aql_explain_t* explain;
explain = (TRI_aql_explain_t*) TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_aql_explain_t), false);
@ -184,6 +184,8 @@ static TRI_aql_explain_t* CreateExplain (void) {
if (explain == NULL) {
return NULL;
}
explain->_context = context;
explain->_count = 0;
explain->_level = 0;
@ -360,7 +362,7 @@ static TRI_aql_node_t* ProcessStatement (TRI_aql_statement_walker_t* const walke
case TRI_AQL_NODE_LIMIT: {
TRI_aql_node_t* offsetNode = TRI_AQL_NODE_MEMBER(node, 0);
TRI_aql_node_t* countNode = TRI_AQL_NODE_MEMBER(node, 1);
TRI_aql_node_t* countNode = TRI_AQL_NODE_MEMBER(node, 1);
TRI_json_t* row;
row = GetRowProtoType(explain, node->_type);
@ -368,12 +370,12 @@ static TRI_aql_node_t* ProcessStatement (TRI_aql_statement_walker_t* const walke
TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE,
row,
"offset",
TRI_CreateNumberJson(TRI_UNKNOWN_MEM_ZONE, (double) TRI_AQL_NODE_INT(offsetNode)));
TRI_NodeJsonAql(explain->_context, offsetNode));
TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE,
row,
"count",
TRI_CreateNumberJson(TRI_UNKNOWN_MEM_ZONE, (double) TRI_AQL_NODE_INT(countNode)));
TRI_NodeJsonAql(explain->_context, countNode));
AddRow(explain, row);
break;
@ -428,7 +430,7 @@ TRI_json_t* TRI_ExplainAql (TRI_aql_context_t* const context) {
TRI_aql_explain_t* explain;
TRI_json_t* result;
explain = CreateExplain();
explain = CreateExplain(context);
if (explain == NULL) {
TRI_SetErrorContextAql(context, TRI_ERROR_OUT_OF_MEMORY, NULL);

View File

@ -47,6 +47,7 @@ struct TRI_json_s;
////////////////////////////////////////////////////////////////////////////////
typedef struct TRI_aql_explain_s {
struct TRI_aql_context_s* _context;
size_t _count;
size_t _level;
struct TRI_json_s* _result;

File diff suppressed because it is too large

View File

@ -1,8 +1,8 @@
/* A Bison parser, made by GNU Bison 2.5. */
/* A Bison parser, made by GNU Bison 2.6.5. */
/* Bison interface for Yacc-like parsers in C
Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc.
Copyright (C) 1984, 1989-1990, 2000-2012 Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -30,6 +30,15 @@
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
#ifndef YY_AHUACATL_ARANGOD_AHUACATL_AHUACATL_GRAMMAR_H_INCLUDED
# define YY_AHUACATL_ARANGOD_AHUACATL_AHUACATL_GRAMMAR_H_INCLUDED
/* Enabling traces. */
#ifndef YYDEBUG
# define YYDEBUG 0
#endif
#if YYDEBUG
extern int Ahuacatldebug;
#endif
/* Tokens. */
#ifndef YYTOKENTYPE
@ -91,12 +100,10 @@
#endif
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
typedef union YYSTYPE
{
/* Line 2132 of yacc.c */
/* Line 2042 of yacc.c */
#line 26 "arangod/Ahuacatl/ahuacatl-grammar.y"
TRI_aql_node_t* node;
@ -105,17 +112,14 @@ typedef union YYSTYPE
int64_t intval;
/* Line 2132 of yacc.c */
#line 111 "arangod/Ahuacatl/ahuacatl-grammar.h"
/* Line 2042 of yacc.c */
#line 117 "arangod/Ahuacatl/ahuacatl-grammar.h"
} YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define yystype YYSTYPE /* obsolescent; will be withdrawn */
# define YYSTYPE_IS_DECLARED 1
#endif
#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED
typedef struct YYLTYPE
{
@ -130,4 +134,18 @@ typedef struct YYLTYPE
#endif
#ifdef YYPARSE_PARAM
#if defined __STDC__ || defined __cplusplus
int Ahuacatlparse (void *YYPARSE_PARAM);
#else
int Ahuacatlparse ();
#endif
#else /* ! YYPARSE_PARAM */
#if defined __STDC__ || defined __cplusplus
int Ahuacatlparse (TRI_aql_context_t* const context);
#else
int Ahuacatlparse ();
#endif
#endif /* ! YYPARSE_PARAM */
#endif /* !YY_AHUACATL_ARANGOD_AHUACATL_AHUACATL_GRAMMAR_H_INCLUDED */

View File

@ -357,8 +357,8 @@ sort_direction:
;
limit_statement:
T_LIMIT integer_value {
TRI_aql_node_t* node = TRI_CreateNodeLimitAql(context, TRI_CreateNodeValueIntAql(context, 0), $2);
T_LIMIT atomic_value {
TRI_aql_node_t* node = TRI_CreateNodeLimitAql(context, TRI_CreateNodeValueIntAql(context, 0), $2);
if (node == NULL) {
ABORT_OOM
}
@ -368,7 +368,7 @@ limit_statement:
}
}
| T_LIMIT integer_value T_COMMA integer_value {
| T_LIMIT atomic_value T_COMMA atomic_value {
TRI_aql_node_t* node = TRI_CreateNodeLimitAql(context, $2, $4);
if (node == NULL) {
ABORT_OOM
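
(The grammar change above widens LIMIT operands from integer_value to atomic_value, so offset and count may now arrive as non-integer value nodes, presumably including values supplied through bind parameters. Validation therefore moves into the optimiser; see the coercion sketch following the optimiser hunks in the next file.)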

View File

@ -786,6 +786,7 @@ static TRI_aql_node_t* OptimiseLimit (TRI_aql_statement_walker_t* const walker,
TRI_aql_node_t* node) {
TRI_aql_scope_t* scope;
TRI_aql_node_t* limit;
aql_optimiser_t* optimiser = (aql_optimiser_t*) walker->_data;
int64_t limitValue;
assert(node);
@ -794,7 +795,27 @@ static TRI_aql_node_t* OptimiseLimit (TRI_aql_statement_walker_t* const walker,
assert(scope);
limit = TRI_AQL_NODE_MEMBER(node, 1);
limitValue = TRI_AQL_NODE_INT(limit);
if (limit->_type != TRI_AQL_NODE_VALUE) {
return node;
}
if (limit->_value._type == TRI_AQL_TYPE_INT) {
limitValue = TRI_AQL_NODE_INT(limit);
}
else if (limit->_value._type == TRI_AQL_TYPE_DOUBLE) {
limitValue = (int64_t) TRI_AQL_NODE_DOUBLE(limit);
}
else if (limit->_value._type == TRI_AQL_TYPE_NULL) {
limitValue = 0;
}
else if (limit->_value._type == TRI_AQL_TYPE_BOOL) {
limitValue = (int64_t) TRI_AQL_NODE_BOOL(limit);
}
else {
TRI_SetErrorContextAql(optimiser->_context, TRI_ERROR_QUERY_NUMBER_OUT_OF_RANGE, NULL);
return node;
}
// check for the easy case, a limit value of 0, e.g. LIMIT 10, 0
if (limitValue == 0) {
@ -1506,9 +1527,49 @@ static void NoteLimit (TRI_aql_statement_walker_t* const walker,
const TRI_aql_node_t* const node) {
TRI_aql_node_t* offset = TRI_AQL_NODE_MEMBER(node, 0);
TRI_aql_node_t* limit = TRI_AQL_NODE_MEMBER(node, 1);
int64_t offsetValue = TRI_AQL_NODE_INT(offset);
int64_t limitValue = TRI_AQL_NODE_INT(limit);
int64_t offsetValue;
int64_t limitValue;
TRI_aql_scope_t* scope;
aql_optimiser_t* optimiser;
optimiser = walker->_data;
if (offset->_type != TRI_AQL_NODE_VALUE || limit->_type != TRI_AQL_NODE_VALUE) {
TRI_SetErrorContextAql(optimiser->_context, TRI_ERROR_QUERY_NUMBER_OUT_OF_RANGE, NULL);
return;
}
if (offset->_value._type == TRI_AQL_TYPE_INT) {
offsetValue = TRI_AQL_NODE_INT(offset);
}
else if (offset->_value._type == TRI_AQL_TYPE_DOUBLE) {
offsetValue = (int64_t) TRI_AQL_NODE_DOUBLE(offset);
}
else {
TRI_SetErrorContextAql(optimiser->_context, TRI_ERROR_QUERY_NUMBER_OUT_OF_RANGE, NULL);
return;
}
if (offsetValue < 0) {
TRI_SetErrorContextAql(optimiser->_context, TRI_ERROR_QUERY_NUMBER_OUT_OF_RANGE, NULL);
return;
}
if (limit->_value._type == TRI_AQL_TYPE_INT) {
limitValue = TRI_AQL_NODE_INT(limit);
}
else if (limit->_value._type == TRI_AQL_TYPE_DOUBLE) {
limitValue = (int64_t) TRI_AQL_NODE_DOUBLE(limit);
}
else {
TRI_SetErrorContextAql(optimiser->_context, TRI_ERROR_QUERY_NUMBER_OUT_OF_RANGE, NULL);
return;
}
if (limitValue < 0) {
TRI_SetErrorContextAql(optimiser->_context, TRI_ERROR_QUERY_NUMBER_OUT_OF_RANGE, NULL);
return;
}
scope = TRI_GetCurrentScopeStatementWalkerAql(walker);
if (scope->_type != TRI_AQL_SCOPE_MAIN) {
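
OptimiseLimit and NoteLimit now repeat the same type dispatch on value nodes. A hypothetical helper that factors out the coercion, mirroring the OptimiseLimit variant (the function name and shape are invented for illustration; the real code inlines this logic and raises TRI_ERROR_QUERY_NUMBER_OUT_OF_RANGE itself):

    /* hypothetical: coerce an AQL value node to int64_t, false on unsupported types */
    static bool CoerceLimitValue (const TRI_aql_node_t* const node, int64_t* result) {
      if (node->_type != TRI_AQL_NODE_VALUE) {
        return false;
      }
      switch (node->_value._type) {
        case TRI_AQL_TYPE_INT:    *result = TRI_AQL_NODE_INT(node);              return true;
        case TRI_AQL_TYPE_DOUBLE: *result = (int64_t) TRI_AQL_NODE_DOUBLE(node); return true;
        case TRI_AQL_TYPE_NULL:   *result = 0;                                   return true;
        case TRI_AQL_TYPE_BOOL:   *result = (int64_t) TRI_AQL_NODE_BOOL(node);   return true;
        default:                  return false;
      }
    }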

View File

@ -123,7 +123,7 @@ PQIndex* PQueueIndex_new (void) {
bool ok;
// ..........................................................................
// Allocate the Priority Que Index
// Allocate the Priority Queue Index
// ..........................................................................
idx = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(PQIndex), false);
@ -136,7 +136,7 @@ PQIndex* PQueueIndex_new (void) {
// ..........................................................................
// Allocate the priority que
// Allocate the priority queue
// Remember to add any additional structure you need
// ..........................................................................
@ -155,8 +155,8 @@ PQIndex* PQueueIndex_new (void) {
idx->_aa = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_associative_array_t), false);
if (idx->_aa == NULL) {
TRI_Free(TRI_UNKNOWN_MEM_ZONE, idx);
TRI_Free(TRI_UNKNOWN_MEM_ZONE, idx->_pq);
TRI_Free(TRI_UNKNOWN_MEM_ZONE, idx);
TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY);
LOG_ERROR("out of memory when creating priority queue index");
return NULL;
@ -164,7 +164,7 @@ PQIndex* PQueueIndex_new (void) {
// ..........................................................................
// Initialise the priority que
// Initialise the priority queue
// ..........................................................................
ok = TRI_InitPQueue(idx->_pq,

View File

@ -126,22 +126,16 @@ bool TRI_InitPQueue (TRI_pqueue_t* pq, size_t initialCapacity, size_t itemSize,
// ..........................................................................
// Set the capacity and assign memeory for storage
// Set the capacity and assign memory for storage
// ..........................................................................
pq->_base._capacity = initialCapacity;
pq->_base._items = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, pq->_base._itemSize * pq->_base._capacity, false);
pq->_base._items = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, pq->_base._itemSize * pq->_base._capacity, true);
if (pq->_base._items == NULL) {
TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY);
LOG_ERROR("out of memory when creating priority queue storage");
return false;
}
// ..........................................................................
// Initialise the memory allcoated for the storage of pq (0 filled)
// ..........................................................................
memset(pq->_base._items, 0, pq->_base._itemSize * pq->_base._capacity);
// ..........................................................................
// initialise the number of items stored
@ -373,26 +367,29 @@ static void* TopPQueue(TRI_pqueue_t* pq) {
static bool CheckPQSize(TRI_pqueue_t* pq) {
char* newItems;
size_t newCapacity;
if (pq == NULL) {
return false;
}
if (pq->_base._capacity > (pq->_base._count + 1) ) {
if (pq->_base._capacity >= (pq->_base._count + 1) ) {
return true;
}
pq->_base._capacity = pq->_base._capacity * 2;
// allocate and fill with NUL bytes
newItems = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, pq->_base._capacity * pq->_base._itemSize, true);
newCapacity = pq->_base._capacity * 2;
// reallocate
newItems = TRI_Reallocate(TRI_UNKNOWN_MEM_ZONE, pq->_base._items, newCapacity * pq->_base._itemSize);
if (newItems == NULL) {
return false;
}
memcpy(newItems, pq->_base._items, (pq->_base._count * pq->_base._itemSize) );
// initialise the remaining memory allocated for the storage of pq (0 filled)
memset(pq->_base._items + pq->_base._capacity * pq->_base._itemSize, 0, (newCapacity - pq->_base._capacity) * pq->_base._itemSize);
TRI_Free(TRI_UNKNOWN_MEM_ZONE, pq->_base._items);
pq->_base._items = newItems;
pq->_base._capacity = newCapacity;
return true;
}
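
CheckPQSize now grows the item array with TRI_Reallocate and zero-fills only the newly added tail, rather than allocating a fresh zero-filled block, copying, and freeing the old one. The generic shape of that pattern in plain C (illustrative only; the real code goes through ArangoDB's memory zones):

    #include <stdlib.h>
    #include <string.h>

    /* double the capacity of 'items' and zero the new tail; NULL on failure */
    static void* grow_zeroed (void* items, size_t item_size,
                              size_t old_capacity, size_t* new_capacity) {
      size_t capacity = old_capacity * 2;
      char* p = realloc(items, capacity * item_size);
      if (p == NULL) {
        return NULL; /* the old block stays valid and untouched */
      }
      memset(p + old_capacity * item_size, 0, (capacity - old_capacity) * item_size);
      *new_capacity = capacity;
      return p;
    }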

View File

@ -102,7 +102,7 @@ typedef struct TRI_pqueue_base_s {
bool _reverse;
// ...........................................................................
// Additional hidden extenral structure used outside this priority queue
// Additional hidden external structure used outside this priority queue
// This hidden structure is not available within this priority queue
// ...........................................................................
// char[n]
@ -124,7 +124,7 @@ typedef struct TRI_pqueue_s {
// ...........................................................................
// default pq add, remove ,top methods
// default pq add, remove, top methods
// ...........................................................................
bool (*add) (struct TRI_pqueue_s*, void*);

View File

@ -171,6 +171,9 @@ HttpHandler::status_e RestDocumentHandler::execute () {
///
/// @RESTHEADER{POST /_api/document,creates a document}
///
/// @RESTBODYPARAM{document,json,required}
/// A JSON representation of the document.
///
/// @RESTQUERYPARAMETERS
///
/// @RESTQUERYPARAM{collection,string,required}
@ -373,6 +376,11 @@ bool RestDocumentHandler::createDocument () {
return false;
}
if (json->_type != TRI_JSON_ARRAY) {
generateTransactionError(collection, TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
return false;
}
// find and load collection given by name or identifier
SingleCollectionWriteTransaction<StandaloneTransaction<RestTransactionContext>, 1> trx(_vocbase, _resolver, collection);
@ -462,15 +470,16 @@ bool RestDocumentHandler::readDocument () {
/// @RESTURLPARAMETERS
///
/// @RESTURLPARAM{document-handle,string,required}
/// The Handle of the Document.
///
/// @RESTHEADERPARAMETERS
///
/// @RESTHEADERPARAM{If-None-Match,string}
/// @RESTHEADERPARAM{If-None-Match,string,optional}
/// If the "If-None-Match" header is given, then it must contain exactly one
/// etag. The document is returned, if it has a different revision than the
/// given etag. Otherwise a `HTTP 304` is returned.
///
/// @RESTHEADERPARAM{If-Match,string}
/// @RESTHEADERPARAM{If-Match,string,optional}
/// If the "If-Match" header is given, then it must contain exactly one
/// etag. The document is returned, if it has the same revision as the
/// given etag. Otherwise a `HTTP 412` is returned. As an alternative
@ -506,7 +515,7 @@ bool RestDocumentHandler::readDocument () {
/// db._drop(cn);
/// db._create(cn);
///
/// var document = db.products.save({"hallo":"world"});
/// var document = db.products.save({"hello":"world"});
/// var url = "/_api/document/" + document._id;
///
/// var response = logCurlRequest('GET', url);
@ -523,11 +532,11 @@ bool RestDocumentHandler::readDocument () {
/// db._drop(cn);
/// db._create(cn);
///
/// var document = db.products.save({"hallo":"world"});
/// var document = db.products.save({"hello":"world"});
/// var url = "/_api/document/" + document._id;
/// var header = "if-none-match: \"" + document._rev + "\"";
/// var headers = {"If-None-Match": "\"" + document._rev + "\""};
///
/// var response = logCurlRequest('GET', url, "", header);
/// var response = logCurlRequest('GET', url, "", headers);
///
/// assert(response.code === 304);
///
@ -639,21 +648,32 @@ bool RestDocumentHandler::readSingleDocument (bool generateBody) {
/// @RESTQUERYPARAMETERS
///
/// @RESTQUERYPARAM{collection,string,required}
/// The name of the collection.
///
/// @RESTDESCRIPTION
/// Returns a list of all URIs for all documents from the collection identified
/// by `collection`.
///
/// @RESTRETURNCODES
///
/// @RESTRETURNCODE{200}
/// All went well.
///
/// @RESTRETURNCODE{404}
/// The collection does not exist.
///
/// @EXAMPLES
///
/// Returns all document URIs of a collection.
///
/// @EXAMPLE_ARANGOSH_RUN{RestReadDocumentAll}
/// var cn = "products";
/// db._drop(cn);
/// db._create(cn);
///
/// db.products.save({"hallo1":"world1"});
/// db.products.save({"hallo2":"world1"});
/// db.products.save({"hallo3":"world1"});
/// db.products.save({"hello1":"world1"});
/// db.products.save({"hello2":"world1"});
/// db.products.save({"hello3":"world1"});
/// var url = "/_api/document/?collection=" + cn;
///
/// var response = logCurlRequest('GET', url);
@ -662,6 +682,21 @@ bool RestDocumentHandler::readSingleDocument (bool generateBody) {
///
/// logJsonResponse(response);
/// @END_EXAMPLE_ARANGOSH_RUN
///
/// Collection does not exist.
///
/// @EXAMPLE_ARANGOSH_RUN{RestReadDocumentAllCollectionDoesNotExist}
/// var cn = "doesnotexist";
/// db._drop(cn);
/// var url = "/_api/document/?collection=" + cn;
///
/// var response = logCurlRequest('GET', url);
///
/// assert(response.code === 404);
///
/// logJsonResponse(response);
/// @END_EXAMPLE_ARANGOSH_RUN
///
////////////////////////////////////////////////////////////////////////////////
bool RestDocumentHandler::readAllDocuments () {
@ -734,7 +769,25 @@ bool RestDocumentHandler::readAllDocuments () {
/// @RESTURLPARAMETERS
///
/// @RESTURLPARAM{document-handle,string,required}
/// The Handle of the Document.
///
/// @RESTQUERYPARAMETERS
///
/// @RESTQUERYPARAM{rev,string,optional}
/// You can conditionally fetch a document based on a target revision id by
/// using the `rev` URL parameter.
///
/// @RESTQUERYPARAM{policy,string,optional}
/// To control the update behavior in case there is a revision mismatch, you
/// can use the `policy` parameter. This is the same as when replacing
/// documents (see replacing documents for more details).
///
/// @RESTHEADERPARAMETERS
///
/// @RESTHEADERPARAM{If-Match,string,optional}
/// You can conditionally get a document based on a target revision id by
/// using the `if-match` HTTP header.
///
/// @RESTDESCRIPTION
/// Like `GET`, but only returns the header fields and not the body. You
/// can use this call to get the current revision of a document or check if
@ -763,7 +816,7 @@ bool RestDocumentHandler::readAllDocuments () {
/// db._drop(cn);
/// db._create(cn);
///
/// var document = db.products.save({"hallo":"world"});
/// var document = db.products.save({"hello":"world"});
/// var url = "/_api/document/" + document._id;
///
/// var response = logCurlRequest('HEAD', url);
@ -795,16 +848,27 @@ bool RestDocumentHandler::checkDocument () {
/// @RESTURLPARAMETERS
///
/// @RESTURLPARAM{document-handle,string,required}
/// The Handle of the Document.
///
/// @RESTQUERYPARAMETERS
///
/// @RESTQUERYPARAM{waitForSync,boolean,optional}
/// Wait until the document has been synced to disk.
///
/// @RESTQUERYPARAM{rev,string,optional}
/// You can conditionally replace a document based on a target revision id by
/// using the `rev` URL parameter.
///
/// @RESTQUERYPARAM{policy,string,optional}
///
/// To control the update behavior in case there is a revision mismatch, you
/// can use the `policy` parameter. This is the same as when replacing
/// documents (see replacing documents for more details).
///
/// @RESTHEADERPARAMETERS
///
/// @RESTHEADERPARAM{If-Match,string,optional}
/// You can conditionally replace a document based on a target revision id by
/// using the `if-match` HTTP header.
///
/// @RESTDESCRIPTION
/// Completely updates (i.e. replaces) the document identified by `document-handle`.
@ -851,7 +915,8 @@ bool RestDocumentHandler::checkDocument () {
///
/// For example, to conditionally replace a document based on a specific revision
/// id, use the following request:
/// @REST{PUT /_api/document/`document-handle`?rev=`etag`}
///
/// - PUT /_api/document/`document-handle`?rev=`etag`
///
/// If a target revision id is provided in the request (e.g. via the `etag` value
/// in the `rev` URL query parameter above), ArangoDB will check that
@ -862,7 +927,7 @@ bool RestDocumentHandler::checkDocument () {
///
/// The conditional update behavior can be overridden with the `policy` URL query parameter:
///
/// @REST{PUT /_api/document/`document-handle`?policy=`policy`}
/// - PUT /_api/document/`document-handle`?policy=`policy`
///
/// If `policy` is set to `error`, then the behavior is as before: replacements
/// will fail if the revision id found in the database does not match the target
@ -902,12 +967,12 @@ bool RestDocumentHandler::checkDocument () {
/// db._drop(cn);
/// db._create(cn);
///
/// var document = db.products.save({"hallo":"world"});
/// var document = db.products.save({"hello":"world"});
/// var url = "/_api/document/" + document._id;
///
/// var response = logCurlRequest('PUT', url, "{}");
/// var response = logCurlRequest('PUT', url, '{"Hello": "you"}');
///
/// assert(response.code === 200);
/// assert(response.code === 202);
///
/// logJsonResponse(response);
/// @END_EXAMPLE_ARANGOSH_RUN
@ -919,12 +984,13 @@ bool RestDocumentHandler::checkDocument () {
/// db._drop(cn);
/// db._create(cn);
///
/// var document = db.products.save({"hallo":"world"});
/// var document = db.products.save({"hello":"world"});
/// db.products.remove(document._id);
/// var url = "/_api/document/" + document._id;
///
/// var response = logCurlRequest('PUT', url, "{}");
///
/// assert(response.code === 200);
/// assert(response.code === 404);
///
/// logJsonResponse(response);
/// @END_EXAMPLE_ARANGOSH_RUN
@ -936,30 +1002,33 @@ bool RestDocumentHandler::checkDocument () {
/// db._drop(cn);
/// db._create(cn);
///
/// var document = db.products.save({"hallo":"world"});
/// var document = db.products.save({"hello":"world"});
/// var document2 = db.products.save({"hello2":"world"});
/// var url = "/_api/document/" + document._id;
/// var headers = {"If-Match": "\"" + document2._rev + "\""};
///
/// var response = logCurlRequest('PUT', url, "{}");
/// var response = logCurlRequest('PUT', url, '{"other":"content"}', headers);
///
/// assert(response.code === 200);
/// assert(response.code === 412);
///
/// logJsonResponse(response);
/// @END_EXAMPLE_ARANGOSH_RUN
///
/// Last write wins:
///
/// @EXAMPLE_ARANGOSH_RUN{RestUpdateDocumentIfMatchOtherLastWrite}
/// @EXAMPLE_ARANGOSH_RUN{RestUpdateDocumentIfMatchOtherLastWriteWins}
/// var cn = "products";
/// db._drop(cn);
/// db._create(cn);
///
/// var document = db.products.save({"hallo":"world"});
/// var url = "/_api/document/" + document._id;
///
/// var response = logCurlRequest('PUT', url, "{}");
///
/// assert(response.code === 200);
/// var document = db.products.save({"hello":"world"});
/// var document2 = db.products.replace(document._id,{"other":"content"});
/// var url = "/_api/document/products/" + document._rev + "?policy=last";
/// var headers = {"If-Match": "\"" + document2._rev + "\""};
///
/// var response = logCurlRequest('PUT', url, "{}", headers);
/// assert(response.code === 202);
///
/// logJsonResponse(response);
/// @END_EXAMPLE_ARANGOSH_RUN
///
@ -970,12 +1039,13 @@ bool RestDocumentHandler::checkDocument () {
/// db._drop(cn);
/// db._create(cn);
///
/// var document = db.products.save({"hallo":"world"});
/// var url = "/_api/document/" + document._id;
/// var document = db.products.save({"hello":"world"});
/// var document2 = db.products.save({"hello2":"world"});
/// var url = "/_api/document/" + document._id + "?rev=" + document2._rev;
///
/// var response = logCurlRequest('PUT', url, "{}");
/// var response = logCurlRequest('PUT', url, '{"other":"content"}');
///
/// assert(response.code === 200);
/// assert(response.code === 412);
///
/// logJsonResponse(response);
/// @END_EXAMPLE_ARANGOSH_RUN
@ -993,18 +1063,33 @@ bool RestDocumentHandler::replaceDocument () {
/// @RESTURLPARAMETERS
///
/// @RESTURLPARAM{document-handle,string,required}
/// The Handle of the Document.
///
/// @RESTQUERYPARAMETERS
///
/// @RESTQUERYPARAM{keepNull,string,required}
/// @RESTQUERYPARAM{keepNull,string,optional}
/// If the intention is to delete existing attributes with the patch command,
/// the URL query parameter `keepNull` can be used with a value of `false`.
/// This will modify the behavior of the patch command to remove any attributes
/// from the existing document that are contained in the patch document with an
/// attribute value of `null`.
///
/// @RESTQUERYPARAM{waitForSync,boolean,optional}
/// Wait until the document has been synced to disk.
///
/// @RESTQUERYPARAM{rev,string,optional}
/// You can conditionally patch a document based on a target revision id by
/// using the `rev` URL parameter.
///
/// @RESTQUERYPARAM{policy,string,optional}
///
/// To control the update behavior in case there is a revision mismatch, you
/// can use the `policy` parameter.
///
/// @RESTHEADERPARAMETERS
///
/// @RESTHEADERPARAM{If-Match,string,optional}
/// You can conditionally patch a document based on a target revision id by
/// using the `if-match` HTTP header.
///
/// @RESTDESCRIPTION
/// Partially updates the document identified by `document-handle`.
@ -1014,12 +1099,7 @@ bool RestDocumentHandler::replaceDocument () {
/// in the existing document if they do exist there.
///
/// Setting an attribute value to `null` in the patch document will cause a
/// value of `null` be saved for the attribute by default. If the intention
/// is to delete existing attributes with the patch command, the URL query parameter
/// `keepNull` can be used with a value of `false`.
/// This will modify the behavior of the patch command to remove any attributes
/// from the existing document that are contained in the patch document with an
/// attribute value of `null`.
/// value of `null` be saved for the attribute by default.
///
/// Optionally, the URL parameter `waitForSync` can be used to force
/// synchronisation of the document update operation to disk even in case
@ -1069,7 +1149,34 @@ bool RestDocumentHandler::replaceDocument () {
///
/// @EXAMPLES
///
/// @verbinclude rest-patch-document
/// Patches an existing document with new content:
///
/// @EXAMPLE_ARANGOSH_RUN{RestPatchDocument}
/// var cn = "products";
/// db._drop(cn);
/// db._create(cn);
///
/// var document = db.products.save({"one":"world"});
/// var url = "/_api/document/" + document._id;
///
/// var response = logCurlRequest("PATCH", url, { "hello": "world" });
///
/// assert(response.code === 202);
///
/// logJsonResponse(response);
/// var response2 = logCurlRequest("PATCH", url, { "numbers": { "one": 1, "two": 2, "three": 3, "empty": null } });
/// assert(response2.code === 202);
/// logJsonResponse(response2);
/// var response3 = logCurlRequest("GET", url);
/// assert(response3.code === 200);
/// logJsonResponse(response3);
/// var response4 = logCurlRequest("PATCH", url + "?keepNull=false", { "hello": null, "numbers": { "four": 4 } });
/// assert(response4.code === 202);
/// logJsonResponse(response4);
/// var response5 = logCurlRequest("GET", url);
/// assert(response5.code === 200);
/// logJsonResponse(response5);
/// @END_EXAMPLE_ARANGOSH_RUN
////////////////////////////////////////////////////////////////////////////////
bool RestDocumentHandler::updateDocument () {
@ -1106,6 +1213,11 @@ bool RestDocumentHandler::modifyDocument (bool isPatch) {
if (! holder.registerJson(TRI_UNKNOWN_MEM_ZONE, json)) {
return false;
}
if (json->_type != TRI_JSON_ARRAY) {
generateTransactionError(collection, TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
return false;
}
// extract the revision
const TRI_voc_rid_t revision = extractRevision("if-match", "rev");
@ -1224,41 +1336,35 @@ bool RestDocumentHandler::modifyDocument (bool isPatch) {
/// @RESTURLPARAMETERS
///
/// @RESTURLPARAM{document-handle,string,required}
/// The Handle of the Document.
///
/// @RESTQUERYPARAMETERS
///
/// @RESTQUERYPARAM{rev,string,optional}
/// You can conditionally delete a document based on a target revision id by
/// using the `rev` URL parameter.
///
/// @RESTQUERYPARAM{policy,string,optional}
///
/// To control the update behavior in case there is a revision mismatch, you
/// can use the `policy` parameter. This is the same as when replacing
/// documents (see replacing documents for more details).
///
/// @RESTQUERYPARAM{waitForSync,boolean,optional}
/// Wait until the document has been synced to disk.
///
/// @RESTHEADERPARAMETERS
///
/// @RESTHEADERPARAM{If-Match,string,optional}
/// You can conditionally delete a document based on a target revision id by
/// using the `if-match` HTTP header.
///
/// @RESTDESCRIPTION
/// Deletes the document identified by `document-handle`. If the document
/// exists and could be deleted, then a `HTTP 200` is returned.
///
/// The body of the response contains a JSON object with the information about
/// the handle and the revision. The attribute `_id` contains the known
/// `document-handle` of the updated document, the attribute `_rev`
/// contains the known document revision.
///
/// If the document does not exist, then a `HTTP 404` is returned and the
/// body of the response contains an error document.
///
/// You can conditionally delete a document based on a target revision id by
/// using either the `rev` URL parameter or the `if-match` HTTP header.
/// To control the update behavior in case there is a revision mismatch, you
/// can use the `policy` parameter. This is the same as when replacing
/// documents (see replacing documents for more details).
///
/// Optionally, the URL parameter `waitForSync` can be used to force
/// synchronisation of the document deletion operation to disk even in case
/// that the `waitForSync` flag had been disabled for the entire collection.
/// Thus, the `waitForSync` URL parameter can be used to force synchronisation
/// of just specific operations. To use this, set the `waitForSync` parameter
/// to `true`. If the `waitForSync` parameter is not specified or set to
/// If the `waitForSync` parameter is not specified or set to
/// `false`, then the collection's default `waitForSync` behavior is
/// applied. The `waitForSync` URL parameter cannot be used to disable
/// synchronisation for collections that have a default `waitForSync` value
@ -1286,15 +1392,57 @@ bool RestDocumentHandler::modifyDocument (bool isPatch) {
///
/// Using document handle:
///
/// @verbinclude rest-delete-document
/// @EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerDELETEDocument}
/// var cn = "products";
/// db._drop(cn);
/// db._create(cn, { waitForSync: true });
/// var document = db.products.save({"hello":"world"});
///
/// var url = "/_api/document/" + document._id;
///
/// var response = logCurlRequest('DELETE', url);
///
/// assert(response.code === 200);
///
/// logJsonResponse(response);
/// @END_EXAMPLE_ARANGOSH_RUN
///
/// Unknown document handle:
///
/// @verbinclude rest-delete-document-unknown-handle
/// @EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerDeleteDocumentUnknownHandle}
/// var cn = "products";
/// db._drop(cn);
/// db._create(cn, { waitForSync: true });
/// var document = db.products.save({"hello":"world"});
/// db.products.remove(document._id);
///
/// var url = "/_api/document/" + document._id;
///
/// var response = logCurlRequest('DELETE', url);
///
/// assert(response.code === 404);
///
/// logJsonResponse(response);
/// @END_EXAMPLE_ARANGOSH_RUN
///
/// Revision conflict:
///
/// @verbinclude rest-delete-document-if-match-other
/// @EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerDeleteDocumentIfMatchOther}
/// var cn = "products";
/// db._drop(cn);
/// db._create(cn);
///
/// var document = db.products.save({"hello":"world"});
/// var document2 = db.products.save({"hello2":"world"});
/// var url = "/_api/document/" + document._id;
/// var headers = {"If-Match": "\"" + document2._rev + "\""};
///
/// var response = logCurlRequest('DELETE', url, "", headers);
///
/// assert(response.code === 412);
///
/// logJsonResponse(response);
/// @END_EXAMPLE_ARANGOSH_RUN
////////////////////////////////////////////////////////////////////////////////
bool RestDocumentHandler::deleteDocument () {

View File

@ -154,6 +154,11 @@ bool RestEdgeHandler::createDocument () {
if (! holder.registerJson(TRI_UNKNOWN_MEM_ZONE, json)) {
return false;
}
if (json->_type != TRI_JSON_ARRAY) {
generateTransactionError(collection, TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
return false;
}
if (! checkCreateCollection(collection, getCollectionType())) {
return false;

View File

@ -412,7 +412,7 @@ bool RestImportHandler::createByDocumentsList () {
if (documents->_type != TRI_JSON_LIST) {
generateError(HttpResponse::BAD,
TRI_ERROR_HTTP_BAD_PARAMETER,
"expecting a JSON array in the request");
"expecting a JSON list in the request");
return false;
}

View File

@ -311,7 +311,7 @@ void RestVocbaseBaseHandler::generateDocument (const TRI_voc_cid_t cid,
const string id = DocumentHelper::assembleDocumentId(_resolver.getCollectionName(cid), document->_key);
TRI_json_t augmented;
TRI_Init2ArrayJson(TRI_UNKNOWN_MEM_ZONE, &augmented, 8);
TRI_InitArray2Json(TRI_UNKNOWN_MEM_ZONE, &augmented, 5);
TRI_json_t* _id = TRI_CreateString2CopyJson(TRI_UNKNOWN_MEM_ZONE, id.c_str(), id.size());
@ -380,7 +380,7 @@ void RestVocbaseBaseHandler::generateDocument (const TRI_voc_cid_t cid,
_response->headResponse(TRI_LengthStringBuffer(&buffer));
}
TRI_AnnihilateStringBuffer(&buffer);
TRI_DestroyStringBuffer(&buffer);
}
////////////////////////////////////////////////////////////////////////////////
@ -430,6 +430,10 @@ void RestVocbaseBaseHandler::generateTransactionError (const string& collectionN
case TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND:
generateDocumentNotFound(_resolver.getCollectionId(collectionName), key);
return;
case TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID:
generateError(HttpResponse::BAD, res);
return;
case TRI_ERROR_ARANGO_CONFLICT:
generatePreconditionFailed(_resolver.getCollectionId(collectionName), key ? key : (TRI_voc_key_t) "unknown", rid);

View File

@ -256,7 +256,7 @@ void ArangoServer::buildApplicationServer () {
// V8 engine
// .............................................................................
_applicationV8 = new ApplicationV8(_binaryPath, _tempPath);
_applicationV8 = new ApplicationV8(_binaryPath);
_applicationServer->addFeature(_applicationV8);
// .............................................................................
@ -399,7 +399,8 @@ void ArangoServer::buildApplicationServer () {
_applicationScheduler,
_applicationDispatcher,
"arangodb",
TRI_CheckAuthenticationAuthInfo);
TRI_CheckAuthenticationAuthInfo,
TRI_FlushAuthenticationAuthInfo);
_applicationServer->addFeature(_applicationEndpointServer);
// .............................................................................
@ -410,9 +411,15 @@ void ArangoServer::buildApplicationServer () {
CLEANUP_LOGGING_AND_EXIT_ON_FATAL_ERROR();
}
// set the temp-path
if (_applicationServer->programOptions().has("temp-path")) {
TRI_SetUserTempPath((char*) _tempPath.c_str());
}
// dump version details
LOGGER_INFO(rest::Version::getVerboseVersionString());
// configure v8
if (_applicationServer->programOptions().has("development-mode")) {
_developmentMode = true;
_applicationV8->enableDevelopmentMode();
@ -841,7 +848,7 @@ int ArangoServer::executeConsole (OperationMode::server_operation_mode_e mode) {
// .............................................................................
case OperationMode::MODE_CONSOLE: {
V8LineEditor console(context->_context, ".arangod");
V8LineEditor console(context->_context, ".arangod.history");
console.open(true);
@ -1013,12 +1020,12 @@ int ArangoServer::executeRubyConsole () {
// create a line editor
cout << "ArangoDB MRuby emergency console (" << rest::Version::getVerboseVersionString() << ")" << endl;
MRLineEditor console(context->_mrb, ".arangod");
MRLineEditor console(context->_mrb, ".arangod-ruby.history");
console.open(false);
while (true) {
char* input = console.prompt("arangod> ");
char* input = console.prompt("arangod (ruby)> ");
if (input == 0) {
printf("<ctrl-D>\n" TRI_BYE_MESSAGE "\n");

View File

@ -90,7 +90,6 @@ namespace triagens {
T(),
_setupState(TRI_ERROR_NO_ERROR),
_nestingLevel(0),
_readOnly(true),
_hints(0),
_timeout(0.0),
_waitForSync(false),
@ -141,6 +140,14 @@ namespace triagens {
public:
////////////////////////////////////////////////////////////////////////////////
/// @brief return the collection name resolver
////////////////////////////////////////////////////////////////////////////////
const CollectionNameResolver& resolver () const {
return _resolver;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief whether or not the transaction is embedded
////////////////////////////////////////////////////////////////////////////////
@ -150,11 +157,15 @@ namespace triagens {
}
////////////////////////////////////////////////////////////////////////////////
/// @brief return whether or not the transaction is read-only
/// @brief whether or not shaped json in this trx should be copied
////////////////////////////////////////////////////////////////////////////////
inline bool isReadOnlyTransaction () const {
return _readOnly;
inline bool mustCopyShapedJson () const {
if (_trx != 0 && _trx->_hasOperations) {
return true;
}
return false;
}
////////////////////////////////////////////////////////////////////////////////
@ -312,10 +323,6 @@ namespace triagens {
res = this->addCollectionToplevel(cid, type);
}
if (type == TRI_TRANSACTION_WRITE) {
_readOnly = false;
}
return res;
}
@ -992,12 +999,6 @@ namespace triagens {
int _nestingLevel;
////////////////////////////////////////////////////////////////////////////////
/// @brief whether or not the transaction is read-only
////////////////////////////////////////////////////////////////////////////////
bool _readOnly;
////////////////////////////////////////////////////////////////////////////////
/// @brief transaction hints
////////////////////////////////////////////////////////////////////////////////

View File

@ -174,9 +174,8 @@ void ApplicationV8::V8Context::handleGlobalContextMethods () {
/// @brief constructor
////////////////////////////////////////////////////////////////////////////////
ApplicationV8::ApplicationV8 (string const& binaryPath, string const& tempPath)
ApplicationV8::ApplicationV8 (string const& binaryPath)
: ApplicationFeature("V8"),
_tempPath(tempPath),
_startupPath(),
_modulesPath(),
_packagePath(),
@ -592,6 +591,11 @@ bool ApplicationV8::prepare () {
LOGGER_ERROR("specified dev-app-path '" << _devAppPath << "' does not exist.");
// TODO: decide if we want to abort server start here
}
if (_packagePath.empty()) {
LOGGER_ERROR("--javascript.package-path option was not specified. this may cause follow-up errors.");
// TODO: decide if we want to abort server start here
}
_startupLoader.setDirectory(_startupPath);
@ -734,7 +738,7 @@ bool ApplicationV8::prepareV8Instance (const size_t i) {
TRI_InitV8Buffer(context->_context);
TRI_InitV8Conversions(context->_context);
TRI_InitV8Utils(context->_context, _modulesPath, _packagePath, _tempPath);
TRI_InitV8Utils(context->_context, _modulesPath, _packagePath);
TRI_InitV8Shell(context->_context);
{

View File

@ -173,7 +173,7 @@ namespace triagens {
/// @brief constructor
////////////////////////////////////////////////////////////////////////////////
ApplicationV8 (string const&, string const&);
ApplicationV8 (string const&);
////////////////////////////////////////////////////////////////////////////////
/// @brief destructor
@ -345,12 +345,6 @@ namespace triagens {
/// @{
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// @brief temporary path
////////////////////////////////////////////////////////////////////////////////
string _tempPath;
////////////////////////////////////////////////////////////////////////////////
/// @brief path to the directory containing alternate startup scripts
///

View File

@ -384,9 +384,27 @@ static HttpResponse* ExecuteActionVocbase (TRI_vocbase_t* vocbase,
req->Set(v8g->UserKey, v8::String::New(user.c_str(), user.size()));
}
// set the full url
string const& fullUrl = request->fullUrl();
req->Set(v8g->UrlKey, v8::String::New(fullUrl.c_str(), fullUrl.size()));
// set the protocol
string const& protocol = request->protocol();
req->Set(v8g->ProtocolKey, v8::String::New(protocol.c_str(), protocol.size()));
// set the connection info
const ConnectionInfo& info = request->connectionInfo();
v8::Handle<v8::Object> serverArray = v8::Object::New();
serverArray->Set(v8g->AddressKey, v8::String::New(info.serverAddress.c_str(), info.serverAddress.size()));
serverArray->Set(v8g->PortKey, v8::Number::New(info.serverPort));
req->Set(v8g->ServerKey, serverArray);
v8::Handle<v8::Object> clientArray = v8::Object::New();
clientArray->Set(v8g->AddressKey, v8::String::New(info.clientAddress.c_str(), info.clientAddress.size()));
clientArray->Set(v8g->PortKey, v8::Number::New(info.clientPort));
req->Set(v8g->ClientKey, clientArray);
// copy prefix
string path = request->prefix();
@ -814,7 +832,7 @@ static v8::Handle<v8::Value> JS_ExecuteGlobalContextFunction (v8::Arguments cons
v8::String::Utf8Value utf8def(argv[0]);
if (*utf8def == 0) {
TRI_V8_TYPE_ERROR(scope, "<defition> must be a UTF-8 function definition");
TRI_V8_TYPE_ERROR(scope, "<definition> must be a UTF-8 function definition");
}
string def = *utf8def;

View File

@ -50,6 +50,31 @@ using namespace std;
using namespace triagens::basics;
using namespace triagens::arango;
// -----------------------------------------------------------------------------
// --SECTION-- private defines
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @addtogroup VocBase
/// @{
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// @brief shortcut for read-only transaction class type
////////////////////////////////////////////////////////////////////////////////
#define ReadTransactionType SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> >
////////////////////////////////////////////////////////////////////////////////
/// @brief shortcut to wrap a shaped-json object in a read-only transaction
////////////////////////////////////////////////////////////////////////////////
#define WRAP_SHAPED_JSON(...) TRI_WrapShapedJson<ReadTransactionType>(__VA_ARGS__)
////////////////////////////////////////////////////////////////////////////////
/// @}
////////////////////////////////////////////////////////////////////////////////
// -----------------------------------------------------------------------------
// --SECTION-- HELPER FUNCTIONS
// -----------------------------------------------------------------------------
@ -131,10 +156,10 @@ static void CalculateSkipLimitSlice (size_t length,
// skip from the beginning
if (0 < skip) {
s = skip;
s = (size_t) skip;
if (e < s) {
s = e;
s = (size_t) e;
}
}
@ -149,7 +174,15 @@ static void CalculateSkipLimitSlice (size_t length,
// apply limit
if (s + limit < e) {
e = s + limit;
int64_t sum = (int64_t) s + (int64_t) limit;
if (sum < (int64_t) e) {
if (sum >= (int64_t) TRI_QRY_NO_LIMIT) {
e = TRI_QRY_NO_LIMIT;
}
else {
e = (size_t) sum;
}
}
}
}
@ -977,7 +1010,7 @@ static v8::Handle<v8::Value> ExecuteSkiplistQuery (v8::Arguments const& argv,
}
CollectionNameResolver resolver(col->_vocbase);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(col->_vocbase, resolver, col->_cid);
ReadTransactionType trx(col->_vocbase, resolver, col->_cid);
int res = trx.begin();
if (res != TRI_ERROR_NO_ERROR) {
@ -1057,7 +1090,7 @@ static v8::Handle<v8::Value> ExecuteSkiplistQuery (v8::Arguments const& argv,
}
}
v8::Handle<v8::Value> doc = TRI_WrapShapedJson(resolver, col, (TRI_doc_mptr_t const*) indexElement->_document, barrier);
v8::Handle<v8::Value> doc = WRAP_SHAPED_JSON(trx, col->_cid, (TRI_doc_mptr_t const*) indexElement->_document, barrier);
if (doc.IsEmpty()) {
error = true;
@ -1167,7 +1200,7 @@ static v8::Handle<v8::Value> ExecuteBitarrayQuery (v8::Arguments const& argv,
}
CollectionNameResolver resolver(col->_vocbase);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(col->_vocbase, resolver, col->_cid);
ReadTransactionType trx(col->_vocbase, resolver, col->_cid);
int res = trx.begin();
@ -1259,13 +1292,14 @@ static v8::Handle<v8::Value> ExecuteBitarrayQuery (v8::Arguments const& argv,
if (total > skip && count < limit) {
if (barrier == 0) {
barrier = TRI_CreateBarrierElement(&primary->_barrierList);
if (barrier == 0) {
error = true;
break;
}
}
v8::Handle<v8::Value> doc = TRI_WrapShapedJson(resolver, col, data, barrier);
v8::Handle<v8::Value> doc = WRAP_SHAPED_JSON(trx, col->_cid, data, barrier);
if (doc.IsEmpty()) {
error = true;
@ -1347,7 +1381,8 @@ static uint32_t SortGeoRandomGenerator (void) {
/// @brief creates a geo result
////////////////////////////////////////////////////////////////////////////////
static int StoreGeoResult (TRI_vocbase_col_t const* collection,
static int StoreGeoResult (ReadTransactionType& trx,
TRI_vocbase_col_t const* collection,
GeoCoordinates* cors,
v8::Handle<v8::Array>& documents,
v8::Handle<v8::Array>& distances) {
@ -1400,11 +1435,9 @@ static int StoreGeoResult (TRI_vocbase_col_t const* collection,
return TRI_ERROR_OUT_OF_MEMORY;
}
CollectionNameResolver resolver(collection->_vocbase);
// copy the documents
for (gtr = tmp, i = 0; gtr < gnd; ++gtr, ++i) {
documents->Set(i, TRI_WrapShapedJson(resolver, collection, (TRI_doc_mptr_t const*) gtr->_data, barrier));
documents->Set(i, WRAP_SHAPED_JSON(trx, collection->_cid, (TRI_doc_mptr_t const*) gtr->_data, barrier));
distances->Set(i, v8::Number::New(gtr->_distance));
}
@ -1449,7 +1482,7 @@ static v8::Handle<v8::Value> EdgesQuery (TRI_edge_direction_e direction, v8::Arg
}
CollectionNameResolver resolver(col->_vocbase);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(col->_vocbase, resolver, col->_cid);
ReadTransactionType trx(col->_vocbase, resolver, col->_cid);
int res = trx.begin();
@ -1530,7 +1563,7 @@ static v8::Handle<v8::Value> EdgesQuery (TRI_edge_direction_e direction, v8::Arg
}
}
v8::Handle<v8::Value> doc = TRI_WrapShapedJson(resolver, col, (TRI_doc_mptr_t const*) edges._buffer[j], barrier);
v8::Handle<v8::Value> doc = WRAP_SHAPED_JSON(trx, col->_cid, (TRI_doc_mptr_t const*) edges._buffer[j], barrier);
if (doc.IsEmpty()) {
// error
@ -1588,7 +1621,7 @@ static v8::Handle<v8::Value> EdgesQuery (TRI_edge_direction_e direction, v8::Arg
}
}
v8::Handle<v8::Value> doc = TRI_WrapShapedJson(resolver, col, (TRI_doc_mptr_t const*) edges._buffer[j], barrier);
v8::Handle<v8::Value> doc = WRAP_SHAPED_JSON(trx, col->_cid, (TRI_doc_mptr_t const*) edges._buffer[j], barrier);
if (doc.IsEmpty()) {
error = true;
@ -1658,7 +1691,7 @@ static v8::Handle<v8::Value> JS_AllQuery (v8::Arguments const& argv) {
vector<TRI_doc_mptr_t> docs;
CollectionNameResolver resolver(col->_vocbase);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(col->_vocbase, resolver, col->_cid);
ReadTransactionType trx(col->_vocbase, resolver, col->_cid);
int res = trx.begin();
@ -1683,11 +1716,11 @@ static v8::Handle<v8::Value> JS_AllQuery (v8::Arguments const& argv) {
// setup result
v8::Handle<v8::Object> result = v8::Object::New();
v8::Handle<v8::Array> documents = v8::Array::New(n);
// reserver full capacity in one go
// reserve full capacity in one go
result->Set(v8::String::New("documents"), documents);
for (size_t i = 0; i < n; ++i) {
v8::Handle<v8::Value> document = TRI_WrapShapedJson(resolver, col, &docs[i], barrier);
v8::Handle<v8::Value> document = WRAP_SHAPED_JSON(trx, col->_cid, &docs[i], barrier);
if (document.IsEmpty()) {
TRI_V8_EXCEPTION_MEMORY(scope);
@ -1735,7 +1768,8 @@ static v8::Handle<v8::Value> JS_AnyQuery (v8::Arguments const& argv) {
document._key = 0;
CollectionNameResolver resolver(col->_vocbase);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(col->_vocbase, resolver, col->_cid);
ReadTransactionType trx(col->_vocbase, resolver, col->_cid);
int res = trx.begin();
if (res != TRI_ERROR_NO_ERROR) {
@ -1760,7 +1794,7 @@ static v8::Handle<v8::Value> JS_AnyQuery (v8::Arguments const& argv) {
return scope.Close(v8::Null());
}
return scope.Close(TRI_WrapShapedJson(resolver, col, &document, barrier));
return scope.Close(WRAP_SHAPED_JSON(trx, col->_cid, &document, barrier));
}
////////////////////////////////////////////////////////////////////////////////
@ -1789,7 +1823,7 @@ static v8::Handle<v8::Value> JS_ByExampleQuery (v8::Arguments const& argv) {
}
CollectionNameResolver resolver(col->_vocbase);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(col->_vocbase, resolver, col->_cid);
ReadTransactionType trx(col->_vocbase, resolver, col->_cid);
int res = trx.begin();
@ -1853,7 +1887,8 @@ static v8::Handle<v8::Value> JS_ByExampleQuery (v8::Arguments const& argv) {
else {
for (size_t j = s; j < e; ++j) {
TRI_doc_mptr_t* mptr = (TRI_doc_mptr_t*) TRI_AtVector(&filtered, j);
v8::Handle<v8::Value> doc = TRI_WrapShapedJson(resolver, col, mptr, barrier);
v8::Handle<v8::Value> doc = WRAP_SHAPED_JSON(trx, col->_cid, mptr, barrier);
if (doc.IsEmpty()) {
error = true;
@ -1895,7 +1930,7 @@ static v8::Handle<v8::Value> JS_ByExampleQuery (v8::Arguments const& argv) {
/// It is the callers responsibility to acquire and free the required locks
////////////////////////////////////////////////////////////////////////////////
static v8::Handle<v8::Value> ByExampleHashIndexQuery (TRI_document_collection_t* document,
static v8::Handle<v8::Value> ByExampleHashIndexQuery (ReadTransactionType& trx,
TRI_vocbase_col_t const* collection,
v8::Handle<v8::Object>* err,
v8::Arguments const& argv) {
@ -1926,8 +1961,7 @@ static v8::Handle<v8::Value> ByExampleHashIndexQuery (TRI_document_collection_t*
result->Set(v8::String::New("documents"), documents);
// extract the index
CollectionNameResolver resolver(collection->_vocbase);
TRI_index_t* idx = TRI_LookupIndexByHandle(resolver, collection, argv[0], false, err);
TRI_index_t* idx = TRI_LookupIndexByHandle(trx.resolver(), collection, argv[0], false, err);
if (idx == 0) {
return scope.Close(v8::ThrowException(*err));
@ -1941,8 +1975,9 @@ static v8::Handle<v8::Value> ByExampleHashIndexQuery (TRI_document_collection_t*
// convert the example (index is locked by lockRead)
TRI_index_search_value_t searchValue;
TRI_shaper_t* shaper = document->base._shaper;
TRI_primary_collection_t* primary = trx.primaryCollection();
TRI_shaper_t* shaper = primary->_shaper;
int res = SetupSearchValue(&hashIndex->_paths, example, shaper, searchValue, err);
if (res != TRI_ERROR_NO_ERROR) {
@ -1965,13 +2000,13 @@ static v8::Handle<v8::Value> ByExampleHashIndexQuery (TRI_document_collection_t*
CalculateSkipLimitSlice(total, skip, limit, s, e);
if (s < e) {
TRI_barrier_t* barrier = TRI_CreateBarrierElement(&document->base._barrierList);
TRI_barrier_t* barrier = TRI_CreateBarrierElement(&primary->_barrierList);
if (barrier == 0) {
error = true;
}
else {
for (size_t i = s; i < e; ++i) {
v8::Handle<v8::Value> doc = TRI_WrapShapedJson(resolver, collection, list._documents[i], barrier);
v8::Handle<v8::Value> doc = WRAP_SHAPED_JSON(trx, collection->_cid, list._documents[i], barrier);
if (doc.IsEmpty()) {
error = true;
@ -2015,7 +2050,8 @@ static v8::Handle<v8::Value> JS_ByExampleHashIndex (v8::Arguments const& argv) {
}
CollectionNameResolver resolver(col->_vocbase);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(col->_vocbase, resolver, col->_cid);
ReadTransactionType trx(col->_vocbase, resolver, col->_cid);
int res = trx.begin();
if (res != TRI_ERROR_NO_ERROR) {
@ -2030,7 +2066,7 @@ static v8::Handle<v8::Value> JS_ByExampleHashIndex (v8::Arguments const& argv) {
trx.lockRead();
v8::Handle<v8::Value> result = ByExampleHashIndexQuery((TRI_document_collection_t*) trx.primaryCollection(), col, &err, argv);
v8::Handle<v8::Value> result = ByExampleHashIndexQuery(trx, col, &err, argv);
trx.finish(res);
@ -2131,7 +2167,7 @@ static v8::Handle<v8::Value> JS_InEdgesQuery (v8::Arguments const& argv) {
/// the caller must ensure all relevant locks are acquired and freed
////////////////////////////////////////////////////////////////////////////////
static v8::Handle<v8::Value> FulltextQuery (TRI_document_collection_t* document,
static v8::Handle<v8::Value> FulltextQuery (ReadTransactionType& trx,
TRI_vocbase_col_t const* collection,
v8::Handle<v8::Object>* err,
v8::Arguments const& argv) {
@ -2143,8 +2179,7 @@ static v8::Handle<v8::Value> FulltextQuery (TRI_document_collection_t* document,
}
// extract the index
CollectionNameResolver resolver(collection->_vocbase);
TRI_index_t* idx = TRI_LookupIndexByHandle(resolver, collection, argv[0], false, err);
TRI_index_t* idx = TRI_LookupIndexByHandle(trx.resolver(), collection, argv[0], false, err);
if (idx == 0) {
return scope.Close(v8::ThrowException(*err));
@ -2198,7 +2233,7 @@ static v8::Handle<v8::Value> FulltextQuery (TRI_document_collection_t* document,
result->Set(v8::String::New("documents"), documents);
for (uint32_t i = 0; i < queryResult->_numDocuments; ++i) {
documents->Set(i, TRI_WrapShapedJson(resolver, collection, (TRI_doc_mptr_t const*) queryResult->_documents[i], barrier));
documents->Set(i, WRAP_SHAPED_JSON(trx, collection->_cid, (TRI_doc_mptr_t const*) queryResult->_documents[i], barrier));
}
TRI_FreeResultFulltextIndex(queryResult);
@ -2239,7 +2274,8 @@ static v8::Handle<v8::Value> JS_FulltextQuery (v8::Arguments const& argv) {
}
CollectionNameResolver resolver(col->_vocbase);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(col->_vocbase, resolver, col->_cid);
ReadTransactionType trx(col->_vocbase, resolver, col->_cid);
int res = trx.begin();
if (res != TRI_ERROR_NO_ERROR) {
@ -2254,7 +2290,7 @@ static v8::Handle<v8::Value> JS_FulltextQuery (v8::Arguments const& argv) {
trx.lockRead();
v8::Handle<v8::Value> result = FulltextQuery((TRI_document_collection_t*) trx.primaryCollection(), col, &err, argv);
v8::Handle<v8::Value> result = FulltextQuery(trx, col, &err, argv);
trx.finish(res);
@ -2271,7 +2307,7 @@ static v8::Handle<v8::Value> JS_FulltextQuery (v8::Arguments const& argv) {
/// the caller must ensure all relevant locks are acquired and freed
////////////////////////////////////////////////////////////////////////////////
static v8::Handle<v8::Value> NearQuery (TRI_document_collection_t* document,
static v8::Handle<v8::Value> NearQuery (ReadTransactionType& trx,
TRI_vocbase_col_t const* collection,
v8::Handle<v8::Object>* err,
v8::Arguments const& argv) {
@ -2283,8 +2319,7 @@ static v8::Handle<v8::Value> NearQuery (TRI_document_collection_t* document,
}
// extract the index
CollectionNameResolver resolver(collection->_vocbase);
TRI_index_t* idx = TRI_LookupIndexByHandle(resolver, collection, argv[0], false, err);
TRI_index_t* idx = TRI_LookupIndexByHandle(trx.resolver(), collection, argv[0], false, err);
if (idx == 0) {
return scope.Close(v8::ThrowException(*err));
@ -2313,7 +2348,7 @@ static v8::Handle<v8::Value> NearQuery (TRI_document_collection_t* document,
GeoCoordinates* cors = TRI_NearestGeoIndex(idx, latitude, longitude, limit);
if (cors != 0) {
int res = StoreGeoResult(collection, cors, documents, distances);
int res = StoreGeoResult(trx, collection, cors, documents, distances);
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_EXCEPTION_MESSAGE(scope, res, "cannot add document to geo-index");
@ -2338,7 +2373,8 @@ static v8::Handle<v8::Value> JS_NearQuery (v8::Arguments const& argv) {
}
CollectionNameResolver resolver(col->_vocbase);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(col->_vocbase, resolver, col->_cid);
ReadTransactionType trx(col->_vocbase, resolver, col->_cid);
int res = trx.begin();
if (res != TRI_ERROR_NO_ERROR) {
@ -2353,7 +2389,7 @@ static v8::Handle<v8::Value> JS_NearQuery (v8::Arguments const& argv) {
trx.lockRead();
v8::Handle<v8::Value> result = NearQuery((TRI_document_collection_t*) trx.primaryCollection(), col, &err, argv);
v8::Handle<v8::Value> result = NearQuery(trx, col, &err, argv);
trx.finish(res);
@ -2405,7 +2441,7 @@ static v8::Handle<v8::Value> JS_TopQuery (v8::Arguments const& argv) {
}
CollectionNameResolver resolver(col->_vocbase);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(col->_vocbase, resolver, col->_cid);
ReadTransactionType trx(col->_vocbase, resolver, col->_cid);
int res = trx.begin();
@ -2434,10 +2470,15 @@ static v8::Handle<v8::Value> JS_TopQuery (v8::Arguments const& argv) {
}
TRI_barrier_t* barrier = TRI_CreateBarrierElement(&((TRI_primary_collection_t*) col->_collection)->_barrierList);
v8::Handle<v8::Value> result = TRI_WrapShapedJson(resolver,
col,
(TRI_doc_mptr_t const*) elms->_elements[0]._document,
barrier);
if (barrier == 0) {
TRI_V8_EXCEPTION_MEMORY(scope);
}
v8::Handle<v8::Value> result = WRAP_SHAPED_JSON(trx,
col->_cid,
(TRI_doc_mptr_t const*) elms->_elements[0]._document,
barrier);
TRI_Free(TRI_UNKNOWN_MEM_ZONE, elms->_elements);
@ -2451,7 +2492,7 @@ static v8::Handle<v8::Value> JS_TopQuery (v8::Arguments const& argv) {
/// the caller must ensure all relevant locks are acquired and freed
////////////////////////////////////////////////////////////////////////////////
static v8::Handle<v8::Value> WithinQuery (TRI_document_collection_t* document,
static v8::Handle<v8::Value> WithinQuery (ReadTransactionType& trx,
TRI_vocbase_col_t const* collection,
v8::Handle<v8::Object>* err,
v8::Arguments const& argv) {
@ -2463,8 +2504,7 @@ static v8::Handle<v8::Value> WithinQuery (TRI_document_collection_t* document,
}
// extract the index
CollectionNameResolver resolver(collection->_vocbase);
TRI_index_t* idx = TRI_LookupIndexByHandle(resolver, collection, argv[0], false, err);
TRI_index_t* idx = TRI_LookupIndexByHandle(trx.resolver(), collection, argv[0], false, err);
if (idx == 0) {
return scope.Close(v8::ThrowException(*err));
@ -2493,7 +2533,7 @@ static v8::Handle<v8::Value> WithinQuery (TRI_document_collection_t* document,
GeoCoordinates* cors = TRI_WithinGeoIndex(idx, latitude, longitude, radius);
if (cors != 0) {
int res = StoreGeoResult(collection, cors, documents, distances);
int res = StoreGeoResult(trx, collection, cors, documents, distances);
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_EXCEPTION_MESSAGE(scope, res, "cannot add document to geo-index");
@ -2518,7 +2558,8 @@ static v8::Handle<v8::Value> JS_WithinQuery (v8::Arguments const& argv) {
}
CollectionNameResolver resolver(col->_vocbase);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(col->_vocbase, resolver, col->_cid);
ReadTransactionType trx(col->_vocbase, resolver, col->_cid);
int res = trx.begin();
if (res != TRI_ERROR_NO_ERROR) {
@ -2533,7 +2574,7 @@ static v8::Handle<v8::Value> JS_WithinQuery (v8::Arguments const& argv) {
trx.lockRead();
v8::Handle<v8::Value> result = WithinQuery((TRI_document_collection_t*) trx.primaryCollection(), col, &err, argv);
v8::Handle<v8::Value> result = WithinQuery(trx, col, &err, argv);
trx.finish(res);

View File

@ -89,6 +89,12 @@ static v8::Handle<v8::Value> WrapGeneralCursor (void* cursor);
/// @{
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// @brief shortcut for read-only transaction class type
////////////////////////////////////////////////////////////////////////////////
#define ReadTransactionType SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> >
////////////////////////////////////////////////////////////////////////////////
/// @brief macro to make sure we won't continue if we are inside a transaction
////////////////////////////////////////////////////////////////////////////////
@ -363,8 +369,9 @@ static bool ParseDocumentHandle (v8::Handle<v8::Value> arg,
static int ExtractDocumentKey (v8::Handle<v8::Value> arg,
TRI_voc_key_t& key) {
TRI_v8_global_t* v8g = (TRI_v8_global_t*) v8::Isolate::GetCurrent()->GetData();
key = 0;
if (arg->IsObject()) {
if (arg->IsObject() && ! arg->IsArray()) {
v8::Handle<v8::Object> obj = arg->ToObject();
if (obj->Has(v8g->_KeyKey)) {
@ -378,18 +385,16 @@ static int ExtractDocumentKey (v8::Handle<v8::Value> arg,
return TRI_ERROR_NO_ERROR;
}
else {
key = 0;
return TRI_ERROR_ARANGO_DOCUMENT_KEY_BAD;
}
}
else {
key = 0;
return TRI_ERROR_ARANGO_DOCUMENT_KEY_MISSING;
}
}
else {
key = 0;
return TRI_ERROR_ARANGO_DOCUMENT_KEY_MISSING;
// anything other than an object will be rejected
return TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID;
}
}
@ -872,7 +877,8 @@ static v8::Handle<v8::Value> DocumentVocbaseCol (const bool useCollection,
assert(col);
assert(key);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(vocbase, resolver, col->_cid);
ReadTransactionType trx(vocbase, resolver, col->_cid);
int res = trx.begin();
if (res != TRI_ERROR_NO_ERROR) {
@ -894,7 +900,8 @@ static v8::Handle<v8::Value> DocumentVocbaseCol (const bool useCollection,
res = trx.read(&document, key);
if (res == TRI_ERROR_NO_ERROR) {
result = TRI_WrapShapedJson(resolver, col, &document, barrier);
result = TRI_WrapShapedJson<ReadTransactionType >(trx, col->_cid, &document, barrier);
if (! result.IsEmpty()) {
freeBarrier = false;
}
@ -986,6 +993,11 @@ static v8::Handle<v8::Value> ReplaceVocbaseCol (const bool useCollection,
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_EXCEPTION_MESSAGE(scope, res, "cannot replace document");
}
// we're only accepting "real" object documents
if (! argv[1]->IsObject() || argv[1]->IsArray()) {
TRI_V8_EXCEPTION(scope, TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
TRI_primary_collection_t* primary = trx.primaryCollection();
TRI_shaped_json_t* shaped = TRI_ShapedJsonV8Object(argv[1], primary->_shaper);
@ -1054,6 +1066,9 @@ static v8::Handle<v8::Value> SaveVocbaseCol (
holder.registerString(TRI_CORE_MEM_ZONE, key);
}
}
else {
TRI_V8_EXCEPTION(scope, TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
TRI_primary_collection_t* primary = trx->primaryCollection();
TRI_shaped_json_t* shaped = TRI_ShapedJsonV8Object(argv[0], primary->_shaper);
@ -1111,7 +1126,7 @@ static v8::Handle<v8::Value> SaveEdgeCol (
TRI_voc_key_t key = 0;
int res;
if (argv[2]->IsObject()) {
if (argv[2]->IsObject() && ! argv[2]->IsArray()) {
res = ExtractDocumentKey(argv[2]->ToObject(), key);
if (res != TRI_ERROR_NO_ERROR && res != TRI_ERROR_ARANGO_DOCUMENT_KEY_MISSING) {
@ -1121,6 +1136,9 @@ static v8::Handle<v8::Value> SaveEdgeCol (
holder.registerString(TRI_CORE_MEM_ZONE, key);
}
}
else {
TRI_V8_EXCEPTION(scope, TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
const bool forceSync = ExtractForceSync(argv, 4);
@ -1245,6 +1263,11 @@ static v8::Handle<v8::Value> UpdateVocbaseCol (const bool useCollection,
assert(col);
assert(key);
if (! argv[1]->IsObject() || argv[1]->IsArray()) {
// we're only accepting "real" object documents
TRI_V8_EXCEPTION(scope, TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
TRI_json_t* json = TRI_ObjectToJson(argv[1]);
if (! holder.registerJson(TRI_UNKNOWN_MEM_ZONE, json)) {
@ -2660,11 +2683,14 @@ static v8::Handle<v8::Value> JS_RunAhuacatl (v8::Arguments const& argv) {
if (argc > 2) {
doCount = TRI_ObjectToBoolean(argv[2]);
if (argc > 3) {
double maxValue = TRI_ObjectToDouble(argv[3]);
if (maxValue >= 1.0) {
batchSize = (uint32_t) maxValue;
}
if (argc > 4) {
allowDirectReturn = TRI_ObjectToBoolean(argv[4]);
}
@ -3420,7 +3446,8 @@ static v8::Handle<v8::Value> JS_CountVocbaseCol (v8::Arguments const& argv) {
}
CollectionNameResolver resolver(collection->_vocbase);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(collection->_vocbase, resolver, collection->_cid);
ReadTransactionType trx(collection->_vocbase, resolver, collection->_cid);
int res = trx.begin();
if (res != TRI_ERROR_NO_ERROR) {
@ -4497,7 +4524,8 @@ static v8::Handle<v8::Value> JS_FiguresVocbaseCol (v8::Arguments const& argv) {
v8::Handle<v8::Object> result = v8::Object::New();
CollectionNameResolver resolver(collection->_vocbase);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(collection->_vocbase, resolver, collection->_cid);
ReadTransactionType trx(collection->_vocbase, resolver, collection->_cid);
int res = trx.begin();
if (res != TRI_ERROR_NO_ERROR) {
@ -4585,7 +4613,8 @@ static v8::Handle<v8::Value> JS_GetIndexesVocbaseCol (v8::Arguments const& argv)
}
CollectionNameResolver resolver(collection->_vocbase);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(collection->_vocbase, resolver, collection->_cid);
ReadTransactionType trx(collection->_vocbase, resolver, collection->_cid);
int res = trx.begin();
if (res != TRI_ERROR_NO_ERROR) {
@ -5423,7 +5452,8 @@ static v8::Handle<v8::Value> JS_RevisionVocbaseCol (v8::Arguments const& argv) {
}
CollectionNameResolver resolver(collection->_vocbase);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(collection->_vocbase, resolver, collection->_cid);
ReadTransactionType trx(collection->_vocbase, resolver, collection->_cid);
int res = trx.begin();
if (res != TRI_ERROR_NO_ERROR) {
@ -6239,9 +6269,8 @@ static v8::Handle<v8::Value> JS_VersionVocbase (v8::Arguments const& argv) {
static void WeakBarrierCallback (v8::Isolate* isolate,
v8::Persistent<v8::Value> object,
void* parameter) {
TRI_barrier_t* barrier;
TRI_v8_global_t* v8g = (TRI_v8_global_t*) isolate->GetData();
barrier = (TRI_barrier_t*) parameter;
TRI_barrier_blocker_t* barrier = (TRI_barrier_blocker_t*) parameter;
LOG_TRACE("weak-callback for barrier called");
@ -6249,13 +6278,12 @@ static void WeakBarrierCallback (v8::Isolate* isolate,
v8::Persistent<v8::Value> persistent = v8g->JSBarriers[barrier];
v8g->JSBarriers.erase(barrier);
// dispose and clear the persistent handle
persistent.Dispose(isolate);
persistent.Clear();
// free the barrier
TRI_FreeBarrier(barrier);
TRI_FreeBarrier(&barrier->base);
}
////////////////////////////////////////////////////////////////////////////////
@ -6687,15 +6715,16 @@ v8::Handle<v8::Object> TRI_WrapCollection (TRI_vocbase_col_t const* collection)
/// @brief wraps a TRI_shaped_json_t
////////////////////////////////////////////////////////////////////////////////
v8::Handle<v8::Value> TRI_WrapShapedJson (const CollectionNameResolver& resolver,
TRI_vocbase_col_t const* collection,
template<class T>
v8::Handle<v8::Value> TRI_WrapShapedJson (T& trx,
TRI_voc_cid_t cid,
TRI_doc_mptr_t const* document,
TRI_barrier_t* barrier) {
v8::HandleScope scope;
TRI_ASSERT_MAINTAINER(document != 0);
TRI_ASSERT_MAINTAINER(document->_key != 0);
TRI_ASSERT_MAINTAINER(collection != 0);
TRI_ASSERT_MAINTAINER(document->_data != 0);
TRI_ASSERT_MAINTAINER(barrier != 0);
v8::Isolate* isolate = v8::Isolate::GetCurrent();
@ -6710,9 +6739,39 @@ v8::Handle<v8::Value> TRI_WrapShapedJson (const CollectionNameResolver& resolver
return scope.Close(result);
}
TRI_barrier_blocker_t* blocker = (TRI_barrier_blocker_t*) barrier;
bool doCopy = trx.mustCopyShapedJson();
if (doCopy) {
// we'll create our own copy of the data
TRI_df_marker_t const* m = static_cast<TRI_df_marker_t const*>(document->_data);
if (blocker->_data != NULL && blocker->_mustFree) {
TRI_Free(TRI_UNKNOWN_MEM_ZONE, blocker->_data);
blocker->_data = NULL;
blocker->_mustFree = false;
}
blocker->_data = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, m->_size, false);
if (blocker->_data == 0) {
// out of memory
return scope.Close(result);
}
memcpy(blocker->_data, m, m->_size);
blocker->_mustFree = true;
}
else {
// we'll use the pointer into the datafile
blocker->_data = const_cast<void*>(document->_data);
}
// point the 0 index Field to the c++ pointer for unwrapping later
result->SetInternalField(SLOT_CLASS_TYPE, v8::Integer::New(WRP_SHAPED_JSON_TYPE));
result->SetInternalField(SLOT_CLASS, v8::External::New(const_cast<void*>(document->_data)));
result->SetInternalField(SLOT_CLASS, v8::External::New(blocker->_data));
map< void*, v8::Persistent<v8::Value> >::iterator i = v8g->JSBarriers.find(barrier);
@ -6730,7 +6789,7 @@ v8::Handle<v8::Value> TRI_WrapShapedJson (const CollectionNameResolver& resolver
// store the document reference
TRI_voc_rid_t rid = document->_rid;
result->Set(v8g->_IdKey, V8DocumentId(resolver.getCollectionName(collection->_cid), document->_key), v8::ReadOnly);
result->Set(v8g->_IdKey, V8DocumentId(trx.resolver().getCollectionName(cid), document->_key), v8::ReadOnly);
result->Set(v8g->_RevKey, V8RevisionId(rid), v8::ReadOnly);
result->Set(v8g->_KeyKey, v8::String::New(document->_key), v8::ReadOnly);
@ -6739,8 +6798,8 @@ v8::Handle<v8::Value> TRI_WrapShapedJson (const CollectionNameResolver& resolver
if (type == TRI_DOC_MARKER_KEY_EDGE) {
TRI_doc_edge_key_marker_t* marker = (TRI_doc_edge_key_marker_t*) document->_data;
result->Set(v8g->_FromKey, V8DocumentId(resolver.getCollectionName(marker->_fromCid), ((char*) marker) + marker->_offsetFromKey));
result->Set(v8g->_ToKey, V8DocumentId(resolver.getCollectionName(marker->_toCid), ((char*) marker) + marker->_offsetToKey));
result->Set(v8g->_FromKey, V8DocumentId(trx.resolver().getCollectionName(marker->_fromCid), ((char*) marker) + marker->_offsetFromKey));
result->Set(v8g->_ToKey, V8DocumentId(trx.resolver().getCollectionName(marker->_toCid), ((char*) marker) + marker->_offsetToKey));
}
// and return

View File

@ -78,8 +78,9 @@ v8::Handle<v8::Object> TRI_WrapCollection (TRI_vocbase_col_t const*);
/// @brief wraps a TRI_shaped_json_t
////////////////////////////////////////////////////////////////////////////////
v8::Handle<v8::Value> TRI_WrapShapedJson (const triagens::arango::CollectionNameResolver&,
TRI_vocbase_col_t const*,
template<class T>
v8::Handle<v8::Value> TRI_WrapShapedJson (T&,
TRI_voc_cid_t,
TRI_doc_mptr_t const*,
TRI_barrier_t*);

81
arangod/VocBase/FILES.md Normal file
View File

@ -0,0 +1,81 @@
Files written by ArangoDB
=========================
This document briefly describes which files are written by ArangoDB on
startup and shutdown, and what the role of these files is.
All files mentioned are placed in the vocbase database directory.
lock
====
A lock file containing the process id of a running ArangoDB.
The purpose of the lock file is to avoid starting multiple instances of ArangoDB
with the same database directory, which would potentially lead to inconsistencies.
The presence of the lock file is checked at ArangoDB startup, and ArangoDB will
not start if there is already a lock file. The lock file will be removed on
clean shutdown, and also on unclean shutdown.
The lock file will be flocked on Linux, and thus can't be put into a filesystem
that doesn't support flock.
The lock file is used since ArangoDB 1.0.
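A minimal stand-alone sketch of the flock-based guard described above (a hypothetical helper for illustration, not ArangoDB's actual code):

#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

/* try to acquire the database directory lock file; returns the open fd
   on success (keep it open to hold the lock), or -1 if another instance
   already holds the lock */
static int AcquireLockFile (char const* path) {
  int fd = open(path, O_CREAT | O_RDWR, 0644);
  if (fd < 0) {
    return -1;
  }
  if (flock(fd, LOCK_EX | LOCK_NB) != 0) {
    /* somebody else holds the flock: refuse to start */
    close(fd);
    return -1;
  }
  /* write our process id into the lock file */
  dprintf(fd, "%d\n", (int) getpid());
  return fd;
}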
VERSION
=======
A JSON file with version information for the server.
The file will contain a JSON array with a "version" attribute. The version attribute
will contain the version number that ArangoDB was last started with.
It will also contain a "tasks" attribute with an array of all the tasks that are
or were already executed by the upgrade procedure in js/server/version-check.js.
Every successful upgrade task will be inserted into the "tasks" array with a value of
true. Every failed upgrade task will be inserted into the "tasks" array with a value
of false.
Failed upgrade tasks will be re-executed on server startup if they are still
present in the js/server/version-check.js file.
The VERSION file will be created on the first start of ArangoDB if the database
directory is still empty.
The VERSION file is used since ArangoDB 1.1.
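For illustration only (version value and task name made up), a VERSION file might look like: `{ "version": 10300, "tasks": { "createConfiguration": true } }`.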
SERVER
======
A JSON file containing some basic information about the server.
It contains a "createdTime" attribute, with the information when the SERVER file was
first created.
It will also contain a "serverId" attribute, which is a randomly generated server id
that may be used for replication purposes in the future. The "serverId" is currently
(as of ArangoDB 1.3) not used.
The SERVER file is used since ArangoDB 1.3.
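For illustration only (attribute values made up), a SERVER file might look like: `{ "createdTime": "2013-05-17T17:26:45Z", "serverId": "237996207192832" }`.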
SHUTDOWN
========
A JSON file containing information about the last clean shutdown of the server.
If the server is shut down cleanly, the SHUTDOWN file is created. It will contain an
attribute "tick" with the value of the last tick the server used. It will also contain
a string with the datetime of the server shutdown. This can be used for informational
purposes.
On server startup, ArangoDB will look for the SHUTDOWN file and read it if present.
When present, the "tick" attribute will be adjust the server's tick value. This is a
shortcut that allows bypassing the scanning of all collection datafiles for tick values
at startup.
On startup, the SHUTDOWN file is removed before the server enters the normal
operation mode. That prevents using a stale SHUTDOWN file in case of a server crash.
In case the SHUTDOWN file is not there, ArangoDB will scan the latest datafiles
(journals) of collections for the latest tick values used.
The SHUTDOWN file is in use since ArangoDB 1.4.
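For illustration only (the datetime attribute name is assumed, values made up), a SHUTDOWN file might look like: `{ "tick": "12345678", "shutdownTime": "2013-05-17T17:26:45Z" }`; only the "tick" value is read back at startup.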

View File

@ -1,7 +1,7 @@
Using Locks in ArangoDB
=======================
This documents summarizes the various locks and their usage. You should
This document summarizes the various locks and their usage. You should
carefully read it in order to avoid dead-locks.
TRI_*_COLLECTIONS_VOCBASE (R/W)

View File

@ -202,6 +202,7 @@ static TRI_vocbase_auth_t* ConvertAuthInfo (TRI_vocbase_t* vocbase,
}
result = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_vocbase_auth_t), true);
if (result == NULL) {
TRI_FreeString(TRI_CORE_MEM_ZONE, user);
TRI_FreeString(TRI_CORE_MEM_ZONE, password);
@ -266,10 +267,10 @@ bool TRI_LoadAuthInfo (TRI_vocbase_t* vocbase) {
TRI_WriteLockReadWriteLock(&vocbase->_authInfoLock);
// .............................................................................
// inside a read transaction
// inside a write transaction
// .............................................................................
collection->_collection->beginRead(collection->_collection);
collection->_collection->beginWrite(collection->_collection);
beg = primary->_primaryIndex._table;
end = beg + primary->_primaryIndex._nrAlloc;
@ -302,12 +303,13 @@ bool TRI_LoadAuthInfo (TRI_vocbase_t* vocbase) {
}
}
collection->_collection->endRead(collection->_collection);
collection->_collection->endWrite(collection->_collection);
// .............................................................................
// outside a read transaction
// outside a write transaction
// .............................................................................
vocbase->_authInfoFlush = true;
TRI_WriteUnlockReadWriteLock(&vocbase->_authInfoLock);
TRI_ReleaseCollectionVocBase(vocbase, collection);
@ -358,11 +360,33 @@ void TRI_DestroyAuthInfo (TRI_vocbase_t* vocbase) {
}
}
vocbase->_authInfo._nrUsed = 0;
vocbase->_authInfo._nrUsed = 0;
TRI_WriteUnlockReadWriteLock(&vocbase->_authInfoLock);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief returns whether some externally cached authentication info
/// should be flushed, by querying the internal flush flag.
/// Checking the information may also change the state of the flag.
////////////////////////////////////////////////////////////////////////////////
bool TRI_FlushAuthenticationAuthInfo () {
bool res;
TRI_ReadLockReadWriteLock(&DefaultAuthInfo->_authInfoLock);
res = DefaultAuthInfo->_authInfoFlush;
TRI_ReadUnlockReadWriteLock(&DefaultAuthInfo->_authInfoLock);
if (res) {
TRI_WriteLockReadWriteLock(&DefaultAuthInfo->_authInfoLock);
DefaultAuthInfo->_authInfoFlush = false;
TRI_WriteUnlockReadWriteLock(&DefaultAuthInfo->_authInfoLock);
}
return res;
}
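A hypothetical caller-side sketch of this query-and-clear contract (ClearCachedCredentials is a made-up invalidation hook, not an ArangoDB function):

/* e.g. run periodically by a component that caches credentials */
if (TRI_FlushAuthenticationAuthInfo()) {
  /* auth data changed since the last check: drop the cached credentials */
  ClearCachedCredentials();
}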
////////////////////////////////////////////////////////////////////////////////
/// @brief checks the authentication
////////////////////////////////////////////////////////////////////////////////
@ -379,16 +403,11 @@ bool TRI_CheckAuthenticationAuthInfo (char const* username,
assert(DefaultAuthInfo);
// lockup username
// look up username
TRI_ReadLockReadWriteLock(&DefaultAuthInfo->_authInfoLock);
auth = TRI_LookupByKeyAssociativePointer(&DefaultAuthInfo->_authInfo, username);
if (auth == 0) {
TRI_ReadUnlockReadWriteLock(&DefaultAuthInfo->_authInfoLock);
return false;
}
if (! auth->_active) {
if (auth == NULL || ! auth->_active) {
TRI_ReadUnlockReadWriteLock(&DefaultAuthInfo->_authInfoLock);
return false;
}

View File

@ -98,6 +98,14 @@ bool TRI_ReloadAuthInfo (struct TRI_vocbase_s*);
void TRI_DestroyAuthInfo (struct TRI_vocbase_s*);
////////////////////////////////////////////////////////////////////////////////
/// @brief returns whether some externally cached authentication info
/// should be flushed, by querying the internal flush flag.
/// Checking the information may also change the state of the flag.
////////////////////////////////////////////////////////////////////////////////
bool TRI_FlushAuthenticationAuthInfo (void);
////////////////////////////////////////////////////////////////////////////////
/// @brief checks the authentication
////////////////////////////////////////////////////////////////////////////////

View File

@ -43,6 +43,22 @@
/// @{
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// @brief free data associated with a barrier
/// currently only barriers pointing to shaped json might contain data that
/// needs to be freed.
////////////////////////////////////////////////////////////////////////////////
static void FreeDataBarrier (TRI_barrier_t* element) {
if (element->_type == TRI_BARRIER_ELEMENT) {
TRI_barrier_blocker_t* b = (TRI_barrier_blocker_t*) element;
if (b->_data != NULL && b->_mustFree) {
TRI_Free(TRI_UNKNOWN_MEM_ZONE, b->_data);
}
}
}
////////////////////////////////////////////////////////////////////////////////
/// @brief inserts the barrier element into the linked list of barrier elements
/// of the collection
@ -121,6 +137,8 @@ void TRI_DestroyBarrierList (TRI_barrier_list_t* container) {
ptr->_type == TRI_BARRIER_COLLECTION_COMPACTION) {
// free data still allocated in barrier elements
FreeDataBarrier(ptr);
TRI_Free(TRI_UNKNOWN_MEM_ZONE, ptr);
}
else if (ptr->_type == TRI_BARRIER_ELEMENT) {
@ -150,6 +168,7 @@ bool TRI_ContainsBarrierList (TRI_barrier_list_t* container, TRI_barrier_type_e
while (ptr != NULL) {
if (ptr->_type == type) {
TRI_UnlockSpin(&container->_lock);
return true;
}
ptr = ptr->_next;
@ -176,6 +195,8 @@ TRI_barrier_t* TRI_CreateBarrierElementZ (TRI_barrier_list_t* container,
}
element->base._type = TRI_BARRIER_ELEMENT;
element->_data = NULL;
element->_mustFree = false;
element->_line = line;
element->_filename = filename;
@ -350,6 +371,10 @@ void TRI_FreeBarrier (TRI_barrier_t* element) {
}
TRI_UnlockSpin(&container->_lock);
// free data contained in the element
// currently, only barriers of type ELEMENT contain data that needs freeing
FreeDataBarrier(element);
// free the element
TRI_Free(TRI_UNKNOWN_MEM_ZONE, element);

View File

@ -86,6 +86,8 @@ TRI_barrier_t;
typedef struct TRI_barrier_blocker_s {
TRI_barrier_t base;
void* _data;
bool _mustFree;
size_t _line;
char const* _filename;
}

View File

@ -388,7 +388,8 @@ static TRI_col_file_structure_t ScanCollectionDirectory (char const* path) {
newName = TRI_Concatenate2File(path, relName);
TRI_FreeString(TRI_CORE_MEM_ZONE, relName);
if (! TRI_ExistsFile(newName)) {
if (TRI_ExistsFile(newName)) {
// we have a compaction-xxxx and a datafile-xxxx file. we'll keep the datafile
TRI_UnlinkFile(filename);
LOG_WARNING("removing left-over compaction file '%s'", filename);
@ -414,6 +415,8 @@ static TRI_col_file_structure_t ScanCollectionDirectory (char const* path) {
}
}
TRI_Free(TRI_CORE_MEM_ZONE, filename);
filename = newName;
TRI_PushBackVectorString(&structure._datafiles, filename);
}
@ -529,7 +532,6 @@ static bool CheckCollection (TRI_collection_t* collection) {
char* ptr;
TRI_col_header_marker_t* cm;
if (TRI_EqualString2("compaction", first, firstLen)) {
// found a compaction file. now rename it back
char* relName;
@ -540,10 +542,10 @@ static bool CheckCollection (TRI_collection_t* collection) {
newName = TRI_Concatenate2File(collection->_directory, relName);
TRI_FreeString(TRI_CORE_MEM_ZONE, relName);
if (! TRI_ExistsFile(newName)) {
if (TRI_ExistsFile(newName)) {
// we have a compaction-xxxx and a datafile-xxxx file. we'll keep the datafile
LOG_WARNING("removing compaction file '%s'", filename);
LOG_WARNING("removing unfinished compaction file '%s'", filename);
TRI_UnlinkFile(filename);
TRI_FreeString(TRI_CORE_MEM_ZONE, newName);
@ -1151,6 +1153,7 @@ int TRI_LoadCollectionInfo (char const* path,
if (json->_type != TRI_JSON_ARRAY) {
LOG_ERROR("cannot open '%s', file does not contain a json array", filename);
TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json);
return TRI_set_errno(TRI_ERROR_ARANGO_ILLEGAL_PARAMETER_FILE);
}
@ -1191,7 +1194,7 @@ int TRI_LoadCollectionInfo (char const* path,
parameter->_isSystem = TRI_IsSystemCollectionName(parameter->_name);
}
else if (value->_type == TRI_JSON_STRING) {
else if (TRI_EqualString(key->_value._string.data, "cid")) {
parameter->_cid = (TRI_voc_cid_t) TRI_UInt64String(value->_value._string.data);
}
}

View File

@ -48,6 +48,27 @@
/// @{
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// @brief minimum size of dead data (in bytes) in a datafile that will make
/// the datafile eligible for compaction at all.
///
/// Any datafile with less dead data than the threshold will not become a
/// candidate for compaction.
////////////////////////////////////////////////////////////////////////////////
#define COMPACTOR_DEAD_SIZE_THRESHOLD (1024 * 128)
////////////////////////////////////////////////////////////////////////////////
/// @brief percentage of dead documents in a datafile that will trigger the
/// compaction
///
/// for example, if the collection contains 800 bytes of alive and 400 bytes of
/// dead documents, the share of the dead documents is 400 / (400 + 800) = 33 %.
/// if this value is higher than the threshold, the datafile will be compacted
////////////////////////////////////////////////////////////////////////////////
#define COMPACTOR_DEAD_SIZE_SHARE (0.1)
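Condensed, the two thresholds above amount to a candidate check like the following sketch (mirroring the selection logic shown further below; <stdbool.h> and <stdint.h> assumed):

static bool IsCompactionCandidate (uint64_t sizeDead, uint64_t sizeAlive) {
  /* too little dead data in absolute terms? */
  if (sizeDead < COMPACTOR_DEAD_SIZE_THRESHOLD) {
    return false;
  }
  /* dead share must reach the trigger percentage, e.g. 400 / 1200 = 0.33 */
  double share = (double) sizeDead / ((double) sizeDead + (double) sizeAlive);
  return share >= COMPACTOR_DEAD_SIZE_SHARE;
}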
////////////////////////////////////////////////////////////////////////////////
/// @brief compactify interval in microseconds
////////////////////////////////////////////////////////////////////////////////
@ -431,8 +452,7 @@ static bool Compactifier (TRI_df_marker_t const* marker,
if (found != NULL) {
found2 = CONST_CAST(found);
// the fid won't change
TRI_ASSERT_MAINTAINER(found2->_fid == context->_dfi._fid);
found2->_fid = context->_dfi._fid;
found2->_data = result;
// let _key point to the new key position
@ -662,22 +682,38 @@ static bool CompactifyDocumentCollection (TRI_document_collection_t* document) {
for (i = 0; i < n; ++i) {
TRI_datafile_t* df;
TRI_doc_datafile_info_t* dfi;
double share;
df = primary->base._datafiles._buffer[i];
dfi = TRI_FindDatafileInfoPrimaryCollection(primary, df->_fid);
if (dfi->_numberDead > 0) {
// only use those datafiles that contain dead objects
TRI_PushBackVector(&vector, dfi);
// we stop at the first datafile.
// this is better than going over all datafiles in a collection in one go
// because the compactor is single-threaded, and collecting all datafiles
// might take a long time (it might even be that there is a request to
// delete the collection in the middle of compaction, but the compactor
// will not pick this up as it is read-locking the collection status)
break;
if (dfi->_numberDead == 0 || dfi->_sizeDead < COMPACTOR_DEAD_SIZE_THRESHOLD) {
continue;
}
share = (double) dfi->_sizeDead / ((double) dfi->_sizeDead + (double) dfi->_sizeAlive);
if (share < COMPACTOR_DEAD_SIZE_SHARE) {
continue;
}
LOG_TRACE("found datafile eligible for compaction. fid: %llu, numberDead: %llu, numberAlive: %llu, sizeDead: %llu, sizeAlive: %llu",
(unsigned long long) df->_fid,
(unsigned long long) dfi->_numberDead,
(unsigned long long) dfi->_numberAlive,
(unsigned long long) dfi->_sizeDead,
(unsigned long long) dfi->_sizeAlive);
// only use those datafiles that contain dead objects
TRI_PushBackVector(&vector, dfi);
// we stop at the first datafile.
// this is better than going over all datafiles in a collection in one go
// because the compactor is single-threaded, and collecting all datafiles
// might take a long time (it might even be that there is a request to
// delete the collection in the middle of compaction, but the compactor
// will not pick this up as it is read-locking the collection status)
break;
}
// can now continue without the lock

View File

@ -277,6 +277,8 @@ static int TruncateAndSealDatafile (TRI_datafile_t* datafile,
if (fd < 0) {
LOG_ERROR("cannot create new datafile '%s': '%s'", filename, TRI_last_error());
TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
return TRI_set_errno(TRI_ERROR_SYS_ERROR);
}
@ -291,6 +293,8 @@ static int TruncateAndSealDatafile (TRI_datafile_t* datafile,
TRI_UnlinkFile(filename);
LOG_ERROR("cannot seek in new datafile '%s': '%s'", filename, TRI_last_error());
TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
return TRI_ERROR_SYS_ERROR;
}
@ -305,6 +309,8 @@ static int TruncateAndSealDatafile (TRI_datafile_t* datafile,
TRI_UnlinkFile(filename);
LOG_ERROR("cannot create sparse datafile '%s': '%s'", filename, TRI_last_error());
TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
return TRI_ERROR_SYS_ERROR;
}
@ -319,6 +325,8 @@ static int TruncateAndSealDatafile (TRI_datafile_t* datafile,
TRI_UnlinkFile(filename);
LOG_ERROR("cannot memory map file '%s': '%s'", filename, TRI_last_error());
TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
return TRI_errno();
}
@ -329,6 +337,9 @@ static int TruncateAndSealDatafile (TRI_datafile_t* datafile,
res = TRI_UNMMFile(datafile->_data, datafile->_maximalSize, datafile->_fd, &(datafile->_mmHandle));
if (res < 0) {
TRI_CLOSE(datafile->_fd);
TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
LOG_ERROR("munmap failed with: %d", res);
return res;
}
@ -369,6 +380,9 @@ static int TruncateAndSealDatafile (TRI_datafile_t* datafile,
return res;
}
TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
TRI_FreeString(TRI_CORE_MEM_ZONE, oldname);
TRI_SealDatafile(datafile);
return TRI_ERROR_NO_ERROR;
}
@ -514,17 +528,6 @@ static bool CheckDatafile (TRI_datafile_t* datafile) {
(unsigned int) marker->_type);
#endif
if (marker->_size == 0 && marker->_crc == 0 && marker->_type == 0 && marker->_tick == 0) {
LOG_DEBUG("reached end of datafile '%s' data, current size %lu",
datafile->getName(datafile),
(unsigned long) currentSize);
datafile->_currentSize = currentSize;
datafile->_next = datafile->_data + datafile->_currentSize;
return true;
}
if (marker->_size == 0) {
LOG_DEBUG("reached end of datafile '%s' data, current size %lu",
datafile->getName(datafile),
@ -738,6 +741,9 @@ static TRI_datafile_t* OpenDatafile (char const* filename, bool ignoreErrors) {
datafile = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_datafile_t), false);
if (datafile == NULL) {
TRI_UNMMFile(data, size, fd, &mmHandle);
TRI_CLOSE(fd);
return NULL;
}

View File

@ -283,9 +283,9 @@ TRI_datafile_t;
/// <tr>
/// <td>TRI_voc_size_t</td>
/// <td>_size</td>
/// <td>The total size of the blob. This includes the size of the the
/// marker and the data. In order to iterate through the datafile
/// you can read the TRI_voc_size_t entry _size and skip the next
/// <td>The total size of the blob. This includes the size of the marker
/// and the data. In order to iterate through the datafile you can
/// read the TRI_voc_size_t entry _size and skip the next
/// _size - sizeof(TRI_voc_size_t) bytes.</td>
/// </tr>
/// <tr>
@ -369,7 +369,7 @@ TRI_df_header_marker_t;
/// @brief datafile footer marker
///
/// The last entry in a full datafile is always a TRI_df_footer_marker_t.
/// The footer contains the maximal size of the datafile and it total
/// The footer contains the maximal size of the datafile and its total
/// size.
///
/// <table border>
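The _size-based skipping described in the marker docs above can be sketched as follows (simplified: the real code also honours marker alignment and zero-filled regions):

/* walk the used part of a datafile, marker by marker */
char const* ptr = datafile->_data;
char const* end = datafile->_data + datafile->_currentSize;

while (ptr < end) {
  TRI_df_marker_t const* marker = (TRI_df_marker_t const*) ptr;

  if (marker->_size == 0) {
    break;  /* no further markers */
  }

  /* ... inspect marker->_type, marker->_tick etc. ... */

  ptr += marker->_size;  /* _size covers the marker header plus its data */
}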

View File

@ -758,6 +758,9 @@ static int RollbackUpdate (TRI_document_collection_t* document,
// ignore any errors we're getting from this
DeleteSecondaryIndexes(document, newHeader, true);
// put back the header into its old position
document->_headers->move(document->_headers, newHeader, oldHeader);
*newHeader = *oldHeader;
@ -767,9 +770,6 @@ static int RollbackUpdate (TRI_document_collection_t* document,
LOG_ERROR("error rolling back update operation");
}
// put back the header into its old position
document->_headers->move(document->_headers, newHeader, newHeader);
return res;
}

View File

@ -196,7 +196,7 @@ TRI_general_cursor_t* TRI_CreateGeneralCursor (TRI_general_cursor_result_t* resu
TRI_general_cursor_t* cursor;
cursor = (TRI_general_cursor_t*) TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_general_cursor_t), false);
if (!cursor) {
if (cursor == NULL) {
return NULL;
}

View File

@ -128,6 +128,8 @@ static void MoveBackHeader (TRI_headers_t* h,
// we have at least one element in the list
TRI_ASSERT_MAINTAINER(headers->_begin != NULL);
TRI_ASSERT_MAINTAINER(headers->_end != NULL);
TRI_ASSERT_MAINTAINER(header->_prev != header);
TRI_ASSERT_MAINTAINER(header->_next != header);
if (headers->_end == header) {
// header is already at the end
@ -157,6 +159,8 @@ static void MoveBackHeader (TRI_headers_t* h,
TRI_ASSERT_MAINTAINER(headers->_begin != NULL);
TRI_ASSERT_MAINTAINER(headers->_end != NULL);
TRI_ASSERT_MAINTAINER(header->_prev != header);
TRI_ASSERT_MAINTAINER(header->_next != header);
}
////////////////////////////////////////////////////////////////////////////////
@ -167,6 +171,10 @@ static void UnlinkHeader (TRI_headers_t* h,
TRI_doc_mptr_t* header) {
simple_headers_t* headers = (simple_headers_t*) h;
TRI_ASSERT_MAINTAINER(header != NULL);
TRI_ASSERT_MAINTAINER(header->_prev != header);
TRI_ASSERT_MAINTAINER(header->_next != header);
// unlink the header
if (header->_prev != NULL) {
header->_prev->_next = header->_next;
@ -199,10 +207,13 @@ static void UnlinkHeader (TRI_headers_t* h,
TRI_ASSERT_MAINTAINER(headers->_begin != NULL);
TRI_ASSERT_MAINTAINER(headers->_end != NULL);
}
TRI_ASSERT_MAINTAINER(header->_prev != header);
TRI_ASSERT_MAINTAINER(header->_next != header);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief moves a header back into the list, using its previous position
/// @brief moves a header around in the list, using its previous position
/// (specified in "old")
////////////////////////////////////////////////////////////////////////////////
@ -216,26 +227,61 @@ static void MoveHeader (TRI_headers_t* h,
}
TRI_ASSERT_MAINTAINER(headers->_nrAllocated > 0);
TRI_ASSERT_MAINTAINER(header->_prev != header);
TRI_ASSERT_MAINTAINER(header->_next != header);
// adjust list start and end pointers
if (old->_prev == NULL) {
headers->_begin = header;
}
if (old->_next == NULL) {
headers->_end = header;
}
if (header->_prev != NULL && header->_prev == old->_next) {
header->_prev->_next = NULL;
headers->_end = header->_prev;
}
else if (header->_next != NULL && header->_next == old->_prev) {
header->_next->_prev = NULL;
headers->_begin = header->_next;
}
/*
if (headers->_begin == old->_next) {
// adjust list start pointer
headers->_begin = header;
}
*/
/*
if (old->_next == NULL) {
// adjust list end pointer
headers->_end = header;
}
*/
if (old->_prev != NULL) {
old->_prev->_next = header;
header->_prev = old->_prev;
}
else {
header->_prev = NULL;
}
if (old->_next != NULL) {
old->_next->_prev = header;
header->_next = old->_next;
}
else {
header->_next = NULL;
}
/*
header->_prev = old->_prev;
header->_next = old->_next;
*/
TRI_ASSERT_MAINTAINER(headers->_begin != NULL);
TRI_ASSERT_MAINTAINER(headers->_end != NULL);
TRI_ASSERT_MAINTAINER(header->_prev != header);
TRI_ASSERT_MAINTAINER(header->_next != header);
}
////////////////////////////////////////////////////////////////////////////////
@ -257,6 +303,9 @@ static void RelinkHeader (TRI_headers_t* h,
MoveHeader(h, header, old);
headers->_nrLinked++;
TRI_ASSERT_MAINTAINER(header->_prev != header);
TRI_ASSERT_MAINTAINER(header->_next != header);
}
////////////////////////////////////////////////////////////////////////////////
@ -402,13 +451,13 @@ static void DumpHeaders (TRI_headers_t const* h) {
TRI_doc_mptr_t* next = headers->_begin;
size_t i = 0;
LOG_TRACE("number of allocated headers: %lu\n", (unsigned long) headers->_nrAllocated);
LOG_TRACE("number of linked headers: %lu\n", (unsigned long) headers->_nrLinked);
LOG_TRACE("begin ptr: %p\n", headers->_begin);
LOG_TRACE("end ptr: %p\n", headers->_end);
printf("number of allocated headers: %lu\n", (unsigned long) headers->_nrAllocated);
printf("number of linked headers: %lu\n", (unsigned long) headers->_nrLinked);
printf("begin ptr: %p\n", headers->_begin);
printf("end ptr: %p\n", headers->_end);
while (next != NULL) {
LOG_TRACE("- header #%lu: ptr: %p, prev: %p, next: %p, key: %s\n",
printf("- header #%lu: ptr: %p, prev: %p, next: %p, key: %s\n",
(unsigned long) i,
next,
next->_prev,
@ -424,7 +473,6 @@ static void DumpHeaders (TRI_headers_t const* h) {
}
TRI_ASSERT_MAINTAINER(i == headers->_nrLinked);
TRI_ASSERT_MAINTAINER(i == headers->_nrLinked);
}
#endif

View File

@ -106,6 +106,7 @@ int TRI_ReadServerId (char const* filename) {
}
json = TRI_JsonFile(TRI_UNKNOWN_MEM_ZONE, filename, NULL);
if (json == NULL) {
return TRI_ERROR_INTERNAL;
}
@ -119,14 +120,13 @@ int TRI_ReadServerId (char const* filename) {
foundId = TRI_UInt64String(idString->_value._string.data);
LOG_TRACE("using existing server id: %llu", (unsigned long long) foundId);
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json);
if (foundId == 0) {
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json);
return TRI_ERROR_INTERNAL;
}
TRI_EstablishServerId(foundId);
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json);
return TRI_ERROR_NO_ERROR;
}
@ -148,6 +148,7 @@ int TRI_WriteServerId (char const* filename) {
// create a json object
json = TRI_CreateArrayJson(TRI_UNKNOWN_MEM_ZONE);
if (json == NULL) {
// out of memory
LOG_ERROR("cannot save server id in file '%s': out of memory", filename);
@ -168,16 +169,14 @@ int TRI_WriteServerId (char const* filename) {
// save json info to file
LOG_DEBUG("Writing server id to file '%s'", filename);
ok = TRI_SaveJson(filename, json, true);
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json);
if (! ok) {
LOG_ERROR("could not save server id in file '%s': %s", filename, TRI_last_error());
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json);
return TRI_ERROR_INTERNAL;
}
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json);
return TRI_ERROR_NO_ERROR;
}
@@ -189,14 +188,14 @@ int TRI_WriteServerId (char const* filename) {
int TRI_GenerateServerId () {
uint64_t randomValue = 0ULL; // init for our friend Valgrind
uint32_t* value;
uint32_t value1, value2;
// save two uint32_t values
value = (uint32_t*) &randomValue;
*(value++) = TRI_UInt32Random();
*(value) = TRI_UInt32Random();
value1 = TRI_UInt32Random();
value2 = TRI_UInt32Random();
// use the lower 6 bytes only
randomValue = (((uint64_t) value1) << 32) | ((uint64_t) value2);
randomValue &= TRI_SERVER_ID_MASK;
TRI_EstablishServerId((TRI_server_id_t) randomValue);
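The rewrite above replaces writing two random words through a uint32_t* into the uint64_t (byte-order dependent and aliasing-unsafe) with an explicit shift-and-or. A self-contained sketch of the same idea follows; the mask value and the rand()-based source are illustrative assumptions, not the actual TRI_SERVER_ID_MASK or TRI_UInt32Random.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* illustrative stand-in for TRI_UInt32Random() */
static uint32_t Rand32 (void) {
  return (uint32_t) rand();
}

/* assumed: "use the lower 6 bytes only" suggests a 48-bit mask */
#define SERVER_ID_MASK 0x0000FFFFFFFFFFFFULL

int main (void) {
  uint64_t id;
  uint32_t hi, lo;

  srand((unsigned) time(NULL));

  hi = Rand32();
  lo = Rand32();

  /* endianness-independent, no pointer aliasing */
  id = (((uint64_t) hi) << 32) | ((uint64_t) lo);
  id &= SERVER_ID_MASK;

  printf("server id: %llu\n", (unsigned long long) id);
  return 0;
}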


@@ -57,7 +57,7 @@ static inline void UpdateTimestampShadow (TRI_shadow_t* const shadow) {
static TRI_shadow_t* CreateShadow (const void* const data) {
TRI_shadow_t* shadow = (TRI_shadow_t*) TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_shadow_t), false);
if (!shadow) {
if (shadow == NULL) {
return NULL;
}
@@ -69,10 +69,10 @@ static TRI_shadow_t* CreateShadow (const void* const data) {
UpdateTimestampShadow(shadow);
LOG_TRACE("created shadow %p with data ptr %p and id %lu",
LOG_TRACE("created shadow %p with data ptr %p and id %llu",
shadow,
data,
(unsigned long) shadow->_id);
(unsigned long long) shadow->_id);
return shadow;
}
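Several hunks in this file fix the same latent bug: a 64-bit id printed with %lu after a cast to unsigned long, which silently truncates on 32-bit and LLP64 platforms. Casting to unsigned long long and printing with %llu, as the diff does, is safe everywhere; the <inttypes.h> macros shown below are an equivalent alternative (an option, not what this codebase uses).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main (void) {
  uint64_t id = 1311768467294899695ULL;   /* wider than 32 bits */

  /* the diff's approach: cast to the widest standard unsigned type */
  printf("id: %llu\n", (unsigned long long) id);

  /* alternative: let <inttypes.h> supply the right conversion specifier */
  printf("id: %" PRIu64 "\n", id);
  return 0;
}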
@@ -82,10 +82,10 @@ static TRI_shadow_t* CreateShadow (const void* const data) {
////////////////////////////////////////////////////////////////////////////////
static void DecreaseRefCount (TRI_shadow_store_t* const store, TRI_shadow_t* const shadow) {
LOG_TRACE("decreasing refcount for shadow %p with data ptr %p and id %lu to %d",
LOG_TRACE("decreasing refcount for shadow %p with data ptr %p and id %llu to %d",
shadow,
shadow->_data,
(unsigned long) shadow->_id,
(unsigned long long) shadow->_id,
(int) (shadow->_rc - 1));
if (--shadow->_rc <= 0 && shadow->_type == SHADOW_TRANSIENT) {
@@ -103,10 +103,10 @@ static void DecreaseRefCount (TRI_shadow_store_t* const store, TRI_shadow_t* con
////////////////////////////////////////////////////////////////////////////////
static void IncreaseRefCount (TRI_shadow_store_t* const store, TRI_shadow_t* const shadow) {
LOG_TRACE("increasing refcount for shadow %p with data ptr %p and id %lu to %d",
LOG_TRACE("increasing refcount for shadow %p with data ptr %p and id %llu to %d",
shadow,
shadow->_data,
(unsigned long) shadow->_id,
(unsigned long long) shadow->_id,
(int) (shadow->_rc + 1));
if (++shadow->_rc <= 0) {
@@ -121,10 +121,10 @@ static void IncreaseRefCount (TRI_shadow_store_t* const store, TRI_shadow_t* con
////////////////////////////////////////////////////////////////////////////////
static void PersistShadow (TRI_shadow_t* const shadow) {
LOG_TRACE("persisting shadow %p with data ptr %p and id %lu",
LOG_TRACE("persisting shadow %p with data ptr %p and id %llu",
shadow,
shadow->_data,
(unsigned long) shadow->_id);
(unsigned long long) shadow->_id);
shadow->_type = SHADOW_PERSISTENT;
UpdateTimestampShadow(shadow);
@@ -137,10 +137,10 @@ static void PersistShadow (TRI_shadow_t* const shadow) {
static void DeleteShadow (TRI_shadow_store_t* const store,
TRI_shadow_t* const shadow,
const bool decreaseRefCount) {
LOG_TRACE("setting deleted flag for shadow %p with data ptr %p and id %lu",
LOG_TRACE("setting deleted flag for shadow %p with data ptr %p and id %llu",
shadow,
shadow->_data,
(unsigned long) shadow->_id);
(unsigned long long) shadow->_id);
shadow->_deleted = true;
if (decreaseRefCount) {
@@ -232,7 +232,7 @@ TRI_shadow_store_t* TRI_CreateShadowStore (void (*destroy) (void*)) {
store = (TRI_shadow_store_t*) TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_shadow_store_t), false);
if (store) {
if (store != NULL) {
TRI_InitAssociativePointer(&store->_ids,
TRI_UNKNOWN_MEM_ZONE,
HashKeyId,
@@ -302,7 +302,7 @@ TRI_shadow_id TRI_GetIdDataShadowData (TRI_shadow_store_t* const store,
TRI_LockMutex(&store->_lock);
shadow = (TRI_shadow_t*) TRI_LookupByKeyAssociativePointer(&store->_pointers, data);
if (shadow && !shadow->_deleted) {
if (shadow && ! shadow->_deleted) {
id = shadow->_id;
UpdateTimestampShadow(shadow);
}
@@ -326,14 +326,14 @@ void* TRI_BeginUsageDataShadowData (TRI_shadow_store_t* const store,
assert(store);
if (!data) {
if (! data) {
return NULL;
}
TRI_LockMutex(&store->_lock);
shadow = (TRI_shadow_t*) TRI_LookupByKeyAssociativePointer(&store->_pointers, data);
if (shadow && !shadow->_deleted) {
if (shadow && ! shadow->_deleted) {
IncreaseRefCount(store, shadow);
TRI_UnlockMutex(&store->_lock);
return shadow->_data;
@@ -359,7 +359,7 @@ void* TRI_BeginUsageIdShadowData (TRI_shadow_store_t* const store,
TRI_LockMutex(&store->_lock);
shadow = (TRI_shadow_t*) TRI_LookupByKeyAssociativePointer(&store->_ids, (void const*) &id);
if (shadow && !shadow->_deleted) {
if (shadow && ! shadow->_deleted) {
IncreaseRefCount(store, shadow);
TRI_UnlockMutex(&store->_lock);
return shadow->_data;
@@ -386,7 +386,7 @@ void TRI_EndUsageDataShadowData (TRI_shadow_store_t* const store,
TRI_LockMutex(&store->_lock);
shadow = (TRI_shadow_t*) TRI_LookupByKeyAssociativePointer(&store->_pointers, data);
if (shadow && !shadow->_deleted) {
if (shadow && ! shadow->_deleted) {
DecreaseRefCount(store, shadow); // this might delete the shadow
}
@@ -407,7 +407,7 @@ bool TRI_PersistDataShadowData (TRI_shadow_store_t* const store,
TRI_LockMutex(&store->_lock);
shadow = (TRI_shadow_t*) TRI_LookupByKeyAssociativePointer(&store->_pointers, data);
if (shadow && !shadow->_deleted) {
if (shadow && ! shadow->_deleted) {
PersistShadow(shadow);
result = true;
}
@@ -433,7 +433,7 @@ bool TRI_DeleteDataShadowData (TRI_shadow_store_t* const store,
TRI_LockMutex(&store->_lock);
shadow = (TRI_shadow_t*) TRI_LookupByKeyAssociativePointer(&store->_pointers, data);
if (shadow && !shadow->_deleted) {
if (shadow && ! shadow->_deleted) {
DeleteShadow(store, shadow, true);
found = true;
}
@@ -458,12 +458,13 @@ bool TRI_DeleteIdShadowData (TRI_shadow_store_t* const store,
TRI_LockMutex(&store->_lock);
shadow = (TRI_shadow_t*) TRI_LookupByKeyAssociativePointer(&store->_ids, &id);
if (shadow && !shadow->_deleted) {
if (shadow && ! shadow->_deleted) {
DeleteShadow(store, shadow, false);
found = true;
}
TRI_UnlockMutex(&store->_lock);
return found;
}
@@ -503,18 +504,20 @@ void TRI_CleanupShadowData (TRI_shadow_store_t* const store,
for (i = 0; i < store->_ids._nrAlloc; i++) {
// enum all shadows
TRI_shadow_t* shadow = (TRI_shadow_t*) store->_ids._table[i];
if (!shadow) {
if (shadow == NULL) {
continue;
}
// check if shadow is unused and expired
if (shadow->_rc < 1 || force) {
if (shadow->_type == SHADOW_TRANSIENT ||
shadow->_timestamp < compareStamp ||
shadow->_deleted ||
force) {
LOG_TRACE("cleaning shadow %p, rc: %d, expired: %d, deleted: %d",
LOG_TRACE("cleaning shadow %p, id: %llu, rc: %d, expired: %d, deleted: %d",
shadow,
(unsigned long long) shadow->_id,
(int) shadow->_rc,
(int) (shadow->_timestamp < compareStamp),
(int) shadow->_deleted);
@@ -532,7 +535,7 @@ void TRI_CleanupShadowData (TRI_shadow_store_t* const store,
}
}
if (!deleted) {
if (! deleted) {
// we did not find anything to delete, so give up
break;
}
@@ -554,19 +557,21 @@ TRI_shadow_t* TRI_StoreShadowData (TRI_shadow_store_t* const store,
shadow = CreateShadow(data);
if (shadow) {
LOG_TRACE("storing shadow %p with data ptr %p and id %lu",
LOG_TRACE("storing shadow %p with data ptr %p and id %llu",
shadow,
shadow->_data,
(unsigned long) shadow->_id);
(unsigned long long) shadow->_id);
TRI_LockMutex(&store->_lock);
if (TRI_InsertKeyAssociativePointer(&store->_ids, &shadow->_id, shadow, false)) {
// duplicate entry
LOG_INFO("storing shadow failed");
LOG_WARNING("storing shadow failed");
TRI_UnlockMutex(&store->_lock);
TRI_Free(TRI_UNKNOWN_MEM_ZONE, shadow);
return NULL;
}
TRI_InsertKeyAssociativePointer(&store->_pointers, data, shadow, false);
TRI_UnlockMutex(&store->_lock);
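To summarise the shadow life cycle these hunks touch: each shadow carries a reference count and a type, and a transient shadow is destroyed as soon as its last user releases it, while a persistent one survives until deleted or expired by the cleanup pass. A compact sketch of that rule, with the locking and the id/pointer hash tables omitted and names simplified:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef enum { SHADOW_TRANSIENT, SHADOW_PERSISTENT } shadow_type_e;

typedef struct {
  int _rc;              /* reference count */
  shadow_type_e _type;
  bool _deleted;
} shadow_t;

static void Release (shadow_t* shadow) {
  /* transient shadows die with their last reference */
  if (--shadow->_rc <= 0 && shadow->_type == SHADOW_TRANSIENT) {
    printf("freeing transient shadow %p\n", (void*) shadow);
    free(shadow);
  }
}

int main (void) {
  shadow_t* shadow = (shadow_t*) malloc(sizeof(shadow_t));

  if (shadow == NULL) {
    return 1;
  }
  shadow->_rc = 1;
  shadow->_type = SHADOW_TRANSIENT;
  shadow->_deleted = false;

  Release(shadow);      /* rc drops to 0: freed immediately */
  return 0;
}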


@@ -325,6 +325,120 @@ static inline void UpdateTick (TRI_voc_tick_t tick) {
}
}
////////////////////////////////////////////////////////////////////////////////
/// @brief reads shutdown information file
/// this is called at server startup. if the file is present, the last tick
/// value used by the server will be read from the file.
////////////////////////////////////////////////////////////////////////////////
static int ReadShutdownInfo (char const* filename) {
TRI_json_t* json;
TRI_json_t* shutdownTime;
TRI_json_t* tickString;
uint64_t foundTick;
assert(filename != NULL);
if (! TRI_ExistsFile(filename)) {
return TRI_ERROR_FILE_NOT_FOUND;
}
json = TRI_JsonFile(TRI_UNKNOWN_MEM_ZONE, filename, NULL);
if (json == NULL) {
return TRI_ERROR_INTERNAL;
}
shutdownTime = TRI_LookupArrayJson(json, "shutdownTime");
if (shutdownTime != NULL && shutdownTime->_type == TRI_JSON_STRING) {
LOG_DEBUG("server was shut down cleanly last time at '%s'", shutdownTime->_value._string.data);
}
tickString = TRI_LookupArrayJson(json, "tick");
if (tickString == NULL || tickString->_type != TRI_JSON_STRING) {
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json);
return TRI_ERROR_INTERNAL;
}
foundTick = TRI_UInt64String(tickString->_value._string.data);
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json);
LOG_TRACE("using existing tick from shutdown info file: %llu", (unsigned long long) foundTick);
if (foundTick == 0) {
return TRI_ERROR_INTERNAL;
}
UpdateTick((TRI_voc_tick_t) foundTick);
return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief removes the shutdown information file
/// this is called after the shutdown info file is read at restart. we need
/// to remove the file because if we don't and the server crashes, we would
/// leave some stale data around, leading to potential inconsistencies later.
////////////////////////////////////////////////////////////////////////////////
static int RemoveShutdownInfo (char const* filename) {
int res = TRI_UnlinkFile(filename);
return res;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief writes shutdown information file
/// the file will contain the shutdown timestamp plus the last tick value
/// the server used. it is read on the next server restart; if the file is
/// present, the server can avoid scanning all collection datafiles for the
/// last used tick value.
////////////////////////////////////////////////////////////////////////////////
static int WriteShutdownInfo (char const* filename) {
TRI_json_t* json;
char* tickString;
char buffer[32];
size_t len;
time_t tt;
struct tm tb;
bool ok;
assert(filename != NULL);
// create a json object
json = TRI_CreateArrayJson(TRI_UNKNOWN_MEM_ZONE);
if (json == NULL) {
// out of memory
LOG_ERROR("cannot save shutdown info in file '%s': out of memory", filename);
return TRI_ERROR_OUT_OF_MEMORY;
}
tickString = TRI_StringUInt64((uint64_t) GetTick());
TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE, json, "tick", TRI_CreateStringCopyJson(TRI_UNKNOWN_MEM_ZONE, tickString));
TRI_FreeString(TRI_CORE_MEM_ZONE, tickString);
tt = time(0);
TRI_gmtime(tt, &tb);
len = strftime(buffer, sizeof(buffer), "%Y-%m-%dT%H:%M:%SZ", &tb);
TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE, json, "shutdownTime", TRI_CreateString2CopyJson(TRI_UNKNOWN_MEM_ZONE, buffer, len));
// save json info to file
LOG_DEBUG("Writing shutdown info to file '%s'", filename);
ok = TRI_SaveJson(filename, json, true);
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json);
if (! ok) {
LOG_ERROR("could not save shutdown info in file '%s': %s", filename, TRI_last_error());
return TRI_ERROR_INTERNAL;
}
return TRI_ERROR_NO_ERROR;
}
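The SHUTDOWN file produced here is a small JSON document; with these routines it would contain roughly {"tick":"123456","shutdownTime":"2013-05-10T12:00:00Z"} (values illustrative). A dependency-free sketch of the same round trip, using only the C standard library instead of the TRI_* JSON helpers:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

static int WriteShutdownInfo (char const* filename, uint64_t tick) {
  char stamp[32];
  time_t tt = time(NULL);
  struct tm* tb = gmtime(&tt);   /* TRI_gmtime in the original */
  FILE* f;

  strftime(stamp, sizeof(stamp), "%Y-%m-%dT%H:%M:%SZ", tb);

  f = fopen(filename, "w");
  if (f == NULL) {
    return -1;
  }
  fprintf(f, "{\"tick\":\"%llu\",\"shutdownTime\":\"%s\"}\n",
          (unsigned long long) tick, stamp);
  fclose(f);
  return 0;
}

static uint64_t ReadShutdownTick (char const* filename) {
  char buffer[256];
  char* p;
  FILE* f = fopen(filename, "r");

  if (f == NULL) {
    return 0;                    /* 0 signals "no usable tick" */
  }
  if (fgets(buffer, sizeof(buffer), f) == NULL) {
    fclose(f);
    return 0;
  }
  fclose(f);

  p = strstr(buffer, "\"tick\":\"");
  return (p != NULL) ? strtoull(p + 8, NULL, 10) : 0;
}

int main (void) {
  WriteShutdownInfo("SHUTDOWN", 123456ULL);
  printf("tick read back: %llu\n",
         (unsigned long long) ReadShutdownTick("SHUTDOWN"));
  remove("SHUTDOWN");
  return 0;
}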
////////////////////////////////////////////////////////////////////////////////
/// @brief free the memory associated with a collection
////////////////////////////////////////////////////////////////////////////////
@@ -637,6 +751,7 @@ static int ReadServerId (TRI_vocbase_t* vocbase) {
}
res = TRI_ReadServerId(filename);
if (res == TRI_ERROR_FILE_NOT_FOUND) {
// id file does not yet exist. now create it
res = TRI_GenerateServerId();
@@ -748,7 +863,9 @@ static bool StartupTickIterator (TRI_df_marker_t const* marker,
/// @brief scans a directory and loads all collections
////////////////////////////////////////////////////////////////////////////////
static int ScanPath (TRI_vocbase_t* vocbase, char const* path) {
static int ScanPath (TRI_vocbase_t* vocbase,
char const* path,
const bool iterateMarkers) {
TRI_vector_string_t files;
regmatch_t matches[2];
regex_t re;
@@ -793,6 +910,10 @@ static int ScanPath (TRI_vocbase_t* vocbase, char const* path) {
// encounter this situation
LOG_ERROR("database subdirectory '%s' is not writable for current user", file);
TRI_FreeString(TRI_CORE_MEM_ZONE, file);
regfree(&re);
TRI_DestroyVectorString(&files);
return TRI_set_errno(TRI_ERROR_ARANGO_DATADIR_NOT_WRITABLE);
}
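The added regfree and TRI_DestroyVectorString calls plug a resource leak on the early-return path. A common alternative pattern for keeping such error paths in sync is a single cleanup label, sketched here with stand-in logic (this is not how ScanPath is written):

#include <regex.h>
#include <stdio.h>

static int Scan (char const* path) {
  int res = 0;
  regex_t re;

  if (regcomp(&re, "^collection-[0-9]+$", REG_EXTENDED) != 0) {
    return -1;                 /* nothing acquired yet */
  }

  if (path == NULL) {          /* stand-in for "subdirectory not writable" */
    res = -1;
    goto cleanup;              /* every exit runs the same teardown */
  }

  /* ... iterate over directory entries, match against `re` ... */

cleanup:
  regfree(&re);
  return res;
}

int main (void) {
  printf("scan result: %d\n", Scan(NULL));
  return 0;
}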
@@ -874,8 +995,12 @@ static int ScanPath (TRI_vocbase_t* vocbase, char const* path) {
}
c = AddCollection(vocbase, type, info._name, info._cid, file);
TRI_IterateTicksCollection(file, StartupTickIterator, NULL);
if (iterateMarkers) {
// iterating markers may be time-consuming. we'll only do it if
// we have to
TRI_IterateTicksCollection(file, StartupTickIterator, NULL);
}
if (c == NULL) {
LOG_ERROR("failed to add document collection from '%s'", file);
@@ -1170,6 +1295,7 @@ bool TRI_msync (int fd, void* mmHandle, char const* begin, char const* end) {
TRI_vocbase_t* TRI_OpenVocBase (char const* path) {
TRI_vocbase_t* vocbase;
char* lockFile;
bool iterateMarkers;
int res;
if (! TRI_IsDirectory(path)) {
@@ -1227,10 +1353,12 @@ TRI_vocbase_t* TRI_OpenVocBase (char const* path) {
vocbase->_lockFile = lockFile;
vocbase->_path = TRI_DuplicateString(path);
vocbase->_shutdownFilename = TRI_Concatenate2File(path, "SHUTDOWN");
// init AQL functions
vocbase->_functions = TRI_InitialiseFunctionsAql();
// init collections
TRI_InitVectorPointer(&vocbase->_collections, TRI_UNKNOWN_MEM_ZONE);
TRI_InitVectorPointer(&vocbase->_deadCollections, TRI_UNKNOWN_MEM_ZONE);
@@ -1260,6 +1388,7 @@ TRI_vocbase_t* TRI_OpenVocBase (char const* path) {
TRI_InitReadWriteLock(&vocbase->_authInfoLock);
TRI_InitReadWriteLock(&vocbase->_lock);
vocbase->_authInfoFlush = true;
vocbase->_syncWaiters = 0;
TRI_InitCondition(&vocbase->_syncWaitersCondition);
@@ -1280,10 +1409,29 @@ TRI_vocbase_t* TRI_OpenVocBase (char const* path) {
LOG_FATAL_AND_EXIT("reading/creating server id failed");
}
// check if we can find a SHUTDOWN file
// this file will contain the last tick value issued by the server
// if we find the file, we can avoid scanning datafiles for the last used tick value
iterateMarkers = true;
res = ReadShutdownInfo(vocbase->_shutdownFilename);
if (res == TRI_ERROR_NO_ERROR) {
// we found the SHUTDOWN file
// no need to iterate the markers
iterateMarkers = false;
}
else if (res == TRI_ERROR_INTERNAL) {
LOG_FATAL_AND_EXIT("cannot read shutdown information from file '%s'", vocbase->_shutdownFilename);
}
// scan the database path for collections
// this will create the list of collections and their datafiles, and will also
// determine the last tick values used
res = ScanPath(vocbase, vocbase->_path);
// determine the last tick values used (if iterateMarkers is true)
res = ScanPath(vocbase, vocbase->_path, iterateMarkers);
if (res != TRI_ERROR_NO_ERROR) {
TRI_DestroyAssociativePointer(&vocbase->_collectionsByName);
@@ -1292,15 +1440,26 @@ TRI_vocbase_t* TRI_OpenVocBase (char const* path) {
TRI_DestroyVectorPointer(&vocbase->_deadCollections);
TRI_DestroyLockFile(vocbase->_lockFile);
TRI_FreeString(TRI_CORE_MEM_ZONE, vocbase->_lockFile);
TRI_FreeString(TRI_CORE_MEM_ZONE, vocbase->_shutdownFilename);
TRI_FreeString(TRI_CORE_MEM_ZONE, vocbase->_path);
TRI_FreeShadowStore(vocbase->_cursors);
TRI_Free(TRI_UNKNOWN_MEM_ZONE, vocbase);
TRI_DestroyReadWriteLock(&vocbase->_authInfoLock);
TRI_DestroyReadWriteLock(&vocbase->_lock);
TRI_Free(TRI_UNKNOWN_MEM_ZONE, vocbase);
return NULL;
}
LOG_TRACE("last tick value found: %llu", (unsigned long long) GetTick());
// now remove SHUTDOWN file if it was present
if (! iterateMarkers) {
if (RemoveShutdownInfo(vocbase->_shutdownFilename) != TRI_ERROR_NO_ERROR) {
LOG_FATAL_AND_EXIT("unable to remove shutdown information file '%s'", vocbase->_shutdownFilename);
}
}
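Taken together, the startup changes boil down to: read the SHUTDOWN file; if it yields a valid tick, skip the expensive per-datafile marker scan; afterwards remove the file so a later crash cannot leave a stale tick behind. A condensed sketch of that flow with stand-in functions (the real code lives in TRI_OpenVocBase):

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for ReadShutdownInfo / ScanPath / RemoveShutdownInfo */
static int ReadShutdownInfoStub (char const* filename) {
  (void) filename;
  return 0;   /* pretend the file was found and contained a valid tick */
}

static void ScanPathStub (char const* path, bool iterateMarkers) {
  printf("scanning '%s', iterating markers: %s\n",
         path, iterateMarkers ? "yes" : "no");
}

static int RemoveShutdownInfoStub (char const* filename) {
  (void) filename;
  return 0;
}

int main (void) {
  bool iterateMarkers = true;

  if (ReadShutdownInfoStub("SHUTDOWN") == 0) {
    /* clean shutdown last time: last tick is known already */
    iterateMarkers = false;
  }

  ScanPathStub("/var/lib/mydb", iterateMarkers);

  if (! iterateMarkers) {
    /* remove the file; a later crash must not see a stale tick */
    RemoveShutdownInfoStub("SHUTDOWN");
  }
  return 0;
}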
// .............................................................................
@@ -1387,6 +1546,12 @@ void TRI_DestroyVocBase (TRI_vocbase_t* vocbase) {
FreeCollection(vocbase, collection);
}
// we are just before terminating the server. we can now write out a file with the
// shutdown timestamp and the last tick value the server used.
// if writing the file fails, it is not a problem as in this case we'll scan the
// collections for the tick value on startup
WriteShutdownInfo(vocbase->_shutdownFilename);
// free the auth info
TRI_DestroyAuthInfo(vocbase);
@@ -1417,6 +1582,7 @@ void TRI_DestroyVocBase (TRI_vocbase_t* vocbase) {
TRI_DestroyCondition(&vocbase->_cleanupCondition);
// free the filename path
TRI_Free(TRI_CORE_MEM_ZONE, vocbase->_shutdownFilename);
TRI_Free(TRI_CORE_MEM_ZONE, vocbase->_path);
}
@@ -2058,8 +2224,11 @@ void TRI_ReleaseCollectionVocBase (TRI_vocbase_t* vocbase, TRI_vocbase_col_t* co
////////////////////////////////////////////////////////////////////////////////
void TRI_InitialiseVocBase () {
// TODO: these two function calls can probably be removed because we're
// initialising BasicsC anyway
TRI_InitialiseHashes();
TRI_InitialiseRandom();
TRI_GlobalInitStatementListAql();
ServerIdentifier = TRI_UInt16Random();


@@ -289,13 +289,14 @@ extern size_t PageSize;
////////////////////////////////////////////////////////////////////////////////
typedef struct TRI_vocbase_s {
char* _path;
char* _path; // path to the data directory
char* _shutdownFilename; // absolute filename of the file that contains the shutdown information
bool _authInfoLoaded; // flag indicating whether the authentication info was loaded successfully
bool _removeOnDrop; // wipe collection from disk after dropping
bool _removeOnCompacted; // wipe datafile from disk after compaction
bool _authInfoLoaded; // flag indicating whether the authentication info was loaded successfully
bool _removeOnDrop; // wipe collection from disk after dropping
bool _removeOnCompacted; // wipe datafile from disk after compaction
bool _defaultWaitForSync;
bool _forceSyncShapes; // force syncing of shape data to disk
bool _forceSyncShapes; // force syncing of shape data to disk
bool _forceSyncProperties; // force syncing of shape data to disk
TRI_voc_size_t _defaultMaximalSize;
@@ -309,6 +310,7 @@ typedef struct TRI_vocbase_s {
TRI_associative_pointer_t _authInfo;
TRI_read_write_lock_t _authInfoLock;
bool _authInfoFlush;
struct TRI_transaction_context_s* _transactionContext;

View File

@@ -244,7 +244,7 @@ static void InitMRClientConnection (mrb_state* mrb, MRubyClientConnection* conne
////////////////////////////////////////////////////////////////////////////////
static void RunShell (mrb_state* mrb) {
MRLineEditor console(mrb, ".arangoirb");
MRLineEditor console(mrb, ".arangoirb.history");
console.open(false /*! NoAutoComplete*/);

View File

@@ -341,6 +341,11 @@ void ArangoClient::parse (ProgramOptions& options,
}
}
// set temp path
if (options.has("temp-path")) {
TRI_SetUserTempPath((char*) _tempPath.c_str());
}
// check if have a password
_hasPassword = options.has("server.password") ||
options.has("server.disable-authentication") ||
@@ -668,14 +673,6 @@ void ArangoClient::setUsePager (bool value) {
_usePager = value;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief gets the temporary path
////////////////////////////////////////////////////////////////////////////////
string const& ArangoClient::tempPath () const {
return _tempPath;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief gets endpoint to connect to as string
////////////////////////////////////////////////////////////////////////////////

View File

@@ -325,12 +325,6 @@ namespace triagens {
void setUsePager (bool);
////////////////////////////////////////////////////////////////////////////////
/// @brief gets the temporary path
////////////////////////////////////////////////////////////////////////////////
string const& tempPath () const;
////////////////////////////////////////////////////////////////////////////////
/// @brief gets endpoint to connect to as string
////////////////////////////////////////////////////////////////////////////////

View File

@@ -308,7 +308,7 @@ int main (int argc, char* argv[]) {
if (Delay) {
Status("sleeping (startup delay)...");
sleep(15);
sleep(10);
}
Status("executing tests...");
@@ -347,26 +347,18 @@ int main (int argc, char* argv[]) {
size_t failures = operationsCounter.failures();
if (! BaseClient.quiet()) {
cout << endl;
cout << "Total number of operations: " << Operations << ", batch size: " << BatchSize << ", concurrency level (threads): " << Concurrency << endl;
cout << "Total request/response duration (sum of all threads): " << fixed << requestTime << " s" << endl;
cout << "Request/response duration (per thread): " << fixed << (requestTime / (double) Concurrency) << " s" << endl;
cout << "Time needed per operation: " << fixed << (time / Operations) << " s" << endl;
cout << "Time needed per operation per thread: " << fixed << (time / (double) Operations * (double) Concurrency) << " s" << endl;
cout << "Operations per second rate: " << fixed << ((double) Operations / time) << endl;
cout << "Elapsed time since start: " << fixed << time << " s" << endl;
cout << endl;
cout << "Total number of operations: " << Operations << ", batch size: " << BatchSize << ", concurrency level (threads): " << Concurrency << endl;
cout << "Test case: " << TestCase << ", complexity: " << Complexity << ", collection: '" << Collection << "'" << endl;
cout << "Total request/response duration (sum of all threads): " << fixed << requestTime << " s" << endl;
cout << "Request/response duration (per thread): " << fixed << (requestTime / (double) Concurrency) << " s" << endl;
cout << "Time needed per operation: " << fixed << (time / Operations) << " s" << endl;
cout << "Time needed per operation per thread: " << fixed << (time / (double) Operations * (double) Concurrency) << " s" << endl;
cout << "Operations per second rate: " << fixed << ((double) Operations / time) << endl;
cout << "Elapsed time since start: " << fixed << time << " s" << endl << endl;
cout << endl;
if (failures > 0) {
cerr << "WARNING: " << failures << " request(s) failed!!" << endl << endl;
}
}
else {
if (failures > 0) {
cerr << "WARNING: " << failures << " arangob request(s) failed!!" << endl;
}
if (failures > 0) {
cerr << "WARNING: " << failures << " arangob request(s) failed!!" << endl;
}
testCase->tearDown();
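For illustration of the figures printed above (made-up numbers): 10000 operations on 4 threads completing in 2 s give 2 / 10000 = 0.0002 s per operation, 0.0002 * 4 = 0.0008 s per operation per thread, and 10000 / 2 = 5000 operations per second.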


@@ -1063,7 +1063,7 @@ static void RunShell (v8::Handle<v8::Context> context, bool promptError) {
v8::Context::Scope contextScope(context);
v8::Local<v8::String> name(v8::String::New("(shell)"));
V8LineEditor console(context, ".arangosh");
V8LineEditor console(context, ".arangosh.history");
console.open(BaseClient.autoComplete());
@@ -1454,7 +1454,8 @@ int main (int argc, char* argv[]) {
TRI_AddGlobalVariableVocbase(context, "SYS_OUTPUT", v8::FunctionTemplate::New(JS_PagerOutput)->GetFunction());
TRI_InitV8Buffer(context);
TRI_InitV8Utils(context, StartupModules, StartupPackages, BaseClient.tempPath());
TRI_InitV8Utils(context, StartupModules, StartupPackages);
TRI_InitV8Shell(context);
// reset the prompt error flag (will determine prompt colors)

Some files were not shown because too many files have changed in this diff.