mirror of https://gitee.com/bigwinds/arangodb

Merge branch 'devel' of https://github.com/triAGENS/ArangoDB into devel

commit 6075027484

CHANGELOG
@@ -1,11 +1,24 @@
+v2.4.0 (XXXX-XX-XX)
+-------------------
+
+
+v2.3.0 (2014-11-18)
+-------------------
+
+* fixed syslog flags. `--log.syslog` is deprecated and setting it has no effect,
+  `--log.facility` now works as described. Application name has been changed from
+  `triagens` to `arangod`. It can be changed using `--log.application`. The syslog
+  will only contain the actual log message. The datetime prefix is omitted.
+
+* fixed deflate in SimpleHttpClient
+
+* fixed issue #1104: edgeExamples broken or changed
+
 v2.3.0 (XXXX-XX-XX)
 -------------------
 * fixed issue #1103: Error while importing user queries
 
 * fixed issue #1100: AQL: HAS() fails on doc[attribute_name]
 
 * fixed issue #1098: runtime error when creating graph vertex
 
 * hide system applications in **Applications** tab by default
@@ -32,6 +45,8 @@ v2.3.0 (XXXX-XX-XX)
 
 * dynamically create extra dispatcher threads if required
 
+* fixed issue #1097: schemas in the API docs no longer show required properties as optional
+
 v2.3.0-beta2 (2014-11-08)
 -------------------------
@@ -19,10 +19,6 @@ statistics about executed requests and timings about computation steps.
 <!-- lib/ApplicationServer/ApplicationServer.h -->
 @startDocuBlock logSeverity
 
-!SUBSECTION Syslog
-<!-- lib/ApplicationServer/ApplicationServer.h -->
-@startDocuBlock logSyslog
-
 !SECTION Human Readable Logging
 
 !SUBSECTION Level
@@ -62,7 +58,3 @@ statistics about executed requests and timings about computation steps.
 !SUBSECTION Facility
 <!-- lib/ApplicationServer/ApplicationServer.h -->
 @startDocuBlock logFacility
-
-!SUBSECTION Histname
-<!-- lib/ApplicationServer/ApplicationServer.h -->
-@startDocuBlock logHostname
@@ -55,6 +55,47 @@ person.attributes // => { name: "Pete", admin: true, active: true }
 person.errors // => {admin: [ValidationError: value is not allowed]}
 ```
 
+The following events are emitted by a model:
+
+- beforeCreate
+- afterCreate
+- beforeSave
+- afterSave
+- beforeUpdate
+- afterUpdate
+- beforeRemove
+- afterRemove
+
+Model lifecycle:
+
+```js
+var person = new PersonModel();
+person.on('beforeCreate', function() {
+  var model = this;
+  model.fancyMethod(); // Do something fancy with the model
+});
+var people = new Repository(appContext.collection("people"), { model: PersonModel });
+
+people.save(person);
+// beforeCreate()
+// beforeSave()
+// The model is created at db
+// afterSave()
+// afterCreate()
+
+people.update(person, data);
+// beforeUpdate(data)
+// beforeSave(data)
+// The model is updated at db
+// afterSave(data)
+// afterUpdate(data)
+
+people.remove(person);
+// beforeRemove()
+// The model is deleted at db
+// afterRemove()
+```
+
 !SUBSECTION Extend
 <!-- js/server/modules/org/arangodb/foxx/model.js -->
 @startDocuBlock JSF_foxx_model_extend
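Editorial aside, not part of the diff: the `extend` docuBlock is only referenced above. As a minimal sketch of the API it documents (the model name and schema attributes are hypothetical; this assumes the joi-based `schema` support visible later in this commit):

```js
var Foxx = require("org/arangodb/foxx"),
    joi = require("joi");

// Hypothetical subclass created via Model.extend
var PersonModel = Foxx.Model.extend({
  schema: {
    name: joi.string().required(),
    admin: joi.boolean()
  }
});

var pete = new PersonModel({ name: "Pete", admin: true });
```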
@@ -4,7 +4,9 @@ It is recommended to check the following list of incompatible changes **before**
 upgrading to ArangoDB 2.3, and adjust any client programs if necessary.
 
 
-!SECTION Default configuration file changes
+!SECTION Configuration file changes
 
+!SUBSECTION Threads and contexts
+
 The number of server threads specified is now the minimum of threads
 started. There are situations in which threads are waiting for results of
@@ -34,6 +36,32 @@ files, please review if the default number of server threads is okay in your
 environment. Additionally you should verify that the number of V8 contexts
 created (as specified in option `--javascript.v8-contexts`) is okay.
 
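Editorial sketch, not part of the diff: if the new defaults do not suit your deployment, both values can be pinned in `arangod.conf` (section and option names are assumed from the flags named above; the values are placeholders):

```
[server]
threads = 16

[javascript]
v8-contexts = 16
```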
+!SUBSECTION Syslog
+
+The command-line option `--log.syslog` was used in previous versions of
+ArangoDB to turn logging to syslog on or off: when set to a non-empty
+string, syslog logging was turned on, otherwise turned off.
+When syslog logging was turned on, logging was done with the application
+name specified in `--log.application`, which defaulted to `triagens`.
+There was also a command-line option `--log.hostname` which could be set
+but did not have any effect.
+
+This behavior turned out to be unintuitive and was changed in 2.3 as follows:
+
+* the command-line option `--log.syslog` is deprecated and does not have any
+  effect when starting ArangoDB.
+* to turn on syslog logging in 2.3, the option `--log.facility` has to be set
+  to a non-empty string. The value for `facility` is OS-dependent (possible
+  values can be found in `/usr/include/syslog.h` or the like - `user` should
+  be available on many systems).
+* the default value for `--log.application` has been changed from `triagens` to
+  `arangod`.
+* the command-line option `--log.hostname` is deprecated and does not have any
+  effect when starting ArangoDB. Instead, the host name will be set by syslog
+  automatically.
+* when logging to syslog, ArangoDB now omits the datetime prefix and the process
+  id, because they'll be added by syslog automatically.
+
+
 !SECTION AQL
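Editorial sketch, not part of the diff: under the new scheme, syslog logging could be enabled like this in `arangod.conf` (option names from the text above; `user` is an assumption that should work on many systems):

```
[log]
facility = user
application = arangod
```

Equivalently on the command line: `arangod --log.facility user --log.application arangod ...`.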
@@ -1445,20 +1445,20 @@ void IndexRangeBlock::readHashIndex (IndexOrCondition const& ranges) {
   };
 
   setupSearchValue();
-  TRI_index_result_t list = TRI_LookupHashIndex(idx, &searchValue);
+  TRI_vector_pointer_t list = TRI_LookupHashIndex(idx, &searchValue);
   destroySearchValue();
 
-  size_t const n = list._length;
+  size_t const n = TRI_LengthVectorPointer(&list);
   try {
     for (size_t i = 0; i < n; ++i) {
-      _documents.emplace_back(*(list._documents[i]));
+      _documents.emplace_back(* (static_cast<TRI_doc_mptr_t*>(TRI_AtVectorPointer(&list, i))));
     }
 
     _engine->_stats.scannedIndex += static_cast<int64_t>(n);
-    TRI_DestroyIndexResult(&list);
+    TRI_DestroyVectorPointer(&list);
   }
   catch (...) {
-    TRI_DestroyIndexResult(&list);
+    TRI_DestroyVectorPointer(&list);
     throw;
   }
   LEAVE_BLOCK;
@@ -480,6 +480,58 @@ TRI_vector_pointer_t TRI_LookupByKeyHashArrayMulti (TRI_hash_array_multi_t const
   return result;
 }
 
+////////////////////////////////////////////////////////////////////////////////
+/// @brief lookups an element given a key and a state
+////////////////////////////////////////////////////////////////////////////////
+
+TRI_vector_pointer_t TRI_LookupByKeyHashArrayMulti (TRI_hash_array_multi_t const* array,
+                                                    TRI_index_search_value_t const* key,
+                                                    TRI_hash_index_element_multi_t*& next,
+                                                    size_t batchSize) {
+  TRI_ASSERT_EXPENSIVE(array->_nrUsed < array->_nrAlloc);
+  TRI_ASSERT(batchSize > 0);
+
+  // ...........................................................................
+  // initialise the vector which will hold the result if any
+  // ...........................................................................
+
+  TRI_vector_pointer_t result;
+  TRI_InitVectorPointer(&result, TRI_UNKNOWN_MEM_ZONE);
+
+  if (next == nullptr) {
+    // no previous state. start at the beginning
+    uint64_t const n = array->_nrAlloc;
+    uint64_t i, k;
+
+    i = k = HashKey(array, key) % n;
+
+    for (; i < n && array->_table[i]._document != nullptr && ! IsEqualKeyElement(array, key, &array->_table[i]); ++i);
+    if (i == n) {
+      for (i = 0; i < k && array->_table[i]._document != nullptr && ! IsEqualKeyElement(array, key, &array->_table[i]); ++i);
+    }
+
+    TRI_ASSERT_EXPENSIVE(i < n);
+
+    if (array->_table[i]._document != nullptr) {
+      TRI_PushBackVectorPointer(&result, array->_table[i]._document);
+    }
+    next = array->_table[i]._next;
+  }
+
+  if (next != nullptr) {
+    // we already had a state
+    size_t total = TRI_LengthVectorPointer(&result);
+
+    while (next != nullptr && total < batchSize) {
+      TRI_PushBackVectorPointer(&result, next->_document);
+      next = next->_next;
+      ++total;
+    }
+  }
+
+  return result;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief adds an element to the array
 ///
@@ -120,6 +120,15 @@ int TRI_ResizeHashArrayMulti (TRI_hash_array_multi_t*,
 TRI_vector_pointer_t TRI_LookupByKeyHashArrayMulti (TRI_hash_array_multi_t const*,
                                                     struct TRI_index_search_value_s const*);
 
+////////////////////////////////////////////////////////////////////////////////
+/// @brief lookups an element given a key and a state
+////////////////////////////////////////////////////////////////////////////////
+
+TRI_vector_pointer_t TRI_LookupByKeyHashArrayMulti (TRI_hash_array_multi_t const*,
+                                                    struct TRI_index_search_value_s const*,
+                                                    struct TRI_hash_index_element_multi_s*&,
+                                                    size_t);
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief adds an element to the array
 ////////////////////////////////////////////////////////////////////////////////
@@ -289,37 +289,24 @@ static int HashIndex_remove (TRI_hash_index_t* hashIndex,
 
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief locates a key within the hash array part
+/// it is the callers responsibility to destroy the result
 ////////////////////////////////////////////////////////////////////////////////
 
-static TRI_index_result_t HashIndex_find (TRI_hash_index_t* hashIndex,
-                                          TRI_index_search_value_t* key) {
-  TRI_hash_index_element_t* result;
-  TRI_index_result_t results;
+static TRI_vector_pointer_t HashIndex_find (TRI_hash_index_t* hashIndex,
+                                            TRI_index_search_value_t* key) {
+  TRI_vector_pointer_t results;
+  TRI_InitVectorPointer(&results, TRI_UNKNOWN_MEM_ZONE);
 
   // .............................................................................
   // A find request means that a set of values for the "key" was sent. We need
   // to locate the hash array entry by key.
   // .............................................................................
 
-  result = TRI_FindByKeyHashArray(&hashIndex->_hashArray, key);
+  TRI_hash_index_element_t* result = TRI_FindByKeyHashArray(&hashIndex->_hashArray, key);
 
   if (result != nullptr) {
-
     // unique hash index: maximum number is 1
-    results._length = 1;
-    results._documents = static_cast<TRI_doc_mptr_t**>(TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, 1 * sizeof(TRI_doc_mptr_t*), false));
-
-    if (results._documents == nullptr) {
-      // no memory. prevent worst case by re-setting results length to 0
-      results._length = 0;
-      return results;
-    }
-
-    results._documents[0] = result->_document;
-  }
-  else {
-    results._length = 0;
-    results._documents = nullptr;
+    TRI_PushBackVectorPointer(&results, result->_document);
   }
 
   return results;
@@ -395,46 +382,6 @@ int MultiHashIndex_remove (TRI_hash_index_t* hashIndex,
   return res;
 }
 
-////////////////////////////////////////////////////////////////////////////////
-/// @brief locates a key within the hash array part
-////////////////////////////////////////////////////////////////////////////////
-
-static TRI_index_result_t MultiHashIndex_find (TRI_hash_index_t* hashIndex,
-                                               TRI_index_search_value_t* key) {
-  TRI_index_result_t results;
-
-  // .............................................................................
-  // We can only use the LookupByKey method for non-unique hash indexes, since
-  // we want more than one result returned!
-  // .............................................................................
-
-  TRI_vector_pointer_t result = TRI_LookupByKeyHashArrayMulti(&hashIndex->_hashArrayMulti, key);
-
-  if (result._length == 0) {
-    results._length = 0;
-    results._documents = nullptr;
-  }
-  else {
-    results._length = result._length;
-    results._documents = static_cast<TRI_doc_mptr_t**>(TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, result._length * sizeof(TRI_doc_mptr_t*), false));
-
-    if (results._documents == nullptr) {
-      // no memory. prevent worst case by re-setting results length to 0
-      TRI_DestroyVectorPointer(&result);
-      results._length = 0;
-
-      return results;
-    }
-
-    for (size_t j = 0; j < result._length; ++j) {
-      results._documents[j] = ((TRI_doc_mptr_t*) result._buffer[j]);
-    }
-  }
-
-  TRI_DestroyVectorPointer(&result);
-  return results;
-}
-
 // -----------------------------------------------------------------------------
 // --SECTION--                                                        HASH INDEX
 // -----------------------------------------------------------------------------
@@ -698,18 +645,41 @@ void TRI_FreeHashIndex (TRI_index_t* idx) {
 
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief locates entries in the hash index given shaped json objects
+/// it is the callers responsibility to destroy the result
 ////////////////////////////////////////////////////////////////////////////////
 
-TRI_index_result_t TRI_LookupHashIndex (TRI_index_t* idx,
-                                        TRI_index_search_value_t* searchValue) {
+TRI_vector_pointer_t TRI_LookupHashIndex (TRI_index_t* idx,
+                                          TRI_index_search_value_t* searchValue) {
   TRI_hash_index_t* hashIndex = (TRI_hash_index_t*) idx;
 
   if (hashIndex->base._unique) {
     return HashIndex_find(hashIndex, searchValue);
   }
-  else {
-    return MultiHashIndex_find(hashIndex, searchValue);
-  }
+
+  return TRI_LookupByKeyHashArrayMulti(&hashIndex->_hashArrayMulti, searchValue);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief locates entries in the hash index given shaped json objects
+/// this function uses the state passed to it to return a fragment of the
+/// total result - the next call to the function can resume at the state where
+/// it was left off last
+/// note: state is ignored for unique indexes as there will be at most one
+/// item in the result
+/// it is the callers responsibility to destroy the result
+////////////////////////////////////////////////////////////////////////////////
+
+TRI_vector_pointer_t TRI_LookupHashIndex (TRI_index_t* idx,
+                                          TRI_index_search_value_t* searchValue,
+                                          TRI_hash_index_element_multi_t*& next,
+                                          size_t batchSize) {
+  TRI_hash_index_t* hashIndex = (TRI_hash_index_t*) idx;
+
+  if (hashIndex->base._unique) {
+    return HashIndex_find(hashIndex, searchValue);
+  }
+
+  return TRI_LookupByKeyHashArrayMulti(&hashIndex->_hashArrayMulti, searchValue, next, batchSize);
 }
 
 // -----------------------------------------------------------------------------
@@ -55,19 +55,25 @@ struct TRI_shaped_sub_s;
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief hash index element
 ///
-/// This structure is used for the elements of an hash index.
+/// This structure is used for the elements of a unique hash index.
 ////////////////////////////////////////////////////////////////////////////////
 
 typedef struct TRI_hash_index_element_s {
-  struct TRI_doc_mptr_t* _document;
-  struct TRI_shaped_sub_s* _subObjects;
+  struct TRI_doc_mptr_t*   _document;
+  struct TRI_shaped_sub_s* _subObjects;
 }
 TRI_hash_index_element_t;
 
+////////////////////////////////////////////////////////////////////////////////
+/// @brief hash index element
+///
+/// This structure is used for the elements of a non-unique hash index.
+////////////////////////////////////////////////////////////////////////////////
+
 typedef struct TRI_hash_index_element_multi_s {
-  struct TRI_doc_mptr_t* _document;
-  struct TRI_shaped_sub_s* _subObjects;
-  struct TRI_hash_index_element_multi_s* _next;
+  struct TRI_doc_mptr_t*   _document;
+  struct TRI_shaped_sub_s* _subObjects;
+  struct TRI_hash_index_element_multi_s* _next;
 }
 TRI_hash_index_element_multi_t;

@@ -79,10 +85,10 @@ typedef struct TRI_hash_index_s {
   TRI_index_t base;
 
   union {
-    TRI_hash_array_t _hashArray;            // the hash array itself, unique values
+    TRI_hash_array_t       _hashArray;      // the hash array itself, unique values
     TRI_hash_array_multi_t _hashArrayMulti; // the hash array itself, non-unique values
   };
-  TRI_vector_t _paths; // a list of shape pid which identifies the fields of the index
+  TRI_vector_t _paths;                      // a list of shape pid which identifies the fields of the index
 }
 TRI_hash_index_t;
@@ -118,10 +124,26 @@ void TRI_FreeHashIndex (TRI_index_t*);
 
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief locates entries in the hash index given shaped json objects
+/// it is the callers responsibility to destroy the result
 ////////////////////////////////////////////////////////////////////////////////
 
-TRI_index_result_t TRI_LookupHashIndex (TRI_index_t*,
-                                        struct TRI_index_search_value_s*);
+TRI_vector_pointer_t TRI_LookupHashIndex (TRI_index_t*,
+                                          struct TRI_index_search_value_s*);
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief locates entries in the hash index given shaped json objects
+/// this function uses the state passed to it to return a fragment of the
+/// total result - the next call to the function can resume at the state where
+/// it was left off last
+/// note: state is ignored for unique indexes as there will be at most one
+/// item in the result
+/// it is the callers responsibility to destroy the result
+////////////////////////////////////////////////////////////////////////////////
+
+TRI_vector_pointer_t TRI_LookupHashIndex (TRI_index_t*,
+                                          struct TRI_index_search_value_s*,
+                                          struct TRI_hash_index_element_multi_s*&,
+                                          size_t);
 
 #endif
@@ -1589,11 +1589,11 @@ static v8::Handle<v8::Value> ByExampleHashIndexQuery (SingleCollectionReadOnlyTr
   }
 
   // find the matches
-  TRI_index_result_t list = TRI_LookupHashIndex(idx, &searchValue);
+  TRI_vector_pointer_t list = TRI_LookupHashIndex(idx, &searchValue);
   DestroySearchValue(shaper->_memoryZone, searchValue);
 
   // convert result
-  size_t total = list._length;
+  size_t total = TRI_LengthVectorPointer(&list);
   size_t count = 0;
   bool error = false;
 
@@ -1605,7 +1605,7 @@ static v8::Handle<v8::Value> ByExampleHashIndexQuery (SingleCollectionReadOnlyTr
 
   if (s < e) {
     for (size_t i = s; i < e; ++i) {
-      v8::Handle<v8::Value> doc = WRAP_SHAPED_JSON(trx, collection->_cid, list._documents[i]->getDataPtr());
+      v8::Handle<v8::Value> doc = WRAP_SHAPED_JSON(trx, collection->_cid, static_cast<TRI_doc_mptr_t*>(TRI_AtVectorPointer(&list, i))->getDataPtr());
 
       if (doc.IsEmpty()) {
         error = true;

@@ -1619,7 +1619,7 @@ static v8::Handle<v8::Value> ByExampleHashIndexQuery (SingleCollectionReadOnlyTr
   }
 
   // free data allocated by hash index result
-  TRI_DestroyIndexResult(&list);
+  TRI_DestroyVectorPointer(&list);
 
   result->Set(v8::String::New("total"), v8::Number::New((double) total));
   result->Set(v8::String::New("count"), v8::Number::New((double) count));
@@ -470,16 +470,6 @@ TRI_json_t* TRI_JsonIndex (TRI_memory_zone_t* zone,
   return json;
 }
 
-////////////////////////////////////////////////////////////////////////////////
-/// @brief destroys a result set returned by a hash index query
-////////////////////////////////////////////////////////////////////////////////
-
-void TRI_DestroyIndexResult (TRI_index_result_t* result) {
-  if (result->_documents != nullptr) {
-    TRI_Free(TRI_UNKNOWN_MEM_ZONE, result->_documents);
-  }
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief copies a path vector
 ////////////////////////////////////////////////////////////////////////////////
@@ -199,16 +199,6 @@ typedef struct TRI_cap_constraint_s {
 }
 TRI_cap_constraint_t;
 
-////////////////////////////////////////////////////////////////////////////////
-/// @brief index query result
-////////////////////////////////////////////////////////////////////////////////
-
-typedef struct TRI_index_result_s {
-  size_t _length;
-  struct TRI_doc_mptr_t** _documents; // simple list of elements
-}
-TRI_index_result_t;
-
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief index query parameter
 ////////////////////////////////////////////////////////////////////////////////

@@ -310,12 +300,6 @@ TRI_index_t* TRI_LookupIndex (struct TRI_document_collection_t*,
 TRI_json_t* TRI_JsonIndex (TRI_memory_zone_t*,
                            TRI_index_t const*);
 
-////////////////////////////////////////////////////////////////////////////////
-/// @brief destroys a result set returned by a hash index query
-////////////////////////////////////////////////////////////////////////////////
-
-void TRI_DestroyIndexResult (TRI_index_result_t*);
-
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief copies a path vector
 ////////////////////////////////////////////////////////////////////////////////
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
function DOCUMENTS_BY_EXAMPLE (collectionList, example) {
|
||||
var res = [];
|
||||
if (example === "null") {
|
||||
example = [{}];
|
||||
}
|
||||
if (!example) {
|
||||
example = [{}];
|
||||
var res = [ ];
|
||||
if (example === "null" || example === null || ! example) {
|
||||
example = [ { } ];
|
||||
}
|
||||
if (typeof example === "string") {
|
||||
example = {_id : example};
|
||||
example = { _id : example };
|
||||
}
|
||||
if (!Array.isArray(example)) {
|
||||
example = [example];
|
||||
if (! Array.isArray(example)) {
|
||||
example = [ example ];
|
||||
}
|
||||
var tmp = [];
|
||||
var tmp = [ ];
|
||||
example.forEach(function (e) {
|
||||
if (typeof e === "string") {
|
||||
tmp.push({_id : e});
|
||||
} else {
|
||||
tmp.push({ _id : e });
|
||||
}
|
||||
else {
|
||||
tmp.push(e);
|
||||
}
|
||||
});
|
||||
|
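Editorial aside, not part of the diff: the rewritten normalization accepts `null`, the string `"null"`, a document id string, a single example document, or an array of these. A standalone sketch of the same logic (hypothetical helper name):

```js
function normalizeExample (example) {
  if (example === "null" || example === null || ! example) {
    example = [ { } ];
  }
  if (typeof example === "string") {
    example = { _id : example };
  }
  if (! Array.isArray(example)) {
    example = [ example ];
  }
  return example.map(function (e) {
    return (typeof e === "string") ? { _id : e } : e;
  });
}

normalizeExample(null);           // => [ { } ]
normalizeExample("users/123");    // => [ { _id : "users/123" } ]
normalizeExample({ name: "x" });  // => [ { name : "x" } ]
```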
@@ -5219,6 +5217,9 @@ function CALCULATE_SHORTEST_PATHES_WITH_DIJKSTRA (graphName, options) {
   params.paths = true;
   if (options.edgeExamples) {
     params.followEdges = options.edgeExamples;
+    if (! Array.isArray(params.followEdges)) {
+      params.followEdges = [ params.followEdges ];
+    }
   }
   if (options.edgeCollectionRestriction) {
     params.edgeCollectionRestriction = options.edgeCollectionRestriction;
@@ -32,6 +32,8 @@ var Model,
     joi = require("joi"),
     is = require("org/arangodb/is"),
     extend = require('org/arangodb/extend').extend,
+    EventEmitter = require('events').EventEmitter,
+    util = require('util'),
     excludeExtraAttributes,
     metadataSchema = {
       _id: joi.string().optional(),
@@ -142,8 +144,11 @@ Model = function (attributes) {
   } else if (attributes) {
     instance.attributes = _.clone(attributes);
   }
+  EventEmitter.call(instance);
 };
 
+util.inherits(Model, EventEmitter);
+
 Model.fromClient = function (attributes) {
   'use strict';
   return new this(excludeExtraAttributes(attributes, this));
@@ -161,39 +166,43 @@ _.extend(Model, {
     if (this.prototype.schema) {
       _.each(this.prototype.schema, function (schema, attributeName) {
         var description = schema.describe(),
-          type = description.type,
+          jsonSchema = {type: description.type},
           rules = description.rules,
           flags = description.flags;
 
         if (flags && flags.presence === 'required') {
+          jsonSchema.required = true;
           required.push(attributeName);
         }
 
         if (
-          type === 'number' &&
+          jsonSchema.type === 'number' &&
           _.isArray(rules) &&
           _.some(rules, function (rule) {
             return rule.name === 'integer';
           })
         ) {
-          type = 'integer';
+          jsonSchema.type = 'integer';
         }
 
-        properties[attributeName] = {type: type};
+        properties[attributeName] = jsonSchema;
       });
     } else {
      // deprecated
      _.each(this.attributes, function (attribute, attributeName) {
+        var jsonSchema = {};
        if (typeof attribute === 'string') {
-          properties[attributeName] = {type: attribute};
+          jsonSchema.type = attribute;
        } else if (attribute) {
          if (typeof attribute.type === 'string') {
-            properties[attributeName] = {type: attribute.type};
+            jsonSchema.type = attribute.type;
          }
          if (attribute.required) {
            required.push(attributeName);
+            jsonSchema.required = true;
          }
        }
+        properties[attributeName] = jsonSchema;
      });
    }
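Editorial aside, not part of the diff: this hunk appears to belong to the model's JSON-schema generation. Given a joi-based schema, the logic above would produce entries roughly like the comments below (model and attribute names are hypothetical):

```js
var Foxx = require("org/arangodb/foxx"),
    joi = require("joi");

var AgeModel = Foxx.Model.extend({
  schema: {
    age: joi.number().integer().required(), // => { type: 'integer', required: true }
    name: joi.string()                      // => { type: 'string' }
  }
});
// The `required` list collected above would then contain [ 'age' ].
```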
@@ -130,8 +130,12 @@ _.extend(Repository.prototype, {
 ////////////////////////////////////////////////////////////////////////////////
 
   save: function (model) {
     'use strict';
+    model.emit('beforeCreate');
+    model.emit('beforeSave');
     var id_and_rev = this.collection.save(model.forDB());
     model.set(id_and_rev);
+    model.emit('afterSave');
+    model.emit('afterCreate');
     return model;
   },
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
remove: function (model) {
|
||||
'use strict';
|
||||
var id = model.get('_id');
|
||||
return this.collection.remove(id);
|
||||
model.emit('beforeRemove');
|
||||
var id = model.get('_id'),
|
||||
result = this.collection.remove(id);
|
||||
model.emit('afterRemove');
|
||||
return result;
|
||||
},
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -398,10 +405,14 @@ _.extend(Repository.prototype, {
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
update: function (model, data) {
|
||||
'use strict';
|
||||
model.emit('beforeUpdate', data);
|
||||
model.emit('beforeSave', data);
|
||||
var id = model.get("_id") || model.get("_key"),
|
||||
id_and_rev = this.collection.update(id, data);
|
||||
model.set(data);
|
||||
model.set(id_and_rev);
|
||||
model.emit('afterSave', data);
|
||||
model.emit('afterUpdate', data);
|
||||
return model;
|
||||
},
|
||||
|
||||
|
|
|
@@ -0,0 +1,83 @@
+/*global require, describe, expect, it, beforeEach, createSpyObj */
+
+var FoxxRepository = require("org/arangodb/foxx/repository").Repository,
+  Model = require("org/arangodb/foxx/model").Model;
+
+describe('Model Events', function () {
+  'use strict';
+
+  var collection, instance, repository;
+
+  beforeEach(function () {
+    collection = createSpyObj('collection', [
+      'update',
+      'save',
+      'remove'
+    ]);
+    instance = new Model({ random: '', beforeCalled: false, afterCalled: false });
+    repository = new FoxxRepository(collection, {model: Model});
+  });
+
+  it('should be possible to subscribe and emit events', function () {
+    expect(instance.on).toBeDefined();
+    expect(instance.emit).toBeDefined();
+  });
+
+  it('should emit beforeCreate and afterCreate events when creating the model', function () {
+    addHooks(instance, 'Create');
+    expect(repository.save(instance)).toEqual(instance);
+    expect(instance.get('beforeCalled')).toBe(true);
+    expect(instance.get('afterCalled')).toBe(true);
+  });
+
+  it('should emit beforeSave and afterSave events when creating the model', function () {
+    addHooks(instance, 'Save');
+    expect(repository.save(instance)).toEqual(instance);
+    expect(instance.get('beforeCalled')).toBe(true);
+    expect(instance.get('afterCalled')).toBe(true);
+  });
+
+  it('should emit beforeUpdate and afterUpdate events when updating the model', function () {
+    var newData = { newAttribute: 'test' };
+    addHooks(instance, 'Update', newData);
+    expect(repository.update(instance, newData)).toEqual(instance);
+    expect(instance.get('beforeCalled')).toBe(true);
+    expect(instance.get('afterCalled')).toBe(true);
+  });
+
+  it('should emit beforeSave and afterSave events when updating the model', function () {
+    var newData = { newAttribute: 'test' };
+    addHooks(instance, 'Save', newData);
+    expect(repository.update(instance, newData)).toEqual(instance);
+    expect(instance.get('beforeCalled')).toBe(true);
+    expect(instance.get('afterCalled')).toBe(true);
+  });
+
+  it('should emit beforeRemove and afterRemove events when removing the model', function () {
+    addHooks(instance, 'Remove');
+    repository.remove(instance);
+    expect(instance.get('beforeCalled')).toBe(true);
+    expect(instance.get('afterCalled')).toBe(true);
+  });
+
+});
+
+function addHooks(model, ev, dataToReceive) {
+  'use strict';
+
+  var random = String(Math.floor(Math.random() * 1000));
+
+  model.on('before' + ev, function (data) {
+    expect(this).toEqual(model);
+    expect(data).toEqual(dataToReceive);
+    this.set('random', random);
+    this.set('beforeCalled', true);
+  });
+  model.on('after' + ev, function (data) {
+    expect(this).toEqual(model);
+    expect(data).toEqual(dataToReceive);
+    this.set('afterCalled', true);
+    expect(this.get('beforeCalled')).toBe(true);
+    expect(this.get('random')).toEqual(random);
+  });
+}
@@ -255,6 +255,27 @@ function transactionInvocationSuite () {
       }
     },
 
+////////////////////////////////////////////////////////////////////////////////
+/// @brief test: nesting
+////////////////////////////////////////////////////////////////////////////////
+
+    testNestingEmbedFlag : function () {
+      var obj = {
+        collections : {
+        },
+        action : function () {
+          return 19 + TRANSACTION({
+            collections: {
+            },
+            embed: true,
+            action: "function () { return 23; }"
+          });
+        }
+      };
+
+      assertEqual(42, TRANSACTION(obj));
+    },
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief test: params
 ////////////////////////////////////////////////////////////////////////////////
@@ -53,6 +53,8 @@ using namespace triagens::basics;
 using namespace triagens::rest;
 using namespace std;
 
+static string DeprecatedParameter;
+
 // -----------------------------------------------------------------------------
 // --SECTION--                                                  public constants
 // -----------------------------------------------------------------------------
@@ -118,15 +120,13 @@ ApplicationServer::ApplicationServer (std::string const& name, std::string const
     _gid(),
     _realGid(0),
     _effectiveGid(0),
-    _logApplicationName("triagens"),
-    _logHostName("-"),
-    _logFacility("-"),
+    _logApplicationName("arangod"),
+    _logFacility(""),
     _logLevel("info"),
     _logSeverity("human"),
     _logFile("+"),
     _logRequestsFile(""),
     _logPrefix(),
-    _logSyslog(),
     _logThreadId(false),
     _logLineNumber(false),
     _logLocalTime(false),
@@ -159,15 +159,13 @@ ApplicationServer::ApplicationServer (std::string const& name, std::string const
     _gid(),
     _realGid(0),
     _effectiveGid(0),
-    _logApplicationName("triagens"),
-    _logHostName("-"),
-    _logFacility("-"),
+    _logApplicationName("arangod"),
+    _logFacility(""),
     _logLevel("info"),
     _logSeverity("human"),
     _logFile("+"),
     _logRequestsFile(""),
     _logPrefix(),
-    _logSyslog(),
     _logThreadId(false),
     _logLineNumber(false),
     _logLocalTime(false),
@@ -308,7 +306,7 @@ void ApplicationServer::setupLogging (bool threaded, bool daemon) {
   }
 
 #ifdef TRI_ENABLE_SYSLOG
-  if (_logSyslog != "") {
+  if (! _logFacility.empty()) {
     TRI_CreateLogAppenderSyslog(_logApplicationName.c_str(),
                                 _logFacility.c_str(),
                                 contentFilter,
@@ -818,20 +816,20 @@ void ApplicationServer::setupOptions (map<string, ProgramOptionsDescription>& op
 
   options[OPTIONS_LOGGER + ":help-log"]
     ("log.application", &_logApplicationName, "application name for syslog")
-    ("log.facility", &_logFacility, "facility name for syslog")
+    ("log.facility", &_logFacility, "facility name for syslog (OS dependent)")
     ("log.source-filter", &_logSourceFilter, "only debug and trace messages emitted by specific C source file")
     ("log.content-filter", &_logContentFilter, "only log message containing the specified string (case-sensitive)")
-    ("log.hostname", &_logHostName, "host name for syslog")
     ("log.line-number", "always log file and line number")
     ("log.prefix", &_logPrefix, "prefix log")
     ("log.severity", &_logSeverity, "log severities")
-    ("log.syslog", &_logSyslog, "use syslog facility")
     ("log.thread", "log the thread identifier for severity 'human'")
     ("log.use-local-time", "use local dates and times in log messages")
   ;
 
   options[OPTIONS_HIDDEN]
     ("log", &_logLevel, "log level for severity 'human'")
+    ("log.syslog", &DeprecatedParameter, "use syslog facility (deprecated)")
+    ("log.hostname", &DeprecatedParameter, "host name for syslog")
 #ifdef TRI_HAVE_SETUID
     ("uid", &_uid, "switch to user-id after reading config files")
 #endif
@@ -522,26 +522,23 @@ namespace triagens {
 
         std::string _logApplicationName;
 
-////////////////////////////////////////////////////////////////////////////////
-/// @brief log host name
-/// @startDocuBlock logHostname
-/// `--log.hostname name`
-///
-/// Specifies the *name* of the operating environment (the "hostname") which
-/// should be logged if this item of information is to be logged. Note that
-/// there is no default hostname.
-/// @endDocuBlock
-////////////////////////////////////////////////////////////////////////////////
-
-        std::string _logHostName;
-
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief log facility
 /// @startDocuBlock logFacility
 /// `--log.facility name`
 ///
-/// Specifies the name of the server instance which should be logged if this
-/// item of information is to be logged.
+/// If this option is set, then in addition to output being directed to the
+/// standard output (or to a specified file, in the case that the command line
+/// log.file option was set), log output is also sent to the system logging
+/// facility. The *arg* is the system log facility to use. See syslog for
+/// further details.
+///
+/// The value of *arg* depends on your syslog configuration. In general it
+/// will be *user*. Fatal messages are mapped to *crit*, so if *arg*
+/// is *user*, these messages will be logged as *user.crit*. Error
+/// messages are mapped to *err*. Warnings are mapped to *warn*. Info
+/// messages are mapped to *notice*. Debug messages are mapped to
+/// *info*. Trace messages are mapped to *debug*.
 /// @endDocuBlock
 ////////////////////////////////////////////////////////////////////////////////
@@ -709,28 +706,6 @@ namespace triagens {
 
         std::string _logPrefix;
 
-////////////////////////////////////////////////////////////////////////////////
-/// @brief log level
-/// @startDocuBlock logSyslog
-/// `--log.syslog arg`
-///
-/// If this option is set, then in addition to output being directed to the
-/// standard output (or to a specified file, in the case that the command line
-/// log.file option was set), log output is also sent to the system logging
-/// facility. The *arg* is the system log facility to use. See syslog for
-/// further details.
-///
-/// The value of *arg* depends on your syslog configuration. In general it
-/// will be *user*. Fatal messages are mapped to *crit*, so if *arg*
-/// is *user*, these messages will be logged as *user.crit*. Error
-/// messages are mapped to *err*. Warnings are mapped to *warn*. Info
-/// messages are mapped to *notice*. Debug messages are mapped to
-/// *info*. Trace messages are mapped to *debug*.
-/// @endDocuBlock
-////////////////////////////////////////////////////////////////////////////////
-
-        std::string _logSyslog;
-
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief log thread identifier
 /// @startDocuBlock logThread
@@ -1746,8 +1746,17 @@ static void LogAppenderSyslog_Log (TRI_log_appender_t* appender,
 
   self = (log_appender_syslog_t*) appender;
 
+  char const* ptr = strchr(msg, ']');
+
+  if (ptr == nullptr) {
+    ptr = msg;
+  }
+  else if (ptr[1] != '\0') {
+    ptr += 2;
+  }
+
   TRI_LockMutex(&self->_mutex);
-  syslog(priority, "%s", msg);
+  syslog(priority, "%s", ptr);
   TRI_UnlockMutex(&self->_mutex);
 }