mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'collection-not-loaded' of https://github.com/arangodb/arangodb into devel
Conflicts:
	js/apps/system/_admin/aardvark/APP/frontend/build/app.js.gz
	js/apps/system/_admin/aardvark/APP/frontend/build/app.min.js
	js/apps/system/_admin/aardvark/APP/frontend/build/app.min.js.gz
	js/apps/system/_admin/aardvark/APP/frontend/build/scripts.html.part
	js/apps/system/_admin/aardvark/APP/frontend/build/standalone-min.html
	js/apps/system/_admin/aardvark/APP/frontend/build/standalone-min.html.gz
	js/apps/system/_admin/aardvark/APP/frontend/build/standalone.html
This commit is contained in: commit 68465ebba5
CHANGELOG

@@ -1,8 +1,55 @@
v2.7.0 (XXXX-XX-XX)
-------------------

* Linux startup scripts and systemd configuration for arangod now try to
  adjust the NOFILE (number of open files) limits for the process. The limit
  value is set to 131072 (128k) when ArangoDB is started via the start/stop
  commands.

* When ArangoDB is started/stopped manually via the start/stop commands, the
  main process will wait for up to 10 seconds after it forks the supervisor
  and arangod child processes. If the startup fails within that period, the
  start/stop script will fail with an exit code other than zero. If the
  startup of the supervisor or arangod is still ongoing after 10 seconds,
  the main program will still return with exit code 0. The limit of 10 seconds
  is arbitrary because the time required for a startup is not known in advance.

* added startup option `--database.throw-collection-not-loaded-error`

  Accessing a not-yet loaded collection will automatically load the collection
  on first access. This flag controls what happens in case an operation
  would need to wait for another thread to finalize loading a collection. If
  set to *true*, then the first operation that accesses an unloaded collection
  will load it. Further threads that try to access the same collection while
  it is still loading will get an error (1238, *collection not loaded*). When
  the initial operation has completed loading the collection, all operations
  on the collection can be carried out normally, and error 1238 will not be
  thrown.

  If set to *false*, the first thread that accesses a not-yet loaded collection
  will still load it. Other threads that try to access the collection while
  loading will not fail with error 1238 but instead block until the collection
  is fully loaded. This configuration might lead to all server threads being
  blocked because they are all waiting for the same collection to complete
  loading. Setting the option to *true* will prevent this from happening, but
  requires clients to catch error 1238 and react to it (maybe by scheduling
  a retry for later).

  The default value is *false*.
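
  As an illustration only (not part of this commit), a client running
  server-side JavaScript could deal with error 1238 roughly as follows; the
  collection name "example" and the retry parameters are made up:

    var db = require("org/arangodb").db;
    var internal = require("internal");

    // attempt an operation on a possibly still-loading collection,
    // retrying whenever error 1238 (collection not loaded) is reported
    function countWithRetry (name, maxRetries) {
      for (var attempt = 0; attempt < maxRetries; ++attempt) {
        try {
          return db._collection(name).count();
        }
        catch (err) {
          if (err.errorNum !== internal.errors.ERROR_ARANGO_COLLECTION_NOT_LOADED.code) {
            throw err;          // a different problem, do not retry
          }
          internal.wait(0.5);   // collection is still being loaded, try again later
        }
      }
      throw "collection '" + name + "' was still loading after " + maxRetries + " attempts";
    }

    countWithRetry("example", 10);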

* added better control-C support in arangosh

  When CTRL-C is pressed in arangosh, it will now print a `^C` first. Pressing
  CTRL-C again will reset the prompt if something was entered before, or quit
  arangosh if no command was entered directly before.

  This affects only arangosh versions built with Readline support.

  The MacOS version of ArangoDB for Homebrew now depends on Readline, too. The
  Homebrew formula has been changed accordingly.
  When self-compiling ArangoDB on MacOS without Homebrew, Readline is now a
  prerequisite.

* increased the default value of the collection-specific `indexBuckets` setting
  from 1 to 16

  Collections created from 2.7 on will use the new default if not overridden on
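
  As an aside for illustration (not part of the diff), the value can still be
  chosen explicitly per collection when it is created; the collection name and
  the value 8 below are made up:

    var db = require("org/arangodb").db;

    // override the default explicitly; the value should be a power of 2
    var c = db._create("example", { indexBuckets: 8 });

    c.properties().indexBuckets;   // 8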

@@ -107,6 +107,10 @@ the option *--disable-figures*.
@startDocuBlock databaseDisableQueryTracking


!SUBSECTION Throw collection not loaded error
@startDocuBlock databaseThrowCollectionNotLoadedError


!SUBSECTION AQL Query caching mode
@startDocuBlock queryCacheMode

@@ -482,7 +482,8 @@ SHELL_COMMON = \
################################################################################

SHELL_SERVER_ONLY = \
-@top_srcdir@/js/server/tests/shell-readonly-noncluster-disabled.js\
+@top_srcdir@/js/server/tests/shell-readonly-noncluster-disabled.js \
+@top_srcdir@/js/server/tests/shell-collection-not-loaded-timecritical-noncluster.js \
@top_srcdir@/js/server/tests/shell-wal-noncluster-memoryintense.js \
@top_srcdir@/js/server/tests/shell-sharding-helpers.js \
@top_srcdir@/js/server/tests/shell-compaction-noncluster-timecritical.js \

@@ -354,6 +354,7 @@ ArangoServer::ArangoServer (int argc, char** argv)
  _ignoreDatafileErrors(false),
  _disableReplicationApplier(false),
  _disableQueryTracking(false),
  _throwCollectionNotLoadedError(false),
  _foxxQueues(true),
  _foxxQueuesPollInterval(1.0),
  _server(nullptr),

@@ -576,6 +577,7 @@ void ArangoServer::buildApplicationServer () {
    ("database.query-cache-mode", &_queryCacheMode, "mode for the AQL query cache (on, off, demand)")
    ("database.query-cache-max-results", &_queryCacheMaxResults, "maximum number of results in query cache per database")
    ("database.index-threads", &_indexThreads, "threads to start for parallel background index creation")
    ("database.throw-collection-not-loaded-error", &_throwCollectionNotLoadedError, "throw an error when accessing a collection that is still loading")
  ;

  // .............................................................................

@@ -719,6 +721,8 @@ void ArangoServer::buildApplicationServer () {
    // testing disables authentication
    _disableAuthentication = true;
  }

  TRI_SetThrowCollectionNotLoadedVocBase(nullptr, _throwCollectionNotLoadedError);

  // set global query tracking flag
  triagens::aql::Query::DisableQueryTracking(_disableQueryTracking);

@@ -367,6 +367,9 @@ namespace triagens {
/// are shared among multiple collections and databases. Specifying a value of
/// *0* will turn off parallel building, meaning that indexes for each collection
/// are built sequentially by the thread that opened the collection.
/// If the number of index threads is greater than 1, it will also be used to
/// build the edge index of a collection in parallel (this also requires the
/// edge index in the collection to be split into multiple buckets).
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////

@@ -554,6 +557,36 @@ namespace triagens {

      bool _disableQueryTracking;

////////////////////////////////////////////////////////////////////////////////
/// @brief throw collection not loaded error
/// @startDocuBlock databaseThrowCollectionNotLoadedError
/// `--database.throw-collection-not-loaded-error flag`
///
/// Accessing a not-yet loaded collection will automatically load the collection
/// on first access. This flag controls what happens in case an operation
/// would need to wait for another thread to finalize loading a collection. If
/// set to *true*, then the first operation that accesses an unloaded collection
/// will load it. Further threads that try to access the same collection while
/// it is still loading will get an error (1238, *collection not loaded*). When
/// the initial operation has completed loading the collection, all operations
/// on the collection can be carried out normally, and error 1238 will not be
/// thrown.
///
/// If set to *false*, the first thread that accesses a not-yet loaded collection
/// will still load it. Other threads that try to access the collection while
/// loading will not fail with error 1238 but instead block until the collection
/// is fully loaded. This configuration might lead to all server threads being
/// blocked because they are all waiting for the same collection to complete
/// loading. Setting the option to *true* will prevent this from happening, but
/// requires clients to catch error 1238 and react to it (maybe by scheduling
/// a retry for later).
///
/// The default value is *false*.
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////

      bool _throwCollectionNotLoadedError;

////////////////////////////////////////////////////////////////////////////////
/// @brief enable or disable the Foxx queues feature
/// @startDocuBlock foxxQueues

@@ -1602,6 +1602,34 @@ static void JS_QueryCacheInvalidateAql (const v8::FunctionCallbackInfo<v8::Value
  TRI_V8_TRY_CATCH_END
}

////////////////////////////////////////////////////////////////////////////////
/// @brief throw collection not loaded
////////////////////////////////////////////////////////////////////////////////

static void JS_ThrowCollectionNotLoaded (const v8::FunctionCallbackInfo<v8::Value>& args) {
  TRI_V8_TRY_CATCH_BEGIN(isolate);
  v8::HandleScope scope(isolate);

  TRI_vocbase_t* vocbase = GetContextVocBase(isolate);

  if (vocbase == nullptr) {
    TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
  }

  if (args.Length() == 0) {
    bool value = TRI_GetThrowCollectionNotLoadedVocBase(vocbase);
    TRI_V8_RETURN(v8::Boolean::New(isolate, value));
  }
  else if (args.Length() == 1) {
    TRI_SetThrowCollectionNotLoadedVocBase(vocbase, TRI_ObjectToBoolean(args[0]));
  }
  else {
    TRI_V8_THROW_EXCEPTION_USAGE("THROW_COLLECTION_NOT_LOADED(<value>)");
  }

  TRI_V8_TRY_CATCH_END
}

////////////////////////////////////////////////////////////////////////////////
/// @brief Transforms VertexId to v8String
////////////////////////////////////////////////////////////////////////////////

@@ -3814,6 +3842,8 @@ void TRI_InitV8VocBridge (v8::Isolate* isolate,
  TRI_AddGlobalFunctionVocbase(isolate, context, TRI_V8_ASCII_STRING("AQL_QUERY_CACHE_PROPERTIES"), JS_QueryCachePropertiesAql, true);
  TRI_AddGlobalFunctionVocbase(isolate, context, TRI_V8_ASCII_STRING("AQL_QUERY_CACHE_INVALIDATE"), JS_QueryCacheInvalidateAql, true);

  TRI_AddGlobalFunctionVocbase(isolate, context, TRI_V8_ASCII_STRING("THROW_COLLECTION_NOT_LOADED"), JS_ThrowCollectionNotLoaded, true);

  TRI_AddGlobalFunctionVocbase(isolate, context, TRI_V8_ASCII_STRING("CPP_SHORTEST_PATH"), JS_QueryShortestPath, true);
  TRI_AddGlobalFunctionVocbase(isolate, context, TRI_V8_ASCII_STRING("CPP_NEIGHBORS"), JS_QueryNeighbors, true);

@@ -72,11 +72,17 @@
#define COLLECTION_STATUS_POLL_INTERVAL (1000 * 10)

// -----------------------------------------------------------------------------
-// --SECTION-- private types
+// --SECTION-- private variables
// -----------------------------------------------------------------------------

static std::atomic<TRI_voc_tick_t> QueryId(1);

static std::atomic<bool> ThrowCollectionNotLoaded(false);

// -----------------------------------------------------------------------------
// --SECTION-- private types
// -----------------------------------------------------------------------------

////////////////////////////////////////////////////////////////////////////////
/// @brief auxiliary struct for index iteration
////////////////////////////////////////////////////////////////////////////////

@@ -989,7 +995,7 @@ static int ScanPath (TRI_vocbase_t* vocbase,
////////////////////////////////////////////////////////////////////////////////
/// @brief loads an existing (document) collection
///
-/// Note that this will READ lock the collection you have to release the
+/// Note that this will READ lock the collection. You have to release the
/// collection lock by yourself.
////////////////////////////////////////////////////////////////////////////////

@@ -1076,7 +1082,7 @@ static int LoadCollectionVocBase (TRI_vocbase_t* vocbase,
  // currently loading
  if (collection->_status == TRI_VOC_COL_STATUS_LOADING) {
    // loop until the status changes
-   while (1) {
+   while (true) {
      TRI_vocbase_col_status_e status = collection->_status;

      TRI_WRITE_UNLOCK_STATUS_VOCBASE_COL(collection);

@@ -1084,6 +1090,12 @@ static int LoadCollectionVocBase (TRI_vocbase_t* vocbase,
      if (status != TRI_VOC_COL_STATUS_LOADING) {
        break;
      }

      // only throw this particular error if the server is configured to do so
      if (ThrowCollectionNotLoaded.load(std::memory_order_relaxed)) {
        return TRI_ERROR_ARANGO_COLLECTION_NOT_LOADED;
      }

      usleep(COLLECTION_STATUS_POLL_INTERVAL);

      TRI_WRITE_LOCK_STATUS_VOCBASE_COL(collection);

@@ -1094,8 +1106,6 @@ static int LoadCollectionVocBase (TRI_vocbase_t* vocbase,

  // unloaded, load collection
  if (collection->_status == TRI_VOC_COL_STATUS_UNLOADED) {
-   TRI_document_collection_t* document;

    // set the status to loading
    collection->_status = TRI_VOC_COL_STATUS_LOADING;

@@ -1105,7 +1115,7 @@ static int LoadCollectionVocBase (TRI_vocbase_t* vocbase,
    // disk activity, index creation etc.)
    TRI_WRITE_UNLOCK_STATUS_VOCBASE_COL(collection);

-   document = TRI_OpenDocumentCollection(vocbase, collection, IGNORE_DATAFILE_ERRORS);
+   TRI_document_collection_t* document = TRI_OpenDocumentCollection(vocbase, collection, IGNORE_DATAFILE_ERRORS);

    // lock again to adjust the status
    TRI_WRITE_LOCK_STATUS_VOCBASE_COL(collection);

@@ -2331,6 +2341,23 @@ TRI_voc_tick_t TRI_NextQueryIdVocBase (TRI_vocbase_t* vocbase) {
  return QueryId.fetch_add(1, std::memory_order_seq_cst);
}

////////////////////////////////////////////////////////////////////////////////
/// @brief gets the "throw collection not loaded error"
////////////////////////////////////////////////////////////////////////////////

bool TRI_GetThrowCollectionNotLoadedVocBase (TRI_vocbase_t* vocbase) {
  return ThrowCollectionNotLoaded.load(std::memory_order_seq_cst);
}

////////////////////////////////////////////////////////////////////////////////
/// @brief sets the "throw collection not loaded error"
////////////////////////////////////////////////////////////////////////////////

void TRI_SetThrowCollectionNotLoadedVocBase (TRI_vocbase_t* vocbase,
                                             bool value) {
  ThrowCollectionNotLoaded.store(value, std::memory_order_seq_cst);
}

// -----------------------------------------------------------------------------
// --SECTION-- TRI_vocbase_t
// -----------------------------------------------------------------------------

@@ -611,6 +611,19 @@ bool TRI_IsAllowedNameVocBase (bool,

TRI_voc_tick_t TRI_NextQueryIdVocBase (TRI_vocbase_t*);

////////////////////////////////////////////////////////////////////////////////
/// @brief gets the "throw collection not loaded error"
////////////////////////////////////////////////////////////////////////////////

bool TRI_GetThrowCollectionNotLoadedVocBase (TRI_vocbase_t*);

////////////////////////////////////////////////////////////////////////////////
/// @brief sets the "throw collection not loaded error"
////////////////////////////////////////////////////////////////////////////////

void TRI_SetThrowCollectionNotLoadedVocBase (TRI_vocbase_t*,
                                             bool);

#endif

// -----------------------------------------------------------------------------

@@ -158,7 +158,7 @@ function get_api_aqlfunction (req, res) {
///
/// var response = logCurlRequest('POST', url, JSON.stringify(body));
///
-/// assert(response.code === 201);
+/// assert(response.code === 200 || response.code === 201 || response.code === 202);
///
/// logJsonResponse(response);
/// @END_EXAMPLE_ARANGOSH_RUN

@@ -105,6 +105,7 @@
"ERROR_ARANGO_INDEX_CREATION_FAILED" : { "code" : 1235, "message" : "index creation failed" },
"ERROR_ARANGO_WRITE_THROTTLE_TIMEOUT" : { "code" : 1236, "message" : "write-throttling timeout" },
"ERROR_ARANGO_COLLECTION_TYPE_MISMATCH" : { "code" : 1237, "message" : "collection type mismatch" },
"ERROR_ARANGO_COLLECTION_NOT_LOADED" : { "code" : 1238, "message" : "collection not loaded" },
"ERROR_ARANGO_DATAFILE_FULL" : { "code" : 1300, "message" : "datafile full" },
"ERROR_ARANGO_EMPTY_DATADIR" : { "code" : 1301, "message" : "server database directory is empty" },
"ERROR_REPLICATION_NO_RESPONSE" : { "code" : 1400, "message" : "no response" },

@@ -214,15 +214,26 @@
      return result;
    },

-   changeCollection: function (wfs, journalSize) {
+   changeCollection: function (wfs, journalSize, indexBuckets) {
      var result = false;
      if (wfs === "true") {
        wfs = true;
      }
      else if (wfs === "false") {
        wfs = false;
      }
      var data = {
        waitForSync: wfs,
        journalSize: parseInt(journalSize),
        indexBuckets: parseInt(indexBuckets)
      };

      $.ajax({
        cache: false,
        type: "PUT",
        async: false, // sequential calls!
        url: "/_api/collection/" + this.get("id") + "/properties",
-       data: '{"waitForSync":' + wfs + ',"journalSize":' + JSON.stringify(journalSize) + '}',
+       data: JSON.stringify(data),
        contentType: "application/json",
        processData: false,
        success: function() {

@@ -86,6 +86,13 @@
        <div class="modal-text"><%=content.get("status")%></div>
      </th>
    </tr>

    <tr>
      <th class="collectionInfoTh2">Index buckets:</th>
      <th class="collectionInfoTh">
        <div class="modal-text"><%=figuresData["indexBuckets"]%></div>
      </th>
    </tr>
  </table>

</div>

@@ -104,12 +104,6 @@
      var status = this.model.get('status');

      if (status === 'loaded') {
-       var result;
-       if (this.model.get('name') !== newname) {
-         result = this.model.renameCollection(newname);
-       }
-
-       var wfs = $('#change-collection-sync').val();
        var journalSize;
        try {
          journalSize = JSON.parse($('#change-collection-size').val() * 1024 * 1024);

@@ -118,7 +112,23 @@
          arangoHelper.arangoError('Please enter a valid number');
          return 0;
        }
-       var changeResult = this.model.changeCollection(wfs, journalSize);

        var indexBuckets;
        try {
          indexBuckets = JSON.parse($('#change-index-buckets').val());
          if (indexBuckets < 1 || parseInt(indexBuckets) !== Math.pow(2, Math.log2(indexBuckets))) {
            throw "invalid indexBuckets value";
          }
        }
        catch (e) {
          arangoHelper.arangoError('Please enter a valid number of index buckets');
          return 0;
        }

        var result;
        if (this.model.get('name') !== newname) {
          result = this.model.renameCollection(newname);
        }

        if (result !== true) {
          if (result !== undefined) {

@@ -127,19 +137,21 @@
          }
        }

+       var wfs = $('#change-collection-sync').val();
+       var changeResult = this.model.changeCollection(wfs, journalSize, indexBuckets);

        if (changeResult !== true) {
          arangoHelper.arangoNotification("Collection error", changeResult);
          return 0;
        }

-       if (changeResult === true) {
-         this.collectionsView.render();
-         window.modalView.hide();
-       }
+       this.collectionsView.render();
+       window.modalView.hide();
      }
      else if (status === 'unloaded') {
        if (this.model.get('name') !== newname) {
          var result2 = this.model.renameCollection(newname);

          if (result2 === true) {
            this.collectionsView.render();
            window.modalView.hide();

@@ -217,10 +229,26 @@
            ]
          )
        );
      }

      var indexBuckets = this.model.getProperties().indexBuckets;

      tableContent.push(
        window.modalView.createTextEntry(
          "change-index-buckets",
          "Index buckets",
          indexBuckets,
          "The number of index buckets for this collection. Must be at least 1 and a power of 2.",
          "",
          true,
          [
            {
              rule: Joi.string().allow('').optional().regex(/^[1-9][0-9]*$/),
              msg: "Must be a number greater than 1 and a power of 2."
            }
          ]
        )
      );


      if (collectionIsLoaded) {
        // prevent "unexpected sync method error"
        var wfs = this.model.getProperties().waitForSync;
        tableContent.push(

@@ -105,6 +105,7 @@
"ERROR_ARANGO_INDEX_CREATION_FAILED" : { "code" : 1235, "message" : "index creation failed" },
"ERROR_ARANGO_WRITE_THROTTLE_TIMEOUT" : { "code" : 1236, "message" : "write-throttling timeout" },
"ERROR_ARANGO_COLLECTION_TYPE_MISMATCH" : { "code" : 1237, "message" : "collection type mismatch" },
"ERROR_ARANGO_COLLECTION_NOT_LOADED" : { "code" : 1238, "message" : "collection not loaded" },
"ERROR_ARANGO_DATAFILE_FULL" : { "code" : 1300, "message" : "datafile full" },
"ERROR_ARANGO_EMPTY_DATADIR" : { "code" : 1301, "message" : "server database directory is empty" },
"ERROR_REPLICATION_NO_RESPONSE" : { "code" : 1400, "message" : "no response" },

@@ -134,6 +134,15 @@ if (global.SYS_DEFINE_ACTION) {
  delete global.SYS_DEFINE_ACTION;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief throw-collection-not-loaded
////////////////////////////////////////////////////////////////////////////////

if (global.THROW_COLLECTION_NOT_LOADED) {
  exports.throwOnCollectionNotLoaded = global.THROW_COLLECTION_NOT_LOADED;
  delete global.THROW_COLLECTION_NOT_LOADED;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief autoload modules from database
////////////////////////////////////////////////////////////////////////////////
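
A short usage sketch of the exported wrapper (mirroring how the new test suite
below uses it; this snippet itself is not part of the diff):

  var internal = require("internal");

  // read the current setting (the server default is false)
  var old = internal.throwOnCollectionNotLoaded();

  // make concurrent accesses to a still-loading collection fail with error 1238
  internal.throwOnCollectionNotLoaded(true);

  // ... perform operations ...

  // restore the previous setting
  internal.throwOnCollectionNotLoaded(old);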

@@ -0,0 +1,256 @@
/*jshint globalstrict:false, strict:false */
/*global assertTrue, assertEqual */

////////////////////////////////////////////////////////////////////////////////
/// @brief test the "throw collection not loaded" error behavior
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

var jsunity = require("jsunity");

var arangodb = require("org/arangodb");
var db = arangodb.db;
var internal = require("internal");
var ArangoCollection = require("org/arangodb/arango-collection").ArangoCollection;

// -----------------------------------------------------------------------------
// --SECTION-- throw-collection-not-loaded
// -----------------------------------------------------------------------------

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////

function ThrowCollectionNotLoadedSuite () {
  'use strict';
  var old;
  var cn = "UnitTestsThrowCollectionNotLoaded";

  return {

////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////

    setUp : function () {
      // fetch current settings
      old = internal.throwOnCollectionNotLoaded();
      db._drop(cn);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////

    tearDown : function () {
      db._drop(cn);
      // restore old settings
      internal.throwOnCollectionNotLoaded(old);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test regular loading of collection
////////////////////////////////////////////////////////////////////////////////

    testLoad : function () {
      internal.throwOnCollectionNotLoaded(false);

      var c = db._create(cn);
      c.save({ value: 1 });

      c.unload();
      c = null;
      internal.wal.flush(true, true);
      while (db._collection(cn).status() !== ArangoCollection.STATUS_UNLOADED) {
        internal.wait(0.5);
      }

      db._collection(cn).load();
      assertEqual(ArangoCollection.STATUS_LOADED, db._collection(cn).status());
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test regular loading of collection, but with flag turned on
////////////////////////////////////////////////////////////////////////////////

    testLoadWithFlag : function () {
      internal.throwOnCollectionNotLoaded(true);

      var c = db._create(cn);
      c.save({ value: 1 });

      c.unload();
      c = null;
      internal.wal.flush(true, true);
      while (db._collection(cn).status() !== ArangoCollection.STATUS_UNLOADED) {
        internal.wait(0.5);
      }

      db._collection(cn).load();
      assertEqual(ArangoCollection.STATUS_LOADED, db._collection(cn).status());
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test parallel loading of collection
////////////////////////////////////////////////////////////////////////////////

    testLoadParallel : function () {
      internal.throwOnCollectionNotLoaded(false);
      var tasks = require("org/arangodb/tasks");

      var c = db._create(cn);
      for (var i = 0; i < 10000; ++i) {
        c.save({ value: 1 });
      }

      db._drop(cn + "Collect");
      var cnCollect = cn + "Collect";
      db._create(cnCollect);

      c.unload();
      c = null;
      internal.wal.flush(true, true);
      while (db._collection(cn).status() !== ArangoCollection.STATUS_UNLOADED) {
        internal.wait(0.5);
      }

      var task = {
        offset: 0,
        params: { cn: cn },
        command: function (params) {
          var db = require('internal').db;
          try {
            for (var i = 0; i < 500; ++i) {
              db._collection(params.cn).load();
              db._collection(params.cn).unload();
            }
          }
          catch (err) {
            db._collection(params.cn + "Collect").save({ err: err.errorNum });
          }
        }
      };

      // spawn a few tasks that load and unload
      for (i = 0; i < 20; ++i) {
        task.id = "loadtest" + i;
        tasks.register(task);
      }

      // wait for tasks to join
      internal.wait(5);

      var errors = internal.errors;

      var found = db._collection(cnCollect).byExample({
        err: errors.ERROR_ARANGO_COLLECTION_NOT_LOADED.code
      }).toArray();
      db._drop(cnCollect);

      // we should have seen no "collection not loaded" errors
      assertEqual(0, found.length);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test parallel loading of collection, with flag
////////////////////////////////////////////////////////////////////////////////

    testLoadParallelWithFlag : function () {
      internal.throwOnCollectionNotLoaded(true);
      var tasks = require("org/arangodb/tasks");

      var c = db._create(cn);
      for (var i = 0; i < 10000; ++i) {
        c.save({ value: 1 });
      }

      db._drop(cn + "Collect");
      var cnCollect = cn + "Collect";
      db._create(cnCollect);

      c.unload();
      c = null;
      internal.wal.flush(true, true);
      while (db._collection(cn).status() !== ArangoCollection.STATUS_UNLOADED) {
        internal.wait(0.5);
      }

      var task = {
        offset: 0,
        params: { cn: cn },
        command: function (params) {
          var db = require('internal').db;
          try {
            for (var i = 0; i < 500; ++i) {
              db._collection(params.cn).load();
              db._collection(params.cn).unload();
            }
          }
          catch (err) {
            db._collection(params.cn + "Collect").save({ err: err.errorNum });
          }
        }
      };

      // spawn a few tasks that load and unload
      for (i = 0; i < 20; ++i) {
        task.id = "loadtest" + i;
        tasks.register(task);
      }

      // wait for tasks to join
      internal.wait(5);

      var errors = internal.errors;

      var found = db._collection(cnCollect).byExample({
        err: errors.ERROR_ARANGO_COLLECTION_NOT_LOADED.code
      }).toArray();
      db._drop(cnCollect);

      // we need to have seen at least one "collection not loaded" error
      assertTrue(found.length > 0);
    }

  };
}

// -----------------------------------------------------------------------------
// --SECTION-- main
// -----------------------------------------------------------------------------

////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////

jsunity.run(ThrowCollectionNotLoadedSuite);

return jsunity.done();

// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// @addtogroup\\|// --SECTION--\\|/// @page\\|/// @}\\)"
// End:

@@ -125,6 +125,7 @@ ERROR_ARANGO_INDEX_DOCUMENT_ATTRIBUTE_MISSING,1234,"index insertion warning - at
ERROR_ARANGO_INDEX_CREATION_FAILED,1235,"index creation failed","Will be raised when an attempt to create an index has failed."
ERROR_ARANGO_WRITE_THROTTLE_TIMEOUT,1236,"write-throttling timeout","Will be raised when the server is write-throttled and a write operation has waited too long for the server to process queued operations."
ERROR_ARANGO_COLLECTION_TYPE_MISMATCH,1237,"collection type mismatch","Will be raised when a collection has a different type from what has been expected."
ERROR_ARANGO_COLLECTION_NOT_LOADED,1238,"collection not loaded","Will be raised when a collection is accessed that is not yet loaded."

################################################################################
## ArangoDB storage errors

@@ -101,6 +101,7 @@ void TRI_InitialiseErrorMessages () {
  REG_ERROR(ERROR_ARANGO_INDEX_CREATION_FAILED, "index creation failed");
  REG_ERROR(ERROR_ARANGO_WRITE_THROTTLE_TIMEOUT, "write-throttling timeout");
  REG_ERROR(ERROR_ARANGO_COLLECTION_TYPE_MISMATCH, "collection type mismatch");
  REG_ERROR(ERROR_ARANGO_COLLECTION_NOT_LOADED, "collection not loaded");
  REG_ERROR(ERROR_ARANGO_DATAFILE_FULL, "datafile full");
  REG_ERROR(ERROR_ARANGO_EMPTY_DATADIR, "server database directory is empty");
  REG_ERROR(ERROR_REPLICATION_NO_RESPONSE, "no response");

@@ -226,6 +226,8 @@
/// - 1237: @LIT{collection type mismatch}
/// Will be raised when a collection has a different type from what has been
/// expected.
/// - 1238: @LIT{collection not loaded}
/// Will be raised when a collection is accessed that is not yet loaded.
/// - 1300: @LIT{datafile full}
/// Will be raised when the datafile reaches its limit.
/// - 1301: @LIT{server database directory is empty}

@@ -1642,6 +1644,16 @@ void TRI_InitialiseErrorMessages ();

#define TRI_ERROR_ARANGO_COLLECTION_TYPE_MISMATCH (1237)

////////////////////////////////////////////////////////////////////////////////
/// @brief 1238: ERROR_ARANGO_COLLECTION_NOT_LOADED
///
/// collection not loaded
///
/// Will be raised when a collection is accessed that is not yet loaded.
////////////////////////////////////////////////////////////////////////////////

#define TRI_ERROR_ARANGO_COLLECTION_NOT_LOADED (1238)

////////////////////////////////////////////////////////////////////////////////
/// @brief 1300: ERROR_ARANGO_DATAFILE_FULL
///

@@ -523,7 +523,16 @@ bool ApplicationScheduler::start () {
  buildSchedulerReporter();
  buildControlCHandler();

- bool ok = _scheduler->start(0);
+#ifdef TRI_HAVE_GETRLIMIT
+ struct rlimit rlim;
+ int res = getrlimit(RLIMIT_NOFILE, &rlim);
+
+ if (res == 0) {
+   LOG_INFO("file-descriptors (nofiles) hard limit is %d, soft limit is %d", (int) rlim.rlim_max, (int) rlim.rlim_cur);
+ }
+#endif

+ bool ok = _scheduler->start(nullptr);

  if (! ok) {
    LOG_FATAL_AND_EXIT("the scheduler cannot be started");

@@ -665,8 +674,8 @@ void ApplicationScheduler::adjustFileDescriptors () {
    if (res != 0) {
      LOG_FATAL_AND_EXIT("cannot get the file descriptor limit: %s", strerror(errno));
    }

-   LOG_DEBUG("hard limit is %d, soft limit is %d", (int) rlim.rlim_max, (int) rlim.rlim_cur);
+   LOG_DEBUG("file-descriptors (nofiles) hard limit is %d, soft limit is %d", (int) rlim.rlim_max, (int) rlim.rlim_cur);

    bool changed = false;

@@ -705,7 +714,7 @@ void ApplicationScheduler::adjustFileDescriptors () {
      LOG_FATAL_AND_EXIT("cannot get the file descriptor limit: %s", strerror(errno));
    }

-   LOG_DEBUG("new hard limit is %d, new soft limit is %d", (int) rlim.rlim_max, (int) rlim.rlim_cur);
+   LOG_INFO("file-descriptors (nofiles) new hard limit is %d, new soft limit is %d", (int) rlim.rlim_max, (int) rlim.rlim_cur);
  }

  // the select backend has more restrictions