mirror of https://gitee.com/bigwinds/arangodb

Merge branch 'devel' of github.com:triAGENS/ArangoDB into devel

This commit is contained in: aadec76f8e
@@ -24,6 +24,10 @@ v1.4

 v1.3.2 (2013-XX-XX)
 -------------------

+* fixed a locking issue in collection.truncate()
+
+* fixed value overflow in accumulated filesizes reported by collection.figures()
+
 * issue #545: AQL FILTER unnecessary (?) loop

 * issue #549: wrong return code with --daemon

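The two collection methods named in the new changelog entries can be exercised
from arangosh; a minimal sketch, assuming an existing collection (the name
"example" and the printed field are illustrative, not part of this commit):

    // arangosh -- "example" is a hypothetical collection name
    var c = db.example;
    c.truncate();           // removes all documents; the fix addresses a locking issue here
    var fig = c.figures();  // per-collection datafile statistics
    // the accumulated file sizes reported here could previously overflow a
    // 32-bit type; this commit widens the counters to 64-bit integers
    print(fig.datafiles);
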
@@ -227,7 +227,6 @@ SHELL_COMMON = @top_srcdir@/js/common/tests/shell-require.js \
   @top_srcdir@/js/common/tests/shell-attributes.js \
   @top_srcdir@/js/common/tests/shell-collection.js \
   @top_srcdir@/js/common/tests/shell-collection-volatile.js \
   @top_srcdir@/js/common/tests/shell-compactor.js \
   @top_srcdir@/js/common/tests/shell-crypto.js \
   @top_srcdir@/js/common/tests/shell-database.js \
   @top_srcdir@/js/common/tests/shell-document.js \

@@ -246,6 +245,7 @@ SHELL_COMMON = @top_srcdir@/js/common/tests/shell-require.js \
   @top_srcdir@/js/common/tests/shell-graph.js

 SHELL_SERVER_ONLY = \
   @top_srcdir@/js/server/tests/compaction.js \
   @top_srcdir@/js/server/tests/transactions.js \
   @top_srcdir@/js/server/tests/routing.js \
+  @top_srcdir@/js/common/tests/shell-foxx.js \

@@ -448,7 +448,7 @@ namespace triagens {
       size_t pos = TRI_UInt32Random() % total;
       void** beg = primary->_primaryIndex._table;

-      while (beg[pos] == 0 || (((TRI_doc_mptr_t*) beg[pos])->_validTo != 0)) {
+      while (beg[pos] == 0) {
         pos = (pos + 1) % total;
       }

@@ -506,9 +506,7 @@ namespace triagens {
       if (*ptr) {
         TRI_doc_mptr_t const* d = (TRI_doc_mptr_t const*) *ptr;

-        if (d->_validTo == 0) {
-          ids.push_back(d->_key);
-        }
+        ids.push_back(d->_key);
       }
     }
   }

@@ -573,11 +571,7 @@ namespace triagens {
     // skip from the beginning
     for (; ptr < end && 0 < skip; ++ptr) {
       if (*ptr) {
-        TRI_doc_mptr_t const* d = (TRI_doc_mptr_t const*) *ptr;
-
-        if (d->_validTo == 0) {
-          --skip;
-        }
+        --skip;
       }
     }
   }

@@ -587,14 +581,10 @@ namespace triagens {

     for (; beg <= ptr; --ptr) {
       if (*ptr) {
-        TRI_doc_mptr_t const* d = (TRI_doc_mptr_t const*) *ptr;
+        ++skip;

-        if (d->_validTo == 0) {
-          ++skip;
-
-          if (skip == 0) {
-            break;
-          }
-        }
+        if (skip == 0) {
+          break;
+        }
       }
     }

@@ -609,10 +599,8 @@ namespace triagens {
       if (*ptr) {
         TRI_doc_mptr_t* d = (TRI_doc_mptr_t*) *ptr;

-        if (d->_validTo == 0) {
-          docs.push_back(*d);
-          ++count;
-        }
+        docs.push_back(*d);
+        ++count;
       }
     }

@@ -682,14 +670,12 @@ namespace triagens {
       if (*ptr) {
         TRI_doc_mptr_t* d = (TRI_doc_mptr_t*) *ptr;

-        if (d->_validTo == 0) {
-          if (skip > 0) {
-            --skip;
-          }
-          else {
-            docs.push_back(*d);
-            ++count;
-          }
-        }
+        if (skip > 0) {
+          --skip;
+        }
+        else {
+          docs.push_back(*d);
+          ++count;
+        }
       }
     }

@@ -5191,6 +5191,48 @@ static v8::Handle<v8::Value> JS_ReplaceVocbaseCol (v8::Arguments const& argv) {
   return ReplaceVocbaseCol(true, argv);
 }

+////////////////////////////////////////////////////////////////////////////////
+/// @brief rotates the current journal of a collection
+///
+/// @FUN{@FA{collection}.rotate()}
+///
+/// Rotates the current journal of a collection (i.e. makes the journal a
+/// datafile and creates a new, empty datafile).
+/// This function is used during testing to force certain states and
+/// conditions. It is not intended to be used publicly
+////////////////////////////////////////////////////////////////////////////////
+
+static v8::Handle<v8::Value> JS_RotateVocbaseCol (v8::Arguments const& argv) {
+  v8::HandleScope scope;
+
+  v8::Handle<v8::Object> err;
+  TRI_vocbase_col_t const* collection = UseCollection(argv.Holder(), &err);
+
+  if (collection == 0) {
+    return scope.Close(v8::ThrowException(err));
+  }
+
+  TRI_primary_collection_t* primary = collection->_collection;
+  TRI_collection_t* base = &primary->base;
+
+  if (! TRI_IS_DOCUMENT_COLLECTION(base->_info._type)) {
+    ReleaseCollection(collection);
+    TRI_V8_EXCEPTION_INTERNAL(scope, "unknown collection type");
+  }
+
+  TRI_document_collection_t* document = (TRI_document_collection_t*) primary;
+
+  int res = TRI_RotateJournalDocumentCollection(document);
+
+  ReleaseCollection(collection);
+
+  if (res != TRI_ERROR_NO_ERROR) {
+    TRI_V8_EXCEPTION_MESSAGE(scope, res, "could not rotate journal");
+  }
+
+  return scope.Close(v8::Undefined());
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief updates a document
 ///

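The rotate() method added above is exposed on collections; a minimal arangosh
sketch (the collection name is hypothetical, and per the doc comment the
method is intended for testing only):

    // force the current journal of "example" to be turned into a datafile;
    // a new, empty journal is created on the next write
    db.example.rotate();
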
@@ -7131,6 +7173,7 @@ void TRI_InitV8VocBridge (v8::Handle<v8::Context> context,
   TRI_AddMethodVocbase(rt, "remove", JS_RemoveVocbaseCol);
   TRI_AddMethodVocbase(rt, "revision", JS_RevisionVocbaseCol);
   TRI_AddMethodVocbase(rt, "rename", JS_RenameVocbaseCol);
+  TRI_AddMethodVocbase(rt, "rotate", JS_RotateVocbaseCol, true);
   TRI_AddMethodVocbase(rt, "setAttribute", JS_SetAttributeVocbaseCol, true);
   TRI_AddMethodVocbase(rt, "status", JS_StatusVocbaseCol);
   TRI_AddMethodVocbase(rt, "truncate", JS_TruncateVocbaseCol);

@@ -278,26 +278,23 @@ bool TRI_LoadAuthInfo (TRI_vocbase_t* vocbase) {

   for (; ptr < end; ++ptr) {
     if (*ptr) {
+      TRI_vocbase_auth_t* auth;
       TRI_doc_mptr_t const* d;
       TRI_shaped_json_t shapedJson;

       d = (TRI_doc_mptr_t const*) *ptr;

-      if (d->_validTo == 0) {
-        TRI_vocbase_auth_t* auth;
-        TRI_EXTRACT_SHAPED_JSON_MARKER(shapedJson, d->_data);
+      TRI_EXTRACT_SHAPED_JSON_MARKER(shapedJson, d->_data);

-        auth = ConvertAuthInfo(vocbase, primary, &shapedJson);
+      auth = ConvertAuthInfo(vocbase, primary, &shapedJson);

-        if (auth != NULL) {
-          TRI_vocbase_auth_t* old;
+      if (auth != NULL) {
+        TRI_vocbase_auth_t* old;

-          old = TRI_InsertKeyAssociativePointer(&vocbase->_authInfo, auth->_username, auth, true);
+        old = TRI_InsertKeyAssociativePointer(&vocbase->_authInfo, auth->_username, auth, true);

-          if (old != NULL) {
-            FreeAuthInfo(old);
-          }
-        }
+        if (old != NULL) {
+          FreeAuthInfo(old);
+        }
       }
     }

@@ -42,6 +42,23 @@
 // --SECTION-- COLLECTION MIGRATION
 // -----------------------------------------------------------------------------

+// -----------------------------------------------------------------------------
+// --SECTION-- private types
+// -----------------------------------------------------------------------------
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief old-style master pointer (deprecated)
+////////////////////////////////////////////////////////////////////////////////
+
+typedef struct old_doc_mptr_s {
+  TRI_voc_rid_t _rid;       // this is the revision identifier
+  TRI_voc_fid_t _fid;       // this is the datafile identifier
+  TRI_voc_tick_t _validTo;  // this is the deletion time (0 if document is not yet deleted)
+  void const* _data;        // this is the pointer to the beginning of the raw marker
+  char* _key;               // this is the document identifier (string)
+}
+old_doc_mptr_t;
+
 // -----------------------------------------------------------------------------
 // --SECTION-- private functions
 // -----------------------------------------------------------------------------

@@ -96,7 +113,7 @@ static bool UpgradeOpenIterator (TRI_df_marker_t const* marker,
                                  void* data,
                                  TRI_datafile_t* datafile,
                                  bool journal) {
-  TRI_doc_mptr_t const* found;
+  old_doc_mptr_t const* found;
   TRI_associative_pointer_t* primaryIndex;
   TRI_voc_key_t key = NULL;

@@ -113,7 +130,7 @@ static bool UpgradeOpenIterator (TRI_df_marker_t const* marker,

   // it is a new entry
   if (found == NULL) {
-    TRI_doc_mptr_t* header;
+    old_doc_mptr_t* header;

     header = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_doc_mptr_t), true);
     if (header == NULL) {

@@ -133,7 +150,7 @@ static bool UpgradeOpenIterator (TRI_df_marker_t const* marker,
   // it is an update, but only if found has a smaller revision identifier
   else if (found->_rid < d->_rid ||
            (found->_rid == d->_rid && found->_fid <= datafile->_fid)) {
-    TRI_doc_mptr_t* newHeader;
+    old_doc_mptr_t* newHeader;

     newHeader = CONST_CAST(found);

@@ -156,7 +173,7 @@ static bool UpgradeOpenIterator (TRI_df_marker_t const* marker,

   // it is a new entry, so we missed the create
   if (found == NULL) {
-    TRI_doc_mptr_t* header;
+    old_doc_mptr_t* header;

     header = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_doc_mptr_t), true);
     if (header == NULL) {

@@ -175,7 +192,7 @@ static bool UpgradeOpenIterator (TRI_df_marker_t const* marker,

   // it is a real delete
   else if (found->_validTo == 0) {
-    TRI_doc_mptr_t* newHeader;
+    old_doc_mptr_t* newHeader;

     newHeader = CONST_CAST(found);

@@ -401,8 +418,10 @@ static TRI_col_file_structure_t ScanCollectionDirectory (char const* path) {
       else {
         int res;

+        // this should fail, but shouldn't do any harm either...
+        TRI_UnlinkFile(newName);
+
         // rename the compactor to a datafile
         res = TRI_RenameFile(filename, newName);

         if (res != TRI_ERROR_NO_ERROR) {

@@ -768,11 +787,17 @@ static bool IterateDatafilesVector (const TRI_vector_pointer_t* const files,
   size_t i, n;

   n = files->_length;

   for (i = 0; i < n; ++i) {
     TRI_datafile_t* datafile;
     int result;

     datafile = (TRI_datafile_t*) TRI_AtVectorPointer(files, i);

+    LOG_TRACE("iterating over datafile '%s', fid %llu",
+              datafile->getName(datafile),
+              (unsigned long long) datafile->_fid);
+
     result = TRI_IterateDatafile(datafile, iterator, data, false);

     if (! result) {

@@ -1684,7 +1709,7 @@ int TRI_UpgradeCollection (TRI_vocbase_t* vocbase,

   // go over all documents in the index and calculate the total length
   for (i = 0; i < primaryIndex._nrAlloc; ++i) {
-    TRI_doc_mptr_t* header = primaryIndex._table[i];
+    old_doc_mptr_t* header = primaryIndex._table[i];

     if (header != NULL && header->_validTo == 0) {
       TRI_df_marker_t const* marker = header->_data;

@@ -1750,7 +1775,7 @@ int TRI_UpgradeCollection (TRI_vocbase_t* vocbase,

   // write all surviving documents into the datafile
   for (i = 0; i < primaryIndex._nrAlloc; ++i) {
-    TRI_doc_mptr_t* mptr = primaryIndex._table[i];
+    old_doc_mptr_t* mptr = primaryIndex._table[i];

     if (mptr != NULL && mptr->_validTo == 0) {
       TRI_df_marker_t const* marker = mptr->_data;

@@ -96,6 +96,7 @@ typedef struct compaction_context_s {
   TRI_document_collection_t* _document;
   TRI_datafile_t* _compactor;
   TRI_doc_datafile_info_t _dfi;
+  bool _keepDeletions;
 }
 compaction_context_t;

@@ -117,18 +118,16 @@ compaction_context_t;
 ////////////////////////////////////////////////////////////////////////////////

 static TRI_datafile_t* CreateCompactor (TRI_document_collection_t* document,
-                                        TRI_datafile_t const* datafile) {
+                                        TRI_voc_fid_t fid,
+                                        TRI_voc_size_t maximalSize) {
   TRI_collection_t* collection;
   TRI_datafile_t* compactor;
-  TRI_voc_fid_t fid;

   collection = &document->base.base;

   TRI_LOCK_JOURNAL_ENTRIES_DOC_COLLECTION(document);

-  // we are re-using the _fid of the datafile!
-  fid = datafile->_fid;
-  compactor = TRI_CreateCompactorPrimaryCollection(&document->base, fid, datafile->_maximalSize);
+  compactor = TRI_CreateCompactorPrimaryCollection(&document->base, fid, maximalSize);

   if (compactor != NULL) {
     TRI_PushBackVectorPointer(&collection->_compactors, compactor);

@@ -156,6 +155,7 @@ static int CopyMarker (TRI_document_collection_t* document,

   if (res != TRI_ERROR_NO_ERROR) {
+    document->base.base._lastError = TRI_set_errno(TRI_ERROR_ARANGO_NO_JOURNAL);

     return TRI_ERROR_ARANGO_NO_JOURNAL;
   }

@@ -256,6 +256,15 @@ static void DropDatafileCallback (TRI_datafile_t* datafile, void* data) {

 ////////////////////////////////////////////////////////////////////////////////
 /// @brief callback to rename a datafile
+///
+/// The datafile will be renamed to "temp-abc.db" (where "abc" is the fid of
+/// the datafile) first. If this rename operation fails, there will be a
+/// compactor file and a datafile. On startup, the datafile will be preferred
+/// in this case.
+/// If renaming succeeds, the compactor will be named to the original datafile.
+/// If that does not succeed, there is a compactor file and a renamed datafile.
+/// On startup, the compactor file will be used, and the renamed datafile
+/// will be treated as a temporary file and dropped.
 ////////////////////////////////////////////////////////////////////////////////

 static void RenameDatafileCallback (TRI_datafile_t* datafile, void* data) {

@@ -309,7 +318,8 @@ static void RenameDatafileCallback (TRI_datafile_t* datafile, void* data) {
   if (ok) {
     TRI_doc_datafile_info_t* dfi;
     size_t i;

+    // must acquire a write-lock as we're about to change the datafiles vector
     TRI_WRITE_LOCK_DATAFILES_DOC_COLLECTION(primary);

     if (! LocateDatafile(&primary->base._datafiles, datafile->_fid, &i)) {

@@ -386,7 +396,7 @@ static bool Compactifier (TRI_df_marker_t const* marker,
     TRI_READ_LOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(primary);

     found = TRI_LookupByKeyAssociativePointer(&primary->_primaryIndex, key);
-    deleted = (found == NULL || found->_validTo != 0 || found->_rid > marker->_tick);
+    deleted = (found == NULL || found->_rid > marker->_tick);

     TRI_READ_UNLOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(primary);

@@ -406,7 +416,7 @@ static bool Compactifier (TRI_df_marker_t const* marker,
     TRI_WRITE_LOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(primary);

     found = TRI_LookupByKeyAssociativePointer(&primary->_primaryIndex, key);
-    deleted = found == NULL || found->_validTo != 0;
+    deleted = found == NULL;

     if (deleted) {
       context->_dfi._numberDead += 1;

@@ -436,7 +446,8 @@ static bool Compactifier (TRI_df_marker_t const* marker,
   }

   // deletion
-  else if (marker->_type == TRI_DOC_MARKER_KEY_DELETION) {
+  else if (marker->_type == TRI_DOC_MARKER_KEY_DELETION &&
+           context->_keepDeletions) {
     // write to compactor files
     res = CopyMarker(document, context->_compactor, marker, &result);

@@ -458,7 +469,7 @@ static bool Compactifier (TRI_df_marker_t const* marker,
       LOG_FATAL_AND_EXIT("cannot write compactor file: %s", TRI_last_error());
     }

-    context->_dfi._numberTransaction += 1;
+    context->_dfi._numberTransaction++;
     context->_dfi._sizeTransaction += (int64_t) marker->_size;
   }

@@ -540,13 +551,14 @@ static int RemoveEmpty (TRI_document_collection_t* document,
 ////////////////////////////////////////////////////////////////////////////////

 static void CompactifyDatafile (TRI_document_collection_t* document,
-                                TRI_voc_fid_t fid) {
+                                TRI_voc_fid_t fid,
+                                bool keepDeletions) {
   TRI_datafile_t* df;
   TRI_datafile_t* compactor;
   TRI_primary_collection_t* primary;
   compaction_context_t context;
-  bool ok;
   size_t i;
+  bool ok;

   primary = &document->base;

@@ -565,9 +577,10 @@ static void CompactifyDatafile (TRI_document_collection_t* document,
   df = primary->base._datafiles._buffer[i];
   TRI_READ_UNLOCK_DATAFILES_DOC_COLLECTION(primary);

-  // now create a compactor file for it
-  compactor = CreateCompactor(document, df);
+  // we are re-using the _fid of the original datafile!
+  compactor = CreateCompactor(document, df->_fid, df->_maximalSize);

   if (compactor == NULL) {
     // some error occurred

@@ -587,6 +600,9 @@ static void CompactifyDatafile (TRI_document_collection_t* document,
   memset(&context._dfi, 0, sizeof(TRI_doc_datafile_info_t));
   // set _fid
   context._dfi._fid = df->_fid;
+  // if this is the first datafile in the list of datafiles, we can also collect
+  // deletion markers
+  context._keepDeletions = keepDeletions;

   ok = TRI_IterateDatafile(df, Compactifier, &context, false);

@@ -597,24 +613,28 @@ static void CompactifyDatafile (TRI_document_collection_t* document,
   }

   // locate the compactor
-  TRI_READ_LOCK_DATAFILES_DOC_COLLECTION(primary);
+  // must acquire a write-lock as we're about to change the datafiles vector
+  TRI_WRITE_LOCK_DATAFILES_DOC_COLLECTION(primary);

   if (! LocateDatafile(&primary->base._compactors, compactor->_fid, &i)) {
     // not found
-    TRI_READ_UNLOCK_DATAFILES_DOC_COLLECTION(primary);
+    TRI_WRITE_UNLOCK_DATAFILES_DOC_COLLECTION(primary);

     LOG_ERROR("logic error in CompactifyDatafile: could not find compactor");
     return;
   }

-  TRI_READ_UNLOCK_DATAFILES_DOC_COLLECTION(primary);
-
   if (! TRI_CloseCompactorPrimaryCollection(primary, i)) {
+    TRI_WRITE_UNLOCK_DATAFILES_DOC_COLLECTION(primary);
+
     LOG_ERROR("could not close compactor file");
+    // TODO: how do we recover from this state?
     return;
   }

+  TRI_WRITE_UNLOCK_DATAFILES_DOC_COLLECTION(primary);
+
   if (context._dfi._numberAlive == 0 &&
       context._dfi._numberDead == 0 &&
       context._dfi._numberDeletion == 0 &&

@@ -642,6 +662,7 @@ static void CompactifyDatafile (TRI_document_collection_t* document,
   memcpy(copy, &context, sizeof(compaction_context_t));

   b = TRI_CreateBarrierRenameDatafile(&primary->_barrierList, df, RenameDatafileCallback, copy);

   if (b == NULL) {
+    LOG_ERROR("out of memory when creating datafile-rename barrier");
     TRI_Free(TRI_CORE_MEM_ZONE, copy);

@@ -656,6 +677,7 @@ static void CompactifyDatafile (TRI_document_collection_t* document,
 static bool CompactifyDocumentCollection (TRI_document_collection_t* document) {
   TRI_primary_collection_t* primary;
   TRI_vector_t vector;
+  int64_t numAlive;
   size_t i, n;

   primary = &document->base;

@@ -668,44 +690,71 @@ static bool CompactifyDocumentCollection (TRI_document_collection_t* document) {
   }

   n = primary->base._datafiles._length;

   if (primary->base._compactors._length > 0 || n == 0) {
     // we already have created a compactor file in progress.
     // if this happens, then a previous compaction attempt for this collection failed

+    // additionally, if there are no datafiles, then there's no need to compact
     TRI_READ_UNLOCK_DATAFILES_DOC_COLLECTION(primary);

     return false;
   }

   // copy datafile information
   TRI_InitVector(&vector, TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_doc_datafile_info_t));
+  numAlive = 0;

   for (i = 0; i < n; ++i) {
     TRI_datafile_t* df;
     TRI_doc_datafile_info_t* dfi;
-    double share;
+    bool shouldCompact;

     df = primary->base._datafiles._buffer[i];

     assert(df != NULL);

     dfi = TRI_FindDatafileInfoPrimaryCollection(primary, df->_fid, false);

-    if (dfi == NULL || dfi->_numberDead == 0 || dfi->_sizeDead < (int64_t) COMPACTOR_DEAD_SIZE_THRESHOLD) {
+    if (dfi == NULL) {
       continue;
     }

-    share = (double) dfi->_sizeDead / ((double) dfi->_sizeDead + (double) dfi->_sizeAlive);
+    shouldCompact = false;

-    if (share < COMPACTOR_DEAD_SIZE_SHARE) {
+    if (numAlive == 0 && dfi->_numberDeletion > 0) {
+      // compact first datafile already if it has got some deletions
+      shouldCompact = true;
+    }
+    else {
+      // in all other cases, only check the number and size of "dead" objects
+      if (dfi->_sizeDead >= (int64_t) COMPACTOR_DEAD_SIZE_THRESHOLD) {
+        shouldCompact = true;
+      }
+      else if (dfi->_sizeDead > 0) {
+        // the size of dead objects is above some threshold
+        double share = (double) dfi->_sizeDead / ((double) dfi->_sizeDead + (double) dfi->_sizeAlive);
+
+        if (share >= COMPACTOR_DEAD_SIZE_SHARE) {
+          // the size of dead objects is above some share
+          shouldCompact = true;
+        }
+      }
+    }
+
+    if (! shouldCompact) {
+      numAlive += (int64_t) dfi->_numberAlive;
       continue;
     }

     LOG_TRACE("found datafile eligible for compaction. fid: %llu, "
-              "numberDead: %llu, numberAlive: %llu, numberTransaction: %llu, "
+              "numberDead: %llu, numberAlive: %llu, numberTransaction: %llu, numberDeletion: %llu, "
               "sizeDead: %llu, sizeAlive: %llu, sizeTransaction: %llu",
               (unsigned long long) df->_fid,
               (unsigned long long) dfi->_numberDead,
               (unsigned long long) dfi->_numberAlive,
               (unsigned long long) dfi->_numberTransaction,
+              (unsigned long long) dfi->_numberDeletion,
              (unsigned long long) dfi->_sizeDead,
              (unsigned long long) dfi->_sizeAlive,
              (unsigned long long) dfi->_sizeTransaction);

@@ -721,23 +770,25 @@ static bool CompactifyDocumentCollection (TRI_document_collection_t* document) {
     // will not pick this up as it is read-locking the collection status)
     break;
   }

   // can now continue without the lock
   TRI_READ_UNLOCK_DATAFILES_DOC_COLLECTION(primary);

   if (vector._length == 0) {
     // cleanup local variables
     TRI_DestroyVector(&vector);

     return false;
   }

   // handle datafiles with dead objects
   n = vector._length;
+  assert(n == 1);

   for (i = 0; i < n; ++i) {
     TRI_doc_datafile_info_t* dfi = TRI_AtVector(&vector, i);

-    assert(dfi->_numberDead > 0);
+    assert(dfi->_numberDead > 0 || dfi->_numberDeletion > 0);

     LOG_TRACE("compacting datafile. fid: %llu, "
               "numberDead: %llu, numberAlive: %llu, numberTransaction: %llu, "

@@ -750,7 +801,13 @@ static bool CompactifyDocumentCollection (TRI_document_collection_t* document) {
               (unsigned long long) dfi->_sizeAlive,
               (unsigned long long) dfi->_sizeTransaction);

-    CompactifyDatafile(document, dfi->_fid);
+    // should we keep delete markers in the compacted datafile?
+    // note that this is not necessary for the first datafile, and not
+    // for any following if there have been no "alive" markers so far
+    // if we once allow more than one datafile in the vector, this logic must
+    // probably be adjusted...
+
+    CompactifyDatafile(document, dfi->_fid, numAlive > 0);
   }

   // cleanup local variables

@@ -1540,7 +1540,7 @@ int TRI_SealDatafile (TRI_datafile_t* datafile) {
   /*
     res = ftruncate(datafile->_fd, datafile->_currentSize);
     Linux centric problems:
-      Under windows can not reduce size of the memory mapped file without unmappping it!
+      Under windows can not reduce size of the memory mapped file without unmapping it!
      However, apparently we may have users
   */
 #else

@@ -1557,6 +1557,7 @@ int TRI_SealDatafile (TRI_datafile_t* datafile) {

     datafile->_isSealed = true;
     datafile->_state = TRI_DF_STATE_READ;
+    datafile->_maximalSize = datafile->_currentSize;
   }

   if (! ok) {

@@ -97,7 +97,7 @@ static int PriorityQueueFromJson (TRI_document_collection_t*,
 ////////////////////////////////////////////////////////////////////////////////

 static bool IsVisible (TRI_doc_mptr_t const* header) {
-  return (header != NULL && header->_validTo == 0);
+  return (header != NULL);
 }

 ////////////////////////////////////////////////////////////////////////////////

@@ -145,11 +145,6 @@ static int InsertPrimaryIndex (TRI_document_collection_t* document,
   TRI_ASSERT_MAINTAINER(header != NULL);
   TRI_ASSERT_MAINTAINER(header->_key != NULL);

-  if (header->_validTo != 0) {
-    // don't insert in case the document is deleted
-    return TRI_ERROR_NO_ERROR;
-  }
-
   primary = &document->base;

   // add a new header

@@ -165,10 +160,9 @@ static int InsertPrimaryIndex (TRI_document_collection_t* document,
     IncreaseDocumentCount(primary);

     return TRI_ERROR_NO_ERROR;
   }
-
-  // we found a previous revision in the index
-  if (found->_validTo == 0) {
+  else {
+    // we found a previous revision in the index
     // the found revision is still alive
     LOG_TRACE("document '%s' already existed with revision %llu while creating revision %llu",
               header->_key,

@@ -177,14 +171,6 @@ static int InsertPrimaryIndex (TRI_document_collection_t* document,

     return TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED;
   }
-
-  // a deleted document was found in the index. now insert again and overwrite
-  // this should be an exceptional case
-  found = TRI_InsertKeyAssociativePointer(&primary->_primaryIndex, header->_key, (void*) header, true);
-  IncreaseDocumentCount(primary);
-
-  // overwriting does not change the size of the index and should always succeed
-  return TRI_ERROR_NO_ERROR;
 }

 ////////////////////////////////////////////////////////////////////////////////

@@ -244,9 +230,7 @@ static int DeletePrimaryIndex (TRI_document_collection_t* document,
     return TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND;
   }

-  if (found->_validTo == 0) {
-    DecreaseDocumentCount(primary);
-  }
+  DecreaseDocumentCount(primary);

   return TRI_ERROR_NO_ERROR;
 }

@@ -547,7 +531,6 @@ static int CreateHeader (TRI_document_collection_t* document,

   header->_rid = tick;
   header->_fid = fid;
-  header->_validTo = 0; // document deletion time, 0 means "infinitely valid"
   header->_data = marker;
   header->_key = ((char*) marker) + marker->_offsetKey;

@@ -573,6 +556,43 @@ static int CreateHeader (TRI_document_collection_t* document,
 /// @{
 ////////////////////////////////////////////////////////////////////////////////

+////////////////////////////////////////////////////////////////////////////////
+/// @brief closes a journal, and triggers creation of a new one
+/// this is used internally for testing
+////////////////////////////////////////////////////////////////////////////////
+
+static int RotateJournal (TRI_document_collection_t* document) {
+  TRI_collection_t* base;
+  int res;
+
+  base = &document->base.base;
+  res = TRI_ERROR_ARANGO_NO_JOURNAL;
+
+  TRI_LOCK_JOURNAL_ENTRIES_DOC_COLLECTION(document);
+
+  if (base->_state == TRI_COL_STATE_WRITE) {
+    size_t n;
+
+    n = base->_journals._length;
+
+    if (n > 0) {
+      TRI_datafile_t* datafile;
+
+      datafile = base->_journals._buffer[0];
+      datafile->_full = true;
+
+      TRI_INC_SYNCHRONISER_WAITER_VOCBASE(base->_vocbase);
+      TRI_WAIT_JOURNAL_ENTRIES_DOC_COLLECTION(document);
+      TRI_DEC_SYNCHRONISER_WAITER_VOCBASE(base->_vocbase);
+
+      res = TRI_ERROR_NO_ERROR;
+    }
+  }
+
+  TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(document);
+  return res;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief selects a journal, possibly waits until a journal appears
 ///

@@ -1094,8 +1114,6 @@ static void UpdateHeader (TRI_voc_fid_t fid,
   newHeader->_fid = fid;
   newHeader->_data = marker;
   newHeader->_key = ((char*) marker) + marker->_offsetKey;
-
-  newHeader->_validTo = oldHeader->_validTo;
 }

 ////////////////////////////////////////////////////////////////////////////////

|
|||
if (*ptr) {
|
||||
TRI_doc_mptr_t const* d = *ptr;
|
||||
|
||||
printf("fid %llu, key %s, rid %llu, validTo %llu\n",
|
||||
printf("fid %llu, key %s, rid %llu\n",
|
||||
(unsigned long long) d->_fid,
|
||||
(char*) d->_key,
|
||||
(unsigned long long) d->_rid,
|
||||
(unsigned long long) d->_validTo);
|
||||
(unsigned long long) d->_rid);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1954,7 +1971,6 @@ static int OpenIteratorApplyInsert (open_iterator_state_t* state,

       // update the header info
       UpdateHeader(operation->_fid, marker, newHeader, found);
-      newHeader->_validTo = 0;
       document->_headers->moveBack(document->_headers, newHeader);

       // update the datafile info

|
|||
state->_dfi->_numberAlive++;
|
||||
state->_dfi->_sizeAlive += (int64_t) marker->_size;
|
||||
}
|
||||
|
||||
if (oldData._validTo > 0) {
|
||||
// TODO: remove this
|
||||
LOG_WARNING("encountered wrong document marker order when loading collection. did not expect old.validTo > 0");
|
||||
// we resurrected a deleted marker
|
||||
// increase the count by one now because we did not count the document previously
|
||||
IncreaseDocumentCount(primary);
|
||||
}
|
||||
}
|
||||
|
||||
// it is a delete
|
||||
else if (found->_validTo != 0) {
|
||||
// TODO: remove this
|
||||
LOG_WARNING("encountered wrong document marker order when loading collection. did not expect found.validTo > 0");
|
||||
}
|
||||
|
||||
// it is a stale update
|
||||
|
@@ -2057,7 +2059,7 @@ static int OpenIteratorApplyRemove (open_iterator_state_t* state,
   }

   // it is a real delete
-  else if (found->_validTo == 0) {
+  else {
     TRI_doc_datafile_info_t* dfi;

     // update the datafile info

@@ -2088,11 +2090,6 @@ static int OpenIteratorApplyRemove (open_iterator_state_t* state,
     document->_headers->release(document->_headers, CONST_CAST(found));
   }

-  // it is a double delete
-  else {
-    LOG_TRACE("skipping deletion of already deleted document: %s", (char*) key);
-  }
-
   return TRI_ERROR_NO_ERROR;
 }

@@ -2305,8 +2302,10 @@ static int OpenIteratorHandleDocumentMarker (TRI_df_marker_t const* marker,
     // marker has a transaction id
     if (d->_tid != state->_tid) {
       // we have a different transaction ongoing
-      LOG_WARNING("logic error in %s. found tid: %llu, expected tid: %llu",
+      LOG_WARNING("logic error in %s, fid %llu. found tid: %llu, expected tid: %llu. "
+                  "this may also be the result of an aborted transaction",
                  __FUNCTION__,
+                  (unsigned long long) datafile->_fid,
                  (unsigned long long) d->_tid,
                  (unsigned long long) state->_tid);
       OpenIteratorAbortTransaction(state);

@@ -2334,8 +2333,10 @@ static int OpenIteratorHandleDeletionMarker (TRI_df_marker_t const* marker,
     // marker has a transaction id
     if (d->_tid != state->_tid) {
       // we have a different transaction ongoing
-      LOG_WARNING("logic error in %s. found tid: %llu, expected tid: %llu",
+      LOG_WARNING("logic error in %s, fid %llu. found tid: %llu, expected tid: %llu. "
+                  "this may also be the result of an aborted transaction",
                  __FUNCTION__,
+                  (unsigned long long) datafile->_fid,
                  (unsigned long long) d->_tid,
                  (unsigned long long) state->_tid);

@@ -2362,8 +2363,10 @@ static int OpenIteratorHandleBeginMarker (TRI_df_marker_t const* marker,

   if (m->_tid != state->_tid && state->_tid != 0) {
     // some incomplete transaction was going on before us...
-    LOG_WARNING("logic error in %s. found tid: %llu, expected tid: %llu",
+    LOG_WARNING("logic error in %s, fid %llu. found tid: %llu, expected tid: %llu. "
+                "this may also be the result of an aborted transaction",
                __FUNCTION__,
+                (unsigned long long) datafile->_fid,
                (unsigned long long) m->_tid,
                (unsigned long long) state->_tid);
     OpenIteratorAbortTransaction(state);

@@ -2386,8 +2389,9 @@ static int OpenIteratorHandleCommitMarker (TRI_df_marker_t const* marker,

   if (m->_tid != state->_tid) {
     // we found a commit marker, but we did not find any begin marker beforehand. strange
-    LOG_WARNING("logic error in %s. found tid: %llu, expected tid: %llu",
+    LOG_WARNING("logic error in %s, fid %llu. found tid: %llu, expected tid: %llu",
                __FUNCTION__,
+                (unsigned long long) datafile->_fid,
                (unsigned long long) m->_tid,
                (unsigned long long) state->_tid);
     OpenIteratorAbortTransaction(state);

@@ -2414,8 +2418,9 @@ static int OpenIteratorHandlePrepareMarker (TRI_df_marker_t const* marker,

   if (m->_tid != state->_tid) {
     // we found a commit marker, but we did not find any begin marker beforehand. strange
-    LOG_WARNING("logic error in %s. found tid: %llu, expected tid: %llu",
+    LOG_WARNING("logic error in %s, fid %llu. found tid: %llu, expected tid: %llu",
                __FUNCTION__,
+                (unsigned long long) datafile->_fid,
                (unsigned long long) m->_tid,
                (unsigned long long) state->_tid);
     OpenIteratorAbortTransaction(state);

@@ -2439,8 +2444,9 @@ static int OpenIteratorHandleAbortMarker (TRI_df_marker_t const* marker,

   if (m->_tid != state->_tid) {
     // we found an abort marker, but we did not find any begin marker beforehand. strange
-    LOG_WARNING("logic error in %s. found tid: %llu, expected tid: %llu",
+    LOG_WARNING("logic error in %s, fid %llu. found tid: %llu, expected tid: %llu",
                __FUNCTION__,
+                (unsigned long long) datafile->_fid,
                (unsigned long long) m->_tid,
                (unsigned long long) state->_tid);
   }

@@ -5924,6 +5930,15 @@ void TRI_SetRevisionDocumentCollection (TRI_document_collection_t* document,
   info->_tick = tick;
 }

+////////////////////////////////////////////////////////////////////////////////
+/// @brief rotate the current journal of the collection
+/// use this for testing only
+////////////////////////////////////////////////////////////////////////////////
+
+int TRI_RotateJournalDocumentCollection (TRI_document_collection_t* document) {
+  return RotateJournal(document);
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @}
 ////////////////////////////////////////////////////////////////////////////////

@@ -646,7 +646,7 @@ struct TRI_index_s* TRI_EnsurePriorityQueueIndexDocumentCollection (TRI_document
 ////////////////////////////////////////////////////////////////////////////////

 // -----------------------------------------------------------------------------
-// --SECTION-- SELECT BY EXAMPLE QUERY
+// --SECTION-- public functions
 // -----------------------------------------------------------------------------

 ////////////////////////////////////////////////////////////////////////////////

@@ -678,6 +678,13 @@ int TRI_DeleteDocumentDocumentCollection (struct TRI_transaction_collection_s*,
 void TRI_SetRevisionDocumentCollection (TRI_document_collection_t*,
                                         TRI_voc_rid_t);

+////////////////////////////////////////////////////////////////////////////////
+/// @brief rotate the current journal of the collection
+/// use this for testing only
+////////////////////////////////////////////////////////////////////////////////
+
+int TRI_RotateJournalDocumentCollection (TRI_document_collection_t*);
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @}
 ////////////////////////////////////////////////////////////////////////////////

@@ -121,11 +121,12 @@ static void DebugDatafileInfoDatafile (TRI_primary_collection_t* primary,
     return;
   }

-  printf(" number alive: %ld\n", (long) dfi->_numberAlive);
-  printf(" size alive: %ld\n", (long) dfi->_sizeAlive);
-  printf(" number dead: %ld\n", (long) dfi->_numberDead);
-  printf(" size dead: %ld\n", (long) dfi->_sizeDead);
-  printf(" deletion: %ld\n\n", (long) dfi->_numberDeletion);
+  printf(" number alive: %llu\n", (unsigned long long) dfi->_numberAlive);
+  printf(" size alive: %llu\n", (unsigned long long) dfi->_sizeAlive);
+  printf(" number dead: %llu\n", (unsigned long long) dfi->_numberDead);
+  printf(" size dead: %llu\n", (unsigned long long) dfi->_sizeDead);
+  printf(" deletion: %llu\n", (unsigned long long) dfi->_numberDeletion);
+  printf("\n");
 }

 ////////////////////////////////////////////////////////////////////////////////

@@ -517,7 +518,7 @@ static TRI_doc_collection_info_t* Figures (TRI_primary_collection_t* primary) {
   for (i = 0; i < base->_journals._length; ++i) {
     TRI_datafile_t* df = (TRI_datafile_t*) base->_journals._buffer[i];

-    info->_journalfileSize += df->_maximalSize;
+    info->_journalfileSize += (int64_t) df->_maximalSize;
     ++info->_numberJournalfiles;
   }

@@ -754,10 +755,8 @@ size_t TRI_DocumentIteratorPrimaryCollection (TRI_primary_collection_t* primary,
     if (*ptr) {
       TRI_doc_mptr_t const* d = (TRI_doc_mptr_t const*) *ptr;

-      if (d->_validTo == 0) {
-        if (! callback(d, data)) {
-          break;
-        }
-      }
+      if (! callback(d, data)) {
+        break;
+      }
     }
   }

@@ -124,7 +124,6 @@ struct TRI_primary_collection_s;
 typedef struct TRI_doc_mptr_s {
   TRI_voc_rid_t _rid; // this is the revision identifier
   TRI_voc_fid_t _fid; // this is the datafile identifier
-  TRI_voc_tick_t _validTo; // this is the deletion time (0 if document is not yet deleted)
   void const* _data; // this is the pointer to the beginning of the raw marker
   char* _key; // this is the document identifier (string)
   struct TRI_doc_mptr_s* _prev; // previous master pointer

@@ -168,8 +167,7 @@ typedef struct TRI_doc_collection_info_s {
   int64_t _sizeTransaction; // populated only during compaction

   int64_t _datafileSize;
-  TRI_voc_ssize_t _journalfileSize;
+  int64_t _journalfileSize;

   TRI_voc_ssize_t _numberShapes;
   TRI_voc_ssize_t _numberAttributes;

@@ -41,6 +41,10 @@
     "path": "api-docs/index",
     "description": "Interface for Indexes"
   },
+  {
+    "path": "api-docs/aqlfunction",
+    "description": "Interface for AQL user functions"
+  },
   {
     "path": "api-docs/simple",
     "description": "Interface for SimpleQueries"

@@ -0,0 +1,297 @@
+/*jslint indent: 2, nomen: true, maxlen: 100, white: true plusplus: true */
+/*global $, d3, _, console, document*/
+/*global AbstractAdapter*/
+////////////////////////////////////////////////////////////////////////////////
+/// @brief Graph functionality
+///
+/// @file
+///
+/// DISCLAIMER
+///
+/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
+///
+/// Licensed under the Apache License, Version 2.0 (the "License");
+/// you may not use this file except in compliance with the License.
+/// You may obtain a copy of the License at
+///
+///     http://www.apache.org/licenses/LICENSE-2.0
+///
+/// Unless required by applicable law or agreed to in writing, software
+/// distributed under the License is distributed on an "AS IS" BASIS,
+/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+/// See the License for the specific language governing permissions and
+/// limitations under the License.
+///
+/// Copyright holder is triAGENS GmbH, Cologne, Germany
+///
+/// @author Michael Hackstein
+/// @author Copyright 2011-2013, triAGENS GmbH, Cologne, Germany
+////////////////////////////////////////////////////////////////////////////////
+
+function FoxxAdapter(nodes, edges, route, config) {
+  "use strict";
+
+  if (nodes === undefined) {
+    throw "The nodes have to be given.";
+  }
+  if (edges === undefined) {
+    throw "The edges have to be given.";
+  }
+  if (route === undefined) {
+    throw "The route has to be given.";
+  }
+
+  var self = this,
+    absAdapter = new AbstractAdapter(nodes, edges),
+    routes = {},
+    baseRoute = route,
+    requestBase = {
+      cache: false,
+      contentType: "application/json",
+      dataType: "json",
+      processData: false,
+      error: function(data) {
+        try {
+          console.log(data.statusText);
+          throw "[" + data.errorNum + "] " + data.errorMessage;
+        }
+        catch (e) {
+          console.log(e);
+          throw "Undefined ERROR";
+        }
+      }
+    },
+
+    fillRoutes = function () {
+      routes.query = {
+        get: function(id, cb) {
+          var reqinfo = $.extend(requestBase, {
+            type: "GET",
+            url: baseRoute + "/query/" + id,
+            success: cb
+          });
+          $.ajax(reqinfo);
+        }
+      };
+      routes.nodes = {
+        post: function(data, cb) {
+          var reqinfo = $.extend(requestBase, {
+            type: "POST",
+            url: baseRoute + "/nodes",
+            data: JSON.stringify(data),
+            success: cb
+          });
+          $.ajax(reqinfo);
+        },
+        put: function(id, data, cb) {
+          var reqinfo = $.extend(requestBase, {
+            type: "PUT",
+            url: baseRoute + "/nodes/" + id,
+            data: JSON.stringify(data),
+            success: cb
+          });
+          $.ajax(reqinfo);
+        },
+        del: function(id, cb) {
+          var reqinfo = $.extend(requestBase, {
+            type: "DELETE",
+            url: baseRoute + "/nodes/" + id,
+            success: cb
+          });
+          $.ajax(reqinfo);
+        }
+      };
+      routes.edges = {
+        post: function(data, cb) {
+          var reqinfo = $.extend(requestBase, {
+            type: "POST",
+            url: baseRoute + "/edges",
+            data: JSON.stringify(data),
+            success: cb
+          });
+          $.ajax(reqinfo);
+        },
+        put: function(id, data, cb) {
+          var reqinfo = $.extend(requestBase, {
+            type: "PUT",
+            url: baseRoute + "/edges/" + id,
+            data: JSON.stringify(data),
+            success: cb
+          });
+          $.ajax(reqinfo);
+        },
+        del: function(id, cb) {
+          var reqinfo = $.extend(requestBase, {
+            type: "DELETE",
+            url: baseRoute + "/edges/" + id,
+            success: cb
+          });
+          $.ajax(reqinfo);
+        }
+      };
+      routes.forNode = {
+        del: function(id, cb) {
+          var reqinfo = $.extend(requestBase, {
+            type: "DELETE",
+            url: baseRoute + "/edges/forNode/" + id,
+            success: cb
+          });
+          $.ajax(reqinfo);
+        }
+      };
+    },
+
+    sendGet = function (type, id, callback) {
+      routes[type].get(id, callback);
+    },
+
+    sendPost = function (type, data, callback) {
+      routes[type].post(data, callback);
+    },
+
+    sendDelete = function (type, id, callback) {
+      routes[type].del(id, callback);
+    },
+
+    sendPut = function (type, id, data, callback) {
+      routes[type].put(id, data, callback);
+    },
+
+    parseConfig = function(config) {
+      /*
+      if (config.host === undefined) {
+        arangodb = "http://" + document.location.host;
+      } else {
+        arangodb = config.host;
+      }
+      */
+      if (config.width !== undefined) {
+        absAdapter.setWidth(config.width);
+      }
+      if (config.height !== undefined) {
+        absAdapter.setHeight(config.height);
+      }
+    },
+
+    parseResult = function (result, callback) {
+      var inserted = {},
+        first = result.nodes[0];
+      first = absAdapter.insertNode(first);
+      _.each(result.nodes, function(n) {
+        n = absAdapter.insertNode(n);
+        inserted[n._id] = n;
+      });
+      _.each(result.edges, function(e) {
+        absAdapter.insertEdge(e);
+      });
+      delete inserted[first._id];
+      absAdapter.checkSizeOfInserted(inserted);
+      absAdapter.checkNodeLimit(first);
+      if (callback !== undefined && _.isFunction(callback)) {
+        callback(first);
+      }
+    };
+
+  config = config || {};
+
+  parseConfig(config);
+  fillRoutes();
+
+  self.loadNode = function(nodeId, callback) {
+    sendGet("query", nodeId, function(result) {
+      parseResult(result, callback);
+    });
+  };
+
+  self.requestCentralityChildren = function(nodeId, callback) {
+    /*
+    sendQuery(queries.childrenCentrality,{
+      id: nodeId
+    }, function(res) {
+      callback(res[0]);
+    });
+    */
+  };
+
+  self.createEdge = function (edgeToAdd, callback) {
+    var toSend = _.clone(edgeToAdd);
+    toSend._from = edgeToAdd.source._id;
+    toSend._to = edgeToAdd.target._id;
+    delete toSend.source;
+    delete toSend.target;
+    sendPost("edges", toSend, function(data) {
+      data._from = edgeToAdd.source._id;
+      data._to = edgeToAdd.target._id;
+      delete data.error;
+      var edge = absAdapter.insertEdge(data);
+      if (callback !== undefined && _.isFunction(callback)) {
+        callback(edge);
+      }
+    });
+  };
+
+  self.deleteEdge = function (edgeToRemove, callback) {
+    sendDelete("edges", edgeToRemove._id, function() {
+      absAdapter.removeEdge(edgeToRemove);
+      if (callback !== undefined && _.isFunction(callback)) {
+        callback();
+      }
+    });
+  };
+
+  self.patchEdge = function (edgeToPatch, patchData, callback) {
+    sendPut("edges", edgeToPatch._id, patchData, function(data) {
+      edgeToPatch._data = $.extend(edgeToPatch._data, patchData);
+      if (callback !== undefined && _.isFunction(callback)) {
+        callback();
+      }
+    });
+  };
+
+  self.createNode = function (nodeToAdd, callback) {
+    sendPost("nodes", nodeToAdd, function(data) {
+      absAdapter.insertNode(data);
+      if (callback !== undefined && _.isFunction(callback)) {
+        callback(data);
+      }
+    });
+  };
+
+  self.deleteNode = function (nodeToRemove, callback) {
+    sendDelete("nodes", nodeToRemove._id, function() {
+      absAdapter.removeEdgesForNode(nodeToRemove);
+      sendDelete("forNode", nodeToRemove._id, function() {});
+      absAdapter.removeNode(nodeToRemove);
+      if (callback !== undefined && _.isFunction(callback)) {
+        callback();
+      }
+    });
+  };
+
+  self.patchNode = function (nodeToPatch, patchData, callback) {
+    sendPut("nodes", nodeToPatch._id, patchData, function(data) {
+      nodeToPatch._data = $.extend(nodeToPatch._data, patchData);
+      if (callback !== undefined && _.isFunction(callback)) {
+        callback(nodeToPatch);
+      }
+    });
+  };
+
+  self.setNodeLimit = function (pLimit, callback) {
+    absAdapter.setNodeLimit(pLimit, callback);
+  };
+
+  self.setChildLimit = function (pLimit) {
+    absAdapter.setChildLimit(pLimit);
+  };
+
+  self.expandCommunity = function (commNode, callback) {
+    absAdapter.expandCommunity(commNode);
+    if (callback !== undefined) {
+      callback();
+    }
+  };
+
+}

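A minimal usage sketch for the new FoxxAdapter; the mount point "foxx/route",
the node id, and the dimensions are illustrative, and AbstractAdapter is
assumed to be loaded:

    // the adapter fills the nodes/edges arrays via the Foxx app's REST routes
    var nodes = [],
        edges = [],
        adapter = new FoxxAdapter(nodes, edges, "foxx/route", {width: 640, height: 480});

    // issues GET foxx/route/query/<id>; the result's nodes and edges are inserted
    adapter.loadNode("nodes/123", function(firstNode) {
      // nodes and edges now contain the loaded neighbourhood of firstNode
    });
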
@@ -128,7 +128,6 @@

   });

-
   describe('checking the interface', function() {
     var testee;

@@ -273,6 +272,17 @@
       expect(newNode.y).toBeLessThan(3 * height / 4);
     });

+    it('should encapsulate all attributes in _data', function() {
+      var data = {
+        _id: 1,
+        name: "Alice",
+        age: 42
+      },
+      newNode = adapter.insertNode(data);
+      expect(newNode).toBeDefined();
+      expect(newNode._data).toEqual(data);
+    });
+
     it('should be able to delete a node', function() {
       var toDelete = {_id: 1},
       nodeToDelete = adapter.insertNode(toDelete);

@@ -364,6 +374,19 @@
       expect(target._inboundCounter).toEqual(1);
     });

+    it('should encapsulate all attributes in _data', function() {
+      var data = {
+        _id: "1-2",
+        _from: sourceid,
+        _to: targetid,
+        label: "MyLabel",
+        color: "black"
+      },
+      edge = adapter.insertEdge(data);
+      expect(edge._data).toBeDefined();
+      expect(edge._data).toEqual(data);
+    });
+
     it('should be able to delete an edge', function() {
       var toDelete = {
         _id: "1-2",

@@ -4,7 +4,7 @@
 /*global runs, spyOn, waitsFor, waits */
 /*global window, eb, loadFixtures, document */
 /*global $, _, d3*/
-/*global describeInterface*/
+/*global describeInterface, describeIntegeration*/
 /*global FoxxAdapter*/

 ////////////////////////////////////////////////////////////////////////////////

@@ -42,146 +42,46 @@

   describeInterface(new FoxxAdapter([], [], "foxx/route"));

-  describeIntegeration(new FoxxAdapter([], [], "foxx/route"));
+  describeIntegeration(function() {
+    spyOn($, "ajax").andCallFake(function(req) {
+      var node1 = {_id: 1},
+        node2 = {_id: 2},
+        edge = {_id: "1-2", _from: 1, _to: 2};
+
+      switch(req.type) {
+        case "DELETE":
+          req.success();
+          break;
+        case "POST":
+          if (req.url.match(/nodes$/)) {
+            req.success({_id: 1});
+          } else if (req.url.match(/edges/)) {
+            req.success({_id: "1-2"});
+          }
+          break;
+        case "GET":
+          req.success({
+            nodes: [{_id: 1}, {_id: 2}],
+            edges: [{_id: "1-2", _from: 1, _to: 2}]
+          });
+          break;
+        default:
+          req.success();
+      }
+    });
+
+    return new FoxxAdapter([], [], "foxx/route");
+  });

   var adapter,
     nodes,
-    edges,
-    arangodb = "http://localhost:8529",
-    nodesCollection,
-    edgesCollection,
-    mockCollection,
-    callbackCheck,
-    checkCallbackFunction = function() {
-      callbackCheck = true;
-    },
-
-    getCommunityNodes = function() {
-      return _.filter(nodes, function(n) {
-        return n._id.match(/^\*community/);
-      });
-    },
-
-    getCommunityNodesIds = function() {
-      return _.pluck(getCommunityNodes(), "_id");
-    },
-
-    nodeWithID = function(id) {
-      return $.grep(nodes, function(e){
-        return e._id === id;
-      })[0];
-    },
-    edgeWithSourceAndTargetId = function(sourceId, targetId) {
-      return $.grep(edges, function(e){
-        return e.source._id === sourceId
-          && e.target._id === targetId;
-      })[0];
-    },
-    existNode = function(id) {
-      var node = nodeWithID(id);
-      expect(node).toBeDefined();
-      expect(node._id).toEqual(id);
-    },
-
-    notExistNode = function(id) {
-      var node = nodeWithID(id);
-      expect(node).toBeUndefined();
-    },
-
-    existEdge = function(source, target) {
-      var edge = edgeWithSourceAndTargetId(source, target);
-      expect(edge).toBeDefined();
-      expect(edge.source._id).toEqual(source);
-      expect(edge.target._id).toEqual(target);
-    },
-
-    notExistEdge = function(source, target) {
-      var edge = edgeWithSourceAndTargetId(source, target);
-      expect(edge).toBeUndefined();
-    },
-
-    existNodes = function(ids) {
-      _.each(ids, existNode);
-    },
-
-    notExistNodes = function(ids) {
-      _.each(ids, notExistNode);
-    },
-
-    insertEdge = function (collectionID, from, to, cont) {
-      var key = String(Math.floor(Math.random()*100000)),
-        id = collectionID + "/" + key;
-      cont = cont || {};
-      mockCollection[collectionID] = mockCollection[collectionID] || {};
-      mockCollection[collectionID][from] = mockCollection[collectionID][from] || {};
-      cont._id = id;
-      cont._key = key;
-      cont._rev = key;
-      cont._from = from;
-      cont._to = to;
-      mockCollection[collectionID][from][to] = cont;
-      return id;
-    },
-    insertNode = function (collectionID, nodeId, cont) {
-      var key = String(Math.floor(Math.random()*100000)),
-        id = collectionID + "/" + key;
-      cont = cont || {};
-      mockCollection[collectionID] = mockCollection[collectionID] || {};
-      cont.id = nodeId;
-      cont._id = id;
-      cont._key = key;
-      cont._rev = key;
-      mockCollection[collectionID][id] = cont;
-      return id;
-    },
-    readEdge = function (collectionID, from, to) {
-      return mockCollection[collectionID][from._id][to._id];
-    },
-    readNode = function (collectionID, id) {
-      return mockCollection[collectionID][id];
-    },
-    constructPath = function(colNodes, colEdges, from, to) {
-      var obj = {},
-        src = readNode(colNodes, from),
-        tar = readNode(colNodes, to);
-      obj.vertex = tar;
-      obj.path = {
-        edges: [
-          readEdge(colEdges, src, tar)
-        ],
-        vertices: [
-          src,
-          tar
-        ]
-      };
-      return obj;
-    };
+    edges;

   beforeEach(function() {
     nodes = [];
     edges = [];

     this.addMatchers({
       toHaveCorrectCoordinates: function() {
         var list = this.actual,
           evil;
         _.each(list, function(n) {
           if (isNaN(n.x) || isNaN(n.y)) {
             evil = n;
           }
         });
         this.message = function() {
           return "Expected " + JSON.stringify(evil) + " to contain Numbers as X and Y.";
         };
         return evil === undefined;
       }
     });
   });

   afterEach(function() {
     expect(nodes).toHaveCorrectCoordinates();
   });

   it('should throw an error if no nodes are given', function() {
     expect(
       function() {

@@ -201,29 +101,35 @@
   it('should throw an error if no route is given', function() {
     expect(
       function() {
-        var t = new FoxxAdapter([], [], {
-          edgeCollection: ""
-        });
+        var t = new FoxxAdapter([], []);
       }
     ).toThrow("The route has to be given.");
   });

-  });
-
   it('should not throw an error if necessary info is given', function() {
     expect(
       function() {
         var t = new FoxxAdapter([], [], "foxx/route");
       }
     ).not.toThrow();
   });

+  /*
   it('should automatically determine the host of relative route is given', function() {
-    var route = "foxx/route"
+    var route = "foxx/route",
+      args,
+      host;
     adapter = new FoxxAdapter(
       nodes,
       edges,
       route
-    ),
-    args,
-    host;
+    );
     spyOn($, "ajax");
     adapter.createNode({}, function() {});
     args = $.ajax.mostRecentCall.args[0];
     host = window.location.protocol + "//" + window.location.host + "/" + route;
     expect(args.url).toContain(host);
   });
+  */

   it('should create a nodeReducer instance', function() {
     spyOn(window, "NodeReducer");
     var adapter = new FoxxAdapter(

@@ -244,10 +150,11 @@

beforeEach(function() {
  var self = this,
    route = "foxx/route"
    host = window.location.protocol + "//"
    route = "foxx/route",
    /*host = window.location.protocol + "//"
      + window.location.host + "/"
      + route;
      + route;*/
    host = route;
  self.fakeReducerRequest = function() {};
  self.fakeReducerBucketRequest = function() {};
  spyOn(window, "NodeReducer").andCallFake(function(v, e) {

@@ -270,40 +177,14 @@
  );
  edgeRoute = host + "/edges";
  nodeRoute = host + "/nodes";
  queryRoute = host + "/query";

  loadGraph = function(data) {
    var res = [],
      nid,
      ncol = nodesCollection,
      ecol = edgesCollection,
      inner = [],
      first = {},
      node1 = readNode(ncol, nid);
    res.push(inner);
    first.vertex = node1;
    first.path = {
      edges: [],
      vertices: [
        node1
      ]
    };
    inner.push(first);
    if (mockCollection[ecol][nid] !== undefined) {
      _.each(mockCollection[ecol][nid], function(val, key) {
        inner.push(constructPath(ncol, ecol, nid, key));
      });
    }
    return res;
  };

  queryRoute = host + "/query";

  requests = {};
  requests.query = function(data) {
  requests.query = function(id) {
    return {
      type: 'POST',
      url: queryRoute,
      data: data,
      type: 'GET',
      cache: false,
      url: queryRoute + "/" + id,
      contentType: 'application/json',
      dataType: 'json',
      success: jasmine.any(Function),
@@ -325,14 +206,18 @@
      return $.extend(base, {url: nodeRoute, type: "POST", data: JSON.stringify(data)});
    },
    patch: function(id, data) {
      return $.extend(base, {url: nodeRoute + "/" + id, type: "PUT", data: JSON.stringify(data)});
      return $.extend(base, {
        url: nodeRoute + "/" + id,
        type: "PUT",
        data: JSON.stringify(data)
      });
    },
    del: function(id) {
      return $.extend(base, {url: nodeRoute + "/" + id, type: "DELETE"});
    }
  };
};
requests.edge = function(col) {
requests.edge = function() {
  var base = {
    cache: false,
    dataType: "json",
@@ -351,406 +236,138 @@
    });
  },
  patch: function(id, data) {
    return $.extend(base, {url: edgeRoute + "/" + id, type: "PUT", data: JSON.stringify(data)});
    return $.extend(base, {
      url: edgeRoute + "/" + id,
      type: "PUT",
      data: JSON.stringify(data)
    });
  },
  del: function(id) {
    return $.extend(base, {url: edgeRoute + "/" + id, type: "DELETE"});
  },
  delForNode: function(id) {
    return $.extend(base, {url: edgeRoute + "/forNode/" + id, type: "DELETE"});
  }
  };
};
});

it('should be able to load by internal _id attribute', function() {

  var c0, c1, c2, c3, c4;

it('should be able to load a graph', function() {

  var called, id;

  runs(function() {
    spyOn($, "ajax").andCallFake(function(request) {
      request.success({result: loadGraph(JSON.parse(request.data))});
    called = false;
    id = 1;
    spyOn($, "ajax").andCallFake(function(req) {
      req.success({
        nodes: [{_id: 1}, {_id: 2}],
        edges: [{_id: "1-2", _from: 1, _to: 2}]
      });
    });

    c0 = insertNode(nodesCollection, 0);
    c1 = insertNode(nodesCollection, 1);
    c2 = insertNode(nodesCollection, 2);
    c3 = insertNode(nodesCollection, 3);
    c4 = insertNode(nodesCollection, 4);

    insertEdge(edgesCollection, c0, c1);
    insertEdge(edgesCollection, c0, c2);
    insertEdge(edgesCollection, c0, c3);
    insertEdge(edgesCollection, c0, c4);

    callbackCheck = false;
    adapter.loadNode(c0, checkCallbackFunction);
    var callback = function() {
      called = true;
    };
    adapter.loadNode(id, callback);
  });


  waitsFor(function() {
    return callbackCheck;
    return called;
  }, 1000);


  runs(function() {
    existNodes([c0, c1, c2, c3, c4]);
    expect(nodes.length).toEqual(5);
    expect($.ajax).toHaveBeenCalledWith(
      requests.cursor(traversalQuery(c0, nodesCollection, edgesCollection))
    );
  });
});

it('should map loadNode to loadByID', function() {
  spyOn(adapter, "loadNodeFromTreeById");
  adapter.loadNode("a", "b");
  expect(adapter.loadNodeFromTreeById).toHaveBeenCalledWith("a", "b");
});

it('should be able to load a tree node from ArangoDB'
  + ' by internal attribute and value', function() {

  var c0, c1, c2, c3, c4;

  runs(function() {
    spyOn($, "ajax").andCallFake(function(request) {
      var vars = JSON.parse(request.data).bindVars;
      if (vars !== undefined) {
        vars.id = c0;
        request.success({result: loadGraph(vars)});
      }
    });


    c0 = insertNode(nodesCollection, 0);
    c1 = insertNode(nodesCollection, 1);
    c2 = insertNode(nodesCollection, 2);
    c3 = insertNode(nodesCollection, 3);
    c4 = insertNode(nodesCollection, 4);

    insertEdge(edgesCollection, c0, c1);
    insertEdge(edgesCollection, c0, c2);
    insertEdge(edgesCollection, c0, c3);
    insertEdge(edgesCollection, c0, c4);

    callbackCheck = false;
    adapter.loadNodeFromTreeByAttributeValue("id", 0, checkCallbackFunction);
  });

  waitsFor(function() {
    return callbackCheck;
  });

  runs(function() {
    existNodes([c0, c1, c2, c3, c4]);
    expect(nodes.length).toEqual(5);
    expect($.ajax).toHaveBeenCalledWith(
      requests.cursor(filterQuery(0, nodesCollection, edgesCollection))
    );
  });
});

it('should be able to request the number of children centrality', function() {
  var c0,
    children;
  runs(function() {
    c0 = insertNode(nodesCollection, 0);
    spyOn($, "ajax").andCallFake(function(request) {
      request.success({result: [4]});
    });

    callbackCheck = false;
    adapter.requestCentralityChildren(c0, function(count) {
      callbackCheck = true;
      children = count;
    });
  });

  waitsFor(function() {
    return callbackCheck;
  });

  runs(function() {
    expect(children).toEqual(4);
    expect($.ajax).toHaveBeenCalledWith(
      requests.cursor(childrenQuery(c0, nodesCollection, edgesCollection))
    );
  });
});

it('should encapsulate all attributes of nodes and edges in _data', function() {
  var c0, c1, e1_2;

  runs(function() {

    spyOn($, "ajax").andCallFake(function(request) {
      var vars = JSON.parse(request.data).bindVars;
      if (vars !== undefined) {
        request.success({result: loadGraph(vars)});
      }
    });

    c0 = insertNode(nodesCollection, 0, {name: "Alice", age: 42});
    c1 = insertNode(nodesCollection, 1, {name: "Bob", age: 1337});
    e1_2 = insertEdge(edgesCollection, c0, c1, {label: "knows"});

    callbackCheck = false;
    adapter.loadNodeFromTreeById(c0, checkCallbackFunction);
  });

  waitsFor(function() {
    return callbackCheck;
  });

  runs(function() {
    expect(nodes[0]._data).toEqual({
      _id: c0,
      _key: jasmine.any(String),
      _rev: jasmine.any(String),
      id: 0,
      name: "Alice",
      age: 42
    });
    expect(nodes[1]._data).toEqual({
      _id: c1,
      _key: jasmine.any(String),
      _rev: jasmine.any(String),
      id: 1,
      name: "Bob",
      age: 1337
    });
    expect(edges[0]._data).toEqual({
      _id: e1_2,
      _from: c0,
      _to: c1,
      _key: jasmine.any(String),
      _rev: jasmine.any(String),
      label: "knows"
    });
    expect($.ajax).toHaveBeenCalledWith(
      requests.cursor(traversalQuery(c0, nodesCollection, edgesCollection))
    );
  });


});

it('should be able to switch to different collections', function() {
  var c0, c1, e1_2, insertedId;

  runs(function() {

    spyOn($, "ajax").andCallFake(function(request) {
      var vars = JSON.parse(request.data).bindVars;
      if (vars !== undefined) {
        request.success({result: loadGraph(vars)});
      } else {
        request.success({result: {}});
      }
    });

    c0 = insertNode(altNodesCollection, 0);
    c1 = insertNode(altNodesCollection, 1);
    e1_2 = insertEdge(altEdgesCollection, c0, c1);

    adapter.changeTo(altNodesCollection, altEdgesCollection);

    callbackCheck = false;
    adapter.loadNodeFromTreeById(c0, checkCallbackFunction);
  });

  waitsFor(function() {
    return callbackCheck;
  }, 1000);

  runs(function() {
    existNodes([c0, c1]);
    expect(nodes.length).toEqual(2);
    expect(edges.length).toEqual(1);
    expect($.ajax).toHaveBeenCalledWith(
      requests.cursor(traversalQuery(c0, altNodesCollection, altEdgesCollection))
    );

    callbackCheck = false;
    adapter.createNode({}, function(node) {
      insertedId = node._id;
      callbackCheck = true;
    });
  });

  waitsFor(function() {
    return callbackCheck;
  }, 1000);

  runs(function() {
    existNode(insertedId);
    expect($.ajax).toHaveBeenCalledWith(
      requests.node(altNodesCollection).create({})
      requests.query(id)
    );
  });

});

it('should be able to switch to different collections and change to directed', function() {

  runs(function() {

    spyOn($, "ajax");

    adapter.changeTo(altNodesCollection, altEdgesCollection, false);

    adapter.loadNode("42");

    expect($.ajax).toHaveBeenCalledWith(
      requests.cursor(traversalQuery("42", altNodesCollection, altEdgesCollection, false))
    );

  });

it('should be able to insert a node', function() {
  spyOn($, "ajax");
  var node = {_id: 1};
  adapter.createNode(node);
  expect($.ajax).wasCalledWith(requests.node().create(node));
});

it('should be able to switch to different collections'
  + ' and change to undirected', function() {

  runs(function() {

    spyOn($, "ajax");

    adapter.changeTo(altNodesCollection, altEdgesCollection, true);

    adapter.loadNode("42");

    expect($.ajax).toHaveBeenCalledWith(
      requests.cursor(traversalQuery("42", altNodesCollection, altEdgesCollection, true))
    );

  });
it('should be able to change a node', function() {
  spyOn($, "ajax");
  var toPatch = {_id: 1},
    data = {name: "Alice"};
  adapter.patchNode(toPatch, data);
  expect($.ajax).wasCalledWith(requests.node().patch(toPatch._id, data));
});

it('should add at most the upper bound of children in one step', function() {
  var inNodeCol, callNodes;

  runs(function() {
    var addNNodes = function(n) {
      var i = 0,
        res = [];
      for (i = 0; i < n; i++) {
        res.push(insertNode(nodesCollection, i));
      }
      return res;
    },
    connectToAllButSelf = function(source, ns) {
      _.each(ns, function(target) {
        if (source !== target) {
          insertEdge(edgesCollection, source, target);
        }
      });
    };

    inNodeCol = addNNodes(21);
    connectToAllButSelf(inNodeCol[0], inNodeCol);
    adapter.setChildLimit(5);

    spyOn($, "ajax").andCallFake(function(request) {
      var vars = JSON.parse(request.data).bindVars;
      if (vars !== undefined) {
        request.success({result: loadGraph(vars)});
      }
    });
    spyOn(this, "fakeReducerBucketRequest").andCallFake(function(ns) {
      var i = 0,
        res = [],
        pos;
      callNodes = ns;
      for (i = 0; i < 5; i++) {
        pos = i*4;
        res.push(ns.slice(pos, pos + 4));
      }
      return res;
    });

    callbackCheck = false;
    adapter.loadNodeFromTreeById(inNodeCol[0], checkCallbackFunction);

  });

  waitsFor(function() {
    return callbackCheck;
  });

  runs(function() {
    var callNodesIds = _.map(callNodes, function(n) {
      return n._id;
    });
    expect(this.fakeReducerBucketRequest).toHaveBeenCalledWith(
      jasmine.any(Array),
      5
    );
    expect(callNodesIds).toEqual(inNodeCol.slice(1));
    expect(nodes.length).toEqual(6);
    expect(getCommunityNodes().length).toEqual(5);
it('should be able to delete a node', function() {
  // Checks first ajax call and omits propagation
  spyOn($, "ajax");
  var node = {_id: 1};
  adapter.deleteNode(node);
  expect($.ajax).wasCalledWith(requests.node().del(node._id));
});

it('should delete adjacent edges on node delete', function() {
  // Checks the second (and last) ajax call.
  spyOn($, "ajax").andCallFake(function(req) {
    req.success();
  });
  var node = {_id: 1};
  adapter.deleteNode(node);
  expect($.ajax).wasCalledWith(requests.edge().delForNode(node._id));
});

it('should be able to insert an edge', function() {
  spyOn($, "ajax");
  var source = {_id: 1},
    target = {_id: 2},
    edge = {
      source: source,
      target: target,
      label: "Foxx"
    };
  adapter.createEdge(edge);
  expect($.ajax).wasCalledWith(requests.edge().create(
    source._id, target._id, {label: "Foxx"})
  );
});

it('should be able to change an edge', function() {
  spyOn($, "ajax");
  var source = {_id: 1},
    target = {_id: 2},
    edge = {
      _id: "1-2",
      source: source,
      target: target
    },
    patch = {
      label: "Foxx"
    };
  adapter.patchEdge(edge, patch);
  expect($.ajax).wasCalledWith(requests.edge().patch(edge._id, patch));
});

it('should be able to delete an edge', function() {
  spyOn($, "ajax");
  var source = {_id: 1},
    target = {_id: 2},
    edge = {
      _id: "1-2",
      source: source,
      target: target
    };
  adapter.deleteEdge(edge);
  expect($.ajax).wasCalledWith(requests.edge().del(edge._id));
});


describe('that has already loaded a graph', function() {

});

it('should not replace single nodes by communities', function() {
  var inNodeCol, callNodes;

  runs(function() {
    var addNNodes = function(n) {
      var i = 0,
        res = [];
      for (i = 0; i < n; i++) {
        res.push(insertNode(nodesCollection, i));
      }
      return res;
    },
    connectToAllButSelf = function(source, ns) {
      _.each(ns, function(target) {
        if (source !== target) {
          insertEdge(edgesCollection, source, target);
        }
      });
    };

    inNodeCol = addNNodes(7);
    connectToAllButSelf(inNodeCol[0], inNodeCol);
    adapter.setChildLimit(5);

    spyOn($, "ajax").andCallFake(function(request) {
      var vars = JSON.parse(request.data).bindVars;
      if (vars !== undefined) {
        request.success({result: loadGraph(vars)});
      }
    });
    spyOn(this, "fakeReducerBucketRequest").andCallFake(function(ns) {
      var i = 0,
        res = [],
        pos;
      for (i = 0; i < 4; i++) {
        res.push([ns[i]]);
      }
      res.push([ns[4], ns[5]]);
      return res;
    });

    callbackCheck = false;
    adapter.loadNodeFromTreeById(inNodeCol[0], checkCallbackFunction);

  });

  waitsFor(function() {
    return callbackCheck;
  });

  runs(function() {
    var callNodesIds = _.map(callNodes, function(n) {
      return n._id;
    });
    expect(this.fakeReducerBucketRequest).toHaveBeenCalledWith(
      jasmine.any(Array),
      5
    );
    expect(nodes.length).toEqual(6);
    expect(getCommunityNodes().length).toEqual(1);
  });

});

/*
describe('that has already loaded one graph', function() {
  var c0, c1, c2, c3, c4, c5, c6, c7,
    fakeResult, spyHook;

@@ -793,7 +410,7 @@
  insertEdge(edgesCollection, c1, c7);

  callbackCheck = false;
  adapter.loadNodeFromTreeById(c0, checkCallbackFunction);
  adapter.loadNode(c0, checkCallbackFunction);

  this.addMatchers({
    toBeStoredPermanently: function() {

@@ -877,7 +494,7 @@
it('should be able to add nodes from another query', function() {

  runs(function() {
    adapter.loadNodeFromTreeById(c1, checkCallbackFunction);
    adapter.loadNode(c1, checkCallbackFunction);
  });

  waitsFor(function() {

@@ -992,7 +609,7 @@
  spyOn(this, "fakeReducerRequest").andCallFake(function() {
    return [c0];
  });
  adapter.loadNodeFromTreeById(c1, checkCallbackFunction);
  adapter.loadNode(c1, checkCallbackFunction);
  expect(this.fakeReducerRequest).toHaveBeenCalledWith(6, nodeWithID(c1));
});
});

@@ -1049,7 +666,7 @@
  spyOn(this, "fakeReducerRequest").andCallFake(function() {
    return [c0, c1, c2, c3];
  });
  adapter.loadNodeFromTreeById(c1, checkCallbackFunction);
  adapter.loadNode(c1, checkCallbackFunction);
});

waitsFor(function() {

@@ -1246,7 +863,7 @@
  spyOn(this, "fakeReducerRequest").andCallFake(function() {
    return fakeResult;
  });
  adapter.loadNodeFromTreeById(c1, checkCallbackFunction);
  adapter.loadNode(c1, checkCallbackFunction);
});

waitsFor(function() {

@@ -1379,7 +996,7 @@
  insertEdge(edgesCollection, c3, c9);

  callbackCheck = false;
  adapter.loadNodeFromTreeById(c2, checkCallbackFunction);
  adapter.loadNode(c2, checkCallbackFunction);
});

waitsFor(function() {

@@ -1395,7 +1012,7 @@
it('should not add a node to the list twice', function() {

  runs(function() {
    adapter.loadNodeFromTreeById(c3, checkCallbackFunction);
    adapter.loadNode(c3, checkCallbackFunction);
  });

  waitsFor(function() {

@@ -1491,7 +1108,9 @@
  });

});
*/

/*
describe('displaying only parts of the graph', function() {

  it('should be able to remove a node and all '

@@ -1538,7 +1157,7 @@
    }
  });

  adapter.loadNodeFromTreeById(s0, checkCallbackFunction);
  adapter.loadNode(s0, checkCallbackFunction);
});

waitsFor(function() {

@@ -1581,7 +1200,7 @@
  });

});

*/

});
@@ -59,9 +59,9 @@ var describeInterface = function (testee) {

  // Add functions to load here:
  expect(testee).toHaveFunction("loadNode", 2);
  expect(testee).toHaveFunction("loadNodeFromTreeById", 2);
  // expect(testee).toHaveFunction("loadNodeFromTreeById", 2);
  expect(testee).toHaveFunction("requestCentralityChildren", 2);
  expect(testee).toHaveFunction("loadNodeFromTreeByAttributeValue", 3);
  // expect(testee).toHaveFunction("loadNodeFromTreeByAttributeValue", 3);
  expect(testee).toHaveFunction("createEdge", 2);
  expect(testee).toHaveFunction("deleteEdge", 2);
  expect(testee).toHaveFunction("patchEdge", 3);
@@ -39,24 +39,41 @@ var aqlfunctions = require("org/arangodb/aql/functions");
////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////////////
/// @brief fetch a user
/// @brief gets all registered AQL user functions
///
/// @RESTHEADER{GET /_api/aqlfunction,returns registered AQL user functions}
///
/// @REST{GET /_api/aqlfunction}
///
/// Returns all registered AQL user functions.
///
/// @REST{GET /_api/aqlfunction?namespace=`namespace`}
/// @RESTQUERYPARAMETERS
///
/// @RESTQUERYPARAM{namespace,string,optional}
/// Returns all registered AQL user functions from namespace `namespace`.
///
/// @RESTDESCRIPTION
/// Returns all registered AQL user functions.
///
/// The call will return a JSON list with all user functions found. Each user
/// function will at least have the following attributes:
///
/// - `name`: The fully qualified name of the user function
///
/// - `code`: A string representation of the function body
///
/// @RESTRETURNCODES
///
/// @RESTRETURNCODE{200}
/// On success, `HTTP 200` is returned.
///
/// @EXAMPLES
///
/// @EXAMPLE_ARANGOSH_RUN{RestAqlfunctionsGetAll}
/// var url = "/_api/aqlfunction";
///
/// var response = logCurlRequest('GET', url);
///
/// assert(response.code === 200);
///
/// logJsonResponse(response);
/// @END_EXAMPLE_ARANGOSH_RUN
////////////////////////////////////////////////////////////////////////////////
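A minimal arangosh sketch of the call documented above, assuming a connected shell where `db._connection` is available (the same convention the delete example further below uses); treating the result as a plain array of function descriptions is an assumption about the response shape:

// List every user function registered under the hypothetical "myfunctions"
// namespace; drop the query parameter to list all registered functions.
var functions = db._connection.GET("/_api/aqlfunction?namespace=myfunctions");

// Print the name and code of each function found
functions.forEach(function (fn) {
  require("console").log(fn.name + ": " + fn.code);
});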

function GET_api_aqlfunction (req, res) {

@@ -79,7 +96,10 @@ function GET_api_aqlfunction (req, res) {
///
/// @RESTHEADER{POST /_api/aqlfunction,creates or replaces an AQL user function}
///
/// @REST{POST /_api/aqlfunction}
/// @RESTBODYPARAM{body,json,required}
/// the body with name and code of the AQL user function.
///
/// @RESTDESCRIPTION
///
/// The following data needs to be passed in a JSON representation in the body of
/// the POST request:

@@ -94,10 +114,6 @@ function GET_api_aqlfunction (req, res) {
/// input). The `isDeterministic` attribute is currently not used but may be
/// used later for optimisations.
///
/// If the function can be registered by the server, the server will respond with
/// `HTTP 201`. If the function already existed and was replaced by the
/// call, the server will respond with `HTTP 200`.
///
/// In case of success, the returned JSON object has the following properties:
///
/// - `error`: boolean flag to indicate that an error occurred (`false`

@@ -105,9 +121,6 @@ function GET_api_aqlfunction (req, res) {
///
/// - `code`: the HTTP status code
///
/// If the JSON representation is malformed or mandatory data is missing from the
/// request, the server will respond with `HTTP 400`.
///
/// The body of the response will contain a JSON object with additional error
/// details. The object has the following attributes:
///

@@ -119,9 +132,32 @@ function GET_api_aqlfunction (req, res) {
///
/// - `errorMessage`: a descriptive error message
///
/// @RESTRETURNCODES
///
/// @RESTRETURNCODE{200}
/// If the function already existed and was replaced by the
/// call, the server will respond with `HTTP 200`.
///
/// @RESTRETURNCODE{201}
/// If the function can be registered by the server, the server will respond with
/// `HTTP 201`.
///
/// @RESTRETURNCODE{400}
/// If the JSON representation is malformed or mandatory data is missing from the
/// request, the server will respond with `HTTP 400`.
///
/// @EXAMPLES
///
/// @verbinclude api-aqlfunction-create
/// @EXAMPLE_ARANGOSH_RUN{RestAqlfunctionCreate}
/// var url = "/_api/aqlfunction";
/// var body = '{ "name" : "myfunctions:temperature:celsiustofahrenheit", "code" : "function (celsius) { return celsius * 1.8 + 32; }" }';
///
/// var response = logCurlRequest('POST', url, body);
///
/// assert(response.code === 201);
///
/// logJsonResponse(response);
/// @END_EXAMPLE_ARANGOSH_RUN
////////////////////////////////////////////////////////////////////////////////
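A short sketch of the registration call from arangosh, under the same assumptions as the sketch above (`db._connection` available, response body parsed into a JSON object carrying the documented `code` attribute):

// Register a user function; the server answers 201 on first registration
// and 200 when an existing function of the same name is replaced.
var body = JSON.stringify({
  name: "myfunctions:temperature:celsiustofahrenheit",
  code: "function (celsius) { return celsius * 1.8 + 32; }"
});
var result = db._connection.POST("/_api/aqlfunction", body);
// result.code should be 201 here, or 200 on a replacement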

function POST_api_aqlfunction (req, res) {

@@ -139,16 +175,18 @@ function POST_api_aqlfunction (req, res) {
////////////////////////////////////////////////////////////////////////////////
/// @brief remove an existing AQL user function
///
/// @RESTHEADER{DELETE /_api/aqlfunction,remove an existing AQL user function}
///
/// @REST{DELETE /_api/aqlfunction/`name`}
/// @RESTHEADER{DELETE /_api/aqlfunction/{name},remove an existing AQL user function}
///
/// @RESTURLPARAMETERS
///
/// @RESTURLPARAM{group,string,optional}
/// @RESTURLPARAM{name,string,required}
/// the name of the AQL user function.
///
/// @RESTQUERYPARAMETERS
///
/// @RESTQUERYPARAM{group,string,optional}
/// If set to `true`, then the function name provided in `name` is treated as
/// a namespace prefix, and all functions in the specified namespace will be deleted.
///
/// If set to `false`, the function name provided in `name` must be fully
/// qualified, including any namespaces.
///

@@ -156,9 +194,6 @@ function POST_api_aqlfunction (req, res) {
///
/// Removes an existing AQL user function, identified by `name`.
///
/// If the function can be removed by the server, the server will respond with
/// `HTTP 200`.
///
/// In case of success, the returned JSON object has the following properties:
///
/// - `error`: boolean flag to indicate that an error occurred (`false`

@@ -166,10 +201,6 @@ function POST_api_aqlfunction (req, res) {
///
/// - `code`: the HTTP status code
///
/// If the JSON representation is malformed or mandatory data is missing from the
/// request, the server will respond with `HTTP 400`. If the specified user
/// does not exist, the server will respond with `HTTP 404`.
///
/// The body of the response will contain a JSON object with additional error
/// details. The object has the following attributes:
///

@@ -180,6 +211,46 @@ function POST_api_aqlfunction (req, res) {
/// - `errorNum`: the server error number
///
/// - `errorMessage`: a descriptive error message
///
/// @RESTRETURNCODES
///
/// @RESTRETURNCODE{200}
/// If the function can be removed by the server, the server will respond with
/// `HTTP 200`.
///
/// @RESTRETURNCODE{400}
/// If the user function name is malformed, the server will respond with `HTTP 400`.
///
/// @RESTRETURNCODE{404}
/// If the specified user function does not exist, the server will respond with `HTTP 404`.
///
/// @EXAMPLES
///
/// deletes a function:
///
/// @EXAMPLE_ARANGOSH_RUN{RestAqlfunctionDelete}
/// var url = "/_api/aqlfunction/square:x:y";
///
/// var body = '{ "name" : "square:x:y", "code" : "function (x) { return x*x; }" }';
///
/// db._connection.POST("/_api/aqlfunction", body);
/// var response = logCurlRequest('DELETE', url);
///
/// assert(response.code === 200);
///
/// logJsonResponse(response);
/// @END_EXAMPLE_ARANGOSH_RUN
///
/// function not found:
///
/// @EXAMPLE_ARANGOSH_RUN{RestAqlfunctionDeleteFails}
/// var url = "/_api/aqlfunction/myfunction:x:y";
/// var response = logCurlRequest('DELETE', url);
///
/// assert(response.code === 404);
///
/// logJsonResponse(response);
/// @END_EXAMPLE_ARANGOSH_RUN
////////////////////////////////////////////////////////////////////////////////
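A sketch of the `group` parameter described above, under the same arangosh assumptions as the earlier sketches; with `group=true` the name acts as a namespace prefix, so a single call can remove a whole namespace:

// Remove every user function below the hypothetical "myfunctions" namespace
// in one call; with group=false (the default) the name would have to be a
// fully qualified function name instead.
var result = db._connection.DELETE("/_api/aqlfunction/myfunctions?group=true");
// result.code is 200 if the functions could be removed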

function DELETE_api_aqlfunction (req, res) {

@@ -1,144 +0,0 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test the compaction
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

var jsunity = require("jsunity");
var internal = require("internal");

// -----------------------------------------------------------------------------
// --SECTION--                                                collection methods
// -----------------------------------------------------------------------------

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite: collection
////////////////////////////////////////////////////////////////////////////////

function CompactionSuite () {
  var ERRORS = require("internal").errors;

  return {

////////////////////////////////////////////////////////////////////////////////
/// @brief read by name
////////////////////////////////////////////////////////////////////////////////

    testCompactAfterTruncate : function () {
      var maxWait;
      var waited;
      var cn = "example";
      var n = 400;
      var payload = "the quick brown fox jumped over the lazy dog. a quick dog jumped over the lazy fox";

      for (var i = 0; i < 5; ++i) {
        payload += payload;
      }

      internal.db._drop(cn);
      internal.wait(5);
      var c1 = internal.db._create(cn, { "journalSize" : 1048576 } );
      internal.wait(2);

      for (var i = 0; i < n; ++i) {
        c1.save({ value : i, payload : payload });
        if ((i > 0) && (i % 100 == 0)) {
          internal.wait(2);
        }
      }

      var fig = c1.figures();
      assertEqual(n, c1.count());
      assertEqual(n, fig["alive"]["count"]);
      assertEqual(0, fig["dead"]["count"]);
      assertEqual(0, fig["dead"]["deletion"]);
      assertTrue(0 < fig["datafiles"]["count"]);
      assertTrue(0 < fig["journals"]["count"]);

      c1.truncate();
      c1.unload();
      internal.wait(2);

      c1 = null;

      // wait for compactor to run
      require("console").log("waiting for compactor to run");

      // set max wait time
      if (internal.valgrind) {
        maxWait = 750;
      }
      else {
        maxWait = 90;
      }

      waited = 0;

      while (waited < maxWait) {
        internal.wait(5);
        waited += 5;

        c1 = internal.db[cn];
        c1.load();

        fig = c1.figures();
        if (fig["dead"]["deletion"] >= n && fig["dead"]["count"] >= n) {
          break;
        }

        c1.unload();
      }


      c1 = internal.db[cn];
      c1.load();
      fig = c1.figures();
      assertEqual(0, c1.count());
      assertEqual(0, fig["alive"]["count"]);
      assertEqual(n, fig["dead"]["count"]);
      assertEqual(n, fig["dead"]["deletion"]);

      internal.db._drop(cn);
    }

  };
}

// -----------------------------------------------------------------------------
// --SECTION--                                                              main
// -----------------------------------------------------------------------------

////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////

jsunity.run(CompactionSuite);

return jsunity.done();

// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// @addtogroup\\|// --SECTION--\\|/// @page\\|/// @}\\)"
// End:

@@ -0,0 +1,342 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test the compaction
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

var jsunity = require("jsunity");
var internal = require("internal");

// -----------------------------------------------------------------------------
// --SECTION--                                                collection methods
// -----------------------------------------------------------------------------

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite: collection
////////////////////////////////////////////////////////////////////////////////

function CompactionSuite () {
  var ERRORS = require("internal").errors;

  return {

////////////////////////////////////////////////////////////////////////////////
/// @brief test figures after truncate and rotate
////////////////////////////////////////////////////////////////////////////////

    testFiguresTruncate : function () {
      var maxWait;
      var waited;
      var cn = "example";
      var n = 400;
      var payload = "the quick brown fox jumped over the lazy dog. a quick dog jumped over the lazy fox";

      for (var i = 0; i < 5; ++i) {
        payload += payload;
      }

      internal.db._drop(cn);
      var c1 = internal.db._create(cn, { "journalSize" : 1048576 } );

      for (var i = 0; i < n; ++i) {
        c1.save({ _key: "test" + i, value : i, payload : payload });
      }

      c1.unload();
      internal.wait(5);

      var fig = c1.figures();
      assertEqual(n, c1.count());
      assertEqual(n, fig["alive"]["count"]);
      assertEqual(0, fig["dead"]["count"]);
      assertEqual(0, fig["dead"]["size"]);
      assertEqual(0, fig["dead"]["deletion"]);
      assertEqual(1, fig["journals"]["count"]);
      assertTrue(0 < fig["datafiles"]["count"]);

      c1.truncate();
      c1.rotate();

      var fig = c1.figures();

      assertEqual(0, c1.count());
      assertEqual(0, fig["alive"]["count"]);
      assertTrue(0 < fig["dead"]["count"]);
      assertTrue(0 < fig["dead"]["size"]);
      assertTrue(0 < fig["dead"]["deletion"]);
      assertEqual(1, fig["journals"]["count"]);
      assertTrue(0 < fig["datafiles"]["count"]);

      // wait for compactor to run
      require("console").log("waiting for compactor to run");

      // set max wait time
      if (internal.valgrind) {
        maxWait = 750;
      }
      else {
        maxWait = 90;
      }

      waited = 0;

      while (waited < maxWait) {
        internal.wait(5);
        waited += 5;

        fig = c1.figures();
        if (fig["dead"]["deletion"] == 0 && fig["dead"]["count"] == 0) {
          break;
        }
      }


      fig = c1.figures();
      assertEqual(0, c1.count());
      assertEqual(0, fig["alive"]["count"]);
      assertEqual(0, fig["alive"]["size"]);
      assertEqual(0, fig["dead"]["count"]);
      assertEqual(0, fig["dead"]["size"]);
      assertEqual(0, fig["dead"]["deletion"]);

      internal.db._drop(cn);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test document presence after compaction
////////////////////////////////////////////////////////////////////////////////

    testDocumentPresence : function () {
      var maxWait;
      var waited;
      var cn = "example";
      var n = 400;
      var payload = "the quick brown fox jumped over the lazy dog. a quick dog jumped over the lazy fox";

      for (var i = 0; i < 5; ++i) {
        payload += payload;
      }

      internal.db._drop(cn);
      var c1 = internal.db._create(cn, { "journalSize" : 1048576 } );

      for (var i = 0; i < n; ++i) {
        c1.save({ _key: "test" + i, value : i, payload : payload });
      }

      for (var i = 0; i < n; i += 2) {
        c1.remove("test" + i);
      }

      // reading the document creates a barrier that will block compaction
      // for as long as the document reference is held
      var doc = c1.document("test1");

      c1.rotate();

      var fig = c1.figures();
      assertEqual(n / 2, c1.count());
      assertEqual(n / 2, fig["alive"]["count"]);
      assertEqual(n / 2, fig["dead"]["count"]);
      assertTrue(0 < fig["dead"]["size"]);
      assertTrue(0 < fig["dead"]["deletion"]);
      assertEqual(1, fig["journals"]["count"]);
      assertTrue(0 < fig["datafiles"]["count"]);

      // release the reference and trigger GC
      doc = null;
      internal.wait(0);

      // wait for compactor to run
      require("console").log("waiting for compactor to run");

      // set max wait time
      if (internal.valgrind) {
        maxWait = 750;
      }
      else {
        maxWait = 90;
      }

      waited = 0;
      var lastValue = fig["dead"]["deletion"];

      while (waited < maxWait) {
        internal.wait(5);
        waited += 5;

        fig = c1.figures();
        if (fig["dead"]["deletion"] == lastValue) {
          break;
        }
        lastValue = fig["dead"]["deletion"];
      }

      var doc;
      for (var i = 0; i < n; i++) {
        // accessing a removed (even-numbered) document must throw,
        // while the remaining (odd-numbered) documents must still exist
        if (i % 2 == 0) {
          try {
            doc = c1.document("test" + i);
            fail();
          }
          catch (err) {
          }
        }
        else {
          doc = c1.document("test" + i);
        }
      }

      // trigger GC
      doc = null;
      internal.wait(0);

      c1.truncate();
      c1.rotate();

      waited = 0;

      while (waited < maxWait) {
        internal.wait(5);
        waited += 5;

        fig = c1.figures();
        if (fig["dead"]["deletion"] == 0) {
          break;
        }
      }

      var fig = c1.figures();
      assertEqual(0, c1.count());
      assertEqual(0, fig["alive"]["count"]);
      assertEqual(0, fig["dead"]["count"]);
      assertEqual(0, fig["dead"]["size"]);
      assertEqual(0, fig["dead"]["deletion"]);
      assertEqual(1, fig["journals"]["count"]);
      assertTrue(0 < fig["datafiles"]["count"]);

      internal.db._drop(cn);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief creates documents, rotates the journal and truncates all documents
///
/// this will fully compact the 1st datafile (with the data), but will leave
/// the journal with the delete markers untouched
////////////////////////////////////////////////////////////////////////////////

    testCompactAfterTruncate : function () {
      var maxWait;
      var waited;
      var cn = "example";
      var n = 400;
      var payload = "the quick brown fox jumped over the lazy dog. a quick dog jumped over the lazy fox";

      for (var i = 0; i < 5; ++i) {
        payload += payload;
      }

      internal.db._drop(cn);
      internal.wait(5);
      var c1 = internal.db._create(cn, { "journalSize" : 1048576 } );
      internal.wait(2);

      for (var i = 0; i < n; ++i) {
        c1.save({ value : i, payload : payload });
      }

      var fig = c1.figures();
      assertEqual(n, c1.count());
      assertEqual(n, fig["alive"]["count"]);
      assertEqual(0, fig["dead"]["count"]);
      assertEqual(0, fig["dead"]["deletion"]);
      assertEqual(1, fig["journals"]["count"]);
      assertTrue(0 < fig["datafiles"]["count"]);

      // truncation will go fully into the journal...
      c1.rotate();

      c1.truncate();
      c1.unload();

      // trigger GC
      internal.wait(2);

      // wait for compactor to run
      require("console").log("waiting for compactor to run");

      // set max wait time
      if (internal.valgrind) {
        maxWait = 750;
      }
      else {
        maxWait = 90;
      }

      waited = 0;

      while (waited < maxWait) {
        internal.wait(5);
        waited += 5;

        fig = c1.figures();
        if (fig["dead"]["count"] == 0) {
          break;
        }
      }


      fig = c1.figures();
      assertEqual(0, c1.count());
      // all alive & dead markers should be gone
      assertEqual(0, fig["alive"]["count"]);
      assertEqual(0, fig["dead"]["count"]);
      // we should still have all the deletion markers
      assertEqual(n, fig["dead"]["deletion"]);

      internal.db._drop(cn);
    }

  };
}

// -----------------------------------------------------------------------------
// --SECTION--                                                              main
// -----------------------------------------------------------------------------

////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////

jsunity.run(CompactionSuite);

return jsunity.done();

// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// @addtogroup\\|// --SECTION--\\|/// @page\\|/// @}\\)"
// End: