mirror of https://gitee.com/bigwinds/arangodb
small stuff, oom handling etc.
commit 21fa3ed616
parent d5a868079f
@@ -277,12 +277,17 @@ int TRI_FreeBitarray(TRI_bitarray_t* ba) {
 ////////////////////////////////////////////////////////////////////////////////
 
 int TRI_InsertBitMaskElementBitarray(TRI_bitarray_t* ba, TRI_bitarray_mask_t* mask, void* element) {
-  // TODO: ba is dereferenced in the following line, but 4 lines later it is check for NULL. This is illogical
-  MasterTable_t* mt = (MasterTable_t*)(ba->_masterTable);
+  MasterTable_t* mt;
   TRI_master_table_position_t position;
   int result;
 
-  if (ba == NULL || mask == NULL || element == NULL || mt == NULL) {
+  if (ba == NULL || mask == NULL || element == NULL) {
     return TRI_ERROR_INTERNAL;
   }
 
+  mt = (MasterTable_t*)(ba->_masterTable);
+
+  if (mt == NULL) {
+    assert(NULL);
+    return TRI_ERROR_INTERNAL;
+  }
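The change above does what the removed TODO asked for: ba is now checked before its _masterTable field is dereferenced. A minimal standalone sketch of that check-before-dereference pattern, using made-up container names rather than the real bitarray types:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the real bitarray / master-table types. */
typedef struct {
  void* _masterTable;
} container_t;

/* Validate every pointer argument first, then fetch and validate the
   master table; nothing is dereferenced before its NULL check. */
static int insert_element (container_t* c, void* mask, void* element) {
  void* mt;

  if (c == NULL || mask == NULL || element == NULL) {
    return 1;                     /* plays the role of TRI_ERROR_INTERNAL */
  }

  mt = c->_masterTable;           /* safe: c is known to be non-NULL here */

  if (mt == NULL) {
    assert(NULL);                 /* defensive assert, as in the diff */
    return 1;
  }

  /* ... the actual insertion would happen here ... */
  return 0;
}

int main (void) {
  int table = 42;
  container_t c = { &table };
  int mask = 0, element = 0;

  printf("result: %d\n", insert_element(&c, &mask, &element));   /* 0 */
  printf("result: %d\n", insert_element(NULL, &mask, &element)); /* 1, no crash */
  return 0;
}

With NDEBUG defined the assert compiles away and only the error return remains, so the defensive check costs nothing in release builds.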
@@ -429,8 +429,8 @@ void TRI_InitSkipList (TRI_skiplist_t* skiplist, size_t elementSize,
   // ..........................................................................
   growResult = GrowNodeHeight(&(skiplist->_base._startNode), 2); // may fail
   growResult = growResult && GrowNodeHeight(&(skiplist->_base._endNode), 2); // may fail
-  if (!growResult) {
-    // todo: undo growth by cutting down the node height
+  if (! growResult) {
+    // TODO: undo growth by cutting down the node height
     return;
   }
 
@@ -1886,6 +1886,11 @@ static int InsertHashIndex (TRI_index_t* idx, TRI_doc_mptr_t const* doc) {
   // .............................................................................
 
   hashElement.fields = TRI_Allocate(TRI_CORE_MEM_ZONE, sizeof(TRI_shaped_json_t) * hashIndex->_paths._length, false);
+
+  if (hashElement.fields == NULL) {
+    LOG_ERROR("out of memory in hashindex");
+    return TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY);
+  }
 
   res = HashIndexHelper(hashIndex, &hashElement, doc, NULL);
 
@@ -1962,6 +1967,10 @@ static int RemoveHashIndex (TRI_index_t* idx, TRI_doc_mptr_t const* doc) {
   // .............................................................................
 
   hashElement.fields = TRI_Allocate(TRI_CORE_MEM_ZONE, sizeof(TRI_shaped_json_t) * hashIndex->_paths._length, false);
+  if (hashElement.fields == NULL) {
+    LOG_ERROR("out of memory in hashindex");
+    return TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY);
+  }
 
   // .............................................................................
   // Fill the json field list from the document
@@ -2056,6 +2065,10 @@ static int UpdateHashIndex (TRI_index_t* idx,
   // .............................................................................
 
   hashElement.fields = TRI_Allocate(TRI_CORE_MEM_ZONE, sizeof(TRI_shaped_json_t) * hashIndex->_paths._length, false);
+  if (hashElement.fields == NULL) {
+    LOG_ERROR("out of memory in hashindex");
+    return TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY);
+  }
 
   // .............................................................................
   // Update for unique hash index
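All three hash-index hunks above add the same guard after the fields allocation: log the failure, set an out-of-memory error, and return before the NULL pointer can be used. A stripped-down sketch of that pattern, with malloc standing in for TRI_Allocate and fprintf for LOG_ERROR (the memory-zone and TRI_set_errno machinery is left out):

#include <stdio.h>
#include <stdlib.h>

#define ERROR_NO_ERROR      0
#define ERROR_OUT_OF_MEMORY 2

typedef struct { double value; } field_t;   /* stand-in for TRI_shaped_json_t */

/* Allocate the per-document field array and bail out immediately if the
   allocation fails, instead of dereferencing a NULL pointer later on. */
static int build_element (size_t numFields, field_t** fieldsOut) {
  field_t* fields = malloc(sizeof(field_t) * numFields);

  if (fields == NULL) {
    fprintf(stderr, "out of memory in hashindex\n");   /* LOG_ERROR in the diff */
    return ERROR_OUT_OF_MEMORY;                        /* TRI_set_errno(...) in the diff */
  }

  *fieldsOut = fields;
  return ERROR_NO_ERROR;
}

int main (void) {
  field_t* fields = NULL;

  if (build_element(4, &fields) == ERROR_NO_ERROR) {
    printf("allocated 4 fields\n");
    free(fields);
  }
  return 0;
}

Handling the failure right at the allocation site keeps the error local; callers only ever see a clean error code.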
@@ -4357,7 +4370,6 @@ static int UpdateFulltextIndex (TRI_index_t* idx,
                                 const TRI_doc_mptr_t* newDoc,
                                 const TRI_shaped_json_t* oldDoc) {
 
-  // TODO union { void* p; void const* c; } cnv;
   TRI_fulltext_index_t* fulltextIndex;
   int res;
 
@@ -454,8 +454,8 @@ int TRI_InitPrimaryCollection (TRI_primary_collection_t* primary,
 
   TRI_InitReadWriteLock(&primary->_lock);
 
-  // init key generator. TODO: make this configurable
-  res = TRI_CreateKeyGenerator(NULL, primary);
+  // init key generator
+  res = TRI_CreateKeyGenerator(primary->base._info._options, primary);
 
   return res;
 }
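The TRI_InitPrimaryCollection hunk replaces the hard-coded NULL with the collection's stored _options, which is what the dropped "make this configurable" TODO was about. Purely as an illustration of the NULL-means-defaults idea, here is a sketch with an invented options struct and create function (not the real TRI_CreateKeyGenerator signature):

#include <stdio.h>
#include <string.h>

/* Hypothetical key-generator configuration; the real code reads such
   settings out of the collection's _options. */
typedef struct {
  char type[16];     /* e.g. "traditional" or "autoincrement" */
  int  allowUserKeys;
} keygen_options_t;

typedef struct {
  keygen_options_t config;
} keygen_t;

/* options == NULL keeps the old behaviour (built-in defaults);
   a non-NULL pointer makes the generator configurable per collection. */
static int create_key_generator (const keygen_options_t* options, keygen_t* out) {
  if (options == NULL) {
    strcpy(out->config.type, "traditional");
    out->config.allowUserKeys = 1;
  }
  else {
    out->config = *options;
  }
  return 0;
}

int main (void) {
  keygen_t kg;
  keygen_options_t opts = { "autoincrement", 0 };

  create_key_generator(NULL, &kg);    /* defaults, like the old NULL argument */
  printf("default type: %s\n", kg.config.type);

  create_key_generator(&opts, &kg);   /* per-collection options, like the new code */
  printf("configured type: %s\n", kg.config.type);
  return 0;
}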
@@ -1099,7 +1099,7 @@ int TRI_CloseVocShaper (TRI_shaper_t* s) {
   err = TRI_CloseShapeCollection(shaper->_collection);
 
   if (err != TRI_ERROR_NO_ERROR) {
-    LOG_ERROR("cannot close shape collection of shaper, error %lu", (unsigned long) err);
+    LOG_ERROR("cannot close shape collection of shaper, error %d", (int) err);
   }
 
   // TODO free the accessors
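The voc-shaper hunk is a format-string fix: the argument is now cast to int, so the conversion specifier has to be %d rather than %lu. A tiny reminder sketch of the rule that the specifier and the (promoted) argument type must agree; the error value here is arbitrary:

#include <stdio.h>

int main (void) {
  int err = 1500;   /* some error number */

  /* Wrong pairing: %lu expects an unsigned long, but (int) err is an int.
     A mismatched specifier/argument pair is undefined behaviour in C.    */
  /* printf("error %lu\n", (int) err); */

  /* Correct pairings: cast and specifier agree. */
  printf("error %d\n",  (int) err);
  printf("error %lu\n", (unsigned long) err);
  return 0;
}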
@@ -110,7 +110,7 @@ HttpResponse* HttpHandler::getResponse () const {
 Job* HttpHandler::createJob (AsyncJobServer* server) {
   HttpServer* httpServer = dynamic_cast<HttpServer*>(server);
 
-  // stj: TODO: ugly temporary hack, must be fixed
+  // TODO: ugly temporary hack, must be fixed
   if (httpServer != 0) {
     return new GeneralServerJob<HttpServer, HttpHandlerFactory::GeneralHandler>(httpServer, this);
   }
@@ -119,7 +119,7 @@ Job* HttpHandler::createJob (AsyncJobServer* server) {
   if (httpsServer != 0) {
     return new GeneralServerJob<HttpsServer, HttpHandlerFactory::GeneralHandler>(httpsServer, this);
   }
-  // stj: end of hack
+  // end of hack
 
   LOGGER_WARNING << "cannot convert AsyncJobServer into a HttpServer";
   return 0;