mirror of https://gitee.com/bigwinds/arangodb

commit c5f847a912 (parent fb4b3d0a3c)

    fix figures and volatile collection counts
@@ -36,6 +36,7 @@
#include "VocBase/ticks.h"

#include <velocypack/Buffer.h>
#include <velocypack/Collection.h>
#include <velocypack/Helpers.h>
#include <velocypack/Iterator.h>
#include <velocypack/Slice.h>
@@ -46,6 +47,81 @@ using namespace arangodb::rest;

static double const CL_DEFAULT_TIMEOUT = 60.0;

namespace {
template<typename T>
T addFigures(VPackSlice const& v1, VPackSlice const& v2, std::vector<std::string> const& attr) {
  TRI_ASSERT(v1.isObject());
  TRI_ASSERT(v2.isObject());

  T value = 0;

  VPackSlice found = v1.get(attr);
  if (found.isNumber()) {
    value += found.getNumericValue<T>();
  }

  found = v2.get(attr);
  if (found.isNumber()) {
    value += found.getNumericValue<T>();
  }

  return value;
}

void recursiveAdd(VPackSlice const& value, std::shared_ptr<VPackBuilder>& builder) {
  TRI_ASSERT(value.isObject());
  TRI_ASSERT(builder->slice().isObject());
  TRI_ASSERT(builder->isClosed());

  VPackBuilder updated;

  updated.openObject();

  updated.add("alive", VPackValue(VPackValueType::Object));
  updated.add("count", VPackValue(addFigures<size_t>(value, builder->slice(), { "alive", "count" })));
  updated.add("size", VPackValue(addFigures<size_t>(value, builder->slice(), { "alive", "size" })));
  updated.close();

  updated.add("dead", VPackValue(VPackValueType::Object));
  updated.add("count", VPackValue(addFigures<size_t>(value, builder->slice(), { "dead", "count" })));
  updated.add("size", VPackValue(addFigures<size_t>(value, builder->slice(), { "dead", "size" })));
  updated.add("deletion", VPackValue(addFigures<size_t>(value, builder->slice(), { "dead", "deletion" })));
  updated.close();

  updated.add("indexes", VPackValue(VPackValueType::Object));
  updated.add("count", VPackValue(addFigures<size_t>(value, builder->slice(), { "indexes", "count" })));
  updated.add("size", VPackValue(addFigures<size_t>(value, builder->slice(), { "indexes", "size" })));
  updated.close();

  updated.add("datafiles", VPackValue(VPackValueType::Object));
  updated.add("count", VPackValue(addFigures<size_t>(value, builder->slice(), { "datafiles", "count" })));
  updated.add("fileSize", VPackValue(addFigures<size_t>(value, builder->slice(), { "datafiles", "fileSize" })));
  updated.close();

  updated.add("journals", VPackValue(VPackValueType::Object));
  updated.add("count", VPackValue(addFigures<size_t>(value, builder->slice(), { "journals", "count" })));
  updated.add("fileSize", VPackValue(addFigures<size_t>(value, builder->slice(), { "journals", "fileSize" })));
  updated.close();

  updated.add("compactors", VPackValue(VPackValueType::Object));
  updated.add("count", VPackValue(addFigures<size_t>(value, builder->slice(), { "compactors", "count" })));
  updated.add("fileSize", VPackValue(addFigures<size_t>(value, builder->slice(), { "compactors", "fileSize" })));
  updated.close();

  updated.add("documentReferences", VPackValue(addFigures<size_t>(value, builder->slice(), { "documentReferences" })));

  updated.close();

  TRI_ASSERT(updated.slice().isObject());
  TRI_ASSERT(updated.isClosed());

  builder.reset(new VPackBuilder(VPackCollection::merge(builder->slice(), updated.slice(), true, false)));
  TRI_ASSERT(builder->slice().isObject());
  TRI_ASSERT(builder->isClosed());
}

}

namespace arangodb {

static int handleGeneralCommErrors(ClusterCommResult const* res) {
@@ -547,7 +623,7 @@ int revisionOnCoordinator(std::string const& dbname,
////////////////////////////////////////////////////////////////////////////////

int figuresOnCoordinator(std::string const& dbname, std::string const& collname,
                         TRI_doc_collection_info_t*& result) {
                         std::shared_ptr<arangodb::velocypack::Builder>& result) {
  // Set a few variables needed for our work:
  ClusterInfo* ci = ClusterInfo::instance();
  ClusterComm* cc = ClusterComm::instance();
@@ -560,14 +636,6 @@ int figuresOnCoordinator(std::string const& dbname, std::string const& collname,
    return TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND;
  }

  // prefill with 0s
  result = (TRI_doc_collection_info_t*)TRI_Allocate(
      TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_doc_collection_info_t), true);

  if (result == nullptr) {
    return TRI_ERROR_OUT_OF_MEMORY;
  }

  // If we get here, the sharding attributes are not only _key, therefore
  // we have to contact everybody:
  auto shards = collinfo->shardIds();
@@ -598,39 +666,7 @@ int figuresOnCoordinator(std::string const& dbname, std::string const& collname,
        VPackSlice figures = answer.get("figures");
        if (figures.isObject()) {
          // add to the total
          result->_numberAlive +=
              ExtractFigure<TRI_voc_ssize_t>(figures, "alive", "count");
          result->_numberDead +=
              ExtractFigure<TRI_voc_ssize_t>(figures, "dead", "count");
          result->_numberDeletions +=
              ExtractFigure<TRI_voc_ssize_t>(figures, "dead", "deletion");
          result->_numberIndexes +=
              ExtractFigure<TRI_voc_ssize_t>(figures, "indexes", "count");

          result->_sizeAlive +=
              ExtractFigure<int64_t>(figures, "alive", "size");
          result->_sizeDead +=
              ExtractFigure<int64_t>(figures, "dead", "size");
          result->_sizeIndexes +=
              ExtractFigure<int64_t>(figures, "indexes", "size");

          result->_numberDatafiles +=
              ExtractFigure<TRI_voc_ssize_t>(figures, "datafiles", "count");
          result->_numberJournalfiles +=
              ExtractFigure<TRI_voc_ssize_t>(figures, "journals", "count");
          result->_numberCompactorfiles +=
              ExtractFigure<TRI_voc_ssize_t>(figures, "compactors", "count");

          result->_datafileSize +=
              ExtractFigure<int64_t>(figures, "datafiles", "fileSize");
          result->_journalfileSize +=
              ExtractFigure<int64_t>(figures, "journals", "fileSize");
          result->_compactorfileSize +=
              ExtractFigure<int64_t>(figures, "compactors", "fileSize");

          result->_numberDocumentDitches +=
              arangodb::basics::VelocyPackHelper::getNumericValue<uint64_t>(
                  figures, "documentReferences", 0);
          recursiveAdd(figures, result);
        }
        nrok++;
      }
@@ -639,8 +675,6 @@ int figuresOnCoordinator(std::string const& dbname, std::string const& collname,
  }

  if (nrok != (int)shards->size()) {
    TRI_Free(TRI_UNKNOWN_MEM_ZONE, result);
    result = 0;
    return TRI_ERROR_INTERNAL;
  }

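For illustration only, not part of this commit: a minimal standalone sketch of the summing-and-merging scheme that addFigures() and recursiveAdd() above implement. It assumes the velocypack headers are available; sumAttribute and the two shard documents are made up for the example.

#include <iostream>
#include <string>
#include <vector>

#include <velocypack/Builder.h>
#include <velocypack/Collection.h>
#include <velocypack/Slice.h>
#include <velocypack/Value.h>
#include <velocypack/velocypack-aliases.h>

// sum a numeric attribute found at the same path in two figures objects,
// mirroring addFigures() from the commit
static size_t sumAttribute(VPackSlice a, VPackSlice b,
                           std::vector<std::string> const& path) {
  size_t value = 0;
  VPackSlice found = a.get(path);
  if (found.isNumber()) {
    value += found.getNumericValue<size_t>();
  }
  found = b.get(path);
  if (found.isNumber()) {
    value += found.getNumericValue<size_t>();
  }
  return value;
}

int main() {
  // figures as two hypothetical shards might report them
  VPackBuilder shard1;
  shard1.openObject();
  shard1.add("alive", VPackValue(VPackValueType::Object));
  shard1.add("count", VPackValue(static_cast<size_t>(10)));
  shard1.add("size", VPackValue(static_cast<size_t>(4096)));
  shard1.close();
  shard1.close();

  VPackBuilder shard2;
  shard2.openObject();
  shard2.add("alive", VPackValue(VPackValueType::Object));
  shard2.add("count", VPackValue(static_cast<size_t>(5)));
  shard2.add("size", VPackValue(static_cast<size_t>(2048)));
  shard2.close();
  shard2.close();

  // build the summed attributes, then merge them over the first document,
  // just as recursiveAdd() merges into its accumulating builder
  VPackBuilder updated;
  updated.openObject();
  updated.add("alive", VPackValue(VPackValueType::Object));
  updated.add("count", VPackValue(sumAttribute(shard1.slice(), shard2.slice(), {"alive", "count"})));
  updated.add("size", VPackValue(sumAttribute(shard1.slice(), shard2.slice(), {"alive", "size"})));
  updated.close();
  updated.close();

  VPackBuilder total = VPackCollection::merge(shard1.slice(), updated.slice(), true, false);
  std::cout << total.slice().toJson() << std::endl;  // prints something like {"alive":{"count":15,"size":6144}}
  return 0;
}

Using VPackCollection::merge means attributes that were not re-summed keep the value from the accumulator, so only the touched sub-objects are replaced.
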
@@ -75,7 +75,7 @@ int revisionOnCoordinator(std::string const& dbname,
////////////////////////////////////////////////////////////////////////////////

int figuresOnCoordinator(std::string const& dbname, std::string const& collname,
                         TRI_doc_collection_info_t*&);
                         std::shared_ptr<arangodb::velocypack::Builder>&);

////////////////////////////////////////////////////////////////////////////////
/// @brief counts number of documents in a coordinator

@@ -82,12 +82,15 @@ int64_t MMFilesCollection::initialCount() const {
int MMFilesCollection::close() {
  // close compactor files
  closeDatafiles(_compactors);
  _compactors.clear();

  // close journal files
  closeDatafiles(_journals);
  _journals.clear();

  // close datafiles
  closeDatafiles(_datafiles);
  _datafiles.clear();

  return TRI_ERROR_NO_ERROR;
}
@@ -599,3 +602,71 @@ bool MMFilesCollection::closeDatafiles(std::vector<TRI_datafile_t*> const& files

  return result;
}

void MMFilesCollection::figures(std::shared_ptr<arangodb::velocypack::Builder>& builder) {
  READ_LOCKER(readLocker, _filesLock);

  size_t sizeDatafiles = 0;
  builder->add("datafiles", VPackValue(VPackValueType::Object));
  for (auto const& it : _datafiles) {
    sizeDatafiles += it->_initSize;
  }

  builder->add("count", VPackValue(_datafiles.size()));
  builder->add("fileSize", VPackValue(sizeDatafiles));
  builder->close(); // datafiles

  size_t sizeJournals = 0;
  for (auto const& it : _journals) {
    sizeJournals += it->_initSize;
  }
  builder->add("journals", VPackValue(VPackValueType::Object));
  builder->add("count", VPackValue(_journals.size()));
  builder->add("fileSize", VPackValue(sizeJournals));
  builder->close(); // journals

  size_t sizeCompactors = 0;
  for (auto const& it : _compactors) {
    sizeCompactors += it->_initSize;
  }
  builder->add("compactors", VPackValue(VPackValueType::Object));
  builder->add("count", VPackValue(_compactors.size()));
  builder->add("fileSize", VPackValue(sizeCompactors));
  builder->close(); // compactors
}

/// @brief iterate over a vector of datafiles and pick those with a specific
/// data range
std::vector<DatafileDescription> MMFilesCollection::datafilesInRange(TRI_voc_tick_t dataMin, TRI_voc_tick_t dataMax) {
  std::vector<DatafileDescription> result;

  READ_LOCKER(readLocker, _filesLock);

  for (auto& it : _datafiles) {
    DatafileDescription entry = {it, it->_dataMin, it->_dataMax, it->_tickMax, false};
    LOG(TRACE) << "checking datafile " << it->_fid << " with data range " << it->_dataMin << " - " << it->_dataMax << ", tick max: " << it->_tickMax;

    if (it->_dataMin == 0 || it->_dataMax == 0) {
      // datafile doesn't have any data
      continue;
    }

    TRI_ASSERT(it->_tickMin <= it->_tickMax);
    TRI_ASSERT(it->_dataMin <= it->_dataMax);

    if (dataMax < it->_dataMin) {
      // datafile is newer than requested range
      continue;
    }

    if (dataMin > it->_dataMax) {
      // datafile is older than requested range
      continue;
    }

    result.emplace_back(entry);
  }

  return result;
}

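For illustration only, not part of this commit: the interval check used by datafilesInRange() above, reduced to a self-contained sketch; the FileRange struct and the sample values are made up for the example.

#include <cstdint>
#include <iostream>
#include <vector>

// stand-in for a datafile's data range
struct FileRange {
  uint64_t dataMin;
  uint64_t dataMax;
};

// keep a datafile only if it holds data and its [dataMin, dataMax] interval
// overlaps the requested [rangeMin, rangeMax] interval
static bool inRange(FileRange const& f, uint64_t rangeMin, uint64_t rangeMax) {
  if (f.dataMin == 0 || f.dataMax == 0) {
    return false;  // datafile doesn't have any data
  }
  if (rangeMax < f.dataMin) {
    return false;  // datafile is newer than the requested range
  }
  if (rangeMin > f.dataMax) {
    return false;  // datafile is older than the requested range
  }
  return true;
}

int main() {
  std::vector<FileRange> files = {{0, 0}, {10, 20}, {25, 40}, {50, 60}};
  for (auto const& f : files) {
    std::cout << f.dataMin << "-" << f.dataMax << ": "
              << (inRange(f, 15, 45) ? "picked" : "skipped") << "\n";
  }
  // expected output: skipped, picked, picked, skipped
  return 0;
}
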
@@ -48,6 +48,9 @@ class MMFilesCollection final : public PhysicalCollection {

  int64_t initialCount() const override;

  /// @brief return engine-specific figures
  void figures(std::shared_ptr<arangodb::velocypack::Builder>&) override;

  // datafile management

  /// @brief closes an open collection
@@ -88,6 +91,10 @@ class MMFilesCollection final : public PhysicalCollection {

  /// @brief iterates over a collection
  bool iterateDatafiles(std::function<bool(TRI_df_marker_t const*, TRI_datafile_t*)> const& cb) override;

  /// @brief iterate over a vector of datafiles and pick those with a specific
  /// data range
  std::vector<DatafileDescription> datafilesInRange(TRI_voc_tick_t dataMin, TRI_voc_tick_t dataMax) override;

 private:
  bool iterateDatafilesVector(std::vector<TRI_datafile_t*> const& files,

@@ -1251,7 +1251,7 @@ TRI_vocbase_t* MMFilesEngine::openExistingDatabase(TRI_voc_tick_t id, std::strin

    TRI_ASSERT(info.id() != 0);
    try {
      c = StorageEngine::registerCollection(ConditionalWriteLocker::DoLock(), vocbase.get(), info.type(), info.id(), info.name(), info.planId(), directory);
      c = StorageEngine::registerCollection(ConditionalWriteLocker::DoLock(), vocbase.get(), info.type(), info.id(), info.name(), info.planId(), directory, info.isVolatile());
      registerCollectionPath(vocbase->id(), info.id(), directory);
    } catch (...) {
      // if we caught an exception, c is still a nullptr

@@ -251,8 +251,8 @@ class StorageEngine : public application_features::ApplicationFeature {

 protected:
  arangodb::LogicalCollection* registerCollection(bool doLock, TRI_vocbase_t* vocbase, TRI_col_type_e type, TRI_voc_cid_t cid,
                                                  std::string const& name, TRI_voc_cid_t planId, std::string const& path) {
    return vocbase->registerCollection(doLock, type, cid, name, planId, path);
                                                  std::string const& name, TRI_voc_cid_t planId, std::string const& path, bool isVolatile) {
    return vocbase->registerCollection(doLock, type, cid, name, planId, path, isVolatile);
  }

 private:

@@ -864,60 +864,6 @@ static void JS_ExistsVocbaseVPack(
  TRI_V8_TRY_CATCH_END
}

////////////////////////////////////////////////////////////////////////////////
/// @brief fetch the figures for a sharded collection
////////////////////////////////////////////////////////////////////////////////

static TRI_doc_collection_info_t* GetFiguresCoordinator(
    arangodb::LogicalCollection* collection) {
  TRI_ASSERT(collection != nullptr);

  std::string const databaseName(collection->dbName());
  std::string const cid = collection->cid_as_string();

  TRI_doc_collection_info_t* result = nullptr;

  int res = figuresOnCoordinator(databaseName, cid, result);

  if (res != TRI_ERROR_NO_ERROR) {
    TRI_set_errno(res);
    return nullptr;
  }

  return result;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief fetch the figures for a local collection
////////////////////////////////////////////////////////////////////////////////

static TRI_doc_collection_info_t* GetFigures(arangodb::LogicalCollection* collection) {
  TRI_ASSERT(collection != nullptr);

  SingleCollectionTransaction trx(
      V8TransactionContext::Create(collection->vocbase(), true),
      collection->cid(), TRI_TRANSACTION_READ);

  int res = trx.begin();

  if (res != TRI_ERROR_NO_ERROR) {
    TRI_set_errno(res);
    return nullptr;
  }

  // READ-LOCK start
  trx.lockRead();

  TRI_collection_t* document = collection->_collection;
  TRI_ASSERT(document != nullptr);
  TRI_doc_collection_info_t* info = document->figures();

  trx.finish(res);
  // READ-LOCK end

  return info;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief was docuBlock collectionFigures
////////////////////////////////////////////////////////////////////////////////
@@ -934,100 +880,20 @@ static void JS_FiguresVocbaseCol(
  if (collection == nullptr) {
    TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
  }

  SingleCollectionTransaction trx(V8TransactionContext::Create(collection->vocbase(), true), collection->cid(),
                                  TRI_TRANSACTION_READ);
  int res = trx.begin();

  v8::Handle<v8::Object> result = v8::Object::New(isolate);

  TRI_doc_collection_info_t* info;

  if (ServerState::instance()->isCoordinator()) {
    info = GetFiguresCoordinator(collection);
  } else {
    info = GetFigures(collection);
  if (res != TRI_ERROR_NO_ERROR) {
    TRI_V8_THROW_EXCEPTION(res);
  }

  if (info == nullptr) {
    TRI_V8_THROW_EXCEPTION_MEMORY();
  }
  std::shared_ptr<VPackBuilder> builder = collection->figures();

  v8::Handle<v8::Object> alive = v8::Object::New(isolate);

  result->Set(TRI_V8_ASCII_STRING("alive"), alive);
  alive->Set(TRI_V8_ASCII_STRING("count"),
             v8::Number::New(isolate, (double)info->_numberAlive));
  alive->Set(TRI_V8_ASCII_STRING("size"),
             v8::Number::New(isolate, (double)info->_sizeAlive));

  v8::Handle<v8::Object> dead = v8::Object::New(isolate);

  result->Set(TRI_V8_ASCII_STRING("dead"), dead);
  dead->Set(TRI_V8_ASCII_STRING("count"),
            v8::Number::New(isolate, (double)info->_numberDead));
  dead->Set(TRI_V8_ASCII_STRING("size"),
            v8::Number::New(isolate, (double)info->_sizeDead));
  dead->Set(TRI_V8_ASCII_STRING("deletion"),
            v8::Number::New(isolate, (double)info->_numberDeletions));

  // datafile info
  v8::Handle<v8::Object> dfs = v8::Object::New(isolate);

  result->Set(TRI_V8_ASCII_STRING("datafiles"), dfs);
  dfs->Set(TRI_V8_ASCII_STRING("count"),
           v8::Number::New(isolate, (double)info->_numberDatafiles));
  dfs->Set(TRI_V8_ASCII_STRING("fileSize"),
           v8::Number::New(isolate, (double)info->_datafileSize));

  // journal info
  v8::Handle<v8::Object> js = v8::Object::New(isolate);

  result->Set(TRI_V8_ASCII_STRING("journals"), js);
  js->Set(TRI_V8_ASCII_STRING("count"),
          v8::Number::New(isolate, (double)info->_numberJournalfiles));
  js->Set(TRI_V8_ASCII_STRING("fileSize"),
          v8::Number::New(isolate, (double)info->_journalfileSize));

  // compactors info
  v8::Handle<v8::Object> cs = v8::Object::New(isolate);

  result->Set(TRI_V8_ASCII_STRING("compactors"), cs);
  cs->Set(TRI_V8_ASCII_STRING("count"),
          v8::Number::New(isolate, (double)info->_numberCompactorfiles));
  cs->Set(TRI_V8_ASCII_STRING("fileSize"),
          v8::Number::New(isolate, (double)info->_compactorfileSize));

  v8::Handle<v8::Object> indexes = v8::Object::New(isolate);
  result->Set(TRI_V8_ASCII_STRING("indexes"), indexes);
  indexes->Set(TRI_V8_ASCII_STRING("count"),
               v8::Number::New(isolate, (double)info->_numberIndexes));
  indexes->Set(TRI_V8_ASCII_STRING("size"),
               v8::Number::New(isolate, (double)info->_sizeIndexes));

  result->Set(TRI_V8_ASCII_STRING("lastTick"),
              V8TickId(isolate, info->_tickMax));
  result->Set(
      TRI_V8_ASCII_STRING("uncollectedLogfileEntries"),
      v8::Number::New(isolate, (double)info->_uncollectedLogfileEntries));
  result->Set(TRI_V8_ASCII_STRING("documentReferences"),
              v8::Number::New(isolate, (double)info->_numberDocumentDitches));

  char const* wfd = "-";
  if (info->_waitingForDitch != nullptr) {
    wfd = info->_waitingForDitch;
  }
  result->Set(TRI_V8_ASCII_STRING("waitingFor"), TRI_V8_ASCII_STRING(wfd));

  v8::Handle<v8::Object> compaction = v8::Object::New(isolate);
  if (info->_lastCompactionStatus != nullptr) {
    compaction->Set(TRI_V8_ASCII_STRING("message"),
                    TRI_V8_ASCII_STRING(info->_lastCompactionStatus));
    compaction->Set(TRI_V8_ASCII_STRING("time"),
                    TRI_V8_ASCII_STRING(&info->_lastCompactionStamp[0]));
  } else {
    compaction->Set(TRI_V8_ASCII_STRING("message"), TRI_V8_ASCII_STRING("-"));
    compaction->Set(TRI_V8_ASCII_STRING("time"), TRI_V8_ASCII_STRING("-"));
  }
  result->Set(TRI_V8_ASCII_STRING("compactionStatus"), compaction);

  TRI_Free(TRI_UNKNOWN_MEM_ZONE, info);
  trx.finish(TRI_ERROR_NO_ERROR);

  v8::Handle<v8::Value> result = TRI_VPackToV8(isolate, builder->slice());

  TRI_V8_RETURN(result);
  TRI_V8_TRY_CATCH_END

@@ -0,0 +1,44 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGOD_VOCBASE_DATAFILE_DESCRIPTION_H
#define ARANGOD_VOCBASE_DATAFILE_DESCRIPTION_H 1

#include "Basics/Common.h"
#include "VocBase/voc-types.h"

struct TRI_datafile_t;

namespace arangodb {

struct DatafileDescription {
  TRI_datafile_t const* _data;
  TRI_voc_tick_t _dataMin;
  TRI_voc_tick_t _dataMax;
  TRI_voc_tick_t _tickMax;
  bool _isJournal;
};

} // namespace arangodb

#endif

@@ -28,6 +28,7 @@
#include "Basics/StaticStrings.h"
#include "Basics/VelocyPackHelper.h"
#include "Basics/WriteLocker.h"
#include "Cluster/ClusterMethods.h"
#include "Cluster/ServerState.h"
#include "RestServer/DatabaseFeature.h"
#include "StorageEngine/EngineSelectorFeature.h"
@@ -126,7 +127,8 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,
                                     TRI_col_type_e type, TRI_voc_cid_t cid,
                                     std::string const& name,
                                     TRI_voc_cid_t planId,
                                     std::string const& path, bool isLocal)
                                     std::string const& path,
                                     bool isVolatile, bool isLocal)
    : _internalVersion(0),
      _cid(cid),
      _planId(planId),
@@ -138,7 +140,7 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,
      _isDeleted(false),
      _doCompact(false),
      _isSystem(TRI_collection_t::IsSystemName(name)),
      _isVolatile(false),
      _isVolatile(isVolatile),
      _waitForSync(false),
      _journalSize(TRI_JOURNAL_DEFAULT_SIZE),
      _keyOptions(nullptr),
@@ -464,6 +466,21 @@ int LogicalCollection::rename(std::string const& newName) {
  return TRI_ERROR_NO_ERROR;
}

int LogicalCollection::close() {
  TRI_ASSERT(_collection != nullptr);

  int res = _collection->unload();

  if (res != TRI_ERROR_NO_ERROR) {
    return res;
  }

  delete _collection;
  _collection = nullptr;

  return getPhysical()->close();
}

void LogicalCollection::drop() {
  THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
  _isDeleted = true;
@@ -576,6 +593,33 @@ int LogicalCollection::update(VPackSlice const& slice, bool preferDefaults) {
  return TRI_ERROR_NO_ERROR;
}

/// @brief return the figures for a collection
std::shared_ptr<arangodb::velocypack::Builder> LogicalCollection::figures() {
  auto builder = std::make_shared<VPackBuilder>();

  if (ServerState::instance()->isCoordinator()) {
    builder->openObject();
    builder->close();

    int res = figuresOnCoordinator(dbName(), cid_as_string(), builder);

    if (res != TRI_ERROR_NO_ERROR) {
      THROW_ARANGO_EXCEPTION(res);
    }
  } else {
    TRI_ASSERT(_collection != nullptr);
    // add figures from TRI_collection_t
    builder->openObject();
    _collection->figures(builder);

    // add engine-specific figures
    getPhysical()->figures(builder);
    builder->close();
  }

  return builder;
}

PhysicalCollection* LogicalCollection::createPhysical() {
  TRI_ASSERT(_physical == nullptr);

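For illustration only, not part of this commit: how a caller might consume the builder returned by LogicalCollection::figures(); the JSON document below is a made-up stand-in for what the local path assembles out of TRI_collection_t::figures() plus the engine-specific figures().

#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include <velocypack/Builder.h>
#include <velocypack/Parser.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>

int main() {
  // stand-in for the document a figures() call would return
  std::shared_ptr<VPackBuilder> builder = VPackParser::fromJson(R"({
    "alive": { "count": 42, "size": 16384 },
    "dead": { "count": 3, "size": 512, "deletion": 1 },
    "indexes": { "count": 1, "size": 2048 },
    "datafiles": { "count": 2, "fileSize": 4194304 },
    "lastTick": 1234,
    "waitingFor": "-"
  })");

  VPackSlice figures = builder->slice();

  // nested numeric values can be read with a path, the same way recursiveAdd() does
  uint64_t alive = figures.get(std::vector<std::string>{"alive", "count"}).getNumericValue<uint64_t>();
  std::string waitingFor = figures.get("waitingFor").copyString();

  std::cout << "alive documents: " << alive << ", waiting for: " << waitingFor << std::endl;
  return 0;
}
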
@@ -25,6 +25,7 @@
#define ARANGOD_VOCBASE_LOGICAL_COLLECTION_H 1

#include "Basics/Common.h"
#include "VocBase/DatafileDescription.h"
#include "VocBase/PhysicalCollection.h"
#include "VocBase/voc-types.h"
#include "VocBase/vocbase.h"
@@ -47,7 +48,7 @@ class LogicalCollection {

  LogicalCollection(TRI_vocbase_t* vocbase, TRI_col_type_e type,
                    TRI_voc_cid_t cid, std::string const& name, TRI_voc_cid_t planId,
                    std::string const& path, bool isLocal);
                    std::string const& path, bool isVolatile, bool isLocal);

  LogicalCollection(TRI_vocbase_t*, arangodb::velocypack::Slice);

@@ -138,6 +139,10 @@ class LogicalCollection {
  int update(arangodb::velocypack::Slice const&, bool);
  int update(VocbaseCollectionInfo const&);

  /// @brief return the figures for a collection
  std::shared_ptr<arangodb::velocypack::Builder> figures();

  /// @brief iterates over a collection
  bool iterateDatafiles(std::function<bool(TRI_df_marker_t const*, TRI_datafile_t*)> const& cb) {
@@ -148,9 +153,7 @@ class LogicalCollection {
  int open(bool ignoreErrors);

  /// @brief closes an open collection
  int close() {
    return getPhysical()->close();
  }
  int close();

  /// datafile management

@@ -200,7 +203,10 @@ class LogicalCollection {
  bool closeDatafiles(std::vector<TRI_datafile_t*> const& files) {
    return getPhysical()->closeDatafiles(files);
  }

  std::vector<DatafileDescription> datafilesInRange(TRI_voc_tick_t dataMin, TRI_voc_tick_t dataMax) {
    return getPhysical()->datafilesInRange(dataMin, dataMax);
  }

  PhysicalCollection* getPhysical() const {

@@ -25,8 +25,11 @@
#define ARANGOD_VOCBASE_PHYSICAL_COLLECTION_H 1

#include "Basics/Common.h"
#include "VocBase/DatafileDescription.h"
#include "VocBase/voc-types.h"

#include <velocypack/Builder.h>

struct TRI_datafile_t;
struct TRI_df_marker_t;

@@ -46,6 +49,8 @@ class PhysicalCollection {
  virtual void setRevision(TRI_voc_rid_t revision, bool force) = 0;

  virtual int64_t initialCount() const = 0;

  virtual void figures(std::shared_ptr<arangodb::velocypack::Builder>&) = 0;

  virtual int close() = 0;

@@ -87,6 +92,8 @@ class PhysicalCollection {

  /// @brief iterates over a collection
  virtual bool iterateDatafiles(std::function<bool(TRI_df_marker_t const*, TRI_datafile_t*)> const& cb) = 0;

  virtual std::vector<DatafileDescription> datafilesInRange(TRI_voc_tick_t dataMin, TRI_voc_tick_t dataMax) = 0;

 protected:
  LogicalCollection* _logicalCollection;

@@ -512,66 +512,58 @@ int TRI_collection_t::beginWriteTimed(uint64_t timeout,
  }
}

/// @brief returns information about the collection
/// note: the collection lock must be held when calling this function
TRI_doc_collection_info_t* TRI_collection_t::figures() {
  // prefill with 0's to init counters
  auto info = static_cast<TRI_doc_collection_info_t*>(TRI_Allocate(
      TRI_UNKNOWN_MEM_ZONE, sizeof(TRI_doc_collection_info_t), true));

  if (info == nullptr) {
    return nullptr;
  }

void TRI_collection_t::figures(std::shared_ptr<arangodb::velocypack::Builder>& builder) {
  DatafileStatisticsContainer dfi = _datafileStatistics.all();
  info->_numberAlive += static_cast<TRI_voc_ssize_t>(dfi.numberAlive);
  info->_numberDead += static_cast<TRI_voc_ssize_t>(dfi.numberDead);
  info->_numberDeletions += static_cast<TRI_voc_ssize_t>(dfi.numberDeletions);

  info->_sizeAlive += dfi.sizeAlive;
  info->_sizeDead += dfi.sizeDead;
  builder->add("alive", VPackValue(VPackValueType::Object));
  builder->add("count", VPackValue(dfi.numberAlive));
  builder->add("size", VPackValue(dfi.sizeAlive));
  builder->close(); // alive

  builder->add("dead", VPackValue(VPackValueType::Object));
  builder->add("count", VPackValue(dfi.numberDead));
  builder->add("size", VPackValue(dfi.sizeDead));
  builder->add("deletion", VPackValue(dfi.numberDeletions));
  builder->close(); // dead

#warning FIXME
  /*
  // add the file sizes for datafiles and journals
  for (auto& df : physical->_datafiles) {
    info->_datafileSize += (int64_t)df->_initSize;
    ++info->_numberDatafiles;
  }

  for (auto& df : physical->_journals) {
    info->_journalfileSize += (int64_t)df->_initSize;
    ++info->_numberJournalfiles;
  }

  for (auto& df : physical->_compactors) {
    info->_compactorfileSize += (int64_t)df->_initSize;
    ++info->_numberCompactorfiles;
  }
  */
  // add index information
  info->_numberIndexes = 0;
  info->_sizeIndexes = 0;

  info->_sizeIndexes += static_cast<int64_t>(_masterPointers.memory());

  size_t sizeIndexes = _masterPointers.memory();
  size_t numIndexes = 0;
  for (auto const& idx : allIndexes()) {
    info->_sizeIndexes += idx->memory();
    info->_numberIndexes++;
    sizeIndexes += static_cast<size_t>(idx->memory());
    ++numIndexes;
  }

  info->_uncollectedLogfileEntries = _uncollectedLogfileEntries;
  info->_tickMax = _tickMax;
  builder->add("indexes", VPackValue(VPackValueType::Object));
  builder->add("count", VPackValue(numIndexes));
  builder->add("size", VPackValue(sizeIndexes));
  builder->close(); // indexes

  info->_numberDocumentDitches = _ditches.numDocumentDitches();
  info->_waitingForDitch = _ditches.head();
  builder->add("uncollectedLogfileEntries", VPackValue(_uncollectedLogfileEntries));
  builder->add("lastTick", VPackValue(_tickMax));

  builder->add("documentReferences", VPackValue(_ditches.numDocumentDitches()));

  char const* waitingForDitch = _ditches.head();
  builder->add("waitingFor", VPackValue(waitingForDitch == nullptr ? "-" : waitingForDitch));

  // fills in compaction status
  getCompactionStatus(info->_lastCompactionStatus,
                      &info->_lastCompactionStamp[0],
                      sizeof(info->_lastCompactionStamp));
  char const* lastCompactionStatus = nullptr;
  char lastCompactionStamp[21];
  getCompactionStatus(lastCompactionStatus,
                      &lastCompactionStamp[0],
                      sizeof(lastCompactionStamp));

  return info;
  if (lastCompactionStatus == nullptr) {
    lastCompactionStatus = "-";
    lastCompactionStamp[0] = '-';
    lastCompactionStamp[1] = '\0';
  }

  builder->add("compactionStatus", VPackValue(VPackValueType::Object));
  builder->add("message", VPackValue(lastCompactionStatus));
  builder->add("time", VPackValue(&lastCompactionStamp[0]));
  builder->close(); // compactionStatus
}

/// @brief add an index to the collection
@@ -4362,7 +4354,7 @@ TRI_collection_t* TRI_collection_t::open(TRI_vocbase_t* vocbase,
}

/// @brief closes an open collection
int TRI_collection_t::unload(bool updateStats) {
int TRI_collection_t::unload() {
  auto primaryIndex = this->primaryIndex();
  auto idxSize = primaryIndex->size();

@@ -4371,5 +4363,7 @@ int TRI_collection_t::unload(bool updateStats) {
    _info.updateCount(idxSize);
  }

  _numberDocuments = 0;

  return TRI_ERROR_NO_ERROR;
}

@@ -73,33 +73,6 @@ struct DocumentOperation;
/// @brief predefined collection name for users
#define TRI_COL_NAME_USERS "_users"

/// @brief collection info
struct TRI_doc_collection_info_t {
  TRI_voc_ssize_t _numberDatafiles;
  TRI_voc_ssize_t _numberJournalfiles;
  TRI_voc_ssize_t _numberCompactorfiles;

  TRI_voc_ssize_t _numberAlive;
  TRI_voc_ssize_t _numberDead;
  TRI_voc_ssize_t _numberDeletions;
  TRI_voc_ssize_t _numberIndexes;

  int64_t _sizeAlive;
  int64_t _sizeDead;
  int64_t _sizeIndexes;

  int64_t _datafileSize;
  int64_t _journalfileSize;
  int64_t _compactorfileSize;

  TRI_voc_tick_t _tickMax;
  uint64_t _uncollectedLogfileEntries;
  uint64_t _numberDocumentDitches;
  char const* _waitingForDitch;
  char const* _lastCompactionStatus;
  char _lastCompactionStamp[21];
};

namespace arangodb {

/// @brief collection info block saved to disk as json
@@ -270,13 +243,13 @@ struct TRI_collection_t {
  void setCompactionStatus(char const*);
  void getCompactionStatus(char const*&, char*, size_t);

  void figures(std::shared_ptr<arangodb::velocypack::Builder>& result);

  void addIndex(arangodb::Index*);
  std::vector<arangodb::Index*> const& allIndexes() const;
  arangodb::Index* lookupIndex(TRI_idx_iid_t) const;
  arangodb::PrimaryIndex* primaryIndex();

  TRI_doc_collection_info_t* figures();

  int beginRead();
  int endRead();
  int beginWrite();
@@ -402,7 +375,7 @@ struct TRI_collection_t {
                  VPackSlice const& slice, arangodb::Index** idx);

  /// @brief closes an open collection
  int unload(bool updateStatus);
  int unload();

 private:
  bool openIndex(VPackSlice const& description, arangodb::Transaction* trx);

@@ -62,18 +62,6 @@ static void Append(TRI_replication_dump_t* dump, char const* value) {
  }
}

////////////////////////////////////////////////////////////////////////////////
/// @brief a datafile descriptor
////////////////////////////////////////////////////////////////////////////////

struct df_entry_t {
  TRI_datafile_t const* _data;
  TRI_voc_tick_t _dataMin;
  TRI_voc_tick_t _dataMax;
  TRI_voc_tick_t _tickMax;
  bool _isJournal;
};

////////////////////////////////////////////////////////////////////////////////
/// @brief translate a (local) collection id into a collection name
////////////////////////////////////////////////////////////////////////////////
@@ -105,66 +93,6 @@ static char const* NameFromCid(TRI_replication_dump_t* dump,
  return nullptr;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief iterate over a vector of datafiles and pick those with a specific
/// data range
////////////////////////////////////////////////////////////////////////////////

static void IterateDatafiles(std::vector<TRI_datafile_t*> const& datafiles,
                             std::vector<df_entry_t>& result,
                             TRI_voc_tick_t dataMin, TRI_voc_tick_t dataMax,
                             bool isJournal) {
  for (auto& df : datafiles) {
    df_entry_t entry = {df, df->_dataMin, df->_dataMax, df->_tickMax,
                        isJournal};

    LOG(TRACE) << "checking datafile " << df->_fid << " with data range " << df->_dataMin << " - " << df->_dataMax << ", tick max: " << df->_tickMax;

    if (df->_dataMin == 0 || df->_dataMax == 0) {
      // datafile doesn't have any data
      continue;
    }

    TRI_ASSERT(df->_tickMin <= df->_tickMax);
    TRI_ASSERT(df->_dataMin <= df->_dataMax);

    if (dataMax < df->_dataMin) {
      // datafile is newer than requested range
      continue;
    }

    if (dataMin > df->_dataMax) {
      // datafile is older than requested range
      continue;
    }

    result.emplace_back(entry);
  }
}

////////////////////////////////////////////////////////////////////////////////
/// @brief get the datafiles of a collection for a specific tick range
////////////////////////////////////////////////////////////////////////////////

static std::vector<df_entry_t> GetRangeDatafiles(
    LogicalCollection* collection, TRI_voc_tick_t dataMin,
    TRI_voc_tick_t dataMax) {
  LOG(TRACE) << "getting datafiles in data range " << dataMin << " - " << dataMax;

  std::vector<df_entry_t> datafiles;

#warning FIXME
  /*
  {
    READ_LOCKER(readLocker, document->_filesLock);

    IterateDatafiles(document->_datafiles, datafiles, dataMin, dataMax, false);
    IterateDatafiles(document->_journals, datafiles, dataMin, dataMax, true);
  }
  */
  return datafiles;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief whether or not a marker should be replicated
////////////////////////////////////////////////////////////////////////////////
@@ -469,10 +397,11 @@ static int DumpCollection(TRI_replication_dump_t* dump,
  TRI_collection_t* document = collection->_collection;
  TRI_string_buffer_t* buffer = dump->_buffer;

  std::vector<df_entry_t> datafiles;
  std::vector<DatafileDescription> datafiles;

  try {
    datafiles = GetRangeDatafiles(collection, dataMin, dataMax);
    LOG(TRACE) << "getting datafiles in data range " << dataMin << " - " << dataMax;
    datafiles = collection->datafilesInRange(dataMin, dataMax);
  } catch (...) {
    return TRI_ERROR_OUT_OF_MEMORY;
  }
@@ -488,7 +417,7 @@ static int DumpCollection(TRI_replication_dump_t* dump,
  size_t const n = datafiles.size();

  for (size_t i = 0; i < n; ++i) {
    df_entry_t const& e = datafiles[i];
    auto const& e = datafiles[i];
    TRI_datafile_t const* datafile = e._data;

    // we are reading from a journal that might be modified in parallel

@@ -74,12 +74,13 @@ void TRI_vocbase_t::signalCleanup() {
/// caller must hold _collectionsLock in write mode or set doLock
arangodb::LogicalCollection* TRI_vocbase_t::registerCollection(
    bool doLock, TRI_col_type_e type, TRI_voc_cid_t cid,
    std::string const& name, TRI_voc_cid_t planId, std::string const& path) {
    std::string const& name, TRI_voc_cid_t planId, std::string const& path,
    bool isVolatile) {
  TRI_ASSERT(cid != 0);

  // create a new proxy
  auto collection = std::make_unique<arangodb::LogicalCollection>(
      this, type, cid, name, planId, path, true);
      this, type, cid, name, planId, path, isVolatile, true);

  {
    CONDITIONAL_WRITE_LOCKER(writeLocker, _collectionsLock, doLock);
@@ -220,16 +221,11 @@ bool TRI_vocbase_t::UnloadCollectionCallback(LogicalCollection* collection) {
    // as the cleanup thread has already popped the unload ditch from the
    // ditches list,
    // we need to insert a new one to really executed the unload
    collection->_collection->_vocbase->unloadCollection(collection, false);
    collection->vocbase()->unloadCollection(collection, false);
    return false;
  }

  TRI_collection_t* document = collection->_collection;

  TRI_ASSERT(document != nullptr);

  int res = document->unload(true);
  collection->close();
  int res = collection->close();

  if (res != TRI_ERROR_NO_ERROR) {
    std::string const colName(collection->name());
@@ -240,8 +236,6 @@ bool TRI_vocbase_t::UnloadCollectionCallback(LogicalCollection* collection) {
    return true;
  }

  delete document;

  collection->setStatus(TRI_VOC_COL_STATUS_UNLOADED);

  return true;
@@ -261,20 +255,13 @@ bool TRI_vocbase_t::DropCollectionCallback(arangodb::LogicalCollection* collecti

    // unload collection
    if (collection->_collection != nullptr) {
      TRI_collection_t* document = collection->_collection;

      int res = document->unload(false);
      collection->close();
      int res = collection->close();

      if (res != TRI_ERROR_NO_ERROR) {
        LOG(ERR) << "failed to close collection '" << name
                 << "': " << TRI_last_error();
        return false;
      }

      delete document;

      collection->_collection = nullptr;
    }
  } // release status lock

@@ -331,35 +318,33 @@ arangodb::LogicalCollection* TRI_vocbase_t::createCollectionWorker(
    return nullptr;
  }

  TRI_collection_t* col = document;
  arangodb::LogicalCollection* collection = nullptr;
  TRI_voc_cid_t planId = parameters.planId();
  col->_info.setPlanId(planId);
  document->_info.setPlanId(planId);

  TRI_ASSERT(col->_info.id() != 0);
  TRI_ASSERT(document->_info.id() != 0);

  arangodb::LogicalCollection* collection = nullptr;
  try {
    collection = registerCollection(ConditionalWriteLocker::DoNotLock(), col->_info.type(), col->_info.id(), col->_info.name(), planId, col->path());
    collection = registerCollection(ConditionalWriteLocker::DoNotLock(), document->_info.type(), document->_info.id(), document->_info.name(), planId, document->path(), document->_info.isVolatile());
  } catch (...) {
    // if an exception is caught, collection will be a nullptr
  }

  if (collection == nullptr) {
    document->unload(false);
    collection->close();
    document->unload();
    delete document;
    // TODO: does the collection directory need to be removed?
    return nullptr;
  }

  // cid might have been assigned
  cid = col->_info.id();
  cid = document->_info.id();

  collection->setStatus(TRI_VOC_COL_STATUS_LOADED);
  collection->_collection = document;

  if (writeMarker) {
    col->_info.toVelocyPack(builder);
    document->_info.toVelocyPack(builder);
  }

  return collection;

@@ -355,7 +355,8 @@ struct TRI_vocbase_t {
  /// caller must hold _collectionsLock in write mode or set doLock
  arangodb::LogicalCollection* registerCollection(
      bool doLock, TRI_col_type_e type, TRI_voc_cid_t cid,
      std::string const& name, TRI_voc_cid_t planId, std::string const& path);
      std::string const& name, TRI_voc_cid_t planId, std::string const& path,
      bool isVolatile);

  /// @brief removes a collection from the global list of collections
  /// This function is called when a collection is dropped.