Merge branch 'devel' of https://github.com/arangodb/arangodb into devel

Kaveh Vahedipour 2017-04-28 13:51:02 +02:00
commit 0d322dc113
5 changed files with 118 additions and 26 deletions

@@ -335,29 +335,114 @@ std::shared_ptr<Index> RocksDBCollection::createIndex(
addIndex(idx);
{
bool const doSync =
application_features::ApplicationServer::getFeature<DatabaseFeature>(
"Database")
->forceSyncProperties();
VPackBuilder builder = _logicalCollection->toVelocyPackIgnore(
{"path", "statusString"}, true, /*forPersistence*/ false);
auto rtrx = rocksTransaction(trx);
rtrx->PutLogData(
int res =
static_cast<RocksDBEngine*>(engine)->writeCreateCollectionMarker(
_logicalCollection->vocbase()->id(), _logicalCollection->cid(),
builder.slice(),
RocksDBLogValue::IndexCreate(_logicalCollection->vocbase()->id(),
_logicalCollection->cid(), info)
.slice());
_logicalCollection->updateProperties(builder.slice(), doSync);
_logicalCollection->cid(), info));
if (res != TRI_ERROR_NO_ERROR) {
// We could not persist the index creation. Better abort
// Remove the Index in the local list again.
size_t i = 0;
// TODO: need to protect _indexes with an RW-lock!!
for (auto index : getIndexes()) {
if (index == idx) {
_indexes.erase(_indexes.begin() + i);
break;
}
++i;
}
THROW_ARANGO_EXCEPTION(res);
}
}
created = true;
return idx;
}
/// @brief Restores an index from VelocyPack.
int RocksDBCollection::restoreIndex(transaction::Methods*,
velocypack::Slice const&,
std::shared_ptr<Index>&) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return 0;
int RocksDBCollection::restoreIndex(transaction::Methods* trx,
velocypack::Slice const& info,
std::shared_ptr<Index>& idx) {
// The coordinator can never get into this state!
TRI_ASSERT(!ServerState::instance()->isCoordinator());
idx.reset(); // Clear it to make sure.
if (!info.isObject()) {
return TRI_ERROR_INTERNAL;
}
// We create a new Index object to make sure that the index
// is not handed out except for a successful case.
std::shared_ptr<Index> newIdx;
try {
StorageEngine* engine = EngineSelectorFeature::ENGINE;
IndexFactory const* idxFactory = engine->indexFactory();
TRI_ASSERT(idxFactory != nullptr);
newIdx = idxFactory->prepareIndexFromSlice(info, false, _logicalCollection,
false);
} catch (arangodb::basics::Exception const& e) {
// Something with index creation went wrong.
// Just report.
return e.code();
}
TRI_ASSERT(newIdx != nullptr);
auto const id = newIdx->id();
TRI_UpdateTickServer(id);
for (auto& it : _indexes) {
if (it->id() == id) {
// index already exists
idx = it;
return TRI_ERROR_NO_ERROR;
}
}
TRI_ASSERT(newIdx.get()->type() !=
Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
Result res = fillIndexes(trx, newIdx);
if (!res.ok()) {
return res.errorNumber();
}
addIndex(newIdx);
{
VPackBuilder builder = _logicalCollection->toVelocyPackIgnore(
{"path", "statusString"}, true, /*forPersistence*/ false);
RocksDBEngine* engine =
static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
TRI_ASSERT(engine != nullptr);
int res = engine->writeCreateCollectionMarker(
_logicalCollection->vocbase()->id(), _logicalCollection->cid(),
builder.slice(),
RocksDBLogValue::IndexCreate(_logicalCollection->vocbase()->id(),
_logicalCollection->cid(), info));
if (res != TRI_ERROR_NO_ERROR) {
// We could not persist the index creation. Better abort
// Remove the Index in the local list again.
size_t i = 0;
// TODO: need to protect _indexes with an RW-lock!!
for (auto index : getIndexes()) {
if (index == newIdx) {
_indexes.erase(_indexes.begin() + i);
break;
}
++i;
}
return res;
}
}
idx = newIdx;
// We need to write the IndexMarker
return TRI_ERROR_NO_ERROR;
}
/// @brief Drop an index with the given iid.
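
Note: the rollback path above removes the half-created index from _indexes by linear scan and carries a TODO about guarding that list with a read-write lock. Below is a minimal, self-contained sketch of such a removal under an exclusive lock, using only the standard library; the IndexList wrapper and its member names are illustrative and not ArangoDB code.

#include <algorithm>
#include <memory>
#include <mutex>
#include <shared_mutex>
#include <vector>

struct Index {};  // stand-in for arangodb::Index

class IndexList {
 public:
  void add(std::shared_ptr<Index> idx) {
    std::unique_lock<std::shared_mutex> guard(_mutex);  // exclusive writer
    _indexes.push_back(std::move(idx));
  }

  // Roll back exactly the index object that could not be persisted.
  void rollback(std::shared_ptr<Index> const& idx) {
    std::unique_lock<std::shared_mutex> guard(_mutex);  // exclusive writer
    _indexes.erase(std::remove(_indexes.begin(), _indexes.end(), idx),
                   _indexes.end());
  }

 private:
  std::shared_mutex _mutex;  // readers share, writers are exclusive
  std::vector<std::shared_ptr<Index>> _indexes;
};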

@@ -380,14 +380,20 @@ void RocksDBEngine::getCollectionInfo(TRI_vocbase_t* vocbase, TRI_voc_cid_t cid,
// dump index information
VPackSlice indexes = fullParameters.get("indexes");
builder.add(VPackValue("indexes"));
if (indexes.isArray()) {
builder.add(indexes);
} else {
// Insert an empty array instead
builder.openArray();
builder.close();
if (indexes.isArray()) {
for (auto const idx : VPackArrayIterator(indexes)) {
// This is only allowed to contain user-defined indexes.
// So we have to exclude Primary + Edge Types
VPackSlice type = idx.get("type");
TRI_ASSERT(type.isString());
if (!type.isEqualString("primary") && !type.isEqualString("edge")) {
builder.add(idx);
}
}
}
builder.close();
}
builder.close();
}
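
Note: a standalone sketch of the filtering the hunk above introduces: only user-defined index definitions are copied into the result, while the automatically created primary and edge indexes are skipped. The function name is illustrative; only plain velocypack is assumed.

#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>

// Copy only user-defined index descriptions into `builder`.
static void addUserIndexes(VPackSlice indexes, VPackBuilder& builder) {
  builder.openArray();
  if (indexes.isArray()) {
    for (VPackSlice idx : VPackArrayIterator(indexes)) {
      VPackSlice type = idx.get("type");
      // primary and edge indexes are (re)created automatically, so they
      // must not appear in the dumped collection info
      if (type.isString() && !type.isEqualString("primary") &&
          !type.isEqualString("edge")) {
        builder.add(idx);
      }
    }
  }
  builder.close();
}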

@@ -355,6 +355,9 @@ std::shared_ptr<Index> RocksDBIndexFactory::prepareIndexFromSlice(
}
if (iid == 0 && !isClusterConstructor) {
if (!generateKey) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << info.toJson();
}
// Restore is not allowed to generate an id
TRI_ASSERT(generateKey);
iid = arangodb::Index::generateId();

@@ -26,6 +26,7 @@
#include "Basics/StringBuffer.h"
#include "Basics/StringRef.h"
#include "Basics/VPackStringBufferAdapter.h"
#include "VocBase/replication-common.h"
#include "RocksDBEngine/RocksDBCollection.h"
#include "RocksDBEngine/RocksDBCommon.h"
#include "RocksDBEngine/RocksDBPrimaryIndex.h"
@@ -140,10 +141,7 @@ RocksDBReplicationResult RocksDBReplicationContext::dump(
}
// set type
int type = 2300; // documents
if (_collection->type() == TRI_COL_TYPE_EDGE) {
type = 2301; // edge documents
}
int type = REPLICATION_MARKER_DOCUMENT; // documents
arangodb::basics::VPackStringBufferAdapter adapter(buff.stringBuffer());
@@ -167,8 +165,9 @@ RocksDBReplicationResult RocksDBReplicationContext::dump(
}
builder.add(VPackValue("data"));
auto key = VPackSlice(_mdr.vpack()).get(StaticStrings::KeyString);
_mdr.addToBuilder(builder, false);
builder.add("key", builder.slice().get(StaticStrings::KeyString));
builder.add("key", key);
builder.close();
VPackDumper dumper(
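
Note: a sketch of the dump entry shaped by the hunk above, where the document's _key is read from the stored document slice before the document is embedded, instead of being re-read from the partially built envelope. The function name is illustrative, and the type value assumes REPLICATION_MARKER_DOCUMENT == 2300 as in the removed literal.

#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <velocypack/Value.h>
#include <velocypack/velocypack-aliases.h>

// Build one replication dump entry of the form
// {"type": 2300, "data": {...document...}, "key": "<_key>"}.
static void buildDumpEntry(VPackSlice document, VPackBuilder& builder) {
  VPackSlice key = document.get("_key");  // StaticStrings::KeyString
  builder.openObject();
  builder.add("type", VPackValue(2300));  // REPLICATION_MARKER_DOCUMENT
  builder.add("data", document);
  builder.add("key", key);
  builder.close();
}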

@@ -59,7 +59,6 @@ add_executable(
target_link_libraries(
arangodbtests
arangoserver
rocksdblib
)
target_include_directories(arangodbtests PRIVATE