
Merge branch 'devel' of ssh://github.com/ArangoDB/ArangoDB into devel

commit 8353b6b42d
Author: Max Neunhoeffer
Date:   2016-09-09 14:31:13 +02:00

7 changed files with 56 additions and 62 deletions

View File

@@ -76,6 +76,8 @@ devel
 v3.0.8 (XXXX-XX-XX)
 -------------------
 
+* fixed issue #2005
+
 * fixed issue #2039

View File

@@ -927,13 +927,16 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
         THROW_ARANGO_EXCEPTION_MESSAGE(
             TRI_ERROR_QUERY_COLLECTION_LOCK_FAILED, message);
       } else {
-        // Only if the aresult was successful we will get here
+        // Only if the result was successful we will get here
         arangodb::basics::StringBuffer& body = res->result->getBody();
         std::shared_ptr<VPackBuilder> builder =
             VPackParser::fromJson(body.c_str(), body.length());
         VPackSlice resultSlice = builder->slice();
-        TRI_ASSERT(resultSlice.isNumber());
+        if (!resultSlice.isNumber()) {
+          THROW_ARANGO_EXCEPTION_MESSAGE(
+              TRI_ERROR_INTERNAL, "got unexpected response from engine lock request");
+        }
         auto engineId = resultSlice.getNumericValue<traverser::TraverserEngineID>();
         TRI_ASSERT(engineId != 0);
         traverserEngines.emplace(engineId, shardSet);
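
The hunk above replaces a debug-only TRI_ASSERT with an explicit check that throws in all builds, so a malformed engine-lock response is reported in release binaries as well. Below is a minimal, self-contained sketch of that validate-then-throw pattern; the Slice type here is a hypothetical stand-in, not the real arangodb::velocypack API, and the exact error handling differs from the commit.

#include <cstdint>
#include <stdexcept>

// Hypothetical stand-in for the VelocyPack slice used above; only the parts
// needed to illustrate the pattern are modelled.
struct Slice {
  bool numeric = false;
  uint64_t value = 0;
  bool isNumber() const { return numeric; }
  uint64_t getUInt() const { return value; }
};

// Validate the response shape before using it: unlike an assertion, which is
// compiled out in release builds, this surfaces malformed responses everywhere.
uint64_t extractEngineId(Slice response) {
  if (!response.isNumber()) {
    throw std::runtime_error("got unexpected response from engine lock request");
  }
  uint64_t engineId = response.getUInt();
  if (engineId == 0) {
    throw std::runtime_error("invalid (zero) traverser engine id");
  }
  return engineId;
}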

View File

@@ -236,7 +236,7 @@ bool VppCommTask::processRead() {
   // handle request types
   if (type == 1000) {
     // do authentication
-    std::string encryption = header.at(2).copyString();
+    // std::string encryption = header.at(2).copyString();
     std::string user = header.at(3).copyString();
     std::string pass = header.at(4).copyString();
     auto auth = basics::StringUtils::encodeBase64(user + ":" + pass);
@@ -367,6 +367,7 @@ void VppCommTask::handleSimpleError(rest::ResponseCode responseCode,
 boost::optional<bool> VppCommTask::getMessageFromSingleChunk(
     ChunkHeader const& chunkHeader, VppInputMessage& message, bool& doExecute,
     char const* vpackBegin, char const* chunkEnd) {
+  // add agent for this new message
   _agents.emplace(
       std::make_pair(chunkHeader._messageID, RequestStatisticsAgent(true)));
@@ -414,6 +415,7 @@ boost::optional<bool> VppCommTask::getMessageFromMultiChunks(
   // CASE 2a: chunk starts new message
   if (chunkHeader._isFirst) {  // first chunk of multi chunk message
+    // add agent for this new message
     _agents.emplace(
         std::make_pair(chunkHeader._messageID, RequestStatisticsAgent(true)));
@@ -449,6 +451,7 @@ boost::optional<bool> VppCommTask::getMessageFromMultiChunks(
   // CASE 2b: chunk continues a message
   } else {  // followup chunk of some mesage
+    // do not add agent for this continued message
     LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
                                             << "chunk continues a message";
     if (incompleteMessageItr == _incompleteMessages.end()) {
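
The comments added above document when a RequestStatisticsAgent is created: once per VelocyStream message, on the single-chunk path and on the first chunk of a multi-chunk message, but never for follow-up chunks. A simplified sketch of that bookkeeping, with stand-in types rather than the real VppCommTask members:

#include <cstdint>
#include <map>

// Stand-in for RequestStatisticsAgent; only the construction flag is modelled.
struct StatisticsAgent {
  explicit StatisticsAgent(bool enabled) : enabled(enabled) {}
  bool enabled;
};

struct CommTask {
  // one agent per in-flight message, keyed by the chunk header's message id
  std::map<uint64_t, StatisticsAgent> _agents;

  void onChunk(uint64_t messageId, bool isFirstChunk) {
    if (isFirstChunk) {
      // add agent for this new message (single chunk or first of many)
      _agents.emplace(messageId, StatisticsAgent(true));
    }
    // follow-up chunks reuse the agent created for the first chunk,
    // so nothing is inserted here for continued messages
  }

  void onMessageDone(uint64_t messageId) {
    // drop the agent once the complete message has been processed
    _agents.erase(messageId);
  }
};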

View File

@@ -209,11 +209,11 @@ void PrimaryIndex::toVelocyPackFigures(VPackBuilder& builder) const {
 }
 
 int PrimaryIndex::insert(arangodb::Transaction*, TRI_doc_mptr_t const*, bool) {
-  THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
+  THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "insert() called for primary index");
 }
 
 int PrimaryIndex::remove(arangodb::Transaction*, TRI_doc_mptr_t const*, bool) {
-  THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
+  THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "remove() called for primary index");
 }
 
 /// @brief unload the index data from memory

View File

@@ -80,6 +80,7 @@ class IndexFiller {
   void operator()() {
     int res = TRI_ERROR_INTERNAL;
 
+    TRI_ASSERT(_idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
     try {
       res = _collection->fillIndex(_trx, _idx);
@@ -210,7 +211,6 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
   VPackSlice value = info.get("type");
 
   if (!value.isString()) {
-    // FIXME Intenral Compatibility.
     // Compatibility with old v8-vocindex.
     if (generateKey) {
      THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
@@ -232,8 +232,7 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
     iid = Helper::getNumericValue<TRI_idx_iid_t>(info, "id", 0);
   } else if (!generateKey) {
     // In the restore case it is forbidden to NOT have id
-    LOG(ERR) << "ignoring index, index identifier could not be located";
-    THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
+    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot restore index without index identifier");
   }
 
   if (iid == 0 && !isClusterConstructor) {
@@ -249,7 +248,7 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
     case arangodb::Index::TRI_IDX_TYPE_PRIMARY_INDEX: {
       if (!isClusterConstructor) {
         // this indexes cannot be created directly
-        THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
+        THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot create primary index");
       }
       newIdx.reset(new arangodb::PrimaryIndex(col));
       break;
@@ -257,7 +256,7 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
     case arangodb::Index::TRI_IDX_TYPE_EDGE_INDEX: {
       if (!isClusterConstructor) {
         // this indexes cannot be created directly
-        THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
+        THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot create edge index");
       }
       newIdx.reset(new arangodb::EdgeIndex(iid, col));
       break;
@@ -281,7 +280,7 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
       break;
 #else
       THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_NOT_IMPLEMENTED,
-                                     "index type not supported in this build");
+                                     "index type 'persistent' not supported in this build");
 #endif
     }
     case arangodb::Index::TRI_IDX_TYPE_FULLTEXT_INDEX: {
@@ -445,7 +444,12 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase, VPackSlice const& i
       }
     }
   }
 
+  /*
+  if (!isCluster) {
+    createInitialIndexes();
+  }
+  */
   auto indexesSlice = info.get("indexes");
   if (indexesSlice.isArray()) {
     bool const isCluster = ServerState::instance()->isRunningInCluster();
@@ -457,15 +461,27 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase, VPackSlice const& i
         // TODO Handle Properly
         continue;
       }
 
       auto idx = PrepareIndexFromSlice(v, false, this, true);
 
       if (isCluster) {
         addIndexCoordinator(idx, false);
       } else {
+        /* if (idx->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX ||
+            idx->type() == Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX) {
+          // already added those types earlier
+          continue;
+        }
+        */
         addIndex(idx);
       }
     }
   }
 
+  if (_indexes.empty()) {
+    createInitialIndexes();
+  }
+
   if (!ServerState::instance()->isCoordinator() && isPhysical) {
     // If we are not in the coordinator we need a path
     // to the physical data.
@@ -764,6 +780,7 @@ LogicalCollection::getIndexes() const {
 // or it's indexes are freed the pointer returned will get invalidated.
 arangodb::PrimaryIndex* LogicalCollection::primaryIndex() const {
   TRI_ASSERT(!_indexes.empty());
+  TRI_ASSERT(_indexes[0]->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
   // the primary index must be the index at position #0
   return static_cast<arangodb::PrimaryIndex*>(_indexes[0].get());
 }
@@ -1124,7 +1141,7 @@ PhysicalCollection* LogicalCollection::createPhysical() {
 void LogicalCollection::open(bool ignoreErrors) {
   VPackBuilder builder;
   StorageEngine* engine = EngineSelectorFeature::ENGINE;
-  engine->getCollectionInfo(_vocbase, cid(), builder, false, 0);
+  engine->getCollectionInfo(_vocbase, cid(), builder, true, 0);
 
   double start = TRI_microtime();
@@ -1135,17 +1152,9 @@ void LogicalCollection::open(bool ignoreErrors) {
   int res = openWorker(ignoreErrors);
 
   if (res != TRI_ERROR_NO_ERROR) {
-    LOG(ERR) << "cannot open document collection from path '" << path() << "'";
-    THROW_ARANGO_EXCEPTION(res);
+    THROW_ARANGO_EXCEPTION_MESSAGE(res, std::string("cannot open document collection from path '") + path() + "': " + TRI_errno_string(res));
   }
 
-  res = createInitialIndexes();
-
-  if (res != TRI_ERROR_NO_ERROR) {
-    LOG(ERR) << "cannot initialize document collection: " << TRI_errno_string(res);
-    THROW_ARANGO_EXCEPTION(res);
-  }
-
   arangodb::SingleCollectionTransaction trx(
       arangodb::StandaloneTransactionContext::Create(_vocbase),
       cid(), TRI_TRANSACTION_WRITE);
@@ -1309,6 +1318,7 @@ std::shared_ptr<Index> LogicalCollection::createIndex(Transaction* trx,
     return idx;
   }
 
+  TRI_ASSERT(idx.get()->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
   int res = fillIndex(trx, idx.get(), false);
 
   if (res != TRI_ERROR_NO_ERROR) {
@@ -1336,21 +1346,7 @@ int LogicalCollection::restoreIndex(Transaction* trx, VPackSlice const& info,
   if (!info.isObject()) {
     return TRI_ERROR_INTERNAL;
   }
 
-  /* FIXME Old style First check if iid is okay and update server tick
-  TRI_idx_iid_t iid;
-  if (iis.isNumber()) {
-    iid = iis.getNumericValue<TRI_idx_iid_t>();
-  } else if (iis.isString()) {
-    std::string tmp = iis.copyString();
-    iid = static_cast<TRI_idx_iid_t>(basics::StringUtils::uint64(tmp));
-  } else {
-    LOG(ERR) << "ignoring index, index identifier could not be located";
-    return TRI_ERROR_INTERNAL;
-  }
-  TRI_UpdateTickServer(iid);
-  */
-
   // We create a new Index object to make sure that the index
   // is not handed out except for a successful case.
   std::shared_ptr<Index> newIdx;
@@ -1366,6 +1362,7 @@ int LogicalCollection::restoreIndex(Transaction* trx, VPackSlice const& info,
   // FIXME New style. Update tick after successful creation of index.
   TRI_UpdateTickServer(newIdx->id());
 
+  TRI_ASSERT(newIdx.get()->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
   int res = fillIndex(trx, newIdx.get());
 
   if (res != TRI_ERROR_NO_ERROR) {
@@ -1516,21 +1513,16 @@ bool LogicalCollection::dropIndex(TRI_idx_iid_t iid, bool writeMarker) {
 }
 
 /// @brief creates the initial indexes for the collection
-int LogicalCollection::createInitialIndexes() {
+void LogicalCollection::createInitialIndexes() {
   // TODO Properly fix this. The outside should make sure that only NEW collections
   // try to create the indexes.
   if (!_indexes.empty()) {
-    return TRI_ERROR_NO_ERROR;
+    return;
   }
 
   // create primary index
   auto primaryIndex = std::make_shared<arangodb::PrimaryIndex>(this);
-
-  try {
-    addIndex(primaryIndex);
-  } catch (...) {
-    return TRI_ERROR_OUT_OF_MEMORY;
-  }
+  addIndex(primaryIndex);
 
   // create edges index
   if (_type == TRI_COL_TYPE_EDGE) {
@@ -1539,16 +1531,10 @@ int LogicalCollection::createInitialIndexes() {
       iid = _planId;
     }
 
-    try {
-      auto edgeIndex = std::make_shared<arangodb::EdgeIndex>(iid, this);
-      addIndex(edgeIndex);
-    } catch (...) {
-      return TRI_ERROR_OUT_OF_MEMORY;
-    }
+    auto edgeIndex = std::make_shared<arangodb::EdgeIndex>(iid, this);
+    addIndex(edgeIndex);
   }
-
-  return TRI_ERROR_NO_ERROR;
 }
 
 /// @brief iterator for index open
@@ -1633,6 +1619,7 @@ int LogicalCollection::fillIndexes(arangodb::Transaction* trx) {
   // now actually fill the secondary indexes
   for (size_t i = 1; i < n; ++i) {
     auto idx = _indexes[i];
+    TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
 
     // index threads must come first, otherwise this thread will block the
     // loop and
@@ -1685,6 +1672,9 @@ int LogicalCollection::fillIndexes(arangodb::Transaction* trx) {
 }
 
 void LogicalCollection::addIndex(std::shared_ptr<arangodb::Index> idx) {
+  // primary index must be added at position 0
+  TRI_ASSERT(idx->type() != arangodb::Index::TRI_IDX_TYPE_PRIMARY_INDEX || _indexes.empty());
+
   _indexes.emplace_back(idx);
 
   // update statistics
@@ -2364,6 +2354,7 @@ int LogicalCollection::rollbackOperation(arangodb::Transaction* trx,
 int LogicalCollection::fillIndex(arangodb::Transaction* trx,
                                  arangodb::Index* idx,
                                  bool skipPersistent) {
+  TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
   TRI_ASSERT(!ServerState::instance()->isCoordinator());
   if (!useSecondaryIndexes()) {
     return TRI_ERROR_NO_ERROR;
@@ -2490,6 +2481,7 @@ int LogicalCollection::fillIndexSequential(arangodb::Transaction* trx,
   auto primaryIndex = this->primaryIndex();
   size_t nrUsed = primaryIndex->size();
 
+  TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
   idx->sizeHint(trx, nrUsed);
 
   if (nrUsed > 0) {
@@ -2965,6 +2957,7 @@ int LogicalCollection::insertSecondaryIndexes(
   for (size_t i = 1; i < n; ++i) {
     auto idx = _indexes[i];
+    TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
 
     if (!useSecondary && !idx->isPersistent()) {
       continue;
@@ -3007,6 +3000,7 @@ int LogicalCollection::deleteSecondaryIndexes(
   for (size_t i = 1; i < n; ++i) {
     auto idx = _indexes[i];
+    TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
 
     if (!useSecondary && !idx->isPersistent()) {
       continue;
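
Several hunks in this file enforce the same invariant: the primary index sits at position 0 of _indexes, only secondary indexes are ever passed to fillIndex(), and createInitialIndexes() is now a void helper that is only invoked when no indexes were restored from the "indexes" slice. The following is a compact, self-contained model of that invariant with simplified stand-in names, not the real LogicalCollection API:

#include <cassert>
#include <memory>
#include <vector>

// Simplified stand-ins for the real Index / LogicalCollection types.
enum class IndexType { Primary, Edge, Hash };

struct Index {
  explicit Index(IndexType t) : type(t) {}
  IndexType type;
};

struct Collection {
  std::vector<std::shared_ptr<Index>> indexes;
  bool isEdgeCollection = false;

  void addIndex(std::shared_ptr<Index> idx) {
    // primary index must be added first, i.e. at position 0
    assert(idx->type != IndexType::Primary || indexes.empty());
    indexes.emplace_back(std::move(idx));
  }

  // void instead of an int error code: a no-op when indexes already exist
  void createInitialIndexes() {
    if (!indexes.empty()) {
      return;
    }
    addIndex(std::make_shared<Index>(IndexType::Primary));
    if (isEdgeCollection) {
      addIndex(std::make_shared<Index>(IndexType::Edge));
    }
  }

  Index* primaryIndex() const {
    // the primary index must be the index at position 0
    assert(!indexes.empty() && indexes[0]->type == IndexType::Primary);
    return indexes[0].get();
  }

  void fillIndex(Index* idx) {
    // only secondary indexes are ever filled this way
    assert(idx->type != IndexType::Primary);
    // ... build the index from the documents referenced by primaryIndex()
  }
};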

View File

@@ -359,7 +359,7 @@ class LogicalCollection {
   // SECTION: Index creation
 
   /// @brief creates the initial indexes for the collection
-  int createInitialIndexes();
+  void createInitialIndexes();
 
   int openWorker(bool ignoreErrors);

View File

@@ -303,14 +303,6 @@ arangodb::LogicalCollection* TRI_vocbase_t::createCollectionWorker(
   TRI_ASSERT(collection != nullptr);
 
   try {
-    // Maybe the ordering is broken now
-    // create document collection
-    int res = collection->createInitialIndexes();
-
-    if (res != TRI_ERROR_NO_ERROR) {
-      THROW_ARANGO_EXCEPTION(res);
-    }
-
     // cid might have been assigned
     cid = collection->cid();