
Bugfix/early out invalid links in view creation (#6502)

Lars Maier 2018-09-17 09:50:28 +02:00 committed by Jan
parent 8bd834bcf7
commit f93b6fd7b8
7 changed files with 166 additions and 79 deletions
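
For orientation before the per-file hunks: with this change, creating an arangosearch view whose links reference a missing collection (or one the user may not read) fails early instead of leaving a view behind. A minimal arangosh sketch of the new behavior, mirroring the regression test further down (names are illustrative):

var db = require("@arangodb").db;
var ERRORS = require("@arangodb").errors;

db._create("existingCollection");
try {
  // "missingCollection" does not exist, so view creation must abort
  db._createView("someView", "arangosearch",
                 { links: { missingCollection: {}, existingCollection: {} } });
} catch (e) {
  print(e.errorNum === ERRORS.ERROR_ARANGO_DATA_SOURCE_NOT_FOUND.code); // true
}
print(db._view("someView")); // null -- nothing was created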

View File

@@ -133,7 +133,7 @@ bool IResearchViewCoordinator::emplace(
   auto& properties = info.isObject() ? info : emptyObjectSlice(); // if no 'info' then assume defaults
   std::string error;
-  bool hasLinks = properties.hasKey("links");
+  bool hasLinks = properties.hasKey(StaticStrings::LinksField);

   auto view = std::shared_ptr<IResearchViewCoordinator>(
     new IResearchViewCoordinator(vocbase, info, planVersion)

View File

@@ -57,27 +57,33 @@ namespace iresearch {
   auto& properties = info.isObject() ? info : emptyObjectSlice(); // if no 'info' then assume defaults
   std::string error;
-  bool hasLinks = properties.hasKey("links");
+  bool hasLinks = properties.hasKey(StaticStrings::LinksField);

   if (hasLinks && isNew) {
     // check link auth as per https://github.com/arangodb/backlog/issues/459
-    if (arangodb::ExecContext::CURRENT) {
-      // check new links
-      if (info.hasKey(StaticStrings::LinksField)) {
-        for (arangodb::velocypack::ObjectIterator itr(info.get(StaticStrings::LinksField)); itr.valid(); ++itr) {
-          if (!itr.key().isString()) {
-            continue; // not a resolvable collection (invalid jSON)
-          }
-          auto collection= vocbase.lookupCollection(itr.key().copyString());
-          if (collection
-              && !arangodb::ExecContext::CURRENT->canUseCollection(vocbase.name(), collection->name(), arangodb::auth::Level::RO)) {
-            return nullptr;
-          }
-        }
-      }
-    }
+    arangodb::velocypack::ObjectIterator iterator{info.get(StaticStrings::LinksField)};
+
+    for (auto itr : iterator) {
+      if (!itr.key.isString()) {
+        continue; // not a resolvable collection (invalid jSON)
+      }
+      auto colname = itr.key.copyString();
+
+      // check if the collection exists
+      auto collection = vocbase.lookupCollection(colname);
+      if (!collection) {
+        LOG_TOPIC(WARN, arangodb::iresearch::TOPIC) << "Could not create view: "
+          << "Collection not found: " << colname;
+        TRI_set_errno(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND);
+        return nullptr;
+      }
+
+      // check if the collection can be used
+      if (arangodb::ExecContext::CURRENT &&
+          !arangodb::ExecContext::CURRENT->canUseCollection(vocbase.name(), collection->name(), arangodb::auth::Level::RO)) {
+        return nullptr;
+      }
+    }
   }
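
Taken together, the rewritten loop above performs two ordered checks per link entry: the target collection must exist (otherwise creation aborts early with TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND and a WARN log), and the current user needs at least read access to it. A rough arangosh-style mirror of that order, using a hypothetical validateLinks() helper (the authoritative logic is the C++ above):

var db = require("@arangodb").db;

function validateLinks(links) {
  for (var name of Object.keys(links)) {
    // 1) the linked collection must exist, otherwise: early out
    if (db._collection(name) === null) {
      throw "collection not found: " + name; // maps to TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND
    }
    // 2) the current user must have at least RO access to it
    //    (the auth::Level::RO check in the C++ code; not expressible here)
  }
}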

View File

@@ -57,6 +57,7 @@
 #include <velocypack/Iterator.h>
 #include <velocypack/Slice.h>
 #include <velocypack/Validator.h>
+#include <velocypack/Collection.h>
 #include <velocypack/velocypack-aliases.h>
 #include <array>
 #include <cstring>
@@ -68,21 +69,21 @@ namespace {
 /// @brief maximum internal value for chunkSize
 size_t const maxChunkSize = 10 * 1024 * 1024;

 std::chrono::milliseconds sleepTimeFromWaitTime(double waitTime) {
   if (waitTime < 1.0) {
     return std::chrono::milliseconds(100);
   }
   if (waitTime < 5.0) {
     return std::chrono::milliseconds(200);
-  } 
+  }
   if (waitTime < 20.0) {
     return std::chrono::milliseconds(500);
   }
   if (waitTime < 60.0) {
     return std::chrono::seconds(1);
   }

   return std::chrono::seconds(2);
 }
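
For reference, the schedule above polls faster when the master predicts a short wait. A hedged JS mirror of the same mapping (the C++ above is authoritative):

// waitTime is the master's estimate in seconds; result is the poll interval
function sleepTimeFromWaitTime(waitTime) {
  if (waitTime < 1.0)  { return 100; }   // 100 ms
  if (waitTime < 5.0)  { return 200; }   // 200 ms
  if (waitTime < 20.0) { return 500; }   // 500 ms
  if (waitTime < 60.0) { return 1000; }  // 1 s
  return 2000;                           // 2 s
}
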
@@ -177,9 +178,9 @@ Result DatabaseInitialSyncer::runWithInventory(bool incremental,
           "not supported with a master < "
           "ArangoDB 2.7";
       incremental = false;
-    } 
+    }
   }

   r = sendFlush();
   if (r.fail()) {
     return r;
@@ -219,7 +220,7 @@ Result DatabaseInitialSyncer::runWithInventory(bool incremental,
         "collections section is missing from response");
-    } 
+    }
   }

   if (!_config.applier._skipCreateDrop &&
       _config.applier._restrictCollections.empty()) {
     r = handleViewCreation(views); // no requests to master
@@ -319,7 +320,7 @@ Result DatabaseInitialSyncer::sendFlush() {
   if (isAborted()) {
     return Result(TRI_ERROR_REPLICATION_APPLIER_STOPPED);
   }
- 
+
   std::string const& engineName = EngineSelectorFeature::ENGINE->typeName();
   if (engineName == "rocksdb" && _state.master.engine == engineName) {
     // no WAL flush required for RocksDB. this is only relevant for MMFiles
@@ -465,7 +466,7 @@ Result DatabaseInitialSyncer::parseCollectionDump(
       if (r.fail()) {
         return r;
       }
- 
+
       ++markersProcessed;
     }
   }
@@ -476,16 +477,16 @@ Result DatabaseInitialSyncer::parseCollectionDump(
 /// @brief order a new chunk from the /dump API
 void DatabaseInitialSyncer::fetchDumpChunk(std::shared_ptr<Syncer::JobSynchronizer> sharedStatus,
-                                           std::string const& baseUrl, 
-                                           arangodb::LogicalCollection* coll, 
+                                           std::string const& baseUrl,
+                                           arangodb::LogicalCollection* coll,
                                            std::string const& leaderColl,
                                            InitialSyncerDumpStats& stats,
-                                           int batch, 
-                                           TRI_voc_tick_t fromTick, 
+                                           int batch,
+                                           TRI_voc_tick_t fromTick,
                                            uint64_t chunkSize) {
   using ::arangodb::basics::StringUtils::itoa;

   if (isAborted()) {
     sharedStatus->gotResponse(Result(TRI_ERROR_REPLICATION_APPLIER_STOPPED));
     return;
@@ -499,10 +500,10 @@ void DatabaseInitialSyncer::fetchDumpChunk(std::shared_ptr<Syncer::JobSynchroniz
   std::string const& engineName = EngineSelectorFeature::ENGINE->typeName();
   bool const useAsync = (batch == 1 &&
                          (engineName != "rocksdb" || _state.master.engine != engineName));
- 
-  try { 
+
+  try {
     std::string const typeString = (coll->type() == TRI_COL_TYPE_EDGE ? "edge" : "document");

     if (!_config.isChild()) {
       _config.batch.extend(_config.connection, _config.progress);
       _config.barrier.extend(_config.connection);
@@ -532,7 +533,7 @@ void DatabaseInitialSyncer::fetchDumpChunk(std::shared_ptr<Syncer::JobSynchroniz
       headers[StaticStrings::Accept] = StaticStrings::MimeTypeVPack;
     }
 #endif
- 
+
     _config.progress.set(std::string("fetching master collection dump for collection '") +
                          coll->name() + "', type: " + typeString + ", id: " +
                          leaderColl + ", batch " + itoa(batch) + ", url: " + url);
@@ -607,7 +608,7 @@ void DatabaseInitialSyncer::fetchDumpChunk(std::shared_ptr<Syncer::JobSynchroniz
           sharedStatus->gotResponse(Result(TRI_ERROR_REPLICATION_APPLIER_STOPPED));
           return;
         }
- 
+
         std::chrono::milliseconds sleepTime = ::sleepTimeFromWaitTime(waitTime);
         std::this_thread::sleep_for(sleepTime);
       }
@@ -615,7 +616,7 @@ void DatabaseInitialSyncer::fetchDumpChunk(std::shared_ptr<Syncer::JobSynchroniz
     }
     stats.waitedForDump += TRI_microtime() - t;
- 
+
     if (replutils::hasFailed(response.get())) {
       // failure
       sharedStatus->gotResponse(replutils::buildHttpError(response.get(), url, _config.connection));
@@ -638,11 +639,11 @@ Result DatabaseInitialSyncer::fetchCollectionDump(
   using ::arangodb::basics::StringUtils::itoa;
   using ::arangodb::basics::StringUtils::uint64;
   using ::arangodb::basics::StringUtils::urlEncode;
- 
+
   if (isAborted()) {
     return Result(TRI_ERROR_REPLICATION_APPLIER_STOPPED);
   }

   std::string const typeString = (coll->type() == TRI_COL_TYPE_EDGE ? "edge" : "document");

   InitialSyncerDumpStats stats;
@@ -653,9 +654,9 @@ Result DatabaseInitialSyncer::fetchCollectionDump(
   std::string baseUrl =
       replutils::ReplicationUrl + "/dump?collection=" + urlEncode(leaderColl) +
       "&batchId=" + std::to_string(_config.batch.id) +
-      "&includeSystem=" + std::string(_config.applier._includeSystem ? "true" : "false") + 
+      "&includeSystem=" + std::string(_config.applier._includeSystem ? "true" : "false") +
       "&serverId=" + _state.localServerIdString;

   if (maxTick > 0) {
     baseUrl += "&to=" + itoa(maxTick + 1);
   }
@@ -666,21 +667,21 @@ Result DatabaseInitialSyncer::fetchCollectionDump(
   uint64_t chunkSize = _config.applier._chunkSize;
   uint64_t bytesReceived = 0;
   uint64_t markersProcessed = 0;

   double const startTime = TRI_microtime();

-  // the shared status will wait in its destructor until all posted 
+  // the shared status will wait in its destructor until all posted
   // requests have been completed/canceled!
   auto self = shared_from_this();
   auto sharedStatus = std::make_shared<Syncer::JobSynchronizer>(self);

   // order initial chunk. this will block until the initial response
   // has arrived
   fetchDumpChunk(sharedStatus, baseUrl, coll, leaderColl, stats, batch, fromTick, chunkSize);

   while (true) {
     std::unique_ptr<httpclient::SimpleHttpResult> dumpResponse;

     // block until we either got a response or were shut down
     Result res = sharedStatus->waitForResponse(dumpResponse);
@@ -691,7 +692,7 @@ Result DatabaseInitialSyncer::fetchCollectionDump(
     // now we have got a response!
     TRI_ASSERT(dumpResponse != nullptr);
- 
+
     if (dumpResponse->hasContentLength()) {
       bytesReceived += dumpResponse->getContentLength();
     }
@@ -730,7 +731,7 @@ Result DatabaseInitialSyncer::fetchCollectionDump(
         checkMore = false;
       }
     }
- 
+
     // increase chunk size for next fetch
     if (chunkSize < ::maxChunkSize) {
       chunkSize = static_cast<uint64_t>(chunkSize * 1.25);
@@ -739,7 +740,7 @@ Result DatabaseInitialSyncer::fetchCollectionDump(
         chunkSize = ::maxChunkSize;
       }
     }
- 
+
     if (checkMore && !isAborted()) {
       // already fetch next batch in the background, by posting the
       // request to the scheduler, which can run it asynchronously
@@ -785,7 +786,7 @@ Result DatabaseInitialSyncer::fetchCollectionDump(
     }

     res = trx.commit();
- 
+
     double applyTime = TRI_microtime() - t;
     stats.waitedForApply += applyTime;
@@ -793,7 +794,7 @@ Result DatabaseInitialSyncer::fetchCollectionDump(
                          coll->name() + "', type: " + typeString + ", id: " +
                          leaderColl + ", batch " + itoa(batch) +
                          ", markers processed: " + itoa(markersProcessed) +
-                         ", bytes received: " + itoa(bytesReceived) + 
+                         ", bytes received: " + itoa(bytesReceived) +
                          ", apply time: " + std::to_string(applyTime) + " s");

     if (!res.ok()) {
@@ -805,16 +806,16 @@ Result DatabaseInitialSyncer::fetchCollectionDump(
       _config.progress.set(std::string("finished initial dump for collection '") + coll->name() +
                            "', type: " + typeString + ", id: " + leaderColl +
                            ", markers processed: " + itoa(markersProcessed) +
-                           ", bytes received: " + itoa(bytesReceived) + 
-                           ", dump requests: " + std::to_string(stats.numDumpRequests) + 
+                           ", bytes received: " + itoa(bytesReceived) +
+                           ", dump requests: " + std::to_string(stats.numDumpRequests) +
                            ", waited for dump: " + std::to_string(stats.waitedForDump) + " s" +
-                           ", apply time: " + std::to_string(stats.waitedForApply) + " s" + 
-                           ", total time: " + std::to_string(TRI_microtime() - startTime) + " s"); 
+                           ", apply time: " + std::to_string(stats.waitedForApply) + " s" +
+                           ", total time: " + std::to_string(TRI_microtime() - startTime) + " s");
       return Result();
     }

     batch++;

     if (isAborted()) {
       return Result(TRI_ERROR_REPLICATION_APPLIER_STOPPED);
     }
@@ -908,7 +909,7 @@ Result DatabaseInitialSyncer::fetchCollectionSync(
       if (isAborted()) {
         return Result(TRI_ERROR_REPLICATION_APPLIER_STOPPED);
       }
- 
+
       std::chrono::milliseconds sleepTime = ::sleepTimeFromWaitTime(waitTime);
       std::this_thread::sleep_for(sleepTime);
     }
@@ -1288,7 +1289,7 @@ Result DatabaseInitialSyncer::handleCollection(VPackSlice const& parameters,
                            " skipped because of configuration");
       return res;
     }
- 
+
     // now create indexes
     TRI_ASSERT(indexes.isArray());
     VPackValueLength const numIdx = indexes.length();
@@ -1516,11 +1517,18 @@ Result DatabaseInitialSyncer::iterateCollections(
   // all ok
   return Result();
 }

 /// @brief create non-existing views locally
 Result DatabaseInitialSyncer::handleViewCreation(VPackSlice const& views) {
   for (VPackSlice slice : VPackArrayIterator(views)) {
-    Result res = createView(vocbase(), slice);
+    // Remove the links from the view slice
+    // This is required since views are created before collections.
+    // Hence, the collection does not exist and the view creation is aborted.
+    // The association views <-> collections is still created via the indexes.
+    auto patchedSlice = VPackCollection::remove(slice, std::vector<std::string>{"links"});
+
+    Result res = createView(vocbase(), patchedSlice.slice());
     if (res.fail()) {
       return res;
     }
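
In effect, the initial sync now creates views from a stripped definition and lets the link definitions stored with the collections' indexes re-establish the association later. A hedged arangosh-style sketch of the same transformation (the real code operates on VPack slices via VPackCollection::remove; names are illustrative):

var db = require("@arangodb").db;

var viewDef = { name: "UnitTestsSyncView", type: "arangosearch",
                links: { someCollection: {} } };

// strip "links" before creating the view, since the linked
// collections have not been created yet at this point
var stripped = Object.assign({}, viewDef);
delete stripped.links;

db._createView(stripped.name, stripped.type, {});
// the view <-> collection association is restored afterwards via
// the link definitions replicated with the collections' indexes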

View File

@@ -81,7 +81,7 @@ Result Indexes::getIndex(LogicalCollection const* collection,
     return Result(TRI_ERROR_ARANGO_INDEX_NOT_FOUND);
   }
- 
+
   VPackBuilder tmp;
   Result res = Indexes::getAll(collection, Index::makeFlags(), false, tmp);
   if (res.ok()) {
@@ -223,7 +223,7 @@ arangodb::Result Indexes::getAll(LogicalCollection const* collection,
         if ((val = figures.get("cacheSize")).isNumber()) {
           cacheSize += val.getNumber<double>();
         }
- 
+
         if ((val = figures.get("cacheUsage")).isNumber()) {
           cacheUsage += val.getNumber<double>();
         }
@@ -454,7 +454,7 @@ Result Indexes::ensureIndex(LogicalCollection* collection,
       return Result(create ? TRI_ERROR_OUT_OF_MEMORY
                            : TRI_ERROR_ARANGO_INDEX_NOT_FOUND);
     }
- 
+
     // flush estimates
     collection->flushClusterIndexEstimates();
@@ -501,7 +501,7 @@ arangodb::Result Indexes::createIndex(LogicalCollection* coll, Index::IndexType
     arangodb::velocypack::Value(sparse)
   );
   props.close();
- 
+
   VPackBuilder ignored;
   return ensureIndex(coll, props.slice(), true, ignored);
 }
@@ -597,7 +597,7 @@ arangodb::Result Indexes::drop(LogicalCollection* collection,
     if (!res.ok()) {
       return res;
     }
- 
+
     // flush estimates
     collection->flushClusterIndexEstimates();

View File

@@ -58,6 +58,20 @@ function IResearchFeatureDDLTestSuite () {
       }
     },

+    testStressAddRemoveViewWithDirectLinks : function() {
+      db._drop("TestCollection0");
+      db._dropView("TestView");
+      db._create("TestCollection0");
+      for (let i = 0; i < 100; ++i) {
+        db._createView("TestView", "arangosearch", {links:{"TestCollection0":{}}});
+        var view = db._view("TestView");
+        assertTrue(null != view);
+        assertEqual(Object.keys(view.properties().links).length, 1);
+        db._dropView("TestView");
+        assertTrue(null == db._view("TestView"));
+      }
+    },
+
     testStressAddRemoveViewWithLink : function() {
       db._drop("TestCollection0");
       db._dropView("TestView");
@@ -161,6 +175,12 @@ function IResearchFeatureDDLTestSuite () {
     assertTrue(Object === properties.links.constructor);
     assertEqual(1, Object.keys(properties.links).length);

+    // create with links
+    db._dropView("TestView");
+    view = db._createView("TestView", "arangosearch", meta);
+    properties = view.properties();
+    assertTrue(Object === properties.links.constructor);
+    assertEqual(1, Object.keys(properties.links).length);

     // consolidate
     db._dropView("TestView");

View File

@@ -74,12 +74,29 @@ function IResearchLinkSuite () {
     ////////////////////////////////////////////////////////////////////////////
     testHandlingCreateWithLinks : function () {
       var meta = { links: { 'testCollection' : { includeAllFields: true } } };
-      var view = db._createView("badView", "arangosearch", meta);
+      var view = db._createView("someView", "arangosearch", meta);
       var links = view.properties().links;
       assertNotEqual(links['testCollection'], undefined);
       view.drop();
     },

+    ////////////////////////////////////////////////////////////////////////////
+    /// @brief don't create view when collections do not exist or the user
+    /// is not allowed to access them.
+    ////////////////////////////////////////////////////////////////////////////
+    testHandlingCreateWithBadLinks : function () {
+      var meta = { links: { 'nonExistentCollection': {}, 'testCollection' : { includeAllFields: true } } };
+      var view;
+      try {
+        view = db._createView("someView", "arangosearch", meta);
+        fail();
+      } catch(e) {
+        assertEqual(ERRORS.ERROR_ARANGO_DATA_SOURCE_NOT_FOUND.code, e.errorNum);
+      }
+      assertNull(db._view("someView"));
+    },
+
     ////////////////////////////////////////////////////////////////////////////
     /// @brief create a view and add/drop link
     ////////////////////////////////////////////////////////////////////////////

View File

@@ -140,7 +140,7 @@ const compare = function(masterFunc, masterFunc2, slaveFuncOngoing, slaveFuncFin
       console.log("slave has caught up. state.lastLogTick:", state.lastLogTick, "slaveState.lastAppliedContinuousTick:", slaveState.state.lastAppliedContinuousTick, "slaveState.lastProcessedContinuousTick:", slaveState.state.lastProcessedContinuousTick);
       break;
     }
- 
+
     if (!printed) {
       console.log("waiting for slave to catch up");
       printed = true;
@@ -160,7 +160,7 @@ function BaseTestConfig() {
   'use strict';

   return {
- 
+
     ////////////////////////////////////////////////////////////////////////////////
     /// @brief test duplicate _key issue and replacement
     ////////////////////////////////////////////////////////////////////////////////
@@ -192,7 +192,7 @@ function BaseTestConfig() {
         }
       );
     },
- 
+
     testSecondaryKeyConflict: function() {
       connectToMaster();
@@ -223,7 +223,7 @@ function BaseTestConfig() {
         }
       );
     },
- 
+
     ////////////////////////////////////////////////////////////////////////////////
     /// @brief test collection creation
     ////////////////////////////////////////////////////////////////////////////////
@@ -705,7 +705,7 @@ function BaseTestConfig() {
         db._create(cn);
         let view = db._createView("UnitTestsSyncView", "arangosearch", {});
         let links = {};
-        links[cn] = { 
+        links[cn] = {
           includeAllFields: true,
           fields: {
             text: { analyzers: [ "text_en" ] }
@@ -720,7 +720,43 @@ function BaseTestConfig() {
         if (!state.arangoSearchEnabled) {
           return;
         }

+        let view = db._view("UnitTestsSyncView");
+        assertTrue(view !== null);
+        let props = view.properties();
+        assertEqual(Object.keys(props.links).length, 1);
+        assertTrue(props.hasOwnProperty("links"));
+        assertTrue(props.links.hasOwnProperty(cn));
+      },
+      {}
+      );
+    },
+
+    testViewCreateWithLinks: function() {
+      connectToMaster();
+
+      compare(
+        function() {},
+        function(state) {
+          try {
+            db._create(cn);
+            let links = {};
+            links[cn] = {
+              includeAllFields: true,
+              fields: {
+                text: { analyzers: [ "text_en" ] }
+              }
+            };
+            let view = db._createView("UnitTestsSyncView", "arangosearch", {"links": links});
+            state.arangoSearchEnabled = true;
+          } catch (err) { }
+        },
+        function() {},
+        function(state) {
+          if (!state.arangoSearchEnabled) {
+            return;
+          }
+
         let view = db._view("UnitTestsSyncView");
         assertTrue(view !== null);
         let props = view.properties();
@@ -741,7 +777,7 @@ function BaseTestConfig() {
         db._create(cn);
         let view = db._createView("UnitTestsSyncView", "arangosearch", {});
         let links = {};
-        links[cn] = { 
+        links[cn] = {
           includeAllFields: true,
           fields: {
             text: { analyzers: [ "text_en" ] }
@@ -770,7 +806,7 @@ function BaseTestConfig() {
         if (!state.arangoSearchEnabled) {
           return;
         }
- 
+
         let view = db._view("UnitTestsSyncViewRenamed");
         assertTrue(view !== null);
         let props = view.properties();
@@ -805,7 +841,7 @@ function BaseTestConfig() {
         if (!state.arangoSearchEnabled) {
           return;
         }
- 
+
         let view = db._view("UnitTestsSyncView");
         assertTrue(view === null);
       },
@@ -903,7 +939,7 @@ function ReplicationOtherDBSuite() {
       db._dropDatabase(dbName);
     } catch (e) {
     }
- 
+
     db._createDatabase(dbName);
     connectToMaster();
@@ -931,7 +967,7 @@ function ReplicationOtherDBSuite() {
       replication.applier.forget();
     } catch (e) {
     }
- 
+
     db._useDatabase("_system");
     try {
       db._dropDatabase(dbName);
@@ -1051,9 +1087,9 @@ function ReplicationOtherDBSuite() {
     // Collection should be empty
     assertEqual(0, collectionCount(cn));

-    // now test if the replication is actually 
+    // now test if the replication is actually
     // switched off
     // Section - Master
     connectToMaster();

     // Insert some documents
@@ -1061,7 +1097,7 @@ function ReplicationOtherDBSuite() {
     // Flush wal to trigger replication
     internal.wal.flush(true, true);
-    const lastLogTick = replication.logger.state().state.lastLogTick; 
+    const lastLogTick = replication.logger.state().state.lastLogTick;

     // Section - Slave
     connectToSlave();
@@ -1130,7 +1166,7 @@ function ReplicationOtherDBSuite() {
     }

     // Now test if the Slave did replicate the new database directly...
-    assertEqual(50, collectionCount(cn), 
+    assertEqual(50, collectionCount(cn),
                 "The slave inserted the new collection data into the old one, it skipped the drop.");
   };