
Merge branch 'devel' of https://github.com/arangodb/arangodb into bug-fix/trav-filter-optimizer-tests

Frank Celler 2017-07-28 15:20:55 +02:00
commit 8fa8bbe5fd
11 changed files with 124 additions and 124 deletions


@@ -2893,7 +2893,7 @@ void MMFilesRestReplicationHandler::handleCommandMakeSlave() {
   std::string errorMsg = "";
   {
     InitialSyncer syncer(_vocbase, &config, config._restrictCollections,
-                         restrictType, false);
+                         restrictType, false, false);
     res = TRI_ERROR_NO_ERROR;
@@ -3014,7 +3014,7 @@ void MMFilesRestReplicationHandler::handleCommandSync() {
   MMFilesLogfileManager::instance()->waitForSync(5.0);
   InitialSyncer syncer(_vocbase, &config, restrictCollections, restrictType,
-                       verbose);
+                       verbose, false);
   std::string errorMsg = "";


@@ -256,7 +256,7 @@ retry:
   try {
     InitialSyncer syncer(
         _vocbase, &_configuration, _configuration._restrictCollections,
-        _configuration._restrictType, _configuration._verbose);
+        _configuration._restrictType, _configuration._verbose, false);
     res = syncer.run(errorMsg, _configuration._incremental);


@@ -65,7 +65,7 @@ InitialSyncer::InitialSyncer(
     TRI_vocbase_t* vocbase,
     TRI_replication_applier_configuration_t const* configuration,
     std::unordered_map<std::string, bool> const& restrictCollections,
-    std::string const& restrictType, bool verbose)
+    std::string const& restrictType, bool verbose, bool skipCreateDrop)
     : Syncer(vocbase, configuration),
       _progress("not started"),
       _restrictCollections(restrictCollections),
@@ -77,7 +77,8 @@ InitialSyncer::InitialSyncer(
       _includeSystem(false),
       _chunkSize(configuration->_chunkSize),
       _verbose(verbose),
-      _hasFlushed(false) {
+      _hasFlushed(false),
+      _skipCreateDrop(skipCreateDrop) {
   if (_chunkSize == 0) {
     _chunkSize = (uint64_t)2 * 1024 * 1024;  // 2 MB
   } else if (_chunkSize < 128 * 1024) {
@@ -1132,6 +1133,10 @@ int InitialSyncer::handleCollection(VPackSlice const& parameters,
     }
   } else {
     // regular collection
+    if (_skipCreateDrop) {
+      setProgress("dropping " + collectionMsg + " skipped because of configuration");
+      return TRI_ERROR_NO_ERROR;
+    }
     setProgress("dropping " + collectionMsg);
     int res = _vocbase->dropCollection(col, true, -1.0);
@@ -1160,7 +1165,13 @@ int InitialSyncer::handleCollection(VPackSlice const& parameters,
     }
   }
-  std::string const progress = "creating " + collectionMsg;
+  std::string progress = "creating " + collectionMsg;
+  if (_skipCreateDrop) {
+    progress += " skipped because of configuration";
+    setProgress(progress.c_str());
+    return TRI_ERROR_NO_ERROR;
+  }
   setProgress(progress.c_str());
   int res = createCollection(parameters, &col);


@@ -98,7 +98,7 @@ class InitialSyncer : public Syncer {
  public:
   InitialSyncer(TRI_vocbase_t*, TRI_replication_applier_configuration_t const*,
                 std::unordered_map<std::string, bool> const&,
-                std::string const&, bool verbose);
+                std::string const&, bool verbose, bool skipCreateDrop);
   ~InitialSyncer();
@@ -331,6 +331,11 @@ class InitialSyncer : public Syncer {
   static size_t const MaxChunkSize;
+  // In the cluster case it is a total NOGO to create or drop collections,
+  // because this HAS to be handled in the schmutz; otherwise it forgets
+  // who the leader was, etc.
+  bool _skipCreateDrop;
 };
}
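
The new comment above captures the motivation for the whole commit: in a cluster, creating or dropping collections during shard synchronization must stay with the cluster-side maintenance code ("the schmutz"), so the syncer can now be told to skip both. Below is a minimal construction sketch against the new signature; it is illustrative only, with vocbase and config standing for values the real call sites already own, and the "include"/"exclude" restrictType values assumed from the replication configuration:

    // Sketch, not part of the commit: the sixth argument is the new
    // skipCreateDrop flag. All call sites touched in this commit pass
    // false, except the cluster shard-synchronization path.
    std::unordered_map<std::string, bool> restrictCollections;  // empty: no restriction
    std::string restrictType;  // "", or "include"/"exclude" when restricting

    InitialSyncer syncer(vocbase, &config, restrictCollections, restrictType,
                         /*verbose*/ false,
                         /*skipCreateDrop*/ true);  // cluster case: skip create/drop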


@@ -381,7 +381,7 @@ static std::shared_ptr<Index> findIndex(
   if (!value.isString()) {
     // Compatibility with old v8-vocindex.
-    THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
+    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid index type definition");
   }
   std::string tmp = value.copyString();
@@ -1369,7 +1369,7 @@ RocksDBOperationResult RocksDBCollection::insertDocument(
   for (std::shared_ptr<Index> const& idx : _indexes) {
     innerRes.reset(idx->insert(trx, revisionId, doc, false));
-    // in case of no-memory, return immediately
+    // in case of OOM return immediately
     if (innerRes.is(TRI_ERROR_OUT_OF_MEMORY)) {
       return innerRes;
     }
@@ -1433,7 +1433,7 @@ RocksDBOperationResult RocksDBCollection::removeDocument(
     Result tmpres = idx->remove(trx, revisionId, doc, false);
     resInner.reset(tmpres);
-    // in case of no-memory, return immediately
+    // in case of OOM return immediately
     if (resInner.is(TRI_ERROR_OUT_OF_MEMORY)) {
       return resInner;
     }


@@ -57,7 +57,6 @@ static int ProcessIndexFields(VPackSlice const definition,
   TRI_ASSERT(builder.isOpenObject());
   std::unordered_set<StringRef> fields;
-  try {
   VPackSlice fieldsSlice = definition.get("fields");
   builder.add(VPackValue("fields"));
   builder.openArray();
@@ -90,9 +89,6 @@ static int ProcessIndexFields(VPackSlice const definition,
     }
     builder.close();
-  } catch (...) {
-    return TRI_ERROR_OUT_OF_MEMORY;
-  }
   return TRI_ERROR_NO_ERROR;
 }
@@ -293,9 +289,7 @@ int RocksDBIndexFactory::enhanceIndexDefinition(VPackSlice const definition,
   }
   TRI_ASSERT(enhanced.isEmpty());
-  int res = TRI_ERROR_INTERNAL;
-  try {
   VPackObjectBuilder b(&enhanced);
   current = definition.get("id");
   uint64_t id = 0;
@@ -314,16 +308,11 @@ int RocksDBIndexFactory::enhanceIndexDefinition(VPackSlice const definition,
                    VPackValue(std::to_string(TRI_NewTickServer())));
     }
   }
-  // breaks lookupIndex()
-  /*else {
-    if (!definition.hasKey("objectId")) {
-      // objectId missing, but must be present
-      return TRI_ERROR_INTERNAL;
-    }
-  }*/
   enhanced.add("type", VPackValue(Index::oldtypeName(type)));
+  int res = TRI_ERROR_INTERNAL;
   switch (type) {
     case Index::TRI_IDX_TYPE_PRIMARY_INDEX:
     case Index::TRI_IDX_TYPE_EDGE_INDEX: {
@@ -361,11 +350,6 @@ int RocksDBIndexFactory::enhanceIndexDefinition(VPackSlice const definition,
     }
   }
-  } catch (...) {
-    // TODO Check for different type of Errors
-    return TRI_ERROR_OUT_OF_MEMORY;
-  }
   return res;
 }


@@ -1489,7 +1489,7 @@ void RocksDBRestReplicationHandler::handleCommandMakeSlave() {
   std::string errorMsg = "";
   {
     InitialSyncer syncer(_vocbase, &config, config._restrictCollections,
-                         restrictType, false);
+                         restrictType, false, false);
     res = TRI_ERROR_NO_ERROR;
@@ -1607,7 +1607,7 @@ void RocksDBRestReplicationHandler::handleCommandSync() {
   config._useCollectionId = useCollectionId;
   InitialSyncer syncer(_vocbase, &config, restrictCollections, restrictType,
-                       verbose);
+                       verbose, false);
   std::string errorMsg = "";


@@ -532,13 +532,10 @@ Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx,
   std::vector<RocksDBKey> elements;
   std::vector<uint64_t> hashes;
   int res;
-  try {
+  {
+    // rethrow all types of exceptions from here...
     transaction::BuilderLeaser leased(trx);
     res = fillElement(*(leased.get()), revisionId, doc, elements, hashes);
-  } catch (basics::Exception const& ex) {
-    res = ex.code();
-  } catch (...) {
-    res = TRI_ERROR_OUT_OF_MEMORY;
   }
   if (res != TRI_ERROR_NO_ERROR) {
@@ -604,13 +601,10 @@ Result RocksDBVPackIndex::removeInternal(transaction::Methods* trx,
   std::vector<uint64_t> hashes;
   int res;
-  try {
+  {
+    // rethrow all types of exceptions from here...
     transaction::BuilderLeaser leased(trx);
     res = fillElement(*(leased.get()), revisionId, doc, elements, hashes);
-  } catch (basics::Exception const& ex) {
-    res = ex.code();
-  } catch (...) {
-    res = TRI_ERROR_OUT_OF_MEMORY;
   }
   if (res != TRI_ERROR_NO_ERROR) {


@@ -254,6 +254,11 @@ static void JS_SynchronizeReplication(
     verbose = TRI_ObjectToBoolean(object->Get(TRI_V8_ASCII_STRING("verbose")));
   }
+  bool skipCreateDrop = false;
+  if (object->Has(TRI_V8_ASCII_STRING("skipCreateDrop"))) {
+    skipCreateDrop =
+        TRI_ObjectToBoolean(object->Get(TRI_V8_ASCII_STRING("skipCreateDrop")));
+  }
   if (endpoint.empty()) {
     TRI_V8_THROW_EXCEPTION_PARAMETER("<endpoint> must be a valid endpoint");
   }
@@ -319,7 +324,7 @@ static void JS_SynchronizeReplication(
   std::string errorMsg = "";
   InitialSyncer syncer(vocbase, &config, restrictCollections, restrictType,
-                       verbose);
+                       verbose, skipCreateDrop);
   if (!leaderId.empty()) {
     syncer.setLeaderId(leaderId);
   }
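
The new block above follows the option-reading pattern already used for verbose: probe the V8 options object for a key, then coerce the value to a boolean. For illustration, the same pattern factored into a helper; this is hypothetical and not part of the commit, assuming the v8 isolate and the TRI_ObjectToBoolean helper that this file already uses:

    // Hypothetical helper: read an optional boolean from the V8 options
    // object, falling back to a default when the key is absent.
    static bool getBooleanOption(v8::Isolate* isolate,
                                 v8::Handle<v8::Object> object,
                                 char const* name, bool defaultValue) {
      v8::Handle<v8::String> key = v8::String::NewFromUtf8(isolate, name);
      if (object->Has(key)) {
        return TRI_ObjectToBoolean(object->Get(key));
      }
      return defaultValue;
    }

    // Usage, equivalent to the hunk above:
    // bool skipCreateDrop = getBooleanOption(isolate, object, "skipCreateDrop", false);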


@@ -761,7 +761,8 @@ static void JS_ParseAql(v8::FunctionCallbackInfo<v8::Value> const& args) {
     for (auto const& elem : parseResult.bindParameters) {
       bindVars->Set(i++, TRI_V8_STD_STRING((elem)));
     }
-    result->Set(TRI_V8_ASCII_STRING("parameters"), bindVars);
+    result->Set(TRI_V8_ASCII_STRING("parameters"), bindVars);  // "parameters" is deprecated
+    result->Set(TRI_V8_ASCII_STRING("bindVars"), bindVars);
   }
   result->Set(TRI_V8_ASCII_STRING("ast"),


@@ -545,7 +545,7 @@ function synchronizeOneShard (database, shard, planId, leader) {
   let startTime = new Date();
   sy = rep.syncCollection(shard,
     { endpoint: ep, incremental: true, keepBarrier: true,
-      useCollectionId: false, leaderId: leader });
+      useCollectionId: false, leaderId: leader, skipCreateDrop: true });
   let endTime = new Date();
   let longSync = false;
   if (endTime - startTime > 5000) {