mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of https://github.com/arangodb/arangodb into bug-fix/trav-filter-optimizer-tests
Commit: 8fa8bbe5fd
@@ -2893,7 +2893,7 @@ void MMFilesRestReplicationHandler::handleCommandMakeSlave() {
   std::string errorMsg = "";
   {
     InitialSyncer syncer(_vocbase, &config, config._restrictCollections,
-                         restrictType, false);
+                         restrictType, false, false);

     res = TRI_ERROR_NO_ERROR;
@@ -3014,7 +3014,7 @@ void MMFilesRestReplicationHandler::handleCommandSync() {
   MMFilesLogfileManager::instance()->waitForSync(5.0);

   InitialSyncer syncer(_vocbase, &config, restrictCollections, restrictType,
-                       verbose);
+                       verbose, false);

   std::string errorMsg = "";
@@ -256,7 +256,7 @@ retry:
   try {
     InitialSyncer syncer(
         _vocbase, &_configuration, _configuration._restrictCollections,
-        _configuration._restrictType, _configuration._verbose);
+        _configuration._restrictType, _configuration._verbose, false);

     res = syncer.run(errorMsg, _configuration._incremental);
@@ -65,7 +65,7 @@ InitialSyncer::InitialSyncer(
     TRI_vocbase_t* vocbase,
     TRI_replication_applier_configuration_t const* configuration,
     std::unordered_map<std::string, bool> const& restrictCollections,
-    std::string const& restrictType, bool verbose)
+    std::string const& restrictType, bool verbose, bool skipCreateDrop)
     : Syncer(vocbase, configuration),
       _progress("not started"),
       _restrictCollections(restrictCollections),
@@ -77,7 +77,8 @@ InitialSyncer::InitialSyncer(
       _includeSystem(false),
       _chunkSize(configuration->_chunkSize),
       _verbose(verbose),
-      _hasFlushed(false) {
+      _hasFlushed(false),
+      _skipCreateDrop(skipCreateDrop) {
   if (_chunkSize == 0) {
     _chunkSize = (uint64_t)2 * 1024 * 1024;  // 2 mb
   } else if (_chunkSize < 128 * 1024) {
@@ -1132,6 +1133,10 @@ int InitialSyncer::handleCollection(VPackSlice const& parameters,
     }
   } else {
     // regular collection
+    if (_skipCreateDrop) {
+      setProgress("dropping " + collectionMsg + " skipped because of configuration");
+      return TRI_ERROR_NO_ERROR;
+    }
     setProgress("dropping " + collectionMsg);

     int res = _vocbase->dropCollection(col, true, -1.0);
@@ -1160,7 +1165,13 @@ int InitialSyncer::handleCollection(VPackSlice const& parameters,
     }
   }

-  std::string const progress = "creating " + collectionMsg;
+  std::string progress = "creating " + collectionMsg;
+  if (_skipCreateDrop) {
+    progress += " skipped because of configuration";
+    setProgress(progress.c_str());
+    return TRI_ERROR_NO_ERROR;
+  }
+
   setProgress(progress.c_str());

   int res = createCollection(parameters, &col);
@@ -98,7 +98,7 @@ class InitialSyncer : public Syncer {
  public:
   InitialSyncer(TRI_vocbase_t*, TRI_replication_applier_configuration_t const*,
                 std::unordered_map<std::string, bool> const&,
-                std::string const&, bool verbose);
+                std::string const&, bool verbose, bool skipCreateDrop);

   ~InitialSyncer();
@@ -331,6 +331,11 @@ class InitialSyncer : public Syncer {

   static size_t const MaxChunkSize;

+  // In the cluster case it is a total no-go to create or drop collections
+  // here, because this HAS to be handled in the schmutz. Otherwise it
+  // forgets who the leader was, etc.
+  bool _skipCreateDrop;
+
 };
 }
@@ -381,7 +381,7 @@ static std::shared_ptr<Index> findIndex(

   if (!value.isString()) {
     // Compatibility with old v8-vocindex.
-    THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
+    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid index type definition");
   }

   std::string tmp = value.copyString();
@@ -1369,7 +1369,7 @@ RocksDBOperationResult RocksDBCollection::insertDocument(
   for (std::shared_ptr<Index> const& idx : _indexes) {
     innerRes.reset(idx->insert(trx, revisionId, doc, false));

-    // in case of no-memory, return immediately
+    // in case of OOM return immediately
     if (innerRes.is(TRI_ERROR_OUT_OF_MEMORY)) {
       return innerRes;
     }
@@ -1433,7 +1433,7 @@ RocksDBOperationResult RocksDBCollection::removeDocument(
     Result tmpres = idx->remove(trx, revisionId, doc, false);
     resInner.reset(tmpres);

-    // in case of no-memory, return immediately
+    // in case of OOM return immediately
     if (resInner.is(TRI_ERROR_OUT_OF_MEMORY)) {
       return resInner;
     }
@@ -57,42 +57,38 @@ static int ProcessIndexFields(VPackSlice const definition,
   TRI_ASSERT(builder.isOpenObject());
   std::unordered_set<StringRef> fields;

-  try {
-    VPackSlice fieldsSlice = definition.get("fields");
-    builder.add(VPackValue("fields"));
-    builder.openArray();
-    if (fieldsSlice.isArray()) {
-      // "fields" is a list of fields
-      for (auto const& it : VPackArrayIterator(fieldsSlice)) {
-        if (!it.isString()) {
-          return TRI_ERROR_BAD_PARAMETER;
-        }
-
-        StringRef f(it);
-
-        if (f.empty() || (create && f == StaticStrings::IdString)) {
-          // accessing internal attributes is disallowed
-          return TRI_ERROR_BAD_PARAMETER;
-        }
-
-        if (fields.find(f) != fields.end()) {
-          // duplicate attribute name
-          return TRI_ERROR_BAD_PARAMETER;
-        }
-
-        fields.insert(f);
-        builder.add(it);
+  VPackSlice fieldsSlice = definition.get("fields");
+  builder.add(VPackValue("fields"));
+  builder.openArray();
+  if (fieldsSlice.isArray()) {
+    // "fields" is a list of fields
+    for (auto const& it : VPackArrayIterator(fieldsSlice)) {
+      if (!it.isString()) {
+        return TRI_ERROR_BAD_PARAMETER;
       }
-    }

-    if (fields.empty() || (numFields > 0 && (int)fields.size() != numFields)) {
-      return TRI_ERROR_BAD_PARAMETER;
-    }
+      StringRef f(it);

-    builder.close();
-  } catch (...) {
-    return TRI_ERROR_OUT_OF_MEMORY;
+      if (f.empty() || (create && f == StaticStrings::IdString)) {
+        // accessing internal attributes is disallowed
+        return TRI_ERROR_BAD_PARAMETER;
+      }
+
+      if (fields.find(f) != fields.end()) {
+        // duplicate attribute name
+        return TRI_ERROR_BAD_PARAMETER;
+      }
+
+      fields.insert(f);
+      builder.add(it);
+    }
   }
+
+  if (fields.empty() || (numFields > 0 && (int)fields.size() != numFields)) {
+    return TRI_ERROR_BAD_PARAMETER;
+  }
+
+  builder.close();
   return TRI_ERROR_NO_ERROR;
 }
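For context: with the try/catch gone, ProcessIndexFields still enforces the same rules, so "fields" must be a non-empty array of unique, non-empty attribute names, and `_id` is rejected when creating an index. A minimal arangosh sketch of how these checks surface (the collection name "demo" is illustrative, not part of this change):

    // assumes an arangosh session and an existing collection named "demo"
    var db = require("@arangodb").db;
    db.demo.ensureIndex({ type: "hash", fields: ["name", "age"] });  // accepted

    // each of the following violates a check in ProcessIndexFields and
    // is rejected with a "bad parameter" error:
    // db.demo.ensureIndex({ type: "hash", fields: [] });          // empty field list
    // db.demo.ensureIndex({ type: "hash", fields: ["a", "a"] });  // duplicate attribute
    // db.demo.ensureIndex({ type: "hash", fields: ["_id"] });     // internal attribute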
@@ -293,77 +289,65 @@ int RocksDBIndexFactory::enhanceIndexDefinition(VPackSlice const definition,
   }

   TRI_ASSERT(enhanced.isEmpty());

+  VPackObjectBuilder b(&enhanced);
+  current = definition.get("id");
+  uint64_t id = 0;
+  if (current.isNumber()) {
+    id = current.getNumericValue<uint64_t>();
+  } else if (current.isString()) {
+    id = basics::StringUtils::uint64(current.copyString());
+  }
+  if (id > 0) {
+    enhanced.add("id", VPackValue(std::to_string(id)));
+  }
+
+  if (create && !isCoordinator) {
+    if (!definition.hasKey("objectId")) {
+      enhanced.add("objectId",
+                   VPackValue(std::to_string(TRI_NewTickServer())));
+    }
+  }
+
+  enhanced.add("type", VPackValue(Index::oldtypeName(type)));
+
   int res = TRI_ERROR_INTERNAL;

-  try {
-    VPackObjectBuilder b(&enhanced);
-    current = definition.get("id");
-    uint64_t id = 0;
-    if (current.isNumber()) {
-      id = current.getNumericValue<uint64_t>();
-    } else if (current.isString()) {
-      id = basics::StringUtils::uint64(current.copyString());
-    }
-    if (id > 0) {
-      enhanced.add("id", VPackValue(std::to_string(id)));
+  switch (type) {
+    case Index::TRI_IDX_TYPE_PRIMARY_INDEX:
+    case Index::TRI_IDX_TYPE_EDGE_INDEX: {
+      break;
     }

-    if (create && !isCoordinator) {
-      if (!definition.hasKey("objectId")) {
-        enhanced.add("objectId",
-                     VPackValue(std::to_string(TRI_NewTickServer())));
-      }
-    }
-    // breaks lookupIndex()
-    /*else {
-      if (!definition.hasKey("objectId")) {
-        // objectId missing, but must be present
-        return TRI_ERROR_INTERNAL;
-      }
-    }*/
+    case Index::TRI_IDX_TYPE_GEO1_INDEX:
+      res = EnhanceJsonIndexGeo1(definition, enhanced, create);
+      break;

-    enhanced.add("type", VPackValue(Index::oldtypeName(type)));
+    case Index::TRI_IDX_TYPE_GEO2_INDEX:
+      res = EnhanceJsonIndexGeo2(definition, enhanced, create);
+      break;

-    switch (type) {
-      case Index::TRI_IDX_TYPE_PRIMARY_INDEX:
-      case Index::TRI_IDX_TYPE_EDGE_INDEX: {
-        break;
-      }
+    case Index::TRI_IDX_TYPE_HASH_INDEX:
+      res = EnhanceJsonIndexHash(definition, enhanced, create);
+      break;

-      case Index::TRI_IDX_TYPE_GEO1_INDEX:
-        res = EnhanceJsonIndexGeo1(definition, enhanced, create);
-        break;
+    case Index::TRI_IDX_TYPE_SKIPLIST_INDEX:
+      res = EnhanceJsonIndexSkiplist(definition, enhanced, create);
+      break;

-      case Index::TRI_IDX_TYPE_GEO2_INDEX:
-        res = EnhanceJsonIndexGeo2(definition, enhanced, create);
-        break;
+    case Index::TRI_IDX_TYPE_PERSISTENT_INDEX:
+      res = EnhanceJsonIndexPersistent(definition, enhanced, create);
+      break;

-      case Index::TRI_IDX_TYPE_HASH_INDEX:
-        res = EnhanceJsonIndexHash(definition, enhanced, create);
-        break;
+    case Index::TRI_IDX_TYPE_FULLTEXT_INDEX:
+      res = EnhanceJsonIndexFulltext(definition, enhanced, create);
+      break;

-      case Index::TRI_IDX_TYPE_SKIPLIST_INDEX:
-        res = EnhanceJsonIndexSkiplist(definition, enhanced, create);
-        break;
-
-      case Index::TRI_IDX_TYPE_PERSISTENT_INDEX:
-        res = EnhanceJsonIndexPersistent(definition, enhanced, create);
-        break;
-
-      case Index::TRI_IDX_TYPE_FULLTEXT_INDEX:
-        res = EnhanceJsonIndexFulltext(definition, enhanced, create);
-        break;
-
-      case Index::TRI_IDX_TYPE_UNKNOWN:
-      default: {
-        res = TRI_ERROR_BAD_PARAMETER;
-        break;
-      }
+    case Index::TRI_IDX_TYPE_UNKNOWN:
+    default: {
+      res = TRI_ERROR_BAD_PARAMETER;
+      break;
     }
-  } catch (...) {
-    // TODO Check for different type of Errors
-    return TRI_ERROR_OUT_OF_MEMORY;
   }

   return res;
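The switch above is what ties user-facing index types to their definition enhancers; an unknown type now surfaces as a "bad parameter" error instead of being swallowed by the removed catch-all. A hedged arangosh sketch (the collection name is illustrative):

    // assumes an arangosh session and an existing collection named "demo"
    var db = require("@arangodb").db;
    db.demo.ensureIndex({ type: "skiplist", fields: ["a"] });  // EnhanceJsonIndexSkiplist
    db.demo.ensureIndex({ type: "fulltext", fields: ["t"], minLength: 3 });  // EnhanceJsonIndexFulltext
    db.demo.ensureIndex({ type: "geo", fields: ["loc"] });     // routed to the geo1 enhancer
    // db.demo.ensureIndex({ type: "nosuchtype", fields: ["a"] });  // rejected: bad parameter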
@@ -1489,7 +1489,7 @@ void RocksDBRestReplicationHandler::handleCommandMakeSlave() {
   std::string errorMsg = "";
   {
     InitialSyncer syncer(_vocbase, &config, config._restrictCollections,
-                         restrictType, false);
+                         restrictType, false, false);

     res = TRI_ERROR_NO_ERROR;
@@ -1607,7 +1607,7 @@ void RocksDBRestReplicationHandler::handleCommandSync() {
   config._useCollectionId = useCollectionId;

   InitialSyncer syncer(_vocbase, &config, restrictCollections, restrictType,
-                       verbose);
+                       verbose, false);

   std::string errorMsg = "";
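Note that both REST replication handlers (MMFiles above and RocksDB here) hard-code the new constructor argument to false, so skipCreateDrop cannot be switched on through the HTTP sync API; it is only reachable through the JS binding further down. A hedged arangosh sketch of the REST call, for comparison (endpoint values are illustrative):

    // assumes arangosh is connected to the receiving server
    var arango = require("internal").arango;
    arango.PUT("/_api/replication/sync",
               JSON.stringify({ endpoint: "tcp://master.example:8529",
                                database: "_system", incremental: true }));
    // no skipCreateDrop attribute is read here; the handler always passes false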
@@ -532,13 +532,10 @@ Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx,
   std::vector<RocksDBKey> elements;
   std::vector<uint64_t> hashes;
   int res;
-  try {
+  {
+    // rethrow all types of exceptions from here...
     transaction::BuilderLeaser leased(trx);
     res = fillElement(*(leased.get()), revisionId, doc, elements, hashes);
-  } catch (basics::Exception const& ex) {
-    res = ex.code();
-  } catch (...) {
-    res = TRI_ERROR_OUT_OF_MEMORY;
   }

   if (res != TRI_ERROR_NO_ERROR) {
@@ -604,14 +601,11 @@ Result RocksDBVPackIndex::removeInternal(transaction::Methods* trx,
   std::vector<uint64_t> hashes;

   int res;
-  try {
+  {
+    // rethrow all types of exceptions from here...
     transaction::BuilderLeaser leased(trx);
     res = fillElement(*(leased.get()), revisionId, doc, elements, hashes);
-  } catch (basics::Exception const& ex) {
-    res = ex.code();
-  } catch (...) {
-    res = TRI_ERROR_OUT_OF_MEMORY;
   }

   if (res != TRI_ERROR_NO_ERROR) {
     return IndexResult(res, this);
@@ -254,6 +254,11 @@ static void JS_SynchronizeReplication(
     verbose = TRI_ObjectToBoolean(object->Get(TRI_V8_ASCII_STRING("verbose")));
   }

+  bool skipCreateDrop = false;
+  if (object->Has(TRI_V8_ASCII_STRING("skipCreateDrop"))) {
+    skipCreateDrop = TRI_ObjectToBoolean(object->Get(TRI_V8_ASCII_STRING("skipCreateDrop")));
+  }
+
   if (endpoint.empty()) {
     TRI_V8_THROW_EXCEPTION_PARAMETER("<endpoint> must be a valid endpoint");
   }
@@ -319,7 +324,7 @@ static void JS_SynchronizeReplication(

   std::string errorMsg = "";
   InitialSyncer syncer(vocbase, &config, restrictCollections, restrictType,
-                       verbose);
+                       verbose, skipCreateDrop);
   if (!leaderId.empty()) {
     syncer.setLeaderId(leaderId);
   }
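This binding is what makes the flag usable: it reads skipCreateDrop from the options object and forwards it to the syncer, and the cluster shard-synchronization code at the end of this diff sets it to true. A minimal sketch, assuming an arangosh session on the receiving server (endpoint and credentials are illustrative):

    var rep = require("@arangodb/replication");
    rep.sync({
      endpoint: "tcp://master.example:8529",  // server to replicate from
      username: "root",
      password: "",
      verbose: false,
      skipCreateDrop: true  // do not create or drop local collections during sync
    });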
@@ -761,7 +761,8 @@ static void JS_ParseAql(v8::FunctionCallbackInfo<v8::Value> const& args) {
     for (auto const& elem : parseResult.bindParameters) {
       bindVars->Set(i++, TRI_V8_STD_STRING((elem)));
     }
-    result->Set(TRI_V8_ASCII_STRING("parameters"), bindVars);
+    result->Set(TRI_V8_ASCII_STRING("parameters"), bindVars);  // parameters is deprecated
+    result->Set(TRI_V8_ASCII_STRING("bindVars"), bindVars);
   }

   result->Set(TRI_V8_ASCII_STRING("ast"),
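With this change the parse result reports bind parameter names under both keys, keeping "parameters" as a deprecated alias. A sketch of how this looks from arangosh (the query is illustrative):

    var db = require("@arangodb").db;
    var parsed = db._createStatement({
      query: "FOR d IN demo FILTER d.x == @val RETURN d"
    }).parse();
    parsed.bindVars;    // [ "val" ]  -- the new, preferred key
    parsed.parameters;  // [ "val" ]  -- deprecated alias, kept for compatibility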
@@ -545,7 +545,7 @@ function synchronizeOneShard (database, shard, planId, leader) {
     let startTime = new Date();
     sy = rep.syncCollection(shard,
       { endpoint: ep, incremental: true, keepBarrier: true,
-        useCollectionId: false, leaderId: leader });
+        useCollectionId: false, leaderId: leader, skipCreateDrop: true });
     let endTime = new Date();
     let longSync = false;
     if (endTime - startTime > 5000) {