diff --git a/arangod/Cluster/ClusterMethods.cpp b/arangod/Cluster/ClusterMethods.cpp
index 61190d6f0d..850dbe07a7 100644
--- a/arangod/Cluster/ClusterMethods.cpp
+++ b/arangod/Cluster/ClusterMethods.cpp
@@ -2284,7 +2284,7 @@ ClusterMethods::persistCollectionInAgency(
   std::string distributeShardsLike = col->distributeShardsLike();
   std::vector<std::string> dbServers;
   std::vector<std::string> avoid = col->avoidServers();
-
+  ClusterInfo* ci = ClusterInfo::instance();
   if (!distributeShardsLike.empty()) {
     CollectionNameResolver resolver(col->vocbase());
@@ -2320,7 +2320,6 @@ ClusterMethods::persistCollectionInAgency(
     }
     col->distributeShardsLike(otherCidString);
   } else {
-    LOG_TOPIC(WARN, Logger::CLUSTER) << "WTF? " << ignoreDistributeShardsLikeErrors;
     if (ignoreDistributeShardsLikeErrors) {
       col->distributeShardsLike(std::string());
     } else {
diff --git a/arangod/Cluster/ClusterMethods.h b/arangod/Cluster/ClusterMethods.h
index e1929bdc4e..5bdd3b6dcd 100644
--- a/arangod/Cluster/ClusterMethods.h
+++ b/arangod/Cluster/ClusterMethods.h
@@ -258,8 +258,8 @@ class ClusterMethods {
   static std::unique_ptr<LogicalCollection> createCollectionOnCoordinator(
       TRI_col_type_e collectionType, TRI_vocbase_t* vocbase,
       arangodb::velocypack::Slice parameters,
-      bool ignoreDistributeShardsLikeErrors,
-      bool waitForSyncReplication);
+      bool ignoreDistributeShardsLikeErrors = true,
+      bool waitForSyncReplication = true);

  private:

@@ -268,7 +268,8 @@ class ClusterMethods {
  ////////////////////////////////////////////////////////////////////////////////

  static std::unique_ptr<LogicalCollection> persistCollectionInAgency(
-     LogicalCollection* col, bool ignoreDistributeShardsLikeErrors, bool waitForSyncReplication);
+     LogicalCollection* col, bool ignoreDistributeShardsLikeErrors = true,
+     bool waitForSyncReplication = true);
 };

 }  // namespace arangodb
diff --git a/arangod/Replication/InitialSyncer.cpp b/arangod/Replication/InitialSyncer.cpp
index 32c86b6673..25683e710f 100644
--- a/arangod/Replication/InitialSyncer.cpp
+++ b/arangod/Replication/InitialSyncer.cpp
@@ -1265,8 +1265,7 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
       // smaller values than lowKey mean they don't exist remotely
       trx.remove(collectionName, key, options);
       return;
-    }
-    if (cmp1 >= 0 && cmp2 <= 0) {
+    } else if (cmp1 >= 0 && cmp2 <= 0) {
       // we only need to hash we are in the range
       if (cmp1 == 0) {
         foundLowKey = true;
@@ -1294,21 +1293,23 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
       nextChunk = true;
     }

-    if (rangeUnequal) {
-      int res = syncChunkRocksDB(&trx, keysId, currentChunkId, lowKey,
-                                 highKey, markers, errorMsg);
-      if (res != TRI_ERROR_NO_ERROR) {
-        THROW_ARANGO_EXCEPTION(res);
-      }
-    }
+    TRI_ASSERT(!rangeUnequal || nextChunk);  // A => B

-    if (nextChunk && currentChunkId + 1 < numChunks) {
-      currentChunkId++;  // we are out of range, see next chunk
-      resetChunk();
-
-      // key is higher than upper bound, recheck the current document
-      if (cmp2 > 0) {
-        parseDoc(doc, key);
+    if (nextChunk) {  // we are out of range, see next chunk
+      if (rangeUnequal && currentChunkId < numChunks) {
+        int res = syncChunkRocksDB(&trx, keysId, currentChunkId, lowKey,
+                                   highKey, markers, errorMsg);
+        if (res != TRI_ERROR_NO_ERROR) {
+          THROW_ARANGO_EXCEPTION(res);
+        }
+      }
+      currentChunkId++;
+      if (currentChunkId < numChunks) {
+        resetChunk();
+        // key is higher than upper bound, recheck the current document
+        if (cmp2 > 0) {
+          parseDoc(doc, key);
+        }
       }
     }
   };
@@ -1325,6 +1326,19 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
       parseDoc(doc, key);
   }, UINT64_MAX);
+
+  // we might have missed chunks, if the keys don't exist at all locally
+  while (currentChunkId < numChunks) {
+    int res = syncChunkRocksDB(&trx, keysId, currentChunkId, lowKey,
+                               highKey, markers, errorMsg);
+    if (res != TRI_ERROR_NO_ERROR) {
+      THROW_ARANGO_EXCEPTION(res);
+    }
+    currentChunkId++;
+    if (currentChunkId < numChunks) {
+      resetChunk();
+    }
+  }

   res = trx.commit();
   if (!res.ok()) {
@@ -1423,8 +1437,8 @@ int InitialSyncer::syncChunkRocksDB(
   size_t const numKeys = static_cast<size_t>(responseBody.length());
   if (numKeys == 0) {
     errorMsg = "got invalid response from master at " + _masterInfo._endpoint +
-               ": response contains an empty chunk. ChunkId: " +
-               std::to_string(chunkId);
+               ": response contains an empty chunk. Collection: " + collectionName +
+               " Chunk: " + std::to_string(chunkId);
     return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
   }
   TRI_ASSERT(numKeys > 0);
@@ -1494,6 +1508,23 @@ int InitialSyncer::syncChunkRocksDB(
       i++;
     }
+
+  // delete all keys at end of the range
+  while (nextStart < markers.size()) {
+    std::string const& localKey = markers[nextStart].first;
+
+    TRI_ASSERT(localKey.compare(highString) > 0);
+    //if (localKey.compare(highString) > 0) {
+    // we have a local key that is not present remotely
+    keyBuilder->clear();
+    keyBuilder->openObject();
+    keyBuilder->add(StaticStrings::KeyString, VPackValue(localKey));
+    keyBuilder->close();
+
+    trx->remove(collectionName, keyBuilder->slice(), options);
+    //}
+    ++nextStart;
+  }

   if (!toFetch.empty()) {
     VPackBuilder keysBuilder;
diff --git a/arangod/RocksDBEngine/RocksDBCollection.cpp b/arangod/RocksDBEngine/RocksDBCollection.cpp
index d1c86226c1..7fec75c0e6 100644
--- a/arangod/RocksDBEngine/RocksDBCollection.cpp
+++ b/arangod/RocksDBEngine/RocksDBCollection.cpp
@@ -71,7 +71,7 @@ static inline rocksdb::Transaction* rocksTransaction(
   return static_cast<RocksDBTransactionState*>(trx->state())
       ->rocksTransaction();
 }
-}
+}  // namespace

 RocksDBCollection::RocksDBCollection(LogicalCollection* collection,
                                      VPackSlice const& info)
@@ -196,8 +196,8 @@ void RocksDBCollection::open(bool ignoreErrors) {
   RocksDBEngine* engine =
       static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
   auto counterValue = engine->counterManager()->loadCounter(this->objectId());
-  LOG_TOPIC(ERR, Logger::DEVEL) << " number of documents: "
-                                << counterValue.added();
+  LOG_TOPIC(ERR, Logger::DEVEL)
+      << " number of documents: " << counterValue.added();
   _numberDocuments = counterValue.added() - counterValue.removed();
   _revisionId = counterValue.revisionId();
   //_numberDocuments = countKeyRange(db, readOptions,
@@ -443,19 +443,21 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
   iter->Seek(documentBounds.start());

   while (iter->Valid() && cmp->Compare(iter->key(), documentBounds.end()) < 0) {
+    TRI_voc_rid_t revisionId = RocksDBKey::revisionId(iter->key());
+
+    // add possible log statement
+    state->prepareOperation(cid, revisionId, TRI_VOC_DOCUMENT_OPERATION_REMOVE);
     rocksdb::Status s = rtrx->Delete(iter->key());
     if (!s.ok()) {
       auto converted = convertStatus(s);
       THROW_ARANGO_EXCEPTION(converted);
     }
-
-    // transaction size limit reached -- fail
-    TRI_voc_rid_t revisionId = RocksDBKey::revisionId(iter->key());

     // report size of key
     RocksDBOperationResult result = state->addOperation(
         cid, revisionId, TRI_VOC_DOCUMENT_OPERATION_REMOVE, 0, iter->key().size());
+    // transaction size limit reached -- fail
     if (result.fail()) {
       THROW_ARANGO_EXCEPTION(result);
     }
@@ -615,6 +617,11 @@ int RocksDBCollection::insert(arangodb::transaction::Methods* trx,
   RocksDBSavePoint guard(rocksTransaction(trx),
                          trx->isSingleOperationTransaction());

+  RocksDBTransactionState* state =
+      static_cast<RocksDBTransactionState*>(trx->state());
+  state->prepareOperation(_logicalCollection->cid(), revisionId,
+                          TRI_VOC_DOCUMENT_OPERATION_INSERT);
+
   res = insertDocument(trx, revisionId, newSlice, options.waitForSync);
   if (res.ok()) {
     Result lookupResult = lookupRevisionVPack(revisionId, trx, mdr);
@@ -624,11 +631,9 @@ int RocksDBCollection::insert(arangodb::transaction::Methods* trx,
     }

     // report document and key size
-    RocksDBOperationResult result =
-        static_cast<RocksDBTransactionState*>(trx->state())
-            ->addOperation(_logicalCollection->cid(), revisionId,
-                           TRI_VOC_DOCUMENT_OPERATION_INSERT,
-                           newSlice.byteSize(), res.keySize());
+    RocksDBOperationResult result = state->addOperation(
+        _logicalCollection->cid(), revisionId,
+        TRI_VOC_DOCUMENT_OPERATION_INSERT, newSlice.byteSize(), res.keySize());

     // transaction size limit reached -- fail
     if (result.fail()) {
@@ -703,8 +708,9 @@ int RocksDBCollection::update(arangodb::transaction::Methods* trx,
   mergeObjectsForUpdate(trx, oldDoc, newSlice, isEdgeCollection,
                         TRI_RidToString(revisionId), options.mergeObjects,
                         options.keepNull, *builder.get());
-
-  if (trx->state()->isDBServer()) {
+  RocksDBTransactionState* state =
+      static_cast<RocksDBTransactionState*>(trx->state());
+  if (state->isDBServer()) {
     // Need to check that no sharding keys have changed:
     if (arangodb::shardKeysChanged(_logicalCollection->dbName(),
                                    trx->resolver()->getCollectionNameCluster(
@@ -717,8 +723,10 @@ int RocksDBCollection::update(arangodb::transaction::Methods* trx,
   RocksDBSavePoint guard(rocksTransaction(trx),
                          trx->isSingleOperationTransaction());

+  // add possible log statement under guard
+  state->prepareOperation(_logicalCollection->cid(), revisionId,
+                          TRI_VOC_DOCUMENT_OPERATION_UPDATE);
   VPackSlice const newDoc(builder->slice());
-
   res = updateDocument(trx, oldRevisionId, oldDoc, revisionId, newDoc,
                        options.waitForSync);
@@ -731,10 +739,9 @@ int RocksDBCollection::update(arangodb::transaction::Methods* trx,
     TRI_ASSERT(!mdr.empty());

     // report document and key size
-    result = static_cast<RocksDBTransactionState*>(trx->state())
-                 ->addOperation(_logicalCollection->cid(), revisionId,
-                                TRI_VOC_DOCUMENT_OPERATION_UPDATE,
-                                newDoc.byteSize(), res.keySize());
+    result = state->addOperation(_logicalCollection->cid(), revisionId,
+                                 TRI_VOC_DOCUMENT_OPERATION_UPDATE,
+                                 newDoc.byteSize(), res.keySize());

     // transaction size limit reached -- fail
     if (result.fail()) {
@@ -803,7 +810,9 @@ int RocksDBCollection::replace(
                       isEdgeCollection, TRI_RidToString(revisionId),
                       *builder.get());

-  if (trx->state()->isDBServer()) {
+  RocksDBTransactionState* state =
+      static_cast<RocksDBTransactionState*>(trx->state());
+  if (state->isDBServer()) {
     // Need to check that no sharding keys have changed:
     if (arangodb::shardKeysChanged(_logicalCollection->dbName(),
                                    trx->resolver()->getCollectionNameCluster(
@@ -816,6 +825,10 @@ int RocksDBCollection::replace(
   RocksDBSavePoint guard(rocksTransaction(trx),
                          trx->isSingleOperationTransaction());

+  // add possible log statement under guard
+  state->prepareOperation(_logicalCollection->cid(), revisionId,
+                          TRI_VOC_DOCUMENT_OPERATION_REPLACE);
+
   RocksDBOperationResult opResult =
       updateDocument(trx, oldRevisionId, oldDoc, revisionId,
                      VPackSlice(builder->slice()), options.waitForSync);
@@ -829,11 +842,10 @@ int RocksDBCollection::replace(
     TRI_ASSERT(!mdr.empty());

     // report document and key size
-    result = static_cast<RocksDBTransactionState*>(trx->state())
-                 ->addOperation(_logicalCollection->cid(), revisionId,
-                                TRI_VOC_DOCUMENT_OPERATION_REPLACE,
-                                VPackSlice(builder->slice()).byteSize(),
-                                opResult.keySize());
+    result = state->addOperation(_logicalCollection->cid(), revisionId,
+                                 TRI_VOC_DOCUMENT_OPERATION_REPLACE,
+                                 VPackSlice(builder->slice()).byteSize(),
+                                 opResult.keySize());

     // transaction size limit reached -- fail
     if (result.fail()) {
@@ -901,13 +913,19 @@ int RocksDBCollection::remove(arangodb::transaction::Methods* trx,
   RocksDBSavePoint guard(rocksTransaction(trx),
                          trx->isSingleOperationTransaction());

+  // add possible log statement under guard
+  RocksDBTransactionState* state =
+      static_cast<RocksDBTransactionState*>(trx->state());
+  state->prepareOperation(_logicalCollection->cid(), revisionId,
+                          TRI_VOC_DOCUMENT_OPERATION_REMOVE);
+  // RocksDBLogValue val = RocksDBLogValue::DocumentRemove(StringRef(key));
+  // state->rocksTransaction()->PutLogData(val.slice());
   res = removeDocument(trx, oldRevisionId, oldDoc, options.waitForSync);
   if (res.ok()) {
     // report key size
-    res =
-        static_cast<RocksDBTransactionState*>(trx->state())
-            ->addOperation(_logicalCollection->cid(), revisionId,
-                           TRI_VOC_DOCUMENT_OPERATION_REMOVE, 0, res.keySize());
+    res = state->addOperation(_logicalCollection->cid(), revisionId,
+                              TRI_VOC_DOCUMENT_OPERATION_REMOVE, 0,
+                              res.keySize());
     // transaction size limit reached -- fail
     if (res.fail()) {
       THROW_ARANGO_EXCEPTION(res);
diff --git a/arangod/RocksDBEngine/RocksDBLogValue.cpp b/arangod/RocksDBEngine/RocksDBLogValue.cpp
index 89a5ad9d42..303f98e154 100644
--- a/arangod/RocksDBEngine/RocksDBLogValue.cpp
+++ b/arangod/RocksDBEngine/RocksDBLogValue.cpp
@@ -85,6 +85,10 @@ RocksDBLogValue RocksDBLogValue::ViewDrop(TRI_voc_cid_t cid,
   return RocksDBLogValue(RocksDBLogType::ViewDrop, cid, iid);
 }

+RocksDBLogValue RocksDBLogValue::DocumentOpsPrologue(TRI_voc_cid_t cid) {
+  return RocksDBLogValue(RocksDBLogType::DocumentOperationsPrologue, cid);
+}
+
 RocksDBLogValue RocksDBLogValue::DocumentRemove(
     arangodb::StringRef const& key) {
   return RocksDBLogValue(RocksDBLogType::DocumentRemove, key);
@@ -105,7 +109,8 @@ RocksDBLogValue::RocksDBLogValue(RocksDBLogType type, uint64_t val)
     : _buffer() {
   switch (type) {
     case RocksDBLogType::DatabaseDrop:
-    case RocksDBLogType::CollectionCreate: {
+    case RocksDBLogType::CollectionCreate:
+    case RocksDBLogType::DocumentOperationsPrologue: {
       _buffer.reserve(sizeof(RocksDBLogType) + sizeof(uint64_t));
       _buffer += static_cast<char>(type);
       uint64ToPersistent(_buffer, val);  // database or collection ID
diff --git a/arangod/RocksDBEngine/RocksDBLogValue.h b/arangod/RocksDBEngine/RocksDBLogValue.h
index fe79c9c00a..bac04a6412 100644
--- a/arangod/RocksDBEngine/RocksDBLogValue.h
+++ b/arangod/RocksDBEngine/RocksDBLogValue.h
@@ -64,7 +64,7 @@ class RocksDBLogValue {
   static RocksDBLogValue ViewCreate(TRI_voc_cid_t, TRI_idx_iid_t);
   static RocksDBLogValue ViewDrop(TRI_voc_cid_t, TRI_idx_iid_t);
-
+  static RocksDBLogValue DocumentOpsPrologue(TRI_voc_cid_t cid);
   static RocksDBLogValue DocumentRemove(arangodb::StringRef const&);

  public:
diff --git a/arangod/RocksDBEngine/RocksDBTransactionState.cpp b/arangod/RocksDBEngine/RocksDBTransactionState.cpp
index fcab1ac8ea..c17351ccf8 100644
--- a/arangod/RocksDBEngine/RocksDBTransactionState.cpp
+++ b/arangod/RocksDBEngine/RocksDBTransactionState.cpp
@@ -52,7 +52,7 @@
 using namespace arangodb;

-// for the RocksDB engine we do not need any additional data 
+// for the RocksDB engine we do not need any additional data
 struct RocksDBTransactionData final : public TransactionData {};

 RocksDBSavePoint::RocksDBSavePoint(rocksdb::Transaction* trx)
@@ -98,7 +98,8 @@ RocksDBTransactionState::RocksDBTransactionState(
       _numInserts(0),
      _numUpdates(0),
      _numRemoves(0),
-      _intermediateTransactionEnabled(intermediateTransactionEnabled) {}
+      _intermediateTransactionEnabled(intermediateTransactionEnabled),
+      _lastUsedCollection(UINT64_MAX) {}

 /// @brief free a transaction container
 RocksDBTransactionState::~RocksDBTransactionState() {
@@ -111,9 +112,9 @@ RocksDBTransactionState::~RocksDBTransactionState() {

 /// @brief start a transaction
 Result RocksDBTransactionState::beginTransaction(transaction::Hints hints) {
-  LOG_TRX(this, _nestingLevel) << "beginning " << AccessMode::typeString(_type)
-                               << " transaction";
-
+  LOG_TRX(this, _nestingLevel)
+      << "beginning " << AccessMode::typeString(_type) << " transaction";
+
   Result result = useCollections(_nestingLevel);

   if (result.ok()) {
@@ -157,23 +158,23 @@ Result RocksDBTransactionState::beginTransaction(transaction::Hints hints) {
         _rocksWriteOptions, rocksdb::TransactionOptions()));
     _rocksTransaction->SetSnapshot();
     _rocksReadOptions.snapshot = _rocksTransaction->GetSnapshot();
-
-    RocksDBLogValue header = RocksDBLogValue::BeginTransaction(_vocbase->id(),
-                                                               _id);
+
+    RocksDBLogValue header =
+        RocksDBLogValue::BeginTransaction(_vocbase->id(), _id);
     _rocksTransaction->PutLogData(header.slice());
-
+
   } else {
     TRI_ASSERT(_status == transaction::Status::RUNNING);
   }
-
+
   return result;
 }

 /// @brief commit a transaction
 Result RocksDBTransactionState::commitTransaction(
     transaction::Methods* activeTrx) {
-  LOG_TRX(this, _nestingLevel) << "committing " << AccessMode::typeString(_type)
-                               << " transaction";
+  LOG_TRX(this, _nestingLevel)
+      << "committing " << AccessMode::typeString(_type) << " transaction";
   TRI_ASSERT(_status == transaction::Status::RUNNING);

   TRI_IF_FAILURE("TransactionWriteCommitMarker") {
@@ -189,7 +190,7 @@ Result RocksDBTransactionState::commitTransaction(
       _rocksWriteOptions.sync = true;
       _rocksTransaction->SetWriteOptions(_rocksWriteOptions);
     }
-
+
     // TODO wait for response on github issue to see how we can use the
     // sequence number
     result = rocksutils::convertStatus(_rocksTransaction->Commit());
@@ -245,8 +246,8 @@ Result RocksDBTransactionState::commitTransaction(

 /// @brief abort and rollback a transaction
 Result RocksDBTransactionState::abortTransaction(
     transaction::Methods* activeTrx) {
-  LOG_TRX(this, _nestingLevel) << "aborting " << AccessMode::typeString(_type)
-                               << " transaction";
+  LOG_TRX(this, _nestingLevel)
+      << "aborting " << AccessMode::typeString(_type) << " transaction";
   TRI_ASSERT(_status == transaction::Status::RUNNING);

   Result result;
@@ -277,6 +278,26 @@ Result RocksDBTransactionState::abortTransaction(
   return result;
 }

+void RocksDBTransactionState::prepareOperation(
+    TRI_voc_cid_t collectionId, TRI_voc_rid_t revisionId,
+    TRI_voc_document_operation_e operationType) {
+  switch (operationType) {
+    case TRI_VOC_DOCUMENT_OPERATION_UNKNOWN:
+      break;
+    case TRI_VOC_DOCUMENT_OPERATION_INSERT:
+    case TRI_VOC_DOCUMENT_OPERATION_UPDATE:
+    case TRI_VOC_DOCUMENT_OPERATION_REPLACE:
+    case TRI_VOC_DOCUMENT_OPERATION_REMOVE: {
+      if (collectionId != _lastUsedCollection) {
+        RocksDBLogValue logValue =
+            RocksDBLogValue::DocumentOpsPrologue(collectionId);
+        //_rocksTransaction->PutLogData(logValue.slice());
+        _lastUsedCollection = collectionId;
+      }
+    } break;
+  }
+}
+
 /// @brief add an operation for a transaction collection
 RocksDBOperationResult RocksDBTransactionState::addOperation(
     TRI_voc_cid_t cid, TRI_voc_rid_t revisionId,
@@ -298,7 +319,7 @@ RocksDBOperationResult RocksDBTransactionState::addOperation(
       static_cast<RocksDBTransactionCollection*>(findCollection(cid));
   if (collection == nullptr) {
-    std::string message = "collection '" + std::to_string(cid) +
+    std::string message = "collection '" + std::to_string(cid) +
                           "' not found in transaction state";
     THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, message);
   }
diff --git a/arangod/RocksDBEngine/RocksDBTransactionState.h b/arangod/RocksDBEngine/RocksDBTransactionState.h
index afccd37c91..1c3e93dde1 100644
--- a/arangod/RocksDBEngine/RocksDBTransactionState.h
+++ b/arangod/RocksDBEngine/RocksDBTransactionState.h
@@ -42,7 +42,7 @@ namespace rocksdb {
 class Transaction;
 class Slice;
 class Iterator;
-}
+}  // namespace rocksdb

 namespace arangodb {
 namespace cache {
@@ -62,6 +62,7 @@ class RocksDBSavePoint {
   ~RocksDBSavePoint();

   void commit();
+
  private:
   void rollback();

@@ -101,6 +102,9 @@ class RocksDBTransactionState final : public TransactionState {
     return (_status == transaction::Status::ABORTED) && hasOperations();
   }

+  void prepareOperation(TRI_voc_cid_t collectionId, TRI_voc_rid_t revisionId,
+                        TRI_voc_document_operation_e operationType);
+
   /// @brief add an operation for a transaction collection
   RocksDBOperationResult addOperation(
       TRI_voc_cid_t collectionId, TRI_voc_rid_t revisionId,
@@ -135,7 +139,10 @@ class RocksDBTransactionState final : public TransactionState {
   uint64_t _numUpdates;
   uint64_t _numRemoves;
   bool _intermediateTransactionEnabled;
+
+  /// Last collection used for transaction
+  TRI_voc_cid_t _lastUsedCollection;
 };
-}
+}  // namespace arangodb

 #endif
diff --git a/arangod/RocksDBEngine/RocksDBTypes.h b/arangod/RocksDBEngine/RocksDBTypes.h
index a62fcfe21b..86167f3be5 100644
--- a/arangod/RocksDBEngine/RocksDBTypes.h
+++ b/arangod/RocksDBEngine/RocksDBTypes.h
@@ -57,7 +57,8 @@ enum class RocksDBLogType : char {
   ViewCreate = '9',
   ViewDrop = ':',
   ViewChange = ';',
-  DocumentRemove = '<'
+  DocumentOperationsPrologue = '<',
+  DocumentRemove = '='
 };
diff --git a/arangod/V8Server/v8-vocindex.cpp b/arangod/V8Server/v8-vocindex.cpp
index 08bbc8398b..39d4dc5f31 100644
--- a/arangod/V8Server/v8-vocindex.cpp
+++ b/arangod/V8Server/v8-vocindex.cpp
@@ -734,8 +734,9 @@ static void CreateVocBase(v8::FunctionCallbackInfo<v8::Value> const& args,
   }

   std::unique_ptr<LogicalCollection> col =
-      ClusterMethods::createCollectionOnCoordinator(collectionType, vocbase,
-          infoSlice, true, createWaitsForSyncReplication);
+      ClusterMethods::createCollectionOnCoordinator(
+          collectionType, vocbase, infoSlice, false,
+          createWaitsForSyncReplication);

   TRI_V8_RETURN(WrapCollection(isolate, col.release()));
 }
diff --git a/arangod/VocBase/LogicalCollection.cpp b/arangod/VocBase/LogicalCollection.cpp
index 936929de50..13c1ac4bb6 100644
--- a/arangod/VocBase/LogicalCollection.cpp
+++ b/arangod/VocBase/LogicalCollection.cpp
@@ -499,15 +499,7 @@ std::string LogicalCollection::name() const {
 }

 std::string const LogicalCollection::distributeShardsLike() const {
-  if (!_distributeShardsLike.empty()) {
-    CollectionNameResolver resolver(_vocbase);
-    TRI_voc_cid_t shardLike =
-        resolver.getCollectionIdCluster(_distributeShardsLike);
-    if (shardLike != 0) {
-      return basics::StringUtils::itoa(shardLike);
-    }
-  }
-  return "";
+  return _distributeShardsLike;
 }

 void LogicalCollection::distributeShardsLike(std::string const& cid) {
diff --git a/lib/Basics/StringUtils.cpp b/lib/Basics/StringUtils.cpp
index dbcc4d81c6..87058031bf 100644
--- a/lib/Basics/StringUtils.cpp
+++ b/lib/Basics/StringUtils.cpp
@@ -1687,7 +1687,7 @@ int32_t int32(std::string const& str) {
   struct reent buffer;
   return _strtol_r(&buffer, str.c_str(), 0, 10);
 #else
-  return strtol(str.c_str(), 0, 10);
+  return (int32_t)strtol(str.c_str(), 0, 10);
 #endif
 #endif
 }
@@ -1713,7 +1713,7 @@ int32_t int32(char const* value, size_t size) {
   struct reent buffer;
   return _strtol_r(&buffer, value, 0, 10);
 #else
-  return strtol(value, 0, 10);
+  return (int32_t)strtol(value, 0, 10);
 #endif
 #endif
 }
@@ -1727,7 +1727,7 @@ uint32_t uint32(std::string const& str) {
   struct reent buffer;
   return _strtoul_r(&buffer, str.c_str(), 0, 10);
 #else
-  return strtoul(str.c_str(), 0, 10);
+  return (uint32_t)strtoul(str.c_str(), 0, 10);
 #endif
 #endif
 }
@@ -1741,7 +1741,7 @@ uint32_t unhexUint32(std::string const& str) {
   struct reent buffer;
   return _strtoul_r(&buffer, str.c_str(), 0, 16);
 #else
-  return strtoul(str.c_str(), 0, 16);
+  return (uint32_t)strtoul(str.c_str(), 0, 16);
 #endif
 #endif
 }
@@ -1767,7 +1767,7 @@ uint32_t uint32(char const* value, size_t size) {
   struct reent buffer;
   return _strtoul_r(&buffer, value, 0, 10);
 #else
-  return strtoul(value, 0, 10);
+  return (uint32_t)strtoul(value, 0, 10);
 #endif
 #endif
 }
@@ -1793,7 +1793,7 @@ uint32_t unhexUint32(char const* value, size_t size) {
   struct reent buffer;
   return _strtoul_r(&buffer, value, 0, 16);
 #else
-  return strtoul(value, 0, 16);
+  return (uint32_t)strtoul(value, 0, 16);
 #endif
 #endif
 }
diff --git a/lib/Basics/socket-utils.h b/lib/Basics/socket-utils.h
index 81baac4e28..3ed4810d28 100644
--- a/lib/Basics/socket-utils.h
+++ b/lib/Basics/socket-utils.h
@@ -105,11 +105,11 @@ static inline int TRI_bind(TRI_socket_t s, const struct sockaddr* address,
 ////////////////////////////////////////////////////////////////////////////////

 static inline int TRI_connect(TRI_socket_t s, const struct sockaddr* address,
-                              int addr_len) {
+                              size_t addr_len) {
 #ifdef _WIN32
-  return connect(s.fileHandle, address, addr_len);
+  return connect(s.fileHandle, address, (int)addr_len);
 #else
-  return connect(s.fileDescriptor, address, addr_len);
+  return connect(s.fileDescriptor, address, (socklen_t)addr_len);
 #endif
 }

@@ -117,7 +117,7 @@ static inline int TRI_connect(TRI_socket_t s, const struct sockaddr* address,
 /// @brief send abstraction for different OSes
 ////////////////////////////////////////////////////////////////////////////////

-static inline int TRI_send(TRI_socket_t s, const void* buffer, size_t length,
+static inline long TRI_send(TRI_socket_t s, const void* buffer, size_t length,
                             int flags) {
 #ifdef _WIN32
   return send(s.fileHandle, (char*)buffer, (int)length, flags);
diff --git a/lib/Basics/system-functions.cpp b/lib/Basics/system-functions.cpp
index 61727e2eab..bc635f1c90 100644
--- a/lib/Basics/system-functions.cpp
+++ b/lib/Basics/system-functions.cpp
@@ -89,7 +89,7 @@ int gettimeofday(struct timeval* tv, void* tz) {
 #endif

 void TRI_localtime(time_t tt, struct tm* tb) {
-#ifdef TRI_HAVE_LOCALTIME_R
+#ifdef ARANGODB_HAVE_LOCALTIME_R
   localtime_r(&tt, tb);
 #else
 #ifdef ARANGODB_HAVE_LOCALTIME_S
@@ -105,10 +105,10 @@ void TRI_localtime(time_t tt, struct tm* tb) {
 }

 void TRI_gmtime(time_t tt, struct tm* tb) {
-#ifdef TRI_HAVE_GMTIME_R
+#ifdef ARANGODB_HAVE_GMTIME_R
   gmtime_r(&tt, tb);
 #else
-#ifdef TRI_HAVE_GMTIME_S
+#ifdef ARANGODB_HAVE_GMTIME_S
   gmtime_s(tb, &tt);
 #else
   struct tm* tp = gmtime(&tt);
diff --git a/lib/Endpoint/EndpointIp.cpp b/lib/Endpoint/EndpointIp.cpp
index 1e2d129040..a99ea492ef 100644
--- a/lib/Endpoint/EndpointIp.cpp
+++ b/lib/Endpoint/EndpointIp.cpp
@@ -228,7 +228,7 @@ TRI_socket_t EndpointIp::connectSocket(const struct addrinfo* aip,
     setTimeout(listenSocket, connectTimeout);

   int result = TRI_connect(listenSocket, (const struct sockaddr*)aip->ai_addr,
-                           (int)aip->ai_addrlen);
+                           aip->ai_addrlen);

   if (result != 0) {
     pErr = STR_ERROR();
diff --git a/lib/SimpleHttpClient/ClientConnection.cpp b/lib/SimpleHttpClient/ClientConnection.cpp
index b851d0da89..bf11edfd90 100644
--- a/lib/SimpleHttpClient/ClientConnection.cpp
+++ b/lib/SimpleHttpClient/ClientConnection.cpp
@@ -326,15 +326,15 @@ bool ClientConnection::writeClientConnection(void const* buffer, size_t length,

 #if defined(__APPLE__)
   // MSG_NOSIGNAL not supported on apple platform
-  int status = TRI_send(_socket, buffer, length, 0);
+  long status = TRI_send(_socket, buffer, length, 0);
 #elif defined(_WIN32)
   // MSG_NOSIGNAL not supported on windows platform
-  int status = TRI_send(_socket, buffer, length, 0);
+  long status = TRI_send(_socket, buffer, length, 0);
 #elif defined(__sun)
   // MSG_NOSIGNAL not supported on solaris platform
-  int status = TRI_send(_socket, buffer, length, 0);
+  long status = TRI_send(_socket, buffer, length, 0);
 #else
-  int status = TRI_send(_socket, buffer, length, MSG_NOSIGNAL);
+  long status = TRI_send(_socket, buffer, length, MSG_NOSIGNAL);
 #endif

   if (status < 0) {
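
Note on the RocksDBTransactionState::prepareOperation change: the idea is that a DocumentOperationsPrologue marker only needs to be emitted when a transaction switches to a different collection, so consecutive document operations on the same collection can share one prologue entry (in the patch the actual PutLogData call is still commented out). The following is a minimal, standalone sketch of that pattern; the class, type names, and the main() driver are illustrative stand-ins, not ArangoDB's API.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

using CollectionId = uint64_t;

enum class LogType : char {
  DocumentOperationsPrologue = '<',
  DocumentRemove = '='
};

struct LogEntry {
  LogType type;
  CollectionId cid;
};

class TransactionLog {
 public:
  // Called before every document insert/update/replace/remove.
  void prepareOperation(CollectionId cid) {
    if (cid != _lastUsedCollection) {
      // collection changed: write a prologue marker before the operation
      _entries.push_back({LogType::DocumentOperationsPrologue, cid});
      _lastUsedCollection = cid;
    }
  }

  std::size_t size() const { return _entries.size(); }

 private:
  std::vector<LogEntry> _entries;
  // sentinel meaning "no collection used yet", mirroring the patch's UINT64_MAX
  CollectionId _lastUsedCollection = UINT64_MAX;
};

int main() {
  TransactionLog log;
  log.prepareOperation(7);   // emits a prologue for collection 7
  log.prepareOperation(7);   // same collection: no new prologue
  log.prepareOperation(11);  // collection switch: emits a prologue for 11
  std::cout << "prologue entries written: " << log.size() << "\n";  // prints 2
}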