mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of https://github.com/arangodb/arangodb into devel
* 'devel' of https://github.com/arangodb/arangodb:
  fix race
  try to detect situations where we need to invalidate the windows build directory
  fixing the fix
  fix indentation
  remove unnecessary includes
  fixing the fix
  remove obsolete comment
  oops
  more ooops
  left debug output behind
  Fixed header parsing in SimpleHttpClient for wrongly advanced cursor position in readBuffer
  Possibly too ambitious timing
  Windows warnings
  Docs: Fix link between Manual and AQL book
  The rocksdb cluster dump now tests Fulltext index as well
commit cc51a10da2
@@ -2,7 +2,7 @@ Geo Queries
 ===========
 
 {% hint 'warning' %}
-It is recommended to use AQL instead, see [**Geo functions**](../../../../AQL/Functions/Geo.html).
+It is recommended to use AQL instead, see [**Geo functions**](../../../AQL/Functions/Geo.html).
 {% endhint %}
 
 The ArangoDB allows to select documents based on geographic coordinates. In
@@ -653,13 +653,21 @@ fi
 PARTIAL_STATE=$?
 set -e
 
+if test "${isCygwin}" == 1 -a "${PARTIAL_STATE}" == 0; then
+    # windows fails to partialy re-configure - so do a complete configure run.
+    if test -f CMakeFiles/generate.stamp -a CMakeFiles/generate.stamp -ot "${SOURCE_DIR}/CMakeList.txt"; then
+        echo "CMakeList older - Forcing complete configure run!"
+        PARTIAL_STATE=1
+    fi
+fi
+
 if test "${PARTIAL_STATE}" == 0; then
     rm -rf CMakeFiles CMakeCache.txt CMakeCPackOptions.cmake cmake_install.cmake CPackConfig.cmake CPackSourceConfig.cmake
     CFLAGS="${CFLAGS}" CXXFLAGS="${CXXFLAGS}" LDFLAGS="${LDFLAGS}" LIBS="${LIBS}" \
         cmake ${SOURCE_DIR} ${CONFIGURE_OPTIONS} -G "${GENERATOR}" || exit 1
 fi
 
-if [ -n "$CPACK" -a -n "${TARGET_DIR}" -a -z "${MSVC}" ]; then
+if [ -n "$CPACK" ] && [ -n "${TARGET_DIR}" ] && [ -z "${MSVC}" ]; then
     if ! grep -q CMAKE_STRIP CMakeCache.txt; then
         echo "cmake failed to detect strip; refusing to build unstripped packages!"
         exit 1
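The added Cygwin branch decides whether the previous partial CMake configuration is still usable by comparing the modification time of CMakeFiles/generate.stamp against the top-level CMake file; if the stamp is older, a complete configure run is forced. A rough standalone C++17 sketch of the same staleness check (names and paths are illustrative; the script itself does this with `test -f ... -a ... -ot ...`):

    #include <filesystem>
    #include <iostream>

    namespace fs = std::filesystem;

    // true when the stamp file exists but is older than the reference file,
    // i.e. the cached configure result is stale and a full run is needed
    bool needsFullConfigure(fs::path const& stamp, fs::path const& reference) {
      std::error_code ec;
      if (!fs::exists(stamp, ec) || ec) {
        return false;  // mirrors `test -f stamp -a ...`: no stamp, no forced re-run
      }
      auto stampTime = fs::last_write_time(stamp, ec);
      if (ec) {
        return false;
      }
      auto refTime = fs::last_write_time(reference, ec);
      if (ec) {
        return false;
      }
      return stampTime < refTime;
    }

    int main() {
      if (needsFullConfigure("CMakeFiles/generate.stamp", "CMakeLists.txt")) {
        std::cout << "stamp older - forcing complete configure run\n";
      }
    }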
@@ -184,7 +184,7 @@ RocksDBReplicationResult RocksDBReplicationContext::dump(
   while (_hasMore && buff.length() < chunkSize) {
     try {
       _hasMore = _iter->next(cb, 1);  // TODO: adjust limit?
-    } catch (std::exception const& ex) {
+    } catch (std::exception const&) {
      _hasMore = false;
      return RocksDBReplicationResult(TRI_ERROR_INTERNAL, _lastTick);
    } catch (RocksDBReplicationResult const& ex) {
@@ -244,7 +244,7 @@ arangodb::Result RocksDBReplicationContext::dumpKeyChunks(VPackBuilder& b,
       b.add("hash", VPackValue(std::to_string(hash)));
       b.close();
       lowKey = "";
-    } catch (std::exception const& ex) {
+    } catch (std::exception const&) {
      return Result(TRI_ERROR_INTERNAL);
    }
  }
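Both catch-clause changes above are the same cleanup: the caught exception is never inspected, so dropping the variable name avoids unreferenced-variable warnings (the "Windows warnings" item in the merge message). A tiny standalone sketch of the pattern, with illustrative names:

    #include <iostream>
    #include <stdexcept>

    // Catching by const reference without naming the parameter: the handler never
    // looks at the exception object, so an unnamed catch avoids the
    // unreferenced-variable warning that some compilers emit.
    int safeDivide(int a, int b) {
      try {
        if (b == 0) {
          throw std::invalid_argument("division by zero");
        }
        return a / b;
      } catch (std::invalid_argument const&) {  // no variable name needed
        return 0;
      }
    }

    int main() { std::cout << safeDivide(10, 0) << "\n"; }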
@@ -58,9 +58,13 @@ void ListenTask::start() {
   } catch (std::exception const& err) {
     LOG_TOPIC(WARN, arangodb::Logger::COMMUNICATION) << "failed to open endpoint '" << _endpoint->specification()
                                                      << "' with error: " << err.what();
     return;
   }
 
-  _handler = [this](boost::system::error_code const& ec) {
   TRI_ASSERT(_bound);
 
+  auto self = shared_from_this();
+  _handler = [this, self](boost::system::error_code const& ec) {
+    // copy the shared_ptr so nobody can delete the Acceptor while the
+    // callback is running
+    std::shared_ptr<Acceptor> acceptorCopy(_acceptor);
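The race fix makes the completion handler capture a shared_ptr to its owner, so the object cannot be destroyed while the asynchronous callback may still fire. A minimal self-contained sketch of that idiom (illustrative classes, not the ArangoDB ListenTask/Acceptor types):

    #include <functional>
    #include <iostream>
    #include <memory>

    class Listener : public std::enable_shared_from_this<Listener> {
     public:
      std::function<void(int)> handler;

      void start() {
        auto self = shared_from_this();   // extends the object's lifetime to the callback
        handler = [this, self](int ec) {  // 'self' is copied into the closure
          std::cout << "accept completed, ec=" << ec << "\n";
        };
      }
    };

    int main() {
      std::function<void(int)> h;
      {
        auto listener = std::make_shared<Listener>();
        listener->start();
        h = listener->handler;
      }      // the external shared_ptr goes out of scope here ...
      h(0);  // ... but the captured 'self' keeps the Listener alive for the call
    }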
@@ -498,12 +498,12 @@ function agencyTestSuite () {
       assertEqual(readAndCheck([["/a/z"]]), [{"a":{"z":12}}]);
       writeAndCheck([[{"a/y":{"op":"set","new":12, "ttl": 1}}]]);
       assertEqual(readAndCheck([["/a/y"]]), [{"a":{"y":12}}]);
-      wait(1.0);
+      wait(1.1);
       assertEqual(readAndCheck([["/a/y"]]), [{a:{}}]);
       writeAndCheck([[{"/a/y":{"op":"set","new":12, "ttl": 1}}]]);
       writeAndCheck([[{"/a/y":{"op":"set","new":12}}]]);
       assertEqual(readAndCheck([["a/y"]]), [{"a":{"y":12}}]);
-      wait(1.0);
+      wait(1.1);
       assertEqual(readAndCheck([["/a/y"]]), [{"a":{"y":12}}]);
       writeAndCheck([[{"foo/bar":{"op":"set","new":{"baz":12}}}]]);
       assertEqual(readAndCheck([["/foo/bar/baz"]]),
@@ -511,7 +511,7 @@ function agencyTestSuite () {
       assertEqual(readAndCheck([["/foo/bar"]]), [{"foo":{"bar":{"baz":12}}}]);
       assertEqual(readAndCheck([["/foo"]]), [{"foo":{"bar":{"baz":12}}}]);
       writeAndCheck([[{"foo/bar":{"op":"set","new":{"baz":12},"ttl":1}}]]);
-      wait(1.0);
+      wait(1.1);
       assertEqual(readAndCheck([["/foo"]]), [{"foo":{}}]);
       assertEqual(readAndCheck([["/foo/bar"]]), [{"foo":{}}]);
       assertEqual(readAndCheck([["/foo/bar/baz"]]), [{"foo":{}}]);
@@ -184,7 +184,7 @@ function dumpTestSuite () {
       assertEqual(2, c.type()); // document
       assertFalse(p.waitForSync);
 
-      assertEqual(7, c.getIndexes().length);
+      assertEqual(8, c.getIndexes().length);
       assertEqual("primary", c.getIndexes()[0].type);
 
       assertEqual("hash", c.getIndexes()[1].type);
@@ -217,6 +217,10 @@ function dumpTestSuite () {
       assertTrue(c.getIndexes()[6].sparse);
       assertEqual([ "a_ss1", "a_ss2" ], c.getIndexes()[6].fields);
 
+      assertFalse(c.getIndexes()[7].unique);
+      assertEqual("fulltext", c.getIndexes()[7].type);
+      assertEqual([ "a_f" ], c.getIndexes()[7].fields);
+
       assertEqual(0, c.count());
     },
 
@@ -24,9 +24,6 @@
 #include "Basics/Common.h"
 
 #ifdef ARANGODB_ENABLE_FAILURE_TESTS
-#include <sys/time.h>
-#include <unistd.h>
-
 #include <new>
 #endif
 
@@ -51,19 +51,6 @@ int TRI_closesocket(TRI_socket_t s) {
     if (res != 0) {
       LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "socket close error: " << WSAGetLastError();
     }
-    // We patch libev on Windows lightly to not really distinguish between
-    // socket handles and file descriptors, therefore, we do not have to do the
-    // following any more:
-    // if (s.fileDescriptor != -1) {
-    //   res = _close(s.fileDescriptor);
-    //   "To close a file opened with _open_osfhandle, call _close."
-    //   The underlying handle is also closed by a call to _close,
-    //   so it is not necessary to call the Win32 function CloseHandle
-    //   on the original handle.
-    //   However, we do want to do the special shutdown/recv magic above
-    //   because only then we can reuse the port quickly, which we want
-    //   to do directly after a port test.
-    // }
   }
 #else
   if (s.fileDescriptor != TRI_INVALID_SOCKET) {
@@ -658,12 +658,16 @@ void SimpleHttpClient::processHeader() {
     if (*ptr == '\r' || *ptr == '\n' || *ptr == '\0') {
       size_t len = pos - ptr;
+      _readBufferOffset += len + 1;
+      TRI_ASSERT(_readBufferOffset <= _readBuffer.length());
 
       ptr += len + 1;
       remain -= len + 1;
 
       if (*pos == '\r') {
         // adjust offset if line ended with \r\n
+        ++_readBufferOffset;
+        TRI_ASSERT(_readBufferOffset <= _readBuffer.length());
 
         ptr++;
         remain--;
       }
@@ -727,8 +731,11 @@
         ++len;
       }
 
       // account for \n
       ptr += len + 1;
+      _readBufferOffset += len + 1;
+      TRI_ASSERT(_readBufferOffset <= _readBuffer.length());
 
       remain -= (len + 1);
 
+      TRI_ASSERT(_readBufferOffset <= _readBuffer.length());
@@ -736,11 +743,6 @@
       TRI_ASSERT(remain == _readBuffer.length() - _readBufferOffset);
       pos = static_cast<char const*>(memchr(ptr, '\n', remain));
 
-      if (pos == nullptr) {
-        _readBufferOffset++;
-        ptr++;
-        remain--;
-      }
     }
   }
 }
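The processHeader() changes keep _readBufferOffset in step with the parse cursor: consuming a header line advances the offset past the line and its terminator, and the new TRI_ASSERTs enforce that the offset never passes the end of the read buffer. A compact sketch of that bookkeeping on a plain std::string buffer (a hypothetical helper, not the SimpleHttpClient code):

    #include <cassert>
    #include <cstring>
    #include <iostream>
    #include <string>

    // Consume one header line starting at `offset`; returns the line without its
    // terminator and advances `offset` past the '\n' (and a preceding '\r', if any).
    std::string consumeLine(std::string const& buffer, size_t& offset) {
      char const* begin = buffer.data() + offset;
      char const* nl = static_cast<char const*>(
          std::memchr(begin, '\n', buffer.size() - offset));
      if (nl == nullptr) {
        return {};  // incomplete line: leave the offset untouched
      }
      size_t len = static_cast<size_t>(nl - begin);
      size_t lineLen = (len > 0 && begin[len - 1] == '\r') ? len - 1 : len;
      offset += len + 1;                // line content plus the '\n'
      assert(offset <= buffer.size());  // the invariant the added TRI_ASSERTs check
      return std::string(begin, lineLen);
    }

    int main() {
      std::string buffer = "HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n";
      size_t offset = 0;
      while (offset < buffer.size()) {
        std::string line = consumeLine(buffer, offset);
        std::cout << "line: '" << line << "', offset now " << offset << "\n";
        if (line.empty()) {
          break;  // blank line ends the header section
        }
      }
    }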
@@ -783,6 +785,8 @@ void SimpleHttpClient::processBody() {
   }
 
+  _readBufferOffset += _result->getContentLength();
+  TRI_ASSERT(_readBufferOffset <= _readBuffer.length());
 
   _result->setResultType(SimpleHttpResult::COMPLETE);
   _state = FINISHED;
 
@@ -815,6 +819,7 @@ void SimpleHttpClient::processChunkedHeader() {
     // adjust offset if line ended with \r\n
     if (*pos == '\r') {
       ++_readBufferOffset;
+      TRI_ASSERT(_readBufferOffset <= _readBuffer.length());
       ++len;
     }
 
@@ -894,6 +899,7 @@ void SimpleHttpClient::processChunkedBody() {
   }
 
+  _readBufferOffset += (size_t)_nextChunkedSize + 2;
 
   _state = IN_READ_CHUNKED_HEADER;
   processChunkedHeader();
 }
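In chunked transfer encoding each chunk is "<hex length>\r\n<data>\r\n", so after the payload has been copied the read cursor has to skip the chunk size plus two bytes for the trailing CRLF; that is what `_nextChunkedSize + 2` accounts for. A small sketch decoding such a body (a hypothetical helper, not the SimpleHttpClient code):

    #include <cassert>
    #include <iostream>
    #include <string>

    // Decode one chunk of an HTTP chunked body starting at `offset`:
    // "<hex size>\r\n<size bytes of data>\r\n". Returns the data and advances
    // `offset` past the chunk, including the trailing CRLF (the "+ 2").
    std::string readChunk(std::string const& buffer, size_t& offset) {
      size_t lineEnd = buffer.find("\r\n", offset);
      if (lineEnd == std::string::npos) {
        return {};
      }
      size_t chunkSize = std::stoul(buffer.substr(offset, lineEnd - offset), nullptr, 16);
      offset = lineEnd + 2;             // skip the size line and its CRLF
      std::string data = buffer.substr(offset, chunkSize);
      offset += chunkSize + 2;          // skip the data and the trailing CRLF
      assert(offset <= buffer.size());
      return data;
    }

    int main() {
      std::string body = "5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n";
      size_t offset = 0;
      std::string out;
      while (true) {
        std::string chunk = readChunk(body, offset);
        if (chunk.empty()) {
          break;  // the zero-length chunk terminates the body
        }
        out += chunk;
      }
      std::cout << out << "\n";  // prints "hello world"
    }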