
Merge branch 'devel' of github.com:arangodb/arangodb into devel

Michael Hackstein 2016-10-24 18:56:35 -07:00
commit 6e75a738ac
14 changed files with 168 additions and 102 deletions


@ -68,6 +68,8 @@ have to use `arangodump` to dump all data out (using ArangoDB 3.0), and
use `arangorestore` into the new ArangoDB 3.1, which is the safest
way to upgrade.
The change also affects the return format of `_rev` values in HTTP APIs (see below).
!SECTION HTTP API changes
!SUBSECTION APIs added
@ -80,8 +82,33 @@ the server:
!SUBSECTION APIs changed
The replication API will now use the attribute name `journalSize` instead of
`maximalSize` when returning information about collections.
* the following REST APIs that return revision ids now make use of the new revision
id format introduced in 3.1. All revision ids returned will be strings as in 3.0, but
have a different internal format.
The following APIs are affected:
- GET /_api/collection/{collection}/checksum: `revision` attribute
- GET /_api/collection/{collection}/revision: `revision` attribute
- all other APIs that return documents, which may include the documents' `_rev` attribute
The default value for `keepNull` has been changed to `true` for edge and vertex
modification operations in /_api/gharial.
Client applications should not try to interpret the internals of revision values, but should treat
them as opaque strings and only check whether two revision values are identical (see the example
after this list).
* the replication REST APIs will now use the attribute name `journalSize` instead of
`maximalSize` when returning information about collections.
* the default value for `keepNull` has been changed from `false` to `true` for
the following partial update operations for vertices and edges in /_api/gharial:
- PATCH /_api/gharial/{graph}/vertex/{collection}/{key}
- PATCH /_api/gharial/{graph}/edge/{collection}/{key}
The value for `keepNull` can still be set explicitly to `false` by setting the
URL parameter `keepNull` to a value of `false` (see the example requests after this list).
* the REST API for dropping collections (DELETE /_api/collection) now accepts an
optional query string parameter `isSystem`, which can be set to `true` in order to
drop system collections. If the parameter is not set, or is set to a value other than `true`, the REST
API will refuse to drop system collections. In previous versions of ArangoDB, the
`isSystem` parameter did not exist, and there was no distinction between system
and non-system collections when dropping collections.
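To illustrate the point about revision values, here is a minimal arangosh sketch (the collection
name `mycollection` and the key `example` are hypothetical); `_rev` values are treated as opaque
strings and compared only for equality:

    var doc1 = db.mycollection.document("example");
    // ... later, fetch the same document again ...
    var doc2 = db.mycollection.document("example");
    if (doc1._rev !== doc2._rev) {
      // the document was modified in the meantime; no attempt is made
      // to interpret the contents of the two revision strings
    }

The new query string parameters mentioned above are passed directly in the request URL, for example
`PATCH /_api/gharial/{graph}/vertex/{collection}/{key}?keepNull=false` or
`DELETE /_api/collection/{collection}?isSystem=true`.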


@ -600,6 +600,27 @@ Function WaitForServiceDown
Goto try_again
FunctionEnd
Function un.WaitForServiceDown
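; uninstaller variant of WaitForServiceDown: poll once per second and give up with a
; message box after 40 unsuccessful attempts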
Push 0
Pop $retryCount
try_again:
SimpleSC::ServiceIsRunning '${TRI_SVC_NAME}'
Pop $0 ; returns an errorcode (<>0) otherwise success (0)
Pop $1 ; returns 1 (service is running) - returns 0 (service is not running)
${If} $1 == 0
;MessageBox MB_OK "Service running : $retryCount "
; ok, the service is no longer running.
Return
${EndIf}
Sleep 1000
${If} $retryCount == 40
MessageBox MB_OK "Service shutdown waiting retry count reached; you may need to remove files by hand"
Return
${EndIf}
IntOp $retryCount $retryCount + 1
Goto try_again
FunctionEnd
;--------------------------------
!ifdef CPACK_USES_DOWNLOAD
@ -1211,7 +1232,7 @@ Section "Uninstall"
StrCmp $1 '"$INSTDIR/${SBIN_DIR}/arangod.exe" --start-service' '' Done
DetailPrint 'Shutting down Service'
SimpleSC::StopService '${TRI_SVC_NAME}' 0 30
Call WaitForServiceDown
Call un.WaitForServiceDown
SimpleSC::RemoveService '${TRI_SVC_NAME}'
Pop $0 ; returns an errorcode (<>0) otherwise success (0)
IntCmp $0 0 Done +1 +1


@ -42,7 +42,7 @@ RestUploadHandler::RestUploadHandler(GeneralRequest* request,
RestUploadHandler::~RestUploadHandler() {}
RestStatus RestUploadHandler::execute() {
// cast is ok because http requst is required
// cast is ok because http request is required
HttpRequest* request = dynamic_cast<HttpRequest*>(_request.get());
if (request == nullptr) {
@ -59,26 +59,46 @@ RestStatus RestUploadHandler::execute() {
return RestStatus::DONE;
}
char* filename = nullptr;
std::string errorMessage;
long systemError;
std::string filenameString;
{
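// create a temporary filename; the heap-allocated C string returned by TRI_GetTempName
// is copied into filenameString and freed before this scope ends, so the error paths
// further below do not need any manual TRI_Free calls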
char* filename = nullptr;
std::string errorMessage;
long systemError;
if (TRI_GetTempName("uploads", &filename, false, systemError, errorMessage) !=
TRI_ERROR_NO_ERROR) {
errorMessage = "could not generate temp file: " + errorMessage;
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_INTERNAL, errorMessage);
return RestStatus::FAIL;
if (TRI_GetTempName("uploads", &filename, false, systemError, errorMessage) !=
TRI_ERROR_NO_ERROR) {
errorMessage = "could not generate temp file: " + errorMessage;
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_INTERNAL, errorMessage);
return RestStatus::FAIL;
}
if (filename == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
}
filenameString.append(filename);
TRI_Free(TRI_CORE_MEM_ZONE, filename);
}
char* relative = TRI_GetFilename(filename);
std::string relativeString;
{
char* relative = TRI_GetFilename(filenameString.c_str());
if (relative == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
}
relativeString.append(relative);
TRI_Free(TRI_CORE_MEM_ZONE, relative);
}
std::string const& bodyStr = request->body();
char const* body = bodyStr.c_str();
size_t bodySize = bodyStr.size();
LOG(TRACE) << "saving uploaded file of length " << bodySize << " in file '"
<< filename << "', relative '" << relative << "'";
<< filenameString << "', relative '" << relativeString << "'";
bool found;
std::string const& value = request->value("multipart", found);
@ -88,8 +108,6 @@ RestStatus RestUploadHandler::execute() {
if (multiPart) {
if (!parseMultiPart(body, bodySize)) {
TRI_Free(TRI_CORE_MEM_ZONE, relative);
TRI_Free(TRI_CORE_MEM_ZONE, filename);
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_INTERNAL, "invalid multipart request");
return RestStatus::FAIL;
@ -98,18 +116,14 @@ RestStatus RestUploadHandler::execute() {
}
try {
FileUtils::spit(std::string(filename), body, bodySize);
TRI_Free(TRI_CORE_MEM_ZONE, filename);
FileUtils::spit(filenameString, body, bodySize);
} catch (...) {
TRI_Free(TRI_CORE_MEM_ZONE, relative);
TRI_Free(TRI_CORE_MEM_ZONE, filename);
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_INTERNAL, "could not save file");
return RestStatus::FAIL;
}
char* fullName = TRI_Concatenate2File("uploads", relative);
TRI_Free(TRI_CORE_MEM_ZONE, relative);
std::string fullName = basics::FileUtils::buildFilename("uploads", relativeString);
// create the response
resetResponse(rest::ResponseCode::CREATED);
@ -118,7 +132,6 @@ RestStatus RestUploadHandler::execute() {
b.add(VPackValue(VPackValueType::Object));
b.add("filename", VPackValue(fullName));
TRI_Free(TRI_CORE_MEM_ZONE, fullName);
b.close();
VPackSlice s = b.slice();


@ -1089,8 +1089,7 @@ int MMFilesCollection::iterateMarkersOnLoad(arangodb::Transaction* trx) {
_lastRevision >= static_cast<TRI_voc_rid_t>(2016 - 1970) * 1000 * 60 * 60 * 24 * 365 &&
application_features::ApplicationServer::server->getFeature<DatabaseFeature>("Database")->check30Revisions()) {
// a collection from 3.0 or earlier with a _rev value that is higher than we can handle safely
LOG(FATAL) << "collection '" << _logicalCollection->name() << "' contains _rev values that are higher than expected for an ArangoDB 3.0 database. If this collection was created or used with a pre-release ArangoDB 3.1, please restart the server with option '--database.check-30-revisions false' to suppress this warning.";
FATAL_ERROR_EXIT();
LOG(WARN) << "collection '" << _logicalCollection->name() << "' contains _rev values that are higher than expected for an ArangoDB 3.0 database. If this collection was created or used with a pre-release or development version of ArangoDB 3.1, please restart the server with option '--database.check-30-revisions false' to suppress this warning.";
}


@ -120,7 +120,6 @@ std::string const MMFilesEngine::EngineName("MMFiles");
// create the storage engine
MMFilesEngine::MMFilesEngine(application_features::ApplicationServer* server)
: StorageEngine(server, EngineName),
_iterateMarkersOnOpen(true),
_isUpgrade(false),
_maxTick(0) {
}
@ -157,28 +156,19 @@ void MMFilesEngine::start() {
// test if the "databases" directory is present and writable
verifyDirectories();
int res = TRI_ERROR_NO_ERROR;
// get names of all databases
std::vector<std::string> names(getDatabaseNames());
if (names.empty()) {
// no databases found, i.e. there is no system database!
// create a database for the system database
res = createDatabaseDirectory(TRI_NewTickServer(), TRI_VOC_SYSTEM_DATABASE);
_iterateMarkersOnOpen = false;
} else {
_iterateMarkersOnOpen = !application_features::ApplicationServer::getFeature<wal::LogfileManager>("LogfileManager")->hasFoundLastTick();
}
int res = createDatabaseDirectory(TRI_NewTickServer(), TRI_VOC_SYSTEM_DATABASE);
if (res != TRI_ERROR_NO_ERROR) {
LOG(ERR) << "unable to initialize databases: " << TRI_errno_string(res);
THROW_ARANGO_EXCEPTION(res);
}
if (_iterateMarkersOnOpen) {
LOG(WARN) << "no shutdown info found. scanning datafiles for last tick...";
}
if (res != TRI_ERROR_NO_ERROR) {
LOG(ERR) << "unable to initialize databases: " << TRI_errno_string(res);
THROW_ARANGO_EXCEPTION(res);
}
}
}
// stop the storage engine. this can be used to flush all data to disk,
@ -413,10 +403,6 @@ int MMFilesEngine::getCollectionsAndIndexes(TRI_vocbase_t* vocbase,
arangodb::velocypack::Builder& result,
bool wasCleanShutdown,
bool isUpgrade) {
if (!wasCleanShutdown) {
LOG(TRACE) << "scanning all collection markers in database '" << vocbase->name() << "'";
}
result.openArray();
std::string const path = databaseDirectory(vocbase->id());
@ -1218,6 +1204,7 @@ TRI_vocbase_t* MMFilesEngine::openExistingDatabase(TRI_voc_tick_t id, std::strin
if (!wasCleanShutdown) {
// iterating markers may be time-consuming. we'll only do it if
// we have to
LOG(WARN) << "no shutdown info found. scanning all collection markers in collection '" << collection->name() << "', database '" << vocbase->name() << "'";
findMaxTickInJournals(collection->path());
}


@ -339,7 +339,6 @@ class MMFilesEngine final : public StorageEngine {
private:
std::string _basePath;
std::string _databasePath;
bool _iterateMarkersOnOpen;
bool _isUpgrade;
TRI_voc_tick_t _maxTick;
std::vector<std::pair<std::string, std::string>> _deleted;


@ -783,7 +783,7 @@ static void ResponseV8ToCpp(v8::Isolate* isolate, TRI_v8_global_t const* v8g,
// .........................................................................
// cookies
// .........................................................................
HttpResponse* httpResponse = dynamic_cast<HttpResponse*>(response);
TRI_GET_GLOBAL_STRING(CookiesKey);
if (res->Has(CookiesKey)) {
v8::Handle<v8::Value> val = res->Get(CookiesKey);
@ -791,6 +791,7 @@ static void ResponseV8ToCpp(v8::Isolate* isolate, TRI_v8_global_t const* v8g,
switch (response->transportType()) {
case Endpoint::TransportType::HTTP: {
HttpResponse* httpResponse = dynamic_cast<HttpResponse*>(response);
if (v8Cookies->IsArray()) {
v8::Handle<v8::Array> v8Array = v8Cookies.As<v8::Array>();


@ -1880,20 +1880,21 @@ static void JS_RevisionVocbaseCol(
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
}
TRI_voc_rid_t rid;
TRI_voc_rid_t revisionId;
int res;
if (ServerState::instance()->isCoordinator()) {
res = GetRevisionCoordinator(collection, rid);
res = GetRevisionCoordinator(collection, revisionId);
} else {
res = GetRevision(collection, rid);
res = GetRevision(collection, revisionId);
}
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_THROW_EXCEPTION(res);
}
TRI_V8_RETURN(V8RevisionId(isolate, rid));
std::string ridString = TRI_RidToString(revisionId);
TRI_V8_RETURN(TRI_V8_STD_STRING(ridString));
TRI_V8_TRY_CATCH_END
}


@ -377,7 +377,7 @@ static void JS_ChecksumCollection(
LogicalCollection* collection = trx.documentCollection();
auto physical = collection->getPhysical();
TRI_ASSERT(physical != nullptr);
std::string const revisionId = std::to_string(physical->revision());
std::string const revisionId = TRI_RidToString(physical->revision());
uint64_t hash = 0;
ManagedDocumentResult mmdr(&trx);


@ -52,17 +52,6 @@ v8::Handle<v8::Value> V8TickId(v8::Isolate* isolate, TRI_voc_tick_t tick) {
return TRI_V8_PAIR_STRING(&buffer[0], static_cast<int>(len));
}
////////////////////////////////////////////////////////////////////////////////
/// @brief create a v8 revision id value from the internal revision id
////////////////////////////////////////////////////////////////////////////////
v8::Handle<v8::Value> V8RevisionId(v8::Isolate* isolate, TRI_voc_rid_t rid) {
char buffer[21];
size_t len = TRI_StringUInt64InPlace(static_cast<uint64_t>(rid), &buffer[0]);
return TRI_V8_PAIR_STRING(&buffer[0], static_cast<int>(len));
}
////////////////////////////////////////////////////////////////////////////////
/// @brief checks if argument is a document identifier
////////////////////////////////////////////////////////////////////////////////


@ -61,12 +61,6 @@ extern int32_t const WRP_VOCBASE_COL_TYPE;
TRI_V8_THROW_EXCEPTION(TRI_ERROR_TRANSACTION_DISALLOWED_OPERATION); \
}
////////////////////////////////////////////////////////////////////////////////
/// @brief create a v8 revision id value from the internal revision id
////////////////////////////////////////////////////////////////////////////////
v8::Handle<v8::Value> V8RevisionId(v8::Isolate* isolate, TRI_voc_rid_t rid);
////////////////////////////////////////////////////////////////////////////////
/// @brief get the vocbase pointer from the current V8 context
////////////////////////////////////////////////////////////////////////////////


@ -1326,6 +1326,8 @@ void LogicalCollection::open(bool ignoreErrors) {
StorageEngine* engine = EngineSelectorFeature::ENGINE;
engine->changeCollection(_vocbase, _cid, this, doSync);
}
TRI_UpdateTickServer(_cid);
}
/// @brief opens an existing collection
@ -1468,7 +1470,6 @@ int LogicalCollection::restoreIndex(Transaction* trx, VPackSlice const& info,
}
TRI_ASSERT(newIdx != nullptr);
// FIXME New style. Update tick after successful creation of index.
TRI_UpdateTickServer(newIdx->id());
TRI_ASSERT(newIdx.get()->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);


@ -208,7 +208,6 @@ function CollectionSuite () {
var r1 = c1.checksum(true);
assertTypeOf("string", r1.revision);
assertTrue(r1.revision !== "");
assertTrue(r1.revision.match(/^[0-9]+$/));
assertTypeOf("string", r1.checksum);
assertEqual("0", r1.checksum);
@ -218,7 +217,6 @@ function CollectionSuite () {
assertNotEqual(r1.revision, r2.revision);
assertTypeOf("string", r2.revision);
assertTrue(r2.revision !== "");
assertTrue(r2.revision.match(/^[0-9]+$/));
assertTypeOf("string", r2.checksum);
assertNotEqual("0", r2.checksum);
@ -229,7 +227,6 @@ function CollectionSuite () {
assertNotEqual(r2.revision, r3.revision);
assertTypeOf("string", r3.revision);
assertTrue(r3.revision !== "");
assertTrue(r3.revision.match(/^[0-9]+$/));
assertTypeOf("string", r3.checksum);
assertNotEqual("0", r3.checksum);
assertNotEqual(r2.checksum, r3.checksum);
@ -279,7 +276,6 @@ function CollectionSuite () {
var r1 = c1.checksum(true);
assertTypeOf("string", r1.revision);
assertTrue(r1.revision !== "");
assertTrue(r1.revision.match(/^[0-9]+$/));
assertTypeOf("string", r1.checksum);
assertEqual("0", r1.checksum);
@ -288,7 +284,6 @@ function CollectionSuite () {
assertNotEqual(r1.revision, r2.revision);
assertTypeOf("string", r2.revision);
assertTrue(r2.revision !== "");
assertTrue(r2.revision.match(/^[0-9]+$/));
assertTypeOf("string", r2.checksum);
assertNotEqual("0", r2.checksum);
@ -298,7 +293,6 @@ function CollectionSuite () {
assertNotEqual(r2.revision, r3.revision);
assertTypeOf("string", r3.revision);
assertTrue(r3.revision !== "");
assertTrue(r3.revision.match(/^[0-9]+$/));
assertTypeOf("string", r3.checksum);
assertNotEqual("0", r3.checksum);
assertNotEqual(r2.checksum, r3.checksum);


@ -37,23 +37,76 @@ var ArangoCollection = arangodb.ArangoCollection;
var db = arangodb.db;
var ERRORS = arangodb.errors;
var compareStringIds = function (l, r) {
var i;
if (l.length !== r.length) {
return l.length - r.length < 0 ? -1 : 1;
}
// copied from lib/Basics/HybridLogicalClock.cpp
var decodeTable = [
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, // 0 - 15
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, // 16 - 31
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, 0, -1, -1, // 32 - 47
54, 55, 56, 57, 58, 59, 60, 61,
62, 63, -1, -1, -1, -1, -1, -1, // 48 - 63
-1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, // 64 - 79
17, 18, 19, 20, 21, 22, 23, 24,
25, 26, 27, -1, -1, -1, -1, 1, // 80 - 95
-1, 28, 29, 30, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, // 96 - 111
43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, -1, -1, -1, -1, -1, // 112 - 127
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, // 128 - 143
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, // 144 - 159
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, // 160 - 175
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, // 176 - 191
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, // 192 - 207
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, // 208 - 223
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, // 224 - 239
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1 // 240 - 255
];
// length is equal
for (i = 0; i < l.length; ++i) {
if (l[i] !== r[i]) {
return l[i] < r[i] ? -1 : 1;
var decode = function(value) {
var result = 0;
if (value !== '0') {
for (var i = 0, n = value.length; i < n; ++i) {
result = (result * 2 * 2 * 2 * 2 * 2 * 2) + decodeTable[value.charCodeAt(i)];
}
}
return result;
};
var compareStringIds = function (l, r) {
if (l.length === r.length) {
// strip common prefix because the accuracy of JS numbers is limited
var prefixLength = 0;
for (var i = 0; i < l.length; ++i) {
if (l[i] !== r[i]) {
break;
}
++prefixLength;
}
if (prefixLength > 0) {
l = l.substr(prefixLength);
r = r.substr(prefixLength);
}
}
l = decode(l);
r = decode(r);
if (l !== r) {
return l < r ? -1 : 1;
}
return 0;
};
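// usage sketch: with the decode table above, '-' decodes to 0, 'A' to 2 and 'B' to 3, so:
//   compareStringIds("A", "B");   // -1, because 2 < 3
//   compareStringIds("B", "B");   //  0, identical revision strings
//   compareStringIds("BA", "B");  //  1, because 3 * 64 + 2 > 3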
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite: error handling
////////////////////////////////////////////////////////////////////////////////
@ -782,18 +835,15 @@ function CollectionSuite () {
var r1 = c1.revision();
assertTypeOf("string", r1);
assertTrue(r1.match(/^[0-9]+$/));
c1.save({ a : 1 });
var r2 = c1.revision();
assertTypeOf("string", r2);
assertTrue(r2.match(/^[0-9]+$/));
assertEqual(1, compareStringIds(r2, r1));
c1.save({ a : 2 });
var r3 = c1.revision();
assertTypeOf("string", r3);
assertTrue(r3.match(/^[0-9]+$/));
assertEqual(1, compareStringIds(r3, r2));
// unload
@ -820,11 +870,8 @@ function CollectionSuite () {
var c1 = db._create(cn);
var r1 = c1.revision();
assertTrue(r1.match(/^[0-9]+$/));
c1.save({ _key: "abc" });
var r2 = c1.revision();
assertTrue(r2.match(/^[0-9]+$/));
assertEqual(1, compareStringIds(r2, r1));
c1.save({ _key: "123" });
@ -832,17 +879,14 @@ function CollectionSuite () {
c1.save({ _key: "789" });
var r3 = c1.revision();
assertTrue(r3.match(/^[0-9]+$/));
assertEqual(1, compareStringIds(r3, r2));
c1.remove("123");
var r4 = c1.revision();
assertTrue(r4.match(/^[0-9]+$/));
assertEqual(1, compareStringIds(r4, r3));
c1.truncate();
var r5 = c1.revision();
assertTrue(r5.match(/^[0-9]+$/));
assertEqual(1, compareStringIds(r5, r4));
// unload
@ -853,12 +897,10 @@ function CollectionSuite () {
// compare rev
c1 = db._collection(cn);
var r6 = c1.revision();
assertTrue(r6.match(/^[0-9]+$/));
assertEqual(0, compareStringIds(r6, r5));
for (var i = 0; i < 10; ++i) {
c1.save({ _key: "test" + i });
assertTrue(c1.revision().match(/^[0-9]+$/));
assertEqual(1, compareStringIds(c1.revision(), r6));
r6 = c1.revision();
}
@ -871,7 +913,6 @@ function CollectionSuite () {
// compare rev
c1 = db._collection(cn);
var r7 = c1.revision();
assertTrue(r7.match(/^[0-9]+$/));
assertEqual(0, compareStringIds(r7, r6));
c1.truncate();
@ -885,7 +926,6 @@ function CollectionSuite () {
// compare rev
c1 = db._collection(cn);
var r9 = c1.revision();
assertTrue(r9.match(/^[0-9]+$/));
assertEqual(0, compareStringIds(r9, r8));
db._drop(cn);