diff --git a/Documentation/Books/Users/ReleaseNotes/NewFeatures30.mdpp b/Documentation/Books/Users/ReleaseNotes/NewFeatures30.mdpp index 3b6879d875..fe2d425080 100644 --- a/Documentation/Books/Users/ReleaseNotes/NewFeatures30.mdpp +++ b/Documentation/Books/Users/ReleaseNotes/NewFeatures30.mdpp @@ -238,6 +238,25 @@ more efficiently carry out operations with multiple documents than their single- equivalents, which required one HTTP request per operation. With the batch operations, the HTTP request/response overhead can be amortized across multiple operations. +!SECTION Persistent indexes + +ArangoDB 3.0 provides an experimental persistent index feature. Persistent indexes store +the index values on disk instead of in-memory only. This means the indexes do not need +to be rebuilt in-memory when a collection is loaded or reloaded, which should improve +collection loading times. + +The persistent indexes in ArangoDB are based on the RocksDB engine. +To create a persistent index for a collection, create an index of type "rocksdb" as +follows: + +```js +db.mycollection.ensureIndex({ type: "rocksdb", fields: [ "fieldname" ]}); +``` + +The persistent indexes are sorted, so they allow equality lookups and range queries. +Note that the feature is still highly experimental and has some known deficiencies. It +will be finalized before the release of the 3.0 stable version. 
+ !SECTION Upgraded V8 version The V8 engine that is used inside ArangoDB to execute JavaScript code has been upgraded from diff --git a/arangod/Aql/ClusterBlocks.cpp b/arangod/Aql/ClusterBlocks.cpp index 6775b51262..5b256018c7 100644 --- a/arangod/Aql/ClusterBlocks.cpp +++ b/arangod/Aql/ClusterBlocks.cpp @@ -930,7 +930,7 @@ bool DistributeBlock::getBlockForClient(size_t atLeast, size_t atMost, AqlItemBlock* cur = _buffer.at(_index); - while (_pos < cur->size() && buf.at(clientId).size() < atLeast) { + while (_pos < cur->size() && buf.at(clientId).size() < atMost) { // this may modify the input item buffer in place size_t id = sendToClient(cur); diff --git a/arangod/Cluster/ClusterInfo.cpp b/arangod/Cluster/ClusterInfo.cpp index 4a2cee6fad..6e90b9deba 100644 --- a/arangod/Cluster/ClusterInfo.cpp +++ b/arangod/Cluster/ClusterInfo.cpp @@ -1547,7 +1547,6 @@ int ClusterInfo::ensureIndexCoordinator( "Plan/Collections/" + databaseName + "/" + collectionID; AgencyCommResult previous = ac.getValues2(key); - bool usePrevious = true; velocypack::Slice collection = previous.slice()[0].get(std::vector<std::string>( @@ -1676,11 +1675,7 @@ int ClusterInfo::ensureIndexCoordinator( } AgencyCommResult result; - if (usePrevious) { - result = ac.casValue(key, collection, newBuilder.slice(), 0.0, 0.0); - } else { // only when there is no previous value - result = ac.setValue(key, newBuilder.slice(), 0.0); - } + result = ac.casValue(key, collection, newBuilder.slice(), 0.0, 0.0); if (!result.successful()) { return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION_IN_PLAN, diff --git a/arangod/Cluster/ClusterInfo.h b/arangod/Cluster/ClusterInfo.h index 48d3d43268..924022f3cc 100644 --- a/arangod/Cluster/ClusterInfo.h +++ b/arangod/Cluster/ClusterInfo.h @@ -329,7 +329,7 @@ class CollectionInfo { for (auto const& serverSlice: VPackArrayIterator(shardSlice.value)) { servers.push_back(serverSlice.copyString()); } - (*res).insert(make_pair(shardSlice.key.copyString(), servers)); + 
(*res).insert(make_pair(shard, servers)); } } }