mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of ssh://github.com/ArangoDB/ArangoDB into devel
This commit is contained in:
commit
a16c7e8f30
|
@ -238,6 +238,25 @@ more efficiently carry out operations with multiple documents than their single-
|
|||
equivalents, which required one HTTP request per operation. With the batch operations,
|
||||
the HTTP request/response overhead can be amortized across multiple operations.
|
||||
|
||||
!SECTION Persistent indexes
|
||||
|
||||
ArangoDB 3.0 provides an experimental persistent index feature. Persistent indexes store
|
||||
the index values on disk instead of in-memory only. This means the indexes do not need
|
||||
to be rebuilt in-memory when a collection is loaded or reloaded, which should improve
|
||||
collection loading times.
|
||||
|
||||
The persistent indexes in ArangoDB are based on the RocksDB engine.
|
||||
To create a persistent index for a collection, create an index of type "rocksdb" as
|
||||
follows:
|
||||
|
||||
```js
|
||||
db.mycollection.ensureIndex({ type: "rocksdb", fields: [ "fieldname" ]});
|
||||
```
|
||||
|
||||
The persistent indexes are sorted, so they allow equality lookups and range queries.
|
||||
Note that the feature is still highly experimental and has some known deficiencies. It
|
||||
will be finalized before the release of the 3.0 stable version.
|
||||
|
||||
!SECTION Upgraded V8 version
|
||||
|
||||
The V8 engine that is used inside ArangoDB to execute JavaScript code has been upgraded from
|
||||
|
|
|
@ -930,7 +930,7 @@ bool DistributeBlock::getBlockForClient(size_t atLeast, size_t atMost,
|
|||
|
||||
AqlItemBlock* cur = _buffer.at(_index);
|
||||
|
||||
while (_pos < cur->size() && buf.at(clientId).size() < atLeast) {
|
||||
while (_pos < cur->size() && buf.at(clientId).size() < atMost) {
|
||||
// this may modify the input item buffer in place
|
||||
size_t id = sendToClient(cur);
|
||||
|
||||
|
|
|
@ -1547,7 +1547,6 @@ int ClusterInfo::ensureIndexCoordinator(
|
|||
"Plan/Collections/" + databaseName + "/" + collectionID;
|
||||
|
||||
AgencyCommResult previous = ac.getValues2(key);
|
||||
bool usePrevious = true;
|
||||
|
||||
velocypack::Slice collection =
|
||||
previous.slice()[0].get(std::vector<std::string>(
|
||||
|
@ -1676,11 +1675,7 @@ int ClusterInfo::ensureIndexCoordinator(
|
|||
}
|
||||
|
||||
AgencyCommResult result;
|
||||
if (usePrevious) {
|
||||
result = ac.casValue(key, collection, newBuilder.slice(), 0.0, 0.0);
|
||||
} else { // only when there is no previous value
|
||||
result = ac.setValue(key, newBuilder.slice(), 0.0);
|
||||
}
|
||||
result = ac.casValue(key, collection, newBuilder.slice(), 0.0, 0.0);
|
||||
|
||||
if (!result.successful()) {
|
||||
return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION_IN_PLAN,
|
||||
|
|
|
@ -329,7 +329,7 @@ class CollectionInfo {
|
|||
for (auto const& serverSlice: VPackArrayIterator(shardSlice.value)) {
|
||||
servers.push_back(serverSlice.copyString());
|
||||
}
|
||||
(*res).insert(make_pair(shardSlice.key.copyString(), servers));
|
||||
(*res).insert(make_pair(shard, servers));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue