mirror of https://gitee.com/bigwinds/arangodb

commit 1ce4d4a97e
Merge branch 'devel' of https://github.com/arangodb/arangodb into engine-api
@@ -4,6 +4,17 @@
 # External Projects used by ArangoDB
 # ------------------------------------------------------------------------------
 
+if(NOT EXISTS V8/v8/LICENSE OR
+   NOT EXISTS V8/v8/testing/gtest/LICENSE)
+  message(FATAL_ERROR "GIT submodules not checked out properly - aborting! Run:
+    git submodule update --recursive
+    git submodule update --init --recursive
+On Windows you need to make sure git is recent enough and may create symlinks!
+")
+endif()
+
 include(ExternalProject)
 
 ################################################################################

@@ -52,6 +52,9 @@ This exports the collection *test* into the output directory *export* as jsonl.
 Export XGMML
 ------------
 
+[XGMML](https://en.wikipedia.org/wiki/XGMML) is an XML application based on [GML](https://en.wikipedia.org/wiki/Graph_Modelling_Language). To view the XGMML file you can use, for example, [Cytoscape](http://cytoscape.org).
+
 ## XGMML specific options
 
 *--xgmml-label-attribute* specifies the name of the attribute that will become the label in the XGMML file.

@@ -500,8 +500,9 @@ generate
 - `./utils/generateExamples.sh --onlyThisOne geoIndexSelect` will only produce one example - *geoIndexSelect*
 - `./utils/generateExamples.sh --onlyThisOne 'MOD.*'` will only produce the examples matching that regex; Note that
   examples with enumerations in their name may base on others in their series - so you should generate the whole group.
-- `./utils/generateExamples.sh --server.endpoint tcp://127.0.0.1:8529` will utilize an existing arangod instead of starting a new one.
-  this does seriously cut down the execution time.
+- running `onlyThisOne` in conjunction with a pre-started server cuts down the execution time even more.
+  In addition to `--onlyThisOne ...`, specify e.g. `--server.endpoint tcp://127.0.0.1:8529` to utilize your already running arangod.
+  Please note that examples may collide with existing collections like 'test' - you need to make sure your server is clean enough.
 - you can use generateExamples like that:
   `./utils/generateExamples.sh \
     --server.endpoint 'tcp://127.0.0.1:8529' \

@@ -401,12 +401,6 @@ bool ServerState::integrateIntoCluster(ServerState::RoleEnum role,
     FATAL_ERROR_EXIT();
   }
 
-  const std::string agencyKey = roleToAgencyKey(role);
-  const std::string planKey = "Plan/" + agencyKey + "/" + id;
-  const std::string currentKey = "Current/" + agencyKey + "/" + id;
-
-  _id = id;
-
   findAndSetRoleBlocking();
   LOG_TOPIC(DEBUG, Logger::CLUSTER) << "We successfully announced ourselves as "
                                     << roleToString(role) << " and our id is "

@@ -299,7 +299,11 @@ bool MMFilesCollection::OpenIterator(TRI_df_marker_t const* marker, MMFilesColle
 }
 
 MMFilesCollection::MMFilesCollection(LogicalCollection* collection)
-    : PhysicalCollection(collection), _ditches(collection), _initialCount(0), _lastRevision(0) {}
+    : PhysicalCollection(collection)
+    , _ditches(collection)
+    , _initialCount(0), _lastRevision(0)
+    , _uncollectedLogfileEntries(0)
+{}
 
 MMFilesCollection::~MMFilesCollection() {
   try {

@@ -1241,6 +1245,11 @@ int MMFilesCollection::insert(transaction::Methods* trx,
   return res;
 }
 
+bool MMFilesCollection::isFullyCollected() const {
+  int64_t uncollected = _uncollectedLogfileEntries.load();
+  return (uncollected == 0);
+}
+
 MMFilesDocumentPosition MMFilesCollection::lookupRevision(TRI_voc_rid_t revisionId) const {
   TRI_ASSERT(revisionId != 0);
   MMFilesDocumentPosition const old = _revisionsCache.lookup(revisionId);

@@ -161,9 +161,6 @@ class MMFilesCollection final : public PhysicalCollection {
 
   Ditches* ditches() const override { return &_ditches; }
 
-  /// @brief iterate all markers of a collection on load
-  int iterateMarkersOnLoad(transaction::Methods* trx) override;
-
   ////////////////////////////////////
   // -- SECTION DML Operations --
   ///////////////////////////////////

@@ -174,6 +171,26 @@ class MMFilesCollection final : public PhysicalCollection {
              OperationOptions& options, TRI_voc_tick_t& resultMarkerTick,
              bool lock) override;
 
+  /// @brief iterate all markers of a collection on load
+  int iterateMarkersOnLoad(arangodb::transaction::Methods* trx) override;
+
+  virtual bool isFullyCollected() const override;
+
+  int64_t uncollectedLogfileEntries() const {
+    return _uncollectedLogfileEntries.load();
+  }
+
+  void increaseUncollectedLogfileEntries(int64_t value) {
+    _uncollectedLogfileEntries += value;
+  }
+
+  void decreaseUncollectedLogfileEntries(int64_t value) {
+    _uncollectedLogfileEntries -= value;
+    if (_uncollectedLogfileEntries < 0) {
+      _uncollectedLogfileEntries = 0;
+    }
+  }
+
 private:
  static int OpenIteratorHandleDocumentMarker(TRI_df_marker_t const* marker,
                                              MMFilesDatafile* datafile,

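The counter now lives on the MMFiles physical collection rather than on LogicalCollection, so call sites reach it through getPhysical(), as the collector-thread and transaction hunks below show. A minimal sketch of that caller pattern, not part of the diff; the helper name adjustUncollectedLogfileEntries and the exact include paths are assumptions:

#include "MMFiles/MMFilesCollection.h"
#include "VocBase/LogicalCollection.h"

// Hypothetical helper illustrating the call pattern used in this commit:
// engine-agnostic code holds a LogicalCollection*, while the WAL counter
// is owned by the MMFiles physical collection behind getPhysical().
static void adjustUncollectedLogfileEntries(arangodb::LogicalCollection* collection,
                                            int64_t delta) {
  auto* physical =
      static_cast<arangodb::MMFilesCollection*>(collection->getPhysical());
  if (delta >= 0) {
    physical->increaseUncollectedLogfileEntries(delta);
  } else {
    physical->decreaseUncollectedLogfileEntries(-delta);
  }
}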
@@ -258,6 +275,9 @@ class MMFilesCollection final : public PhysicalCollection {
   TRI_voc_rid_t _lastRevision;
 
   MMFilesRevisionsCache _revisionsCache;
+
+  std::atomic<int64_t> _uncollectedLogfileEntries;
+
 };
 
 }

@@ -30,6 +30,7 @@
 #include "Basics/hashes.h"
 #include "Basics/memory-map.h"
 #include "Logger/Logger.h"
+#include "MMFiles/MMFilesCollection.h"
 #include "MMFiles/MMFilesDatafileHelper.h"
 #include "MMFiles/MMFilesLogfileManager.h"
 #include "MMFiles/MMFilesIndexElement.h"

@@ -678,7 +679,7 @@ int MMFilesCollectorThread::processCollectionOperations(MMFilesCollectorCache* c
                                       << collection->name() << "'";
     updateDatafileStatistics(collection, cache);
 
-    collection->decreaseUncollectedLogfileEntries(cache->totalOperationsCount);
+    static_cast<arangodb::MMFilesCollection*>(collection->getPhysical())->decreaseUncollectedLogfileEntries(cache->totalOperationsCount);
 
     res = TRI_ERROR_NO_ERROR;
   } catch (arangodb::basics::Exception const& ex) {

@@ -125,10 +125,10 @@ std::string const MMFilesEngine::FeatureName("MMFilesEngine");
 
 // create the storage engine
 MMFilesEngine::MMFilesEngine(application_features::ApplicationServer* server)
-    : StorageEngine(server, EngineName, FeatureName, new MMFilesIndexFactory()),
-      _isUpgrade(false),
-      _maxTick(0) {
-}
+    : StorageEngine(server, EngineName, FeatureName, new MMFilesIndexFactory())
+    , _isUpgrade(false)
+    , _maxTick(0)
+{}
 
 MMFilesEngine::~MMFilesEngine() {
 }

@@ -25,6 +25,7 @@
 #include "Basics/Exceptions.h"
 #include "Logger/Logger.h"
 #include "MMFiles/MMFilesDocumentOperation.h"
+#include "MMFiles/MMFilesCollection.h"
 #include "StorageEngine/TransactionState.h"
 #include "Transaction/Methods.h"
 #include "Transaction/Hints.h"

@@ -140,7 +141,8 @@ void MMFilesTransactionCollection::freeOperations(transaction::Methods* activeTr
       _collection->setRevision(_originalRevision, true);
     } else if (!_collection->isVolatile() && !isSingleOperationTransaction) {
       // only count logfileEntries if the collection is durable
-      _collection->increaseUncollectedLogfileEntries(_operations->size());
+      arangodb::PhysicalCollection* collPtr = _collection->getPhysical();
+      static_cast<arangodb::MMFilesCollection*>(collPtr)->increaseUncollectedLogfileEntries(_operations->size());
     }
 
   delete _operations;

@@ -25,6 +25,7 @@
 #include "Aql/QueryCache.h"
 #include "Logger/Logger.h"
 #include "Basics/Exceptions.h"
+#include "MMFiles/MMFilesCollection.h"
 #include "MMFiles/MMFilesDatafileHelper.h"
 #include "MMFiles/MMFilesDocumentOperation.h"
 #include "MMFiles/MMFilesLogfileManager.h"

@@ -298,7 +299,8 @@ int MMFilesTransactionState::addOperation(TRI_voc_rid_t revisionId,
     arangodb::aql::QueryCache::instance()->invalidate(
         _vocbase, collection->name());
 
-    collection->increaseUncollectedLogfileEntries(1);
+    auto cptr = collection->getPhysical();
+    static_cast<arangodb::MMFilesCollection*>(cptr)->increaseUncollectedLogfileEntries(1);
   } else {
     // operation is buffered and might be rolled back
     TransactionCollection* trxCollection = this->collection(collection->cid(), AccessMode::Type::WRITE);

@@ -44,6 +44,7 @@
 #include "StorageEngine/EngineSelectorFeature.h"
 #include "MMFiles/MMFilesDocumentOperation.h"
 //#include "MMFiles/MMFilesLogfileManager.h"
+#include "MMFiles/MMFilesCollection.h" //remove
 #include "MMFiles/MMFilesPrimaryIndex.h"
 #include "MMFiles/MMFilesIndexElement.h"
 #include "MMFiles/MMFilesToken.h"

@@ -230,7 +231,6 @@ LogicalCollection::LogicalCollection(LogicalCollection const& other)
       _nextCompactionStartIndex(0),
       _lastCompactionStatus(nullptr),
       _lastCompactionStamp(0.0),
-      _uncollectedLogfileEntries(0),
      _isInitialIteration(false),
      _revisionError(false) {
   _keyGenerator.reset(KeyGenerator::factory(other.keyOptions()));

@@ -295,7 +295,6 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,
       _nextCompactionStartIndex(0),
       _lastCompactionStatus(nullptr),
       _lastCompactionStamp(0.0),
-      _uncollectedLogfileEntries(0),
      _isInitialIteration(false),
      _revisionError(false) {
   if (!IsAllowedName(info)) {

@@ -602,9 +601,7 @@ bool LogicalCollection::IsAllowedName(bool allowSystem,
 
 /// @brief whether or not a collection is fully collected
 bool LogicalCollection::isFullyCollected() {
-  int64_t uncollected = _uncollectedLogfileEntries.load();
-
-  return (uncollected == 0);
+  return getPhysical()->isFullyCollected();
 }
 
 void LogicalCollection::setNextCompactionStartIndex(size_t index) {

@@ -1193,7 +1190,12 @@ std::shared_ptr<arangodb::velocypack::Builder> LogicalCollection::figures() {
 
   builder->add("lastTick", VPackValue(_maxTick));
   builder->add("uncollectedLogfileEntries",
-               VPackValue(_uncollectedLogfileEntries));
+               VPackValue(
+                 //MOVE TO PHYSICAL
+                 static_cast<arangodb::MMFilesCollection*>(getPhysical())
+                     ->uncollectedLogfileEntries()
+               )
+  );
 
   // fills in compaction status
   char const* lastCompactionStatus = "-";

@@ -111,28 +111,27 @@ class LogicalCollection {
   void isInitialIteration(bool value) { _isInitialIteration = value; }
 
   // TODO: MOVE TO PHYSICAL?
-  bool isFullyCollected();
-  int64_t uncollectedLogfileEntries() const {
-    return _uncollectedLogfileEntries.load();
-  }
-
-  void increaseUncollectedLogfileEntries(int64_t value) {
-    _uncollectedLogfileEntries += value;
-  }
-
-  void decreaseUncollectedLogfileEntries(int64_t value) {
-    _uncollectedLogfileEntries -= value;
-    if (_uncollectedLogfileEntries < 0) {
-      _uncollectedLogfileEntries = 0;
-    }
-  }
-
-  void setNextCompactionStartIndex(size_t);
-  size_t getNextCompactionStartIndex();
-  void setCompactionStatus(char const*);
+  bool isFullyCollected(); //should not be exposed
+
+  void setNextCompactionStartIndex(size_t index){
+    MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
+    _nextCompactionStartIndex = index;
+  }
+
+  size_t getNextCompactionStartIndex(){
+    MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
+    return _nextCompactionStartIndex;
+  }
+
+  void setCompactionStatus(char const* reason){
+    TRI_ASSERT(reason != nullptr);
+    MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
+    _lastCompactionStatus = reason;
+  }
 
   double lastCompactionStamp() const { return _lastCompactionStamp; }
   void lastCompactionStamp(double value) { _lastCompactionStamp = value; }
 
   void setRevisionError() { _revisionError = true; }
 
   // SECTION: Meta Information

@@ -615,8 +614,6 @@ class LogicalCollection {
   char const* _lastCompactionStatus;
   double _lastCompactionStamp;
 
-  std::atomic<int64_t> _uncollectedLogfileEntries;
-
   /// @brief: flag that is set to true when the documents are
   /// initial enumerated and the primary index is built
   bool _isInitialIteration;

@@ -107,6 +107,8 @@ class PhysicalCollection {
   virtual bool updateRevisionConditional(TRI_voc_rid_t revisionId, TRI_df_marker_t const* oldPosition, TRI_df_marker_t const* newPosition, TRI_voc_fid_t newFid, bool isInWal) = 0;
   virtual void removeRevision(TRI_voc_rid_t revisionId, bool updateStats) = 0;
 
+  virtual bool isFullyCollected() const = 0;
+
   virtual int insert(arangodb::transaction::Methods* trx,
                      arangodb::velocypack::Slice const newSlice,
                      arangodb::ManagedDocumentResult& result,

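With isFullyCollected() declared as a pure virtual on PhysicalCollection, code that only holds the abstract interface no longer needs to know which storage engine backs a collection. A minimal sketch of such an engine-agnostic caller, not part of the diff; the helper name and include path are assumptions:

#include <chrono>
#include <thread>

#include "StorageEngine/PhysicalCollection.h"

// Hypothetical helper: poll any physical collection until the WAL
// collector has caught up, without referencing a concrete engine class.
static void waitUntilFullyCollected(arangodb::PhysicalCollection* physical) {
  while (!physical->isFullyCollected()) {
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
  }
}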
@@ -111,8 +111,12 @@ function main(argv) {
   }
 
   if (options.hasOwnProperty('server.endpoint')) {
+    if (!scriptArguments.hasOwnProperty('onlyThisOne')) {
+      throw("don't run the full suite on pre-existing servers");
+    }
     startServer = false;
     serverEndpoint = options['server.endpoint'];
+
   }
 
   let args = [theScript].concat(internal.toArgv(scriptArguments));
