Mirror of https://gitee.com/bigwinds/arangodb

Commit 0eab3c46f8 (parent: b509e2e70a)
Upgrade to jemalloc-5.1.0 with patch for static linking. (#6167)
@@ -4,7 +4,7 @@ project(jemalloc C)
include(ExternalProject)

# set version and paths
set(JEMALLOC_VERSION "5.0.1")
set(JEMALLOC_VERSION "5.1.0")
set(JEMALLOC_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/v${JEMALLOC_VERSION}")
set(JEMALLOC_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/v${JEMALLOC_VERSION}")
@@ -1 +0,0 @@
0.0.0-0-g0000000000000000000000000000000000000000
@@ -1,83 +0,0 @@
#!/bin/sh
|
||||
|
||||
usage() {
|
||||
cat <<EOF
|
||||
Usage:
|
||||
/usr/local/bin/jemalloc-config <option>
|
||||
Options:
|
||||
--help | -h : Print usage.
|
||||
--version : Print jemalloc version.
|
||||
--revision : Print shared library revision number.
|
||||
--config : Print configure options used to build jemalloc.
|
||||
--prefix : Print installation directory prefix.
|
||||
--bindir : Print binary installation directory.
|
||||
--datadir : Print data installation directory.
|
||||
--includedir : Print include installation directory.
|
||||
--libdir : Print library installation directory.
|
||||
--mandir : Print manual page installation directory.
|
||||
--cc : Print compiler used to build jemalloc.
|
||||
--cflags : Print compiler flags used to build jemalloc.
|
||||
--cppflags : Print preprocessor flags used to build jemalloc.
|
||||
--cxxflags : Print C++ compiler flags used to build jemalloc.
|
||||
--ldflags : Print library flags used to build jemalloc.
|
||||
--libs : Print libraries jemalloc was linked against.
|
||||
EOF
|
||||
}
|
||||
|
||||
prefix="/usr/local"
|
||||
exec_prefix="/usr/local"
|
||||
|
||||
case "$1" in
|
||||
--help | -h)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
--version)
|
||||
echo "0.0.0-0-g0000000000000000000000000000000000000000"
|
||||
;;
|
||||
--revision)
|
||||
echo "2"
|
||||
;;
|
||||
--config)
|
||||
echo "--enable-autogen"
|
||||
;;
|
||||
--prefix)
|
||||
echo "/usr/local"
|
||||
;;
|
||||
--bindir)
|
||||
echo "/usr/local/bin"
|
||||
;;
|
||||
--datadir)
|
||||
echo "/usr/local/share"
|
||||
;;
|
||||
--includedir)
|
||||
echo "/usr/local/include"
|
||||
;;
|
||||
--libdir)
|
||||
echo "/usr/local/lib"
|
||||
;;
|
||||
--mandir)
|
||||
echo "/usr/local/share/man"
|
||||
;;
|
||||
--cc)
|
||||
echo "gcc"
|
||||
;;
|
||||
--cflags)
|
||||
echo "-std=gnu11 -Wall -Wsign-compare -Wundef -pipe -g3 -fvisibility=hidden -O3 -funroll-loops"
|
||||
;;
|
||||
--cppflags)
|
||||
echo "-D_GNU_SOURCE -D_REENTRANT"
|
||||
;;
|
||||
--cxxflags)
|
||||
echo "-Wall -g3 -fvisibility=hidden -O3"
|
||||
;;
|
||||
--ldflags)
|
||||
echo " "
|
||||
;;
|
||||
--libs)
|
||||
echo "-lm -lstdc++ -lpthread -ldl"
|
||||
;;
|
||||
*)
|
||||
usage
|
||||
exit 1
|
||||
esac
|
|
@@ -1,9 +0,0 @@
#!/bin/sh

prefix=/usr/local
exec_prefix=/usr/local
libdir=${exec_prefix}/lib

LD_PRELOAD=${libdir}/libjemalloc.so.2
export LD_PRELOAD
exec "$@"
File diff suppressed because it is too large
@@ -1,316 +0,0 @@
<?xml version='1.0'?>
|
||||
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
|
||||
xmlns:exsl="http://exslt.org/common"
|
||||
xmlns:ng="http://docbook.org/docbook-ng"
|
||||
xmlns:db="http://docbook.org/ns/docbook"
|
||||
exclude-result-prefixes="exsl"
|
||||
version='1.0'>
|
||||
|
||||
<xsl:import href="../html/docbook.xsl"/>
|
||||
<xsl:import href="../html/manifest.xsl"/>
|
||||
<!-- * html-synop.xsl file is generated by build -->
|
||||
<xsl:import href="html-synop.xsl"/>
|
||||
<xsl:output method="text"
|
||||
encoding="UTF-8"
|
||||
indent="no"/>
|
||||
<!-- ********************************************************************
|
||||
$Id: docbook.xsl 9874 2014-01-29 19:19:48Z bobstayton $
|
||||
********************************************************************
|
||||
|
||||
This file is part of the XSL DocBook Stylesheet distribution.
|
||||
See ../README or http://docbook.sf.net/release/xsl/current/ for
|
||||
copyright and other information.
|
||||
|
||||
******************************************************************** -->
|
||||
|
||||
<!-- ==================================================================== -->
|
||||
|
||||
<xsl:include href="../common/refentry.xsl"/>
|
||||
<xsl:include href="../common/charmap.xsl"/>
|
||||
<xsl:include href="param.xsl"/>
|
||||
<xsl:include href="utility.xsl"/>
|
||||
<xsl:include href="info.xsl"/>
|
||||
<xsl:include href="other.xsl"/>
|
||||
<xsl:include href="refentry.xsl"/>
|
||||
<xsl:include href="block.xsl"/>
|
||||
<xsl:include href="inline.xsl"/>
|
||||
<xsl:include href="synop.xsl"/>
|
||||
<xsl:include href="lists.xsl"/>
|
||||
<xsl:include href="endnotes.xsl"/>
|
||||
<xsl:include href="table.xsl"/>
|
||||
<xsl:include href="pi.xsl"/>
|
||||
|
||||
<!-- * we rename the following just to avoid using params with "man" -->
|
||||
<!-- * prefixes in the table.xsl stylesheet (because that stylesheet -->
|
||||
<!-- * can potentially be reused for more than just man output) -->
|
||||
<xsl:param name="tbl.font.headings" select="$man.font.table.headings"/>
|
||||
<xsl:param name="tbl.font.title" select="$man.font.table.title"/>
|
||||
|
||||
<xsl:param name="stylesheet.result.type" select="'manpages'"/>
|
||||
|
||||
<!-- ==================================================================== -->
|
||||
|
||||
<xsl:template match="/">
|
||||
<!-- * Get a title for current doc so that we let the user -->
|
||||
<!-- * know what document we are processing at this point. -->
|
||||
<xsl:variable name="doc.title">
|
||||
<xsl:call-template name="get.doc.title"/>
|
||||
</xsl:variable>
|
||||
<xsl:choose>
|
||||
<!-- fix namespace if necessary -->
|
||||
<xsl:when test="$exsl.node.set.available != 0 and
|
||||
namespace-uri(/*) = 'http://docbook.org/ns/docbook'">
|
||||
<xsl:call-template name="log.message">
|
||||
<xsl:with-param name="level">Note</xsl:with-param>
|
||||
<xsl:with-param name="source" select="$doc.title"/>
|
||||
<xsl:with-param name="context-desc">
|
||||
<xsl:text>namesp. cut</xsl:text>
|
||||
</xsl:with-param>
|
||||
<xsl:with-param name="message">
|
||||
<xsl:text>stripped namespace before processing</xsl:text>
|
||||
</xsl:with-param>
|
||||
</xsl:call-template>
|
||||
<!-- DEBUG: uncomment to save namespace-fixed document.
|
||||
<xsl:message>Saving namespace-fixed document.</xsl:message>
|
||||
<xsl:call-template name="write.chunk">
|
||||
<xsl:with-param name="filename" select="'namespace-fixed.debug.xml'"/>
|
||||
<xsl:with-param name="method" select="'xml'"/>
|
||||
<xsl:with-param name="content">
|
||||
<xsl:copy-of select="$no.namespace"/>
|
||||
</xsl:with-param>
|
||||
</xsl:call-template>
|
||||
-->
|
||||
<xsl:apply-templates select="exsl:node-set($no.namespace)"/>
|
||||
</xsl:when>
|
||||
<!-- Can't process unless namespace fixed with exsl node-set()-->
|
||||
<xsl:when test="namespace-uri(/*) = 'http://docbook.org/ns/docbook'">
|
||||
<xsl:message terminate="yes">
|
||||
<xsl:text>Unable to strip the namespace from DB5 document,</xsl:text>
|
||||
<xsl:text> cannot proceed.</xsl:text>
|
||||
</xsl:message>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="//*[local-name() = 'refentry']">
|
||||
<!-- * Check to see if we have any refentry children in this -->
|
||||
<!-- * document; if so, process them. The reason we use -->
|
||||
<!-- * local-name()=refentry (instead of just //refentry) to to -->
|
||||
<!-- * check for refentry children is because this stylsheet is -->
|
||||
<!-- * also post-processed by the stylesheet build to create the -->
|
||||
<!-- * manpages/profile-docbook.xsl, and the refentry child check -->
|
||||
<!-- * in the profile-docbook.xsl stylesheet won't work if we do -->
|
||||
<!-- * a simple //refentry check. -->
|
||||
<xsl:apply-templates select="//refentry"/>
|
||||
<!-- * if $man.output.manifest.enabled is non-zero, -->
|
||||
<!-- * generate a manifest file -->
|
||||
<xsl:if test="not($man.output.manifest.enabled = 0)">
|
||||
<xsl:call-template name="generate.manifest">
|
||||
<xsl:with-param name="filename">
|
||||
<xsl:choose>
|
||||
<xsl:when test="not($man.output.manifest.filename = '')">
|
||||
<!-- * If a name for the manifest file is specified, -->
|
||||
<!-- * use that name. -->
|
||||
<xsl:value-of select="$man.output.manifest.filename"/>
|
||||
</xsl:when>
|
||||
<xsl:otherwise>
|
||||
<!-- * Otherwise, if user has unset -->
|
||||
<!-- * $man.output.manifest.filename, default to -->
|
||||
<!-- * using "MAN.MANIFEST" as the filename. Because -->
|
||||
<!-- * $man.output.manifest.enabled is non-zero and -->
|
||||
<!-- * so we must have a filename in order to -->
|
||||
<!-- * generate the manifest. -->
|
||||
<xsl:text>MAN.MANIFEST</xsl:text>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:with-param>
|
||||
</xsl:call-template>
|
||||
</xsl:if>
|
||||
</xsl:when>
|
||||
<xsl:otherwise>
|
||||
<!-- * Otherwise, the document does not contain any -->
|
||||
<!-- * refentry elements, so log/emit message and stop. -->
|
||||
<xsl:call-template name="log.message">
|
||||
<xsl:with-param name="level">Erro</xsl:with-param>
|
||||
<xsl:with-param name="source" select="$doc.title"/>
|
||||
<xsl:with-param name="context-desc">
|
||||
<xsl:text> no refentry</xsl:text>
|
||||
</xsl:with-param>
|
||||
<xsl:with-param name="message">
|
||||
<xsl:text>No refentry elements found</xsl:text>
|
||||
<xsl:if test="$doc.title != ''">
|
||||
<xsl:text> in "</xsl:text>
|
||||
<xsl:choose>
|
||||
<xsl:when test="string-length($doc.title) > 30">
|
||||
<xsl:value-of select="substring($doc.title,1,30)"/>
|
||||
<xsl:text>...</xsl:text>
|
||||
</xsl:when>
|
||||
<xsl:otherwise>
|
||||
<xsl:value-of select="$doc.title"/>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
<xsl:text>"</xsl:text>
|
||||
</xsl:if>
|
||||
<xsl:text>.</xsl:text>
|
||||
</xsl:with-param>
|
||||
</xsl:call-template>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:template>
|
||||
|
||||
<!-- ============================================================== -->
|
||||
|
||||
<xsl:template match="refentry">
|
||||
<xsl:param name="lang">
|
||||
<xsl:call-template name="l10n.language"/>
|
||||
</xsl:param>
|
||||
<!-- * Just use the first refname found as the "name" of the man -->
|
||||
<!-- * page (which may different from the "title"...) -->
|
||||
<xsl:variable name="first.refname" select="refnamediv[1]/refname[1]"/>
|
||||
|
||||
<xsl:call-template name="root.messages">
|
||||
<xsl:with-param name="refname" select="$first.refname"/>
|
||||
</xsl:call-template>
|
||||
|
||||
<!-- * Because there are several times when we need to check *info of -->
|
||||
<!-- * each refentry and its ancestors, we get those and store the -->
|
||||
<!-- * data from them as a node-set in memory. -->
|
||||
|
||||
<!-- * Make a node-set with contents of *info -->
|
||||
<xsl:variable name="get.info"
|
||||
select="ancestor-or-self::*/*[substring(local-name(),
|
||||
string-length(local-name()) - 3) = 'info']"
|
||||
/>
|
||||
<xsl:variable name="info" select="exsl:node-set($get.info)"/>
|
||||
|
||||
<!-- * The get.refentry.metadata template is in -->
|
||||
<!-- * ../common/refentry.xsl. It looks for metadata in $info -->
|
||||
<!-- * and in various other places and then puts it into a form -->
|
||||
<!-- * that's easier for us to digest. -->
|
||||
<xsl:variable name="get.refentry.metadata">
|
||||
<xsl:call-template name="get.refentry.metadata">
|
||||
<xsl:with-param name="refname" select="$first.refname"/>
|
||||
<xsl:with-param name="info" select="$info"/>
|
||||
<xsl:with-param name="prefs" select="$refentry.metadata.prefs"/>
|
||||
</xsl:call-template>
|
||||
</xsl:variable>
|
||||
<xsl:variable name="refentry.metadata" select="exsl:node-set($get.refentry.metadata)"/>
|
||||
|
||||
<!-- * Assemble the various parts into a complete page, then store into -->
|
||||
<!-- * $manpage.contents so that we can manipluate them further. -->
|
||||
<xsl:variable name="manpage.contents">
|
||||
<!-- * preprocessor invocation (need for legacy AT&T troff use) -->
|
||||
<!-- * this tells troff to pre-process the page through tbl(1) -->
|
||||
<!-- * (groff can figure it out automatically, but AT&T troff can't) -->
|
||||
<xsl:text>'\" t </xsl:text>
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<!-- * top.comment = commented-out section at top of roff source -->
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<xsl:call-template name="top.comment">
|
||||
<xsl:with-param name="info" select="$info"/>
|
||||
<xsl:with-param name="date" select="$refentry.metadata/date"/>
|
||||
<xsl:with-param name="title" select="$refentry.metadata/title"/>
|
||||
<xsl:with-param name="manual" select="$refentry.metadata/manual"/>
|
||||
<xsl:with-param name="source" select="$refentry.metadata/source"/>
|
||||
<xsl:with-param name="refname" select="$first.refname"/>
|
||||
</xsl:call-template>
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<!-- * TH.title.line = title line in header/footer of man page -->
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<xsl:call-template name="TH.title.line">
|
||||
<!-- * .TH TITLE section extra1 extra2 extra3 -->
|
||||
<!-- * -->
|
||||
<!-- * According to the man(7) man page: -->
|
||||
<!-- * -->
|
||||
<!-- * extra1 = date, "the date of the last revision" -->
|
||||
<!-- * extra2 = source, "the source of the command" -->
|
||||
<!-- * extra3 = manual, "the title of the manual -->
|
||||
<!-- * (e.g., Linux Programmer's Manual)" -->
|
||||
<!-- * -->
|
||||
<!-- * So, we end up with: -->
|
||||
<!-- * -->
|
||||
<!-- * .TH TITLE section date source manual -->
|
||||
<!-- * -->
|
||||
<xsl:with-param name="title" select="$refentry.metadata/title"/>
|
||||
<xsl:with-param name="section" select="$refentry.metadata/section"/>
|
||||
<xsl:with-param name="extra1" select="$refentry.metadata/date"/>
|
||||
<xsl:with-param name="extra2" select="$refentry.metadata/source"/>
|
||||
<xsl:with-param name="extra3" select="$refentry.metadata/manual"/>
|
||||
</xsl:call-template>
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<!-- * (re)define some macros -->
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<xsl:call-template name="define.portability.macros"/>
|
||||
<xsl:if test="not($man.output.better.ps.enabled = 0)">
|
||||
<xsl:call-template name="define.macros"/>
|
||||
</xsl:if>
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<!-- * Set default hyphenation, justification, indentation, and -->
|
||||
<!-- * line-breaking -->
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<xsl:call-template name="set.default.formatting"/>
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<!-- * Main body of man page -->
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<xsl:text>.\" ----------------------------------------------------------------- </xsl:text>
|
||||
<xsl:text>.\" * MAIN CONTENT STARTS HERE * </xsl:text>
|
||||
<xsl:text>.\" ----------------------------------------------------------------- </xsl:text>
|
||||
<xsl:apply-templates/>
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<!-- * AUTHOR section -->
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<xsl:if test="not($man.authors.section.enabled = 0)">
|
||||
<xsl:call-template name="author.section">
|
||||
<xsl:with-param name="info" select="$info"/>
|
||||
</xsl:call-template>
|
||||
</xsl:if>
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<!-- * COPYRIGHT section -->
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<xsl:if test="not($man.copyright.section.enabled = 0)">
|
||||
<xsl:call-template name="copyright.section">
|
||||
<xsl:with-param name="info" select="$info"/>
|
||||
</xsl:call-template>
|
||||
</xsl:if>
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<!-- * NOTES list (only if user wants endnotes numbered and/or listed) -->
|
||||
<!-- * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
|
||||
<xsl:if test="$man.endnotes.list.enabled != 0 or
|
||||
$man.endnotes.are.numbered != 0">
|
||||
<xsl:call-template name="endnotes.list"/>
|
||||
</xsl:if>
|
||||
</xsl:variable> <!-- * end of manpage.contents -->
|
||||
|
||||
<!-- * Prepare the page contents for final output, then store in -->
|
||||
<!-- * $manpage.contents.prepared so the we can pass it on to the -->
|
||||
<!-- * write.text.chunk() function -->
|
||||
<xsl:variable name="manpage.contents.prepared">
|
||||
<!-- * "Preparing" the page contents involves, at a minimum, -->
|
||||
<!-- * doubling any backslashes found (so they aren't interpreted -->
|
||||
<!-- * as roff escapes). -->
|
||||
<!-- * -->
|
||||
<!-- * If $charmap.enabled is true, "preparing" the page contents also -->
|
||||
<!-- * involves applying a character map to convert Unicode symbols and -->
|
||||
<!-- * special characters into corresponding roff escape sequences. -->
|
||||
<xsl:call-template name="prepare.manpage.contents">
|
||||
<xsl:with-param name="content" select="$manpage.contents"/>
|
||||
</xsl:call-template>
|
||||
</xsl:variable>
|
||||
|
||||
<!-- * Write the prepared page contents to disk to create -->
|
||||
<!-- * the final man page. -->
|
||||
<xsl:call-template name="write.man.file">
|
||||
<xsl:with-param name="name" select="$first.refname"/>
|
||||
<xsl:with-param name="section" select="$refentry.metadata/section"/>
|
||||
<xsl:with-param name="lang" select="$lang"/>
|
||||
<xsl:with-param name="content" select="$manpage.contents.prepared"/>
|
||||
</xsl:call-template>
|
||||
|
||||
<!-- * Generate "stub" (alias) pages (if any needed) -->
|
||||
<xsl:call-template name="write.stubs">
|
||||
<xsl:with-param name="first.refname" select="$first.refname"/>
|
||||
<xsl:with-param name="section" select="$refentry.metadata/section"/>
|
||||
<xsl:with-param name="lang" select="$lang"/>
|
||||
</xsl:call-template>
|
||||
|
||||
</xsl:template>
|
||||
|
||||
</xsl:stylesheet>
|
|
@@ -1,5 +0,0 @@
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:import href="/usr/share/xml/docbook/stylesheet/docbook-xsl/html/docbook.xsl"/>
<xsl:import href="/home/steemann/ArangoNoAsan/3rdParty/jemalloc/v5.0.1/doc/stylesheet.xsl"/>
<xsl:output method="xml" encoding="utf-8"/>
</xsl:stylesheet>
File diff suppressed because it is too large
@@ -1,4 +0,0 @@
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:import href="/usr/share/xml/docbook/stylesheet/docbook-xsl/manpages/docbook.xsl"/>
<xsl:import href="/home/steemann/ArangoNoAsan/3rdParty/jemalloc/v5.0.1/doc/stylesheet.xsl"/>
</xsl:stylesheet>
@@ -1,7 +0,0 @@
#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
#define JEMALLOC_INTERNAL_BASE_TYPES_H

typedef struct base_block_s base_block_t;
typedef struct base_s base_t;

#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */
@@ -1,9 +0,0 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
#define JEMALLOC_INTERNAL_EXTENT_TYPES_H

typedef struct extent_s extent_t;
typedef struct extents_s extents_t;

#define EXTENT_HOOKS_INITIALIZER NULL

#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
@@ -1,62 +0,0 @@
#!/usr/bin/env awk -f
|
||||
|
||||
BEGIN {
|
||||
sym_prefix = ""
|
||||
split("\
|
||||
aligned_alloc \
|
||||
calloc \
|
||||
dallocx \
|
||||
free \
|
||||
mallctl \
|
||||
mallctlbymib \
|
||||
mallctlnametomib \
|
||||
malloc \
|
||||
malloc_conf \
|
||||
malloc_message \
|
||||
malloc_stats_print \
|
||||
malloc_usable_size \
|
||||
mallocx \
|
||||
nallocx \
|
||||
posix_memalign \
|
||||
rallocx \
|
||||
realloc \
|
||||
sallocx \
|
||||
sdallocx \
|
||||
xallocx \
|
||||
memalign \
|
||||
valloc \
|
||||
__libc_calloc \
|
||||
__libc_free \
|
||||
__libc_malloc \
|
||||
__libc_memalign \
|
||||
__libc_realloc \
|
||||
__libc_valloc \
|
||||
pthread_create \
|
||||
__free_hook \
|
||||
__malloc_hook \
|
||||
__realloc_hook \
|
||||
__memalign_hook \
|
||||
", exported_symbol_names)
|
||||
# Store exported symbol names as keys in exported_symbols.
|
||||
for (i in exported_symbol_names) {
|
||||
exported_symbols[exported_symbol_names[i]] = 1
|
||||
}
|
||||
}
|
||||
|
||||
# Process 'nm -a <c_source.o>' output.
|
||||
#
|
||||
# Handle lines like:
|
||||
# 0000000000000008 D opt_junk
|
||||
# 0000000000007574 T malloc_initialized
|
||||
(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~ /^[A-Za-z0-9_]+$/) {
|
||||
print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
|
||||
}
|
||||
|
||||
# Process 'dumpbin /SYMBOLS <c_source.obj>' output.
|
||||
#
|
||||
# Handle lines like:
|
||||
# 353 00008098 SECT4 notype External | opt_junk
|
||||
# 3F1 00000000 SECT7 notype () External | malloc_initialized
|
||||
($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
|
||||
print $NF
|
||||
}
|
|
@@ -1,62 +0,0 @@
#!/usr/bin/env awk -f
|
||||
|
||||
BEGIN {
|
||||
sym_prefix = ""
|
||||
split("\
|
||||
jet_aligned_alloc \
|
||||
jet_calloc \
|
||||
jet_dallocx \
|
||||
jet_free \
|
||||
jet_mallctl \
|
||||
jet_mallctlbymib \
|
||||
jet_mallctlnametomib \
|
||||
jet_malloc \
|
||||
jet_malloc_conf \
|
||||
jet_malloc_message \
|
||||
jet_malloc_stats_print \
|
||||
jet_malloc_usable_size \
|
||||
jet_mallocx \
|
||||
jet_nallocx \
|
||||
jet_posix_memalign \
|
||||
jet_rallocx \
|
||||
jet_realloc \
|
||||
jet_sallocx \
|
||||
jet_sdallocx \
|
||||
jet_xallocx \
|
||||
jet_memalign \
|
||||
jet_valloc \
|
||||
__libc_calloc \
|
||||
__libc_free \
|
||||
__libc_malloc \
|
||||
__libc_memalign \
|
||||
__libc_realloc \
|
||||
__libc_valloc \
|
||||
pthread_create \
|
||||
__free_hook \
|
||||
__malloc_hook \
|
||||
__realloc_hook \
|
||||
__memalign_hook \
|
||||
", exported_symbol_names)
|
||||
# Store exported symbol names as keys in exported_symbols.
|
||||
for (i in exported_symbol_names) {
|
||||
exported_symbols[exported_symbol_names[i]] = 1
|
||||
}
|
||||
}
|
||||
|
||||
# Process 'nm -a <c_source.o>' output.
|
||||
#
|
||||
# Handle lines like:
|
||||
# 0000000000000008 D opt_junk
|
||||
# 0000000000007574 T malloc_initialized
|
||||
(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~ /^[A-Za-z0-9_]+$/) {
|
||||
print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
|
||||
}
|
||||
|
||||
# Process 'dumpbin /SYMBOLS <c_source.obj>' output.
|
||||
#
|
||||
# Handle lines like:
|
||||
# 353 00008098 SECT4 notype External | opt_junk
|
||||
# 3F1 00000000 SECT7 notype () External | malloc_initialized
|
||||
($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
|
||||
print $NF
|
||||
}
|
|
@@ -1,22 +0,0 @@
#define je_aligned_alloc JEMALLOC_N(aligned_alloc)
|
||||
#define je_calloc JEMALLOC_N(calloc)
|
||||
#define je_dallocx JEMALLOC_N(dallocx)
|
||||
#define je_free JEMALLOC_N(free)
|
||||
#define je_mallctl JEMALLOC_N(mallctl)
|
||||
#define je_mallctlbymib JEMALLOC_N(mallctlbymib)
|
||||
#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib)
|
||||
#define je_malloc JEMALLOC_N(malloc)
|
||||
#define je_malloc_conf JEMALLOC_N(malloc_conf)
|
||||
#define je_malloc_message JEMALLOC_N(malloc_message)
|
||||
#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print)
|
||||
#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size)
|
||||
#define je_mallocx JEMALLOC_N(mallocx)
|
||||
#define je_nallocx JEMALLOC_N(nallocx)
|
||||
#define je_posix_memalign JEMALLOC_N(posix_memalign)
|
||||
#define je_rallocx JEMALLOC_N(rallocx)
|
||||
#define je_realloc JEMALLOC_N(realloc)
|
||||
#define je_sallocx JEMALLOC_N(sallocx)
|
||||
#define je_sdallocx JEMALLOC_N(sdallocx)
|
||||
#define je_xallocx JEMALLOC_N(xallocx)
|
||||
#define je_memalign JEMALLOC_N(memalign)
|
||||
#define je_valloc JEMALLOC_N(valloc)
|
|
@@ -1,22 +0,0 @@
aligned_alloc:aligned_alloc
|
||||
calloc:calloc
|
||||
dallocx:dallocx
|
||||
free:free
|
||||
mallctl:mallctl
|
||||
mallctlbymib:mallctlbymib
|
||||
mallctlnametomib:mallctlnametomib
|
||||
malloc:malloc
|
||||
malloc_conf:malloc_conf
|
||||
malloc_message:malloc_message
|
||||
malloc_stats_print:malloc_stats_print
|
||||
malloc_usable_size:malloc_usable_size
|
||||
mallocx:mallocx
|
||||
nallocx:nallocx
|
||||
posix_memalign:posix_memalign
|
||||
rallocx:rallocx
|
||||
realloc:realloc
|
||||
sallocx:sallocx
|
||||
sdallocx:sdallocx
|
||||
xallocx:xallocx
|
||||
memalign:memalign
|
||||
valloc:valloc
|
|
@@ -1,22 +0,0 @@
#undef je_aligned_alloc
|
||||
#undef je_calloc
|
||||
#undef je_dallocx
|
||||
#undef je_free
|
||||
#undef je_mallctl
|
||||
#undef je_mallctlbymib
|
||||
#undef je_mallctlnametomib
|
||||
#undef je_malloc
|
||||
#undef je_malloc_conf
|
||||
#undef je_malloc_message
|
||||
#undef je_malloc_stats_print
|
||||
#undef je_malloc_usable_size
|
||||
#undef je_mallocx
|
||||
#undef je_nallocx
|
||||
#undef je_posix_memalign
|
||||
#undef je_rallocx
|
||||
#undef je_realloc
|
||||
#undef je_sallocx
|
||||
#undef je_sdallocx
|
||||
#undef je_xallocx
|
||||
#undef je_memalign
|
||||
#undef je_valloc
|
File diff suppressed because it is too large
@@ -1,164 +0,0 @@
#ifndef JEMALLOC_INTERNAL_STATS_H
|
||||
#define JEMALLOC_INTERNAL_STATS_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/size_classes.h"
|
||||
#include "jemalloc/internal/stats_tsd.h"
|
||||
|
||||
/* OPTION(opt, var_name, default, set_value_to) */
|
||||
#define STATS_PRINT_OPTIONS \
|
||||
OPTION('J', json, false, true) \
|
||||
OPTION('g', general, true, false) \
|
||||
OPTION('m', merged, config_stats, false) \
|
||||
OPTION('d', destroyed, config_stats, false) \
|
||||
OPTION('a', unmerged, config_stats, false) \
|
||||
OPTION('b', bins, true, false) \
|
||||
OPTION('l', large, true, false) \
|
||||
OPTION('x', mutex, true, false)
|
||||
|
||||
enum {
|
||||
#define OPTION(o, v, d, s) stats_print_option_num_##v,
|
||||
STATS_PRINT_OPTIONS
|
||||
#undef OPTION
|
||||
stats_print_tot_num_options
|
||||
};
|
||||
|
||||
/* Options for stats_print. */
|
||||
extern bool opt_stats_print;
|
||||
extern char opt_stats_print_opts[stats_print_tot_num_options+1];
|
||||
|
||||
/* Implements je_malloc_stats_print. */
|
||||
void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
|
||||
const char *opts);
|
||||
|
||||
/*
|
||||
* In those architectures that support 64-bit atomics, we use atomic updates for
|
||||
* our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
|
||||
* externally.
|
||||
*/
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
typedef atomic_u64_t arena_stats_u64_t;
|
||||
#else
|
||||
/* Must hold the arena stats mutex while reading atomically. */
|
||||
typedef uint64_t arena_stats_u64_t;
|
||||
#endif
|
||||
|
||||
typedef struct malloc_bin_stats_s {
|
||||
/*
|
||||
* Total number of allocation/deallocation requests served directly by
|
||||
* the bin. Note that tcache may allocate an object, then recycle it
|
||||
* many times, resulting many increments to nrequests, but only one
|
||||
* each to nmalloc and ndalloc.
|
||||
*/
|
||||
uint64_t nmalloc;
|
||||
uint64_t ndalloc;
|
||||
|
||||
/*
|
||||
* Number of allocation requests that correspond to the size of this
|
||||
* bin. This includes requests served by tcache, though tcache only
|
||||
* periodically merges into this counter.
|
||||
*/
|
||||
uint64_t nrequests;
|
||||
|
||||
/*
|
||||
* Current number of regions of this size class, including regions
|
||||
* currently cached by tcache.
|
||||
*/
|
||||
size_t curregs;
|
||||
|
||||
/* Number of tcache fills from this bin. */
|
||||
uint64_t nfills;
|
||||
|
||||
/* Number of tcache flushes to this bin. */
|
||||
uint64_t nflushes;
|
||||
|
||||
/* Total number of slabs created for this bin's size class. */
|
||||
uint64_t nslabs;
|
||||
|
||||
/*
|
||||
* Total number of slabs reused by extracting them from the slabs heap
|
||||
* for this bin's size class.
|
||||
*/
|
||||
uint64_t reslabs;
|
||||
|
||||
/* Current number of slabs in this bin. */
|
||||
size_t curslabs;
|
||||
|
||||
mutex_prof_data_t mutex_data;
|
||||
} malloc_bin_stats_t;
|
||||
|
||||
typedef struct malloc_large_stats_s {
|
||||
/*
|
||||
* Total number of allocation/deallocation requests served directly by
|
||||
* the arena.
|
||||
*/
|
||||
arena_stats_u64_t nmalloc;
|
||||
arena_stats_u64_t ndalloc;
|
||||
|
||||
/*
|
||||
* Number of allocation requests that correspond to this size class.
|
||||
* This includes requests served by tcache, though tcache only
|
||||
* periodically merges into this counter.
|
||||
*/
|
||||
arena_stats_u64_t nrequests; /* Partially derived. */
|
||||
|
||||
/* Current number of allocations of this size class. */
|
||||
size_t curlextents; /* Derived. */
|
||||
} malloc_large_stats_t;
|
||||
|
||||
typedef struct decay_stats_s {
|
||||
/* Total number of purge sweeps. */
|
||||
arena_stats_u64_t npurge;
|
||||
/* Total number of madvise calls made. */
|
||||
arena_stats_u64_t nmadvise;
|
||||
/* Total number of pages purged. */
|
||||
arena_stats_u64_t purged;
|
||||
} decay_stats_t;
|
||||
|
||||
/*
|
||||
* Arena stats. Note that fields marked "derived" are not directly maintained
|
||||
* within the arena code; rather their values are derived during stats merge
|
||||
* requests.
|
||||
*/
|
||||
typedef struct arena_stats_s {
|
||||
#ifndef JEMALLOC_ATOMIC_U64
|
||||
malloc_mutex_t mtx;
|
||||
#endif
|
||||
|
||||
/* Number of bytes currently mapped, excluding retained memory. */
|
||||
atomic_zu_t mapped; /* Partially derived. */
|
||||
|
||||
/*
|
||||
* Number of unused virtual memory bytes currently retained. Retained
|
||||
* bytes are technically mapped (though always decommitted or purged),
|
||||
* but they are excluded from the mapped statistic (above).
|
||||
*/
|
||||
atomic_zu_t retained; /* Derived. */
|
||||
|
||||
decay_stats_t decay_dirty;
|
||||
decay_stats_t decay_muzzy;
|
||||
|
||||
atomic_zu_t base; /* Derived. */
|
||||
atomic_zu_t internal;
|
||||
atomic_zu_t resident; /* Derived. */
|
||||
|
||||
atomic_zu_t allocated_large; /* Derived. */
|
||||
arena_stats_u64_t nmalloc_large; /* Derived. */
|
||||
arena_stats_u64_t ndalloc_large; /* Derived. */
|
||||
arena_stats_u64_t nrequests_large; /* Derived. */
|
||||
|
||||
/* Number of bytes cached in tcache associated with this arena. */
|
||||
atomic_zu_t tcache_bytes; /* Derived. */
|
||||
|
||||
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
|
||||
|
||||
/* One element for each large size class. */
|
||||
malloc_large_stats_t lstats[NSIZES - NBINS];
|
||||
|
||||
/* Arena uptime. */
|
||||
nstime_t uptime;
|
||||
} arena_stats_t;
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_STATS_H */
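Context note (not part of the diff): the STATS_PRINT_OPTIONS table in the removed stats.h maps single characters of the opts string accepted by malloc_stats_print() to toggles away from their defaults; for example 'J' selects JSON output, while 'b' and 'l' suppress the per-bin and per-large-size-class tables. A minimal usage sketch, assuming a default unprefixed jemalloc build:

#include <jemalloc/jemalloc.h>

int main(void) {
    /* Full human-readable statistics through the default write callback. */
    malloc_stats_print(NULL, NULL, NULL);
    /* JSON output with the per-bin and per-large-size-class tables omitted. */
    malloc_stats_print(NULL, NULL, "Jbl");
    return 0;
}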
@@ -1,12 +0,0 @@
#ifndef JEMALLOC_INTERNAL_STATS_TSD_H
#define JEMALLOC_INTERNAL_STATS_TSD_H

typedef struct tcache_bin_stats_s {
	/*
	 * Number of allocation requests that corresponded to the size of this
	 * bin.
	 */
	uint64_t nrequests;
} tcache_bin_stats_t;

#endif /* JEMALLOC_INTERNAL_STATS_TSD_H */
@@ -1,64 +0,0 @@
#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
|
||||
#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
|
||||
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/size_classes.h"
|
||||
#include "jemalloc/internal/stats_tsd.h"
|
||||
#include "jemalloc/internal/ticker.h"
|
||||
|
||||
/*
|
||||
* Read-only information associated with each element of tcache_t's tbins array
|
||||
* is stored separately, mainly to reduce memory usage.
|
||||
*/
|
||||
struct tcache_bin_info_s {
|
||||
unsigned ncached_max; /* Upper limit on ncached. */
|
||||
};
|
||||
|
||||
struct tcache_bin_s {
|
||||
low_water_t low_water; /* Min # cached since last GC. */
|
||||
uint32_t ncached; /* # of cached objects. */
|
||||
/*
|
||||
* ncached and stats are both modified frequently. Let's keep them
|
||||
* close so that they have a higher chance of being on the same
|
||||
* cacheline, thus less write-backs.
|
||||
*/
|
||||
tcache_bin_stats_t tstats;
|
||||
/*
|
||||
* To make use of adjacent cacheline prefetch, the items in the avail
|
||||
* stack goes to higher address for newer allocations. avail points
|
||||
* just above the available space, which means that
|
||||
* avail[-ncached, ... -1] are available items and the lowest item will
|
||||
* be allocated first.
|
||||
*/
|
||||
void **avail; /* Stack of available objects. */
|
||||
};
|
||||
|
||||
struct tcache_s {
|
||||
/* Data accessed frequently first: prof, ticker and small bins. */
|
||||
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */
|
||||
ticker_t gc_ticker; /* Drives incremental GC. */
|
||||
/*
|
||||
* The pointer stacks associated with tbins follow as a contiguous
|
||||
* array. During tcache initialization, the avail pointer in each
|
||||
* element of tbins is initialized to point to the proper offset within
|
||||
* this array.
|
||||
*/
|
||||
tcache_bin_t tbins_small[NBINS];
|
||||
/* Data accessed less often below. */
|
||||
ql_elm(tcache_t) link; /* Used for aggregating stats. */
|
||||
arena_t *arena; /* Associated arena. */
|
||||
szind_t next_gc_bin; /* Next bin to GC. */
|
||||
/* For small bins, fill (ncached_max >> lg_fill_div). */
|
||||
uint8_t lg_fill_div[NBINS];
|
||||
tcache_bin_t tbins_large[NSIZES-NBINS];
|
||||
};
|
||||
|
||||
/* Linkage for list of available (previously used) explicit tcache IDs. */
|
||||
struct tcaches_s {
|
||||
union {
|
||||
tcache_t *tcache;
|
||||
tcaches_t *next;
|
||||
};
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */
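Context note (not part of the diff): the comment in the removed tcache_bin_s above describes the avail stack convention, where avail points just past the cached slots, avail[-ncached .. -1] hold the cached objects, and the lowest-addressed item is handed out first. A simplified illustration of that indexing (a toy sketch, not jemalloc's actual code):

#include <stddef.h>

typedef struct {
    unsigned ncached; /* Number of cached objects. */
    void **avail;     /* Points just above the cached objects. */
} toy_bin_t;

/* Pop the lowest-addressed cached slot first, as the comment describes. */
static void *toy_bin_pop(toy_bin_t *bin) {
    if (bin->ncached == 0) {
        return NULL; /* jemalloc would refill from the arena here. */
    }
    void *ret = *(bin->avail - bin->ncached);
    bin->ncached--;
    return ret;
}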
@@ -1,66 +0,0 @@
/*
|
||||
* By default application code must explicitly refer to mangled symbol names,
|
||||
* so that it is possible to use jemalloc in conjunction with another allocator
|
||||
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
|
||||
* name mangling that matches the API prefixing that happened as a result of
|
||||
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
|
||||
*/
|
||||
#ifdef JEMALLOC_MANGLE
|
||||
# ifndef JEMALLOC_NO_DEMANGLE
|
||||
# define JEMALLOC_NO_DEMANGLE
|
||||
# endif
|
||||
# define aligned_alloc jet_aligned_alloc
|
||||
# define calloc jet_calloc
|
||||
# define dallocx jet_dallocx
|
||||
# define free jet_free
|
||||
# define mallctl jet_mallctl
|
||||
# define mallctlbymib jet_mallctlbymib
|
||||
# define mallctlnametomib jet_mallctlnametomib
|
||||
# define malloc jet_malloc
|
||||
# define malloc_conf jet_malloc_conf
|
||||
# define malloc_message jet_malloc_message
|
||||
# define malloc_stats_print jet_malloc_stats_print
|
||||
# define malloc_usable_size jet_malloc_usable_size
|
||||
# define mallocx jet_mallocx
|
||||
# define nallocx jet_nallocx
|
||||
# define posix_memalign jet_posix_memalign
|
||||
# define rallocx jet_rallocx
|
||||
# define realloc jet_realloc
|
||||
# define sallocx jet_sallocx
|
||||
# define sdallocx jet_sdallocx
|
||||
# define xallocx jet_xallocx
|
||||
# define memalign jet_memalign
|
||||
# define valloc jet_valloc
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The jet_* macros can be used as stable alternative names for the
|
||||
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
|
||||
* meant for use in jemalloc itself, but it can be used by application code to
|
||||
* provide isolation from the name mangling specified via --with-mangling
|
||||
* and/or --with-jemalloc-prefix.
|
||||
*/
|
||||
#ifndef JEMALLOC_NO_DEMANGLE
|
||||
# undef jet_aligned_alloc
|
||||
# undef jet_calloc
|
||||
# undef jet_dallocx
|
||||
# undef jet_free
|
||||
# undef jet_mallctl
|
||||
# undef jet_mallctlbymib
|
||||
# undef jet_mallctlnametomib
|
||||
# undef jet_malloc
|
||||
# undef jet_malloc_conf
|
||||
# undef jet_malloc_message
|
||||
# undef jet_malloc_stats_print
|
||||
# undef jet_malloc_usable_size
|
||||
# undef jet_mallocx
|
||||
# undef jet_nallocx
|
||||
# undef jet_posix_memalign
|
||||
# undef jet_rallocx
|
||||
# undef jet_realloc
|
||||
# undef jet_sallocx
|
||||
# undef jet_sdallocx
|
||||
# undef jet_xallocx
|
||||
# undef jet_memalign
|
||||
# undef jet_valloc
|
||||
#endif
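Context note (not part of the diff): the removed mangle header implements the convention its own comment describes: with JEMALLOC_MANGLE defined before including the generated jemalloc header, plain allocator names are rewritten to the prefixed symbols (jet_* in this test-only variant, je_* or a --with-jemalloc-prefix name in the installed header). A hedged sketch of the installed-header usage:

#include <stddef.h>

#define JEMALLOC_MANGLE
#include <jemalloc/jemalloc.h>

void *grab_buffer(size_t n) {
    /* Spelled as plain malloc, but resolves to the prefixed jemalloc symbol
     * when the library was configured with a symbol prefix. */
    return malloc(n);
}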
@@ -1,66 +0,0 @@
/*
|
||||
* The jet_ prefix on the following public symbol declarations is an artifact
|
||||
* of namespace management, and should be omitted in application code unless
|
||||
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h).
|
||||
*/
|
||||
extern JEMALLOC_EXPORT const char *jet_malloc_conf;
|
||||
extern JEMALLOC_EXPORT void (*jet_malloc_message)(void *cbopaque,
|
||||
const char *s);
|
||||
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *jet_malloc(size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *jet_calloc(size_t num, size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
|
||||
JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_posix_memalign(void **memptr,
|
||||
size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *jet_aligned_alloc(size_t alignment,
|
||||
size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
|
||||
JEMALLOC_ALLOC_SIZE(2);
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *jet_realloc(void *ptr, size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
|
||||
JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_free(void *ptr)
|
||||
JEMALLOC_CXX_THROW;
|
||||
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *jet_mallocx(size_t size, int flags)
|
||||
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *jet_rallocx(void *ptr, size_t size,
|
||||
int flags) JEMALLOC_ALLOC_SIZE(2);
|
||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_xallocx(void *ptr, size_t size,
|
||||
size_t extra, int flags);
|
||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_sallocx(const void *ptr,
|
||||
int flags) JEMALLOC_ATTR(pure);
|
||||
JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_dallocx(void *ptr, int flags);
|
||||
JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_sdallocx(void *ptr, size_t size,
|
||||
int flags);
|
||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_nallocx(size_t size, int flags)
|
||||
JEMALLOC_ATTR(pure);
|
||||
|
||||
JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctl(const char *name,
|
||||
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
|
||||
JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctlnametomib(const char *name,
|
||||
size_t *mibp, size_t *miblenp);
|
||||
JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctlbymib(const size_t *mib,
|
||||
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
|
||||
JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_malloc_stats_print(
|
||||
void (*write_cb)(void *, const char *), void *jet_cbopaque,
|
||||
const char *opts);
|
||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_malloc_usable_size(
|
||||
JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
|
||||
|
||||
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *jet_memalign(size_t alignment, size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_OVERRIDE_VALLOC
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *jet_valloc(size_t size) JEMALLOC_CXX_THROW
|
||||
JEMALLOC_ATTR(malloc);
|
||||
#endif
|
|
@@ -1,4 +0,0 @@
#define JEMALLOC_SPIN_C_
#include "jemalloc/internal/jemalloc_preamble.h"

#include "jemalloc/internal/spin.h"
File diff suppressed because it is too large
@@ -1,173 +0,0 @@
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include <limits.h>
|
||||
#ifndef SIZE_T_MAX
|
||||
# define SIZE_T_MAX SIZE_MAX
|
||||
#endif
|
||||
#include <stdlib.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdbool.h>
|
||||
#include <errno.h>
|
||||
#include <math.h>
|
||||
#include <string.h>
|
||||
#ifdef _WIN32
|
||||
# include "msvc_compat/strings.h"
|
||||
#endif
|
||||
|
||||
#ifdef _WIN32
|
||||
# include <windows.h>
|
||||
# include "msvc_compat/windows_extra.h"
|
||||
#else
|
||||
# include <pthread.h>
|
||||
#endif
|
||||
|
||||
#include "test/jemalloc_test_defs.h"
|
||||
|
||||
#ifdef JEMALLOC_OSSPIN
|
||||
# include <libkern/OSAtomic.h>
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_ALTIVEC) && !defined(__APPLE__)
|
||||
# include <altivec.h>
|
||||
#endif
|
||||
#ifdef HAVE_SSE2
|
||||
# include <emmintrin.h>
|
||||
#endif
|
||||
|
||||
/******************************************************************************/
|
||||
/*
|
||||
* For unit tests, expose all public and private interfaces.
|
||||
*/
|
||||
#ifdef JEMALLOC_UNIT_TEST
|
||||
# define JEMALLOC_JET
|
||||
# define JEMALLOC_MANGLE
|
||||
# include "jemalloc/internal/jemalloc_preamble.h"
|
||||
# include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/*
|
||||
* For integration tests, expose the public jemalloc interfaces, but only
|
||||
* expose the minimum necessary internal utility code (to avoid re-implementing
|
||||
* essentially identical code within the test infrastructure).
|
||||
*/
|
||||
#elif defined(JEMALLOC_INTEGRATION_TEST) || \
|
||||
defined(JEMALLOC_INTEGRATION_CPP_TEST)
|
||||
# define JEMALLOC_MANGLE
|
||||
# include "jemalloc/jemalloc.h"
|
||||
# include "jemalloc/internal/jemalloc_internal_defs.h"
|
||||
# include "jemalloc/internal/jemalloc_internal_macros.h"
|
||||
|
||||
static const bool config_debug =
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
# define JEMALLOC_N(n) je_##n
|
||||
# include "jemalloc/internal/private_namespace.h"
|
||||
# include "jemalloc/internal/hooks.h"
|
||||
|
||||
/* Hermetic headers. */
|
||||
# include "jemalloc/internal/assert.h"
|
||||
# include "jemalloc/internal/malloc_io.h"
|
||||
# include "jemalloc/internal/nstime.h"
|
||||
# include "jemalloc/internal/util.h"
|
||||
|
||||
/* Non-hermetic headers. */
|
||||
# include "jemalloc/internal/qr.h"
|
||||
# include "jemalloc/internal/ql.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/*
|
||||
* For stress tests, expose the public jemalloc interfaces with name mangling
|
||||
* so that they can be tested as e.g. malloc() and free(). Also expose the
|
||||
* public jemalloc interfaces with jet_ prefixes, so that stress tests can use
|
||||
* a separate allocator for their internal data structures.
|
||||
*/
|
||||
#elif defined(JEMALLOC_STRESS_TEST)
|
||||
# include "jemalloc/jemalloc.h"
|
||||
|
||||
# include "jemalloc/jemalloc_protos_jet.h"
|
||||
|
||||
# define JEMALLOC_JET
|
||||
# include "jemalloc/internal/jemalloc_preamble.h"
|
||||
# include "jemalloc/internal/jemalloc_internal_includes.h"
|
||||
# include "jemalloc/internal/public_unnamespace.h"
|
||||
# undef JEMALLOC_JET
|
||||
|
||||
# include "jemalloc/jemalloc_rename.h"
|
||||
# define JEMALLOC_MANGLE
|
||||
# ifdef JEMALLOC_STRESS_TESTLIB
|
||||
# include "jemalloc/jemalloc_mangle_jet.h"
|
||||
# else
|
||||
# include "jemalloc/jemalloc_mangle.h"
|
||||
# endif
|
||||
|
||||
/******************************************************************************/
|
||||
/*
|
||||
* This header does dangerous things, the effects of which only test code
|
||||
* should be subject to.
|
||||
*/
|
||||
#else
|
||||
# error "This header cannot be included outside a testing context"
|
||||
#endif
|
||||
|
||||
/******************************************************************************/
|
||||
/*
|
||||
* Common test utilities.
|
||||
*/
|
||||
#include "test/btalloc.h"
|
||||
#include "test/math.h"
|
||||
#include "test/mtx.h"
|
||||
#include "test/mq.h"
|
||||
#include "test/test.h"
|
||||
#include "test/timer.h"
|
||||
#include "test/thd.h"
|
||||
#define MEXP 19937
|
||||
#include "test/SFMT.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/*
|
||||
* Define always-enabled assertion macros, so that test assertions execute even
|
||||
* if assertions are disabled in the library code.
|
||||
*/
|
||||
#undef assert
|
||||
#undef not_reached
|
||||
#undef not_implemented
|
||||
#undef assert_not_implemented
|
||||
|
||||
#define assert(e) do { \
|
||||
if (!(e)) { \
|
||||
malloc_printf( \
|
||||
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
|
||||
__FILE__, __LINE__, #e); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define not_reached() do { \
|
||||
malloc_printf( \
|
||||
"<jemalloc>: %s:%d: Unreachable code reached\n", \
|
||||
__FILE__, __LINE__); \
|
||||
abort(); \
|
||||
} while (0)
|
||||
|
||||
#define not_implemented() do { \
|
||||
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
|
||||
__FILE__, __LINE__); \
|
||||
abort(); \
|
||||
} while (0)
|
||||
|
||||
#define assert_not_implemented(e) do { \
|
||||
if (!(e)) { \
|
||||
not_implemented(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
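Context note (not part of the diff): the stress-test branch of the removed jemalloc_test.h exposes the public API twice, once under the normal names (the allocator being measured) and once under jet_ prefixes (a separate copy for the harness's own bookkeeping, as the comment above explains). A rough sketch of that split, assuming it is compiled inside jemalloc's test tree:

#include "test/jemalloc_test.h"

static void stress_iteration(void) {
    /* Harness bookkeeping, served by the separate jet_-prefixed allocator. */
    void **log = (void **)jet_malloc(100 * sizeof(void *));

    /* The allocation actually being exercised and measured. */
    log[0] = malloc(4096);
    free(log[0]);

    jet_free(log);
}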
@@ -1,10 +0,0 @@
/* test/include/test/jemalloc_test_defs.h. Generated from jemalloc_test_defs.h.in by configure. */
#include "jemalloc/internal/jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"

/*
 * For use by SFMT. configure.ac doesn't actually define HAVE_SSE2 because its
 * dependencies are notoriously unportable in practice.
 */
/* #undef HAVE_SSE2 */
/* #undef HAVE_ALTIVEC */
@@ -1,80 +0,0 @@
#!/bin/sh
|
||||
|
||||
case elf in
|
||||
macho)
|
||||
export DYLD_FALLBACK_LIBRARY_PATH="lib"
|
||||
;;
|
||||
pecoff)
|
||||
export PATH="${PATH}:lib"
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
# Make a copy of the MALLOC_CONF passed in to this script, so
|
||||
# it can be repeatedly concatenated with per test settings.
|
||||
export MALLOC_CONF_ALL=${MALLOC_CONF}
|
||||
# Concatenate the individual test's MALLOC_CONF and MALLOC_CONF_ALL.
|
||||
export_malloc_conf() {
|
||||
if [ "x${MALLOC_CONF}" != "x" -a "x${MALLOC_CONF_ALL}" != "x" ] ; then
|
||||
export MALLOC_CONF="${MALLOC_CONF},${MALLOC_CONF_ALL}"
|
||||
else
|
||||
export MALLOC_CONF="${MALLOC_CONF}${MALLOC_CONF_ALL}"
|
||||
fi
|
||||
}
|
||||
|
||||
# Corresponds to test_status_t.
|
||||
pass_code=0
|
||||
skip_code=1
|
||||
fail_code=2
|
||||
|
||||
pass_count=0
|
||||
skip_count=0
|
||||
fail_count=0
|
||||
for t in $@; do
|
||||
if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then
|
||||
echo
|
||||
fi
|
||||
echo "=== ${t} ==="
|
||||
if [ -e "${t}.sh" ] ; then
|
||||
# Source the shell script corresponding to the test in a subshell and
|
||||
# execute the test. This allows the shell script to set MALLOC_CONF, which
|
||||
# is then used to set MALLOC_CONF (thus allowing the
|
||||
# per test shell script to ignore the detail).
|
||||
enable_fill=1 \
|
||||
enable_prof=0 \
|
||||
. ${t}.sh && \
|
||||
export_malloc_conf && \
|
||||
$JEMALLOC_TEST_PREFIX ${t} /home/steemann/ArangoNoAsan/3rdParty/jemalloc/v5.0.1/ /home/steemann/ArangoNoAsan/3rdParty/jemalloc/v5.0.1/
|
||||
else
|
||||
export MALLOC_CONF= && \
|
||||
export_malloc_conf && \
|
||||
$JEMALLOC_TEST_PREFIX ${t} /home/steemann/ArangoNoAsan/3rdParty/jemalloc/v5.0.1/ /home/steemann/ArangoNoAsan/3rdParty/jemalloc/v5.0.1/
|
||||
fi
|
||||
result_code=$?
|
||||
case ${result_code} in
|
||||
${pass_code})
|
||||
pass_count=$((pass_count+1))
|
||||
;;
|
||||
${skip_code})
|
||||
skip_count=$((skip_count+1))
|
||||
;;
|
||||
${fail_code})
|
||||
fail_count=$((fail_count+1))
|
||||
;;
|
||||
*)
|
||||
echo "Test harness error: ${t} w/ MALLOC_CONF=\"${MALLOC_CONF}\"" 1>&2
|
||||
echo "Use prefix to debug, e.g. JEMALLOC_TEST_PREFIX=\"gdb --args\" sh test/test.sh ${t}" 1>&2
|
||||
exit 1
|
||||
esac
|
||||
done
|
||||
|
||||
total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}`
|
||||
echo
|
||||
echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}"
|
||||
|
||||
if [ ${fail_count} -eq 0 ] ; then
|
||||
exit 0
|
||||
else
|
||||
exit 1
|
||||
fi
|
|
@@ -1,75 +0,0 @@
#include "test/jemalloc_test.h"
|
||||
|
||||
#ifndef _WIN32
|
||||
#include <sys/wait.h>
|
||||
#endif
|
||||
|
||||
TEST_BEGIN(test_fork) {
|
||||
#ifndef _WIN32
|
||||
void *p;
|
||||
pid_t pid;
|
||||
|
||||
/* Set up a manually managed arena for test. */
|
||||
unsigned arena_ind;
|
||||
size_t sz = sizeof(unsigned);
|
||||
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
|
||||
0, "Unexpected mallctl() failure");
|
||||
|
||||
/* Migrate to the new arena. */
|
||||
unsigned old_arena_ind;
|
||||
sz = sizeof(old_arena_ind);
|
||||
assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
|
||||
(void *)&arena_ind, sizeof(arena_ind)), 0,
|
||||
"Unexpected mallctl() failure");
|
||||
|
||||
p = malloc(1);
|
||||
assert_ptr_not_null(p, "Unexpected malloc() failure");
|
||||
|
||||
pid = fork();
|
||||
|
||||
free(p);
|
||||
|
||||
p = malloc(64);
|
||||
assert_ptr_not_null(p, "Unexpected malloc() failure");
|
||||
free(p);
|
||||
|
||||
if (pid == -1) {
|
||||
/* Error. */
|
||||
test_fail("Unexpected fork() failure");
|
||||
} else if (pid == 0) {
|
||||
/* Child. */
|
||||
_exit(0);
|
||||
} else {
|
||||
int status;
|
||||
|
||||
/* Parent. */
|
||||
while (true) {
|
||||
if (waitpid(pid, &status, 0) == -1) {
|
||||
test_fail("Unexpected waitpid() failure");
|
||||
}
|
||||
if (WIFSIGNALED(status)) {
|
||||
test_fail("Unexpected child termination due to "
|
||||
"signal %d", WTERMSIG(status));
|
||||
break;
|
||||
}
|
||||
if (WIFEXITED(status)) {
|
||||
if (WEXITSTATUS(status) != 0) {
|
||||
test_fail(
|
||||
"Unexpected child exit value %d",
|
||||
WEXITSTATUS(status));
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#else
|
||||
test_skip("fork(2) is irrelevant to Windows");
|
||||
#endif
|
||||
}
|
||||
TEST_END
|
||||
|
||||
int
|
||||
main(void) {
|
||||
return test(
|
||||
test_fork);
|
||||
}
|
|
@@ -77,12 +77,14 @@ test/include/test/jemalloc_test_defs.h
*.pdb
*.sdf
*.opendb
*.VC.db
*.opensdf
*.cachefile
*.suo
*.user
*.sln.docstates
*.tmp
.vs/
/msvc/Win32/
/msvc/x64/
/msvc/projects/*/*/Debug*/
@@ -1,4 +1,5 @@
language: generic
dist: precise

matrix:
  include:
@@ -1,10 +1,10 @@
Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:
--------------------------------------------------------------------------------
Copyright (C) 2002-2017 Jason Evans <jasone@canonware.com>.
Copyright (C) 2002-2018 Jason Evans <jasone@canonware.com>.
All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
Copyright (C) 2009-2017 Facebook, Inc. All rights reserved.
Copyright (C) 2009-2018 Facebook, Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@ -4,6 +4,123 @@ brevity. Much more detail can be found in the git revision history:
|
|||
|
||||
https://github.com/jemalloc/jemalloc
|
||||
|
||||
* 5.1.0 (May 4th, 2018)
|
||||
|
||||
This release is primarily about fine-tuning, ranging from several new features
|
||||
to numerous notable performance and portability enhancements. The release and
|
||||
prior dev versions have been running in multiple large scale applications for
|
||||
months, and the cumulative improvements are substantial in many cases.
|
||||
|
||||
Given the long and successful production runs, this release is likely a good
|
||||
candidate for applications to upgrade, from both jemalloc 5.0 and before. For
|
||||
performance-critical applications, the newly added TUNING.md provides
|
||||
guidelines on jemalloc tuning.
|
||||
|
||||
New features:
|
||||
- Implement transparent huge page support for internal metadata. (@interwq)
|
||||
- Add opt.thp to allow enabling / disabling transparent huge pages for all
|
||||
mappings. (@interwq)
|
||||
- Add maximum background thread count option. (@djwatson)
|
||||
- Allow prof_active to control opt.lg_prof_interval and prof.gdump.
|
||||
(@interwq)
|
||||
- Allow arena index lookup based on allocation addresses via mallctl.
|
||||
(@lionkov)
|
||||
- Allow disabling initial-exec TLS model. (@davidtgoldblatt, @KenMacD)
|
||||
- Add opt.lg_extent_max_active_fit to set the max ratio between the size of
|
||||
the active extent selected (to split off from) and the size of the requested
|
||||
allocation. (@interwq, @davidtgoldblatt)
|
||||
- Add retain_grow_limit to set the max size when growing virtual address
|
||||
space. (@interwq)
|
||||
- Add mallctl interfaces:
|
||||
+ arena.<i>.retain_grow_limit (@interwq)
|
||||
+ arenas.lookup (@lionkov)
|
||||
+ max_background_threads (@djwatson)
|
||||
+ opt.lg_extent_max_active_fit (@interwq)
|
||||
+ opt.max_background_threads (@djwatson)
|
||||
+ opt.metadata_thp (@interwq)
|
||||
+ opt.thp (@interwq)
|
||||
+ stats.metadata_thp (@interwq)
|
||||
|
||||
Portability improvements:
|
||||
- Support GNU/kFreeBSD configuration. (@paravoid)
|
||||
- Support m68k, nios2 and SH3 architectures. (@paravoid)
|
||||
- Fall back to FD_CLOEXEC when O_CLOEXEC is unavailable. (@zonyitoo)
|
||||
- Fix symbol listing for cross-compiling. (@tamird)
|
||||
- Fix high bits computation on ARM. (@davidtgoldblatt, @paravoid)
|
||||
- Disable the CPU_SPINWAIT macro for Power. (@davidtgoldblatt, @marxin)
|
||||
- Fix MSVC 2015 & 2017 builds. (@rustyx)
|
||||
- Improve RISC-V support. (@EdSchouten)
|
||||
- Set name mangling script in strict mode. (@nicolov)
|
||||
- Avoid MADV_HUGEPAGE on ARM. (@marxin)
|
||||
- Modify configure to determine return value of strerror_r.
|
||||
(@davidtgoldblatt, @cferris1000)
|
||||
- Make sure CXXFLAGS is tested with CPP compiler. (@nehaljwani)
|
||||
- Fix 32-bit build on MSVC. (@rustyx)
|
||||
- Fix external symbol on MSVC. (@maksqwe)
|
||||
- Avoid a printf format specifier warning. (@jasone)
|
||||
- Add configure option --disable-initial-exec-tls which can allow jemalloc to
|
||||
be dynamically loaded after program startup. (@davidtgoldblatt, @KenMacD)
|
||||
- AArch64: Add ILP32 support. (@cmuellner)
|
||||
- Add --with-lg-vaddr configure option to support cross compiling.
|
||||
(@cmuellner, @davidtgoldblatt)
|
||||
|
||||
Optimizations and refactors:
|
||||
- Improve active extent fit with extent_max_active_fit. This considerably
|
||||
reduces fragmentation over time and improves virtual memory and metadata
|
||||
usage. (@davidtgoldblatt, @interwq)
|
||||
- Eagerly coalesce large extents to reduce fragmentation. (@interwq)
|
||||
- sdallocx: only read size info when page aligned (i.e. possibly sampled),
|
||||
which speeds up the sized deallocation path significantly. (@interwq)
|
||||
- Avoid attempting new mappings for in place expansion with retain, since
|
||||
it rarely succeeds in practice and causes high overhead. (@interwq)
|
||||
- Refactor OOM handling in newImpl. (@wqfish)
|
||||
- Add internal fine-grained logging functionality for debugging use.
|
||||
(@davidtgoldblatt)
|
||||
- Refactor arena / tcache interactions. (@davidtgoldblatt)
|
||||
- Refactor extent management with dumpable flag. (@davidtgoldblatt)
|
||||
- Add runtime detection of lazy purging. (@interwq)
|
||||
- Use pairing heap instead of red-black tree for extents_avail. (@djwatson)
|
||||
- Use sysctl on startup in FreeBSD. (@trasz)
|
||||
- Use thread local prng state instead of atomic. (@djwatson)
|
||||
- Make decay always purge one more extent than before, because in
|
||||
practice large extents are usually the ones that cross the decay threshold.
|
||||
Purging the additional extent helps save memory as well as reduce VM
|
||||
fragmentation. (@interwq)
|
||||
- Fast division by dynamic values. (@davidtgoldblatt)
|
||||
- Improve the fit for aligned allocation. (@interwq, @edwinsmith)
|
||||
- Refactor extent_t bitpacking. (@rkmisra)
|
||||
- Optimize the generated assembly for ticker operations. (@davidtgoldblatt)
|
||||
- Convert stats printing to use a structured text emitter. (@davidtgoldblatt)
|
||||
- Remove preserve_lru feature for extents management. (@djwatson)
|
||||
- Consolidate two memory loads into one on the fast deallocation path.
|
||||
(@davidtgoldblatt, @interwq)
|
||||
|
||||
Bug fixes (most of the issues are only relevant to jemalloc 5.0):
|
||||
- Fix deadlock with multithreaded fork in OS X. (@davidtgoldblatt)
|
||||
- Validate returned file descriptor before use. (@zonyitoo)
|
||||
- Fix a few background thread initialization and shutdown issues. (@interwq)
|
||||
- Fix an extent coalesce + decay race by taking both coalescing extents off
|
||||
the LRU list. (@interwq)
|
||||
- Fix a potentially unbounded increase during decay, caused by one thread
|
||||
continually stashing memory to purge while other threads generate new pages. The
|
||||
number of pages to purge is checked to prevent this. (@interwq)
|
||||
- Fix a FreeBSD bootstrap assertion. (@strejda, @interwq)
|
||||
- Handle 32 bit mutex counters. (@rkmisra)
|
||||
- Fix an indexing bug when creating background threads. (@davidtgoldblatt,
|
||||
@binliu19)
|
||||
- Fix arguments passed to extent_init. (@yuleniwo, @interwq)
|
||||
- Fix addresses used for ordering mutexes. (@rkmisra)
|
||||
- Fix abort_conf processing during bootstrap. (@interwq)
|
||||
- Fix include path order for out-of-tree builds. (@cmuellner)
|
||||
|
||||
Incompatible changes:
|
||||
- Remove --disable-thp. (@interwq)
|
||||
- Remove mallctl interfaces:
|
||||
+ config.thp (@interwq)
|
||||
|
||||
Documentation:
|
||||
- Add TUNING.md. (@interwq, @davidtgoldblatt, @djwatson)
|
||||
|
||||
* 5.0.1 (July 1, 2017)
|
||||
|
||||
This bugfix release fixes several issues, most of which are obscure enough
|
||||
|
@ -22,7 +139,7 @@ brevity. Much more detail can be found in the git revision history:
|
|||
unlikely to be an issue with other libc implementations. (@interwq)
|
||||
- Mask signals during background thread creation. This prevents signals from
|
||||
being inadvertently delivered to background threads. (@jasone,
|
||||
@davidgoldblatt, @interwq)
|
||||
@davidtgoldblatt, @interwq)
|
||||
- Avoid inactivity checks within background threads, in order to prevent
|
||||
recursive mutex acquisition. (@interwq)
|
||||
- Fix extent_grow_retained() to use the specified hooks when the
|
||||
|
@ -515,7 +632,7 @@ brevity. Much more detail can be found in the git revision history:
|
|||
these fixes, xallocx() now tries harder to partially fulfill requests for
|
||||
optional extra space. Note that a couple of minor heap profiling
|
||||
optimizations are included, but these are better thought of as performance
|
||||
fixes that were integral to disovering most of the other bugs.
|
||||
fixes that were integral to discovering most of the other bugs.
|
||||
|
||||
Optimizations:
|
||||
- Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the
|
|
@ -157,11 +157,6 @@ any of the following arguments (not a definitive list) to 'configure':
|
|||
Statically link against the specified libunwind.a rather than dynamically
|
||||
linking with -lunwind.
|
||||
|
||||
* `--disable-thp`
|
||||
|
||||
Disable transparent huge page (THP) integration. This option can be useful
|
||||
when cross compiling.
|
||||
|
||||
* `--disable-fill`
|
||||
|
||||
Disable support for junk/zero filling of memory. See the "opt.junk" and
|
||||
|
@ -265,6 +260,22 @@ any of the following arguments (not a definitive list) to 'configure':
|
|||
configuration, jemalloc will provide additional size classes that are not
|
||||
16-byte-aligned (24, 40, and 56).
|
||||
|
||||
* `--with-lg-vaddr=<lg-vaddr>`
|
||||
|
||||
Specify the number of significant virtual address bits. By default, the
|
||||
configure script attempts to detect virtual address size on those platforms
|
||||
where it knows how, and picks a default otherwise. This option may be
|
||||
useful when cross-compiling.
|
||||
|
||||
* `--disable-initial-exec-tls`
|
||||
|
||||
Disable the initial-exec TLS model for jemalloc's internal thread-local
|
||||
storage (on those platforms that support explicit settings). This can allow
|
||||
jemalloc to be dynamically loaded after program startup (e.g. using dlopen).
|
||||
Note that in this case, there will be two malloc implementations operating
|
||||
in the same process, which will almost certainly result in confusing runtime
|
||||
crashes if pointers leak from one implementation to the other.
|
||||
|
||||
The following environment variables (not a definitive list) impact configure's
|
||||
behavior:
|
||||
|
||||
|
@ -329,6 +340,7 @@ To install only parts of jemalloc, use the following targets:
|
|||
install_include
|
||||
install_lib_shared
|
||||
install_lib_static
|
||||
install_lib_pc
|
||||
install_lib
|
||||
install_doc_html
|
||||
install_doc_man
|
|
@ -24,7 +24,7 @@ abs_srcroot := @abs_srcroot@
|
|||
abs_objroot := @abs_objroot@
|
||||
|
||||
# Build parameters.
|
||||
CPPFLAGS := @CPPFLAGS@ -I$(srcroot)include -I$(objroot)include
|
||||
CPPFLAGS := @CPPFLAGS@ -I$(objroot)include -I$(srcroot)include
|
||||
CONFIGURE_CFLAGS := @CONFIGURE_CFLAGS@
|
||||
SPECIFIED_CFLAGS := @SPECIFIED_CFLAGS@
|
||||
EXTRA_CFLAGS := @EXTRA_CFLAGS@
|
||||
|
@ -93,15 +93,18 @@ C_SRCS := $(srcroot)src/jemalloc.c \
|
|||
$(srcroot)src/arena.c \
|
||||
$(srcroot)src/background_thread.c \
|
||||
$(srcroot)src/base.c \
|
||||
$(srcroot)src/bin.c \
|
||||
$(srcroot)src/bitmap.c \
|
||||
$(srcroot)src/ckh.c \
|
||||
$(srcroot)src/ctl.c \
|
||||
$(srcroot)src/div.c \
|
||||
$(srcroot)src/extent.c \
|
||||
$(srcroot)src/extent_dss.c \
|
||||
$(srcroot)src/extent_mmap.c \
|
||||
$(srcroot)src/hash.c \
|
||||
$(srcroot)src/hooks.c \
|
||||
$(srcroot)src/large.c \
|
||||
$(srcroot)src/log.c \
|
||||
$(srcroot)src/malloc_io.c \
|
||||
$(srcroot)src/mutex.c \
|
||||
$(srcroot)src/mutex_pool.c \
|
||||
|
@ -111,7 +114,6 @@ C_SRCS := $(srcroot)src/jemalloc.c \
|
|||
$(srcroot)src/prof.c \
|
||||
$(srcroot)src/rtree.c \
|
||||
$(srcroot)src/stats.c \
|
||||
$(srcroot)src/spin.c \
|
||||
$(srcroot)src/sz.c \
|
||||
$(srcroot)src/tcache.c \
|
||||
$(srcroot)src/ticker.c \
|
||||
|
@ -160,10 +162,13 @@ TESTS_UNIT := \
|
|||
$(srcroot)test/unit/arena_reset.c \
|
||||
$(srcroot)test/unit/atomic.c \
|
||||
$(srcroot)test/unit/background_thread.c \
|
||||
$(srcroot)test/unit/background_thread_enable.c \
|
||||
$(srcroot)test/unit/base.c \
|
||||
$(srcroot)test/unit/bitmap.c \
|
||||
$(srcroot)test/unit/ckh.c \
|
||||
$(srcroot)test/unit/decay.c \
|
||||
$(srcroot)test/unit/div.c \
|
||||
$(srcroot)test/unit/emitter.c \
|
||||
$(srcroot)test/unit/extent_quantize.c \
|
||||
$(srcroot)test/unit/fork.c \
|
||||
$(srcroot)test/unit/hash.c \
|
||||
|
@ -171,6 +176,7 @@ TESTS_UNIT := \
|
|||
$(srcroot)test/unit/junk.c \
|
||||
$(srcroot)test/unit/junk_alloc.c \
|
||||
$(srcroot)test/unit/junk_free.c \
|
||||
$(srcroot)test/unit/log.c \
|
||||
$(srcroot)test/unit/mallctl.c \
|
||||
$(srcroot)test/unit/malloc_io.c \
|
||||
$(srcroot)test/unit/math.c \
|
|
@ -0,0 +1,129 @@
|
|||
This document summarizes the common approaches for performance fine tuning with
|
||||
jemalloc (as of 5.1.0). The default configuration of jemalloc tends to work
|
||||
reasonably well in practice, and most applications should not have to tune any
|
||||
options. However, in order to cover a wide range of applications and avoid
|
||||
pathological cases, the default setting is sometimes kept conservative and
|
||||
suboptimal, even for many common workloads. When jemalloc is properly tuned for
|
||||
a specific application / workload, it is common to improve system-level metrics
|
||||
by a few percent, or make favorable trade-offs.
|
||||
|
||||
|
||||
## Notable runtime options for performance tuning
|
||||
|
||||
Runtime options can be set via
|
||||
[malloc_conf](http://jemalloc.net/jemalloc.3.html#tuning).
|
||||
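For build-time defaults, an application can also bake options in through the
`malloc_conf` global; a minimal sketch, assuming an unprefixed jemalloc build
(the option string is purely illustrative):

```c
/* Compile-time defaults read by jemalloc during initialization; settings in
 * the MALLOC_CONF environment variable still take precedence. */
const char *malloc_conf = "background_thread:true,metadata_thp:auto";
```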
|
||||
* [background_thread](http://jemalloc.net/jemalloc.3.html#background_thread)
|
||||
|
||||
Enabling jemalloc background threads generally improves the tail latency for
|
||||
application threads, since unused memory purging is shifted to the dedicated
|
||||
background threads. In addition, unintended purging delay caused by
|
||||
application inactivity is avoided with background threads.
|
||||
|
||||
Suggested: `background_thread:true` when jemalloc-managed threads can be
|
||||
allowed.
|
||||
|
||||
* [metadata_thp](http://jemalloc.net/jemalloc.3.html#opt.metadata_thp)
|
||||
|
||||
Allowing jemalloc to utilize transparent huge pages for its internal
|
||||
metadata usually reduces TLB misses significantly, especially for programs
|
||||
with a large memory footprint and frequent allocation / deallocation
|
||||
activities. Metadata memory usage may increase due to the use of huge
|
||||
pages.
|
||||
|
||||
Suggested for allocation-intensive programs: `metadata_thp:auto` or
|
||||
`metadata_thp:always`, which is expected to improve CPU utilization at a
|
||||
small memory cost.
|
||||
|
||||
* [dirty_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.dirty_decay_ms) and
|
||||
[muzzy_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.muzzy_decay_ms)
|
||||
|
||||
Decay time determines how fast jemalloc returns unused pages back to the
|
||||
operating system, and therefore provides a fairly straightforward trade-off
|
||||
between CPU and memory usage. Shorter decay time purges unused pages faster
|
||||
to reduce memory usage (usually at the cost of more CPU cycles spent on
|
||||
purging), and vice versa.
|
||||
|
||||
Suggested: tune the values based on the desired trade-offs.
|
||||
|
||||
* [narenas](http://jemalloc.net/jemalloc.3.html#opt.narenas)
|
||||
|
||||
By default jemalloc uses multiple arenas to reduce internal lock contention.
|
||||
However, a high arena count may also increase overall memory fragmentation,
|
||||
since arenas manage memory independently. When a high degree of parallelism
|
||||
is not expected at the allocator level, a lower number of arenas often
|
||||
improves memory usage.
|
||||
|
||||
Suggested: if low parallelism is expected, try a lower arena count while
|
||||
monitoring CPU and memory usage.
|
||||
|
||||
* [percpu_arena](http://jemalloc.net/jemalloc.3.html#opt.percpu_arena)
|
||||
|
||||
Enable dynamic thread-to-arena association based on the running CPU. This has
|
||||
the potential to improve locality, e.g. when thread-to-CPU affinity is
|
||||
present.
|
||||
|
||||
Suggested: try `percpu_arena:percpu` or `percpu_arena:phycpu` if
|
||||
thread migration between processors is expected to be infrequent.
|
||||
|
||||
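Several of the options above also have runtime counterparts reachable through
`mallctl()`. A minimal sketch for enabling background threads after startup,
assuming an unprefixed jemalloc build:

```c
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

/* Enable jemalloc background threads at runtime; returns 0 on success. */
static int
enable_background_threads(void) {
	bool enable = true;
	return mallctl("background_thread", NULL, NULL, &enable, sizeof(enable));
}
```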
Examples:
|
||||
|
||||
* High resource consumption application, prioritizing CPU utilization:
|
||||
|
||||
`background_thread:true,metadata_thp:auto` combined with relaxed decay time
|
||||
(increased `dirty_decay_ms` and / or `muzzy_decay_ms`,
|
||||
e.g. `dirty_decay_ms:30000,muzzy_decay_ms:30000`).
|
||||
|
||||
* High resource consumption application, prioritizing memory usage:
|
||||
|
||||
`background_thread:true` combined with shorter decay time (decreased
|
||||
`dirty_decay_ms` and / or `muzzy_decay_ms`,
|
||||
e.g. `dirty_decay_ms:5000,muzzy_decay_ms:5000`), and lower arena count
|
||||
(e.g. number of CPUs).
|
||||
|
||||
* Low resource consumption application:
|
||||
|
||||
`narenas:1,lg_tcache_max:13` combined with shorter decay time (decreased
|
||||
`dirty_decay_ms` and / or `muzzy_decay_ms`, e.g.
|
||||
`dirty_decay_ms:1000,muzzy_decay_ms:0`).
|
||||
|
||||
* Extremely conservative -- minimize memory usage at all costs, only suitable when
|
||||
allocation activity is very rare:
|
||||
|
||||
`narenas:1,tcache:false,dirty_decay_ms:0,muzzy_decay_ms:0`
|
||||
|
||||
Note that it is recommended to combine the options with `abort_conf:true`, which
|
||||
aborts immediately on illegal options.
|
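As one way to apply such a string in code (a sketch; the values mirror the
low-consumption examples above, with `abort_conf` placed first so that later
options in the same string are checked):

```c
/* Conservative defaults; abort_conf makes jemalloc abort on invalid options
 * instead of silently ignoring them. */
const char *malloc_conf =
    "abort_conf:true,narenas:1,tcache:false,dirty_decay_ms:0,muzzy_decay_ms:0";
```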
||||
|
||||
## Beyond runtime options
|
||||
|
||||
In addition to the runtime options, there are a number of programmatic ways to
|
||||
improve application performance with jemalloc.
|
||||
|
||||
* [Explicit arenas](http://jemalloc.net/jemalloc.3.html#arenas.create)
|
||||
|
||||
Manually created arenas can help performance in various ways, e.g. by
|
||||
managing locality and contention for specific usages. For example,
|
||||
applications can explicitly allocate frequently accessed objects from a
|
||||
dedicated arena with
|
||||
[mallocx()](http://jemalloc.net/jemalloc.3.html#MALLOCX_ARENA) to improve
|
||||
locality. In addition, explicit arenas often benefit from individually
|
||||
tuned options, e.g. relaxed [decay
|
||||
time](http://jemalloc.net/jemalloc.3.html#arena.i.dirty_decay_ms) if
|
||||
frequent reuse is expected.
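(A combined C sketch of the explicit-arena and thread-binding interfaces
appears after this list.)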
|
||||
|
||||
* [Extent hooks](http://jemalloc.net/jemalloc.3.html#arena.i.extent_hooks)
|
||||
|
||||
Extent hooks allow customization for managing underlying memory. One use
|
||||
case for performance purposes is to utilize huge pages -- for example,
|
||||
[HHVM](https://github.com/facebook/hhvm/blob/master/hphp/util/alloc.cpp)
|
||||
uses explicit arenas with customized extent hooks to manage 1GB huge pages
|
||||
for frequently accessed data, which reduces TLB misses significantly.
|
||||
|
||||
* [Explicit thread-to-arena
|
||||
binding](http://jemalloc.net/jemalloc.3.html#thread.arena)
|
||||
|
||||
It is common for some threads in an application to have different memory
|
||||
access / allocation patterns. Threads with heavy workloads often benefit
|
||||
from explicit binding, e.g. binding very active threads to dedicated arenas
|
||||
may reduce contention at the allocator level.
|
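The explicit-arena and thread-binding interfaces above can be combined
programmatically. A minimal sketch, assuming an unprefixed jemalloc build and
omitting most error handling (default extent hooks are used; a custom
`extent_hooks_t` could be passed to `arenas.create` instead):

```c
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Create a dedicated arena, bind the calling thread to it, and allocate from
 * it directly.  Returns NULL on any mallctl()/mallocx() failure. */
static void *
alloc_from_private_arena(size_t size) {
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);

	/* Default extent hooks; custom hooks could be supplied via newp. */
	if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
		return NULL;
	}
	/* Route all of this thread's subsequent allocations to the arena. */
	if (mallctl("thread.arena", NULL, NULL, &arena_ind,
	    sizeof(arena_ind)) != 0) {
		return NULL;
	}
	/* Or target the arena per call; bypassing the tcache ensures the
	 * request really reaches it. */
	return mallocx(size, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
}
```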
|
@ -0,0 +1 @@
|
|||
5.1.0-0-g61efbda7098de6fe64c362d309824864308c36d4
|
|
@ -2895,6 +2895,8 @@ sub RemoveUninterestingFrames {
|
|||
foreach my $name ('@JEMALLOC_PREFIX@calloc',
|
||||
'cfree',
|
||||
'@JEMALLOC_PREFIX@malloc',
|
||||
'newImpl',
|
||||
'void* newImpl',
|
||||
'@JEMALLOC_PREFIX@free',
|
||||
'@JEMALLOC_PREFIX@memalign',
|
||||
'@JEMALLOC_PREFIX@posix_memalign',
|
File diff suppressed because it is too large
Load Diff
|
@ -10,7 +10,7 @@ dnl Custom macro definitions.
|
|||
dnl JE_CONCAT_VVV(r, a, b)
|
||||
dnl
|
||||
dnl Set $r to the concatenation of $a and $b, with a space separating them iff
|
||||
dnl both $a and $b are non-emty.
|
||||
dnl both $a and $b are non-empty.
|
||||
AC_DEFUN([JE_CONCAT_VVV],
|
||||
if test "x[$]{$2}" = "x" -o "x[$]{$3}" = "x" ; then
|
||||
$1="[$]{$2}[$]{$3}"
|
||||
|
@ -76,6 +76,7 @@ AC_MSG_CHECKING([whether compiler supports $1])
|
|||
T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
|
||||
JE_APPEND_VS(CONFIGURE_CXXFLAGS, $1)
|
||||
JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
|
||||
AC_LANG_PUSH([C++])
|
||||
AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
|
||||
[[
|
||||
]], [[
|
||||
|
@ -87,6 +88,7 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
|
|||
AC_MSG_RESULT([no])
|
||||
[CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"]
|
||||
)
|
||||
AC_LANG_POP([C++])
|
||||
JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
|
||||
])
|
||||
|
||||
|
@ -243,6 +245,7 @@ if test "x$GCC" = "xyes" ; then
|
|||
JE_CFLAGS_ADD([-Wshorten-64-to-32])
|
||||
JE_CFLAGS_ADD([-Wsign-compare])
|
||||
JE_CFLAGS_ADD([-Wundef])
|
||||
JE_CFLAGS_ADD([-Wno-format-zero-length])
|
||||
JE_CFLAGS_ADD([-pipe])
|
||||
JE_CFLAGS_ADD([-g3])
|
||||
elif test "x$je_cv_msvc" = "xyes" ; then
|
||||
|
@ -380,6 +383,7 @@ dnl CPU-specific settings.
|
|||
CPU_SPINWAIT=""
|
||||
case "${host_cpu}" in
|
||||
i686|x86_64)
|
||||
HAVE_CPU_SPINWAIT=1
|
||||
if test "x${je_cv_msvc}" = "xyes" ; then
|
||||
AC_CACHE_VAL([je_cv_pause_msvc],
|
||||
[JE_COMPILABLE([pause instruction MSVC], [],
|
||||
|
@ -398,25 +402,36 @@ case "${host_cpu}" in
|
|||
fi
|
||||
fi
|
||||
;;
|
||||
powerpc*)
|
||||
AC_DEFINE_UNQUOTED([HAVE_ALTIVEC], [ ])
|
||||
CPU_SPINWAIT='__asm__ volatile("or 31,31,31")'
|
||||
;;
|
||||
*)
|
||||
HAVE_CPU_SPINWAIT=0
|
||||
;;
|
||||
esac
|
||||
AC_DEFINE_UNQUOTED([HAVE_CPU_SPINWAIT], [$HAVE_CPU_SPINWAIT])
|
||||
AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT])
|
||||
|
||||
AC_ARG_WITH([lg_vaddr],
|
||||
[AS_HELP_STRING([--with-lg-vaddr=<lg-vaddr>], [Number of significant virtual address bits])],
|
||||
[LG_VADDR="$with_lg_vaddr"], [LG_VADDR="detect"])
|
||||
|
||||
case "${host_cpu}" in
|
||||
aarch64)
|
||||
AC_MSG_CHECKING([number of significant virtual address bits])
|
||||
LG_VADDR=48
|
||||
AC_MSG_RESULT([$LG_VADDR])
|
||||
if test "x$LG_VADDR" = "xdetect"; then
|
||||
AC_MSG_CHECKING([number of significant virtual address bits])
|
||||
if test "x${LG_SIZEOF_PTR}" = "x2" ; then
|
||||
#aarch64 ILP32
|
||||
LG_VADDR=32
|
||||
else
|
||||
#aarch64 LP64
|
||||
LG_VADDR=48
|
||||
fi
|
||||
AC_MSG_RESULT([$LG_VADDR])
|
||||
fi
|
||||
;;
|
||||
x86_64)
|
||||
AC_CACHE_CHECK([number of significant virtual address bits],
|
||||
[je_cv_lg_vaddr],
|
||||
AC_RUN_IFELSE([AC_LANG_PROGRAM(
|
||||
if test "x$LG_VADDR" = "xdetect"; then
|
||||
AC_CACHE_CHECK([number of significant virtual address bits],
|
||||
[je_cv_lg_vaddr],
|
||||
AC_RUN_IFELSE([AC_LANG_PROGRAM(
|
||||
[[
|
||||
#include <stdio.h>
|
||||
#ifdef _WIN32
|
||||
|
@ -453,27 +468,30 @@ typedef unsigned __int32 uint32_t;
|
|||
[je_cv_lg_vaddr=`cat conftest.out`],
|
||||
[je_cv_lg_vaddr=error],
|
||||
[je_cv_lg_vaddr=57]))
|
||||
if test "x${je_cv_lg_vaddr}" != "x" ; then
|
||||
LG_VADDR="${je_cv_lg_vaddr}"
|
||||
fi
|
||||
if test "x${LG_VADDR}" != "xerror" ; then
|
||||
AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR])
|
||||
else
|
||||
AC_MSG_ERROR([cannot determine number of significant virtual address bits])
|
||||
if test "x${je_cv_lg_vaddr}" != "x" ; then
|
||||
LG_VADDR="${je_cv_lg_vaddr}"
|
||||
fi
|
||||
if test "x${LG_VADDR}" != "xerror" ; then
|
||||
AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR])
|
||||
else
|
||||
AC_MSG_ERROR([cannot determine number of significant virtual address bits])
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
AC_MSG_CHECKING([number of significant virtual address bits])
|
||||
if test "x${LG_SIZEOF_PTR}" = "x3" ; then
|
||||
LG_VADDR=64
|
||||
elif test "x${LG_SIZEOF_PTR}" = "x2" ; then
|
||||
LG_VADDR=32
|
||||
elif test "x${LG_SIZEOF_PTR}" = "xLG_SIZEOF_PTR_WIN" ; then
|
||||
LG_VADDR="(1U << (LG_SIZEOF_PTR_WIN+3))"
|
||||
else
|
||||
AC_MSG_ERROR([Unsupported lg(pointer size): ${LG_SIZEOF_PTR}])
|
||||
if test "x$LG_VADDR" = "xdetect"; then
|
||||
AC_MSG_CHECKING([number of significant virtual address bits])
|
||||
if test "x${LG_SIZEOF_PTR}" = "x3" ; then
|
||||
LG_VADDR=64
|
||||
elif test "x${LG_SIZEOF_PTR}" = "x2" ; then
|
||||
LG_VADDR=32
|
||||
elif test "x${LG_SIZEOF_PTR}" = "xLG_SIZEOF_PTR_WIN" ; then
|
||||
LG_VADDR="(1U << (LG_SIZEOF_PTR_WIN+3))"
|
||||
else
|
||||
AC_MSG_ERROR([Unsupported lg(pointer size): ${LG_SIZEOF_PTR}])
|
||||
fi
|
||||
AC_MSG_RESULT([$LG_VADDR])
|
||||
fi
|
||||
AC_MSG_RESULT([$LG_VADDR])
|
||||
;;
|
||||
esac
|
||||
AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR])
|
||||
|
@ -511,6 +529,11 @@ AN_PROGRAM([ar], [AC_PROG_AR])
|
|||
AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)])
|
||||
AC_PROG_AR
|
||||
|
||||
AN_MAKEVAR([NM], [AC_PROG_NM])
|
||||
AN_PROGRAM([nm], [AC_PROG_NM])
|
||||
AC_DEFUN([AC_PROG_NM], [AC_CHECK_TOOL(NM, nm, :)])
|
||||
AC_PROG_NM
|
||||
|
||||
AC_PROG_AWK
|
||||
|
||||
dnl Platform-specific settings. abi and RPATH can probably be determined
|
||||
|
@ -522,7 +545,7 @@ dnl definitions need to be seen before any headers are included, which is a pain
|
|||
dnl to make happen otherwise.
|
||||
default_retain="0"
|
||||
maps_coalesce="1"
|
||||
DUMP_SYMS="nm -a"
|
||||
DUMP_SYMS="${NM} -a"
|
||||
SYM_PREFIX=""
|
||||
case "${host}" in
|
||||
*-*-darwin* | *-*-ios*)
|
||||
|
@ -556,7 +579,7 @@ case "${host}" in
|
|||
dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
|
||||
JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
|
||||
abi="elf"
|
||||
AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS])
|
||||
AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ])
|
||||
AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
|
||||
AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
|
||||
AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
|
||||
|
@ -566,11 +589,11 @@ case "${host}" in
|
|||
default_retain="1"
|
||||
fi
|
||||
;;
|
||||
*-*-linux* | *-*-kfreebsd*)
|
||||
*-*-linux*)
|
||||
dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
|
||||
JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
|
||||
abi="elf"
|
||||
AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS])
|
||||
AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ])
|
||||
AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
|
||||
AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
|
||||
AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
|
||||
|
@ -579,6 +602,15 @@ case "${host}" in
|
|||
default_retain="1"
|
||||
fi
|
||||
;;
|
||||
*-*-kfreebsd*)
|
||||
dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
|
||||
JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
|
||||
abi="elf"
|
||||
AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
|
||||
AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ])
|
||||
AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
|
||||
AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ])
|
||||
;;
|
||||
*-*-netbsd*)
|
||||
AC_MSG_CHECKING([ABI])
|
||||
AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
|
||||
|
@ -630,7 +662,13 @@ case "${host}" in
|
|||
DSO_LDFLAGS="-shared"
|
||||
link_whole_archive="1"
|
||||
fi
|
||||
DUMP_SYMS="dumpbin /SYMBOLS"
|
||||
case "${host}" in
|
||||
*-*-cygwin*)
|
||||
DUMP_SYMS="dumpbin /SYMBOLS"
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
a="lib"
|
||||
libprefix=""
|
||||
SOREV="${so}"
|
||||
|
@ -711,12 +749,9 @@ JE_COMPILABLE([tls_model attribute], [],
|
|||
foo = 0;],
|
||||
[je_cv_tls_model])
|
||||
JE_CFLAGS_RESTORE()
|
||||
if test "x${je_cv_tls_model}" = "xyes" ; then
|
||||
AC_DEFINE([JEMALLOC_TLS_MODEL],
|
||||
[__attribute__((tls_model("initial-exec")))])
|
||||
else
|
||||
AC_DEFINE([JEMALLOC_TLS_MODEL], [ ])
|
||||
fi
|
||||
dnl (Setting of JEMALLOC_TLS_MODEL is done later, after we've checked for
|
||||
dnl --disable-initial-exec-tls)
|
||||
|
||||
dnl Check for alloc_size attribute support.
|
||||
JE_CFLAGS_SAVE()
|
||||
JE_CFLAGS_ADD([-Werror])
|
||||
|
@ -1226,6 +1261,21 @@ if test "x$enable_cache_oblivious" = "x1" ; then
|
|||
fi
|
||||
AC_SUBST([enable_cache_oblivious])
|
||||
|
||||
dnl Do not log by default.
|
||||
AC_ARG_ENABLE([log],
|
||||
[AS_HELP_STRING([--enable-log], [Support debug logging])],
|
||||
[if test "x$enable_log" = "xno" ; then
|
||||
enable_log="0"
|
||||
else
|
||||
enable_log="1"
|
||||
fi
|
||||
],
|
||||
[enable_log="0"]
|
||||
)
|
||||
if test "x$enable_log" = "x1" ; then
|
||||
AC_DEFINE([JEMALLOC_LOG], [ ])
|
||||
fi
|
||||
AC_SUBST([enable_log])
|
||||
|
||||
|
||||
JE_COMPILABLE([a program using __builtin_unreachable], [
|
||||
|
@ -1789,6 +1839,15 @@ if test "x${je_cv_madvise}" = "xyes" ; then
|
|||
], [je_cv_madv_free])
|
||||
if test "x${je_cv_madv_free}" = "xyes" ; then
|
||||
AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
|
||||
elif test "x${je_cv_madvise}" = "xyes" ; then
|
||||
case "${host_cpu}" in i686|x86_64)
|
||||
case "${host}" in *-*-linux*)
|
||||
AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
|
||||
AC_DEFINE([JEMALLOC_DEFINE_MADVISE_FREE], [ ])
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
dnl Check for madvise(..., MADV_DONTNEED).
|
||||
|
@ -1801,6 +1860,17 @@ if test "x${je_cv_madvise}" = "xyes" ; then
|
|||
AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ])
|
||||
fi
|
||||
|
||||
dnl Check for madvise(..., MADV_DO[NT]DUMP).
|
||||
JE_COMPILABLE([madvise(..., MADV_DO[[NT]]DUMP)], [
|
||||
#include <sys/mman.h>
|
||||
], [
|
||||
madvise((void *)0, 0, MADV_DONTDUMP);
|
||||
madvise((void *)0, 0, MADV_DODUMP);
|
||||
], [je_cv_madv_dontdump])
|
||||
if test "x${je_cv_madv_dontdump}" = "xyes" ; then
|
||||
AC_DEFINE([JEMALLOC_MADVISE_DONTDUMP], [ ])
|
||||
fi
|
||||
|
||||
dnl Check for madvise(..., MADV_[NO]HUGEPAGE).
|
||||
JE_COMPILABLE([madvise(..., MADV_[[NO]]HUGEPAGE)], [
|
||||
#include <sys/mman.h>
|
||||
|
@ -1808,29 +1878,17 @@ if test "x${je_cv_madvise}" = "xyes" ; then
|
|||
madvise((void *)0, 0, MADV_HUGEPAGE);
|
||||
madvise((void *)0, 0, MADV_NOHUGEPAGE);
|
||||
], [je_cv_thp])
|
||||
case "${host_cpu}" in
|
||||
arm*)
|
||||
;;
|
||||
*)
|
||||
if test "x${je_cv_thp}" = "xyes" ; then
|
||||
AC_DEFINE([JEMALLOC_HAVE_MADVISE_HUGE], [ ])
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
dnl Enable transparent huge page support by default.
|
||||
AC_ARG_ENABLE([thp],
|
||||
[AS_HELP_STRING([--disable-thp],
|
||||
[Disable transparent huge page support])],
|
||||
[if test "x$enable_thp" = "xno" -o "x${je_cv_thp}" != "xyes" ; then
|
||||
enable_thp="0"
|
||||
else
|
||||
enable_thp="1"
|
||||
fi
|
||||
],
|
||||
[if test "x${je_cv_thp}" = "xyes" ; then
|
||||
enable_thp="1"
|
||||
else
|
||||
enable_thp="0"
|
||||
fi
|
||||
])
|
||||
if test "x$enable_thp" = "x1" ; then
|
||||
AC_DEFINE([JEMALLOC_THP], [ ])
|
||||
fi
|
||||
AC_SUBST([enable_thp])
|
||||
|
||||
dnl ============================================================================
|
||||
dnl Check whether __sync_{add,sub}_and_fetch() are available despite
|
||||
dnl __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros being undefined.
|
||||
|
@ -1948,6 +2006,29 @@ if test "x${enable_zone_allocator}" = "x1" ; then
|
|||
AC_DEFINE([JEMALLOC_ZONE], [ ])
|
||||
fi
|
||||
|
||||
dnl ============================================================================
|
||||
dnl Use initial-exec TLS by default.
|
||||
AC_ARG_ENABLE([initial-exec-tls],
|
||||
[AS_HELP_STRING([--disable-initial-exec-tls],
|
||||
[Disable the initial-exec tls model])],
|
||||
[if test "x$enable_initial_exec_tls" = "xno" ; then
|
||||
enable_initial_exec_tls="0"
|
||||
else
|
||||
enable_initial_exec_tls="1"
|
||||
fi
|
||||
],
|
||||
[enable_initial_exec_tls="1"]
|
||||
)
|
||||
AC_SUBST([enable_initial_exec_tls])
|
||||
|
||||
if test "x${je_cv_tls_model}" = "xyes" -a \
|
||||
"x${enable_initial_exec_tls}" = "x1" ; then
|
||||
AC_DEFINE([JEMALLOC_TLS_MODEL],
|
||||
[__attribute__((tls_model("initial-exec")))])
|
||||
else
|
||||
AC_DEFINE([JEMALLOC_TLS_MODEL], [ ])
|
||||
fi
|
||||
|
||||
dnl ============================================================================
|
||||
dnl Enable background threads if possible.
|
||||
|
||||
|
@ -2006,6 +2087,25 @@ if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then
|
|||
AC_DEFINE([JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP], [ ])
|
||||
fi
|
||||
|
||||
JE_CFLAGS_SAVE()
|
||||
JE_CFLAGS_ADD([-D_GNU_SOURCE])
|
||||
JE_CFLAGS_ADD([-Werror])
|
||||
JE_CFLAGS_ADD([-herror_on_warning])
|
||||
JE_COMPILABLE([strerror_r returns char with gnu source], [
|
||||
#include <errno.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
], [
|
||||
char *buffer = (char *) malloc(100);
|
||||
char *error = strerror_r(EINVAL, buffer, 100);
|
||||
printf("%s\n", error);
|
||||
], [je_cv_strerror_r_returns_char_with_gnu_source])
|
||||
JE_CFLAGS_RESTORE()
|
||||
if test "x${je_cv_strerror_r_returns_char_with_gnu_source}" = "xyes" ; then
|
||||
AC_DEFINE([JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE], [ ])
|
||||
fi
|
||||
|
||||
dnl ============================================================================
|
||||
dnl Check for typedefs, structures, and compiler characteristics.
|
||||
AC_HEADER_STDBOOL
|
||||
|
@ -2184,10 +2284,10 @@ AC_MSG_RESULT([prof : ${enable_prof}])
|
|||
AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}])
|
||||
AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}])
|
||||
AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}])
|
||||
AC_MSG_RESULT([thp : ${enable_thp}])
|
||||
AC_MSG_RESULT([fill : ${enable_fill}])
|
||||
AC_MSG_RESULT([utrace : ${enable_utrace}])
|
||||
AC_MSG_RESULT([xmalloc : ${enable_xmalloc}])
|
||||
AC_MSG_RESULT([log : ${enable_log}])
|
||||
AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}])
|
||||
AC_MSG_RESULT([cache-oblivious : ${enable_cache_oblivious}])
|
||||
AC_MSG_RESULT([cxx : ${enable_cxx}])
|
|
@ -1,13 +1,13 @@
|
|||
'\" t
|
||||
.\" Title: JEMALLOC
|
||||
.\" Author: Jason Evans
|
||||
.\" Generator: DocBook XSL Stylesheets v1.79.1 <http://docbook.sf.net/>
|
||||
.\" Date: 07/31/2017
|
||||
.\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/>
|
||||
.\" Date: 05/08/2018
|
||||
.\" Manual: User Manual
|
||||
.\" Source: jemalloc 0.0.0-0-g0000000000000000000000000000000000000000
|
||||
.\" Source: jemalloc 5.1.0-0-g61efbda7098de6fe64c362d309824864308c36d4
|
||||
.\" Language: English
|
||||
.\"
|
||||
.TH "JEMALLOC" "3" "07/31/2017" "jemalloc 0.0.0-0-g000000000000" "User Manual"
|
||||
.TH "JEMALLOC" "3" "05/08/2018" "jemalloc 5.1.0-0-g61efbda7098d" "User Manual"
|
||||
.\" -----------------------------------------------------------------
|
||||
.\" * Define some portability stuff
|
||||
.\" -----------------------------------------------------------------
|
||||
|
@ -31,7 +31,7 @@
|
|||
jemalloc \- general purpose memory allocation functions
|
||||
.SH "LIBRARY"
|
||||
.PP
|
||||
This manual describes jemalloc 0\&.0\&.0\-0\-g0000000000000000000000000000000000000000\&. More information can be found at the
|
||||
This manual describes jemalloc 5\&.1\&.0\-0\-g61efbda7098de6fe64c362d309824864308c36d4\&. More information can be found at the
|
||||
\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
|
||||
.SH "SYNOPSIS"
|
||||
.sp
|
||||
|
@ -728,6 +728,13 @@ opt\&.background_thread
|
|||
can be used to set the default option\&. This option is only available on selected pthread\-based platforms\&.
|
||||
.RE
|
||||
.PP
|
||||
max_background_threads (\fBsize_t\fR) rw
|
||||
.RS 4
|
||||
Maximum number of background worker threads that will be created\&. This value is capped at
|
||||
opt\&.max_background_threads
|
||||
at startup\&.
|
||||
.RE
|
||||
.PP
|
||||
config\&.cache_oblivious (\fBbool\fR) r\-
|
||||
.RS 4
|
||||
\fB\-\-enable\-cache\-oblivious\fR
|
||||
|
@ -783,12 +790,6 @@ config\&.stats (\fBbool\fR) r\-
|
|||
was specified during build configuration\&.
|
||||
.RE
|
||||
.PP
|
||||
config\&.thp (\fBbool\fR) r\-
|
||||
.RS 4
|
||||
\fB\-\-disable\-thp\fR
|
||||
was not specified during build configuration, and the system supports transparent huge page manipulation\&.
|
||||
.RE
|
||||
.PP
|
||||
config\&.utrace (\fBbool\fR) r\-
|
||||
.RS 4
|
||||
\fB\-\-enable\-utrace\fR
|
||||
|
@ -821,12 +822,23 @@ in these cases\&. This option is disabled by default unless
|
|||
is specified during configuration, in which case it is enabled by default\&.
|
||||
.RE
|
||||
.PP
|
||||
opt\&.metadata_thp (\fBconst char *\fR) r\-
|
||||
.RS 4
|
||||
Controls whether to allow jemalloc to use transparent huge page (THP) for internal metadata (see
|
||||
stats.metadata)\&.
|
||||
\(lqalways\(rq
|
||||
allows such usage\&.
|
||||
\(lqauto\(rq
|
||||
uses no THP initially, but may begin to do so when metadata usage reaches a certain level\&. The default is
|
||||
\(lqdisabled\(rq\&.
|
||||
.RE
|
||||
.PP
|
||||
opt\&.retain (\fBbool\fR) r\-
|
||||
.RS 4
|
||||
If true, retain unused virtual memory for later reuse rather than discarding it by calling
|
||||
\fBmunmap\fR(2)
|
||||
or equivalent (see
|
||||
stats\&.retained
|
||||
stats.retained
|
||||
for related details)\&. This option is disabled by default unless discarding virtual memory is known to trigger platform\-specific performance problems, e\&.g\&. for [64\-bit] Linux, which has a quirk in its virtual memory allocation algorithm that causes semi\-permanent VM map holes under normal jemalloc operation\&. Although
|
||||
\fBmunmap\fR(2)
|
||||
causes issues on 32\-bit Linux as well, retaining virtual memory for 32\-bit Linux is disabled by default due to the practical possibility of address space exhaustion\&.
|
||||
|
@ -870,11 +882,18 @@ setting uses one arena per physical CPU, which means the two hyper threads on th
|
|||
.PP
|
||||
opt\&.background_thread (\fBconst bool\fR) r\-
|
||||
.RS 4
|
||||
Internal background worker threads enabled/disabled\&. See
|
||||
Internal background worker threads enabled/disabled\&. Because of potential circular dependencies, enabling background threads using this option may cause a crash or deadlock during initialization\&. For a reliable way to use this feature, see
|
||||
background_thread
|
||||
for dynamic control options and details\&. This option is disabled by default\&.
|
||||
.RE
|
||||
.PP
|
||||
opt\&.max_background_threads (\fBconst size_t\fR) r\-
|
||||
.RS 4
|
||||
Maximum number of background threads that will be created if
|
||||
background_thread
|
||||
is set\&. Defaults to number of cpus\&.
|
||||
.RE
|
||||
.PP
|
||||
opt\&.dirty_decay_ms (\fBssize_t\fR) r\-
|
||||
.RS 4
|
||||
Approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged (i\&.e\&. converted to muzzy via e\&.g\&.
|
||||
|
@ -882,7 +901,7 @@ madvise(\fI\&.\&.\&.\fR\fI\fBMADV_FREE\fR\fR)
|
|||
if supported by the operating system, or converted to clean otherwise) and/or reused\&. Dirty pages are defined as previously having been potentially written to by the application, and therefore consuming physical memory, yet having no current use\&. The pages are incrementally purged according to a sigmoidal decay curve that starts and ends with zero purge rate\&. A decay time of 0 causes all unused dirty pages to be purged immediately upon creation\&. A decay time of \-1 disables purging\&. The default decay time is 10 seconds\&. See
|
||||
arenas\&.dirty_decay_ms
|
||||
and
|
||||
arena\&.<i>\&.muzzy_decay_ms
|
||||
arena\&.<i>\&.dirty_decay_ms
|
||||
for related dynamic control options\&. See
|
||||
opt\&.muzzy_decay_ms
|
||||
for a description of muzzy pages\&.
|
||||
|
@ -898,6 +917,11 @@ arena\&.<i>\&.muzzy_decay_ms
|
|||
for related dynamic control options\&.
|
||||
.RE
|
||||
.PP
|
||||
opt\&.lg_extent_max_active_fit (\fBsize_t\fR) r\-
|
||||
.RS 4
|
||||
When reusing dirty extents, this determines the (log base 2 of the) maximum ratio between the size of the active extent selected (to split off from) and the size of the requested allocation\&. This prevents the splitting of large active extents for smaller allocations, which can reduce fragmentation over the long run (especially for non\-active extents)\&. Lower value may reduce fragmentation, at the cost of extra active extents\&. The default value is 6, which gives a maximum ratio of 64 (2^6)\&.
|
||||
.RE
|
||||
.PP
|
||||
opt\&.stats_print (\fBbool\fR) r\-
|
||||
.RS 4
|
||||
Enable/disable statistics printing at exit\&. If enabled, the
|
||||
|
@ -995,6 +1019,15 @@ opt\&.lg_tcache_max (\fBsize_t\fR) r\-
|
|||
Maximum size class (log base 2) to cache in the thread\-specific cache (tcache)\&. At a minimum, all small size classes are cached, and at a maximum all large size classes are cached\&. The default maximum is 32 KiB (2^15)\&.
|
||||
.RE
|
||||
.PP
|
||||
opt\&.thp (\fBconst char *\fR) r\-
|
||||
.RS 4
|
||||
Transparent hugepage (THP) mode\&. Settings "always", "never" and "default" are available if THP is supported by the operating system\&. The "always" setting enables transparent hugepage for all user memory mappings with
|
||||
\fI\fBMADV_HUGEPAGE\fR\fR; "never" ensures no transparent hugepage with
|
||||
\fI\fBMADV_NOHUGEPAGE\fR\fR; the default setting "default" makes no changes\&. Note that this option does not affect THP for jemalloc internal metadata (see
|
||||
opt\&.metadata_thp); in addition, for arenas with customized
|
||||
extent_hooks, this option is bypassed as it is implemented as part of the default extent hooks\&.
|
||||
.RE
|
||||
.PP
|
||||
opt\&.prof (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
|
||||
.RS 4
|
||||
Memory profiling enabled/disabled\&. If enabled, profile memory allocation activity\&. See the
|
||||
|
@ -1235,6 +1268,14 @@ opt\&.muzzy_decay_ms
|
|||
for additional information\&.
|
||||
.RE
|
||||
.PP
|
||||
arena\&.<i>\&.retain_grow_limit (\fBsize_t\fR) rw
|
||||
.RS 4
|
||||
Maximum size to grow retained region (only relevant when
|
||||
opt\&.retain
|
||||
is enabled)\&. This controls the maximum increment to expand virtual memory, or allocation through
|
||||
arena\&.<i>extent_hooks\&. In particular, if customized extent hooks reserve physical memory (e\&.g\&. 1G huge pages), this is useful to control the allocation hook\*(Aqs input size\&. The default is no limit\&.
|
||||
.RE
|
||||
.PP
|
||||
arena\&.<i>\&.extent_hooks (\fBextent_hooks_t *\fR) rw
|
||||
.RS 4
|
||||
Get or set the extent management hook functions for arena <i>\&. The functions must be capable of operating on all extant extents associated with arena <i>, usually by passing unknown extents to the replaced functions\&. In practice, it is feasible to control allocation for arenas explicitly created via
|
||||
|
@ -1265,7 +1306,7 @@ struct extent_hooks_s {
|
|||
The
|
||||
\fBextent_hooks_t\fR
|
||||
structure comprises function pointers which are described individually below\&. jemalloc uses these functions to manage extent lifetime, which starts off with allocation of mapped committed memory, in the simplest case followed by deallocation\&. However, there are performance and platform reasons to retain extents for later reuse\&. Cleanup attempts cascade from deallocation to decommit to forced purging to lazy purging, which gives the extent management functions opportunities to reject the most permanent cleanup operations in favor of less permanent (and often less costly) operations\&. All operations except allocation can be universally opted out of by setting the hook pointers to
|
||||
\fBNULL\fR, or selectively opted out of by returning failure\&.
|
||||
\fBNULL\fR, or selectively opted out of by returning failure\&. Note that once the extent hook is set, the structure is accessed directly by the associated arenas, so it must remain valid for the entire lifetime of the arenas\&.
|
||||
.HP \w'typedef\ void\ *(extent_alloc_t)('u
|
||||
.BI "typedef void *(extent_alloc_t)(extent_hooks_t\ *" "extent_hooks" ", void\ *" "new_addr" ", size_t\ " "size" ", size_t\ " "alignment" ", bool\ *" "zero" ", bool\ *" "commit" ", unsigned\ " "arena_ind" ");"
|
||||
.sp
|
||||
|
@ -1559,6 +1600,11 @@ arenas\&.create (\fBunsigned\fR, \fBextent_hooks_t *\fR) rw
|
|||
Explicitly create a new arena outside the range of automatically managed arenas, with optionally specified extent hooks, and return the new arena index\&.
|
||||
.RE
|
||||
.PP
|
||||
arenas\&.lookup (\fBunsigned\fR, \fBvoid*\fR) rw
|
||||
.RS 4
|
||||
Index of the arena to which an allocation belongs\&.
|
||||
.RE
|
||||
.PP
|
||||
prof\&.thread_active_init (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
|
||||
.RS 4
|
||||
Control the initial setting for
|
||||
|
@ -1635,7 +1681,16 @@ stats\&.metadata (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
|
|||
.RS 4
|
||||
Total number of bytes dedicated to metadata, which comprise base allocations used for bootstrap\-sensitive allocator metadata structures (see
|
||||
stats\&.arenas\&.<i>\&.base) and internal allocations (see
|
||||
stats\&.arenas\&.<i>\&.internal)\&.
|
||||
stats\&.arenas\&.<i>\&.internal)\&. Transparent huge page (enabled with
|
||||
opt.metadata_thp) usage is not considered\&.
|
||||
.RE
|
||||
.PP
|
||||
stats\&.metadata_thp (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
|
||||
.RS 4
|
||||
Number of transparent huge pages (THP) used for metadata\&. See
|
||||
stats\&.metadata
|
||||
and
|
||||
opt.metadata_thp for details\&.
|
||||
.RE
|
||||
.PP
|
||||
stats\&.resident (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
|
||||
|
@ -1818,6 +1873,13 @@ stats\&.arenas\&.<i>\&.internal (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
|
|||
Number of bytes dedicated to internal allocations\&. Internal allocations differ from application\-originated allocations in that they are for internal use, and that they are omitted from heap profiles\&.
|
||||
.RE
|
||||
.PP
|
||||
stats\&.arenas\&.<i>\&.metadata_thp (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
|
||||
.RS 4
|
||||
Number of transparent huge pages (THP) used for metadata\&. See
|
||||
opt.metadata_thp
|
||||
for details\&.
|
||||
.RE
|
||||
.PP
|
||||
stats\&.arenas\&.<i>\&.resident (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
|
||||
.RS 4
|
||||
Maximum number of bytes in physically resident data pages mapped by the arena, comprising all pages dedicated to allocator metadata, pages backing active allocations, and unused dirty pages\&. This is a maximum rather than precise because pages may not actually be physically resident if they correspond to demand\-zeroed virtual memory that has not yet been touched\&. This is a multiple of the page size\&.
|
File diff suppressed because one or more lines are too long
|
@ -761,6 +761,18 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
|
|||
selected pthread-based platforms.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="max_background_threads">
|
||||
<term>
|
||||
<mallctl>max_background_threads</mallctl>
|
||||
(<type>size_t</type>)
|
||||
<literal>rw</literal>
|
||||
</term>
|
||||
<listitem><para>Maximum number of background worker threads that will
|
||||
be created. This value is capped at <link
|
||||
linkend="opt.max_background_threads"><mallctl>opt.max_background_threads</mallctl></link> at
|
||||
startup.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="config.cache_oblivious">
|
||||
<term>
|
||||
<mallctl>config.cache_oblivious</mallctl>
|
||||
|
@ -852,16 +864,6 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
|
|||
build configuration.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="config.thp">
|
||||
<term>
|
||||
<mallctl>config.thp</mallctl>
|
||||
(<type>bool</type>)
|
||||
<literal>r-</literal>
|
||||
</term>
|
||||
<listitem><para><option>--disable-thp</option> was not specified
|
||||
during build configuration, and the system supports transparent huge
|
||||
page manipulation.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="config.utrace">
|
||||
<term>
|
||||
|
@ -916,6 +918,20 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
|
|||
</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="opt.metadata_thp">
|
||||
<term>
|
||||
<mallctl>opt.metadata_thp</mallctl>
|
||||
(<type>const char *</type>)
|
||||
<literal>r-</literal>
|
||||
</term>
|
||||
<listitem><para>Controls whether to allow jemalloc to use transparent
|
||||
huge page (THP) for internal metadata (see <link
|
||||
linkend="stats.metadata">stats.metadata</link>). <quote>always</quote>
|
||||
allows such usage. <quote>auto</quote> uses no THP initially, but may
|
||||
begin to do so when metadata usage reaches a certain level. The default
|
||||
is <quote>disabled</quote>.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="opt.retain">
|
||||
<term>
|
||||
<mallctl>opt.retain</mallctl>
|
||||
|
@ -996,12 +1012,26 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
|
|||
(<type>const bool</type>)
|
||||
<literal>r-</literal>
|
||||
</term>
|
||||
<listitem><para>Internal background worker threads enabled/disabled. See
|
||||
<link linkend="background_thread">background_thread</link> for dynamic
|
||||
control options and details. This option is disabled by
|
||||
<listitem><para>Internal background worker threads enabled/disabled.
|
||||
Because of potential circular dependencies, enabling background threads
|
||||
using this option may cause a crash or deadlock during initialization. For
|
||||
a reliable way to use this feature, see <link
|
||||
linkend="background_thread">background_thread</link> for dynamic control
|
||||
options and details. This option is disabled by
|
||||
default.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="opt.max_background_threads">
|
||||
<term>
|
||||
<mallctl>opt.max_background_threads</mallctl>
|
||||
(<type>const size_t</type>)
|
||||
<literal>r-</literal>
|
||||
</term>
|
||||
<listitem><para>Maximum number of background threads that will be created
|
||||
if <link linkend="background_thread">background_thread</link> is set.
|
||||
Defaults to number of cpus.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="opt.dirty_decay_ms">
|
||||
<term>
|
||||
<mallctl>opt.dirty_decay_ms</mallctl>
|
||||
|
@ -1022,7 +1052,7 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
|
|||
The default decay time is 10 seconds. See <link
|
||||
linkend="arenas.dirty_decay_ms"><mallctl>arenas.dirty_decay_ms</mallctl></link>
|
||||
and <link
|
||||
linkend="arena.i.muzzy_decay_ms"><mallctl>arena.<i>.muzzy_decay_ms</mallctl></link>
|
||||
linkend="arena.i.dirty_decay_ms"><mallctl>arena.<i>.dirty_decay_ms</mallctl></link>
|
||||
for related dynamic control options. See <link
|
||||
linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
|
||||
for a description of muzzy pages.</para></listitem>
|
||||
|
@ -1052,6 +1082,22 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
|
|||
for related dynamic control options.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="opt.lg_extent_max_active_fit">
|
||||
<term>
|
||||
<mallctl>opt.lg_extent_max_active_fit</mallctl>
|
||||
(<type>size_t</type>)
|
||||
<literal>r-</literal>
|
||||
</term>
|
||||
<listitem><para>When reusing dirty extents, this determines the (log
|
||||
base 2 of the) maximum ratio between the size of the active extent
|
||||
selected (to split off from) and the size of the requested allocation.
|
||||
This prevents the splitting of large active extents for smaller
|
||||
allocations, which can reduce fragmentation over the long run
|
||||
(especially for non-active extents). Lower value may reduce
|
||||
fragmentation, at the cost of extra active extents. The default value
|
||||
is 6, which gives a maximum ratio of 64 (2^6).</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="opt.stats_print">
|
||||
<term>
|
||||
<mallctl>opt.stats_print</mallctl>
|
||||
|
@ -1194,6 +1240,28 @@ malloc_conf = "xmalloc:true";]]></programlisting>
|
|||
default maximum is 32 KiB (2^15).</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="opt.thp">
|
||||
<term>
|
||||
<mallctl>opt.thp</mallctl>
|
||||
(<type>const char *</type>)
|
||||
<literal>r-</literal>
|
||||
</term>
|
||||
<listitem><para>Transparent hugepage (THP) mode. Settings "always",
|
||||
"never" and "default" are available if THP is supported by the operating
|
||||
system. The "always" setting enables transparent hugepage for all user
|
||||
memory mappings with
|
||||
<parameter><constant>MADV_HUGEPAGE</constant></parameter>; "never"
|
||||
ensures no transparent hugepage with
|
||||
<parameter><constant>MADV_NOHUGEPAGE</constant></parameter>; the default
|
||||
setting "default" makes no changes. Note that: this option does not
|
||||
affect THP for jemalloc internal metadata (see <link
|
||||
linkend="opt.metadata_thp"><mallctl>opt.metadata_thp</mallctl></link>);
|
||||
in addition, for arenas with customized <link
|
||||
linkend="arena.i.extent_hooks"><mallctl>extent_hooks</mallctl></link>,
|
||||
this option is bypassed as it is implemented as part of the default
|
||||
extent hooks.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="opt.prof">
|
||||
<term>
|
||||
<mallctl>opt.prof</mallctl>
|
||||
|
@ -1666,6 +1734,22 @@ malloc_conf = "xmalloc:true";]]></programlisting>
|
|||
for additional information.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="arena.i.retain_grow_limit">
|
||||
<term>
|
||||
<mallctl>arena.<i>.retain_grow_limit</mallctl>
|
||||
(<type>size_t</type>)
|
||||
<literal>rw</literal>
|
||||
</term>
|
||||
<listitem><para>Maximum size to grow retained region (only relevant when
|
||||
<link linkend="opt.retain"><mallctl>opt.retain</mallctl></link> is
|
||||
enabled). This controls the maximum increment to expand virtual memory,
|
||||
or allocation through <link
|
||||
linkend="arena.i.extent_hooks"><mallctl>arena.<i>extent_hooks</mallctl></link>.
|
||||
In particular, if customized extent hooks reserve physical memory
|
||||
(e.g. 1G huge pages), this is useful to control the allocation hook's
|
||||
input size. The default is no limit.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="arena.i.extent_hooks">
|
||||
<term>
|
||||
<mallctl>arena.<i>.extent_hooks</mallctl>
|
||||
|
@ -1708,7 +1792,9 @@ struct extent_hooks_s {
|
|||
in favor of less permanent (and often less costly) operations. All
|
||||
operations except allocation can be universally opted out of by setting
|
||||
the hook pointers to <constant>NULL</constant>, or selectively opted out
|
||||
of by returning failure.</para>
|
||||
of by returning failure. Note that once the extent hook is set, the
|
||||
structure is accessed directly by the associated arenas, so it must
|
||||
remain valid for the entire lifetime of the arenas.</para>
|
||||
|
||||
<funcsynopsis><funcprototype>
|
||||
<funcdef>typedef void *<function>(extent_alloc_t)</function></funcdef>
|
||||
|
@ -2044,6 +2130,15 @@ struct extent_hooks_s {
|
|||
and return the new arena index.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="arenas.lookup">
|
||||
<term>
|
||||
<mallctl>arenas.lookup</mallctl>
|
||||
(<type>unsigned</type>, <type>void*</type>)
|
||||
<literal>rw</literal>
|
||||
</term>
|
||||
<listitem><para>Index of the arena to which an allocation belongs.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="prof.thread_active_init">
|
||||
<term>
|
||||
<mallctl>prof.thread_active_init</mallctl>
|
||||
|
@ -2187,7 +2282,24 @@ struct extent_hooks_s {
|
|||
metadata structures (see <link
|
||||
linkend="stats.arenas.i.base"><mallctl>stats.arenas.<i>.base</mallctl></link>)
|
||||
and internal allocations (see <link
|
||||
linkend="stats.arenas.i.internal"><mallctl>stats.arenas.<i>.internal</mallctl></link>).</para></listitem>
|
||||
linkend="stats.arenas.i.internal"><mallctl>stats.arenas.<i>.internal</mallctl></link>).
|
||||
Transparent huge page (enabled with <link
|
||||
linkend="opt.metadata_thp">opt.metadata_thp</link>) usage is not
|
||||
considered.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="stats.metadata_thp">
|
||||
<term>
|
||||
<mallctl>stats.metadata_thp</mallctl>
|
||||
(<type>size_t</type>)
|
||||
<literal>r-</literal>
|
||||
[<option>--enable-stats</option>]
|
||||
</term>
|
||||
<listitem><para>Number of transparent huge pages (THP) used for
|
||||
metadata. See <link
|
||||
linkend="stats.metadata"><mallctl>stats.metadata</mallctl></link> and
|
||||
<link linkend="opt.metadata_thp">opt.metadata_thp</link>) for
|
||||
details.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="stats.resident">
|
||||
|
@ -2506,6 +2618,18 @@ struct extent_hooks_s {
|
|||
profiles.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="stats.arenas.i.metadata_thp">
|
||||
<term>
|
||||
<mallctl>stats.arenas.<i>.metadata_thp</mallctl>
|
||||
(<type>size_t</type>)
|
||||
<literal>r-</literal>
|
||||
[<option>--enable-stats</option>]
|
||||
</term>
|
||||
<listitem><para>Number of transparent huge pages (THP) used for
|
||||
metadata. See <link linkend="opt.metadata_thp">opt.metadata_thp</link>
|
||||
for details.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="stats.arenas.i.resident">
|
||||
<term>
|
||||
<mallctl>stats.arenas.<i>.resident</mallctl>
|
|
@ -1,6 +1,7 @@
|
|||
#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
|
||||
#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
|
||||
|
||||
#include "jemalloc/internal/bin.h"
|
||||
#include "jemalloc/internal/extent_dss.h"
|
||||
#include "jemalloc/internal/pages.h"
|
||||
#include "jemalloc/internal/size_classes.h"
|
||||
|
@ -9,25 +10,19 @@
|
|||
extern ssize_t opt_dirty_decay_ms;
|
||||
extern ssize_t opt_muzzy_decay_ms;
|
||||
|
||||
extern const arena_bin_info_t arena_bin_info[NBINS];
|
||||
|
||||
extern percpu_arena_mode_t opt_percpu_arena;
|
||||
extern const char *percpu_arena_mode_names[];
|
||||
|
||||
extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
|
||||
extern malloc_mutex_t arenas_lock;
|
||||
|
||||
void arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
|
||||
szind_t szind, uint64_t nrequests);
|
||||
void arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
|
||||
size_t size);
|
||||
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
|
||||
unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
|
||||
ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
|
||||
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
|
||||
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
|
||||
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
|
||||
malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats);
|
||||
bin_stats_t *bstats, arena_stats_large_t *lstats);
|
||||
void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_hooks_t **r_extent_hooks, extent_t *extent);
|
||||
#ifdef JEMALLOC_JET
|
||||
|
@ -50,11 +45,11 @@ void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
|
|||
void arena_reset(tsd_t *tsd, arena_t *arena);
|
||||
void arena_destroy(tsd_t *tsd, arena_t *arena);
|
||||
void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
|
||||
tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
|
||||
void arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info,
|
||||
cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
|
||||
void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info,
|
||||
bool zero);
|
||||
|
||||
typedef void (arena_dalloc_junk_small_t)(void *, const arena_bin_info_t *);
|
||||
typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *);
|
||||
extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;
|
||||
|
||||
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
|
||||
|
@ -77,6 +72,8 @@ ssize_t arena_dirty_decay_ms_default_get(void);
|
|||
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
|
||||
ssize_t arena_muzzy_decay_ms_default_get(void);
|
||||
bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
|
||||
bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
|
||||
size_t *old_limit, size_t *new_limit);
|
||||
unsigned arena_nthreads_get(arena_t *arena, bool internal);
|
||||
void arena_nthreads_inc(arena_t *arena, bool internal);
|
||||
void arena_nthreads_dec(arena_t *arena, bool internal);
|
|
@ -25,7 +25,7 @@ static inline bool
|
|||
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
|
||||
cassert(config_prof);
|
||||
|
||||
if (likely(prof_interval == 0)) {
|
||||
if (likely(prof_interval == 0 || !prof_active_get_unlocked())) {
|
||||
return false;
|
||||
}
|
||||
|
|
@ -8,13 +8,6 @@
|
|||
#include "jemalloc/internal/sz.h"
|
||||
#include "jemalloc/internal/ticker.h"
|
||||
|
||||
static inline szind_t
|
||||
arena_bin_index(arena_t *arena, arena_bin_t *bin) {
|
||||
szind_t binind = (szind_t)(bin - arena->bins);
|
||||
assert(binind < NBINS);
|
||||
return binind;
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
|
||||
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
|
||||
cassert(config_prof);
|
||||
|
@ -35,7 +28,7 @@ arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
|
|||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void
|
||||
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
|
||||
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, UNUSED size_t usize,
|
||||
alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
@ -54,7 +47,7 @@ arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
|
|||
}
|
||||
|
||||
static inline void
|
||||
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
|
||||
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, UNUSED prof_tctx_t *tctx) {
|
||||
cassert(config_prof);
|
||||
assert(ptr != NULL);
|
||||
|
|
@ -0,0 +1,237 @@
|
|||
#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
|
||||
#define JEMALLOC_INTERNAL_ARENA_STATS_H
|
||||
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
#include "jemalloc/internal/size_classes.h"
|
||||
|
||||
/*
|
||||
* In those architectures that support 64-bit atomics, we use atomic updates for
|
||||
* our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
|
||||
* externally.
|
||||
*/
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
typedef atomic_u64_t arena_stats_u64_t;
|
||||
#else
|
||||
/* Must hold the arena stats mutex while reading atomically. */
|
||||
typedef uint64_t arena_stats_u64_t;
|
||||
#endif
|
||||
|
||||
typedef struct arena_stats_large_s arena_stats_large_t;
|
||||
struct arena_stats_large_s {
|
||||
/*
|
||||
* Total number of allocation/deallocation requests served directly by
|
||||
* the arena.
|
||||
*/
|
||||
arena_stats_u64_t nmalloc;
|
||||
arena_stats_u64_t ndalloc;
|
||||
|
||||
/*
|
||||
* Number of allocation requests that correspond to this size class.
|
||||
* This includes requests served by tcache, though tcache only
|
||||
* periodically merges into this counter.
|
||||
*/
|
||||
arena_stats_u64_t nrequests; /* Partially derived. */
|
||||
|
||||
/* Current number of allocations of this size class. */
|
||||
size_t curlextents; /* Derived. */
|
||||
};
|
||||
|
||||
typedef struct arena_stats_decay_s arena_stats_decay_t;
|
||||
struct arena_stats_decay_s {
|
||||
/* Total number of purge sweeps. */
|
||||
arena_stats_u64_t npurge;
|
||||
/* Total number of madvise calls made. */
|
||||
arena_stats_u64_t nmadvise;
|
||||
/* Total number of pages purged. */
|
||||
arena_stats_u64_t purged;
|
||||
};
|
||||
|
||||
/*
|
||||
* Arena stats. Note that fields marked "derived" are not directly maintained
|
||||
* within the arena code; rather their values are derived during stats merge
|
||||
* requests.
|
||||
*/
|
||||
typedef struct arena_stats_s arena_stats_t;
|
||||
struct arena_stats_s {
|
||||
#ifndef JEMALLOC_ATOMIC_U64
|
||||
malloc_mutex_t mtx;
|
||||
#endif
|
||||
|
||||
/* Number of bytes currently mapped, excluding retained memory. */
|
||||
atomic_zu_t mapped; /* Partially derived. */
|
||||
|
||||
/*
|
||||
* Number of unused virtual memory bytes currently retained. Retained
|
||||
* bytes are technically mapped (though always decommitted or purged),
|
||||
* but they are excluded from the mapped statistic (above).
|
||||
*/
|
||||
atomic_zu_t retained; /* Derived. */
|
||||
|
||||
arena_stats_decay_t decay_dirty;
|
||||
arena_stats_decay_t decay_muzzy;
|
||||
|
||||
atomic_zu_t base; /* Derived. */
|
||||
atomic_zu_t internal;
|
||||
atomic_zu_t resident; /* Derived. */
|
||||
atomic_zu_t metadata_thp;
|
||||
|
||||
atomic_zu_t allocated_large; /* Derived. */
|
||||
arena_stats_u64_t nmalloc_large; /* Derived. */
|
||||
arena_stats_u64_t ndalloc_large; /* Derived. */
|
||||
arena_stats_u64_t nrequests_large; /* Derived. */
|
||||
|
||||
/* Number of bytes cached in tcache associated with this arena. */
|
||||
atomic_zu_t tcache_bytes; /* Derived. */
|
||||
|
||||
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
|
||||
|
||||
/* One element for each large size class. */
|
||||
arena_stats_large_t lstats[NSIZES - NBINS];
|
||||
|
||||
/* Arena uptime. */
|
||||
nstime_t uptime;
|
||||
};
|
||||
|
||||
static inline bool
|
||||
arena_stats_init(UNUSED tsdn_t *tsdn, arena_stats_t *arena_stats) {
|
||||
if (config_debug) {
|
||||
for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
|
||||
assert(((char *)arena_stats)[i] == 0);
|
||||
}
|
||||
}
|
||||
#ifndef JEMALLOC_ATOMIC_U64
|
||||
if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
|
||||
WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
/* Memory is zeroed, so there is no need to clear stats. */
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
|
||||
#ifndef JEMALLOC_ATOMIC_U64
|
||||
malloc_mutex_lock(tsdn, &arena_stats->mtx);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
|
||||
#ifndef JEMALLOC_ATOMIC_U64
|
||||
malloc_mutex_unlock(tsdn, &arena_stats->mtx);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
|
||||
arena_stats_u64_t *p) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
return atomic_load_u64(p, ATOMIC_RELAXED);
|
||||
#else
|
||||
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
|
||||
return *p;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
|
||||
arena_stats_u64_t *p, uint64_t x) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
|
||||
#else
|
||||
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
|
||||
*p += x;
|
||||
#endif
|
||||
}
|
||||
|
||||
UNUSED static inline void
|
||||
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
|
||||
arena_stats_u64_t *p, uint64_t x) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
|
||||
assert(r - x <= r);
|
||||
#else
|
||||
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
|
||||
*p -= x;
|
||||
assert(*p + x >= *p);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
 * Non-atomically sets *dst += src.  *dst needs external synchronization.
 * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
 * the types here are atomic).
 */
|
||||
static inline void
|
||||
arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
|
||||
atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
|
||||
#else
|
||||
*dst += src;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
return atomic_load_zu(p, ATOMIC_RELAXED);
|
||||
#else
|
||||
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
|
||||
return atomic_load_zu(p, ATOMIC_RELAXED);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
|
||||
size_t x) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
|
||||
#else
|
||||
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
|
||||
size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
|
||||
atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
|
||||
size_t x) {
|
||||
#ifdef JEMALLOC_ATOMIC_U64
|
||||
UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
|
||||
assert(r - x <= r);
|
||||
#else
|
||||
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
|
||||
size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
|
||||
atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Like the _u64 variant, needs an externally synchronized *dst. */
|
||||
static inline void
|
||||
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
|
||||
size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
|
||||
atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
|
||||
szind_t szind, uint64_t nrequests) {
|
||||
arena_stats_lock(tsdn, arena_stats);
|
||||
arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
|
||||
NBINS].nrequests, nrequests);
|
||||
arena_stats_unlock(tsdn, arena_stats);
|
||||
}
|
||||
|
||||
static inline void
|
||||
arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
|
||||
arena_stats_lock(tsdn, arena_stats);
|
||||
arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
|
||||
arena_stats_unlock(tsdn, arena_stats);
|
||||
}
|
||||
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
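The accessors above are all this header defines; how they compose is left implicit. As a hedged illustration (this helper is not part of the commit and its name is invented), a stats reader that wants a consistent snapshot of the dirty-decay counters would bracket the three reads with the lock helpers, which compile away on builds that do have 64-bit atomics:

static void
read_dirty_decay_stats_sketch(tsdn_t *tsdn, arena_stats_t *arena_stats,
    uint64_t *npurge, uint64_t *nmadvise, uint64_t *purged) {
	/* On builds without JEMALLOC_ATOMIC_U64 this takes arena_stats->mtx. */
	arena_stats_lock(tsdn, arena_stats);
	*npurge = arena_stats_read_u64(tsdn, arena_stats,
	    &arena_stats->decay_dirty.npurge);
	*nmadvise = arena_stats_read_u64(tsdn, arena_stats,
	    &arena_stats->decay_dirty.nmadvise);
	*purged = arena_stats_read_u64(tsdn, arena_stats,
	    &arena_stats->decay_dirty.purged);
	arena_stats_unlock(tsdn, arena_stats);
}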
|
|
@ -1,7 +1,9 @@
|
|||
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
|
||||
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
|
||||
|
||||
#include "jemalloc/internal/arena_stats.h"
|
||||
#include "jemalloc/internal/atomic.h"
|
||||
#include "jemalloc/internal/bin.h"
|
||||
#include "jemalloc/internal/bitmap.h"
|
||||
#include "jemalloc/internal/extent_dss.h"
|
||||
#include "jemalloc/internal/jemalloc_internal_types.h"
|
||||
|
@ -10,45 +12,8 @@
|
|||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/size_classes.h"
|
||||
#include "jemalloc/internal/smoothstep.h"
|
||||
#include "jemalloc/internal/stats.h"
|
||||
#include "jemalloc/internal/ticker.h"
|
||||
|
||||
/*
|
||||
* Read-only information associated with each element of arena_t's bins array
|
||||
* is stored separately, partly to reduce memory usage (only one copy, rather
|
||||
* than one per arena), but mainly to avoid false cacheline sharing.
|
||||
*
|
||||
* Each slab has the following layout:
|
||||
*
|
||||
* /--------------------\
|
||||
* | region 0 |
|
||||
* |--------------------|
|
||||
* | region 1 |
|
||||
* |--------------------|
|
||||
* | ... |
|
||||
* | ... |
|
||||
* | ... |
|
||||
* |--------------------|
|
||||
* | region nregs-1 |
|
||||
* \--------------------/
|
||||
*/
|
||||
struct arena_bin_info_s {
|
||||
/* Size of regions in a slab for this bin's size class. */
|
||||
size_t reg_size;
|
||||
|
||||
/* Total size of a slab for this bin's size class. */
|
||||
size_t slab_size;
|
||||
|
||||
/* Total number of regions in a slab for this bin's size class. */
|
||||
uint32_t nregs;
|
||||
|
||||
/*
|
||||
* Metadata used to manipulate bitmaps for slabs associated with this
|
||||
* bin.
|
||||
*/
|
||||
bitmap_info_t bitmap_info;
|
||||
};
|
||||
|
||||
struct arena_decay_s {
|
||||
/* Synchronizes all non-atomic fields. */
|
||||
malloc_mutex_t mtx;
|
||||
|
@ -104,37 +69,11 @@ struct arena_decay_s {
|
|||
* arena and ctl code.
|
||||
*
|
||||
* Synchronization: Same as associated arena's stats field. */
|
||||
decay_stats_t *stats;
|
||||
arena_stats_decay_t *stats;
|
||||
/* Peak number of pages in associated extents. Used for debug only. */
|
||||
uint64_t ceil_npages;
|
||||
};
|
||||
|
||||
struct arena_bin_s {
|
||||
/* All operations on arena_bin_t fields require lock ownership. */
|
||||
malloc_mutex_t lock;
|
||||
|
||||
/*
|
||||
* Current slab being used to service allocations of this bin's size
|
||||
* class. slabcur is independent of slabs_{nonfull,full}; whenever
|
||||
* slabcur is reassigned, the previous slab must be deallocated or
|
||||
* inserted into slabs_{nonfull,full}.
|
||||
*/
|
||||
extent_t *slabcur;
|
||||
|
||||
/*
|
||||
* Heap of non-full slabs. This heap is used to assure that new
|
||||
* allocations come from the non-full slab that is oldest/lowest in
|
||||
* memory.
|
||||
*/
|
||||
extent_heap_t slabs_nonfull;
|
||||
|
||||
/* List used to track full slabs. */
|
||||
extent_list_t slabs_full;
|
||||
|
||||
/* Bin statistics. */
|
||||
malloc_bin_stats_t stats;
|
||||
};
|
||||
|
||||
struct arena_s {
|
||||
/*
|
||||
* Number of threads currently assigned to this arena. Each thread has
|
||||
|
@ -162,14 +101,15 @@ struct arena_s {
|
|||
arena_stats_t stats;
|
||||
|
||||
/*
|
||||
* List of tcaches for extant threads associated with this arena.
|
||||
* Stats from these are merged incrementally, and at exit if
|
||||
* opt_stats_print is enabled.
|
||||
* Lists of tcaches and cache_bin_array_descriptors for extant threads
|
||||
* associated with this arena. Stats from these are merged
|
||||
* incrementally, and at exit if opt_stats_print is enabled.
|
||||
*
|
||||
* Synchronization: tcache_ql_mtx.
|
||||
*/
|
||||
ql_head(tcache_t) tcache_ql;
|
||||
malloc_mutex_t tcache_ql_mtx;
|
||||
ql_head(tcache_t) tcache_ql;
|
||||
ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
|
||||
malloc_mutex_t tcache_ql_mtx;
|
||||
|
||||
/* Synchronization: internal. */
|
||||
prof_accum_t prof_accum;
|
||||
|
@ -239,9 +179,14 @@ struct arena_s {
|
|||
* be effective even if multiple arenas' extent allocation requests are
|
||||
* highly interleaved.
|
||||
*
|
||||
* retain_grow_limit is the max allowed size ind to expand (unless the
|
||||
* required size is greater). Default is no limit, and controlled
|
||||
* through mallctl only.
|
||||
*
|
||||
* Synchronization: extent_grow_mtx
|
||||
*/
|
||||
pszind_t extent_grow_next;
|
||||
pszind_t retain_grow_limit;
|
||||
malloc_mutex_t extent_grow_mtx;
|
||||
|
||||
/*
|
||||
|
@ -258,7 +203,7 @@ struct arena_s {
|
|||
*
|
||||
* Synchronization: internal.
|
||||
*/
|
||||
arena_bin_t bins[NBINS];
|
||||
bin_t bins[NBINS];
|
||||
|
||||
/*
|
||||
* Base allocator, from which arena metadata are allocated.
|
|
@ -12,9 +12,7 @@
|
|||
#define DECAY_NTICKS_PER_UPDATE 1000
|
||||
|
||||
typedef struct arena_slab_data_s arena_slab_data_t;
|
||||
typedef struct arena_bin_info_s arena_bin_info_t;
|
||||
typedef struct arena_decay_s arena_decay_t;
|
||||
typedef struct arena_bin_s arena_bin_t;
|
||||
typedef struct arena_s arena_t;
|
||||
typedef struct arena_tdata_s arena_tdata_t;
|
||||
typedef struct alloc_ctx_s alloc_ctx_t;
|
|
@ -2,9 +2,11 @@
|
|||
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
|
||||
|
||||
extern bool opt_background_thread;
|
||||
extern size_t opt_max_background_threads;
|
||||
extern malloc_mutex_t background_thread_lock;
|
||||
extern atomic_b_t background_thread_enabled_state;
|
||||
extern size_t n_background_threads;
|
||||
extern size_t max_background_threads;
|
||||
extern background_thread_info_t *background_thread_info;
|
||||
extern bool can_enable_background_thread;
|
||||
|
|
@ -8,6 +8,7 @@
|
|||
#endif
|
||||
|
||||
#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX
|
||||
#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT
|
||||
|
||||
typedef enum {
|
||||
background_thread_stopped,
|
|
@ -1,6 +1,9 @@
|
|||
#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
|
||||
#define JEMALLOC_INTERNAL_BASE_EXTERNS_H
|
||||
|
||||
extern metadata_thp_mode_t opt_metadata_thp;
|
||||
extern const char *metadata_thp_mode_names[];
|
||||
|
||||
base_t *b0get(void);
|
||||
base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
|
||||
void base_delete(tsdn_t *tsdn, base_t *base);
|
||||
|
@ -10,7 +13,7 @@ extent_hooks_t *base_extent_hooks_set(base_t *base,
|
|||
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
|
||||
extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
|
||||
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
|
||||
size_t *resident, size_t *mapped);
|
||||
size_t *resident, size_t *mapped, size_t *n_thp);
|
||||
void base_prefork(tsdn_t *tsdn, base_t *base);
|
||||
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
|
||||
void base_postfork_child(tsdn_t *tsdn, base_t *base);
|
|
@ -6,4 +6,8 @@ base_ind_get(const base_t *base) {
	return base->ind;
}

static inline bool
metadata_thp_enabled(void) {
	return (opt_metadata_thp != metadata_thp_disabled);
}
#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */
|
|
@ -30,6 +30,8 @@ struct base_s {
|
|||
/* Protects base_alloc() and base_stats_get() operations. */
|
||||
malloc_mutex_t mtx;
|
||||
|
||||
/* Using THP when true (metadata_thp auto mode). */
|
||||
bool auto_thp_switched;
|
||||
/*
|
||||
* Most recent size class in the series of increasingly large base
|
||||
* extents. Logarithmic spacing between subsequent allocations ensures
|
||||
|
@ -50,6 +52,8 @@ struct base_s {
|
|||
size_t allocated;
|
||||
size_t resident;
|
||||
size_t mapped;
|
||||
/* Number of THP regions touched. */
|
||||
size_t n_thp;
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */
|
|
@ -0,0 +1,33 @@
|
|||
#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
|
||||
#define JEMALLOC_INTERNAL_BASE_TYPES_H
|
||||
|
||||
typedef struct base_block_s base_block_t;
|
||||
typedef struct base_s base_t;
|
||||
|
||||
#define METADATA_THP_DEFAULT metadata_thp_disabled
|
||||
|
||||
/*
|
||||
* In auto mode, arenas switch to huge pages for the base allocator on the
|
||||
* second base block. a0 switches to thp on the 5th block (after 20 megabytes
|
||||
* of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
|
||||
*/
|
||||
|
||||
#define BASE_AUTO_THP_THRESHOLD 2
|
||||
#define BASE_AUTO_THP_THRESHOLD_A0 5
|
||||
|
||||
typedef enum {
|
||||
metadata_thp_disabled = 0,
|
||||
/*
|
||||
* Lazily enable hugepage for metadata. To avoid high RSS caused by THP
|
||||
* + low usage arena (i.e. THP becomes a significant percentage), the
|
||||
* "auto" option only starts using THP after a base allocator used up
|
||||
* the first THP region. Starting from the second hugepage (in a single
|
||||
* arena), "auto" behaves the same as "always", i.e. madvise hugepage
|
||||
* right away.
|
||||
*/
|
||||
metadata_thp_auto = 1,
|
||||
metadata_thp_always = 2,
|
||||
metadata_thp_mode_limit = 3
|
||||
} metadata_thp_mode_t;
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */
|
|
@ -0,0 +1,106 @@
|
|||
#ifndef JEMALLOC_INTERNAL_BIN_H
|
||||
#define JEMALLOC_INTERNAL_BIN_H
|
||||
|
||||
#include "jemalloc/internal/extent_types.h"
|
||||
#include "jemalloc/internal/extent_structs.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/bin_stats.h"
|
||||
|
||||
/*
|
||||
* A bin contains a set of extents that are currently being used for slab
|
||||
* allocations.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Read-only information associated with each element of arena_t's bins array
|
||||
* is stored separately, partly to reduce memory usage (only one copy, rather
|
||||
* than one per arena), but mainly to avoid false cacheline sharing.
|
||||
*
|
||||
* Each slab has the following layout:
|
||||
*
|
||||
* /--------------------\
|
||||
* | region 0 |
|
||||
* |--------------------|
|
||||
* | region 1 |
|
||||
* |--------------------|
|
||||
* | ... |
|
||||
* | ... |
|
||||
* | ... |
|
||||
* |--------------------|
|
||||
* | region nregs-1 |
|
||||
* \--------------------/
|
||||
*/
|
||||
typedef struct bin_info_s bin_info_t;
|
||||
struct bin_info_s {
|
||||
/* Size of regions in a slab for this bin's size class. */
|
||||
size_t reg_size;
|
||||
|
||||
/* Total size of a slab for this bin's size class. */
|
||||
size_t slab_size;
|
||||
|
||||
/* Total number of regions in a slab for this bin's size class. */
|
||||
uint32_t nregs;
|
||||
|
||||
/*
|
||||
* Metadata used to manipulate bitmaps for slabs associated with this
|
||||
* bin.
|
||||
*/
|
||||
bitmap_info_t bitmap_info;
|
||||
};
|
||||
|
||||
extern const bin_info_t bin_infos[NBINS];
|
||||
|
||||
|
||||
typedef struct bin_s bin_t;
|
||||
struct bin_s {
|
||||
/* All operations on bin_t fields require lock ownership. */
|
||||
malloc_mutex_t lock;
|
||||
|
||||
/*
|
||||
* Current slab being used to service allocations of this bin's size
|
||||
* class. slabcur is independent of slabs_{nonfull,full}; whenever
|
||||
* slabcur is reassigned, the previous slab must be deallocated or
|
||||
* inserted into slabs_{nonfull,full}.
|
||||
*/
|
||||
extent_t *slabcur;
|
||||
|
||||
/*
|
||||
* Heap of non-full slabs. This heap is used to assure that new
|
||||
* allocations come from the non-full slab that is oldest/lowest in
|
||||
* memory.
|
||||
*/
|
||||
extent_heap_t slabs_nonfull;
|
||||
|
||||
/* List used to track full slabs. */
|
||||
extent_list_t slabs_full;
|
||||
|
||||
/* Bin statistics. */
|
||||
bin_stats_t stats;
|
||||
};
|
||||
|
||||
/* Initializes a bin to empty. Returns true on error. */
|
||||
bool bin_init(bin_t *bin);
|
||||
|
||||
/* Forking. */
|
||||
void bin_prefork(tsdn_t *tsdn, bin_t *bin);
|
||||
void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin);
|
||||
void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
|
||||
|
||||
/* Stats. */
|
||||
static inline void
|
||||
bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
|
||||
malloc_mutex_lock(tsdn, &bin->lock);
|
||||
malloc_mutex_prof_read(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
|
||||
dst_bin_stats->nmalloc += bin->stats.nmalloc;
|
||||
dst_bin_stats->ndalloc += bin->stats.ndalloc;
|
||||
dst_bin_stats->nrequests += bin->stats.nrequests;
|
||||
dst_bin_stats->curregs += bin->stats.curregs;
|
||||
dst_bin_stats->nfills += bin->stats.nfills;
|
||||
dst_bin_stats->nflushes += bin->stats.nflushes;
|
||||
dst_bin_stats->nslabs += bin->stats.nslabs;
|
||||
dst_bin_stats->reslabs += bin->stats.reslabs;
|
||||
dst_bin_stats->curslabs += bin->stats.curslabs;
|
||||
malloc_mutex_unlock(tsdn, &bin->lock);
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BIN_H */
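For a sense of how this header is consumed, here is a sketch only (the helper name is invented; bin_stats_merge(), bin_t, and the arena's bins[NBINS] array are taken from this diff): the stats/ctl path walks every bin of an arena and folds it into a destination array.

static void
merge_arena_bin_stats_sketch(tsdn_t *tsdn, arena_t *arena,
    bin_stats_t dst_bstats[NBINS]) {
	for (unsigned i = 0; i < NBINS; i++) {
		/* Locks bin->lock, accumulates the counters, unlocks. */
		bin_stats_merge(tsdn, &dst_bstats[i], &arena->bins[i]);
	}
}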
|
|
@ -0,0 +1,51 @@
|
|||
#ifndef JEMALLOC_INTERNAL_BIN_STATS_H
|
||||
#define JEMALLOC_INTERNAL_BIN_STATS_H
|
||||
|
||||
#include "jemalloc/internal/mutex_prof.h"
|
||||
|
||||
typedef struct bin_stats_s bin_stats_t;
|
||||
struct bin_stats_s {
|
||||
/*
|
||||
* Total number of allocation/deallocation requests served directly by
|
||||
* the bin. Note that tcache may allocate an object, then recycle it
|
||||
* many times, resulting many increments to nrequests, but only one
|
||||
* each to nmalloc and ndalloc.
|
||||
*/
|
||||
uint64_t nmalloc;
|
||||
uint64_t ndalloc;
|
||||
|
||||
/*
|
||||
* Number of allocation requests that correspond to the size of this
|
||||
* bin. This includes requests served by tcache, though tcache only
|
||||
* periodically merges into this counter.
|
||||
*/
|
||||
uint64_t nrequests;
|
||||
|
||||
/*
|
||||
* Current number of regions of this size class, including regions
|
||||
* currently cached by tcache.
|
||||
*/
|
||||
size_t curregs;
|
||||
|
||||
/* Number of tcache fills from this bin. */
|
||||
uint64_t nfills;
|
||||
|
||||
/* Number of tcache flushes to this bin. */
|
||||
uint64_t nflushes;
|
||||
|
||||
/* Total number of slabs created for this bin's size class. */
|
||||
uint64_t nslabs;
|
||||
|
||||
/*
|
||||
* Total number of slabs reused by extracting them from the slabs heap
|
||||
* for this bin's size class.
|
||||
*/
|
||||
uint64_t reslabs;
|
||||
|
||||
/* Current number of slabs in this bin. */
|
||||
size_t curslabs;
|
||||
|
||||
mutex_prof_data_t mutex_data;
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */
|
|
@ -0,0 +1,114 @@
|
|||
#ifndef JEMALLOC_INTERNAL_CACHE_BIN_H
|
||||
#define JEMALLOC_INTERNAL_CACHE_BIN_H
|
||||
|
||||
#include "jemalloc/internal/ql.h"
|
||||
|
||||
/*
|
||||
* The cache_bins are the mechanism that the tcache and the arena use to
|
||||
* communicate. The tcache fills from and flushes to the arena by passing a
|
||||
* cache_bin_t to fill/flush. When the arena needs to pull stats from the
|
||||
* tcaches associated with it, it does so by iterating over its
|
||||
* cache_bin_array_descriptor_t objects and reading out per-bin stats it
|
||||
* contains. This makes it so that the arena need not know about the existence
|
||||
* of the tcache at all.
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* The count of the number of cached allocations in a bin. We make this signed
|
||||
* so that negative numbers can encode "invalid" states (e.g. a low water mark
|
||||
* of -1 for a cache that has been depleted).
|
||||
*/
|
||||
typedef int32_t cache_bin_sz_t;
|
||||
|
||||
typedef struct cache_bin_stats_s cache_bin_stats_t;
|
||||
struct cache_bin_stats_s {
|
||||
/*
|
||||
* Number of allocation requests that corresponded to the size of this
|
||||
* bin.
|
||||
*/
|
||||
uint64_t nrequests;
|
||||
};
|
||||
|
||||
/*
|
||||
* Read-only information associated with each element of tcache_t's tbins array
|
||||
* is stored separately, mainly to reduce memory usage.
|
||||
*/
|
||||
typedef struct cache_bin_info_s cache_bin_info_t;
|
||||
struct cache_bin_info_s {
|
||||
/* Upper limit on ncached. */
|
||||
cache_bin_sz_t ncached_max;
|
||||
};
|
||||
|
||||
typedef struct cache_bin_s cache_bin_t;
|
||||
struct cache_bin_s {
|
||||
/* Min # cached since last GC. */
|
||||
cache_bin_sz_t low_water;
|
||||
/* # of cached objects. */
|
||||
cache_bin_sz_t ncached;
|
||||
/*
|
||||
* ncached and stats are both modified frequently. Let's keep them
|
||||
* close so that they have a higher chance of being on the same
|
||||
* cacheline, thus less write-backs.
|
||||
*/
|
||||
cache_bin_stats_t tstats;
|
||||
/*
|
||||
* Stack of available objects.
|
||||
*
|
||||
	 * To make use of adjacent cacheline prefetch, the items in the avail
	 * stack go to higher addresses for newer allocations.  avail points
	 * just above the available space, which means that
	 * avail[-ncached, ... -1] are available items and the lowest item will
	 * be allocated first.
|
||||
*/
|
||||
void **avail;
|
||||
};
|
||||
|
||||
typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t;
|
||||
struct cache_bin_array_descriptor_s {
|
||||
/*
|
||||
* The arena keeps a list of the cache bins associated with it, for
|
||||
* stats collection.
|
||||
*/
|
||||
ql_elm(cache_bin_array_descriptor_t) link;
|
||||
/* Pointers to the tcache bins. */
|
||||
cache_bin_t *bins_small;
|
||||
cache_bin_t *bins_large;
|
||||
};
|
||||
|
||||
static inline void
|
||||
cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
|
||||
cache_bin_t *bins_small, cache_bin_t *bins_large) {
|
||||
ql_elm_new(descriptor, link);
|
||||
descriptor->bins_small = bins_small;
|
||||
descriptor->bins_large = bins_large;
|
||||
}
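The descriptor only becomes useful once an arena queues it. A hedged sketch of the registration side follows (the real tcache_arena_associate does more bookkeeping; the helper name is invented, while the tcache bins_small/bins_large fields and the arena's cache_bin_array_descriptor_ql and tcache_ql_mtx come from elsewhere in this diff):

static void
register_cache_bins_sketch(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_array_descriptor_t *descriptor) {
	cache_bin_array_descriptor_init(descriptor, tcache->bins_small,
	    tcache->bins_large);
	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
	ql_tail_insert(&arena->cache_bin_array_descriptor_ql, descriptor,
	    link);
	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}

Stats collection can then ql_foreach over the descriptor list and read each cache_bin_t's tstats without ever dereferencing a tcache_t.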
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE void *
|
||||
cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
|
||||
void *ret;
|
||||
|
||||
if (unlikely(bin->ncached == 0)) {
|
||||
bin->low_water = -1;
|
||||
*success = false;
|
||||
return NULL;
|
||||
}
|
||||
/*
|
||||
* success (instead of ret) should be checked upon the return of this
|
||||
* function. We avoid checking (ret == NULL) because there is never a
|
||||
* null stored on the avail stack (which is unknown to the compiler),
|
||||
* and eagerly checking ret would cause pipeline stall (waiting for the
|
||||
* cacheline).
|
||||
*/
|
||||
*success = true;
|
||||
ret = *(bin->avail - bin->ncached);
|
||||
bin->ncached--;
|
||||
|
||||
if (unlikely(bin->ncached < bin->low_water)) {
|
||||
bin->low_water = bin->ncached;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
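cache_bin_alloc_easy() only pops; the matching push lives in the tcache code, which is not shown in this hunk. A minimal sketch of the push, assuming the stack layout described above (the function name is invented; with this layout the slot for the next cached object is avail[-(ncached + 1)]):

JEMALLOC_ALWAYS_INLINE bool
cache_bin_dalloc_easy_sketch(cache_bin_t *bin, cache_bin_info_t *bin_info,
    void *ptr) {
	if (unlikely(bin->ncached == bin_info->ncached_max)) {
		/* Bin is full; the caller has to flush to the arena first. */
		return false;
	}
	*(bin->avail - bin->ncached - 1) = ptr;
	bin->ncached++;
	return true;
}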
|
|
@ -40,14 +40,15 @@ typedef struct ctl_arena_stats_s {
|
|||
uint64_t ndalloc_small;
|
||||
uint64_t nrequests_small;
|
||||
|
||||
malloc_bin_stats_t bstats[NBINS];
|
||||
malloc_large_stats_t lstats[NSIZES - NBINS];
|
||||
bin_stats_t bstats[NBINS];
|
||||
arena_stats_large_t lstats[NSIZES - NBINS];
|
||||
} ctl_arena_stats_t;
|
||||
|
||||
typedef struct ctl_stats_s {
|
||||
size_t allocated;
|
||||
size_t active;
|
||||
size_t metadata;
|
||||
size_t metadata_thp;
|
||||
size_t resident;
|
||||
size_t mapped;
|
||||
size_t retained;
|
|
@ -0,0 +1,41 @@
#ifndef JEMALLOC_INTERNAL_DIV_H
#define JEMALLOC_INTERNAL_DIV_H

#include "jemalloc/internal/assert.h"

/*
 * This module does the division that computes the index of a region in a slab,
 * given its offset relative to the base.
 * That is, given a divisor d, an n = i * d (all integers), we'll return i.
 * We do some pre-computation to do this more quickly than a CPU division
 * instruction.
 * We bound n < 2^32, and don't support dividing by one.
 */

typedef struct div_info_s div_info_t;
struct div_info_s {
	uint32_t magic;
#ifdef JEMALLOC_DEBUG
	size_t d;
#endif
};

void div_init(div_info_t *div_info, size_t divisor);

static inline size_t
div_compute(div_info_t *div_info, size_t n) {
	assert(n <= (uint32_t)-1);
	/*
	 * This generates, e.g. mov; imul; shr on x86-64. On a 32-bit machine,
	 * the compilers I tried were all smart enough to turn this into the
	 * appropriate "get the high 32 bits of the result of a multiply" (e.g.
	 * mul; mov edx eax; on x86, umull on arm, etc.).
	 */
	size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32;
#ifdef JEMALLOC_DEBUG
	assert(i * div_info->d == n);
#endif
	return i;
}

#endif /* JEMALLOC_INTERNAL_DIV_H */
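div_init() itself lives in div.c, which is not part of the hunks shown here. One way to satisfy the contract documented above is to pick magic = ceil(2^32 / d): for any n = i * d with n < 2^32, (n * magic) >> 32 then recovers i exactly. The sketch below illustrates that choice; the name is invented so as not to imply this is the shipped implementation.

static void
div_init_sketch(div_info_t *div_info, size_t d) {
	/* Dividing by 0 is nonsensical, and d == 1 would overflow magic. */
	assert(d != 0 && d != 1);
	div_info->magic = (uint32_t)((((uint64_t)1 << 32) + d - 1) / d);
#ifdef JEMALLOC_DEBUG
	div_info->d = d;
#endif
}

The intended use, per the comment above, is d = the bin's region size, so div_compute() on an offset within a slab yields the region index without a hardware divide.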
|
|
@ -0,0 +1,435 @@
|
|||
#ifndef JEMALLOC_INTERNAL_EMITTER_H
|
||||
#define JEMALLOC_INTERNAL_EMITTER_H
|
||||
|
||||
#include "jemalloc/internal/ql.h"
|
||||
|
||||
typedef enum emitter_output_e emitter_output_t;
|
||||
enum emitter_output_e {
|
||||
emitter_output_json,
|
||||
emitter_output_table
|
||||
};
|
||||
|
||||
typedef enum emitter_justify_e emitter_justify_t;
|
||||
enum emitter_justify_e {
|
||||
emitter_justify_left,
|
||||
emitter_justify_right,
|
||||
/* Not for users; just to pass to internal functions. */
|
||||
emitter_justify_none
|
||||
};
|
||||
|
||||
typedef enum emitter_type_e emitter_type_t;
|
||||
enum emitter_type_e {
|
||||
emitter_type_bool,
|
||||
emitter_type_int,
|
||||
emitter_type_unsigned,
|
||||
emitter_type_uint32,
|
||||
emitter_type_uint64,
|
||||
emitter_type_size,
|
||||
emitter_type_ssize,
|
||||
emitter_type_string,
|
||||
/*
|
||||
* A title is a column title in a table; it's just a string, but it's
|
||||
* not quoted.
|
||||
*/
|
||||
emitter_type_title,
|
||||
};
|
||||
|
||||
typedef struct emitter_col_s emitter_col_t;
|
||||
struct emitter_col_s {
|
||||
/* Filled in by the user. */
|
||||
emitter_justify_t justify;
|
||||
int width;
|
||||
emitter_type_t type;
|
||||
union {
|
||||
bool bool_val;
|
||||
int int_val;
|
||||
unsigned unsigned_val;
|
||||
uint32_t uint32_val;
|
||||
uint64_t uint64_val;
|
||||
size_t size_val;
|
||||
ssize_t ssize_val;
|
||||
const char *str_val;
|
||||
};
|
||||
|
||||
/* Filled in by initialization. */
|
||||
ql_elm(emitter_col_t) link;
|
||||
};
|
||||
|
||||
typedef struct emitter_row_s emitter_row_t;
|
||||
struct emitter_row_s {
|
||||
ql_head(emitter_col_t) cols;
|
||||
};
|
||||
|
||||
static inline void
|
||||
emitter_row_init(emitter_row_t *row) {
|
||||
ql_new(&row->cols);
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_col_init(emitter_col_t *col, emitter_row_t *row) {
|
||||
ql_elm_new(col, link);
|
||||
ql_tail_insert(&row->cols, col, link);
|
||||
}
|
||||
|
||||
typedef struct emitter_s emitter_t;
|
||||
struct emitter_s {
|
||||
emitter_output_t output;
|
||||
/* The output information. */
|
||||
void (*write_cb)(void *, const char *);
|
||||
void *cbopaque;
|
||||
int nesting_depth;
|
||||
/* True if we've already emitted a value at the given depth. */
|
||||
bool item_at_depth;
|
||||
};
|
||||
|
||||
static inline void
|
||||
emitter_init(emitter_t *emitter, emitter_output_t emitter_output,
|
||||
void (*write_cb)(void *, const char *), void *cbopaque) {
|
||||
emitter->output = emitter_output;
|
||||
emitter->write_cb = write_cb;
|
||||
emitter->cbopaque = cbopaque;
|
||||
emitter->item_at_depth = false;
|
||||
emitter->nesting_depth = 0;
|
||||
}
|
||||
|
||||
/* Internal convenience function. Write to the emitter the given string. */
|
||||
JEMALLOC_FORMAT_PRINTF(2, 3)
|
||||
static inline void
|
||||
emitter_printf(emitter_t *emitter, const char *format, ...) {
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, format);
|
||||
malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
|
||||
va_end(ap);
|
||||
}
|
||||
|
||||
/* Write to the emitter the given string, but only in table mode. */
|
||||
JEMALLOC_FORMAT_PRINTF(2, 3)
|
||||
static inline void
|
||||
emitter_table_printf(emitter_t *emitter, const char *format, ...) {
|
||||
if (emitter->output == emitter_output_table) {
|
||||
va_list ap;
|
||||
va_start(ap, format);
|
||||
malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
|
||||
va_end(ap);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier,
|
||||
emitter_justify_t justify, int width) {
|
||||
size_t written;
|
||||
if (justify == emitter_justify_none) {
|
||||
written = malloc_snprintf(out_fmt, out_size,
|
||||
"%%%s", fmt_specifier);
|
||||
} else if (justify == emitter_justify_left) {
|
||||
written = malloc_snprintf(out_fmt, out_size,
|
||||
"%%-%d%s", width, fmt_specifier);
|
||||
} else {
|
||||
written = malloc_snprintf(out_fmt, out_size,
|
||||
"%%%d%s", width, fmt_specifier);
|
||||
}
|
||||
/* Only happens in case of bad format string, which *we* choose. */
|
||||
assert(written < out_size);
|
||||
}
|
||||
|
||||
/*
|
||||
* Internal. Emit the given value type in the relevant encoding (so that the
|
||||
* bool true gets mapped to json "true", but the string "true" gets mapped to
|
||||
* json "\"true\"", for instance.
|
||||
*
|
||||
* Width is ignored if justify is emitter_justify_none.
|
||||
*/
|
||||
static inline void
|
||||
emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
|
||||
emitter_type_t value_type, const void *value) {
|
||||
size_t str_written;
|
||||
#define BUF_SIZE 256
|
||||
#define FMT_SIZE 10
|
||||
/*
|
||||
* We dynamically generate a format string to emit, to let us use the
|
||||
* snprintf machinery. This is kinda hacky, but gets the job done
|
||||
* quickly without having to think about the various snprintf edge
|
||||
* cases.
|
||||
*/
|
||||
char fmt[FMT_SIZE];
|
||||
char buf[BUF_SIZE];
|
||||
|
||||
#define EMIT_SIMPLE(type, format) \
|
||||
emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width); \
|
||||
emitter_printf(emitter, fmt, *(const type *)value); \
|
||||
|
||||
switch (value_type) {
|
||||
case emitter_type_bool:
|
||||
emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width);
|
||||
emitter_printf(emitter, fmt, *(const bool *)value ?
|
||||
"true" : "false");
|
||||
break;
|
||||
case emitter_type_int:
|
||||
EMIT_SIMPLE(int, "d")
|
||||
break;
|
||||
case emitter_type_unsigned:
|
||||
EMIT_SIMPLE(unsigned, "u")
|
||||
break;
|
||||
case emitter_type_ssize:
|
||||
EMIT_SIMPLE(ssize_t, "zd")
|
||||
break;
|
||||
case emitter_type_size:
|
||||
EMIT_SIMPLE(size_t, "zu")
|
||||
break;
|
||||
case emitter_type_string:
|
||||
str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"",
|
||||
*(const char *const *)value);
|
||||
/*
|
||||
* We control the strings we output; we shouldn't get anything
|
||||
* anywhere near the fmt size.
|
||||
*/
|
||||
assert(str_written < BUF_SIZE);
|
||||
emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width);
|
||||
emitter_printf(emitter, fmt, buf);
|
||||
break;
|
||||
case emitter_type_uint32:
|
||||
EMIT_SIMPLE(uint32_t, FMTu32)
|
||||
break;
|
||||
case emitter_type_uint64:
|
||||
EMIT_SIMPLE(uint64_t, FMTu64)
|
||||
break;
|
||||
case emitter_type_title:
|
||||
EMIT_SIMPLE(char *const, "s");
|
||||
break;
|
||||
default:
|
||||
unreachable();
|
||||
}
|
||||
#undef BUF_SIZE
|
||||
#undef FMT_SIZE
|
||||
}
|
||||
|
||||
|
||||
/* Internal functions. In json mode, tracks nesting state. */
|
||||
static inline void
|
||||
emitter_nest_inc(emitter_t *emitter) {
|
||||
emitter->nesting_depth++;
|
||||
emitter->item_at_depth = false;
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_nest_dec(emitter_t *emitter) {
|
||||
emitter->nesting_depth--;
|
||||
emitter->item_at_depth = true;
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_indent(emitter_t *emitter) {
|
||||
int amount = emitter->nesting_depth;
|
||||
const char *indent_str;
|
||||
if (emitter->output == emitter_output_json) {
|
||||
indent_str = "\t";
|
||||
} else {
|
||||
amount *= 2;
|
||||
indent_str = " ";
|
||||
}
|
||||
for (int i = 0; i < amount; i++) {
|
||||
emitter_printf(emitter, "%s", indent_str);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_key_prefix(emitter_t *emitter) {
|
||||
emitter_printf(emitter, "%s\n", emitter->item_at_depth ? "," : "");
|
||||
emitter_indent(emitter);
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_begin(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
assert(emitter->nesting_depth == 0);
|
||||
emitter_printf(emitter, "{");
|
||||
emitter_nest_inc(emitter);
|
||||
} else {
|
||||
// tabular init
|
||||
emitter_printf(emitter, "%s", "");
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_end(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
assert(emitter->nesting_depth == 1);
|
||||
emitter_nest_dec(emitter);
|
||||
emitter_printf(emitter, "\n}\n");
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Note emits a different kv pair as well, but only in table mode. Omits the
|
||||
* note if table_note_key is NULL.
|
||||
*/
|
||||
static inline void
|
||||
emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key,
|
||||
emitter_type_t value_type, const void *value,
|
||||
const char *table_note_key, emitter_type_t table_note_value_type,
|
||||
const void *table_note_value) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
assert(emitter->nesting_depth > 0);
|
||||
emitter_json_key_prefix(emitter);
|
||||
emitter_printf(emitter, "\"%s\": ", json_key);
|
||||
emitter_print_value(emitter, emitter_justify_none, -1,
|
||||
value_type, value);
|
||||
} else {
|
||||
emitter_indent(emitter);
|
||||
emitter_printf(emitter, "%s: ", table_key);
|
||||
emitter_print_value(emitter, emitter_justify_none, -1,
|
||||
value_type, value);
|
||||
if (table_note_key != NULL) {
|
||||
emitter_printf(emitter, " (%s: ", table_note_key);
|
||||
emitter_print_value(emitter, emitter_justify_none, -1,
|
||||
table_note_value_type, table_note_value);
|
||||
emitter_printf(emitter, ")");
|
||||
}
|
||||
emitter_printf(emitter, "\n");
|
||||
}
|
||||
emitter->item_at_depth = true;
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key,
|
||||
emitter_type_t value_type, const void *value) {
|
||||
emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL,
|
||||
emitter_type_bool, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_kv(emitter_t *emitter, const char *json_key,
|
||||
emitter_type_t value_type, const void *value) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
emitter_kv(emitter, json_key, NULL, value_type, value);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_table_kv(emitter_t *emitter, const char *table_key,
|
||||
emitter_type_t value_type, const void *value) {
|
||||
if (emitter->output == emitter_output_table) {
|
||||
emitter_kv(emitter, NULL, table_key, value_type, value);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_dict_begin(emitter_t *emitter, const char *json_key,
|
||||
const char *table_header) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
emitter_json_key_prefix(emitter);
|
||||
emitter_printf(emitter, "\"%s\": {", json_key);
|
||||
emitter_nest_inc(emitter);
|
||||
} else {
|
||||
emitter_indent(emitter);
|
||||
emitter_printf(emitter, "%s\n", table_header);
|
||||
emitter_nest_inc(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_dict_end(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
assert(emitter->nesting_depth > 0);
|
||||
emitter_nest_dec(emitter);
|
||||
emitter_printf(emitter, "\n");
|
||||
emitter_indent(emitter);
|
||||
emitter_printf(emitter, "}");
|
||||
} else {
|
||||
emitter_nest_dec(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_dict_begin(emitter_t *emitter, const char *json_key) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
emitter_dict_begin(emitter, json_key, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_dict_end(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
emitter_dict_end(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_table_dict_begin(emitter_t *emitter, const char *table_key) {
|
||||
if (emitter->output == emitter_output_table) {
|
||||
emitter_dict_begin(emitter, NULL, table_key);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_table_dict_end(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_table) {
|
||||
emitter_dict_end(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_arr_begin(emitter_t *emitter, const char *json_key) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
emitter_json_key_prefix(emitter);
|
||||
emitter_printf(emitter, "\"%s\": [", json_key);
|
||||
emitter_nest_inc(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_arr_end(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
assert(emitter->nesting_depth > 0);
|
||||
emitter_nest_dec(emitter);
|
||||
emitter_printf(emitter, "\n");
|
||||
emitter_indent(emitter);
|
||||
emitter_printf(emitter, "]");
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_arr_obj_begin(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
emitter_json_key_prefix(emitter);
|
||||
emitter_printf(emitter, "{");
|
||||
emitter_nest_inc(emitter);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_arr_obj_end(emitter_t *emitter) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
assert(emitter->nesting_depth > 0);
|
||||
emitter_nest_dec(emitter);
|
||||
emitter_printf(emitter, "\n");
|
||||
emitter_indent(emitter);
|
||||
emitter_printf(emitter, "}");
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_json_arr_value(emitter_t *emitter, emitter_type_t value_type,
|
||||
const void *value) {
|
||||
if (emitter->output == emitter_output_json) {
|
||||
emitter_json_key_prefix(emitter);
|
||||
emitter_print_value(emitter, emitter_justify_none, -1,
|
||||
value_type, value);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
emitter_table_row(emitter_t *emitter, emitter_row_t *row) {
|
||||
if (emitter->output != emitter_output_table) {
|
||||
return;
|
||||
}
|
||||
emitter_col_t *col;
|
||||
ql_foreach(col, &row->cols, link) {
|
||||
emitter_print_value(emitter, col->justify, col->width,
|
||||
col->type, (const void *)&col->bool_val);
|
||||
}
|
||||
emitter_table_printf(emitter, "\n");
|
||||
}
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EMITTER_H */
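To make the emitter's flow concrete, here is a hedged usage sketch (not part of the commit; write_to_stderr and emit_example are invented names, and <stdio.h> is assumed). In json mode it prints a {"jemalloc": {...}} object; in table mode, an indented key/value listing.

static void
write_to_stderr(void *cbopaque, const char *s) {
	(void)cbopaque;
	fputs(s, stderr);	/* assumes <stdio.h> */
}

static void
emit_example(emitter_output_t output) {
	emitter_t emitter;
	const char *version = "5.1.0";
	bool background_thread = true;

	emitter_init(&emitter, output, write_to_stderr, NULL);
	emitter_begin(&emitter);
	emitter_dict_begin(&emitter, "jemalloc", "jemalloc options:");
	emitter_kv(&emitter, "version", "version", emitter_type_string,
	    &version);
	emitter_kv(&emitter, "background_thread", "background_thread",
	    emitter_type_bool, &background_thread);
	emitter_dict_end(&emitter);
	emitter_end(&emitter);
}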
@ -4,12 +4,13 @@
|
|||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/mutex_pool.h"
|
||||
#include "jemalloc/internal/ph.h"
|
||||
#include "jemalloc/internal/rb.h"
|
||||
#include "jemalloc/internal/rtree.h"
|
||||
|
||||
extern rtree_t extents_rtree;
|
||||
extern const extent_hooks_t extent_hooks_default;
|
||||
extern mutex_pool_t extent_mutex_pool;
|
||||
extern size_t opt_lg_extent_max_active_fit;
|
||||
|
||||
extern rtree_t extents_rtree;
|
||||
extern const extent_hooks_t extent_hooks_default;
|
||||
extern mutex_pool_t extent_mutex_pool;
|
||||
|
||||
extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
|
||||
void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
|
|
@ -93,6 +93,12 @@ extent_committed_get(const extent_t *extent) {
|
|||
EXTENT_BITS_COMMITTED_SHIFT);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
extent_dumpable_get(const extent_t *extent) {
|
||||
return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
|
||||
EXTENT_BITS_DUMPABLE_SHIFT);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
extent_slab_get(const extent_t *extent) {
|
||||
return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
|
||||
|
@ -184,15 +190,22 @@ extent_addr_set(extent_t *extent, void *addr) {
|
|||
}
|
||||
|
||||
static inline void
|
||||
extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
|
||||
extent_addr_randomize(UNUSED tsdn_t *tsdn, extent_t *extent, size_t alignment) {
|
||||
assert(extent_base_get(extent) == extent_addr_get(extent));
|
||||
|
||||
if (alignment < PAGE) {
|
||||
unsigned lg_range = LG_PAGE -
|
||||
lg_floor(CACHELINE_CEILING(alignment));
|
||||
size_t r =
|
||||
prng_lg_range_zu(&extent_arena_get(extent)->offset_state,
|
||||
lg_range, true);
|
||||
size_t r;
|
||||
if (!tsdn_null(tsdn)) {
|
||||
tsd_t *tsd = tsdn_tsd(tsdn);
|
||||
r = (size_t)prng_lg_range_u64(
|
||||
tsd_offset_statep_get(tsd), lg_range);
|
||||
} else {
|
||||
r = prng_lg_range_zu(
|
||||
&extent_arena_get(extent)->offset_state,
|
||||
lg_range, true);
|
||||
}
|
||||
uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
|
||||
lg_range);
|
||||
extent->e_addr = (void *)((uintptr_t)extent->e_addr +
|
||||
|
@ -269,6 +282,12 @@ extent_committed_set(extent_t *extent, bool committed) {
|
|||
((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_dumpable_set(extent_t *extent, bool dumpable) {
|
||||
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
|
||||
((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_slab_set(extent_t *extent, bool slab) {
|
||||
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
|
||||
|
@ -283,7 +302,7 @@ extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
|
|||
static inline void
|
||||
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
|
||||
bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
|
||||
bool committed) {
|
||||
bool committed, bool dumpable) {
|
||||
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
|
||||
|
||||
extent_arena_set(extent, arena);
|
||||
|
@ -295,6 +314,7 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
|
|||
extent_state_set(extent, state);
|
||||
extent_zeroed_set(extent, zeroed);
|
||||
extent_committed_set(extent, committed);
|
||||
extent_dumpable_set(extent, dumpable);
|
||||
ql_elm_new(extent, ql_link);
|
||||
if (config_prof) {
|
||||
extent_prof_tctx_set(extent, NULL);
|
||||
|
@ -312,6 +332,7 @@ extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
|
|||
extent_state_set(extent, extent_state_active);
|
||||
extent_zeroed_set(extent, true);
|
||||
extent_committed_set(extent, true);
|
||||
extent_dumpable_set(extent, true);
|
||||
}
|
||||
|
||||
static inline void
|
||||
|
@ -334,6 +355,11 @@ extent_list_append(extent_list_t *list, extent_t *extent) {
|
|||
ql_tail_insert(list, extent, ql_link);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_list_prepend(extent_list_t *list, extent_t *extent) {
|
||||
ql_head_insert(list, extent, ql_link);
|
||||
}
|
||||
|
||||
static inline void
|
||||
extent_list_replace(extent_list_t *list, extent_t *to_remove,
|
||||
extent_t *to_insert) {
|
|
@ -5,7 +5,6 @@
|
|||
#include "jemalloc/internal/bitmap.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
#include "jemalloc/internal/ql.h"
|
||||
#include "jemalloc/internal/rb.h"
|
||||
#include "jemalloc/internal/ph.h"
|
||||
#include "jemalloc/internal/size_classes.h"
|
||||
|
||||
|
@ -24,13 +23,14 @@ struct extent_s {
|
|||
* a: arena_ind
|
||||
* b: slab
|
||||
* c: committed
|
||||
* d: dumpable
|
||||
* z: zeroed
|
||||
* t: state
|
||||
* i: szind
|
||||
* f: nfree
|
||||
* n: sn
|
||||
*
|
||||
* nnnnnnnn ... nnnnnfff fffffffi iiiiiiit tzcbaaaa aaaaaaaa
|
||||
* nnnnnnnn ... nnnnffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
|
||||
*
|
||||
* arena_ind: Arena from which this extent came, or all 1 bits if
|
||||
* unassociated.
|
||||
|
@ -45,6 +45,23 @@ struct extent_s {
|
|||
* as on a system that overcommits and satisfies physical
|
||||
* memory needs on demand via soft page faults.
|
||||
*
|
||||
* dumpable: The dumpable flag indicates whether or not we've set the
|
||||
* memory in question to be dumpable. Note that this
|
||||
* interacts somewhat subtly with user-specified extent hooks,
|
||||
* since we don't know if *they* are fiddling with
|
||||
* dumpability (in which case, we don't want to undo whatever
|
||||
* they're doing). To deal with this scenario, we:
|
||||
* - Make dumpable false only for memory allocated with the
|
||||
* default hooks.
|
||||
* - Only allow memory to go from non-dumpable to dumpable,
|
||||
* and only once.
|
||||
* - Never make the OS call to allow dumping when the
|
||||
* dumpable bit is already set.
|
||||
* These three constraints mean that we will never
|
||||
* accidentally dump user memory that the user meant to set
|
||||
* nondumpable with their extent hooks.
|
||||
*
|
||||
*
|
||||
* zeroed: The zeroed flag is used by extent recycling code to track
|
||||
* whether memory is zero-filled.
|
||||
*
|
||||
|
@ -69,38 +86,42 @@ struct extent_s {
|
|||
* serial number to both resulting adjacent extents.
|
||||
*/
|
||||
uint64_t e_bits;
|
||||
#define EXTENT_BITS_ARENA_SHIFT 0
|
||||
#define EXTENT_BITS_ARENA_MASK \
|
||||
(((uint64_t)(1U << MALLOCX_ARENA_BITS) - 1) << EXTENT_BITS_ARENA_SHIFT)
|
||||
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
|
||||
|
||||
#define EXTENT_BITS_SLAB_SHIFT MALLOCX_ARENA_BITS
|
||||
#define EXTENT_BITS_SLAB_MASK \
|
||||
((uint64_t)0x1U << EXTENT_BITS_SLAB_SHIFT)
|
||||
#define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
|
||||
#define EXTENT_BITS_ARENA_SHIFT 0
|
||||
#define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_COMMITTED_SHIFT (MALLOCX_ARENA_BITS + 1)
|
||||
#define EXTENT_BITS_COMMITTED_MASK \
|
||||
((uint64_t)0x1U << EXTENT_BITS_COMMITTED_SHIFT)
|
||||
#define EXTENT_BITS_SLAB_WIDTH 1
|
||||
#define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
|
||||
#define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_ZEROED_SHIFT (MALLOCX_ARENA_BITS + 2)
|
||||
#define EXTENT_BITS_ZEROED_MASK \
|
||||
((uint64_t)0x1U << EXTENT_BITS_ZEROED_SHIFT)
|
||||
#define EXTENT_BITS_COMMITTED_WIDTH 1
|
||||
#define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
|
||||
#define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_STATE_SHIFT (MALLOCX_ARENA_BITS + 3)
|
||||
#define EXTENT_BITS_STATE_MASK \
|
||||
((uint64_t)0x3U << EXTENT_BITS_STATE_SHIFT)
|
||||
#define EXTENT_BITS_DUMPABLE_WIDTH 1
|
||||
#define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
|
||||
#define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_SZIND_SHIFT (MALLOCX_ARENA_BITS + 5)
|
||||
#define EXTENT_BITS_SZIND_MASK \
|
||||
(((uint64_t)(1U << LG_CEIL_NSIZES) - 1) << EXTENT_BITS_SZIND_SHIFT)
|
||||
#define EXTENT_BITS_ZEROED_WIDTH 1
|
||||
#define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
|
||||
#define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_NFREE_SHIFT \
|
||||
(MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES)
|
||||
#define EXTENT_BITS_NFREE_MASK \
|
||||
((uint64_t)((1U << (LG_SLAB_MAXREGS + 1)) - 1) << EXTENT_BITS_NFREE_SHIFT)
|
||||
#define EXTENT_BITS_STATE_WIDTH 2
|
||||
#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
|
||||
#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_SN_SHIFT \
|
||||
(MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES + (LG_SLAB_MAXREGS + 1))
|
||||
#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
|
||||
#define EXTENT_BITS_SZIND_WIDTH LG_CEIL_NSIZES
|
||||
#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
|
||||
#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_NFREE_WIDTH (LG_SLAB_MAXREGS + 1)
|
||||
#define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
|
||||
#define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)
|
||||
|
||||
#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
|
||||
#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
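Every accessor follows the same pattern against these masks; the new extent_dumpable_get()/extent_dumpable_set() earlier in this commit are one instance. As a generic illustration of the width/shift chaining (the _sketch names are invented; the shipped nfree accessors are not shown in this hunk and may carry extra assertions):

static inline unsigned
extent_nfree_get_sketch(const extent_t *extent) {
	return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
	    EXTENT_BITS_NFREE_SHIFT);
}

static inline void
extent_nfree_set_sketch(extent_t *extent, unsigned nfree) {
	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
	    ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
}

Because each SHIFT is defined as the previous field's WIDTH plus SHIFT, inserting the new dumpable bit between committed and zeroed only required adding its own WIDTH/SHIFT/MASK lines; the fields above it renumber themselves.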
|
||||
|
||||
/* Pointer to the extent that this structure is responsible for. */
|
||||
void *e_addr;
|
||||
|
@ -120,20 +141,19 @@ struct extent_s {
|
|||
size_t e_bsize;
|
||||
};
|
||||
|
||||
union {
|
||||
/*
|
||||
* List linkage, used by a variety of lists:
|
||||
* - arena_bin_t's slabs_full
|
||||
* - extents_t's LRU
|
||||
* - stashed dirty extents
|
||||
* - arena's large allocations
|
||||
*/
|
||||
ql_elm(extent_t) ql_link;
|
||||
/* Red-black tree linkage, used by arena's extent_avail. */
|
||||
rb_node(extent_t) rb_link;
|
||||
};
|
||||
/*
|
||||
* List linkage, used by a variety of lists:
|
||||
* - bin_t's slabs_full
|
||||
* - extents_t's LRU
|
||||
* - stashed dirty extents
|
||||
* - arena's large allocations
|
||||
*/
|
||||
ql_elm(extent_t) ql_link;
|
||||
|
||||
/* Linkage for per size class sn/address-ordered heaps. */
|
||||
/*
|
||||
* Linkage for per size class sn/address-ordered heaps, and
|
||||
* for extent_avail
|
||||
*/
|
||||
phn(extent_t) ph_link;
|
||||
|
||||
union {
|
||||
|
@ -148,7 +168,7 @@ struct extent_s {
|
|||
};
|
||||
};
|
||||
typedef ql_head(extent_t) extent_list_t;
|
||||
typedef rb_tree(extent_t) extent_tree_t;
|
||||
typedef ph(extent_t) extent_tree_t;
|
||||
typedef ph(extent_t) extent_heap_t;
|
||||
|
||||
/* Quantized collection of extents, with built-in LRU queue. */
|
|
@ -0,0 +1,17 @@
|
|||
#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
|
||||
#define JEMALLOC_INTERNAL_EXTENT_TYPES_H
|
||||
|
||||
typedef struct extent_s extent_t;
|
||||
typedef struct extents_s extents_t;
|
||||
|
||||
#define EXTENT_HOOKS_INITIALIZER NULL
|
||||
|
||||
#define EXTENT_GROW_MAX_PIND (NPSIZES - 1)
|
||||
|
||||
/*
 * When reusing (and splitting) an active extent, (1U << opt_lg_extent_max_active_fit)
 * is the max ratio between the size of the active extent and the new extent.
 */
|
||||
#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
|
||||
|
||||
#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
|
|
@ -260,22 +260,22 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t k2 = 0;

switch (len & 15) {
case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
case 15: k2 ^= ((uint64_t)(tail[14])) << 48; /* falls through */
case 14: k2 ^= ((uint64_t)(tail[13])) << 40; /* falls through */
case 13: k2 ^= ((uint64_t)(tail[12])) << 32; /* falls through */
case 12: k2 ^= ((uint64_t)(tail[11])) << 24; /* falls through */
case 11: k2 ^= ((uint64_t)(tail[10])) << 16; /* falls through */
case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; /* falls through */
case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;

case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
/* falls through */
case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; /* falls through */
case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; /* falls through */
case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; /* falls through */
case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; /* falls through */
case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; /* falls through */
case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; /* falls through */
case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; /* falls through */
case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
}
@ -5,7 +5,16 @@
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"

# ifdef _WIN64
# if LG_VADDR <= 32
# error Generate the headers using x64 vcargs
# endif
# else
# if LG_VADDR > 32
# undef LG_VADDR
# define LG_VADDR 32
# endif
# endif
#else
# include <sys/param.h>
# include <sys/mman.h>
@ -33,6 +33,8 @@
 * order to yield to another virtual CPU.
 */
#undef CPU_SPINWAIT
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#undef HAVE_CPU_SPINWAIT

/*
 * Number of significant bits in virtual addresses. This may be less than the
@ -237,6 +239,12 @@
 */
#undef JEMALLOC_CACHE_OBLIVIOUS

/*
 * If defined, enable logging facilities. We make this a configure option to
 * avoid taking extra branches everywhere.
 */
#undef JEMALLOC_LOG

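A minimal sketch of the pattern that comment describes, using a hypothetical LOG macro rather than jemalloc's actual log implementation: when the facility is configured out, the macro expands to nothing, so call sites carry no branch at all.

#include <stdio.h>

#ifdef JEMALLOC_LOG
#  define LOG(msg) fprintf(stderr, "log: %s\n", (msg))
#else
#  define LOG(msg) ((void)0)   /* compiled out: no branch, no cost */
#endif

int main(void) {
	LOG("arena created");      /* a no-op unless logging is configured in */
	return 0;
}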
/*
 * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
 */
@ -254,6 +262,12 @@
/* Defined if madvise(2) is available. */
#undef JEMALLOC_HAVE_MADVISE

/*
 * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
 * arguments to madvise(2).
 */
#undef JEMALLOC_HAVE_MADVISE_HUGE

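For context on the madvise(2) calls these macros gate, a minimal Linux-only sketch (illustration only; the helper name and error handling are assumptions, not jemalloc code):

#define _DEFAULT_SOURCE
#include <sys/mman.h>
#include <stddef.h>

int request_thp(void *addr, size_t len) {
	/* Ask the kernel to back this range with transparent huge pages;
	 * MADV_NOHUGEPAGE requests the opposite. Returns 0 on success. */
	return madvise(addr, len, MADV_HUGEPAGE);
}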
/*
 * Methods for purging unused pages differ between operating systems.
 *
@ -271,6 +285,14 @@
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS

/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
#undef JEMALLOC_DEFINE_MADVISE_FREE

/*
 * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
 */
#undef JEMALLOC_MADVISE_DONTDUMP

/*
 * Defined if transparent huge pages (THPs) are supported via the
 * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
@ -336,4 +358,9 @@
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#undef JEMALLOC_IS_MALLOC

/*
 * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
 */
#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
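To illustrate the distinction that macro captures (a sketch, not jemalloc code): with _GNU_SOURCE on glibc, strerror_r returns char * and may ignore the caller's buffer, whereas the XSI/POSIX variant returns int and always writes into the buffer.

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void) {
	char buf[128];
	/* GNU variant: use the returned pointer, which may or may not be buf. */
	const char *msg = strerror_r(ERANGE, buf, sizeof(buf));
	printf("%s\n", msg);
	return 0;
}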
@ -106,16 +106,16 @@ decay_ticker_get(tsd_t *tsd, unsigned ind) {
return &tdata->decay_ticker;
}

JEMALLOC_ALWAYS_INLINE tcache_bin_t *
JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
assert(binind < NBINS);
return &tcache->tbins_small[binind];
return &tcache->bins_small[binind];
}

JEMALLOC_ALWAYS_INLINE tcache_bin_t *
JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
assert(binind >= NBINS && binind < nhbins);
return &tcache->tbins_large[binind - NBINS];
return &tcache->bins_large[binind - NBINS];
}

JEMALLOC_ALWAYS_INLINE bool
@ -151,6 +151,7 @@ pre_reentrancy(tsd_t *tsd, arena_t *arena) {
assert(arena != arena_get(tsd_tsdn(tsd), 0, false));

bool fast = tsd_fast(tsd);
assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
++*tsd_reentrancy_levelp_get(tsd);
if (fast) {
/* Prepare slow path for reentrancy. */
@ -5,6 +5,24 @@
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/witness.h"

/*
 * Translating the names of the 'i' functions:
 * Abbreviations used in the first part of the function name (before
 * alloc/dalloc) describe what that function accomplishes:
 * a: arena (query)
 * s: size (query, or sized deallocation)
 * e: extent (query)
 * p: aligned (allocates)
 * vs: size (query, without knowing that the pointer is into the heap)
 * r: rallocx implementation
 * x: xallocx implementation
 * Abbreviations used in the second part of the function name (after
 * alloc/dalloc) describe the arguments it takes
 * z: whether to return zeroed memory
 * t: accepts a tcache_t * parameter
 * m: accepts an arena_t * parameter
 */

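Reading that table against names that appear in this diff, a decoding sketch (the breakdown is inferred from the comment above, not stated elsewhere):

/*
 * Decoding examples (illustrative only):
 *   iaalloc   -> i + a + alloc        : arena query for an existing pointer
 *                                       (defined just below).
 *   isalloc   -> i + s + alloc        : size query for an existing pointer.
 *   iallocztm -> i + alloc + z, t, m  : allocate, taking a zero flag, a
 *                                       tcache_t * and an arena_t * argument,
 *                                       matching the parameters visible in
 *                                       the iallocztm hunk further down.
 */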
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(tsdn_t *tsdn, const void *ptr) {
assert(ptr != NULL);
@ -27,8 +45,10 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
assert(size != 0);
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena == NULL || arena_is_auto(arena));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
}

ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
if (config_stats && is_internal && likely(ret != NULL)) {
@ -91,7 +111,8 @@ idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
if (config_stats && is_internal) {
arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
}
if (!is_internal && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
if (!is_internal && !tsdn_null(tsdn) &&
tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
assert(tcache == NULL);
}
arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
Some files were not shown because too many files have changed in this diff.