Merge branch 'dev'
diff --git a/.gitignore b/.gitignore
index b468186..d6fa8fd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,10 @@
 /jemalloc/config.log
 /jemalloc/config.status
 /jemalloc/configure
+/jemalloc/doc/html.xsl
+/jemalloc/doc/manpages.xsl
+/jemalloc/doc/jemalloc.xml
+/jemalloc/doc/jemalloc.html
 /jemalloc/doc/jemalloc.3
 /jemalloc/lib/
 /jemalloc/Makefile
@@ -13,4 +17,7 @@
 /jemalloc/src/*.[od]
 /jemalloc/test/*.[od]
 /jemalloc/test/*.out
+/jemalloc/test/[a-z]*
+!/jemalloc/test/*.c
+!/jemalloc/test/*.exp
 /jemalloc/VERSION
diff --git a/jemalloc/ChangeLog b/jemalloc/ChangeLog
index 7b7da78..e32a588 100644
--- a/jemalloc/ChangeLog
+++ b/jemalloc/ChangeLog
@@ -6,6 +6,23 @@
     http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
     git://canonware.com/jemalloc.git
 
+* 2.1.0
+
+  This version incorporates some optimizations that can't quite be considered
+  bug fixes.
+
+  New features:
+  - Use Linux's mremap(2) for huge object reallocation when possible.
+  - Avoid locking in mallctl*() when possible.
+  - Add the "thread.[de]allocatedp" mallctl's.
+  - Convert the manual page source from roff to DocBook, and generate both roff
+    and HTML manuals.
+
+  Bug fixes:
+  - Fix a crash due to incorrect bootstrap ordering.  This only impacted
+    --enable-debug --enable-dss configurations.
+  - Fix a minor statistics bug for mallctl("swap.avail", ...).
+
 * 2.0.1
 
   Bug fixes:
diff --git a/jemalloc/INSTALL b/jemalloc/INSTALL
index fafd788..b77ebfd 100644
--- a/jemalloc/INSTALL
+++ b/jemalloc/INSTALL
@@ -132,8 +132,11 @@
 --disable-tls
     Disable thread-local storage (TLS), which allows for fast access to
     thread-local variables via the __thread keyword.  If TLS is available,
-    jemalloc uses it for several purposes.  Note that disabling TLS implies
-    --disable-tcache.
+    jemalloc uses it for several purposes.
+
+--with-xslroot=<path>
+    Specify where to find DocBook XSL stylesheets when building the
+    documentation.
 
 The following environment variables (not a definitive list) impact configure's
 behavior:
@@ -172,7 +175,7 @@
     install_bin
     install_include
     install_lib
-    install_man
+    install_doc
 
 To clean up build results to varying degrees, use the following make targets:
 
@@ -232,11 +235,12 @@
 
 === Documentation ==============================================================
 
-The manual page that the configure script generates can be manually formatted
+The manual page is generated in both html and roff formats.  Any web browser
+can be used to view the html manual.  The roff manual page can be formatted
 prior to installation via any of the following commands:
 
-    nroff -man -man-ext -t doc/jemalloc.3
+    nroff -man -t doc/jemalloc.3
 
-    groff -man -man-ext -t -Tps doc/jemalloc.3 | ps2pdf - doc/jemalloc.3.pdf
+    groff -man -t -Tps doc/jemalloc.3 | ps2pdf - doc/jemalloc.3.pdf
 
     (cd doc; groff -man -man-ext -t -Thtml jemalloc.3 > jemalloc.3.html)
diff --git a/jemalloc/Makefile.in b/jemalloc/Makefile.in
index 46eddf4..ee674b3 100644
--- a/jemalloc/Makefile.in
+++ b/jemalloc/Makefile.in
@@ -15,6 +15,7 @@
 BINDIR := $(DESTDIR)@BINDIR@
 INCLUDEDIR := $(DESTDIR)@INCLUDEDIR@
 LIBDIR := $(DESTDIR)@LIBDIR@
+DATADIR := $(DESTDIR)@DATADIR@
 MANDIR := $(DESTDIR)@MANDIR@
 
 # Build parameters.
@@ -58,15 +59,34 @@
 	@objroot@lib/libjemalloc@install_suffix@.$(SO) \
 	@objroot@lib/libjemalloc@install_suffix@_pic.a
 MAN3 := @objroot@doc/jemalloc@install_suffix@.3
+DOCS_XML := @objroot@doc/jemalloc@install_suffix@.xml
+DOCS_HTML := $(DOCS_XML:@objroot@%.xml=@srcroot@%.html)
+DOCS_MAN3 := $(DOCS_XML:@objroot@%.xml=@srcroot@%.3)
+DOCS := $(DOCS_HTML) $(DOCS_MAN3)
 CTESTS := @srcroot@test/allocated.c @srcroot@test/allocm.c \
-	@srcroot@test/posix_memalign.c \
+	@srcroot@test/mremap.c @srcroot@test/posix_memalign.c \
 	@srcroot@test/rallocm.c @srcroot@test/thread_arena.c
 
-.PHONY: all dist install check clean distclean relclean
+.PHONY: all dist doc_html doc_man doc
+.PHONY: install_bin install_include install_lib
+.PHONY: install_html install_man install_doc install
+.PHONY: tests check clean distclean relclean
 
 # Default target.
 all: $(DSOS)
 
+dist: doc
+
+@srcroot@doc/%.html : @objroot@doc/%.xml @srcroot@doc/stylesheet.xsl @objroot@doc/html.xsl
+	@XSLTPROC@ -o $@ @objroot@doc/html.xsl $<
+
+@srcroot@doc/%.3 : @objroot@doc/%.xml @srcroot@doc/stylesheet.xsl @objroot@doc/manpages.xsl
+	@XSLTPROC@ -o $@ @objroot@doc/manpages.xsl $<
+
+doc_html: $(DOCS_HTML)
+doc_man: $(DOCS_MAN3)
+doc: $(DOCS)
+
 #
 # Include generated dependency files.
 #
@@ -123,14 +143,23 @@
 	ln -sf libjemalloc@install_suffix@.$(SO).$(REV) $(LIBDIR)/libjemalloc@install_suffix@.$(SO)
 	install -m 755 @objroot@lib/libjemalloc@install_suffix@_pic.a $(LIBDIR)
 
-install_man:
-	install -d $(MANDIR)/man3
-	@for m in $(MAN3); do \
-	echo "install -m 644 $$m $(MANDIR)/man3"; \
-	install -m 644 $$m $(MANDIR)/man3; \
+install_html:
+	install -d $(DATADIR)/doc/jemalloc@install_suffix@
+	@for d in $(DOCS_HTML); do \
+	echo "install -m 644 $$d $(DATADIR)/doc/jemalloc@install_suffix@"; \
+	install -m 644 $$d $(DATADIR)/doc/jemalloc@install_suffix@; \
 done
 
-install: install_bin install_include install_lib install_man
+install_man:
+	install -d $(MANDIR)/man3
+	@for d in $(DOCS_MAN3); do \
+	echo "install -m 644 $$d $(MANDIR)/man3"; \
+	install -m 644 $$d $(MANDIR)/man3; \
+done
+
+install_doc: install_html install_man
+
+install: install_bin install_include install_lib install_doc
 
 tests: $(CTESTS:@srcroot@%.c=@objroot@%)
 
@@ -182,6 +211,8 @@
 relclean: distclean
 	rm -f @objroot@configure
 	rm -f @srcroot@VERSION
+	rm -f $(DOCS_HTML)
+	rm -f $(DOCS_MAN3)
 
 #===============================================================================
 # Re-configuration rules.
diff --git a/jemalloc/configure.ac b/jemalloc/configure.ac
index 0ed1373..46a2bd4 100644
--- a/jemalloc/configure.ac
+++ b/jemalloc/configure.ac
@@ -80,6 +80,19 @@
 MANDIR=`eval echo $MANDIR`
 AC_SUBST([MANDIR])
 
+dnl Support for building documentation.
+AC_PATH_PROG([XSLTPROC], [xsltproc], , [$PATH])
+AC_ARG_WITH([xslroot],
+  [AS_HELP_STRING([--with-xslroot=<path>], [XSL stylesheet root path])],
+if test "x$with_xslroot" = "xno" ; then
+  XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl"
+else
+  XSLROOT="${with_xslroot}"
+fi,
+  XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl"
+)
+AC_SUBST([XSLROOT])
+
 dnl If CFLAGS isn't defined, set CFLAGS to something reasonable.  Otherwise,
 dnl just prevent autoconf from molesting CFLAGS.
 CFLAGS=$CFLAGS
@@ -214,6 +227,16 @@
 AC_SUBST([abi])
 AC_SUBST([RPATH])
 
+JE_COMPILABLE([mremap(...MREMAP_FIXED...)], [
+#define _GNU_SOURCE
+#include <sys/mman.h>
+], [
+void *p = mremap((void *)0, 0, 0, MREMAP_MAYMOVE|MREMAP_FIXED, (void *)0);
+], [mremap_fixed])
+if test "x${mremap_fixed}" = "xyes" ; then
+  AC_DEFINE([JEMALLOC_MREMAP_FIXED])
+fi
+
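
The probe above only checks that mremap(2) with MREMAP_MAYMOVE|MREMAP_FIXED compiles; when it succeeds, JEMALLOC_MREMAP_FIXED is defined so huge-object reallocation can relocate pages rather than copy them. The following C sketch is illustrative only (it is not jemalloc's internal code), showing the call shape the check exercises:

    #define _GNU_SOURCE
    #include <stddef.h>
    #include <sys/mman.h>

    #ifdef JEMALLOC_MREMAP_FIXED
    /* Move an existing mapping into a pre-reserved, suitably aligned region. */
    static void *
    relocate_mapping(void *old, size_t oldsize, size_t newsize, void *newaddr)
    {
    	return mremap(old, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
    	    newaddr);
    }
    #endif
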
 dnl Support optional additions to rpath.
 AC_ARG_WITH([rpath],
   [AS_HELP_STRING([--with-rpath=<rpath>], [Colon-separated rpath (ELF systems only)])],
@@ -275,17 +298,26 @@
 install_suffix="$INSTALL_SUFFIX"
 AC_SUBST([install_suffix])
 
-cfgoutputs_in="${srcroot}Makefile.in ${srcroot}doc/jemalloc.3.in"
+cfgoutputs_in="${srcroot}Makefile.in"
+cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/html.xsl.in"
+cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/manpages.xsl.in"
+cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/jemalloc.xml.in"
 cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc.h.in"
 cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal.h.in"
 cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/jemalloc_test.h.in"
 
-cfgoutputs_out="Makefile doc/jemalloc${install_suffix}.3"
+cfgoutputs_out="Makefile"
+cfgoutputs_out="${cfgoutputs_out} doc/html.xsl"
+cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl"
+cfgoutputs_out="${cfgoutputs_out} doc/jemalloc${install_suffix}.xml"
 cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc${install_suffix}.h"
 cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h"
 cfgoutputs_out="${cfgoutputs_out} test/jemalloc_test.h"
 
-cfgoutputs_tup="Makefile doc/jemalloc${install_suffix}.3:doc/jemalloc.3.in"
+cfgoutputs_tup="Makefile"
+cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc${install_suffix}.xml:doc/jemalloc.xml.in"
 cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc${install_suffix}.h:include/jemalloc/jemalloc.h.in"
 cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h"
 cfgoutputs_tup="${cfgoutputs_tup} test/jemalloc_test.h:test/jemalloc_test.h.in"
@@ -329,15 +361,6 @@
   AC_DEFINE([JEMALLOC_IVSALLOC], [ ])
 fi
 AC_SUBST([enable_debug])
-if test "x$enable_debug" = "x0" ; then
-  roff_debug=".\\\" "
-  roff_no_debug=""
-else
-  roff_debug=""
-  roff_no_debug=".\\\" "
-fi
-AC_SUBST([roff_debug])
-AC_SUBST([roff_no_debug])
 
 dnl Only optimize if not debugging.
 if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then
@@ -369,12 +392,6 @@
   AC_DEFINE([JEMALLOC_STATS], [ ])
 fi
 AC_SUBST([enable_stats])
-if test "x$enable_stats" = "x0" ; then
-  roff_stats=".\\\" "
-else
-  roff_stats=""
-fi
-AC_SUBST([roff_stats])
 
 dnl Do not enable profiling by default.
 AC_ARG_ENABLE([prof],
@@ -438,15 +455,6 @@
   fi
 fi
 AC_SUBST([enable_prof])
-if test "x$enable_prof" = "x0" ; then
-  roff_prof=".\\\" "
-  roff_no_prof=""
-else
-  roff_prof=""
-  roff_no_prof=".\\\" "
-fi
-AC_SUBST([roff_prof])
-AC_SUBST([roff_no_prof])
 
 dnl If libunwind isn't enabled, try to use libgcc rather than gcc intrinsics
 dnl for backtracing.
@@ -478,15 +486,6 @@
   AC_DEFINE([JEMALLOC_TINY], [ ])
 fi
 AC_SUBST([enable_tiny])
-if test "x$enable_tiny" = "x0" ; then
-  roff_tiny=".\\\" "
-  roff_no_tiny=""
-else
-  roff_tiny=""
-  roff_no_tiny=".\\\" "
-fi
-AC_SUBST([roff_tiny])
-AC_SUBST([roff_no_tiny])
 
 dnl Enable thread-specific caching by default.
 AC_ARG_ENABLE([tcache],
@@ -503,15 +502,6 @@
   AC_DEFINE([JEMALLOC_TCACHE], [ ])
 fi
 AC_SUBST([enable_tcache])
-if test "x$enable_tcache" = "x0" ; then
-  roff_tcache=".\\\" "
-  roff_no_tcache=""
-else
-  roff_tcache=""
-  roff_no_tcache=".\\\" "
-fi
-AC_SUBST([roff_tcache])
-AC_SUBST([roff_no_tcache])
 
 dnl Do not enable mmap()ped swap files by default.
 AC_ARG_ENABLE([swap],
@@ -528,12 +518,6 @@
   AC_DEFINE([JEMALLOC_SWAP], [ ])
 fi
 AC_SUBST([enable_swap])
-if test "x$enable_swap" = "x0" ; then
-  roff_swap=".\\\" "
-else
-  roff_swap=""
-fi
-AC_SUBST([roff_swap])
 
 dnl Do not enable allocation from DSS by default.
 AC_ARG_ENABLE([dss],
@@ -550,12 +534,6 @@
   AC_DEFINE([JEMALLOC_DSS], [ ])
 fi
 AC_SUBST([enable_dss])
-if test "x$enable_dss" = "x0" ; then
-  roff_dss=".\\\" "
-else
-  roff_dss=""
-fi
-AC_SUBST([roff_dss])
 
 dnl Do not support the junk/zero filling option by default.
 AC_ARG_ENABLE([fill],
@@ -572,12 +550,6 @@
   AC_DEFINE([JEMALLOC_FILL], [ ])
 fi
 AC_SUBST([enable_fill])
-if test "x$enable_fill" = "x0" ; then
-  roff_fill=".\\\" "
-else
-  roff_fill=""
-fi
-AC_SUBST([roff_fill])
 
 dnl Do not support the xmalloc option by default.
 AC_ARG_ENABLE([xmalloc],
@@ -594,12 +566,6 @@
   AC_DEFINE([JEMALLOC_XMALLOC], [ ])
 fi
 AC_SUBST([enable_xmalloc])
-if test "x$enable_xmalloc" = "x0" ; then
-  roff_xmalloc=".\\\" "
-else
-  roff_xmalloc=""
-fi
-AC_SUBST([roff_xmalloc])
 
 dnl Do not support the SYSV option by default.
 AC_ARG_ENABLE([sysv],
@@ -616,12 +582,6 @@
   AC_DEFINE([JEMALLOC_SYSV], [ ])
 fi
 AC_SUBST([enable_sysv])
-if test "x$enable_sysv" = "x0" ; then
-  roff_sysv=".\\\" "
-else
-  roff_sysv=""
-fi
-AC_SUBST([roff_sysv])
 
 dnl Do not determine page shift at run time by default.
 AC_ARG_ENABLE([dynamic_page_shift],
@@ -828,6 +788,9 @@
 AC_MSG_RESULT([LIBS               : ${LIBS}])
 AC_MSG_RESULT([RPATH_EXTRA        : ${RPATH_EXTRA}])
 AC_MSG_RESULT([])
+AC_MSG_RESULT([XSLTPROC           : ${XSLTPROC}])
+AC_MSG_RESULT([XSLROOT            : ${XSLROOT}])
+AC_MSG_RESULT([])
 AC_MSG_RESULT([PREFIX             : ${PREFIX}])
 AC_MSG_RESULT([BINDIR             : ${BINDIR}])
 AC_MSG_RESULT([INCLUDEDIR         : ${INCLUDEDIR}])
diff --git a/jemalloc/doc/html.xsl.in b/jemalloc/doc/html.xsl.in
new file mode 100644
index 0000000..a91d974
--- /dev/null
+++ b/jemalloc/doc/html.xsl.in
@@ -0,0 +1,4 @@
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+  <xsl:import href="@XSLROOT@/html/docbook.xsl"/>
+  <xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/>
+</xsl:stylesheet>
diff --git a/jemalloc/doc/jemalloc.3.in b/jemalloc/doc/jemalloc.3.in
deleted file mode 100644
index 6286664..0000000
--- a/jemalloc/doc/jemalloc.3.in
+++ /dev/null
@@ -1,1688 +0,0 @@
-.\" Copyright (c) 2006-2010 Jason Evans <jasone@canonware.com>.
-.\" All rights reserved.
-.\" Copyright (c) 2009 Facebook, Inc.  All rights reserved.
-.\"
-.\" See COPYING for licensing terms provided by the above copyright holders.
-.\"
-.\" Copyright (c) 1980, 1991, 1993
-.\"	The Regents of the University of California.  All rights reserved.
-.\"
-.\" This code is derived from software contributed to Berkeley by
-.\" the American National Standards Committee X3, on Information
-.\" Processing Systems.
-.\"
-.\" Redistribution and use in source and binary forms, with or without
-.\" modification, are permitted provided that the following conditions
-.\" are met:
-.\" 1. Redistributions of source code must retain the above copyright
-.\"    notice, this list of conditions and the following disclaimer.
-.\" 2. Redistributions in binary form must reproduce the above copyright
-.\"    notice, this list of conditions and the following disclaimer in the
-.\"    documentation and/or other materials provided with the distribution.
-.\" 3. Neither the name of the University nor the names of its contributors
-.\"    may be used to endorse or promote products derived from this software
-.\"    without specific prior written permission.
-.\"
-.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
-.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-.\" ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
-.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-.\" SUCH DAMAGE.
-.\"
-.\"     @(#)malloc.3	8.1 (Berkeley) 6/4/93
-.\" $FreeBSD: head/lib/libc/stdlib/malloc.3 182225 2008-08-27 02:00:53Z jasone $
-.\"
-.Dd October 24, 2010
-.Dt jemalloc 3
-.Os
-.Sh NAME
-.Nm @jemalloc_prefix@malloc ,
-.Nm @jemalloc_prefix@calloc ,
-.Nm @jemalloc_prefix@posix_memalign ,
-.Nm @jemalloc_prefix@realloc ,
-.Nm @jemalloc_prefix@free ,
-.Nm @jemalloc_prefix@malloc_usable_size ,
-.Nm @jemalloc_prefix@malloc_stats_print ,
-.Nm @jemalloc_prefix@mallctl ,
-.Nm @jemalloc_prefix@mallctlnametomib ,
-.Nm @jemalloc_prefix@mallctlbymib ,
-.Nm @jemalloc_prefix@allocm ,
-.Nm @jemalloc_prefix@rallocm ,
-.Nm @jemalloc_prefix@sallocm ,
-.Nm @jemalloc_prefix@dallocm
-.Nd general purpose memory allocation functions
-.Sh LIBRARY
-.Sy libjemalloc@install_suffix@
-.Pp
-This manual describes jemalloc @jemalloc_version@.
-More information can be found at the
-.UR http://\:www.canonware.com/\:jemalloc/
-jemalloc website
-.UE .
-.Sh SYNOPSIS
-.In stdlib.h
-.In jemalloc/jemalloc@install_suffix@.h
-.Ss Standard API
-.Ft void *
-.Fn @jemalloc_prefix@malloc "size_t size"
-.Ft void *
-.Fn @jemalloc_prefix@calloc "size_t number" "size_t size"
-.Ft int
-.Fn @jemalloc_prefix@posix_memalign "void **ptr" "size_t alignment" "size_t size"
-.Ft void *
-.Fn @jemalloc_prefix@realloc "void *ptr" "size_t size"
-.Ft void
-.Fn @jemalloc_prefix@free "void *ptr"
-.Ss Non-standard API
-.Ft size_t
-.Fn @jemalloc_prefix@malloc_usable_size "const void *ptr"
-.Ft void
-.Fn @jemalloc_prefix@malloc_stats_print "void (*write_cb)(void *" "const char *)" "void *cbopaque" "const char *opts"
-.Ft int
-.Fn @jemalloc_prefix@mallctl "const char *name" "void *oldp" "size_t *oldlenp" "void *newp" "size_t newlen"
-.Ft int
-.Fn @jemalloc_prefix@mallctlnametomib "const char *name" "int *mibp" "size_t *miblenp"
-.Ft int
-.Fn @jemalloc_prefix@mallctlbymib "const size_t *mib" "size_t miblen" "void *oldp" "size_t *oldlenp" "void *newp" "size_t newlen"
-.Ft const char *
-.Va @jemalloc_prefix@malloc_conf ;
-.Ft void
-.Fn \*(lp*@jemalloc_prefix@malloc_message\*(rp "void *cbopaque" "const char *s"
-.Ss Experimental API
-.Ft int
-.Fn @jemalloc_prefix@allocm "void **ptr" "size_t *rsize" "size_t size" "int flags"
-.Ft int
-.Fn @jemalloc_prefix@rallocm "void **ptr" "size_t *rsize" "size_t size" "size_t extra" "int flags"
-.Ft int
-.Fn @jemalloc_prefix@sallocm "const void *ptr" "size_t *rsize" "int flags"
-.Ft int
-.Fn @jemalloc_prefix@dallocm "void *ptr" "int flags"
-.Sh DESCRIPTION
-.Ss Standard API
-The
-.Fn @jemalloc_prefix@malloc
-function allocates
-.Fa size
-bytes of uninitialized memory.
-The allocated space is suitably aligned
-@roff_tiny@(after possible pointer coercion)
-for storage of any type of object.
-.Pp
-The
-.Fn @jemalloc_prefix@calloc
-function allocates space for
-.Fa number
-objects,
-each
-.Fa size
-bytes in length.
-The result is identical to calling
-.Fn @jemalloc_prefix@malloc
-with an argument of
-.Dq "number * size" ,
-with the exception that the allocated memory is explicitly initialized
-to zero bytes.
-.Pp
-The
-.Fn @jemalloc_prefix@posix_memalign
-function allocates
-.Fa size
-bytes of memory such that the allocation's base address is an even multiple of
-.Fa alignment ,
-and returns the allocation in the value pointed to by
-.Fa ptr .
-The requested
-.Fa alignment
-must be a power of 2 at least as large as
-.Fn sizeof "void *" .
-.Pp
-The
-.Fn @jemalloc_prefix@realloc
-function changes the size of the previously allocated memory referenced by
-.Fa ptr
-to
-.Fa size
-bytes.
-The contents of the memory are unchanged up to the lesser of the new and
-old sizes.
-If the new size is larger,
-the contents of the newly allocated portion of the memory are undefined.
-Upon success, the memory referenced by
-.Fa ptr
-is freed and a pointer to the newly allocated memory is returned.
-Note that
-.Fn @jemalloc_prefix@realloc
-may move the memory allocation, resulting in a different return value than
-.Fa ptr .
-If
-.Fa ptr
-is
-.Dv NULL ,
-the
-.Fn @jemalloc_prefix@realloc
-function behaves identically to
-.Fn @jemalloc_prefix@malloc
-for the specified size.
-.Pp
-The
-.Fn @jemalloc_prefix@free
-function causes the allocated memory referenced by
-.Fa ptr
-to be made available for future allocations.
-If
-.Fa ptr
-is
-.Dv NULL ,
-no action occurs.
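
For illustration, a minimal C sketch exercising the standard API described above (assuming an empty @jemalloc_prefix@ and no @install_suffix@ in the header name):

    #include <stdlib.h>
    #include <string.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
    	void *p, *q, *r;

    	p = malloc(64);			/* Uninitialized, suitably aligned. */
    	if (p == NULL)
    		return (1);
    	memset(p, 0xa5, 64);

    	r = realloc(p, 128);		/* May move; first 64 bytes preserved. */
    	if (r == NULL) {
    		free(p);
    		return (1);
    	}
    	p = r;

    	/* Alignment must be a power of 2 at least as large as sizeof(void *). */
    	if (posix_memalign(&q, 16, 100) != 0) {
    		free(p);
    		return (1);
    	}

    	free(p);
    	free(q);
    	return (0);
    }
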
-.Ss Non-standard API
-The
-.Fn @jemalloc_prefix@malloc_usable_size
-function returns the usable size of the allocation pointed to by
-.Fa ptr .
-The return value may be larger than the size that was requested during
-allocation.
-The
-.Fn @jemalloc_prefix@malloc_usable_size
-function is not a mechanism for in-place
-.Fn @jemalloc_prefix@realloc ;
-rather it is provided solely as a tool for introspection purposes.
-Any discrepancy between the requested allocation size and the size reported by
-.Fn @jemalloc_prefix@malloc_usable_size
-should not be depended on, since such behavior is entirely
-implementation-dependent.
-.Pp
-The
-.Fn @jemalloc_prefix@malloc_stats_print
-function writes human-readable summary statistics via the
-.Fa write_cb
-callback function pointer and
-.Fa cbopaque
-data passed to
-.Fn write_cb ,
-or
-.Fn @jemalloc_prefix@malloc_message
-if
-.Fa write_cb
-is
-.Dv NULL .
-This function can be called repeatedly.
-General information that never changes
-during execution can be omitted by specifying
-.Dq g
-as a character within the
-.Fa opts
-string.
-Note that
-.Fn @jemalloc_prefix@malloc_message
-uses the
-.Fn @jemalloc_prefix@mallctl*
-functions internally, so inconsistent statistics can be reported if multiple
-threads use these functions simultaneously.
-@roff_stats@.Dq m
-@roff_stats@and
-@roff_stats@.Dq a
-@roff_stats@can be specified to omit merged arena and per arena statistics,
-@roff_stats@respectively.
-@roff_stats@.Dq b
-@roff_stats@and
-@roff_stats@.Dq l
-@roff_stats@can be specified to omit per size class statistics for bins and
-@roff_stats@large objects, respectively.
-Unrecognized characters are silently ignored.
-@roff_tcache@Note that thread caching may prevent some statistics from being
-@roff_tcache@completely up to date, since extra locking would be required to
-@roff_tcache@merge counters that track thread cache operations.
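
As a brief sketch of the statistics interface just described (empty prefix assumed), the first call below prints a full report through the default malloc_message sink, and the second omits the general information that never changes during execution via the "g" option character:

    #include <jemalloc/jemalloc.h>

    void
    dump_stats(void)
    {
    	/* NULL write_cb routes output through malloc_message (stderr by default). */
    	malloc_stats_print(NULL, NULL, NULL);

    	/* "g" omits general information that never changes during execution. */
    	malloc_stats_print(NULL, NULL, "g");
    }
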
-.Pp
-The
-.Fn @jemalloc_prefix@mallctl
-function provides a general interface for introspecting the memory allocator,
-as well as setting modifiable parameters and triggering actions.
-The period-separated
-.Fa name
-argument specifies a location in a tree-structured namespace; see the
-.Sx "MALLCTL NAMESPACE"
-section for documentation on the tree contents.
-To read a value, pass a pointer via
-.Fa oldp
-to adequate space to contain the value, and a pointer to its length via
-.Fa oldlenp ;
-otherwise pass
-.Dv NULL
-and
-.Dv NULL .
-Similarly, to write a value, pass a pointer to the value via
-.Fa newp ,
-and its length via
-.Fa newlen ;
-otherwise pass
-.Dv NULL
-and 0.
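
For illustration, a minimal sketch of the oldp/oldlenp read convention described above, querying two of the read-only names documented in the MALLCTL NAMESPACE section (empty prefix assumed):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    void
    print_allocator_info(void)
    {
    	const char *version;
    	unsigned narenas;
    	size_t sz;

    	/* Read-only queries: pass NULL and 0 for newp and newlen. */
    	sz = sizeof(version);
    	mallctl("version", &version, &sz, NULL, 0);
    	sz = sizeof(narenas);
    	mallctl("arenas.narenas", &narenas, &sz, NULL, 0);

    	printf("jemalloc %s, up to %u arenas\n", version, narenas);
    }
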
-.Pp
-The
-.Fn @jemalloc_prefix@mallctlnametomib
-function provides a way to avoid repeated name lookups for applications that
-repeatedly query the same portion of the namespace, by translating a name to a
-.Dq Management Information Base
-(MIB) that can be passed repeatedly to
-.Fn @jemalloc_prefix@mallctlbymib .
-Upon successful return from
-.Fn @jemalloc_prefix@mallctlnametomib ,
-.Fa mibp
-contains an array of
-.Fa *miblenp
-integers, where
-.Fa *miblenp
-is the lesser of the number of components in
-.Fa name
-and the input value of
-.Fa *miblenp .
-Thus it is possible to pass a
-.Fa *miblenp
-that is smaller than the number of period-separated name components, which
-results in a partial MIB that can be used as the basis for constructing a
-complete MIB.
-For name components that are integers (e.g. the 2 in
-.Qq arenas.bin.2.size ) ,
-the corresponding MIB component will always be that integer.
-Therefore, it is legitimate to construct code like the following:
-.Pp
-.Bd -literal -offset indent -compact
-unsigned nbins, i;
-int mib[4];
-size_t len, miblen;
-
-len = sizeof(nbins);
-@jemalloc_prefix@mallctl("arenas.nbins", &nbins, &len, NULL, 0);
-
-miblen = 4;
-@jemalloc_prefix@mallctlnametomib("arenas.bin.0.size", mib, &miblen);
-for (i = 0; i < nbins; i++) {
-	size_t bin_size;
-
-	mib[2] = i;
-	len = sizeof(bin_size);
-	@jemalloc_prefix@mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
-	/* Do something with bin_size... */
-}
-.Ed
-.Ss Experimental API
-The experimental API is subject to change or removal without regard for
-backward compatibility.
-.Pp
-The
-.Fn @jemalloc_prefix@allocm ,
-.Fn @jemalloc_prefix@rallocm ,
-.Fn @jemalloc_prefix@sallocm ,
-and
-.Fn @jemalloc_prefix@dallocm
-functions all have a
-.Fa flags
-argument that can be used to specify options.
-The functions only check the options that are contextually relevant.
-Use bitwise or (|) operations to specify one or more of the following:
-.Bl -tag -width ".Dv ALLOCM_LG_ALIGN(la)"
-.It ALLOCM_LG_ALIGN(la)
-Align the memory allocation to start at an address that is a multiple of
-(1 <<
-.Fa la ) .
-This macro does not validate that
-.Fa la
-is within the valid range.
-.It ALLOCM_ALIGN(a)
-Align the memory allocation to start at an address that is a multiple of
-.Fa a ,
-where
-.Fa a
-is a power of two.
-This macro does not validate that
-.Fa a
-is a power of 2.
-.It ALLOCM_ZERO
-Initialize newly allocated memory to contain zero bytes.
-In the growing reallocation case, the real size prior to reallocation defines
-the boundary between untouched bytes and those that are initialized to contain
-zero bytes.
-If this option is absent, newly allocated memory is uninitialized.
-.It ALLOCM_NO_MOVE
-For reallocation, fail rather than moving the object.
-This constraint can apply to both growth and shrinkage.
-.El
-.Pp
-The
-.Fn @jemalloc_prefix@allocm
-function allocates at least
-.Fa size
-bytes of memory, sets
-.Fa *ptr
-to the base address of the allocation, and sets
-.Fa *rsize
-to the real size of the allocation if
-.Fa rsize
-is not
-.Dv NULL .
-.Pp
-The
-.Fn @jemalloc_prefix@rallocm
-function resizes the allocation at
-.Fa *ptr
-to be at least
-.Fa size
-bytes, sets
-.Fa *ptr
-to the base address of the allocation if it moved, and sets
-.Fa *rsize
-to the real size of the allocation if
-.Fa rsize
-is not
-.Dv NULL .
-If
-.Fa extra
-is non-zero, an attempt is made to resize the allocation to be at least
-.Fa ( size
-+
-.Fa extra )
-bytes, though inability to allocate the extra byte(s) will not by itself result
-in failure.
-Behavior is undefined if
-.Fa ( size
-+
-.Fa extra
->
-.Dv SIZE_T_MAX ) .
-.Pp
-The
-.Fn @jemalloc_prefix@sallocm
-function sets
-.Fa *rsize
-to the real size of the allocation.
-.Pp
-The
-.Fn @jemalloc_prefix@dallocm
-function causes the memory referenced by
-.Fa ptr
-to be made available for future allocations.
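
A hedged sketch of the experimental API described above, requesting a 4 KiB-aligned, zero-filled allocation and then attempting an in-place resize (empty prefix assumed; the zero-on-success return convention is an assumption here, since the RETURN VALUES section is not shown):

    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    int
    aligned_zeroed_example(void)
    {
    	void *p;
    	size_t rsize;

    	/* At least 100 bytes, 4 KiB-aligned, zero-filled. */
    	if (allocm(&p, &rsize, 100, ALLOCM_ALIGN(4096) | ALLOCM_ZERO) != 0)
    		return (-1);

    	/* Try to grow in place; refuse to move the object. */
    	if (rallocm(&p, &rsize, 200, 0, ALLOCM_NO_MOVE) != 0) {
    		/* Could not grow without moving; the allocation is unchanged. */
    	}

    	dallocm(p, 0);
    	return (0);
    }
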
-.Sh TUNING
-Once, when the first call is made to one of the memory allocation routines, the
-allocator initializes its internals based in part on various options that can
-be specified at compile- or run-time.
-.Pp
-The string pointed to by the global variable
-.Va @jemalloc_prefix@malloc_conf ,
-the
-.Dq name
-of the file referenced by the symbolic link named
-.Pa /etc/@jemalloc_prefix@malloc.conf ,
-and the value of the environment variable
-.Ev @jemalloc_cprefix@MALLOC_CONF ,
-will be interpreted, in that order, from left to right as options.
-.Pp
-An options string is a comma-separated list of option:value pairs.
-There is one key corresponding to each
-.Dq opt.*
-mallctl.
-For example,
-.Dq abort:true,narenas:1
-sets the
-.Dq opt.abort
-and
-.Dq opt.narenas
-options.
-Some options have boolean values (true/false), others have integer values (base
-8, 10, or 16, depending on prefix), and yet others have raw string values.
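
For illustration, the compile-time option source described above is just a global string; it is interpreted first, followed by the name of the /etc/malloc.conf symbolic link and then the MALLOC_CONF environment variable (empty prefix assumed):

    #include <jemalloc/jemalloc.h>

    /*
     * Compile-time option string, using the same comma-separated
     * option:value syntax as the other sources described above.
     */
    const char *malloc_conf = "abort:true,narenas:1";
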
-.Sh IMPLEMENTATION NOTES
-@roff_dss@Traditionally, allocators have used
-@roff_dss@.Xr sbrk 2
-@roff_dss@to obtain memory, which is suboptimal for several reasons, including
-@roff_dss@race conditions, increased fragmentation, and artificial limitations
-@roff_dss@on maximum usable memory.
-@roff_dss@This allocator uses both
-@roff_dss@.Xr sbrk 2
-@roff_dss@and
-@roff_dss@.Xr mmap 2 ,
-@roff_dss@in that order of preference.
-.Pp
-This allocator uses multiple arenas in order to reduce lock contention for
-threaded programs on multi-processor systems.
-This works well with regard to threading scalability, but incurs some costs.
-There is a small fixed per-arena overhead, and additionally, arenas manage
-memory completely independently of each other, which means a small fixed
-increase in overall memory fragmentation.
-These overheads are not generally an issue, given the number of arenas normally
-used.
-Note that using substantially more arenas than the default is not likely to
-improve performance, mainly due to reduced cache performance.
-However, it may make sense to reduce the number of arenas if an application
-does not make much use of the allocation functions.
-.Pp
-@roff_tcache@In addition to multiple arenas, this allocator supports
-@roff_tcache@thread-specific caching for small and large objects, in order to
-@roff_tcache@make it possible to completely avoid synchronization for most
-@roff_tcache@allocation requests.
-@roff_tcache@Such caching allows very fast allocation in the common case, but it
-@roff_tcache@increases memory usage and fragmentation, since a bounded number of
-@roff_tcache@objects can remain allocated in each thread cache.
-@roff_tcache@.Pp
-Memory is conceptually broken into equal-sized chunks, where the chunk size is
-a power of two that is greater than the page size.
-Chunks are always aligned to multiples of the chunk size.
-This alignment makes it possible to find metadata for user objects very
-quickly.
-.Pp
-User objects are broken into three categories according to size: small, large,
-and huge.
-Small objects are smaller than one page.
-Large objects are smaller than the chunk size.
-Huge objects are a multiple of the chunk size.
-Small and large objects are managed by arenas; huge objects are managed
-separately in a single data structure that is shared by all threads.
-Huge objects are used by applications infrequently enough that this single
-data structure is not a scalability issue.
-.Pp
-Each chunk that is managed by an arena tracks its contents as runs of
-contiguous pages (unused, backing a set of small objects, or backing one large
-object).
-The combination of chunk alignment and chunk page maps makes it possible to
-determine all metadata regarding small and large allocations in constant time.
-.Pp
-Small objects are managed in groups by page runs.
-Each run maintains a frontier and free list to track which regions are in use.
-@roff_tiny@Allocation requests that are no more than half the quantum (8 or 16,
-@roff_tiny@depending on architecture) are rounded up to the nearest power of
-@roff_tiny@two.
-Allocation requests that are
-@roff_tiny@more than half the quantum, but
-no more than the minimum cacheline-multiple size class (see the
-.Dq opt.lg_qspace_max
-option) are rounded up to the nearest multiple of the
-@roff_tiny@quantum.
-@roff_no_tiny@quantum (8 or 16, depending on architecture).
-Allocation requests that are more than the minimum cacheline-multiple size
-class, but no more than the minimum subpage-multiple size class (see the
-.Dq opt.lg_cspace_max
-option) are rounded up to the nearest multiple of the cacheline size (64).
-Allocation requests that are more than the minimum subpage-multiple size class,
-but no more than the maximum subpage-multiple size class are rounded up to the
-nearest multiple of the subpage size (256).
-Allocation requests that are more than the maximum subpage-multiple size class,
-but small enough to fit in an arena-managed chunk (see the
-.Dq opt.lg_chunk
-option), are rounded up to the nearest run size.
-Allocation requests that are too large to fit in an arena-managed chunk are
-rounded up to the nearest multiple of the chunk size.
-.Pp
-Allocations are packed tightly together, which can be an issue for
-multi-threaded applications.
-If you need to assure that allocations do not suffer from cacheline sharing,
-round your allocation requests up to the nearest multiple of the cacheline
-size, or specify cacheline alignment when allocating.
-.Pp
-Assuming 4 MiB chunks, 4 KiB pages, and a 16-byte quantum on a 64-bit system,
-the size classes in each category are as follows:
-.\"-----------------------------------------------------------------------------
-.TS
-allbox tab(;);
-LLL
-LLL
-^LL
-^LL
-^LL
-LsL
-LsL.
-Category;Subcategory;Size
-@roff_tiny@Small;Tiny;[8]
-@roff_no_tiny@Small;Tiny;[disabled]
-;Quantum-spaced;[16, 32, 48, ..., 128]
-;Cacheline-spaced;[192, 256, 320, ..., 512]
-;Sub-page-spaced;[768, 1024, 1280, ..., 3840]
-Large;[4 KiB, 8 KiB, 12 KiB, ..., 4072 KiB]
-Huge;[4 MiB, 8 MiB, 12 MiB, ...]
-.TE
-.\"-----------------------------------------------------------------------------
-.Sh MALLCTL NAMESPACE
-The following names are defined in the namespace accessible via the
-.Fn @jemalloc_prefix@mallctl*
-functions.
-Value types are specified in parentheses, and their readable/writable statuses
-are encoded as rw, r-, -w, or --.
-A name element encoded as <i> or <j> indicates an integer component, where the
-integer varies from 0 to some upper value that must be determined via
-introspection.
-@roff_stats@In the case of
-@roff_stats@.Dq stats.arenas.<i>.* ,
-@roff_stats@<i> equal to
-@roff_stats@.Dq arenas.narenas
-@roff_stats@can be used to access the summation of statistics from all arenas.
-.Pp
-Take special note of the
-.Dq epoch
-mallctl, which controls refreshing of cached dynamic statistics.
-.Bl -ohang
-.\"-----------------------------------------------------------------------------
-.It Sy "version (const char *) r-"
-.Bd -ragged -offset indent -compact
-Return the jemalloc version string.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "epoch (uint64_t) rw"
-.Bd -ragged -offset indent -compact
-If a value is passed in, refresh the data from which the
-.Fn @jemalloc_prefix@mallctl*
-functions report values, and increment the epoch.
-Return the current epoch.
-This is useful for detecting whether another thread caused a refresh.
-.Ed
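
For illustration, a minimal sketch of the epoch convention just described: writing any value refreshes the cached statistics, and the read-back returns the current epoch (empty prefix assumed):

    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    uint64_t
    refresh_stats(void)
    {
    	uint64_t epoch = 1;
    	size_t sz = sizeof(epoch);

    	/* Write to refresh cached statistics; read back the current epoch. */
    	mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));
    	return (epoch);
    }
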
-.\"-----------------------------------------------------------------------------
-.It Sy "config.debug (bool) r-"
-.Bd -ragged -offset indent -compact
---enable-debug was specified during build configuration.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "config.dss (bool) r-"
-.Bd -ragged -offset indent -compact
---enable-dss was specified during build configuration.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "config.dynamic_page_shift (bool) r-"
-.Bd -ragged -offset indent -compact
---enable-dynamic-page-shift was specified during build configuration.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "config.fill (bool) r-"
-.Bd -ragged -offset indent -compact
---enable-fill was specified during build configuration.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "config.lazy_lock (bool) r-"
-.Bd -ragged -offset indent -compact
---enable-lazy-lock was specified during build configuration.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "config.prof (bool) r-"
-.Bd -ragged -offset indent -compact
---enable-prof was specified during build configuration.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "config.prof_libgcc (bool) r-"
-.Bd -ragged -offset indent -compact
---disable-prof-libgcc was not specified during build configuration.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "config.prof_libunwind (bool) r-"
-.Bd -ragged -offset indent -compact
---enable-prof-libunwind was specified during build configuration.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "config.stats (bool) r-"
-.Bd -ragged -offset indent -compact
---enable-stats was specified during build configuration.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "config.swap (bool) r-"
-.Bd -ragged -offset indent -compact
---enable-swap was specified during build configuration.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "config.sysv (bool) r-"
-.Bd -ragged -offset indent -compact
---enable-sysv was specified during build configuration.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "config.tcache (bool) r-"
-.Bd -ragged -offset indent -compact
---disable-tcache was not specified during build configuration.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "config.tiny (bool) r-"
-.Bd -ragged -offset indent -compact
---disable-tiny was not specified during build configuration.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "config.tls (bool) r-"
-.Bd -ragged -offset indent -compact
---disable-tls was not specified during build configuration.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "config.xmalloc (bool) r-"
-.Bd -ragged -offset indent -compact
---enable-xmalloc was specified during build configuration.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "opt.abort (bool) r-"
-.Bd -ragged -offset indent -compact
-Abort-on-warning enabled/disabled.
-If true, most warnings are fatal.
-The process will call
-.Xr abort 3
-in these cases.
-This option is
-@roff_debug@enabled
-@roff_no_debug@disabled
-by default.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "opt.lg_qspace_max (size_t) r-"
-.Bd -ragged -offset indent -compact
-Size (log base 2) of the maximum size class that is a multiple of the quantum
-(8 or 16 bytes, depending on architecture).
-Above this size, cacheline spacing is used for size classes.
-The default value is 128 bytes (2^7).
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "opt.lg_cspace_max (size_t) r-"
-.Bd -ragged -offset indent -compact
-Size (log base 2) of the maximum size class that is a multiple of the cacheline
-size (64).
-Above this size, subpage spacing (256 bytes) is used for size classes.
-The default value is 512 bytes (2^9).
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "opt.lg_chunk (size_t) r-"
-.Bd -ragged -offset indent -compact
-Virtual memory chunk size (log base 2).
-The default chunk size is 4 MiB (2^22).
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "opt.narenas (size_t) r-"
-.Bd -ragged -offset indent -compact
-Maximum number of arenas to use.
-The default maximum number of arenas is four times the number of CPUs, or one
-if there is a single CPU.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "opt.lg_dirty_mult (ssize_t) r-"
-.Bd -ragged -offset indent -compact
-Per-arena minimum ratio (log base 2) of active to dirty pages.
-Some dirty unused pages may be allowed to accumulate, within the limit set by
-the ratio (or one chunk worth of dirty pages, whichever is greater), before
-informing the kernel about some of those pages via
-.Xr madvise 2
-or a similar system call.
-This provides the kernel with sufficient information to recycle dirty pages if
-physical memory becomes scarce and the pages remain unused.
-The default minimum ratio is 32:1 (2^5:1); an option value of -1 will disable
-dirty page purging.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "opt.stats_print (bool) r-"
-.Bd -ragged -offset indent -compact
-Enable/disable statistics printing at exit.
-If enabled, the
-.Fn @jemalloc_prefix@malloc_stats_print
-function is called at program exit via an
-.Xr atexit 3
-function.
-@roff_stats@This has the potential to cause deadlock for a multi-threaded
-@roff_stats@process that exits while one or more threads are executing in the
-@roff_stats@memory allocation functions.
-@roff_stats@Therefore, this option should only be used with care; it is
-@roff_stats@primarily intended as a performance tuning aid during application
-@roff_stats@development.
-This option is disabled by default.
-.Ed
-.\"-----------------------------------------------------------------------------
-@roff_fill@.It Sy "opt.junk (bool) r-"
-@roff_fill@.Bd -ragged -offset indent -compact
-@roff_fill@Junk filling enabled/disabled.
-@roff_fill@If enabled, each byte of uninitialized allocated memory will be
-@roff_fill@initialized to 0xa5.
-@roff_fill@All deallocated memory will be initialized to 0x5a.
-@roff_fill@This is intended for debugging and will impact performance
-@roff_fill@negatively.
-@roff_fill@This option is
-@roff_fill@@roff_debug@enabled
-@roff_fill@@roff_no_debug@disabled
-@roff_fill@by default.
-@roff_fill@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_fill@.It Sy "opt.zero (bool) r-"
-@roff_fill@.Bd -ragged -offset indent -compact
-@roff_fill@Zero filling enabled/disabled.
-@roff_fill@If enabled, each byte of uninitialized allocated memory will be
-@roff_fill@initialized to 0.
-@roff_fill@Note that this initialization only happens once for each byte, so
-@roff_fill@.Fn @jemalloc_prefix@realloc
-@roff_fill@calls do not zero memory that was previously allocated.
-@roff_fill@This is intended for debugging and will impact performance
-@roff_fill@negatively.
-@roff_fill@This option is disabled by default.
-@roff_fill@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_sysv@.It Sy "opt.sysv (bool) r-"
-@roff_sysv@.Bd -ragged -offset indent -compact
-@roff_sysv@If enabled, attempting to allocate zero bytes will return a
-@roff_sysv@.Dv NULL
-@roff_sysv@pointer instead of a valid pointer.
-@roff_sysv@(The default behavior is to make a minimal allocation and return a
-@roff_sysv@pointer to it.)
-@roff_sysv@This option is provided for System V compatibility.
-@roff_sysv@@roff_xmalloc@This option is incompatible with the
-@roff_sysv@@roff_xmalloc@.Dq opt.xmalloc
-@roff_sysv@@roff_xmalloc@option.
-@roff_sysv@This option is disabled by default.
-@roff_sysv@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_xmalloc@.It Sy "opt.xmalloc (bool) r-"
-@roff_xmalloc@.Bd -ragged -offset indent -compact
-@roff_xmalloc@Abort-on-out-of-memory enabled/disabled.
-@roff_xmalloc@If enabled, rather than returning failure for any allocation
-@roff_xmalloc@function, display a diagnostic message on
-@roff_xmalloc@.Dv STDERR_FILENO
-@roff_xmalloc@and cause the program to drop core (using
-@roff_xmalloc@.Xr abort 3 ) .
-@roff_xmalloc@If an application is designed to depend on this behavior, set the
-@roff_xmalloc@option at compile time by including the following in the source
-@roff_xmalloc@code:
-@roff_xmalloc@.Bd -literal -offset indent
-@roff_xmalloc@@jemalloc_prefix@malloc_conf = "xmalloc:true";
-@roff_xmalloc@.Ed
-@roff_xmalloc@.Pp
-@roff_xmalloc@This option is disabled by default.
-@roff_xmalloc@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_tcache@.It Sy "opt.tcache (bool) r-"
-@roff_tcache@.Bd -ragged -offset indent -compact
-@roff_tcache@Thread-specific caching enabled/disabled.
-@roff_tcache@When there are multiple threads, each thread uses a
-@roff_tcache@thread-specific cache for objects up to a certain size.
-@roff_tcache@Thread-specific caching allows many allocations to be satisfied
-@roff_tcache@without performing any thread synchronization, at the cost of
-@roff_tcache@increased memory use.
-@roff_tcache@See the
-@roff_tcache@.Dq opt.lg_tcache_gc_sweep
-@roff_tcache@and
-@roff_tcache@.Dq opt.tcache_max
-@roff_tcache@options for related tuning information.
-@roff_tcache@This option is enabled by default.
-@roff_tcache@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_tcache@.It Sy "opt.lg_tcache_gc_sweep (ssize_t) r-"
-@roff_tcache@.Bd -ragged -offset indent -compact
-@roff_tcache@Approximate interval (log base 2) between full thread-specific
-@roff_tcache@cache garbage collection sweeps, counted in terms of
-@roff_tcache@thread-specific cache allocation/deallocation events.
-@roff_tcache@Garbage collection is actually performed incrementally, one size
-@roff_tcache@class at a time, in order to avoid large collection pauses.
-@roff_tcache@The default sweep interval is 8192 (2^13); setting this option to
-@roff_tcache@-1 will disable garbage collection.
-@roff_tcache@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_tcache@.It Sy "opt.lg_tcache_max (size_t) r-"
-@roff_tcache@.Bd -ragged -offset indent -compact
-@roff_tcache@Maximum size class (log base 2) to cache in the thread-specific
-@roff_tcache@cache.
-@roff_tcache@At a minimum, all small size classes are cached, and at a maximum
-@roff_tcache@all large size classes are cached.
-@roff_tcache@The default maximum is 32 KiB (2^15).
-@roff_tcache@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_prof@.It Sy "opt.prof (bool) r-"
-@roff_prof@.Bd -ragged -offset indent -compact
-@roff_prof@Memory profiling enabled/disabled.
-@roff_prof@If enabled, profile memory allocation activity, and use an
-@roff_prof@.Xr atexit 3
-@roff_prof@function to dump final memory usage to a file named according to
-@roff_prof@the pattern
-@roff_prof@.Pa <prefix>.<pid>.<seq>.f.heap ,
-@roff_prof@where
-@roff_prof@.Pa <prefix>
-@roff_prof@is controlled by the
-@roff_prof@.Dq opt.prof_prefix
-@roff_prof@option.
-@roff_prof@See the
-@roff_prof@.Dq opt.lg_prof_bt_max
-@roff_prof@option for backtrace depth control.
-@roff_prof@See the
-@roff_prof@.Dq opt.prof_active
-@roff_prof@option for on-the-fly activation/deactivation.
-@roff_prof@See the
-@roff_prof@.Dq opt.lg_prof_sample
-@roff_prof@option for probabilistic sampling control.
-@roff_prof@See the
-@roff_prof@.Dq opt.prof_accum
-@roff_prof@option for control of cumulative sample reporting.
-@roff_prof@See the
-@roff_prof@.Dq opt.lg_prof_tcmax
-@roff_prof@option for control of per thread backtrace caching.
-@roff_prof@See the
-@roff_prof@.Dq opt.lg_prof_interval
-@roff_prof@option for information on interval-triggered profile dumping, and the
-@roff_prof@.Dq opt.prof_gdump
-@roff_prof@option for information on high-water-triggered profile dumping.
-@roff_prof@Profile output is compatible with the included pprof Perl script,
-@roff_prof@which originates from the
-@roff_prof@.UR http://\:code.google.com/\:p/\:google-perftools/
-@roff_prof@google-perftools package
-@roff_prof@.UE .
-@roff_prof@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_prof@.It Sy "opt.prof_prefix (const char *) r-"
-@roff_prof@.Bd -ragged -offset indent -compact
-@roff_prof@Filename prefix for profile dumps.
-@roff_prof@If the prefix is set to the empty string, no automatic dumps will
-@roff_prof@occur; this is primarily useful for disabling the automatic final
-@roff_prof@heap dump (which also disables leak reporting, if enabled).
-@roff_prof@The default prefix is
-@roff_prof@.Pa jeprof .
-@roff_prof@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_prof@.It Sy "opt.lg_prof_bt_max (size_t) r-"
-@roff_prof@.Bd -ragged -offset indent -compact
-@roff_prof@Maximum backtrace depth (log base 2) when profiling memory
-@roff_prof@allocation activity.
-@roff_prof@The default is 128 (2^7).
-@roff_prof@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_prof@.It Sy "opt.prof_active (bool) r-"
-@roff_prof@.Bd -ragged -offset indent -compact
-@roff_prof@Profiling activated/deactivated.
-@roff_prof@This is a secondary control mechanism that makes it possible to
-@roff_prof@start the application with profiling enabled (see the
-@roff_prof@.Dq opt.prof
-@roff_prof@option) but inactive, then toggle profiling at any time during
-@roff_prof@program execution with the
-@roff_prof@.Dq prof.active
-@roff_prof@mallctl.
-@roff_prof@This option is enabled by default.
-@roff_prof@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_prof@.It Sy "opt.lg_prof_sample (ssize_t) r-"
-@roff_prof@.Bd -ragged -offset indent -compact
-@roff_prof@Average interval (log base 2) between allocation samples, as
-@roff_prof@measured in bytes of allocation activity.
-@roff_prof@Increasing the sampling interval decreases profile fidelity, but
-@roff_prof@also decreases the computational overhead.
-@roff_prof@The default sample interval is 1 (2^0) (i.e. all allocations are
-@roff_prof@sampled).
-@roff_prof@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_prof@.It Sy "opt.prof_accum (bool) r-"
-@roff_prof@.Bd -ragged -offset indent -compact
-@roff_prof@Reporting of cumulative object/byte counts in profile dumps
-@roff_prof@enabled/disabled.
-@roff_prof@If this option is enabled, every unique backtrace must be stored for
-@roff_prof@the duration of execution.
-@roff_prof@Depending on the application, this can impose a large memory
-@roff_prof@overhead, and the cumulative counts are not always of interest.
-@roff_prof@See the
-@roff_prof@.Dq opt.lg_prof_tcmax
-@roff_prof@option for control of per thread backtrace caching, which has
-@roff_prof@important interactions.
-@roff_prof@This option is enabled by default.
-@roff_prof@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_prof@.It Sy "opt.lg_prof_tcmax (ssize_t) r-"
-@roff_prof@.Bd -ragged -offset indent -compact
-@roff_prof@Maximum per thread backtrace cache (log base 2) used for heap
-@roff_prof@profiling.
-@roff_prof@A backtrace can only be discarded if the
-@roff_prof@.Dq opt.prof_accum
-@roff_prof@option is disabled, and no thread caches currently refer to the
-@roff_prof@backtrace.
-@roff_prof@Therefore, a backtrace cache limit should be imposed if the
-@roff_prof@intention is to limit how much memory is used by backtraces.
-@roff_prof@By default, no limit is imposed (encoded as -1).
-@roff_prof@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_prof@.It Sy "opt.lg_prof_interval (ssize_t) r-"
-@roff_prof@.Bd -ragged -offset indent -compact
-@roff_prof@Average interval (log base 2) between memory profile dumps, as
-@roff_prof@measured in bytes of allocation activity.
-@roff_prof@The actual interval between dumps may be sporadic because
-@roff_prof@decentralized allocation counters are used to avoid synchronization
-@roff_prof@bottlenecks.
-@roff_prof@Profiles are dumped to files named according to the pattern
-@roff_prof@.Pa <prefix>.<pid>.<seq>.i<iseq>.heap ,
-@roff_prof@where
-@roff_prof@.Pa <prefix>
-@roff_prof@is controlled by the
-@roff_prof@.Dq opt.prof_prefix
-@roff_prof@option.
-@roff_prof@By default, interval-triggered profile dumping is disabled (encoded
-@roff_prof@as -1).
-@roff_prof@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_prof@.It Sy "opt.prof_gdump (bool) r-"
-@roff_prof@.Bd -ragged -offset indent -compact
-@roff_prof@Trigger a memory profile dump every time the total virtual memory
-@roff_prof@exceeds the previous maximum.
-@roff_prof@Profiles are dumped to files named according to the pattern
-@roff_prof@.Pa <prefix>.<pid>.<seq>.u<useq>.heap ,
-@roff_prof@where
-@roff_prof@.Pa <prefix>
-@roff_prof@is controlled by the
-@roff_prof@.Dq opt.prof_prefix
-@roff_prof@option.
-@roff_prof@This option is disabled by default.
-@roff_prof@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_prof@.It Sy "opt.prof_leak (bool) r-"
-@roff_prof@.Bd -ragged -offset indent -compact
-@roff_prof@Leak reporting enabled/disabled.
-@roff_prof@If enabled, use an
-@roff_prof@.Xr atexit 3
-@roff_prof@function to report memory leaks detected by allocation sampling.
-@roff_prof@See the
-@roff_prof@.Dq opt.lg_prof_bt_max
-@roff_prof@option for backtrace depth control.
-@roff_prof@See the
-@roff_prof@.Dq opt.prof
-@roff_prof@option for information on analyzing heap profile output.
-@roff_prof@This option is disabled by default.
-@roff_prof@.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "opt.overcommit (bool) r-"
-.Bd -ragged -offset indent -compact
-@roff_swap@Over-commit enabled/disabled.
-@roff_swap@If enabled, over-commit memory as a side effect of using anonymous
-@roff_swap@.Xr mmap 2
-@roff_swap@@roff_dss@ and
-@roff_swap@@roff_dss@.Xr sbrk 2
-@roff_swap@for virtual memory allocation.
-@roff_swap@In order for overcommit to be disabled, the
-@roff_swap@.Dq swap.fds
-@roff_swap@mallctl must have been successfully written to.
-@roff_swap@This option is enabled by default.
-.Ed
-.\"-----------------------------------------------------------------------------
-@roff_tcache@.It Sy "tcache.flush (void) --"
-@roff_tcache@.Bd -ragged -offset indent -compact
-@roff_tcache@Flush calling thread's tcache.
-@roff_tcache@This interface releases all cached objects and internal data
-@roff_tcache@structures associated with the calling thread's thread-specific
-@roff_tcache@cache.
-@roff_tcache@Ordinarily, this interface need not be called, since automatic
-@roff_tcache@periodic incremental garbage collection occurs, and the thread
-@roff_tcache@cache is automatically discarded when a thread exits.
-@roff_tcache@However, garbage collection is triggered by allocation activity,
-@roff_tcache@so it is possible for a thread that stops allocating/deallocating
-@roff_tcache@to retain its cache indefinitely, in which case the developer may
-@roff_tcache@find manual flushing useful.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "thread.arena (unsigned) rw"
-.Bd -ragged -offset indent -compact
-Get or set the arena associated with the calling thread.
-The arena index must be less than the maximum number of arenas (see the
-.Dq arenas.narenas
-mallctl).
-If the specified arena was not initialized beforehand (see the
-.Dq arenas.initialized
-mallctl), it will be automatically initialized as a side effect of calling this
-interface.
-.Ed
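
A small sketch of the "thread.arena" interface described above, reading the calling thread's current arena and then binding the thread to arena 0 (empty prefix assumed):

    #include <jemalloc/jemalloc.h>

    void
    pin_thread_to_arena0(void)
    {
    	unsigned cur, target = 0;
    	size_t sz = sizeof(cur);

    	/* Read the arena currently associated with the calling thread. */
    	mallctl("thread.arena", &cur, &sz, NULL, 0);

    	/* Rebind the calling thread to arena 0 (initializing it if needed). */
    	mallctl("thread.arena", NULL, NULL, &target, sizeof(target));
    }
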
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "thread.allocated (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Get the total number of bytes ever allocated by the calling thread.
-@roff_stats@This counter has the potential to wrap around; it is up to the
-@roff_stats@application to appropriately interpret the counter in such cases.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "thread.deallocated (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Get the total number of bytes ever deallocated by the calling
-@roff_stats@thread.
-@roff_stats@This counter has the potential to wrap around; it is up to the
-@roff_stats@application to appropriately interpret the counter in such cases.
-@roff_stats@.Ed
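
The 2.1.0 ChangeLog above adds "thread.[de]allocatedp" variants of these counters; the hedged sketch below assumes they return pointers to the calling thread's uint64_t counters, so the counters can be read repeatedly without further mallctl() calls (empty prefix assumed):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    void
    report_thread_counters(void)
    {
    	uint64_t *allocatedp, *deallocatedp;
    	size_t sz = sizeof(allocatedp);

    	/* Fetch pointers to the calling thread's counters once. */
    	if (mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0) != 0 ||
    	    mallctl("thread.deallocatedp", &deallocatedp, &sz, NULL, 0) != 0)
    		return;

    	free(malloc(4096));

    	/* Subsequent reads are plain loads; no locking or name lookups. */
    	printf("allocated=%llu deallocated=%llu\n",
    	    (unsigned long long)*allocatedp, (unsigned long long)*deallocatedp);
    }
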
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.narenas (unsigned) r-"
-.Bd -ragged -offset indent -compact
-Maximum number of arenas.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.initialized (bool *) r-"
-.Bd -ragged -offset indent -compact
-An array of arenas.narenas booleans.
-Each boolean indicates whether the corresponding arena is initialized.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.quantum (size_t) r-"
-.Bd -ragged -offset indent -compact
-Quantum size.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.cacheline (size_t) r-"
-.Bd -ragged -offset indent -compact
-Assumed cacheline size.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.subpage (size_t) r-"
-.Bd -ragged -offset indent -compact
-Subpage size class interval.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.pagesize (size_t) r-"
-.Bd -ragged -offset indent -compact
-Page size.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.chunksize (size_t) r-"
-.Bd -ragged -offset indent -compact
-Chunk size.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.tspace_min (size_t) r-"
-.Bd -ragged -offset indent -compact
-Minimum tiny size class.
-Tiny size classes are powers of two.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.tspace_max (size_t) r-"
-.Bd -ragged -offset indent -compact
-Maximum tiny size class.
-Tiny size classes are powers of two.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.qspace_min (size_t) r-"
-.Bd -ragged -offset indent -compact
-Minimum quantum-spaced size class.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.qspace_max (size_t) r-"
-.Bd -ragged -offset indent -compact
-Maximum quantum-spaced size class.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.cspace_min (size_t) r-"
-.Bd -ragged -offset indent -compact
-Minimum cacheline-spaced size class.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.cspace_max (size_t) r-"
-.Bd -ragged -offset indent -compact
-Maximum cacheline-spaced size class.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.sspace_min (size_t) r-"
-.Bd -ragged -offset indent -compact
-Minimum subpage-spaced size class.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.sspace_max (size_t) r-"
-.Bd -ragged -offset indent -compact
-Maximum subpage-spaced size class.
-.Ed
-.\"-----------------------------------------------------------------------------
-@roff_tcache@.It Sy "arenas.tcache_max (size_t) r-"
-@roff_tcache@.Bd -ragged -offset indent -compact
-@roff_tcache@Maximum thread-cached size class.
-@roff_tcache@.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.ntbins (unsigned) r-"
-.Bd -ragged -offset indent -compact
-Number of tiny bin size classes.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.nqbins (unsigned) r-"
-.Bd -ragged -offset indent -compact
-Number of quantum-spaced bin size classes.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.ncbins (unsigned) r-"
-.Bd -ragged -offset indent -compact
-Number of cacheline-spaced bin size classes.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.nsbins (unsigned) r-"
-.Bd -ragged -offset indent -compact
-Number of subpage-spaced bin size classes.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.nbins (unsigned) r-"
-.Bd -ragged -offset indent -compact
-Total number of bin size classes.
-.Ed
-.\"-----------------------------------------------------------------------------
-@roff_tcache@.It Sy "arenas.nhbins (unsigned) r-"
-@roff_tcache@.Bd -ragged -offset indent -compact
-@roff_tcache@Total number of thread cache bin size classes.
-@roff_tcache@.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.bin.<i>.size (size_t) r-"
-.Bd -ragged -offset indent -compact
-Maximum size supported by size class.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.bin.<i>.nregs (uint32_t) r-"
-.Bd -ragged -offset indent -compact
-Number of regions per page run.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.bin.<i>.run_size (size_t) r-"
-.Bd -ragged -offset indent -compact
-Number of bytes per page run.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.nlruns (size_t) r-"
-.Bd -ragged -offset indent -compact
-Total number of large size classes.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.lrun.<i>.size (size_t) r-"
-.Bd -ragged -offset indent -compact
-Maximum size supported by this large size class.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "arenas.purge (unsigned) -w"
-.Bd -ragged -offset indent -compact
-Purge unused dirty pages for the specified arena, or for all arenas if none is
-specified.
-.Ed
-.\"-----------------------------------------------------------------------------
-@roff_prof@.It Sy "prof.active (bool) rw"
-@roff_prof@.Bd -ragged -offset indent -compact
-@roff_prof@Control whether sampling is currently active.
-@roff_prof@See the
-@roff_prof@.Dq opt.prof_active
-@roff_prof@option for additional information.
-@roff_prof@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_prof@.It Sy "prof.dump (const char *) -w"
-@roff_prof@.Bd -ragged -offset indent -compact
-@roff_prof@Dump a memory profile to the specified file, or if NULL is specified,
-@roff_prof@to a file according to the pattern
-@roff_prof@.Pa <prefix>.<pid>.<seq>.m<mseq>.heap ,
-@roff_prof@where
-@roff_prof@.Pa <prefix>
-@roff_prof@is controlled by the
-@roff_prof@.Dq opt.prof_prefix
-@roff_prof@option.
-@roff_prof@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_prof@.It Sy "prof.interval (uint64_t) r-"
-@roff_prof@.Bd -ragged -offset indent -compact
-@roff_prof@Average number of bytes allocated between interval-based profile
-@roff_prof@dumps.
-@roff_prof@See the
-@roff_prof@.Dq opt.lg_prof_interval
-@roff_prof@option for additional information.
-@roff_prof@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.allocated (size_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Total number of bytes allocated by the application.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.active (size_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Total number of bytes in active pages allocated by the application.
-@roff_stats@This is a multiple of the page size, and greater than or equal to
-@roff_stats@.Dq stats.allocated .
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.mapped (size_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Total number of bytes in chunks mapped on behalf of the application.
-@roff_stats@This is a multiple of the chunk size, and is at least as large as
-@roff_stats@.Dq stats.active .
-@roff_stats@@roff_swap@This does not include inactive chunks backed by swap
-@roff_stats@@roff_swap@files.
-@roff_stats@@roff_dss@This does not include inactive chunks embedded in the DSS.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.chunks.current (size_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Total number of chunks actively mapped on behalf of the application.
-@roff_stats@@roff_swap@This does not include inactive chunks backed by swap
-@roff_stats@@roff_swap@files.
-@roff_stats@@roff_dss@This does not include inactive chunks embedded in the DSS.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.chunks.total (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of chunks allocated.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.chunks.high (size_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Maximum number of active chunks at any time thus far.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.huge.allocated (size_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Number of bytes currently allocated by huge objects.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.huge.nmalloc (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of huge allocation requests.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.huge.ndalloc (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of huge deallocation requests.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "stats.arenas.<i>.pactive (size_t) r-"
-.Bd -ragged -offset indent -compact
-Number of pages in active runs.
-.Ed
-.\"-----------------------------------------------------------------------------
-.It Sy "stats.arenas.<i>.pdirty (size_t) r-"
-.Bd -ragged -offset indent -compact
-Number of pages within unused runs that are potentially dirty, and for which
-.Fn madvise "..." "MADV_DONTNEED"
-has not been called.
-.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.mapped (size_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Number of mapped bytes.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.npurge (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Number of dirty page purge sweeps performed.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.nmadvise (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Number of
-@roff_stats@.Fn madvise "..." "MADV_DONTNEED"
-@roff_stats@calls made to purge dirty pages.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.npurged (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Number of pages purged.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.small.allocated (size_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Number of bytes currently allocated by small objects.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.small.nmalloc (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of allocation requests served by small bins.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.small.ndalloc (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of small objects returned to bins.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.small.nrequests (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of small allocation requests.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.large.allocated (size_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Number of bytes currently allocated by large objects.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.large.nmalloc (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of large allocation requests served directly by
-@roff_stats@the arena.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.large.ndalloc (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of large deallocation requests served directly by
-@roff_stats@the arena.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.large.nrequests (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of large allocation requests.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.allocated (size_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Current number of bytes allocated by bin.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.nmalloc (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of allocations served by bin.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.ndalloc (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of allocations returned to bin.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.nrequests (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of allocation requests.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@@roff_tcache@.It Sy "stats.arenas.<i>.bins.<j>.nfills (uint64_t) r-"
-@roff_stats@@roff_tcache@.Bd -ragged -offset indent -compact
-@roff_stats@@roff_tcache@Cumulative number of tcache fills.
-@roff_stats@@roff_tcache@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@@roff_tcache@.It Sy "stats.arenas.<i>.bins.<j>.nflushes (uint64_t) r-"
-@roff_stats@@roff_tcache@.Bd -ragged -offset indent -compact
-@roff_stats@@roff_tcache@Cumulative number of tcache flushes.
-@roff_stats@@roff_tcache@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.nruns (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of runs created.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.nreruns (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of times the current run from which to allocate
-@roff_stats@changed.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.highruns (size_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Maximum number of runs at any time thus far.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.curruns (size_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Current number of runs.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.lruns.<j>.nmalloc (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of allocation requests for this size class served
-@roff_stats@directly by the arena.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.lruns.<j>.ndalloc (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of deallocation requests for this size class
-@roff_stats@served directly by the arena.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.lruns.<j>.nrequests (uint64_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Cumulative number of allocation requests for this size class.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.lruns.<j>.highruns (size_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Maximum number of runs at any time thus far for this size class.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@.It Sy "stats.arenas.<i>.lruns.<j>.curruns (size_t) r-"
-@roff_stats@.Bd -ragged -offset indent -compact
-@roff_stats@Current number of runs for this size class.
-@roff_stats@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_stats@@roff_swap@.It Sy "swap.avail (size_t) r-"
-@roff_stats@@roff_swap@.Bd -ragged -offset indent -compact
-@roff_stats@@roff_swap@Number of swap file bytes that are currently not
-@roff_stats@@roff_swap@associated with any chunk (i.e. mapped, but otherwise
-@roff_stats@@roff_swap@completely unmanaged).
-@roff_stats@@roff_swap@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_swap@.It Sy "swap.prezeroed (bool) rw"
-@roff_swap@.Bd -ragged -offset indent -compact
-@roff_swap@If true, the allocator assumes that the swap file(s) contain nothing
-@roff_swap@but nil bytes.
-@roff_swap@If this assumption is violated, allocator behavior is undefined.
-@roff_swap@This value becomes read-only after
-@roff_swap@.Dq swap.fds
-@roff_swap@is successfully written to.
-@roff_swap@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_swap@.It Sy "swap.nfds (size_t) r-"
-@roff_swap@.Bd -ragged -offset indent -compact
-@roff_swap@Number of file descriptors in use for swap.
-@roff_swap@.Ed
-.\"-----------------------------------------------------------------------------
-@roff_swap@.It Sy "swap.fds (int *) r-"
-@roff_swap@.Bd -ragged -offset indent -compact
-@roff_swap@When written to, the files associated with the specified file
-@roff_swap@descriptors are contiguously mapped via
-@roff_swap@.Xr mmap 2 .
-@roff_swap@The resulting virtual memory region is preferred over anonymous
-@roff_swap@.Xr mmap 2
-@roff_swap@@roff_dss@and
-@roff_swap@@roff_dss@.Xr sbrk 2
-@roff_swap@memory.
-@roff_swap@Note that if a file's size is not a multiple of the page size, it is
-@roff_swap@automatically truncated to the nearest page size multiple.
-@roff_swap@See the
-@roff_swap@.Dq swap.prezeroed
-@roff_swap@interface for specifying that the files are pre-zeroed.
-@roff_swap@.Ed
-.\"-----------------------------------------------------------------------------
-.El
-.Sh DEBUGGING MALLOC PROBLEMS
-Start by setting the
-.Dq opt.abort
-option, which forces a coredump (if possible) at the first sign of trouble,
-rather than the normal policy of trying to continue if at all possible.
-.Pp
-It is probably also a good idea to recompile the program with suitable
-options and symbols for debugger support.
-.Pp
-@roff_fill@If the program starts to give unusual results, coredump or generally
-@roff_fill@behave differently without emitting any of the messages mentioned in
-@roff_fill@the next section, it is likely because the program depends on the
-@roff_fill@storage being filled with zero bytes.
-@roff_fill@Try running it with the
-@roff_fill@.Dq opt.zero
-@roff_fill@option set;
-@roff_fill@if that improves the situation, this diagnosis has been confirmed.
-@roff_fill@If the program still misbehaves,
-@roff_fill@the likely problem is accessing memory outside the allocated area.
-@roff_fill@.Pp
-@roff_fill@Alternatively, if the symptoms are not easy to reproduce, setting the
-@roff_fill@.Dq opt.junk
-@roff_fill@option may help provoke the problem.
-@roff_fill@.Pp
-This implementation does not provide much detail about the problems it detects,
-because the performance impact for storing such information would be
-prohibitive.
-There are a number of allocator implementations available on the Internet
-which focus on detecting and pinpointing problems by trading performance for
-extra sanity checks and detailed diagnostics.
-.Sh DIAGNOSTIC MESSAGES
-If any of the memory allocation/deallocation functions detect an error or
-warning condition, a message will be printed to file descriptor
-.Dv STDERR_FILENO .
-Errors will result in the process dumping core.
-If the
-.Dq opt.abort
-option is set, most warnings are treated as errors.
-.Pp
-The
-.Va @jemalloc_prefix@malloc_message
-variable allows the programmer to override the function which emits the text
-strings forming the errors and warnings if for some reason the
-.Dv STDERR_FILENO
-file descriptor is not suitable for this.
-.Va @jemalloc_prefix@malloc_message
-takes the
-.Fa cbopaque
-pointer argument that is
-.Dv NULL
-unless overridden by the arguments in a call to
-.Fn @jemalloc_prefix@malloc_stats_print ,
-followed by a string pointer.
-Please note that doing anything which tries to allocate memory in this function
-is likely to result in a crash or deadlock.
-.Pp
-All messages are prefixed by
-.Dq <jemalloc>: .
-.Sh RETURN VALUES
-.Ss Standard API
-The
-.Fn @jemalloc_prefix@malloc
-and
-.Fn @jemalloc_prefix@calloc
-functions return a pointer to the allocated memory if successful; otherwise
-a
-.Dv NULL
-pointer is returned and
-.Va errno
-is set to
-.Er ENOMEM .
-.Pp
-The
-.Fn @jemalloc_prefix@posix_memalign
-function returns the value 0 if successful; otherwise it returns an error value.
-The
-.Fn @jemalloc_prefix@posix_memalign
-function will fail if:
-.Bl -tag -width Er
-.It Bq Er EINVAL
-The
-.Fa alignment
-parameter is not a power of 2 at least as large as
-.Fn sizeof "void *" .
-.It Bq Er ENOMEM
-Memory allocation error.
-.El
-.Pp
-The
-.Fn @jemalloc_prefix@realloc
-function returns a pointer, possibly identical to
-.Fa ptr ,
-to the allocated memory
-if successful; otherwise a
-.Dv NULL
-pointer is returned, and
-.Va errno
-is set to
-.Er ENOMEM
-if the error was the result of an allocation failure.
-The
-.Fn @jemalloc_prefix@realloc
-function always leaves the original buffer intact
-when an error occurs.
-.Pp
-The
-.Fn @jemalloc_prefix@free
-function returns no value.
-.Ss Non-standard API
-The
-.Fn @jemalloc_prefix@malloc_usable_size
-function returns the usable size of the allocation pointed to by
-.Fa ptr .
-.Pp
-The
-.Fn @jemalloc_prefix@mallctl ,
-.Fn @jemalloc_prefix@mallctlnametomib ,
-and
-.Fn @jemalloc_prefix@mallctlbymib
-functions return 0 on success; otherwise they return an error value.
-The functions will fail if:
-.Bl -tag -width Er
-.It Bq Er EINVAL
-.Fa newp
-is
-.Dv non-NULL ,
-and
-.Fa newlen
-is too large or too small.
-Alternatively,
-.Fa *oldlenp
-is too large or too small; in this case as much data as possible are read
-despite the error.
-.It Bq Er ENOMEM
-.Fa *oldlenp
-is too short to hold the requested value.
-.It Bq Er ENOENT
-.Fa name
-or
-.Fa mib
-specifies an unknown/invalid value.
-.It Bq Er EPERM
-Attempt to read or write void value, or attempt to write read-only value.
-.It Bq Er EAGAIN
-A memory allocation failure occurred.
-.It Bq Er EFAULT
-An interface with side effects failed in some way not directly related to
-.Fn @jemalloc_prefix@mallctl*
-read/write processing.
-.El
-.Ss Experimental API
-The
-.Fn @jemalloc_prefix@allocm ,
-.Fn @jemalloc_prefix@rallocm ,
-.Fn @jemalloc_prefix@sallocm ,
-and
-.Fn @jemalloc_prefix@dallocm
-functions return
-.Dv ALLOCM_SUCCESS
-on success; otherwise they return an error value.
-The
-.Fn @jemalloc_prefix@allocm
-and
-.Fn @jemalloc_prefix@rallocm
-functions will fail if:
-.Bl -tag -width ".Bq Er ALLOCM_ERR_OOM"
-.It Bq Er ALLOCM_ERR_OOM
-Out of memory.
-Insufficient contiguous memory was available to service the allocation request.
-The
-.Fn @jemalloc_prefix@allocm
-function additionally sets
-.Fa *ptr
-to
-.Dv NULL ,
-whereas the
-.Fn @jemalloc_prefix@rallocm
-function leaves
-.Fa *ptr
-unmodified.
-.El
-.Pp
-The
-.Fn @jemalloc_prefix@rallocm
-function will also fail if:
-.Bl -tag -width ".Bq Er ALLOCM_ERR_NOT_MOVED"
-.It Bq Er ALLOCM_ERR_NOT_MOVED
-.Dv ALLOCM_NO_MOVE
-was specified, but the reallocation request could not be serviced without
-moving the object.
-.El
-.Sh ENVIRONMENT
-The following environment variable affects the execution of the allocation
-functions:
-.Bl -tag -width ".Ev @jemalloc_cprefix@MALLOC_CONF"
-.It Ev @jemalloc_cprefix@MALLOC_CONF
-If the environment variable
-.Ev @jemalloc_cprefix@MALLOC_CONF
-is set, the characters it contains will be interpreted as options.
-.El
-.Sh EXAMPLES
-To dump core whenever a problem occurs:
-.Pp
-.Bd -literal -offset indent
-ln -s 'abort:true' /etc/@jemalloc_prefix@malloc.conf
-.Ed
-.Pp
-To specify in the source a chunk size that is 16 MiB:
-.Bd -literal -offset indent
-@jemalloc_prefix@malloc_conf = "lg_chunk:24";
-.Ed
-.Sh SEE ALSO
-.Xr madvise 2 ,
-.Xr mmap 2 ,
-@roff_dss@.Xr sbrk 2 ,
-.Xr alloca 3 ,
-.Xr atexit 3 ,
-.Xr getpagesize 3
-.Sh STANDARDS
-The
-.Fn @jemalloc_prefix@malloc ,
-.Fn @jemalloc_prefix@calloc ,
-.Fn @jemalloc_prefix@realloc
-and
-.Fn @jemalloc_prefix@free
-functions conform to
-.St -isoC .
-.Pp
-The
-.Fn @jemalloc_prefix@posix_memalign
-function conforms to
-.St -p1003.1-2001 .
diff --git a/jemalloc/doc/jemalloc.xml.in b/jemalloc/doc/jemalloc.xml.in
new file mode 100644
index 0000000..97893c1
--- /dev/null
+++ b/jemalloc/doc/jemalloc.xml.in
@@ -0,0 +1,2251 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<?xml-stylesheet type="text/xsl"
+        href="http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.4//EN"
+        "http://www.oasis-open.org/docbook/xml/4.4/docbookx.dtd" [
+]>
+
+<refentry>
+  <refentryinfo>
+    <title>User Manual</title>
+    <productname>jemalloc</productname>
+    <releaseinfo role="version">@jemalloc_version@</releaseinfo>
+    <authorgroup>
+      <author>
+        <firstname>Jason</firstname>
+        <surname>Evans</surname>
+        <personblurb>Author</personblurb>
+      </author>
+    </authorgroup>
+  </refentryinfo>
+  <refmeta>
+    <refentrytitle>JEMALLOC</refentrytitle>
+    <manvolnum>3</manvolnum>
+  </refmeta>
+  <refnamediv>
+    <refdescriptor>jemalloc</refdescriptor>
+    <refname>jemalloc</refname>
+    <!-- Each refname causes a man page file to be created.  Only if this were
+         the system malloc(3) implementation would these files be appropriate.
+    <refname>malloc</refname>
+    <refname>calloc</refname>
+    <refname>posix_memalign</refname>
+    <refname>realloc</refname>
+    <refname>free</refname>
+    <refname>malloc_usable_size</refname>
+    <refname>malloc_stats_print</refname>
+    <refname>mallctl</refname>
+    <refname>mallctlnametomib</refname>
+    <refname>mallctlbymib</refname>
+    <refname>allocm</refname>
+    <refname>rallocm</refname>
+    <refname>sallocm</refname>
+    <refname>dallocm</refname>
+    -->
+    <refpurpose>general purpose memory allocation functions</refpurpose>
+  </refnamediv>
+  <refsect1 id="library">
+    <title>LIBRARY</title>
+    <para>This manual describes jemalloc @jemalloc_version@.  More information
+    can be found at the <ulink
+    url="http://www.canonware.com/jemalloc/">jemalloc website</ulink>.</para>
+  </refsect1>
+  <refsynopsisdiv>
+    <title>SYNOPSIS</title>
+    <funcsynopsis>
+      <funcsynopsisinfo>#include &lt;<filename class="headerfile">stdlib.h</filename>&gt;
+#include &lt;<filename class="headerfile">jemalloc/jemalloc.h</filename>&gt;</funcsynopsisinfo>
+      <refsect2>
+        <title>Standard API</title>
+        <funcprototype>
+          <funcdef>void *<function>malloc</function></funcdef>
+          <paramdef>size_t <parameter>size</parameter></paramdef>
+        </funcprototype>
+        <funcprototype>
+          <funcdef>void *<function>calloc</function></funcdef>
+          <paramdef>size_t <parameter>number</parameter></paramdef>
+          <paramdef>size_t <parameter>size</parameter></paramdef>
+        </funcprototype>
+        <funcprototype>
+          <funcdef>int <function>posix_memalign</function></funcdef>
+          <paramdef>void **<parameter>ptr</parameter></paramdef>
+          <paramdef>size_t <parameter>alignment</parameter></paramdef>
+          <paramdef>size_t <parameter>size</parameter></paramdef>
+        </funcprototype>
+        <funcprototype>
+          <funcdef>void *<function>realloc</function></funcdef>
+          <paramdef>void *<parameter>ptr</parameter></paramdef>
+          <paramdef>size_t <parameter>size</parameter></paramdef>
+        </funcprototype>
+        <funcprototype>
+          <funcdef>void <function>free</function></funcdef>
+          <paramdef>void *<parameter>ptr</parameter></paramdef>
+        </funcprototype>
+      </refsect2>
+      <refsect2>
+        <title>Non-standard API</title>
+        <funcprototype>
+          <funcdef>size_t <function>malloc_usable_size</function></funcdef>
+          <paramdef>const void *<parameter>ptr</parameter></paramdef>
+        </funcprototype>
+        <funcprototype>
+          <funcdef>void <function>malloc_stats_print</function></funcdef>
+          <paramdef>void <parameter>(*write_cb)</parameter>
+            <funcparams>void *, const char *</funcparams>
+          </paramdef>
+          <paramdef>void *<parameter>cbopaque</parameter></paramdef>
+          <paramdef>const char *<parameter>opts</parameter></paramdef>
+        </funcprototype>
+        <funcprototype>
+          <funcdef>int <function>mallctl</function></funcdef>
+          <paramdef>const char *<parameter>name</parameter></paramdef>
+          <paramdef>void *<parameter>oldp</parameter></paramdef>
+          <paramdef>size_t *<parameter>oldlenp</parameter></paramdef>
+          <paramdef>void *<parameter>newp</parameter></paramdef>
+          <paramdef>size_t <parameter>newlen</parameter></paramdef>
+        </funcprototype>
+        <funcprototype>
+          <funcdef>int <function>mallctlnametomib</function></funcdef>
+          <paramdef>const char *<parameter>name</parameter></paramdef>
+          <paramdef>size_t *<parameter>mibp</parameter></paramdef>
+          <paramdef>size_t *<parameter>miblenp</parameter></paramdef>
+        </funcprototype>
+        <funcprototype>
+          <funcdef>int <function>mallctlbymib</function></funcdef>
+          <paramdef>const size_t *<parameter>mib</parameter></paramdef>
+          <paramdef>size_t <parameter>miblen</parameter></paramdef>
+          <paramdef>void *<parameter>oldp</parameter></paramdef>
+          <paramdef>size_t *<parameter>oldlenp</parameter></paramdef>
+          <paramdef>void *<parameter>newp</parameter></paramdef>
+          <paramdef>size_t <parameter>newlen</parameter></paramdef>
+        </funcprototype>
+        <funcprototype>
+          <funcdef>void <function>(*malloc_message)</function></funcdef>
+          <paramdef>void *<parameter>cbopaque</parameter></paramdef>
+          <paramdef>const char *<parameter>s</parameter></paramdef>
+        </funcprototype>
+        <para><type>const char *</type><varname>malloc_conf</varname>;</para>
+      </refsect2>
+      <refsect2>
+      <title>Experimental API</title>
+        <funcprototype>
+          <funcdef>int <function>allocm</function></funcdef>
+          <paramdef>void **<parameter>ptr</parameter></paramdef>
+          <paramdef>size_t *<parameter>rsize</parameter></paramdef>
+          <paramdef>size_t <parameter>size</parameter></paramdef>
+          <paramdef>int <parameter>flags</parameter></paramdef>
+        </funcprototype>
+        <funcprototype>
+          <funcdef>int <function>rallocm</function></funcdef>
+          <paramdef>void **<parameter>ptr</parameter></paramdef>
+          <paramdef>size_t *<parameter>rsize</parameter></paramdef>
+          <paramdef>size_t <parameter>size</parameter></paramdef>
+          <paramdef>size_t <parameter>extra</parameter></paramdef>
+          <paramdef>int <parameter>flags</parameter></paramdef>
+        </funcprototype>
+        <funcprototype>
+          <funcdef>int <function>sallocm</function></funcdef>
+          <paramdef>const void *<parameter>ptr</parameter></paramdef>
+          <paramdef>size_t *<parameter>rsize</parameter></paramdef>
+          <paramdef>int <parameter>flags</parameter></paramdef>
+        </funcprototype>
+        <funcprototype>
+          <funcdef>int <function>dallocm</function></funcdef>
+          <paramdef>void *<parameter>ptr</parameter></paramdef>
+          <paramdef>int <parameter>flags</parameter></paramdef>
+        </funcprototype>
+      </refsect2>
+    </funcsynopsis>
+  </refsynopsisdiv>
+  <refsect1 id="description">
+    <title>DESCRIPTION</title>
+    <refsect2>
+      <title>Standard API</title>
+
+      <para>The <function>malloc<parameter/></function> function allocates
+      <parameter>size</parameter> bytes of uninitialized memory.  The allocated
+      space is suitably aligned (after possible pointer coercion) for storage
+      of any type of object.</para>
+
+      <para>The <function>calloc<parameter/></function> function allocates
+      space for <parameter>number</parameter> objects, each
+      <parameter>size</parameter> bytes in length.  The result is identical to
+      calling <function>malloc<parameter/></function> with an argument of
+      <parameter>number</parameter> * <parameter>size</parameter>, with the
+      exception that the allocated memory is explicitly initialized to zero
+      bytes.</para>
+
+      <para>The <function>posix_memalign<parameter/></function> function
+      allocates <parameter>size</parameter> bytes of memory such that the
+      allocation's base address is an even multiple of
+      <parameter>alignment</parameter>, and returns the allocation in the value
+      pointed to by <parameter>ptr</parameter>.  The requested
+      <parameter>alignment</parameter> must be a power of 2 at least as large
+      as <code language="C">sizeof(<type>void *</type>)</code>.</para>
+
+      <para>The <function>realloc<parameter/></function> function changes the
+      size of the previously allocated memory referenced by
+      <parameter>ptr</parameter> to <parameter>size</parameter> bytes.  The
+      contents of the memory are unchanged up to the lesser of the new and old
+      sizes.  If the new size is larger, the contents of the newly allocated
+      portion of the memory are undefined.  Upon success, the memory referenced
+      by <parameter>ptr</parameter> is freed and a pointer to the newly
+      allocated memory is returned.  Note that
+      <function>realloc<parameter/></function> may move the memory allocation,
+      resulting in a different return value than <parameter>ptr</parameter>.
+      If <parameter>ptr</parameter> is <constant>NULL</constant>, the
+      <function>realloc<parameter/></function> function behaves identically to
+      <function>malloc<parameter/></function> for the specified size.</para>
+
+      <para>The <function>free<parameter/></function> function causes the
+      allocated memory referenced by <parameter>ptr</parameter> to be made
+      available for future allocations.  If <parameter>ptr</parameter> is
+      <constant>NULL</constant>, no action occurs.</para>
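+
+      <para>By way of illustration, the following minimal sketch exercises the
+      standard API (error handling abbreviated): <programlisting
+      language="C"><![CDATA[
+void *p, *q;
+
+/* Alignment must be a power of 2 at least as large as sizeof(void *). */
+if (posix_memalign(&p, 64, 1024) != 0)
+	abort();
+
+/* realloc() may move the allocation, so capture the new pointer. */
+q = realloc(p, 4096);
+if (q == NULL)
+	abort();	/* On failure, the original buffer remains intact. */
+p = q;
+
+free(p);]]></programlisting></para>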
+    </refsect2>
+    <refsect2>
+      <title>Non-standard API</title>
+
+      <para>The <function>malloc_usable_size<parameter/></function> function
+      returns the usable size of the allocation pointed to by
+      <parameter>ptr</parameter>.  The return value may be larger than the size
+      that was requested during allocation.  The
+      <function>malloc_usable_size<parameter/></function> function is not a
+      mechanism for in-place <function>realloc<parameter/></function>; rather
+      it is provided solely as a tool for introspection purposes.  Any
+      discrepancy between the requested allocation size and the size reported
+      by <function>malloc_usable_size<parameter/></function> should not be
+      depended on, since such behavior is entirely implementation-dependent.
+      </para>
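+
+      <para>As a minimal sketch, the reported usable size may exceed the
+      requested size: <programlisting
+      language="C"><![CDATA[
+void *p = malloc(100);
+size_t usable;
+
+if (p != NULL)
+	usable = malloc_usable_size(p);	/* >= 100; exact value is implementation-dependent. */]]></programlisting></para>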
+
+      <para>The <function>malloc_stats_print<parameter/></function> function
+      writes human-readable summary statistics via the
+      <parameter>write_cb</parameter> callback function pointer and
+      <parameter>cbopaque</parameter> data passed to
+      <parameter>write_cb</parameter>, or
+      <function>malloc_message<parameter/></function> if
+      <parameter>write_cb</parameter> is <constant>NULL</constant>.  This
+      function can be called repeatedly.  General information that never
+      changes during execution can be omitted by specifying &ldquo;g&rdquo; as
+      a character within the <parameter>opts</parameter> string.  Note that
+      <function>malloc_stats_print<parameter/></function> uses the
+      <function>mallctl*<parameter/></function> functions internally, so
+      inconsistent statistics can be reported if multiple threads use these
+      functions simultaneously.  If <option>--enable-stats</option> is
+      specified during configuration, &ldquo;m&rdquo; and &ldquo;a&rdquo; can
+      be specified to omit merged arena and per arena statistics, respectively;
+      &ldquo;b&rdquo; and &ldquo;l&rdquo; can be specified to omit per size
+      class statistics for bins and large objects, respectively.  Unrecognized
+      characters are silently ignored.  Note that thread caching may prevent
+      some statistics from being completely up to date, since extra locking
+      would be required to merge counters that track thread cache operations.
+      </para>
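+
+      <para>As a minimal usage sketch: <programlisting
+      language="C"><![CDATA[
+/* Full report, written via malloc_message() (stderr by default). */
+malloc_stats_print(NULL, NULL, NULL);
+
+/* Omit general information and, if statistics are enabled, per size class
+ * bin/large detail; unrecognized option characters are ignored. */
+malloc_stats_print(NULL, NULL, "gbl");]]></programlisting></para>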
+
+      <para>The <function>mallctl<parameter/></function> function provides a
+      general interface for introspecting the memory allocator, as well as
+      setting modifiable parameters and triggering actions.  The
+      period-separated <parameter>name</parameter> argument specifies a
+      location in a tree-structured namespace; see the <xref
+      linkend="mallctl_namespace" xrefstyle="template:%t"/> section for
+      documentation on the tree contents.  To read a value, pass a pointer via
+      <parameter>oldp</parameter> to adequate space to contain the value, and a
+      pointer to its length via <parameter>oldlenp</parameter>; otherwise pass
+      <constant>NULL</constant> and <constant>NULL</constant>.  Similarly, to
+      write a value, pass a pointer to the value via
+      <parameter>newp</parameter>, and its length via
+      <parameter>newlen</parameter>; otherwise pass <constant>NULL</constant>
+      and <constant>0</constant>.</para>
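+
+      <para>For example, a minimal sketch that reads two values from the
+      namespace documented in the <xref linkend="mallctl_namespace"
+      xrefstyle="template:%t"/> section: <programlisting
+      language="C"><![CDATA[
+const char *version;
+unsigned narenas;
+size_t sz;
+
+/* Read a string-valued control. */
+sz = sizeof(version);
+mallctl("version", &version, &sz, NULL, 0);
+
+/* Read an unsigned-valued control. */
+sz = sizeof(narenas);
+mallctl("arenas.narenas", &narenas, &sz, NULL, 0);]]></programlisting></para>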
+
+      <para>The <function>mallctlnametomib<parameter/></function> function
+      provides a way to avoid repeated name lookups for applications that
+      repeatedly query the same portion of the namespace, by translating a name
+      to a &ldquo;Management Information Base&rdquo; (MIB) that can be passed
+      repeatedly to <function>mallctlbymib<parameter/></function>.  Upon
+      successful return from <function>mallctlnametomib<parameter/></function>,
+      <parameter>mibp</parameter> contains an array of
+      <parameter>*miblenp</parameter> integers, where
+      <parameter>*miblenp</parameter> is the lesser of the number of components
+      in <parameter>name</parameter> and the input value of
+      <parameter>*miblenp</parameter>.  Thus it is possible to pass a
+      <parameter>*miblenp</parameter> that is smaller than the number of
+      period-separated name components, which results in a partial MIB that can
+      be used as the basis for constructing a complete MIB.  For name
+      components that are integers (e.g. the 2 in
+      <link
+      linkend="arenas.bin.i.size"><mallctl>arenas.bin.2.size</mallctl></link>),
+      the corresponding MIB component will always be that integer.  Therefore,
+      it is legitimate to construct code like the following: <programlisting
+      language="C"><![CDATA[
+unsigned nbins, i;
+
+size_t mib[4];
+size_t len, miblen;
+
+len = sizeof(nbins);
+mallctl("arenas.nbins", &nbins, &len, NULL, 0);
+
+miblen = 4;
+mallctlnametomib("arenas.bin.0.size", mib, &miblen);
+for (i = 0; i < nbins; i++) {
+	size_t bin_size;
+
+	mib[2] = i;
+	len = sizeof(bin_size);
+	mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
+	/* Do something with bin_size... */
+}]]></programlisting></para>
+    </refsect2>
+    <refsect2>
+      <title>Experimental API</title>
+      <para>The experimental API is subject to change or removal without regard
+      for backward compatibility.</para>
+
+      <para>The <function>allocm<parameter/></function>,
+      <function>rallocm<parameter/></function>,
+      <function>sallocm<parameter/></function>, and
+      <function>dallocm<parameter/></function> functions all have a
+      <parameter>flags</parameter> argument that can be used to specify
+      options.  The functions only check the options that are contextually
+      relevant.  Use bitwise or (<code language="C">|</code>) operations to
+      specify one or more of the following:
+        <variablelist>
+          <varlistentry>
+            <term><constant>ALLOCM_LG_ALIGN(<parameter>la</parameter>)
+            </constant></term>
+
+            <listitem><para>Align the memory allocation to start at an address
+            that is a multiple of <code language="C">(1 &lt;&lt;
+            <parameter>la</parameter>)</code>.  This macro does not validate
+            that <parameter>la</parameter> is within the valid
+            range.</para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><constant>ALLOCM_ALIGN(<parameter>a</parameter>)
+            </constant></term>
+
+            <listitem><para>Align the memory allocation to start at an address
+            that is a multiple of <parameter>a</parameter>, where
+            <parameter>a</parameter> is a power of two.  This macro does not
+            validate that <parameter>a</parameter> is a power of 2.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><constant>ALLOCM_ZERO</constant></term>
+
+            <listitem><para>Initialize newly allocated memory to contain zero
+            bytes.  In the growing reallocation case, the real size prior to
+            reallocation defines the boundary between untouched bytes and those
+            that are initialized to contain zero bytes.  If this option is
+            absent, newly allocated memory is uninitialized.</para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><constant>ALLOCM_NO_MOVE</constant></term>
+
+            <listitem><para>For reallocation, fail rather than moving the
+            object.  This constraint can apply to both growth and
+            shrinkage.</para></listitem>
+          </varlistentry>
+        </variablelist>
+      </para>
+
+      <para>The <function>allocm<parameter/></function> function allocates at
+      least <parameter>size</parameter> bytes of memory, sets
+      <parameter>*ptr</parameter> to the base address of the allocation, and
+      sets <parameter>*rsize</parameter> to the real size of the allocation if
+      <parameter>rsize</parameter> is not <constant>NULL</constant>.</para>
+
+      <para>The <function>rallocm<parameter/></function> function resizes the
+      allocation at <parameter>*ptr</parameter> to be at least
+      <parameter>size</parameter> bytes, sets <parameter>*ptr</parameter> to
+      the base address of the allocation if it moved, and sets
+      <parameter>*rsize</parameter> to the real size of the allocation if
+      <parameter>rsize</parameter> is not <constant>NULL</constant>.  If
+      <parameter>extra</parameter> is non-zero, an attempt is made to resize
+      the allocation to be at least <code
+      language="C"><parameter>size</parameter> +
+      <parameter>extra</parameter>)</code> bytes, though inability to allocate
+      the extra byte(s) will not by itself result in failure.  Behavior is
+      undefined if <code language="C">(<parameter>size</parameter> +
+      <parameter>extra</parameter> &gt;
+      <constant>SIZE_T_MAX</constant>)</code>.</para>
+
+      <para>The <function>sallocm<parameter/></function> function sets
+      <parameter>*rsize</parameter> to the real size of the allocation.</para>
+
+      <para>The <function>dallocm<parameter/></function> function causes the
+      memory referenced by <parameter>ptr</parameter> to be made available for
+      future allocations.</para>
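+
+      <para>A minimal sketch of the experimental API, requesting a 64-byte
+      aligned, zeroed allocation and then attempting an in-place resize:
+      <programlisting language="C"><![CDATA[
+void *p;
+size_t rsize;
+
+if (allocm(&p, &rsize, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO) != ALLOCM_SUCCESS)
+	abort();
+
+/* Attempt to grow in place; fall back to a reallocation that may move. */
+if (rallocm(&p, &rsize, 8192, 0, ALLOCM_NO_MOVE) != ALLOCM_SUCCESS &&
+    rallocm(&p, &rsize, 8192, 0, 0) != ALLOCM_SUCCESS)
+	abort();
+
+dallocm(p, 0);]]></programlisting></para>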
+    </refsect2>
+  </refsect1>
+  <refsect1 id="tuning">
+    <title>TUNING</title>
+    <para>The allocator initializes its internals once, when the first call is
+    made to one of the memory allocation routines.  Initialization is based in
+    part on various options that can be specified at compile- or
+    run-time.</para>
+
+    <para>The string pointed to by the global variable
+    <varname>malloc_conf</varname>, the &ldquo;name&rdquo; of the file
+    referenced by the symbolic link named <filename
+    class="symlink">/etc/malloc.conf</filename>, and the value of the
+    environment variable <envar>MALLOC_CONF</envar>, will be interpreted, in
+    that order, from left to right as options.</para>
+
+    <para>An options string is a comma-separated list of option:value pairs.
+    There is one key corresponding to each <link
+    linkend="opt.abort"><mallctl>opt.*</mallctl></link> mallctl (see the <xref
+    linkend="mallctl_namespace" xrefstyle="template:%t"/> section for options
+    documentation).  For example, <literal>abort:true,narenas:1</literal> sets
+    the <link linkend="opt.abort"><mallctl>opt.abort</mallctl></link> and <link
+    linkend="opt.narenas"><mallctl>opt.narenas</mallctl></link> options.  Some
+    options have boolean values (true/false), others have integer values (base
+    8, 10, or 16, depending on prefix), and yet others have raw string
+    values.</para>
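+
+    <para>For example, option defaults can be compiled into a program by
+    defining the <varname>malloc_conf</varname> variable; a minimal sketch
+    using options documented below: <programlisting
+    language="C"><![CDATA[
+/* Read once, when the allocator initializes: 16 MiB chunks, abort on warning. */
+const char *malloc_conf = "lg_chunk:24,abort:true";]]></programlisting>
+    The same options could equally be supplied at run time via the
+    <envar>MALLOC_CONF</envar> environment variable.</para>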
+  </refsect1>
+  <refsect1 id="implementation_notes">
+    <title>IMPLEMENTATION NOTES</title>
+    <para>Traditionally, allocators have used
+    <citerefentry><refentrytitle>sbrk</refentrytitle>
+    <manvolnum>2</manvolnum></citerefentry> to obtain memory, which is
+    suboptimal for several reasons, including race conditions, increased
+    fragmentation, and artificial limitations on maximum usable memory.  If
+    <option>--enable-dss</option> is specified during configuration, this
+    allocator uses both <citerefentry><refentrytitle>sbrk</refentrytitle>
+    <manvolnum>2</manvolnum></citerefentry> and
+    <citerefentry><refentrytitle>mmap</refentrytitle>
+    <manvolnum>2</manvolnum></citerefentry>, in that order of preference;
+    otherwise only <citerefentry><refentrytitle>mmap</refentrytitle>
+    <manvolnum>2</manvolnum></citerefentry> is used.</para>
+
+    <para>This allocator uses multiple arenas in order to reduce lock
+    contention for threaded programs on multi-processor systems.  This works
+    well with regard to threading scalability, but incurs some costs.  There is
+    a small fixed per-arena overhead, and additionally, arenas manage memory
+    completely independently of each other, which means a small fixed increase
+    in overall memory fragmentation.  These overheads are not generally an
+    issue, given the number of arenas normally used.  Note that using
+    substantially more arenas than the default is not likely to improve
+    performance, mainly due to reduced cache performance.  However, it may make
+    sense to reduce the number of arenas if an application does not make much
+    use of the allocation functions.</para>
+
+    <para>In addition to multiple arenas, unless
+    <option>--disable-tcache</option> is specified during configuration, this
+    allocator supports thread-specific caching for small and large objects, in
+    order to make it possible to completely avoid synchronization for most
+    allocation requests.  Such caching allows very fast allocation in the
+    common case, but it increases memory usage and fragmentation, since a
+    bounded number of objects can remain allocated in each thread cache.</para>
+
+    <para>Memory is conceptually broken into equal-sized chunks, where the
+    chunk size is a power of two that is greater than the page size.  Chunks
+    are always aligned to multiples of the chunk size.  This alignment makes it
+    possible to find metadata for user objects very quickly.</para>
+
+    <para>User objects are broken into three categories according to size:
+    small, large, and huge.  Small objects are smaller than one page.  Large
+    objects are smaller than the chunk size.  Huge objects are a multiple of
+    the chunk size.  Small and large objects are managed by arenas; huge
+    objects are managed separately in a single data structure that is shared by
+    all threads.  Huge objects are used by applications infrequently enough
+    that this single data structure is not a scalability issue.</para>
+
+    <para>Each chunk that is managed by an arena tracks its contents as runs of
+    contiguous pages (unused, backing a set of small objects, or backing one
+    large object).  The combination of chunk alignment and chunk page maps
+    makes it possible to determine all metadata regarding small and large
+    allocations in constant time.</para>
+
+    <para>Small objects are managed in groups by page runs.  Each run maintains
+    a frontier and free list to track which regions are in use.  Unless
+    <option>--disable-tiny</option> is specified during configuration,
+    allocation requests that are no more than half the quantum (8 or 16,
+    depending on architecture) are rounded up to the nearest power of two that
+    is at least <code language="C">sizeof(<type>void *</type>)</code>.
+    Allocation requests that are more than half the quantum, but no more than
+    the minimum cacheline-multiple size class (see the <link
+    linkend="opt.lg_qspace_max"><mallctl>opt.lg_qspace_max</mallctl></link>
+    option) are rounded up to the nearest multiple of the quantum.  Allocation
+    requests that are more than the minimum cacheline-multiple size class, but
+    no more than the minimum subpage-multiple size class (see the <link
+    linkend="opt.lg_cspace_max"><mallctl>opt.lg_cspace_max</mallctl></link>
+    option) are rounded up to the nearest multiple of the cacheline size (64).
+    Allocation requests that are more than the minimum subpage-multiple size
+    class, but no more than the maximum subpage-multiple size class are rounded
+    up to the nearest multiple of the subpage size (256).  Allocation requests
+    that are more than the maximum subpage-multiple size class, but small
+    enough to fit in an arena-managed chunk (see the <link
+    linkend="opt.lg_chunk"><mallctl>opt.lg_chunk</mallctl></link> option), are
+    rounded up to the nearest run size.  Allocation requests that are too large
+    to fit in an arena-managed chunk are rounded up to the nearest multiple of
+    the chunk size.</para>
+
+    <para>Allocations are packed tightly together, which can be an issue for
+    multi-threaded applications.  If you need to ensure that allocations do not
+    suffer from cacheline sharing, round your allocation requests up to the
+    nearest multiple of the cacheline size, or specify cacheline alignment when
+    allocating.</para>
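+
+    <para>For example, a minimal sketch that gives a frequently written
+    structure its own cacheline by requesting cacheline alignment (64 bytes,
+    the cacheline size assumed in this section): <programlisting
+    language="C"><![CDATA[
+void *counters;
+
+/* A 64-byte aligned, 64-byte allocation cannot share its cacheline. */
+if (posix_memalign(&counters, 64, 64) != 0)
+	abort();]]></programlisting></para>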
+
+    <para>Assuming 4 MiB chunks, 4 KiB pages, and a 16-byte quantum on a 64-bit
+    system, the size classes in each category are as shown in <xref
+    linkend="size_classes" xrefstyle="template:Table %n"/>.</para>
+
+    <table xml:id="size_classes" frame="all">
+      <title>Size classes</title>
+      <tgroup cols="3" align="left" colsep="1" rowsep="1">
+      <colspec colname="c1"/>
+      <colspec colname="c2"/>
+      <colspec colname="c3"/>
+      <thead>
+        <row>
+          <entry>Category</entry>
+          <entry>Subcategory</entry>
+          <entry>Size</entry>
+        </row>
+      </thead>
+      <tbody>
+        <row>
+          <entry morerows="3">Small</entry>
+          <entry>Tiny</entry>
+          <entry>[8]</entry>
+        </row>
+        <row>
+          <entry>Quantum-spaced</entry>
+          <entry>[16, 32, 48, ..., 128]</entry>
+        </row>
+        <row>
+          <entry>Cacheline-spaced</entry>
+          <entry>[192, 256, 320, ..., 512]</entry>
+        </row>
+        <row>
+          <entry>Subpage-spaced</entry>
+          <entry>[768, 1024, 1280, ..., 3840]</entry>
+        </row>
+        <row>
+          <entry namest="c1" nameend="c2">Large</entry>
+          <entry>[4 KiB, 8 KiB, 12 KiB, ..., 4072 KiB]</entry>
+        </row>
+        <row>
+          <entry namest="c1" nameend="c2">Huge</entry>
+          <entry>[4 MiB, 8 MiB, 12 MiB, ...]</entry>
+        </row>
+      </tbody>
+      </tgroup>
+    </table>
+  </refsect1>
+  <refsect1 id="mallctl_namespace">
+    <title>MALLCTL NAMESPACE</title>
+    <para>The following names are defined in the namespace accessible via the
+    <function>mallctl*<parameter/></function> functions.  Value types are
+    specified in parentheses, their readable/writable statuses are encoded as
+    <literal>rw</literal>, <literal>r-</literal>, <literal>-w</literal>, or
+    <literal>--</literal>, and required build configuration flags follow, if
+    any.  A name element encoded as <literal>&lt;i&gt;</literal> or
+    <literal>&lt;j&gt;</literal> indicates an integer component, where the
+    integer varies from 0 to some upper value that must be determined via
+    introspection.  In the case of <mallctl>stats.arenas.&lt;i&gt;.*</mallctl>,
+    <literal>&lt;i&gt;</literal> equal to <link
+    linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link> can be
+    used to access the summation of statistics from all arenas.  Take special
+    note of the <link linkend="epoch"><mallctl>epoch</mallctl></link> mallctl,
+    which controls refreshing of cached dynamic statistics.</para>
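+
+    <para>In particular, a minimal sketch of refreshing cached statistics
+    before reading them: <programlisting
+    language="C"><![CDATA[
+uint64_t epoch = 1;
+size_t sz = sizeof(epoch);
+
+/* Writing any value advances the epoch and refreshes cached statistics. */
+mallctl("epoch", &epoch, &sz, &epoch, sz);]]></programlisting></para>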
+
+    <variablelist>
+      <varlistentry>
+        <term>
+          <mallctl>version</mallctl>
+          (<type>const char *</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Return the jemalloc version string.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="epoch">
+        <term>
+          <mallctl>epoch</mallctl>
+          (<type>uint64_t</type>)
+          <literal>rw</literal>
+        </term>
+        <listitem><para>If a value is passed in, refresh the data from which
+        the <function>mallctl*<parameter/></function> functions report values,
+        and increment the epoch.  Return the current epoch.  This is useful for
+        detecting whether another thread caused a refresh.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>config.debug</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--enable-debug</option> was specified during
+        build configuration.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>config.dss</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--enable-dss</option> was specified during
+        build configuration.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>config.dynamic_page_shift</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--enable-dynamic-page-shift</option> was
+        specified during build configuration.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>config.fill</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--enable-fill</option> was specified during
+        build configuration.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>config.lazy_lock</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--enable-lazy-lock</option> was specified
+        during build configuration.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>config.prof</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--enable-prof</option> was specified during
+        build configuration.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>config.prof_libgcc</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--disable-prof-libgcc</option> was not
+        specified during build configuration.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>config.prof_libunwind</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--enable-prof-libunwind</option> was specified
+        during build configuration.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>config.stats</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--enable-stats</option> was specified during
+        build configuration.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>config.swap</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--enable-swap</option> was specified during
+        build configuration.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>config.sysv</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--enable-sysv</option> was specified during
+        build configuration.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>config.tcache</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--disable-tcache</option> was not specified
+        during build configuration.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>config.tiny</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--disable-tiny</option> was not specified
+        during build configuration.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>config.tls</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--disable-tls</option> was not specified during
+        build configuration.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>config.xmalloc</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para><option>--enable-xmalloc</option> was specified during
+        build configuration.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.abort">
+        <term>
+          <mallctl>opt.abort</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Abort-on-warning enabled/disabled.  If true, most
+        warnings are fatal.  The process will call
+        <citerefentry><refentrytitle>abort</refentrytitle>
+        <manvolnum>3</manvolnum></citerefentry> in these cases.  This option is
+        disabled by default unless <option>--enable-debug</option> is
+        specified during configuration, in which case it is enabled by default.
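+        For example, the current setting can be read via the
+        <function>mallctl<parameter/></function> function (a minimal sketch;
+        error checking omitted):
+        <programlisting language="C"><![CDATA[
+bool abort_on_warning;  /* Receives the value of opt.abort. */
+size_t sz = sizeof(abort_on_warning);
+
+/* Read-only query: no new value is supplied. */
+mallctl("opt.abort", &abort_on_warning, &sz, NULL, 0);]]></programlisting>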
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.lg_qspace_max">
+        <term>
+          <mallctl>opt.lg_qspace_max</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Size (log base 2) of the maximum size class that is a
+        multiple of the quantum (8 or 16 bytes, depending on architecture).
+        Above this size, cacheline spacing is used for size classes.  The
+        default value is 128 bytes (2^7).</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.lg_cspace_max">
+        <term>
+          <mallctl>opt.lg_cspace_max</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Size (log base 2) of the maximum size class that is a
+        multiple of the cacheline size (64).  Above this size, subpage spacing
+        (256 bytes) is used for size classes.  The default value is 512 bytes
+        (2^9).</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.lg_chunk">
+        <term>
+          <mallctl>opt.lg_chunk</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Virtual memory chunk size (log base 2).  The default
+        chunk size is 4 MiB (2^22).</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.narenas">
+        <term>
+          <mallctl>opt.narenas</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Maximum number of arenas to use.  The default maximum
+        number of arenas is four times the number of CPUs, or one if there is a
+        single CPU.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.lg_dirty_mult">
+        <term>
+          <mallctl>opt.lg_dirty_mult</mallctl>
+          (<type>ssize_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Per-arena minimum ratio (log base 2) of active to dirty
+        pages.  Some dirty unused pages may be allowed to accumulate, within
+        the limit set by the ratio (or one chunk worth of dirty pages,
+        whichever is greater), before informing the kernel about some of those
+        pages via <citerefentry><refentrytitle>madvise</refentrytitle>
+        <manvolnum>2</manvolnum></citerefentry> or a similar system call.  This
+        provides the kernel with sufficient information to recycle dirty pages
+        if physical memory becomes scarce and the pages remain unused.  The
+        default minimum ratio is 32:1 (2^5:1); an option value of -1 will
+        disable dirty page purging.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.stats_print">
+        <term>
+          <mallctl>opt.stats_print</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Enable/disable statistics printing at exit.  If
+        enabled, the <function>malloc_stats_print<parameter/></function>
+        function is called at program exit via an
+        <citerefentry><refentrytitle>atexit</refentrytitle>
+        <manvolnum>3</manvolnum></citerefentry> function.  If
+        <option>--enable-stats</option> is specified during configuration, this
+        has the potential to cause deadlock for a multi-threaded process that
+        exits while one or more threads are executing in the memory allocation
+        functions.  Therefore, this option should only be used with care; it is
+        primarily intended as a performance tuning aid during application
+        development.  This option is disabled by default.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.junk">
+        <term>
+          <mallctl>opt.junk</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+          [<option>--enable-fill</option>]
+        </term>
+        <listitem><para>Junk filling enabled/disabled.  If enabled, each byte
+        of uninitialized allocated memory will be initialized to
+        <literal>0xa5</literal>.  All deallocated memory will be initialized to
+        <literal>0x5a</literal>.  This is intended for debugging and will
+        impact performance negatively.  This option is disabled by default
+        unless <option>--enable-debug</option> is specified during
+        configuration, in which case it is enabled by default.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.zero">
+        <term>
+          <mallctl>opt.zero</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+          [<option>--enable-fill</option>]
+        </term>
+        <listitem><para>Zero filling enabled/disabled.  If enabled, each byte
+        of uninitialized allocated memory will be initialized to 0.  Note that
+        this initialization only happens once for each byte, so
+        <function>realloc<parameter/></function> and
+        <function>rallocm<parameter/></function> calls do not zero memory that
+        was previously allocated.  This is intended for debugging and will
+        impact performance negatively.  This option is disabled by default.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.sysv">
+        <term>
+          <mallctl>opt.sysv</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+          [<option>--enable-sysv</option>]
+        </term>
+        <listitem><para>If enabled, attempting to allocate zero bytes will
+        return a <constant>NULL</constant> pointer instead of a valid pointer.
+        (The default behavior is to make a minimal allocation and return a
+        pointer to it.) This option is provided for System V compatibility.
+        This option is incompatible with the <link
+        linkend="opt.xmalloc"><mallctl>opt.xmalloc</mallctl></link> option.
+        This option is disabled by default.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.xmalloc">
+        <term>
+          <mallctl>opt.xmalloc</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+          [<option>--enable-xmalloc</option>]
+        </term>
+        <listitem><para>Abort-on-out-of-memory enabled/disabled.  If enabled,
+        rather than returning failure for any allocation function, display a
+        diagnostic message on <constant>STDERR_FILENO</constant> and cause the
+        program to drop core (using
+        <citerefentry><refentrytitle>abort</refentrytitle>
+        <manvolnum>3</manvolnum></citerefentry>).  If an application is
+        designed to depend on this behavior, set the option at compile time by
+        including the following in the source code:
+        <programlisting language="C"><![CDATA[
+malloc_conf = "xmalloc:true";]]></programlisting>
+        This option is disabled by default.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.tcache">
+        <term>
+          <mallctl>opt.tcache</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+          [<option>--enable-tcache</option>]
+        </term>
+        <listitem><para>Thread-specific caching enabled/disabled.  When there
+        are multiple threads, each thread uses a thread-specific cache for
+        objects up to a certain size.  Thread-specific caching allows many
+        allocations to be satisfied without performing any thread
+        synchronization, at the cost of increased memory use.  See the
+        <link
+        linkend="opt.lg_tcache_gc_sweep"><mallctl>opt.lg_tcache_gc_sweep</mallctl></link>
+        and <link
+        linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link>
+        options for related tuning information.  This option is enabled by
+        default.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.lg_tcache_gc_sweep">
+        <term>
+          <mallctl>opt.lg_tcache_gc_sweep</mallctl>
+          (<type>ssize_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-tcache</option>]
+        </term>
+        <listitem><para>Approximate interval (log base 2) between full
+        thread-specific cache garbage collection sweeps, counted in terms of
+        thread-specific cache allocation/deallocation events.  Garbage
+        collection is actually performed incrementally, one size class at a
+        time, in order to avoid large collection pauses.  The default sweep
+        interval is 8192 (2^13); setting this option to -1 will disable garbage
+        collection.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.lg_tcache_max">
+        <term>
+          <mallctl>opt.lg_tcache_max</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-tcache</option>]
+        </term>
+        <listitem><para>Maximum size class (log base 2) to cache in the
+        thread-specific cache.  At a minimum, all small size classes are
+        cached, and at a maximum all large size classes are cached.  The
+        default maximum is 32 KiB (2^15).</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.prof">
+        <term>
+          <mallctl>opt.prof</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+          [<option>--enable-prof</option>]
+        </term>
+        <listitem><para>Memory profiling enabled/disabled.  If enabled, profile
+        memory allocation activity, and use an
+        <citerefentry><refentrytitle>atexit</refentrytitle>
+        <manvolnum>3</manvolnum></citerefentry> function to dump final memory
+        usage to a file named according to the pattern
+        <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.f.heap</filename>,
+        where <literal>&lt;prefix&gt;</literal> is controlled by the <link
+        linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+        option.  See the <link
+        linkend="opt.lg_prof_bt_max"><mallctl>opt.lg_prof_bt_max</mallctl></link>
+        option for backtrace depth control.  See the <link
+        linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link>
+        option for on-the-fly activation/deactivation.  See the <link
+        linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>
+        option for probabilistic sampling control.  See the <link
+        linkend="opt.prof_accum"><mallctl>opt.prof_accum</mallctl></link>
+        option for control of cumulative sample reporting.  See the <link
+        linkend="opt.lg_prof_tcmax"><mallctl>opt.lg_prof_tcmax</mallctl></link>
+        option for control of per thread backtrace caching.  See the <link
+        linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link>
+        option for information on interval-triggered profile dumping, and the
+        <link linkend="opt.prof_gdump"><mallctl>opt.prof_gdump</mallctl></link>
+        option for information on high-water-triggered profile dumping.
+        Profile output is compatible with the included <command>pprof</command>
+        Perl script, which originates from the <ulink
+        url="http://code.google.com/p/google-perftools/">google-perftools
+        package</ulink>.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.prof_prefix">
+        <term>
+          <mallctl>opt.prof_prefix</mallctl>
+          (<type>const char *</type>)
+          <literal>r-</literal>
+          [<option>--enable-prof</option>]
+        </term>
+        <listitem><para>Filename prefix for profile dumps.  If the prefix is
+        set to the empty string, no automatic dumps will occur; this is
+        primarily useful for disabling the automatic final heap dump (which
+        also disables leak reporting, if enabled).  The default prefix is
+        <filename>jeprof</filename>.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.lg_prof_bt_max">
+        <term>
+          <mallctl>opt.lg_prof_bt_max</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-prof</option>]
+        </term>
+        <listitem><para>Maximum backtrace depth (log base 2) when profiling
+        memory allocation activity.  The default is 128 (2^7).</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.prof_active">
+        <term>
+          <mallctl>opt.prof_active</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+          [<option>--enable-prof</option>]
+        </term>
+        <listitem><para>Profiling activated/deactivated.  This is a secondary
+        control mechanism that makes it possible to start the application with
+        profiling enabled (see the <link
+        linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option) but
+        inactive, then toggle profiling at any time during program execution
+        with the <link
+        linkend="prof.active"><mallctl>prof.active</mallctl></link> mallctl.
+        This option is enabled by default.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.lg_prof_sample">
+        <term>
+          <mallctl>opt.lg_prof_sample</mallctl>
+          (<type>ssize_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-prof</option>]
+        </term>
+        <listitem><para>Average interval (log base 2) between allocation
+        samples, as measured in bytes of allocation activity.  Increasing the
+        sampling interval decreases profile fidelity, but also decreases the
+        computational overhead.  The default sample interval is 1 (2^0) (i.e.
+        all allocations are sampled).</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.prof_accum">
+        <term>
+          <mallctl>opt.prof_accum</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+          [<option>--enable-prof</option>]
+        </term>
+        <listitem><para>Reporting of cumulative object/byte counts in profile
+        dumps enabled/disabled.  If this option is enabled, every unique
+        backtrace must be stored for the duration of execution.  Depending on
+        the application, this can impose a large memory overhead, and the
+        cumulative counts are not always of interest.  See the
+        <link
+        linkend="opt.lg_prof_tcmax"><mallctl>opt.lg_prof_tcmax</mallctl></link>
+        option for control of per thread backtrace caching, which has important
+        interactions.  This option is enabled by default.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.lg_prof_tcmax">
+        <term>
+          <mallctl>opt.lg_prof_tcmax</mallctl>
+          (<type>ssize_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-prof</option>]
+        </term>
+        <listitem><para>Maximum per thread backtrace cache (log base 2) used
+        for heap profiling.  A backtrace can only be discarded if the
+        <link linkend="opt.prof_accum"><mallctl>opt.prof_accum</mallctl></link>
+        option is disabled, and no thread caches currently refer to the
+        backtrace.  Therefore, a backtrace cache limit should be imposed if the
+        intention is to limit how much memory is used by backtraces.  By
+        default, no limit is imposed (encoded as -1).
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.lg_prof_interval">
+        <term>
+          <mallctl>opt.lg_prof_interval</mallctl>
+          (<type>ssize_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-prof</option>]
+        </term>
+        <listitem><para>Average interval (log base 2) between memory profile
+        dumps, as measured in bytes of allocation activity.  The actual
+        interval between dumps may be sporadic because decentralized allocation
+        counters are used to avoid synchronization bottlenecks.  Profiles are
+        dumped to files named according to the pattern
+        <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.i&lt;iseq&gt;.heap</filename>,
+        where <literal>&lt;prefix&gt;</literal> is controlled by the
+        <link
+        linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+        option.  By default, interval-triggered profile dumping is disabled
+        (encoded as -1).
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.prof_gdump">
+        <term>
+          <mallctl>opt.prof_gdump</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+          [<option>--enable-prof</option>]
+        </term>
+        <listitem><para>Trigger a memory profile dump every time the total
+        virtual memory exceeds the previous maximum.  Profiles are dumped to
+        files named according to the pattern
+        <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</filename>,
+        where <literal>&lt;prefix&gt;</literal> is controlled by the <link
+        linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+        option.  This option is disabled by default.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.prof_leak">
+        <term>
+          <mallctl>opt.prof_leak</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+          [<option>--enable-prof</option>]
+        </term>
+        <listitem><para>Leak reporting enabled/disabled.  If enabled, use an
+        <citerefentry><refentrytitle>atexit</refentrytitle>
+        <manvolnum>3</manvolnum></citerefentry> function to report memory leaks
+        detected by allocation sampling.  See the
+        <link
+        linkend="opt.lg_prof_bt_max"><mallctl>opt.lg_prof_bt_max</mallctl></link>
+        option for backtrace depth control.  See the
+        <link linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option for
+        information on analyzing heap profile output.  This option is disabled
+        by default.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="opt.overcommit">
+        <term>
+          <mallctl>opt.overcommit</mallctl>
+          (<type>bool</type>)
+          <literal>r-</literal>
+          [<option>--enable-swap</option>]
+        </term>
+        <listitem><para>Over-commit enabled/disabled.  If enabled, over-commit
+        memory as a side effect of using anonymous
+        <citerefentry><refentrytitle>mmap</refentrytitle>
+        <manvolnum>2</manvolnum></citerefentry> or
+        <citerefentry><refentrytitle>sbrk</refentrytitle>
+        <manvolnum>2</manvolnum></citerefentry> for virtual memory allocation.
+        In order for overcommit to be disabled, the <link
+        linkend="swap.fds"><mallctl>swap.fds</mallctl></link> mallctl must have
+        been successfully written to.  This option is enabled by
+        default.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>tcache.flush</mallctl>
+          (<type>void</type>)
+          <literal>--</literal>
+          [<option>--enable-tcache</option>]
+        </term>
+        <listitem><para>Flush calling thread's tcache.  This interface releases
+        all cached objects and internal data structures associated with the
+        calling thread's thread-specific cache.  Ordinarily, this interface
+        need not be called, since automatic periodic incremental garbage
+        collection occurs, and the thread cache is automatically discarded when
+        a thread exits.  However, garbage collection is triggered by allocation
+        activity, so it is possible for a thread that stops
+        allocating/deallocating to retain its cache indefinitely, in which case
+        the developer may find manual flushing useful.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>thread.arena</mallctl>
+          (<type>unsigned</type>)
+          <literal>rw</literal>
+        </term>
+        <listitem><para>Get or set the arena associated with the calling
+        thread.  The arena index must be less than the maximum number of arenas
+        (see the <link
+        linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>
+        mallctl).  If the specified arena was not initialized beforehand (see
+        the <link
+        linkend="arenas.initialized"><mallctl>arenas.initialized</mallctl></link>
+        mallctl), it will be automatically initialized as a side effect of
+        calling this interface.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="thread.allocated">
+        <term>
+          <mallctl>thread.allocated</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Get the total number of bytes ever allocated by the
+        calling thread.  This counter has the potential to wrap around; it is
+        up to the application to appropriately interpret the counter in such
+        cases.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>thread.allocatedp</mallctl>
+          (<type>uint64_t *</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Get a pointer to the value that is returned by the
+        <link
+        linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link>
+        mallctl.  This is useful for avoiding the overhead of repeated
+        <function>mallctl*<parameter/></function> calls.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="thread.deallocated">
+        <term>
+          <mallctl>thread.deallocated</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Get the total number of bytes ever deallocated by the
+        calling thread.  This counter has the potential to wrap around; it is
+        up to the application to appropriately interpret the counter in such
+        cases.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>thread.deallocatedp</mallctl>
+          (<type>uint64_t *</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Get a pointer to the value that is returned by the
+        <link
+        linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link>
+        mallctl.  This is useful for avoiding the overhead of repeated
+        <function>mallctl*<parameter/></function> calls.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="arenas.narenas">
+        <term>
+          <mallctl>arenas.narenas</mallctl>
+          (<type>unsigned</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Maximum number of arenas.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="arenas.initialized">
+        <term>
+          <mallctl>arenas.initialized</mallctl>
+          (<type>bool *</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>An array of <link
+        linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>
+        booleans.  Each boolean indicates whether the corresponding arena is
+        initialized.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.quantum</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Quantum size.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.cacheline</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Assumed cacheline size.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.subpage</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Subpage size class interval.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.pagesize</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Page size.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.chunksize</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Chunk size.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.tspace_min</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Minimum tiny size class.  Tiny size classes are powers
+        of two.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.tspace_max</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Maximum tiny size class.  Tiny size classes are powers
+        of two.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.qspace_min</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Minimum quantum-spaced size class.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.qspace_max</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Maximum quantum-spaced size class.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.cspace_min</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Minimum cacheline-spaced size class.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.cspace_max</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Maximum cacheline-spaced size class.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.sspace_min</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Minimum subpage-spaced size class.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.sspace_max</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Maximum subpage-spaced size class.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.tcache_max</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-tcache</option>]
+        </term>
+        <listitem><para>Maximum thread-cached size class.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.ntbins</mallctl>
+          (<type>unsigned</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Number of tiny bin size classes.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.nqbins</mallctl>
+          (<type>unsigned</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Number of quantum-spaced bin size
+        classes.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.ncbins</mallctl>
+          (<type>unsigned</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Number of cacheline-spaced bin size
+        classes.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.nsbins</mallctl>
+          (<type>unsigned</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Number of subpage-spaced bin size
+        classes.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.nbins</mallctl>
+          (<type>unsigned</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Total number of bin size classes.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.nhbins</mallctl>
+          (<type>unsigned</type>)
+          <literal>r-</literal>
+          [<option>--enable-tcache</option>]
+        </term>
+        <listitem><para>Total number of thread cache bin size
+        classes.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="arenas.bin.i.size">
+        <term>
+          <mallctl>arenas.bin.&lt;i&gt;.size</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Maximum size supported by size class.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.bin.&lt;i&gt;.nregs</mallctl>
+          (<type>uint32_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Number of regions per page run.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.bin.&lt;i&gt;.run_size</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Number of bytes per page run.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.nlruns</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Total number of large size classes.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.lrun.&lt;i&gt;.size</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Maximum size supported by this large size
+        class.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>arenas.purge</mallctl>
+          (<type>unsigned</type>)
+          <literal>-w</literal>
+        </term>
+        <listitem><para>Purge unused dirty pages for the specified arena, or
+        for all arenas if none is specified.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="prof.active">
+        <term>
+          <mallctl>prof.active</mallctl>
+          (<type>bool</type>)
+          <literal>rw</literal>
+          [<option>--enable-prof</option>]
+        </term>
+        <listitem><para>Control whether sampling is currently active.  See the
+        <link
+        linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link>
+        option for additional information.
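+        For example, sampling can be deactivated at run time as follows (a
+        minimal sketch):
+        <programlisting language="C"><![CDATA[
+bool active = false;
+
+/* Write-only use of mallctl(): pass the new value via newp/newlen. */
+mallctl("prof.active", NULL, NULL, &active, sizeof(active));]]></programlisting>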
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>prof.dump</mallctl>
+          (<type>const char *</type>)
+          <literal>-w</literal>
+          [<option>--enable-prof</option>]
+        </term>
+        <listitem><para>Dump a memory profile to the specified file, or if NULL
+        is specified, to a file according to the pattern
+        <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.m&lt;mseq&gt;.heap</filename>,
+        where <literal>&lt;prefix&gt;</literal> is controlled by the
+        <link
+        linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+        option.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>prof.interval</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-prof</option>]
+        </term>
+        <listitem><para>Average number of bytes allocated between
+        interval-based profile dumps.  See the
+        <link
+        linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link>
+        option for additional information.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="stats.allocated">
+        <term>
+          <mallctl>stats.allocated</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Total number of bytes allocated by the
+        application.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="stats.active">
+        <term>
+          <mallctl>stats.active</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Total number of bytes in active pages allocated by the
+        application.  This is a multiple of the page size, and greater than or
+        equal to <link
+        linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link>.
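+        Because the <function>mallctl*<parameter/></function> functions report
+        values as of the most recent <link
+        linkend="epoch"><mallctl>epoch</mallctl></link> refresh, a typical
+        reader first writes the epoch, as in the following sketch (assuming
+        <option>--enable-stats</option>; error checking omitted):
+        <programlisting language="C"><![CDATA[
+uint64_t epoch = 1;
+size_t usz = sizeof(epoch);
+size_t allocated, active, sz = sizeof(size_t);
+
+/* Refresh the data from which statistics are reported. */
+mallctl("epoch", &epoch, &usz, &epoch, sizeof(epoch));
+/* Read the refreshed totals. */
+mallctl("stats.allocated", &allocated, &sz, NULL, 0);
+mallctl("stats.active", &active, &sz, NULL, 0);]]></programlisting>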
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.mapped</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Total number of bytes in chunks mapped on behalf of the
+        application.  This is a multiple of the chunk size, and is at least as
+        large as <link
+        linkend="stats.active"><mallctl>stats.active</mallctl></link>.  This
+        does not include inactive chunks backed by swap files, nor inactive
+        chunks embedded in the DSS.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.chunks.current</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Total number of chunks actively mapped on behalf of the
+        application.  This does not include inactive chunks backed by swap
+        files.  This does not include inactive chunks embedded in the DSS.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.chunks.total</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of chunks allocated.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.chunks.high</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Maximum number of active chunks at any time thus far.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.huge.allocated</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Number of bytes currently allocated by huge objects.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.huge.nmalloc</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of huge allocation requests.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.huge.ndalloc</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of huge deallocation requests.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.pactive</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Number of pages in active runs.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Number of pages within unused runs that are potentially
+        dirty, and for which <function>madvise<parameter>...</parameter>
+        <parameter><constant>MADV_DONTNEED</constant></parameter></function> or
+        similar has not been called.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.mapped</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Number of mapped bytes.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.npurge</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Number of dirty page purge sweeps performed.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.nmadvise</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Number of <function>madvise<parameter>...</parameter>
+        <parameter><constant>MADV_DONTNEED</constant></parameter></function> or
+        similar calls made to purge dirty pages.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.npurged</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Number of pages purged.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.small.allocated</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Number of bytes currently allocated by small objects.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.small.nmalloc</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of allocation requests served by
+        small bins.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.small.ndalloc</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of small objects returned to bins.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.small.nrequests</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of small allocation requests.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.large.allocated</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Number of bytes currently allocated by large objects.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.large.nmalloc</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of large allocation requests served
+        directly by the arena.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.large.ndalloc</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of large deallocation requests served
+        directly by the arena.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.large.nrequests</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of large allocation requests.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.allocated</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Current number of bytes allocated by
+        bin.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of allocations served by bin.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.ndalloc</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of allocations returned to bin.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nrequests</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of allocation
+        requests.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option> <option>--enable-tcache</option>]
+        </term>
+        <listitem><para>Cumulative number of tcache fills.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nflushes</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option> <option>--enable-tcache</option>]
+        </term>
+        <listitem><para>Cumulative number of tcache flushes.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nruns</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of runs created.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nreruns</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of times the current run from which
+        to allocate changed.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.highruns</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Maximum number of runs at any time thus far.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curruns</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Current number of runs.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nmalloc</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of allocation requests for this size
+        class served directly by the arena.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.ndalloc</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of deallocation requests for this
+        size class served directly by the arena.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nrequests</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of allocation requests for this size
+        class.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.highruns</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Maximum number of runs at any time thus far for this
+        size class.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.curruns</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Current number of runs for this size class.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>swap.avail</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option> <option>--enable-swap</option>]
+        </term>
+        <listitem><para>Number of swap file bytes that are currently not
+        associated with any chunk (i.e. mapped, but otherwise completely
+        unmanaged).</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="swap.prezeroed">
+        <term>
+          <mallctl>swap.prezeroed</mallctl>
+          (<type>bool</type>)
+          <literal>rw</literal>
+          [<option>--enable-swap</option>]
+        </term>
+        <listitem><para>If true, the allocator assumes that the swap file(s)
+        contain nothing but nil bytes.  If this assumption is violated,
+        allocator behavior is undefined.  This value becomes read-only after
+        <link linkend="swap.fds"><mallctl>swap.fds</mallctl></link> is
+        successfully written to.</para></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>
+          <mallctl>swap.nfds</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-swap</option>]
+        </term>
+        <listitem><para>Number of file descriptors in use for swap.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry id="swap.fds">
+        <term>
+          <mallctl>swap.fds</mallctl>
+          (<type>int *</type>)
+          <literal>r-</literal>
+          [<option>--enable-swap</option>]
+        </term>
+        <listitem><para>When written to, the files associated with the
+        specified file descriptors are contiguously mapped via
+        <citerefentry><refentrytitle>mmap</refentrytitle>
+        <manvolnum>2</manvolnum></citerefentry>.  The resulting virtual memory
+        region is preferred over anonymous
+        <citerefentry><refentrytitle>mmap</refentrytitle>
+        <manvolnum>2</manvolnum></citerefentry> and
+        <citerefentry><refentrytitle>sbrk</refentrytitle>
+        <manvolnum>2</manvolnum></citerefentry> memory.  Note that if a file's
+        size is not a multiple of the page size, it is automatically truncated
+        to the nearest page size multiple.  See the
+        <link linkend="swap.prezeroed"><mallctl>swap.prezeroed</mallctl></link>
+        mallctl for specifying that the files are pre-zeroed.</para></listitem>
+      </varlistentry>
+    </variablelist>
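+
+    <para>As a further example, the per thread counters reported by the <link
+    linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link> and
+    <link
+    linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link>
+    mallctls can be polled cheaply by caching the pointers published by
+    <mallctl>thread.allocatedp</mallctl> and
+    <mallctl>thread.deallocatedp</mallctl>, so that subsequent reads avoid
+    <function>mallctl*<parameter/></function> overhead.  The following sketch
+    assumes a build with <option>--enable-stats</option>:
+    <programlisting language="C"><![CDATA[
+uint64_t *allocatedp, *deallocatedp;
+size_t psz = sizeof(uint64_t *);
+
+/* Fetch the pointers once; they remain valid for the calling thread. */
+mallctl("thread.allocatedp", &allocatedp, &psz, NULL, 0);
+mallctl("thread.deallocatedp", &deallocatedp, &psz, NULL, 0);
+
+/* ...later, read the counters without further mallctl() calls... */
+uint64_t net_bytes = *allocatedp - *deallocatedp;]]></programlisting>
+    </para>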
+  </refsect1>
+  <refsect1 id="debugging_malloc_problems">
+    <title>DEBUGGING MALLOC PROBLEMS</title>
+    <para>When debugging, it is a good idea to configure/build jemalloc with
+    the <option>--enable-debug</option> and <option>--enable-fill</option>
+    options, and recompile the program with suitable options and symbols for
+    debugger support.  When so configured, jemalloc incorporates a wide variety
+    of run-time assertions that catch application errors such as double-free,
+    write-after-free, etc.</para>
+
+    <para>Programs often accidentally depend on &ldquo;uninitialized&rdquo;
+    memory actually being filled with zero bytes.  Junk filling
+    (see the <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link>
+    option) tends to expose such bugs in the form of obviously incorrect
+    results and/or coredumps.  Conversely, zero
+    filling (see the <link
+    linkend="opt.zero"><mallctl>opt.zero</mallctl></link> option) eliminates
+    the symptoms of such bugs.  Between these two options, it is usually
+    possible to quickly detect, diagnose, and eliminate such bugs.</para>
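+
+    <para>For example, assuming a build configured with
+    <option>--enable-fill</option>, junk filling can be hard-wired into a
+    debug build by defining the <varname>malloc_conf</varname> string in the
+    application source, in the same manner as the <link
+    linkend="opt.xmalloc"><mallctl>opt.xmalloc</mallctl></link> example:
+    <programlisting language="C"><![CDATA[
+malloc_conf = "junk:true";]]></programlisting>
+    Substituting <literal>zero:true</literal> selects zero filling
+    instead.</para>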
+
+    <para>This implementation does not provide much detail about the problems
+    it detects, because the performance impact for storing such information
+    would be prohibitive.  There are a number of allocator implementations
+    available on the Internet which focus on detecting and pinpointing problems
+    by trading performance for extra sanity checks and detailed
+    diagnostics.</para>
+  </refsect1>
+  <refsect1 id="diagnostic_messages">
+    <title>DIAGNOSTIC MESSAGES</title>
+    <para>If any of the memory allocation/deallocation functions detect an
+    error or warning condition, a message will be printed to file descriptor
+    <constant>STDERR_FILENO</constant>.  Errors will result in the process
+    dumping core.  If the <link
+    linkend="opt.abort"><mallctl>opt.abort</mallctl></link> option is set, most
+    warnings are treated as errors.</para>
+
+    <para>The <varname>malloc_message</varname> variable allows the programmer
+    to override the function which emits the text strings forming the errors
+    and warnings if for some reason the <constant>STDERR_FILENO</constant> file
+    descriptor is not suitable for this.
+    <function>malloc_message<parameter/></function> takes the
+    <parameter>cbopaque</parameter> pointer argument, which is
+    <constant>NULL</constant> unless overridden by the arguments in a call to
+    <function>malloc_stats_print<parameter/></function>, followed by a string
+    pointer.  Please note that doing anything which tries to allocate memory in
+    this function is likely to result in a crash or deadlock.</para>
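+
+    <para>For example, a program for which <constant>STDERR_FILENO</constant>
+    is unsuitable might install a hypothetical callback along the following
+    lines, which writes messages to an application-managed log descriptor
+    without allocating memory:
+    <programlisting language="C"><![CDATA[
+#include <string.h>
+#include <unistd.h>
+
+static int log_fd = -1;  /* Set by the application during startup. */
+
+static void
+log_malloc_message(void *cbopaque, const char *s)
+{
+    /* cbopaque is NULL except during malloc_stats_print() calls. */
+    (void)cbopaque;
+    /* write(2) allocates no memory, so it is safe to use here. */
+    write(log_fd, s, strlen(s));
+}]]></programlisting>
+    Assigning <varname>malloc_message</varname> to such a function early
+    during program startup replaces the default behavior of writing to
+    <constant>STDERR_FILENO</constant>.</para>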
+
+    <para>All messages are prefixed by
+    &ldquo;<computeroutput>&lt;jemalloc&gt;: </computeroutput>&rdquo;.</para>
+  </refsect1>
+  <refsect1 id="return_values">
+    <title>RETURN VALUES</title>
+    <refsect2>
+      <title>Standard API</title>
+      <para>The <function>malloc<parameter/></function> and
+      <function>calloc<parameter/></function> functions return a pointer to the
+      allocated memory if successful; otherwise a <constant>NULL</constant>
+      pointer is returned and <varname>errno</varname> is set to
+      <errorname>ENOMEM</errorname>.</para>
+
+      <para>The <function>posix_memalign<parameter/></function> function
+      returns the value 0 if successful; otherwise it returns an error value.
+      The <function>posix_memalign<parameter/></function> function will fail
+      if:
+        <variablelist>
+          <varlistentry>
+            <term><errorname>EINVAL</errorname></term>
+
+            <listitem><para>The <parameter>alignment</parameter> parameter is
+            not a power of 2 at least as large as
+            <code language="C">sizeof(<type>void *</type>)</code>.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><errorname>ENOMEM</errorname></term>
+
+            <listitem><para>Memory allocation error.</para></listitem>
+          </varlistentry>
+        </variablelist>
+      </para>
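+
+      <para>For example, a caller might check the
+      <function>posix_memalign<parameter/></function> return value as follows
+      (a minimal sketch):
+      <programlisting language="C"><![CDATA[
+void *ptr;
+int err = posix_memalign(&ptr, 64, 1024);  /* 64 is a power of 2 >= sizeof(void *). */
+
+if (err != 0) {
+    /* err is one of the error values listed above. */
+}]]></programlisting>
+      </para>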
+
+      <para>The <function>realloc<parameter/></function> function returns a
+      pointer, possibly identical to <parameter>ptr</parameter>, to the
+      allocated memory if successful; otherwise a <constant>NULL</constant>
+      pointer is returned, and <varname>errno</varname> is set to
+      <errorname>ENOMEM</errorname> if the error was the result of an
+      allocation failure.  The <function>realloc<parameter/></function>
+      function always leaves the original buffer intact when an error occurs.
+      </para>
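Because a failed realloc() leaves the original buffer intact, callers should assign through a temporary rather than overwrite their only pointer to the allocation.  A short sketch of that idiom:

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	char *buf = malloc(64);
	if (buf == NULL)
		return (1);

	/* Grow via a temporary so the original pointer survives failure. */
	char *tmp = realloc(buf, 4096);
	if (tmp == NULL) {
		/* buf is still valid and must eventually be freed. */
		perror("realloc");
		free(buf);
		return (1);
	}
	buf = tmp;
	free(buf);
	return (0);
}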
+
+      <para>The <function>free<parameter/></function> function returns no
+      value.</para>
+    </refsect2>
+    <refsect2>
+      <title>Non-standard API</title>
+      <para>The <function>malloc_usable_size<parameter/></function> function
+      returns the usable size of the allocation pointed to by
+      <parameter>ptr</parameter>.</para>
+
+      <para>The <function>mallctl<parameter/></function>,
+      <function>mallctlnametomib<parameter/></function>, and
+      <function>mallctlbymib<parameter/></function> functions return 0 on
+      success; otherwise they return an error value.  The functions will fail
+      if:
+        <variablelist>
+          <varlistentry>
+            <term><errorname>EINVAL</errorname></term>
+
+            <listitem><para><parameter>newp</parameter> is not
+            <constant>NULL</constant>, and <parameter>newlen</parameter> is too
+            large or too small.  Alternatively, <parameter>*oldlenp</parameter>
+            is too large or too small; in this case as much data as possible
+            is read despite the error.</para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><errorname>ENOMEM</errorname></term>
+
+            <listitem><para><parameter>*oldlenp</parameter> is too short to
+            hold the requested value.</para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><errorname>ENOENT</errorname></term>
+
+            <listitem><para><parameter>name</parameter> or
+            <parameter>mib</parameter> specifies an unknown/invalid
+            value.</para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><errorname>EPERM</errorname></term>
+
+            <listitem><para>An attempt was made to read or write a void value,
+            or to write a read-only value.</para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><errorname>EAGAIN</errorname></term>
+
+            <listitem><para>A memory allocation failure
+            occurred.</para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><errorname>EFAULT</errorname></term>
+
+            <listitem><para>An interface with side effects failed in some way
+            not directly related to <function>mallctl*<parameter/></function>
+            read/write processing.</para></listitem>
+          </varlistentry>
+        </variablelist>
+      </para>
+    </refsect2>
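A brief sketch of calling mallctl() and interpreting the error codes listed above.  It assumes an unprefixed build and the usual <jemalloc/jemalloc.h> header location, and reads the "arenas.narenas" value as an example:

#include <stdio.h>
#include <string.h>

#include <jemalloc/jemalloc.h>	/* Assumed header location. */

int
main(void)
{
	unsigned narenas;
	size_t sz = sizeof(narenas);
	int err = mallctl("arenas.narenas", &narenas, &sz, NULL, 0);

	if (err != 0) {
		/* err is one of the errno values documented above. */
		fprintf(stderr, "mallctl: %s\n", strerror(err));
		return (1);
	}
	printf("number of arenas: %u\n", narenas);
	return (0);
}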
+    <refsect2>
+      <title>Experimental API</title>
+      <para>The <function>allocm<parameter/></function>,
+      <function>rallocm<parameter/></function>,
+      <function>sallocm<parameter/></function>, and
+      <function>dallocm<parameter/></function> functions return
+      <constant>ALLOCM_SUCCESS</constant> on success; otherwise they return an
+      error value.  The <function>allocm<parameter/></function> and
+      <function>rallocm<parameter/></function> functions will fail if:
+        <variablelist>
+          <varlistentry>
+            <term><errorname>ALLOCM_ERR_OOM</errorname></term>
+
+            <listitem><para>Out of memory.  Insufficient contiguous memory was
+            available to service the allocation request.  The
+            <function>allocm<parameter/></function> function additionally sets
+            <parameter>*ptr</parameter> to <constant>NULL</constant>, whereas
+            the <function>rallocm<parameter/></function> function leaves
+            <parameter>*ptr</parameter> unmodified.</para></listitem>
+          </varlistentry>
+        </variablelist>
+      The <function>rallocm<parameter/></function> function will also
+      fail if:
+        <variablelist>
+          <varlistentry>
+            <term><errorname>ALLOCM_ERR_NOT_MOVED</errorname></term>
+
+            <listitem><para><constant>ALLOCM_NO_MOVE</constant> was specified,
+            but the reallocation request could not be serviced without moving
+            the object.</para></listitem>
+          </varlistentry>
+        </variablelist>
+      </para>
+    </refsect2>
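The sketch below exercises the error cases documented above, in particular ALLOCM_ERR_NOT_MOVED when ALLOCM_NO_MOVE is specified.  The argument order (ptr, rsize, size, extra, flags) is an assumption about the experimental prototypes, and the example presumes a build in which the experimental API and an unprefixed <jemalloc/jemalloc.h> header are available:

#include <stdio.h>

#include <jemalloc/jemalloc.h>	/* Assumes the experimental API is exposed. */

int
main(void)
{
	void *p;
	size_t rsize;

	if (allocm(&p, &rsize, 4096, 0) != ALLOCM_SUCCESS) {
		fprintf(stderr, "allocm failed\n");
		return (1);
	}

	/* Ask for in-place growth only; a required move is reported,
	 * not performed. */
	switch (rallocm(&p, &rsize, 8192, 0, ALLOCM_NO_MOVE)) {
	case ALLOCM_SUCCESS:
		printf("grown in place to %zu bytes\n", rsize);
		break;
	case ALLOCM_ERR_NOT_MOVED:
		printf("left in place at %zu bytes\n", rsize);
		break;
	case ALLOCM_ERR_OOM:
		fprintf(stderr, "out of memory\n");
		break;
	}

	dallocm(p, 0);
	return (0);
}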
+  </refsect1>
+  <refsect1 id="environment">
+    <title>ENVIRONMENT</title>
+    <para>The following environment variable affects the execution of the
+    allocation functions:
+      <variablelist>
+        <varlistentry>
+          <term><envar>MALLOC_CONF</envar></term>
+
+          <listitem><para>If the environment variable
+          <envar>MALLOC_CONF</envar> is set, the characters it contains
+          will be interpreted as options.</para></listitem>
+        </varlistentry>
+      </variablelist>
+    </para>
+  </refsect1>
+  <refsect1 id="examples">
+    <title>EXAMPLES</title>
+    <para>To dump core whenever a problem occurs:
+      <screen>ln -s 'abort:true' /etc/malloc.conf</screen>
+    </para>
+    <para>To specify in the source a chunk size that is 16 MiB:
+      <programlisting language="C"><![CDATA[
+malloc_conf = "lg_chunk:24";]]></programlisting></para>
+  </refsect1>
+  <refsect1 id="see_also">
+    <title>SEE ALSO</title>
+    <para><citerefentry><refentrytitle>madvise</refentrytitle>
+    <manvolnum>2</manvolnum></citerefentry>,
+    <citerefentry><refentrytitle>mmap</refentrytitle>
+    <manvolnum>2</manvolnum></citerefentry>,
+    <citerefentry><refentrytitle>sbrk</refentrytitle>
+    <manvolnum>2</manvolnum></citerefentry>,
+    <citerefentry><refentrytitle>alloca</refentrytitle>
+    <manvolnum>3</manvolnum></citerefentry>,
+    <citerefentry><refentrytitle>atexit</refentrytitle>
+    <manvolnum>3</manvolnum></citerefentry>,
+    <citerefentry><refentrytitle>getpagesize</refentrytitle>
+    <manvolnum>3</manvolnum></citerefentry></para>
+  </refsect1>
+  <refsect1 id="standards">
+    <title>STANDARDS</title>
+    <para>The <function>malloc<parameter/></function>,
+    <function>calloc<parameter/></function>,
+    <function>realloc<parameter/></function>, and
+    <function>free<parameter/></function> functions conform to ISO/IEC
+    9899:1990 (&ldquo;ISO C90&rdquo;).</para>
+
+    <para>The <function>posix_memalign<parameter/></function> function conforms
+    to IEEE Std 1003.1-2001 (&ldquo;POSIX.1&rdquo;).</para>
+  </refsect1>
+</refentry>
diff --git a/jemalloc/doc/manpages.xsl.in b/jemalloc/doc/manpages.xsl.in
new file mode 100644
index 0000000..88b2626
--- /dev/null
+++ b/jemalloc/doc/manpages.xsl.in
@@ -0,0 +1,4 @@
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+  <xsl:import href="@XSLROOT@/manpages/docbook.xsl"/>
+  <xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/>
+</xsl:stylesheet>
diff --git a/jemalloc/doc/stylesheet.xsl b/jemalloc/doc/stylesheet.xsl
new file mode 100644
index 0000000..4e334a8
--- /dev/null
+++ b/jemalloc/doc/stylesheet.xsl
@@ -0,0 +1,7 @@
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+  <xsl:param name="funcsynopsis.style">ansi</xsl:param>
+  <xsl:param name="function.parens" select="1"/>
+  <xsl:template match="mallctl">
+    "<xsl:call-template name="inline.monoseq"/>"
+  </xsl:template>
+</xsl:stylesheet>
diff --git a/jemalloc/include/jemalloc/internal/chunk_dss.h b/jemalloc/include/jemalloc/internal/chunk_dss.h
index 6be4ad1..6f00522 100644
--- a/jemalloc/include/jemalloc/internal/chunk_dss.h
+++ b/jemalloc/include/jemalloc/internal/chunk_dss.h
@@ -17,6 +17,7 @@
 extern malloc_mutex_t	dss_mtx;
 
 void	*chunk_alloc_dss(size_t size, bool *zero);
+bool	chunk_in_dss(void *chunk);
 bool	chunk_dealloc_dss(void *chunk, size_t size);
 bool	chunk_dss_boot(void);
 
diff --git a/jemalloc/include/jemalloc/internal/chunk_swap.h b/jemalloc/include/jemalloc/internal/chunk_swap.h
index d50cb19..9faa739 100644
--- a/jemalloc/include/jemalloc/internal/chunk_swap.h
+++ b/jemalloc/include/jemalloc/internal/chunk_swap.h
@@ -20,6 +20,7 @@
 #endif
 
 void	*chunk_alloc_swap(size_t size, bool *zero);
+bool	chunk_in_swap(void *chunk);
 bool	chunk_dealloc_swap(void *chunk, size_t size);
 bool	chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed);
 bool	chunk_swap_boot(void);
diff --git a/jemalloc/include/jemalloc/internal/huge.h b/jemalloc/include/jemalloc/internal/huge.h
index bf23127..66544cf 100644
--- a/jemalloc/include/jemalloc/internal/huge.h
+++ b/jemalloc/include/jemalloc/internal/huge.h
@@ -25,7 +25,7 @@
     size_t extra);
 void	*huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
     size_t alignment, bool zero);
-void	huge_dalloc(void *ptr);
+void	huge_dalloc(void *ptr, bool unmap);
 size_t	huge_salloc(const void *ptr);
 #ifdef JEMALLOC_PROF
 prof_ctx_t	*huge_prof_ctx_get(const void *ptr);
diff --git a/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in b/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
index 3d25300..0680b43 100644
--- a/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
@@ -666,7 +666,7 @@
 	if (chunk != ptr)
 		arena_dalloc(chunk->arena, chunk, ptr);
 	else
-		huge_dalloc(ptr);
+		huge_dalloc(ptr, true);
 }
 
 JEMALLOC_INLINE void *
diff --git a/jemalloc/include/jemalloc/jemalloc_defs.h.in b/jemalloc/include/jemalloc/jemalloc_defs.h.in
index b8f3f36..5f46c5c 100644
--- a/jemalloc/include/jemalloc/jemalloc_defs.h.in
+++ b/jemalloc/include/jemalloc/jemalloc_defs.h.in
@@ -115,6 +115,9 @@
 #undef JEMALLOC_ZONE
 #undef JEMALLOC_ZONE_VERSION
 
+/* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). */
+#undef JEMALLOC_MREMAP_FIXED
+
 /*
  * Methods for purging unused pages differ between operating systems.
  *
diff --git a/jemalloc/src/chunk.c b/jemalloc/src/chunk.c
index 00bf50a..301519e 100644
--- a/jemalloc/src/chunk.c
+++ b/jemalloc/src/chunk.c
@@ -146,11 +146,6 @@
 	chunksize_mask = chunksize - 1;
 	chunk_npages = (chunksize >> PAGE_SHIFT);
 
-#ifdef JEMALLOC_IVSALLOC
-	chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
-	if (chunks_rtree == NULL)
-		return (true);
-#endif
 #if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 	if (malloc_mutex_init(&chunks_mtx))
 		return (true);
@@ -166,6 +161,11 @@
 	if (chunk_dss_boot())
 		return (true);
 #endif
+#ifdef JEMALLOC_IVSALLOC
+	chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
+	if (chunks_rtree == NULL)
+		return (true);
+#endif
 
 	return (false);
 }
diff --git a/jemalloc/src/chunk_dss.c b/jemalloc/src/chunk_dss.c
index d9bd63c..5c0e290 100644
--- a/jemalloc/src/chunk_dss.c
+++ b/jemalloc/src/chunk_dss.c
@@ -200,6 +200,22 @@
 }
 
 bool
+chunk_in_dss(void *chunk)
+{
+	bool ret;
+
+	malloc_mutex_lock(&dss_mtx);
+	if ((uintptr_t)chunk >= (uintptr_t)dss_base
+	    && (uintptr_t)chunk < (uintptr_t)dss_max)
+		ret = true;
+	else
+		ret = false;
+	malloc_mutex_unlock(&dss_mtx);
+
+	return (ret);
+}
+
+bool
 chunk_dealloc_dss(void *chunk, size_t size)
 {
 	bool ret;
diff --git a/jemalloc/src/chunk_swap.c b/jemalloc/src/chunk_swap.c
index ee038ba..cb25ae0 100644
--- a/jemalloc/src/chunk_swap.c
+++ b/jemalloc/src/chunk_swap.c
@@ -185,6 +185,24 @@
 }
 
 bool
+chunk_in_swap(void *chunk)
+{
+	bool ret;
+
+	assert(swap_enabled);
+
+	malloc_mutex_lock(&swap_mtx);
+	if ((uintptr_t)chunk >= (uintptr_t)swap_base
+	    && (uintptr_t)chunk < (uintptr_t)swap_max)
+		ret = true;
+	else
+		ret = false;
+	malloc_mutex_unlock(&swap_mtx);
+
+	return (ret);
+}
+
+bool
 chunk_dealloc_swap(void *chunk, size_t size)
 {
 	bool ret;
@@ -219,15 +237,15 @@
 		} else
 			madvise(chunk, size, MADV_DONTNEED);
 
+#ifdef JEMALLOC_STATS
+		swap_avail += size;
+#endif
 		ret = false;
 		goto RETURN;
 	}
 
 	ret = true;
 RETURN:
-#ifdef JEMALLOC_STATS
-	swap_avail += size;
-#endif
 	malloc_mutex_unlock(&swap_mtx);
 	return (ret);
 }
diff --git a/jemalloc/src/ctl.c b/jemalloc/src/ctl.c
index c83ee4f..3c8adab 100644
--- a/jemalloc/src/ctl.c
+++ b/jemalloc/src/ctl.c
@@ -4,6 +4,13 @@
 /******************************************************************************/
 /* Data. */
 
+/*
+ * ctl_mtx protects the following:
+ * - ctl_stats.*
+ * - opt_prof_active
+ * - swap_enabled
+ * - swap_prezeroed
+ */
 static malloc_mutex_t	ctl_mtx;
 static bool		ctl_initialized;
 static uint64_t		ctl_epoch;
@@ -44,7 +51,9 @@
 CTL_PROTO(thread_arena)
 #ifdef JEMALLOC_STATS
 CTL_PROTO(thread_allocated)
+CTL_PROTO(thread_allocatedp)
 CTL_PROTO(thread_deallocated)
+CTL_PROTO(thread_deallocatedp)
 #endif
 CTL_PROTO(config_debug)
 CTL_PROTO(config_dss)
@@ -223,7 +232,9 @@
 #ifdef JEMALLOC_STATS
 	,
 	{NAME("allocated"),	CTL(thread_allocated)},
-	{NAME("deallocated"),	CTL(thread_deallocated)}
+	{NAME("allocatedp"),	CTL(thread_allocatedp)},
+	{NAME("deallocated"),	CTL(thread_deallocated)},
+	{NAME("deallocatedp"),	CTL(thread_deallocatedp)}
 #endif
 };
 
@@ -680,7 +691,9 @@
 static bool
 ctl_init(void)
 {
+	bool ret;
 
+	malloc_mutex_lock(&ctl_mtx);
 	if (ctl_initialized == false) {
 #ifdef JEMALLOC_STATS
 		unsigned i;
@@ -692,8 +705,10 @@
 		 */
 		ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
 		    (narenas + 1) * sizeof(ctl_arena_stats_t));
-		if (ctl_stats.arenas == NULL)
-			return (true);
+		if (ctl_stats.arenas == NULL) {
+			ret = true;
+			goto RETURN;
+		}
 		memset(ctl_stats.arenas, 0, (narenas + 1) *
 		    sizeof(ctl_arena_stats_t));
 
@@ -704,8 +719,10 @@
 		 */
 #ifdef JEMALLOC_STATS
 		for (i = 0; i <= narenas; i++) {
-			if (ctl_arena_init(&ctl_stats.arenas[i]))
-				return (true);
+			if (ctl_arena_init(&ctl_stats.arenas[i])) {
+				ret = true;
+				goto RETURN;
+			}
 		}
 #endif
 		ctl_stats.arenas[narenas].initialized = true;
@@ -715,7 +732,10 @@
 		ctl_initialized = true;
 	}
 
-	return (false);
+	ret = false;
+RETURN:
+	malloc_mutex_unlock(&ctl_mtx);
+	return (ret);
 }
 
 static int
@@ -825,8 +845,7 @@
 	ctl_node_t const *nodes[CTL_MAX_DEPTH];
 	size_t mib[CTL_MAX_DEPTH];
 
-	malloc_mutex_lock(&ctl_mtx);
-	if (ctl_init()) {
+	if (ctl_initialized == false && ctl_init()) {
 		ret = EAGAIN;
 		goto RETURN;
 	}
@@ -841,10 +860,9 @@
 		ret = ENOENT;
 		goto RETURN;
 	}
-	ret = nodes[depth-1]->ctl(mib, depth, oldp, oldlenp, newp, newlen);
 
+	ret = nodes[depth-1]->ctl(mib, depth, oldp, oldlenp, newp, newlen);
 RETURN:
-	malloc_mutex_unlock(&ctl_mtx);
 	return(ret);
 }
 
@@ -853,16 +871,13 @@
 {
 	int ret;
 
-	malloc_mutex_lock(&ctl_mtx);
-	if (ctl_init()) {
+	if (ctl_initialized == false && ctl_init()) {
 		ret = EAGAIN;
 		goto RETURN;
 	}
 
 	ret = ctl_lookup(name, NULL, mibp, miblenp);
-
 RETURN:
-	malloc_mutex_unlock(&ctl_mtx);
 	return(ret);
 }
 
@@ -874,8 +889,7 @@
 	const ctl_node_t *node;
 	size_t i;
 
-	malloc_mutex_lock(&ctl_mtx);
-	if (ctl_init()) {
+	if (ctl_initialized == false && ctl_init()) {
 		ret = EAGAIN;
 		goto RETURN;
 	}
@@ -912,7 +926,6 @@
 	ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
 
 RETURN:
-	malloc_mutex_unlock(&ctl_mtx);
 	return(ret);
 }
 
@@ -981,6 +994,29 @@
 	int ret;							\
 	t oldval;							\
 									\
+	malloc_mutex_lock(&ctl_mtx);					\
+	READONLY();							\
+	oldval = v;							\
+	READ(oldval, t);						\
+									\
+	ret = 0;							\
+RETURN:									\
+	malloc_mutex_unlock(&ctl_mtx);					\
+	return (ret);							\
+}
+
+/*
+ * ctl_mtx is not acquired, under the assumption that no pertinent data will
+ * mutate during the call.
+ */
+#define	CTL_RO_NL_GEN(n, v, t)					\
+static int								\
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
+    void *newp, size_t newlen)						\
+{									\
+	int ret;							\
+	t oldval;							\
+									\
 	READONLY();							\
 	oldval = v;							\
 	READ(oldval, t);						\
@@ -1024,7 +1060,7 @@
 	return (ret);							\
 }
 
-CTL_RO_GEN(version, JEMALLOC_VERSION, const char *)
+CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
 
 static int
 epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
@@ -1033,6 +1069,7 @@
 	int ret;
 	uint64_t newval;
 
+	malloc_mutex_lock(&ctl_mtx);
 	newval = 0;
 	WRITE(newval, uint64_t);
 	if (newval != 0)
@@ -1041,6 +1078,7 @@
 
 	ret = 0;
 RETURN:
+	malloc_mutex_unlock(&ctl_mtx);
 	return (ret);
 }
 
@@ -1107,8 +1145,10 @@
 }
 
 #ifdef JEMALLOC_STATS
-CTL_RO_GEN(thread_allocated, ALLOCATED_GET(), uint64_t);
-CTL_RO_GEN(thread_deallocated, DEALLOCATED_GET(), uint64_t);
+CTL_RO_NL_GEN(thread_allocated, ALLOCATED_GET(), uint64_t);
+CTL_RO_NL_GEN(thread_allocatedp, &ALLOCATED_GET(), uint64_t *);
+CTL_RO_NL_GEN(thread_deallocated, DEALLOCATED_GET(), uint64_t);
+CTL_RO_NL_GEN(thread_deallocatedp, &DEALLOCATED_GET(), uint64_t *);
 #endif
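The new "thread.allocatedp" and "thread.deallocatedp" mallctls return a pointer to the thread-specific counter (uint64_t *), so the counter can subsequently be read by dereference rather than via repeated mallctl() calls.  A hedged sketch, assuming a statistics-enabled, unprefixed build and the usual <jemalloc/jemalloc.h> header location:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

#include <jemalloc/jemalloc.h>	/* Assumed header location. */

int
main(void)
{
	uint64_t *allocatedp;
	size_t sz = sizeof(allocatedp);

	/* Fetch the pointer once; it remains valid for this thread. */
	if (mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0) != 0) {
		fprintf(stderr, "thread.allocatedp unavailable\n");
		return (1);
	}

	void *p = malloc(1024);
	printf("bytes allocated by this thread: %" PRIu64 "\n", *allocatedp);
	free(p);
	return (0);
}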
 
 /******************************************************************************/
@@ -1205,48 +1245,48 @@
 
 /******************************************************************************/
 
-CTL_RO_GEN(opt_abort, opt_abort, bool)
-CTL_RO_GEN(opt_lg_qspace_max, opt_lg_qspace_max, size_t)
-CTL_RO_GEN(opt_lg_cspace_max, opt_lg_cspace_max, size_t)
-CTL_RO_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
-CTL_RO_GEN(opt_narenas, opt_narenas, size_t)
-CTL_RO_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
-CTL_RO_GEN(opt_stats_print, opt_stats_print, bool)
+CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
+CTL_RO_NL_GEN(opt_lg_qspace_max, opt_lg_qspace_max, size_t)
+CTL_RO_NL_GEN(opt_lg_cspace_max, opt_lg_cspace_max, size_t)
+CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
+CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
+CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
+CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
 #ifdef JEMALLOC_FILL
-CTL_RO_GEN(opt_junk, opt_junk, bool)
-CTL_RO_GEN(opt_zero, opt_zero, bool)
+CTL_RO_NL_GEN(opt_junk, opt_junk, bool)
+CTL_RO_NL_GEN(opt_zero, opt_zero, bool)
 #endif
 #ifdef JEMALLOC_SYSV
-CTL_RO_GEN(opt_sysv, opt_sysv, bool)
+CTL_RO_NL_GEN(opt_sysv, opt_sysv, bool)
 #endif
 #ifdef JEMALLOC_XMALLOC
-CTL_RO_GEN(opt_xmalloc, opt_xmalloc, bool)
+CTL_RO_NL_GEN(opt_xmalloc, opt_xmalloc, bool)
 #endif
 #ifdef JEMALLOC_TCACHE
-CTL_RO_GEN(opt_tcache, opt_tcache, bool)
-CTL_RO_GEN(opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep, ssize_t)
+CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
+CTL_RO_NL_GEN(opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep, ssize_t)
 #endif
 #ifdef JEMALLOC_PROF
-CTL_RO_GEN(opt_prof, opt_prof, bool)
-CTL_RO_GEN(opt_prof_prefix, opt_prof_prefix, const char *)
-CTL_RO_GEN(opt_prof_active, opt_prof_active, bool)
-CTL_RO_GEN(opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t)
-CTL_RO_GEN(opt_lg_prof_sample, opt_lg_prof_sample, size_t)
-CTL_RO_GEN(opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
-CTL_RO_GEN(opt_prof_gdump, opt_prof_gdump, bool)
-CTL_RO_GEN(opt_prof_leak, opt_prof_leak, bool)
-CTL_RO_GEN(opt_prof_accum, opt_prof_accum, bool)
-CTL_RO_GEN(opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
+CTL_RO_NL_GEN(opt_prof, opt_prof, bool)
+CTL_RO_NL_GEN(opt_prof_prefix, opt_prof_prefix, const char *)
+CTL_RO_GEN(opt_prof_active, opt_prof_active, bool) /* Mutable. */
+CTL_RO_NL_GEN(opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t)
+CTL_RO_NL_GEN(opt_lg_prof_sample, opt_lg_prof_sample, size_t)
+CTL_RO_NL_GEN(opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
+CTL_RO_NL_GEN(opt_prof_gdump, opt_prof_gdump, bool)
+CTL_RO_NL_GEN(opt_prof_leak, opt_prof_leak, bool)
+CTL_RO_NL_GEN(opt_prof_accum, opt_prof_accum, bool)
+CTL_RO_NL_GEN(opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
 #endif
 #ifdef JEMALLOC_SWAP
-CTL_RO_GEN(opt_overcommit, opt_overcommit, bool)
+CTL_RO_NL_GEN(opt_overcommit, opt_overcommit, bool)
 #endif
 
 /******************************************************************************/
 
-CTL_RO_GEN(arenas_bin_i_size, arenas[0]->bins[mib[2]].reg_size, size_t)
-CTL_RO_GEN(arenas_bin_i_nregs, arenas[0]->bins[mib[2]].nregs, uint32_t)
-CTL_RO_GEN(arenas_bin_i_run_size, arenas[0]->bins[mib[2]].run_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_size, arenas[0]->bins[mib[2]].reg_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_nregs, arenas[0]->bins[mib[2]].nregs, uint32_t)
+CTL_RO_NL_GEN(arenas_bin_i_run_size, arenas[0]->bins[mib[2]].run_size, size_t)
 const ctl_node_t *
 arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
 {
@@ -1256,7 +1296,7 @@
 	return (super_arenas_bin_i_node);
 }
 
-CTL_RO_GEN(arenas_lrun_i_size, ((mib[2]+1) << PAGE_SHIFT), size_t)
+CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << PAGE_SHIFT), size_t)
 const ctl_node_t *
 arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
 {
@@ -1266,7 +1306,7 @@
 	return (super_arenas_lrun_i_node);
 }
 
-CTL_RO_GEN(arenas_narenas, narenas, unsigned)
+CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned)
 
 static int
 arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
@@ -1275,6 +1315,7 @@
 	int ret;
 	unsigned nread, i;
 
+	malloc_mutex_lock(&ctl_mtx);
 	READONLY();
 	if (*oldlenp != narenas * sizeof(bool)) {
 		ret = EINVAL;
@@ -1289,36 +1330,37 @@
 		((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
 
 RETURN:
+	malloc_mutex_unlock(&ctl_mtx);
 	return (ret);
 }
 
-CTL_RO_GEN(arenas_quantum, QUANTUM, size_t)
-CTL_RO_GEN(arenas_cacheline, CACHELINE, size_t)
-CTL_RO_GEN(arenas_subpage, SUBPAGE, size_t)
-CTL_RO_GEN(arenas_pagesize, PAGE_SIZE, size_t)
-CTL_RO_GEN(arenas_chunksize, chunksize, size_t)
+CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
+CTL_RO_NL_GEN(arenas_cacheline, CACHELINE, size_t)
+CTL_RO_NL_GEN(arenas_subpage, SUBPAGE, size_t)
+CTL_RO_NL_GEN(arenas_pagesize, PAGE_SIZE, size_t)
+CTL_RO_NL_GEN(arenas_chunksize, chunksize, size_t)
 #ifdef JEMALLOC_TINY
-CTL_RO_GEN(arenas_tspace_min, (1U << LG_TINY_MIN), size_t)
-CTL_RO_GEN(arenas_tspace_max, (qspace_min >> 1), size_t)
+CTL_RO_NL_GEN(arenas_tspace_min, (1U << LG_TINY_MIN), size_t)
+CTL_RO_NL_GEN(arenas_tspace_max, (qspace_min >> 1), size_t)
 #endif
-CTL_RO_GEN(arenas_qspace_min, qspace_min, size_t)
-CTL_RO_GEN(arenas_qspace_max, qspace_max, size_t)
-CTL_RO_GEN(arenas_cspace_min, cspace_min, size_t)
-CTL_RO_GEN(arenas_cspace_max, cspace_max, size_t)
-CTL_RO_GEN(arenas_sspace_min, sspace_min, size_t)
-CTL_RO_GEN(arenas_sspace_max, sspace_max, size_t)
+CTL_RO_NL_GEN(arenas_qspace_min, qspace_min, size_t)
+CTL_RO_NL_GEN(arenas_qspace_max, qspace_max, size_t)
+CTL_RO_NL_GEN(arenas_cspace_min, cspace_min, size_t)
+CTL_RO_NL_GEN(arenas_cspace_max, cspace_max, size_t)
+CTL_RO_NL_GEN(arenas_sspace_min, sspace_min, size_t)
+CTL_RO_NL_GEN(arenas_sspace_max, sspace_max, size_t)
 #ifdef JEMALLOC_TCACHE
-CTL_RO_GEN(arenas_tcache_max, tcache_maxclass, size_t)
+CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
 #endif
-CTL_RO_GEN(arenas_ntbins, ntbins, unsigned)
-CTL_RO_GEN(arenas_nqbins, nqbins, unsigned)
-CTL_RO_GEN(arenas_ncbins, ncbins, unsigned)
-CTL_RO_GEN(arenas_nsbins, nsbins, unsigned)
-CTL_RO_GEN(arenas_nbins, nbins, unsigned)
+CTL_RO_NL_GEN(arenas_ntbins, ntbins, unsigned)
+CTL_RO_NL_GEN(arenas_nqbins, nqbins, unsigned)
+CTL_RO_NL_GEN(arenas_ncbins, ncbins, unsigned)
+CTL_RO_NL_GEN(arenas_nsbins, nsbins, unsigned)
+CTL_RO_NL_GEN(arenas_nbins, nbins, unsigned)
 #ifdef JEMALLOC_TCACHE
-CTL_RO_GEN(arenas_nhbins, nhbins, unsigned)
+CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
 #endif
-CTL_RO_GEN(arenas_nlruns, nlclasses, size_t)
+CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
 
 static int
 arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
@@ -1368,6 +1410,7 @@
 	int ret;
 	bool oldval;
 
+	malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
 	oldval = opt_prof_active;
 	if (newp != NULL) {
 		/*
@@ -1382,6 +1425,7 @@
 
 	ret = 0;
 RETURN:
+	malloc_mutex_unlock(&ctl_mtx);
 	return (ret);
 }
 
@@ -1405,7 +1449,7 @@
 	return (ret);
 }
 
-CTL_RO_GEN(prof_interval, prof_interval, uint64_t)
+CTL_RO_NL_GEN(prof_interval, prof_interval, uint64_t)
 #endif
 
 /******************************************************************************/
@@ -1503,10 +1547,18 @@
 const ctl_node_t *
 stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
 {
+	const ctl_node_t * ret;
 
-	if (ctl_stats.arenas[i].initialized == false)
-		return (NULL);
-	return (super_stats_arenas_i_node);
+	malloc_mutex_lock(&ctl_mtx);
+	if (ctl_stats.arenas[i].initialized == false) {
+		ret = NULL;
+		goto RETURN;
+	}
+
+	ret = super_stats_arenas_i_node;
+RETURN:
+	malloc_mutex_unlock(&ctl_mtx);
+	return (ret);
 }
 
 #ifdef JEMALLOC_STATS
@@ -1528,6 +1580,7 @@
 {
 	int ret;
 
+	malloc_mutex_lock(&ctl_mtx);
 	if (swap_enabled) {
 		READONLY();
 	} else {
@@ -1545,6 +1598,7 @@
 
 	ret = 0;
 RETURN:
+	malloc_mutex_unlock(&ctl_mtx);
 	return (ret);
 }
 
@@ -1556,6 +1610,7 @@
 {
 	int ret;
 
+	malloc_mutex_lock(&ctl_mtx);
 	if (swap_enabled) {
 		READONLY();
 	} else if (newp != NULL) {
@@ -1586,6 +1641,7 @@
 
 	ret = 0;
 RETURN:
+	malloc_mutex_unlock(&ctl_mtx);
 	return (ret);
 }
 #endif
diff --git a/jemalloc/src/huge.c b/jemalloc/src/huge.c
index a035197..0aadc43 100644
--- a/jemalloc/src/huge.c
+++ b/jemalloc/src/huge.c
@@ -215,13 +215,56 @@
 	 * expectation that the extra bytes will be reliably preserved.
 	 */
 	copysize = (size < oldsize) ? size : oldsize;
-	memcpy(ret, ptr, copysize);
-	idalloc(ptr);
+
+	/*
+	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
+	 * source nor the destination are in swap or dss.
+	 */
+#ifdef JEMALLOC_MREMAP_FIXED
+	if (oldsize >= chunksize
+#  ifdef JEMALLOC_SWAP
+	    && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
+	    chunk_in_swap(ret) == false))
+#  endif
+#  ifdef JEMALLOC_DSS
+	    && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
+#  endif
+	    ) {
+		size_t newsize = huge_salloc(ret);
+
+		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
+		    ret) == MAP_FAILED) {
+			/*
+			 * Assuming no chunk management bugs in the allocator,
+			 * the only documented way an error can occur here is
+			 * if the application changed the map type for a
+			 * portion of the old allocation.  This is firmly in
+			 * undefined behavior territory, so write a diagnostic
+			 * message, and optionally abort.
+			 */
+			char buf[BUFERROR_BUF];
+
+			buferror(errno, buf, sizeof(buf));
+			malloc_write("<jemalloc>: Error in mremap(): ");
+			malloc_write(buf);
+			malloc_write("\n");
+			if (opt_abort)
+				abort();
+			memcpy(ret, ptr, copysize);
+			idalloc(ptr);
+		} else
+			huge_dalloc(ptr, false);
+	} else
+#endif
+	{
+		memcpy(ret, ptr, copysize);
+		idalloc(ptr);
+	}
 	return (ret);
 }
 
 void
-huge_dalloc(void *ptr)
+huge_dalloc(void *ptr, bool unmap)
 {
 	extent_node_t *node, key;
 
@@ -241,14 +284,16 @@
 
 	malloc_mutex_unlock(&huge_mtx);
 
+	if (unmap) {
 	/* Unmap chunk. */
 #ifdef JEMALLOC_FILL
 #if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
-	if (opt_junk)
-		memset(node->addr, 0x5a, node->size);
+		if (opt_junk)
+			memset(node->addr, 0x5a, node->size);
 #endif
 #endif
-	chunk_dealloc(node->addr, node->size);
+		chunk_dealloc(node->addr, node->size);
+	}
 
 	base_node_dealloc(node);
 }
diff --git a/jemalloc/test/mremap.c b/jemalloc/test/mremap.c
new file mode 100644
index 0000000..146c66f
--- /dev/null
+++ b/jemalloc/test/mremap.c
@@ -0,0 +1,67 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+
+#define	JEMALLOC_MANGLE
+#include "jemalloc_test.h"
+
+int
+main(void)
+{
+	int ret, err;
+	size_t sz, lg_chunk, chunksize, i;
+	char *p, *q;
+
+	fprintf(stderr, "Test begin\n");
+
+	sz = sizeof(lg_chunk);
+	if ((err = JEMALLOC_P(mallctl)("opt.lg_chunk", &lg_chunk, &sz, NULL,
+	    0))) {
+		assert(err != ENOENT);
+		fprintf(stderr, "%s(): Error in mallctl(): %s\n", __func__,
+		    strerror(err));
+		ret = 1;
+		goto RETURN;
+	}
+	chunksize = ((size_t)1U) << lg_chunk;
+
+	p = (char *)malloc(chunksize);
+	if (p == NULL) {
+		fprintf(stderr, "malloc(%zu) --> %p\n", chunksize, p);
+		ret = 1;
+		goto RETURN;
+	}
+	memset(p, 'a', chunksize);
+
+	q = (char *)realloc(p, chunksize * 2);
+	if (q == NULL) {
+		fprintf(stderr, "realloc(%p, %zu) --> %p\n", p, chunksize * 2,
+		    q);
+		ret = 1;
+		goto RETURN;
+	}
+	for (i = 0; i < chunksize; i++) {
+		assert(q[i] == 'a');
+	}
+
+	p = q;
+
+	q = (char *)realloc(p, chunksize);
+	if (q == NULL) {
+		fprintf(stderr, "realloc(%p, %zu) --> %p\n", p, chunksize, q);
+		ret = 1;
+		goto RETURN;
+	}
+	for (i = 0; i < chunksize; i++) {
+		assert(q[i] == 'a');
+	}
+
+	free(q);
+
+	ret = 0;
+RETURN:
+	fprintf(stderr, "Test end\n");
+	return (ret);
+}
diff --git a/jemalloc/test/mremap.exp b/jemalloc/test/mremap.exp
new file mode 100644
index 0000000..369a88d
--- /dev/null
+++ b/jemalloc/test/mremap.exp
@@ -0,0 +1,2 @@
+Test begin
+Test end