[jemalloc] delete zircon's copy of jemalloc

It appears to be unused; nothing in the Zircon build references this copy anymore.

TEST= system still works
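
A quick sanity check (a sketch only; the exact invocation is an assumption)
is to search for any remaining references to the deleted copy before landing:

    grep -rn "third_party/ulib/jemalloc" zircon/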

Change-Id: If84af2736dc449fee0a582edfd33321a90ee4bc0
diff --git a/zircon/third_party/ulib/jemalloc/.appveyor.yml b/zircon/third_party/ulib/jemalloc/.appveyor.yml
deleted file mode 100644
index ddd5c571..0000000
--- a/zircon/third_party/ulib/jemalloc/.appveyor.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-version: '{build}'
-
-environment:
-  matrix:
-  - MSYSTEM: MINGW64
-    CPU: x86_64
-    MSVC: amd64
-  - MSYSTEM: MINGW32
-    CPU: i686
-    MSVC: x86
-  - MSYSTEM: MINGW64
-    CPU: x86_64
-  - MSYSTEM: MINGW32
-    CPU: i686
-
-install:
-  - set PATH=c:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH%
-  - if defined MSVC call "c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %MSVC%
-  - if defined MSVC pacman --noconfirm -Rsc mingw-w64-%CPU%-gcc gcc
-  - pacman --noconfirm -Suy mingw-w64-%CPU%-make
-
-build_script:
-  - bash -c "autoconf"
-  - bash -c "./configure"
-  - mingw32-make -j3
-  - file lib/jemalloc.dll
-  - mingw32-make -j3 tests
-  - mingw32-make -k check
diff --git a/zircon/third_party/ulib/jemalloc/.autom4te.cfg b/zircon/third_party/ulib/jemalloc/.autom4te.cfg
deleted file mode 100644
index fe2424d..0000000
--- a/zircon/third_party/ulib/jemalloc/.autom4te.cfg
+++ /dev/null
@@ -1,3 +0,0 @@
-begin-language: "Autoconf-without-aclocal-m4"
-args: --no-cache
-end-language: "Autoconf-without-aclocal-m4"
diff --git a/zircon/third_party/ulib/jemalloc/.gitattributes b/zircon/third_party/ulib/jemalloc/.gitattributes
deleted file mode 100644
index 6313b56..0000000
--- a/zircon/third_party/ulib/jemalloc/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-* text=auto eol=lf
diff --git a/zircon/third_party/ulib/jemalloc/.gitignore b/zircon/third_party/ulib/jemalloc/.gitignore
deleted file mode 100644
index 0c460fc..0000000
--- a/zircon/third_party/ulib/jemalloc/.gitignore
+++ /dev/null
@@ -1,80 +0,0 @@
-/*.gcov.*
-
-/bin/jemalloc-config
-/bin/jemalloc.sh
-/bin/jeprof
-
-/config.stamp
-/config.log
-/config.status
-/configure
-
-/doc/html.xsl
-/doc/manpages.xsl
-/doc/jemalloc.xml
-/doc/jemalloc.html
-/doc/jemalloc.3
-
-/jemalloc.pc
-
-/lib/
-
-/Makefile
-
-/src/*.[od]
-/src/*.gcda
-/src/*.gcno
-
-/test/test.sh
-test/include/test/jemalloc_test.h
-test/include/test/jemalloc_test_defs.h
-
-/test/integration/[A-Za-z]*
-!/test/integration/[A-Za-z]*.*
-/test/integration/*.[od]
-/test/integration/*.gcda
-/test/integration/*.gcno
-/test/integration/*.out
-
-/test/integration/cpp/[A-Za-z]*
-!/test/integration/cpp/[A-Za-z]*.*
-/test/integration/cpp/*.[od]
-/test/integration/cpp/*.gcda
-/test/integration/cpp/*.gcno
-/test/integration/cpp/*.out
-
-/test/src/*.[od]
-/test/src/*.gcda
-/test/src/*.gcno
-
-/test/stress/[A-Za-z]*
-!/test/stress/[A-Za-z]*.*
-/test/stress/*.[od]
-/test/stress/*.gcda
-/test/stress/*.gcno
-/test/stress/*.out
-
-/test/unit/[A-Za-z]*
-!/test/unit/[A-Za-z]*.*
-/test/unit/*.[od]
-/test/unit/*.gcda
-/test/unit/*.gcno
-/test/unit/*.out
-
-/VERSION
-
-*.pdb
-*.sdf
-*.opendb
-*.opensdf
-*.cachefile
-*.suo
-*.user
-*.sln.docstates
-*.tmp
-/msvc/Win32/
-/msvc/x64/
-/msvc/projects/*/*/Debug*/
-/msvc/projects/*/*/Release*/
-/msvc/projects/*/*/Win32/
-/msvc/projects/*/*/x64/
diff --git a/zircon/third_party/ulib/jemalloc/.travis.yml b/zircon/third_party/ulib/jemalloc/.travis.yml
deleted file mode 100644
index 97641ec..0000000
--- a/zircon/third_party/ulib/jemalloc/.travis.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-language: generic
-
-matrix:
-  include:
-    - os: linux
-      env: CC=gcc CXX=g++
-    - os: linux
-      env: CC=clang CXX=clang++
-    - os: linux
-      env: CC=gcc CXX=g++ EXTRA_FLAGS=-m32
-      addons:
-        apt:
-          packages:
-          - gcc-multilib
-    - os: linux
-      env: CC=clang CXX=clang++ EXTRA_FLAGS=-m32
-      addons:
-        apt:
-          packages:
-          - gcc-multilib
-    - os: osx
-      env: CC=clang CXX=clang++
-    - os: osx
-      env: CC=clang CXX=clang++ EXTRA_FLAGS=-m32
-
-before_script:
-  - autoconf
-  - ./configure${EXTRA_FLAGS:+ CC="$CC $EXTRA_FLAGS" CXX="$CXX $EXTRA_FLAGS"}
-  - make -j3
-  - make -j3 tests
-
-script:
-  - make check
diff --git a/zircon/third_party/ulib/jemalloc/BUILD.gn b/zircon/third_party/ulib/jemalloc/BUILD.gn
deleted file mode 100644
index 9c9dcc2..0000000
--- a/zircon/third_party/ulib/jemalloc/BUILD.gn
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2019 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-source_set("jemalloc") {
-  deps = [
-    "$zx/third_party/ulib/musl:musl_internal",
-  ]
-  include_dirs = [ "include" ]
-  sources = [
-    "src/arena.c",
-    "src/base.c",
-    "src/bitmap.c",
-    "src/ckh.c",
-    "src/ctl.c",
-    "src/extent.c",
-    "src/extent_dss.c",
-    "src/extent_mmap.c",
-    "src/jemalloc.c",
-    "src/large.c",
-    "src/mutex.c",
-    "src/nstime.c",
-    "src/pages.c",
-    "src/prof.c",
-    "src/rtree.c",
-    "src/stats.c",
-    "src/tcache.c",
-    "src/tsd.c",
-    "src/util.c",
-    "src/witness.c",
-  ]
-}
diff --git a/zircon/third_party/ulib/jemalloc/COPYING b/zircon/third_party/ulib/jemalloc/COPYING
deleted file mode 100644
index 104b1f8..0000000
--- a/zircon/third_party/ulib/jemalloc/COPYING
+++ /dev/null
@@ -1,27 +0,0 @@
-Unless otherwise specified, files in the jemalloc source distribution are
-subject to the following license:
---------------------------------------------------------------------------------
-Copyright (C) 2002-2016 Jason Evans <jasone@canonware.com>.
-All rights reserved.
-Copyright (C) 2007-2012 Mozilla Foundation.  All rights reserved.
-Copyright (C) 2009-2016 Facebook, Inc.  All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-1. Redistributions of source code must retain the above copyright notice(s),
-   this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice(s),
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
-OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
-EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------------
diff --git a/zircon/third_party/ulib/jemalloc/ChangeLog b/zircon/third_party/ulib/jemalloc/ChangeLog
deleted file mode 100644
index f75edd9..0000000
--- a/zircon/third_party/ulib/jemalloc/ChangeLog
+++ /dev/null
@@ -1,1008 +0,0 @@
-Following are change highlights associated with official releases.  Important
-bug fixes are all mentioned, but some internal enhancements are omitted here for
-brevity.  Much more detail can be found in the git revision history:
-
-    https://github.com/jemalloc/jemalloc
-
-* 4.4.0 (December 3, 2016)
-
-  New features:
-  - Add configure support for *-*-linux-android.  (@cferris1000, @jasone)
-  - Add the --disable-syscall configure option, for use on systems that place
-    security-motivated limitations on syscall(2).  (@jasone)
-  - Add support for Debian GNU/kFreeBSD.  (@thesam)
-
-  Optimizations:
-  - Add extent serial numbers and use them where appropriate as a sort key that
-    is higher priority than address, so that the allocation policy prefers older
-    extents.  This tends to improve locality (decrease fragmentation) when
-    memory grows downward.  (@jasone)
-  - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized
-    on Linux 4.5 and newer.  (@jasone)
-  - Mark partially purged arena chunks as non-huge-page.  This improves
-    interaction with Linux's transparent huge page functionality.  (@jasone)
-
-  Bug fixes:
-  - Fix size class computations for edge conditions involving extremely large
-    allocations.  This regression was first released in 4.0.0.  (@jasone,
-    @ingvarha)
-  - Remove overly restrictive assertions related to the cactive statistic.  This
-    regression was first released in 4.1.0.  (@jasone)
-  - Implement a more reliable detection scheme for os_unfair_lock on macOS.
-    (@jszakmeister)
-
-* 4.3.1 (November 7, 2016)
-
-  Bug fixes:
-  - Fix a severe virtual memory leak.  This regression was first released in
-    4.3.0.  (@interwq, @jasone)
-  - Refactor atomic and prng APIs to restore support for 32-bit platforms that
-    use pre-C11 toolchains, e.g. FreeBSD's mips.  (@jasone)
-
-* 4.3.0 (November 4, 2016)
-
-  This is the first release that passes the test suite for multiple Windows
-  configurations, thanks in large part to @glandium setting up continuous
-  integration via AppVeyor (and Travis CI for Linux and OS X).
-
-  New features:
-  - Add "J" (JSON) support to malloc_stats_print().  (@jasone)
-  - Add Cray compiler support.  (@ronawho)
-
-  Optimizations:
-  - Add/use adaptive spinning for bootstrapping and radix tree node
-    initialization.  (@jasone)
-
-  Bug fixes:
-  - Fix large allocation to search starting in the optimal size class heap,
-    which can substantially reduce virtual memory churn and fragmentation.  This
-    regression was first released in 4.0.0.  (@mjp41, @jasone)
-  - Fix stats.arenas.<i>.nthreads accounting.  (@interwq)
-  - Fix and simplify decay-based purging.  (@jasone)
-  - Make DSS (sbrk(2)-related) operations lockless, which resolves potential
-    deadlocks during thread exit.  (@jasone)
-  - Fix over-sized allocation of radix tree leaf nodes.  (@mjp41, @ogaun,
-    @jasone)
-  - Fix over-sized allocation of arena_t (plus associated stats) data
-    structures.  (@jasone, @interwq)
-  - Fix EXTRA_CFLAGS to not affect configuration.  (@jasone)
-  - Fix a Valgrind integration bug.  (@ronawho)
-  - Disallow 0x5a junk filling when running in Valgrind.  (@jasone)
-  - Fix a file descriptor leak on Linux.  This regression was first released in
-    4.2.0.  (@vsarunas, @jasone)
-  - Fix static linking of jemalloc with glibc.  (@djwatson)
-  - Use syscall(2) rather than {open,read,close}(2) during boot on Linux.  This
-    works around other libraries' system call wrappers performing reentrant
-    allocation.  (@kspinka, @Whissi, @jasone)
-  - Fix OS X default zone replacement to work with OS X 10.12.  (@glandium,
-    @jasone)
-  - Fix cached memory management to avoid needless commit/decommit operations
-    during purging, which resolves permanent virtual memory map fragmentation
-    issues on Windows.  (@mjp41, @jasone)
-  - Fix TSD fetches to avoid (recursive) allocation.  This is relevant to
-    non-TLS and Windows configurations.  (@jasone)
-  - Fix malloc_conf overriding to work on Windows.  (@jasone)
-  - Forcibly disable lazy-lock on Windows (was forcibly *enabled*).  (@jasone)
-
-* 4.2.1 (June 8, 2016)
-
-  Bug fixes:
-  - Fix bootstrapping issues for configurations that require allocation during
-    tsd initialization (e.g. --disable-tls).  (@cferris1000, @jasone)
-  - Fix gettimeofday() version of nstime_update().  (@ronawho)
-  - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper().  (@ronawho)
-  - Fix potential VM map fragmentation regression.  (@jasone)
-  - Fix opt_zero-triggered in-place huge reallocation zeroing.  (@jasone)
-  - Fix heap profiling context leaks in reallocation edge cases.  (@jasone)
-
-* 4.2.0 (May 12, 2016)
-
-  New features:
-  - Add the arena.<i>.reset mallctl, which makes it possible to discard all of
-    an arena's allocations in a single operation.  (@jasone)
-  - Add the stats.retained and stats.arenas.<i>.retained statistics.  (@jasone)
-  - Add the --with-version configure option.  (@jasone)
-  - Support --with-lg-page values larger than actual page size.  (@jasone)
-
-  Optimizations:
-  - Use pairing heaps rather than red-black trees for various hot data
-    structures.  (@djwatson, @jasone)
-  - Streamline fast paths of rtree operations.  (@jasone)
-  - Optimize the fast paths of calloc() and [m,d,sd]allocx().  (@jasone)
-  - Decommit unused virtual memory if the OS does not overcommit.  (@jasone)
-  - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order
-    to avoid unfortunate interactions during fork(2).  (@jasone)
-
-  Bug fixes:
-  - Fix chunk accounting related to triggering gdump profiles.  (@jasone)
-  - Link against librt for clock_gettime(2) if glibc < 2.17.  (@jasone)
-  - Scale leak report summary according to sampling probability.  (@jasone)
-
-* 4.1.1 (May 3, 2016)
-
-  This bugfix release resolves a variety of mostly minor issues, though the
-  bitmap fix is critical for 64-bit Windows.
-
-  Bug fixes:
-  - Fix the linear scan version of bitmap_sfu() to shift by the proper amount
-    even when sizeof(long) is not the same as sizeof(void *), as on 64-bit
-    Windows.  (@jasone)
-  - Fix hashing functions to avoid unaligned memory accesses (and resulting
-    crashes).  This is relevant at least to some ARM-based platforms.
-    (@rkmisra)
-  - Fix fork()-related lock rank ordering reversals.  These reversals were
-    unlikely to cause deadlocks in practice except when heap profiling was
-    enabled and active.  (@jasone)
-  - Fix various chunk leaks in OOM code paths.  (@jasone)
-  - Fix malloc_stats_print() to print opt.narenas correctly.  (@jasone)
-  - Fix MSVC-specific build/test issues.  (@rustyx, @yuslepukhin)
-  - Fix a variety of test failures that were due to test fragility rather than
-    core bugs.  (@jasone)
-
-* 4.1.0 (February 28, 2016)
-
-  This release is primarily about optimizations, but it also incorporates a lot
-  of portability-motivated refactoring and enhancements.  Many people worked on
-  this release, to an extent that even with the omission here of minor changes
-  (see git revision history), and of the people who reported and diagnosed
-  issues, so much of the work was contributed that starting with this release,
-  changes are annotated with author credits to help reflect the collaborative
-  effort involved.
-
-  New features:
-  - Implement decay-based unused dirty page purging, a major optimization with
-    mallctl API impact.  This is an alternative to the existing ratio-based
-    unused dirty page purging, and is intended to eventually become the sole
-    purging mechanism.  New mallctls:
-    + opt.purge
-    + opt.decay_time
-    + arena.<i>.decay
-    + arena.<i>.decay_time
-    + arenas.decay_time
-    + stats.arenas.<i>.decay_time
-    (@jasone, @cevans87)
-  - Add --with-malloc-conf, which makes it possible to embed a default
-    options string during configuration.  This was motivated by the desire to
-    specify --with-malloc-conf=purge:decay , since the default must remain
-    purge:ratio until the 5.0.0 release.  (@jasone)
-  - Add MS Visual Studio 2015 support.  (@rustyx, @yuslepukhin)
-  - Make *allocx() size class overflow behavior defined.  The maximum
-    size class is now less than PTRDIFF_MAX to protect applications against
-    numerical overflow, and all allocation functions are guaranteed to indicate
-    errors rather than potentially crashing if the request size exceeds the
-    maximum size class.  (@jasone)
-  - jeprof:
-    + Add raw heap profile support.  (@jasone)
-    + Add --retain and --exclude for backtrace symbol filtering.  (@jasone)
-
-  Optimizations:
-  - Optimize the fast path to combine various bootstrapping and configuration
-    checks and execute more streamlined code in the common case.  (@interwq)
-  - Use linear scan for small bitmaps (used for small object tracking).  In
-    addition to speeding up bitmap operations on 64-bit systems, this reduces
-    allocator metadata overhead by approximately 0.2%.  (@djwatson)
-  - Separate arena_avail trees, which substantially speeds up run tree
-    operations.  (@djwatson)
-  - Use memoization (boot-time-computed table) for run quantization.  Separate
-    arena_avail trees reduced the importance of this optimization.  (@jasone)
-  - Attempt mmap-based in-place huge reallocation.  This can dramatically speed
-    up incremental huge reallocation.  (@jasone)
-
-  Incompatible changes:
-  - Make opt.narenas unsigned rather than size_t.  (@jasone)
-
-  Bug fixes:
-  - Fix stats.cactive accounting regression.  (@rustyx, @jasone)
-  - Handle unaligned keys in hash().  This caused problems for some ARM systems.
-    (@jasone, @cferris1000)
-  - Refactor arenas array.  In addition to fixing a fork-related deadlock, this
-    makes arena lookups faster and simpler.  (@jasone)
-  - Move retained memory allocation out of the default chunk allocation
-    function, to a location that gets executed even if the application installs
-    a custom chunk allocation function.  This resolves a virtual memory leak.
-    (@buchgr)
-  - Fix a potential tsd cleanup leak.  (@cferris1000, @jasone)
-  - Fix run quantization.  In practice this bug had no impact unless
-    applications requested memory with alignment exceeding one page.
-    (@jasone, @djwatson)
-  - Fix LinuxThreads-specific bootstrapping deadlock.  (Cosmin Paraschiv)
-  - jeprof:
-    + Don't discard curl options if timeout is not defined.  (@djwatson)
-    + Detect failed profile fetches.  (@djwatson)
-  - Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for
-    --disable-stats case.  (@jasone)
-
-* 4.0.4 (October 24, 2015)
-
-  This bugfix release fixes another xallocx() regression.  No other regressions
-  have come to light in over a month, so this is likely a good starting point
-  for people who prefer to wait for "dot one" releases with all the major issues
-  shaken out.
-
-  Bug fixes:
-  - Fix xallocx(..., MALLOCX_ZERO to zero the last full trailing page of large
-    allocations that have been randomly assigned an offset of 0 when
-    --enable-cache-oblivious configure option is enabled.
-
-* 4.0.3 (September 24, 2015)
-
-  This bugfix release continues the trend of xallocx() and heap profiling fixes.
-
-  Bug fixes:
-  - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large
-    allocations when --enable-cache-oblivious configure option is enabled.
-  - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations
-    when resizing from/to a size class that is not a multiple of the chunk size.
-  - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap
-    profile dumping started.
-  - Work around a potentially bad thread-specific data initialization
-    interaction with NPTL (glibc's pthreads implementation).
-
-* 4.0.2 (September 21, 2015)
-
-  This bugfix release addresses a few bugs specific to heap profiling.
-
-  Bug fixes:
-  - Fix ixallocx_prof_sample() to never modify nor create sampled small
-    allocations.  xallocx() is in general incapable of moving small allocations,
-    so this fix removes buggy code without loss of generality.
-  - Fix irallocx_prof_sample() to always allocate large regions, even when
-    alignment is non-zero.
-  - Fix prof_alloc_rollback() to read tdata from thread-specific data rather
-    than dereferencing a potentially invalid tctx.
-
-* 4.0.1 (September 15, 2015)
-
-  This is a bugfix release that is somewhat high risk due to the amount of
-  refactoring required to address deep xallocx() problems.  As a side effect of
-  these fixes, xallocx() now tries harder to partially fulfill requests for
-  optional extra space.  Note that a couple of minor heap profiling
-  optimizations are included, but these are better thought of as performance
-  fixes that were integral to disovering most of the other bugs.
-
-  Optimizations:
-  - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the
-    fast path when heap profiling is enabled.  Additionally, split a special
-    case out into arena_prof_tctx_reset(), which also avoids chunk metadata
-    reads.
-  - Optimize irallocx_prof() to optimistically update the sampler state.  The
-    prior implementation appears to have been a holdover from when
-    rallocx()/xallocx() functionality was combined as rallocm().
-
-  Bug fixes:
-  - Fix TLS configuration such that it is enabled by default for platforms on
-    which it works correctly.
-  - Fix arenas_cache_cleanup() and arena_get_hard() to handle
-    allocation/deallocation within the application's thread-specific data
-    cleanup functions even after arenas_cache is torn down.
-  - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS.
-  - Fix chunk purge hook calls for in-place huge shrinking reallocation to
-    specify the old chunk size rather than the new chunk size.  This bug caused
-    no correctness issues for the default chunk purge function, but was
-    visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
-  - Fix heap profiling bugs:
-    + Fix heap profiling to distinguish among otherwise identical sample sites
-      with interposed resets (triggered via the "prof.reset" mallctl).  This bug
-      could cause data structure corruption that would most likely result in a
-      segfault.
-    + Fix irealloc_prof() to prof_alloc_rollback() on OOM.
-    + Make one call to prof_active_get_unlocked() per allocation event, and use
-      the result throughout the relevant functions that handle an allocation
-      event.  Also add a missing check in prof_realloc().  These fixes protect
-      allocation events against concurrent prof_active changes.
-    + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample()
-      in the correct order.
-    + Fix prof_realloc() to call prof_free_sampled_object() after calling
-      prof_malloc_sample_object().  Prior to this fix, if tctx and old_tctx were
-      the same, the tctx could have been prematurely destroyed.
-  - Fix portability bugs:
-    + Don't bitshift by negative amounts when encoding/decoding run sizes in
-      chunk header maps.  This affected systems with page sizes greater than 8
-      KiB.
-    + Rename index_t to szind_t to avoid an existing type on Solaris.
-    + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to
-      match glibc and avoid compilation errors when including both
-      jemalloc/jemalloc.h and malloc.h in C++ code.
-    + Don't assume that /bin/sh is appropriate when running size_classes.sh
-      during configuration.
-    + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM.
-    + Link tests to librt if it contains clock_gettime(2).
-
-* 4.0.0 (August 17, 2015)
-
-  This version contains many speed and space optimizations, both minor and
-  major.  The major themes are generalization, unification, and simplification.
-  Although many of these optimizations cause no visible behavior change, their
-  cumulative effect is substantial.
-
-  New features:
-  - Normalize size class spacing to be consistent across the complete size
-    range.  By default there are four size classes per size doubling, but this
-    is now configurable via the --with-lg-size-class-group option.  Also add the
-    --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and
-    --with-lg-tiny-min options, which can be used to tweak page and size class
-    settings.  Impacts:
-    + Worst case performance for incrementally growing/shrinking reallocation
-      is improved because there are far fewer size classes, and therefore
-      copying happens less often.
-    + Internal fragmentation is limited to 20% for all but the smallest size
-      classes (those less than four times the quantum).  (1B + 4 KiB)
-      and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation.
-    + Chunk fragmentation tends to be lower because there are fewer distinct run
-      sizes to pack.
-  - Add support for explicit tcaches.  The "tcache.create", "tcache.flush", and
-    "tcache.destroy" mallctls control tcache lifetime and flushing, and the
-    MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API
-    control which tcache is used for each operation.
-  - Implement per thread heap profiling, as well as the ability to
-    enable/disable heap profiling on a per thread basis.  Add the "prof.reset",
-    "prof.lg_sample", "thread.prof.name", "thread.prof.active",
-    "opt.prof_thread_active_init", "prof.thread_active_init", and
-    "thread.prof.active" mallctls.
-  - Add support for per arena application-specified chunk allocators, configured
-    via the "arena.<i>.chunk_hooks" mallctl.
-  - Refactor huge allocation to be managed by arenas, so that arenas now
-    function as general purpose independent allocators.  This is important in
-    the context of user-specified chunk allocators, aside from the scalability
-    benefits.  Related new statistics:
-    + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc",
-      "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests"
-      mallctls provide high level per arena huge allocation statistics.
-    + The "arenas.nhchunks", "arenas.hchunk.<i>.size",
-      "stats.arenas.<i>.hchunks.<j>.nmalloc",
-      "stats.arenas.<i>.hchunks.<j>.ndalloc",
-      "stats.arenas.<i>.hchunks.<j>.nrequests", and
-      "stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class
-      statistics.
-  - Add the 'util' column to malloc_stats_print() output, which reports the
-    proportion of available regions that are currently in use for each small
-    size class.
-  - Add "alloc" and "free" modes for for junk filling (see the "opt.junk"
-    mallctl), so that it is possible to separately enable junk filling for
-    allocation versus deallocation.
-  - Add the jemalloc-config script, which provides information about how
-    jemalloc was configured, and how to integrate it into application builds.
-  - Add metadata statistics, which are accessible via the "stats.metadata",
-    "stats.arenas.<i>.metadata.mapped", and
-    "stats.arenas.<i>.metadata.allocated" mallctls.
-  - Add the "stats.resident" mallctl, which reports the upper limit of
-    physically resident memory mapped by the allocator.
-  - Add per arena control over unused dirty page purging, via the
-    "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and
-    "stats.arenas.<i>.lg_dirty_mult" mallctls.
-  - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump
-    feature on/off during program execution.
-  - Add sdallocx(), which implements sized deallocation.  The primary
-    optimization over dallocx() is the removal of a metadata read, which often
-    suffers an L1 cache miss.
-  - Add missing header includes in jemalloc/jemalloc.h, so that applications
-    only have to #include <jemalloc/jemalloc.h>.
-  - Add support for additional platforms:
-    + Bitrig
-    + Cygwin
-    + DragonFlyBSD
-    + iOS
-    + OpenBSD
-    + OpenRISC/or1k
-
-  Optimizations:
-  - Maintain dirty runs in per arena LRUs rather than in per arena trees of
-    dirty-run-containing chunks.  In practice this change significantly reduces
-    dirty page purging volume.
-  - Integrate whole chunks into the unused dirty page purging machinery.  This
-    reduces the cost of repeated huge allocation/deallocation, because it
-    effectively introduces a cache of chunks.
-  - Split the arena chunk map into two separate arrays, in order to increase
-    cache locality for the frequently accessed bits.
-  - Move small run metadata out of runs, into arena chunk headers.  This reduces
-    run fragmentation, smaller runs reduce external fragmentation for small size
-    classes, and packed (less uniformly aligned) metadata layout improves CPU
-    cache set distribution.
-  - Randomly distribute large allocation base pointer alignment relative to page
-    boundaries in order to more uniformly utilize CPU cache sets.  This can be
-    disabled via the --disable-cache-oblivious configure option, and queried via
-    the "config.cache_oblivious" mallctl.
-  - Micro-optimize the fast paths for the public API functions.
-  - Refactor thread-specific data to reside in a single structure.  This assures
-    that only a single TLS read is necessary per call into the public API.
-  - Implement in-place huge allocation growing and shrinking.
-  - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make
-    additional optimizations that reduce maximum lookup depth to one or two
-    levels.  This resolves what was a concurrency bottleneck for per arena huge
-    allocation, because a global data structure is critical for determining
-    which arenas own which huge allocations.
-
-  Incompatible changes:
-  - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious
-    warnings by default.
-  - Assure that the constness of malloc_usable_size()'s return type matches that
-    of the system implementation.
-  - Change the heap profile dump format to support per thread heap profiling,
-    rename pprof to jeprof, and enhance it with the --thread=<n> option.  As a
-    result, the bundled jeprof must now be used rather than the upstream
-    (gperftools) pprof.
-  - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can
-    internally deadlock on some platforms.
-  - Change the "arenas.nlruns" mallctl type from size_t to unsigned.
-  - Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with
-    "stats.arenas.<i>.bins.<j>.curregs".
-  - Ignore MALLOC_CONF in set{uid,gid,cap} binaries.
-  - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the
-    MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage.
-
-  Removed features:
-  - Remove the *allocm() API, which is superseded by the *allocx() API.
-  - Remove the --enable-dss options, and make dss non-optional on all platforms
-    which support sbrk(2).
-  - Remove the "arenas.purge" mallctl, which was obsoleted by the
-    "arena.<i>.purge" mallctl in 3.1.0.
-  - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically
-    detects whether it is running inside Valgrind.
-  - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and
-    "stats.huge.ndalloc" mallctls.
-  - Remove the --enable-mremap option.
-  - Remove the "stats.chunks.current", "stats.chunks.total", and
-    "stats.chunks.high" mallctls.
-
-  Bug fixes:
-  - Fix the cactive statistic to decrease (rather than increase) when active
-    memory decreases.  This regression was first released in 3.5.0.
-  - Fix OOM handling in memalign() and valloc().  A variant of this bug existed
-    in all releases since 2.0.0, which introduced these functions.
-  - Fix an OOM-related regression in arena_tcache_fill_small(), which could
-    cause cache corruption on OOM.  This regression was present in all releases
-    from 2.2.0 through 3.6.0.
-  - Fix size class overflow handling for malloc(), posix_memalign(), memalign(),
-    calloc(), and realloc() when profiling is enabled.
-  - Fix the "arena.<i>.dss" mallctl to return an error if "primary" or
-    "secondary" precedence is specified, but sbrk(2) is not supported.
-  - Fix fallback lg_floor() implementations to handle extremely large inputs.
-  - Ensure the default purgeable zone is after the default zone on OS X.
-  - Fix latent bugs in atomic_*().
-  - Fix the "arena.<i>.dss" mallctl to handle read-only calls.
-  - Fix tls_model configuration to enable the initial-exec model when possible.
-  - Mark malloc_conf as a weak symbol so that the application can override it.
-  - Correctly detect glibc's adaptive pthread mutexes.
-  - Fix the --without-export configure option.
-
-* 3.6.0 (March 31, 2014)
-
-  This version contains a critical bug fix for a regression present in 3.5.0 and
-  3.5.1.
-
-  Bug fixes:
-  - Fix a regression in arena_chunk_alloc() that caused crashes during
-    small/large allocation if chunk allocation failed.  In the absence of this
-    bug, chunk allocation failure would result in allocation failure, e.g.  NULL
-    return from malloc().  This regression was introduced in 3.5.0.
-  - Fix backtracing for gcc intrinsics-based backtracing by specifying
-    -fno-omit-frame-pointer to gcc.  Note that the application (and all the
-    libraries it links to) must also be compiled with this option for
-    backtracing to be reliable.
-  - Use dss allocation precedence for huge allocations as well as small/large
-    allocations.
-  - Fix test assertion failure message formatting.  This bug did not manifest on
-    x86_64 systems because of implementation subtleties in va_list.
-  - Fix inconsequential test failures for hash and SFMT code.
-
-  New features:
-  - Support heap profiling on FreeBSD.  This feature depends on the proc
-    filesystem being mounted during heap profile dumping.
-
-* 3.5.1 (February 25, 2014)
-
-  This version primarily addresses minor bugs in test code.
-
-  Bug fixes:
-  - Configure Solaris/Illumos to use MADV_FREE.
-  - Fix junk filling for mremap(2)-based huge reallocation.  This is only
-    relevant if configuring with the --enable-mremap option specified.
-  - Avoid compilation failure if 'restrict' C99 keyword is not supported by the
-    compiler.
-  - Add a configure test for SSE2 rather than assuming it is usable on i686
-    systems.  This fixes test compilation errors, especially on 32-bit Linux
-    systems.
-  - Fix mallctl argument size mismatches (size_t vs. uint64_t) in the stats unit
-    test.
-  - Fix/remove flawed alignment-related overflow tests.
-  - Prevent compiler optimizations that could change backtraces in the
-    prof_accum unit test.
-
-* 3.5.0 (January 22, 2014)
-
-  This version focuses on refactoring and automated testing, though it also
-  includes some non-trivial heap profiling optimizations not mentioned below.
-
-  New features:
-  - Add the *allocx() API, which is a successor to the experimental *allocm()
-    API.  The *allocx() functions are slightly simpler to use because they have
-    fewer parameters, they directly return the results of primary interest, and
-    mallocx()/rallocx() avoid the strict aliasing pitfall that
-    allocm()/rallocm() share with posix_memalign().  Note that *allocm() is
-    slated for removal in the next non-bugfix release.
-  - Add support for LinuxThreads.
-
-  Bug fixes:
-  - Unless heap profiling is enabled, disable floating point code and don't link
-    with libm.  This, in combination with e.g. EXTRA_CFLAGS=-mno-sse on x64
-    systems, makes it possible to completely disable floating point register
-    use.  Some versions of glibc neglect to save/restore caller-saved floating
-    point registers during dynamic lazy symbol loading, and the symbol loading
-    code uses whatever malloc the application happens to have linked/loaded
-    with, the result being potential floating point register corruption.
-  - Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling
-    backtrace creation in imemalign().  This bug impacted posix_memalign() and
-    aligned_alloc().
-  - Fix a file descriptor leak in a prof_dump_maps() error path.
-  - Fix prof_dump() to close the dump file descriptor for all relevant error
-    paths.
-  - Fix rallocm() to use the arena specified by the ALLOCM_ARENA(s) flag for
-    allocation, not just deallocation.
-  - Fix a data race for large allocation stats counters.
-  - Fix a potential infinite loop during thread exit.  This bug occurred on
-    Solaris, and could affect other platforms with similar pthreads TSD
-    implementations.
-  - Don't junk-fill reallocations unless usable size changes.  This fixes a
-    violation of the *allocx()/*allocm() semantics.
-  - Fix growing large reallocation to junk fill new space.
-  - Fix huge deallocation to junk fill when munmap is disabled.
-  - Change the default private namespace prefix from empty to je_, and change
-    --with-private-namespace-prefix so that it prepends an additional prefix
-    rather than replacing je_.  This reduces the likelihood of applications
-    which statically link jemalloc experiencing symbol name collisions.
-  - Add missing private namespace mangling (relevant when
-    --with-private-namespace is specified).
-  - Add and use JEMALLOC_INLINE_C so that static inline functions are marked as
-    static even for debug builds.
-  - Add a missing mutex unlock in a malloc_init_hard() error path.  In practice
-    this error path is never executed.
-  - Fix numerous bugs in malloc_strotumax() error handling/reporting.  These
-    bugs had no impact except for malformed inputs.
-  - Fix numerous bugs in malloc_snprintf().  These bugs were not exercised by
-    existing calls, so they had no impact.
-
-* 3.4.1 (October 20, 2013)
-
-  Bug fixes:
-  - Fix a race in the "arenas.extend" mallctl that could cause memory corruption
-    of internal data structures and subsequent crashes.
-  - Fix Valgrind integration flaws that caused Valgrind warnings about reads of
-    uninitialized memory in:
-    + arena chunk headers
-    + internal zero-initialized data structures (relevant to tcache and prof
-      code)
-  - Preserve errno during the first allocation.  A readlink(2) call during
-    initialization fails unless /etc/malloc.conf exists, so errno was typically
-    set during the first allocation prior to this fix.
-  - Fix compilation warnings reported by gcc 4.8.1.
-
-* 3.4.0 (June 2, 2013)
-
-  This version is essentially a small bugfix release, but the addition of
-  aarch64 support requires that the minor version be incremented.
-
-  Bug fixes:
-  - Fix race-triggered deadlocks in chunk_record().  These deadlocks were
-    typically triggered by multiple threads concurrently deallocating huge
-    objects.
-
-  New features:
-  - Add support for the aarch64 architecture.
-
-* 3.3.1 (March 6, 2013)
-
-  This version fixes bugs that are typically encountered only when utilizing
-  custom run-time options.
-
-  Bug fixes:
-  - Fix a locking order bug that could cause deadlock during fork if heap
-    profiling were enabled.
-  - Fix a chunk recycling bug that could cause the allocator to lose track of
-    whether a chunk was zeroed.  On FreeBSD, NetBSD, and OS X, it could cause
-    corruption if allocating via sbrk(2) (unlikely unless running with the
-    "dss:primary" option specified).  This was completely harmless on Linux
-    unless using mlockall(2) (and unlikely even then, unless the
-    --disable-munmap configure option or the "dss:primary" option was
-    specified).  This regression was introduced in 3.1.0 by the
-    mlockall(2)/madvise(2) interaction fix.
-  - Fix TLS-related memory corruption that could occur during thread exit if the
-    thread never allocated memory.  Only the quarantine and prof facilities were
-    susceptible.
-  - Fix two quarantine bugs:
-    + Internal reallocation of the quarantined object array leaked the old
-      array.
-    + Reallocation failure for internal reallocation of the quarantined object
-      array (very unlikely) resulted in memory corruption.
-  - Fix Valgrind integration to annotate all internally allocated memory in a
-    way that keeps Valgrind happy about internal data structure access.
-  - Fix building for s390 systems.
-
-* 3.3.0 (January 23, 2013)
-
-  This version includes a few minor performance improvements in addition to the
-  listed new features and bug fixes.
-
-  New features:
-  - Add clipping support to lg_chunk option processing.
-  - Add the --enable-ivsalloc option.
-  - Add the --without-export option.
-  - Add the --disable-zone-allocator option.
-
-  Bug fixes:
-  - Fix "arenas.extend" mallctl to output the number of arenas.
-  - Fix chunk_recycle() to unconditionally inform Valgrind that returned memory
-    is undefined.
-  - Fix build break on FreeBSD related to alloca.h.
-
-* 3.2.0 (November 9, 2012)
-
-  In addition to a couple of bug fixes, this version modifies page run
-  allocation and dirty page purging algorithms in order to better control
-  page-level virtual memory fragmentation.
-
-  Incompatible changes:
-  - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1).
-
-  Bug fixes:
-  - Fix dss/mmap allocation precedence code to use recyclable mmap memory only
-    after primary dss allocation fails.
-  - Fix deadlock in the "arenas.purge" mallctl.  This regression was introduced
-    in 3.1.0 by the addition of the "arena.<i>.purge" mallctl.
-
-* 3.1.0 (October 16, 2012)
-
-  New features:
-  - Auto-detect whether running inside Valgrind, thus removing the need to
-    manually specify MALLOC_CONF=valgrind:true.
-  - Add the "arenas.extend" mallctl, which allows applications to create
-    manually managed arenas.
-  - Add the ALLOCM_ARENA() flag for {,r,d}allocm().
-  - Add the "opt.dss", "arena.<i>.dss", and "stats.arenas.<i>.dss" mallctls,
-    which provide control over dss/mmap precedence.
-  - Add the "arena.<i>.purge" mallctl, which obsoletes "arenas.purge".
-  - Define LG_QUANTUM for hppa.
-
-  Incompatible changes:
-  - Disable tcache by default if running inside Valgrind, in order to avoid
-    making unallocated objects appear reachable to Valgrind.
-  - Drop const from malloc_usable_size() argument on Linux.
-
-  Bug fixes:
-  - Fix heap profiling crash if sampled object is freed via realloc(p, 0).
-  - Remove const from __*_hook variable declarations, so that glibc can modify
-    them during process forking.
-  - Fix mlockall(2)/madvise(2) interaction.
-  - Fix fork(2)-related deadlocks.
-  - Fix error return value for "thread.tcache.enabled" mallctl.
-
-* 3.0.0 (May 11, 2012)
-
-  Although this version adds some major new features, the primary focus is on
-  internal code cleanup that facilitates maintainability and portability, most
-  of which is not reflected in the ChangeLog.  This is the first release to
-  incorporate substantial contributions from numerous other developers, and the
-  result is a more broadly useful allocator (see the git revision history for
-  contribution details).  Note that the license has been unified, thanks to
-  Facebook granting a license under the same terms as the other copyright
-  holders (see COPYING).
-
-  New features:
-  - Implement Valgrind support, redzones, and quarantine.
-  - Add support for additional platforms:
-    + FreeBSD
-    + Mac OS X Lion
-    + MinGW
-    + Windows (no support yet for replacing the system malloc)
-  - Add support for additional architectures:
-    + MIPS
-    + SH4
-    + Tilera
-  - Add support for cross compiling.
-  - Add nallocm(), which rounds a request size up to the nearest size class
-    without actually allocating.
-  - Implement aligned_alloc() (blame C11).
-  - Add the "thread.tcache.enabled" mallctl.
-  - Add the "opt.prof_final" mallctl.
-  - Update pprof (from gperftools 2.0).
-  - Add the --with-mangling option.
-  - Add the --disable-experimental option.
-  - Add the --disable-munmap option, and make it the default on Linux.
-  - Add the --enable-mremap option, which disables use of mremap(2) by default.
-
-  Incompatible changes:
-  - Enable stats by default.
-  - Enable fill by default.
-  - Disable lazy locking by default.
-  - Rename the "tcache.flush" mallctl to "thread.tcache.flush".
-  - Rename the "arenas.pagesize" mallctl to "arenas.page".
-  - Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB).
-  - Change the "opt.prof_accum" default from true to false.
-
-  Removed features:
-  - Remove the swap feature, including the "config.swap", "swap.avail",
-    "swap.prezeroed", "swap.nfds", and "swap.fds" mallctls.
-  - Remove highruns statistics, including the
-    "stats.arenas.<i>.bins.<j>.highruns" and
-    "stats.arenas.<i>.lruns.<j>.highruns" mallctls.
-  - As part of small size class refactoring, remove the "opt.lg_[qc]space_max",
-    "arenas.cacheline", "arenas.subpage", "arenas.[tqcs]space_{min,max}", and
-    "arenas.[tqcs]bins" mallctls.
-  - Remove the "arenas.chunksize" mallctl.
-  - Remove the "opt.lg_prof_tcmax" option.
-  - Remove the "opt.lg_prof_bt_max" option.
-  - Remove the "opt.lg_tcache_gc_sweep" option.
-  - Remove the --disable-tiny option, including the "config.tiny" mallctl.
-  - Remove the --enable-dynamic-page-shift configure option.
-  - Remove the --enable-sysv configure option.
-
-  Bug fixes:
-  - Fix a statistics-related bug in the "thread.arena" mallctl that could cause
-    invalid statistics and crashes.
-  - Work around TLS deallocation via free() on Linux.  This bug could cause
-    write-after-free memory corruption.
-  - Fix a potential deadlock that could occur during interval- and
-    growth-triggered heap profile dumps.
-  - Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags.
-  - Fix chunk_alloc_dss() to stop claiming memory is zeroed.  This bug could
-    cause memory corruption and crashes with --enable-dss specified.
-  - Fix fork-related bugs that could cause deadlock in children between fork
-    and exec.
-  - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter.
-  - Fix realloc(p, 0) to act like free(p).
-  - Do not enforce minimum alignment in memalign().
-  - Check for NULL pointer in malloc_usable_size().
-  - Fix an off-by-one heap profile statistics bug that could be observed in
-    interval- and growth-triggered heap profiles.
-  - Fix the "epoch" mallctl to update cached stats even if the passed in epoch
-    is 0.
-  - Fix bin->runcur management to fix a layout policy bug.  This bug did not
-    affect correctness.
-  - Fix a bug in choose_arena_hard() that potentially caused more arenas to be
-    initialized than necessary.
-  - Add missing "opt.lg_tcache_max" mallctl implementation.
-  - Use glibc allocator hooks to make mixed allocator usage less likely.
-  - Fix build issues for --disable-tcache.
-  - Don't mangle pthread_create() when --with-private-namespace is specified.
-
-* 2.2.5 (November 14, 2011)
-
-  Bug fixes:
-  - Fix huge_ralloc() race when using mremap(2).  This is a serious bug that
-    could cause memory corruption and/or crashes.
-  - Fix huge_ralloc() to maintain chunk statistics.
-  - Fix malloc_stats_print(..., "a") output.
-
-* 2.2.4 (November 5, 2011)
-
-  Bug fixes:
-  - Initialize arenas_tsd before using it.  This bug existed for 2.2.[0-3], as
-    well as for --disable-tls builds in earlier releases.
-  - Do not assume a 4 KiB page size in test/rallocm.c.
-
-* 2.2.3 (August 31, 2011)
-
-  This version fixes numerous bugs related to heap profiling.
-
-  Bug fixes:
-  - Fix a prof-related race condition.  This bug could cause memory corruption,
-    but only occurred in non-default configurations (prof_accum:false).
-  - Fix off-by-one backtracing issues (make sure that prof_alloc_prep() is
-    excluded from backtraces).
-  - Fix a prof-related bug in realloc() (only triggered by OOM errors).
-  - Fix prof-related bugs in allocm() and rallocm().
-  - Fix prof_tdata_cleanup() for --disable-tls builds.
-  - Fix a relative include path, to fix objdir builds.
-
-* 2.2.2 (July 30, 2011)
-
-  Bug fixes:
-  - Fix a build error for --disable-tcache.
-  - Fix assertions in arena_purge() (for real this time).
-  - Add the --with-private-namespace option.  This is a workaround for symbol
-    conflicts that can inadvertently arise when using static libraries.
-
-* 2.2.1 (March 30, 2011)
-
-  Bug fixes:
-  - Implement atomic operations for x86/x64.  This fixes compilation failures
-    for versions of gcc that are still in wide use.
-  - Fix an assertion in arena_purge().
-
-* 2.2.0 (March 22, 2011)
-
-  This version incorporates several improvements to algorithms and data
-  structures that tend to reduce fragmentation and increase speed.
-
-  New features:
-  - Add the "stats.cactive" mallctl.
-  - Update pprof (from google-perftools 1.7).
-  - Improve backtracing-related configuration logic, and add the
-    --disable-prof-libgcc option.
-
-  Bug fixes:
-  - Change default symbol visibility from "internal", to "hidden", which
-    decreases the overhead of library-internal function calls.
-  - Fix symbol visibility so that it is also set on OS X.
-  - Fix a build dependency regression caused by the introduction of the .pic.o
-    suffix for PIC object files.
-  - Add missing checks for mutex initialization failures.
-  - Don't use libgcc-based backtracing except on x64, where it is known to work.
-  - Fix deadlocks on OS X that were due to memory allocation in
-    pthread_mutex_lock().
-  - Heap profiling-specific fixes:
-    + Fix memory corruption due to integer overflow in small region index
-      computation, when using a small enough sample interval that profiling
-      context pointers are stored in small run headers.
-    + Fix a bootstrap ordering bug that only occurred with TLS disabled.
-    + Fix a rallocm() rsize bug.
-    + Fix error detection bugs for aligned memory allocation.
-
-* 2.1.3 (March 14, 2011)
-
-  Bug fixes:
-  - Fix a cpp logic regression (due to the "thread.{de,}allocatedp" mallctl fix
-    for OS X in 2.1.2).
-  - Fix a "thread.arena" mallctl bug.
-  - Fix a thread cache stats merging bug.
-
-* 2.1.2 (March 2, 2011)
-
-  Bug fixes:
-  - Fix "thread.{de,}allocatedp" mallctl for OS X.
-  - Add missing jemalloc.a to build system.
-
-* 2.1.1 (January 31, 2011)
-
-  Bug fixes:
-  - Fix aligned huge reallocation (affected allocm()).
-  - Fix the ALLOCM_LG_ALIGN macro definition.
-  - Fix a heap dumping deadlock.
-  - Fix a "thread.arena" mallctl bug.
-
-* 2.1.0 (December 3, 2010)
-
-  This version incorporates some optimizations that can't quite be considered
-  bug fixes.
-
-  New features:
-  - Use Linux's mremap(2) for huge object reallocation when possible.
-  - Avoid locking in mallctl*() when possible.
-  - Add the "thread.[de]allocatedp" mallctl's.
-  - Convert the manual page source from roff to DocBook, and generate both roff
-    and HTML manuals.
-
-  Bug fixes:
-  - Fix a crash due to incorrect bootstrap ordering.  This only impacted
-    --enable-debug --enable-dss configurations.
-  - Fix a minor statistics bug for mallctl("swap.avail", ...).
-
-* 2.0.1 (October 29, 2010)
-
-  Bug fixes:
-  - Fix a race condition in heap profiling that could cause undefined behavior
-    if "opt.prof_accum" were disabled.
-  - Add missing mutex unlocks for some OOM error paths in the heap profiling
-    code.
-  - Fix a compilation error for non-C99 builds.
-
-* 2.0.0 (October 24, 2010)
-
-  This version focuses on the experimental *allocm() API, and on improved
-  run-time configuration/introspection.  Nonetheless, numerous performance
-  improvements are also included.
-
-  New features:
-  - Implement the experimental {,r,s,d}allocm() API, which provides a superset
-    of the functionality available via malloc(), calloc(), posix_memalign(),
-    realloc(), malloc_usable_size(), and free().  These functions can be used to
-    allocate/reallocate aligned zeroed memory, ask for optional extra memory
-    during reallocation, prevent object movement during reallocation, etc.
-  - Replace JEMALLOC_OPTIONS/JEMALLOC_PROF_PREFIX with MALLOC_CONF, which is
-    more human-readable, and more flexible.  For example:
-      JEMALLOC_OPTIONS=AJP
-    is now:
-      MALLOC_CONF=abort:true,fill:true,stats_print:true
-  - Port to Apple OS X.  Sponsored by Mozilla.
-  - Make it possible for the application to control thread-->arena mappings via
-    the "thread.arena" mallctl.
-  - Add compile-time support for all TLS-related functionality via pthreads TSD.
-    This is mainly of interest for OS X, which does not support TLS, but has a
-    TSD implementation with similar performance.
-  - Override memalign() and valloc() if they are provided by the system.
-  - Add the "arenas.purge" mallctl, which can be used to synchronously purge all
-    dirty unused pages.
-  - Make cumulative heap profiling data optional, so that it is possible to
-    limit the amount of memory consumed by heap profiling data structures.
-  - Add per thread allocation counters that can be accessed via the
-    "thread.allocated" and "thread.deallocated" mallctls.
-
-  Incompatible changes:
-  - Remove JEMALLOC_OPTIONS and malloc_options (see MALLOC_CONF above).
-  - Increase default backtrace depth from 4 to 128 for heap profiling.
-  - Disable interval-based profile dumps by default.
-
-  Bug fixes:
-  - Remove bad assertions in fork handler functions.  These assertions could
-    cause aborts for some combinations of configure settings.
-  - Fix strerror_r() usage to deal with non-standard semantics in GNU libc.
-  - Fix leak context reporting.  This bug tended to cause the number of contexts
-    to be underreported (though the reported number of objects and bytes were
-    correct).
-  - Fix a realloc() bug for large in-place growing reallocation.  This bug could
-    cause memory corruption, but it was hard to trigger.
-  - Fix an allocation bug for small allocations that could be triggered if
-    multiple threads raced to create a new run of backing pages.
-  - Enhance the heap profiler to trigger samples based on usable size, rather
-    than request size.
-  - Fix a heap profiling bug due to sometimes losing track of requested object
-    size for sampled objects.
-
-* 1.0.3 (August 12, 2010)
-
-  Bug fixes:
-  - Fix the libunwind-based implementation of stack backtracing (used for heap
-    profiling).  This bug could cause zero-length backtraces to be reported.
-  - Add a missing mutex unlock in library initialization code.  If multiple
-    threads raced to initialize malloc, some of them could end up permanently
-    blocked.
-
-* 1.0.2 (May 11, 2010)
-
-  Bug fixes:
-  - Fix junk filling of large objects, which could cause memory corruption.
-  - Add MAP_NORESERVE support for chunk mapping, because otherwise virtual
-    memory limits could cause swap file configuration to fail.  Contributed by
-    Jordan DeLong.
-
-* 1.0.1 (April 14, 2010)
-
-  Bug fixes:
-  - Fix compilation when --enable-fill is specified.
-  - Fix threads-related profiling bugs that affected accuracy and caused memory
-    to be leaked during thread exit.
-  - Fix dirty page purging race conditions that could cause crashes.
-  - Fix crash in tcache flushing code during thread destruction.
-
-* 1.0.0 (April 11, 2010)
-
-  This release focuses on speed and run-time introspection.  Numerous
-  algorithmic improvements make this release substantially faster than its
-  predecessors.
-
-  New features:
-  - Implement autoconf-based configuration system.
-  - Add mallctl*(), for the purposes of introspection and run-time
-    configuration.
-  - Make it possible for the application to manually flush a thread's cache, via
-    the "tcache.flush" mallctl.
-  - Base maximum dirty page count on proportion of active memory.
-  - Compute various additional run-time statistics, including per size class
-    statistics for large objects.
-  - Expose malloc_stats_print(), which can be called repeatedly by the
-    application.
-  - Simplify the malloc_message() signature to only take one string argument,
-    and incorporate an opaque data pointer argument for use by the application
-    in combination with malloc_stats_print().
-  - Add support for allocation backed by one or more swap files, and allow the
-    application to disable over-commit if swap files are in use.
-  - Implement allocation profiling and leak checking.
-
-  Removed features:
-  - Remove the dynamic arena rebalancing code, since thread-specific caching
-    reduces its utility.
-
-  Bug fixes:
-  - Modify chunk allocation to work when address space layout randomization
-    (ASLR) is in use.
-  - Fix thread cleanup bugs related to TLS destruction.
-  - Handle 0-size allocation requests in posix_memalign().
-  - Fix a chunk leak.  The leaked chunks were never touched, so this impacted
-    virtual memory usage, but not physical memory usage.
-
-* linux_2008082[78]a (August 27/28, 2008)
-
-  These snapshot releases are the simple result of incorporating Linux-specific
-  support into the FreeBSD malloc sources.
-
---------------------------------------------------------------------------------
-vim:filetype=text:textwidth=80
diff --git a/zircon/third_party/ulib/jemalloc/INSTALL b/zircon/third_party/ulib/jemalloc/INSTALL
deleted file mode 100644
index d749661..0000000
--- a/zircon/third_party/ulib/jemalloc/INSTALL
+++ /dev/null
@@ -1,435 +0,0 @@
-Building and installing a packaged release of jemalloc can be as simple as
-typing the following while in the root directory of the source tree:
-
-    ./configure
-    make
-    make install
-
-If building from unpackaged developer sources, the simplest command sequence
-that might work is:
-
-    ./autogen.sh
-    make dist
-    make
-    make install
-
-Note that documentation is not built by the default target because doing so
-would create a dependency on xsltproc in packaged releases, hence the
-requirement to either run 'make dist' or avoid installing docs via the various
-install_* targets documented below.
-
-=== Advanced configuration =====================================================
-
-The 'configure' script supports numerous options that allow control of which
-functionality is enabled, where jemalloc is installed, etc.  Optionally, pass
-any of the following arguments (not a definitive list) to 'configure':
-
---help
-    Print a definitive list of options.
-
---prefix=<install-root-dir>
-    Set the base directory in which to install.  For example:
-
-        ./configure --prefix=/usr/local
-
-    will cause files to be installed into /usr/local/include, /usr/local/lib,
-    and /usr/local/man.
-
---with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>
-    Use the specified version string rather than trying to generate one (if in
-    a git repository) or using the existing VERSION file (if present).
-
---with-rpath=<colon-separated-rpath>
-    Embed one or more library paths, so that libjemalloc can find the libraries
-    it is linked to.  This works only on ELF-based systems.
-
---with-mangling=<map>
-    Mangle public symbols specified in <map>, which is a comma-separated list
-    of name:mangled pairs.
-
-    For example, to use ld's --wrap option as an alternative method for
-    overriding libc's malloc implementation, specify something like:
-
-      --with-mangling=malloc:__wrap_malloc,free:__wrap_free[...]
-
-    Note that mangling happens prior to application of the prefix specified by
-    --with-jemalloc-prefix, and mangled symbols are then ignored when applying
-    the prefix.
-
---with-jemalloc-prefix=<prefix>
-    Prefix all public APIs with <prefix>.  For example, if <prefix> is
-    "prefix_", API changes like the following occur:
-
-      malloc()         --> prefix_malloc()
-      malloc_conf      --> prefix_malloc_conf
-      /etc/malloc.conf --> /etc/prefix_malloc.conf
-      MALLOC_CONF      --> PREFIX_MALLOC_CONF
-
-    This makes it possible to use jemalloc at the same time as the system
-    allocator, or even to use multiple copies of jemalloc simultaneously.
-
-    By default, the prefix is "", except on OS X, where it is "je_".  On OS X,
-    jemalloc overlays the default malloc zone, but makes no attempt to actually
-    replace the "malloc", "calloc", etc. symbols.
-
---without-export
-    Don't export public APIs.  This can be useful when building jemalloc as a
-    static library, or to avoid exporting public APIs when using the zone
-    allocator on OSX.
-
---with-private-namespace=<prefix>
-    Prefix all library-private APIs with <prefix>je_.  For shared libraries,
-    symbol visibility mechanisms prevent these symbols from being exported, but
-    for static libraries, naming collisions are a real possibility.  By
-    default, <prefix> is empty, which results in a symbol prefix of "je_".
-
---with-install-suffix=<suffix>
-    Append <suffix> to the base name of all installed files, such that multiple
-    versions of jemalloc can coexist in the same installation directory.  For
-    example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0.
-
---with-malloc-conf=<malloc_conf>
-    Embed <malloc_conf> as a run-time options string that is processed prior to
-    the malloc_conf global variable, the /etc/malloc.conf symlink, and the
-    MALLOC_CONF environment variable.  For example, to change the default decay
-    time to 30 seconds:
-
-      --with-malloc-conf=decay_time:30
-
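An application can also supply the next string in that processing order by
defining the malloc_conf global variable; a minimal sketch, again assuming the
default (empty) symbol prefix (with --with-jemalloc-prefix the variable becomes
<prefix>malloc_conf, as shown in the mapping above):

    #include <jemalloc/jemalloc.h>

    /* Consulted after any string embedded via --with-malloc-conf, and
     * before /etc/malloc.conf and the MALLOC_CONF environment variable. */
    const char *malloc_conf = "decay_time:30,narenas:4";

    int main(void) {
        /* ... normal allocation activity ... */
        return 0;
    }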
---disable-cc-silence
-    Disable code that silences non-useful compiler warnings.  This is mainly
-    useful during development when auditing the set of warnings that are being
-    silenced.
-
---enable-debug
-    Enable assertions and validation code.  This incurs a substantial
-    performance hit, but is very useful during application development.
-    Implies --enable-ivsalloc.
-
---enable-code-coverage
-    Enable code coverage support, for use during jemalloc test development.
-    Additional testing targets are available if this option is enabled:
-
-      coverage
-      coverage_unit
-      coverage_integration
-      coverage_stress
-
-    These targets do not clear code coverage results from previous runs, and
-    there are interactions between the various coverage targets, so it is
-    usually advisable to run 'make clean' between repeated code coverage runs.
-
---disable-stats
-    Disable statistics gathering functionality.  See the "opt.stats_print"
-    option documentation for usage details.
-
---enable-ivsalloc
-    Enable validation code for malloc_usable_size() and sallocx(), which
-    verifies that pointers reside within jemalloc-owned extents before
-    dereferencing metadata.  This incurs a minor performance hit, and causes
-    the functions to return 0 for failed lookups.
-
---enable-prof
-    Enable heap profiling and leak detection functionality.  See the "opt.prof"
-    option documentation for usage details.  When enabled, there are several
-    approaches to backtracing, and the configure script chooses the first one
-    in the following list that appears to function correctly:
-
-    + libunwind      (requires --enable-prof-libunwind)
-    + libgcc         (unless --disable-prof-libgcc)
-    + gcc intrinsics (unless --disable-prof-gcc)
-
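Enabling the feature at build time is only half of it: profiling still has to
be activated and dumped at run time.  One common way to trigger a dump from
inside the application is the "prof.dump" mallctl, sketched here under the
assumption that the library was built with --enable-prof and started with the
prof:true option (e.g. via MALLOC_CONF); the file name is purely illustrative:

    #include <jemalloc/jemalloc.h>

    static void dump_heap_profile(void) {
        const char *filename = "jeprof.manual.heap";  /* hypothetical name */
        if (mallctl("prof.dump", NULL, NULL, &filename,
                    sizeof(filename)) != 0) {
            /* Profiling is disabled or was not compiled in. */
        }
    }

The resulting dump file is what the jeprof tool consumes.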
---enable-prof-libunwind
-    Use the libunwind library (http://www.nongnu.org/libunwind/) for stack
-    backtracing.
-
---disable-prof-libgcc
-    Disable the use of libgcc's backtracing functionality.
-
---disable-prof-gcc
-    Disable the use of gcc intrinsics for backtracing.
-
---with-static-libunwind=<libunwind.a>
-    Statically link against the specified libunwind.a rather than dynamically
-    linking with -lunwind.
-
---disable-tcache
-    Disable thread-specific caches for small objects.  Objects are cached and
-    released in bulk, thus reducing the total number of mutex operations.  See
-    the "opt.tcache" option for usage details.
-
---disable-munmap
-    Disable virtual memory deallocation via munmap(2); instead keep track of
-    the virtual memory for later use.  munmap() is disabled by default (i.e.
-    --disable-munmap is implied) on Linux, which has a quirk in its virtual
-    memory allocation algorithm that causes semi-permanent VM map holes under
-    normal jemalloc operation.
-
---disable-fill
-    Disable support for junk/zero filling of memory.  See the "opt.junk" and
-    "opt.zero" option documentation for usage details.
-
---disable-zone-allocator
-    Disable zone allocator for Darwin.  This means jemalloc won't be hooked as
-    the default allocator on OSX/iOS.
-
---enable-utrace
-    Enable utrace(2)-based allocation tracing.  This feature is not broadly
-    portable (FreeBSD has it, but Linux and OS X do not).
-
---enable-xmalloc
-    Enable support for optional immediate termination due to out-of-memory
-    errors, as is commonly implemented by an "xmalloc" wrapper function for
-    malloc.  See the "opt.xmalloc" option documentation for usage details.
-
---enable-lazy-lock
-    Enable code that wraps pthread_create() to detect when an application
-    switches from single-threaded to multi-threaded mode, so that it can avoid
-    mutex locking/unlocking operations while in single-threaded mode.  In
-    practice, this feature usually has little impact on performance unless
-    thread-specific caching is disabled.
-
---disable-tls
-    Disable thread-local storage (TLS), which allows for fast access to
-    thread-local variables via the __thread keyword.  If TLS is available,
-    jemalloc uses it for several purposes.
-
---disable-cache-oblivious
-    Disable cache-oblivious large allocation alignment for large allocation
-    requests with no alignment constraints.  If this feature is disabled, all
-    large allocations are page-aligned as an implementation artifact, which can
-    severely harm CPU cache utilization.  However, the cache-oblivious layout
-    comes at the cost of one extra page per large allocation, which in the
-    most extreme case increases physical memory usage for the 16 KiB size class
-    to 20 KiB.
-
---disable-syscall
-    Disable use of syscall(2) rather than {open,read,write,close}(2).  This is
-    intended as a workaround for systems that place security limitations on
-    syscall(2).
-
---disable-cxx
-    Disable C++ integration.  This will cause new and delete operator
-    implementations to be omitted.
-
---with-xslroot=<path>
-    Specify where to find DocBook XSL stylesheets when building the
-    documentation.
-
---with-lg-page=<lg-page>
-    Specify the base 2 log of the system page size.  This option is only useful
-    when cross compiling, since the configure script automatically determines
-    the host's page size by default.
-
---with-lg-page-sizes=<lg-page-sizes>
-    Specify the comma-separated base 2 logs of the page sizes to support.  This
-    option may be useful when cross-compiling in combination with
-    --with-lg-page, but its primary use case is for integration with FreeBSD's
-    libc, wherein jemalloc is embedded.
-
---with-lg-hugepage=<lg-hugepage>
-    Specify the base 2 log of the system huge page size.  This option is useful
-    when cross compiling, or when overriding the default for systems that do
-    not explicitly support huge pages.
-
---with-lg-size-class-group=<lg-size-class-group>
-    Specify the base 2 log of how many size classes to use for each doubling in
-    size.  By default jemalloc uses <lg-size-class-group>=2, which results in
-    e.g. the following size classes:
-
-      [...], 64,
-      80, 96, 112, 128,
-      160, [...]
-
-    <lg-size-class-group>=3 results in e.g. the following size classes:
-
-      [...], 64,
-      72, 80, 88, 96, 104, 112, 120, 128,
-      144, [...]
-
-    The minimal <lg-size-class-group>=0 causes jemalloc to only provide size
-    classes that are powers of 2:
-
-      [...],
-      64,
-      128,
-      256,
-      [...]
-
-    An implementation detail currently limits the total number of small size
-    classes to 255, and a compilation error will result if the
-    <lg-size-class-group> you specify cannot be supported.  The limit is
-    roughly <lg-size-class-group>=4, depending on page size.
-
---with-lg-quantum=<lg-quantum>
-    Specify the base 2 log of the minimum allocation alignment.  jemalloc needs
-    to know the minimum alignment that meets the following C standard
-    requirement (quoted from the April 12, 2011 draft of the C11 standard):
-
-      The pointer returned if the allocation succeeds is suitably aligned so
-      that it may be assigned to a pointer to any type of object with a
-      fundamental alignment requirement and then used to access such an object
-      or an array of such objects in the space allocated [...]
-
-    This setting is architecture-specific, and although jemalloc includes known
-    safe values for the most commonly used modern architectures, there is a
-    wrinkle related to GNU libc (glibc) that may impact your choice of
-    <lg-quantum>.  On most modern architectures, this mandates 16-byte alignment
-    (<lg-quantum>=4), but the glibc developers chose not to meet this
-    requirement for performance reasons.  An old discussion can be found at
-    https://sourceware.org/bugzilla/show_bug.cgi?id=206 .  Unlike glibc,
-    jemalloc does follow the C standard by default (caveat: jemalloc
-    technically cheats if --with-lg-tiny-min is smaller than
-    --with-lg-quantum), but the fact that Linux systems already work around
-    this allocator noncompliance means that it is generally safe in practice to
-    let jemalloc's minimum alignment follow glibc's lead.  If you specify
-    --with-lg-quantum=3 during configuration, jemalloc will provide additional
-    size classes that are not 16-byte-aligned (24, 40, and 56, assuming
-    --with-lg-size-class-group=2).
-
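The spacing that <lg-quantum> and <lg-size-class-group> control can be observed
directly with nallocx(), which reports the size a request would actually be
backed by; a rough sketch, assuming the defaults described above on a typical
64-bit build:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* With <lg-quantum>=4 and <lg-size-class-group>=2 this typically
         * prints 8, 16, 32, 48, 80 and 160: each request is rounded up to
         * the nearest size class. */
        size_t requests[] = {1, 9, 17, 33, 65, 129};
        for (size_t i = 0; i < sizeof(requests) / sizeof(requests[0]); i++)
            printf("request %zu -> size class %zu\n",
                   requests[i], nallocx(requests[i], 0));
        return 0;
    }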
---with-lg-tiny-min=<lg-tiny-min>
-    Specify the base 2 log of the minimum tiny size class to support.  Tiny
-    size classes are powers of 2 less than the quantum, and are only
-    incorporated if <lg-tiny-min> is less than <lg-quantum> (see
-    --with-lg-quantum).  Tiny size classes technically violate the C standard
-    requirement for minimum alignment, and crashes could conceivably result if
-    the compiler were to generate instructions that made alignment assumptions,
-    both because illegal instruction traps could result, and because accesses
-    could straddle page boundaries and cause segmentation faults due to
-    accessing unmapped addresses.
-
-    The default of <lg-tiny-min>=3 works well in practice even on architectures
-    that technically require 16-byte alignment, probably for the same reason
-    --with-lg-quantum=3 works.  Smaller tiny size classes can, and will, cause
-    crashes (see https://bugzilla.mozilla.org/show_bug.cgi?id=691003 for an
-    example).
-
-    This option is rarely useful, and is mainly provided as documentation of a
-    subtle implementation detail.  If you do use this option, specify a
-    value in [3, ..., <lg-quantum>].
-
-The following environment variables (not a definitive list) impact configure's
-behavior:
-
-CFLAGS="?"
-CXXFLAGS="?"
-    Pass these flags to the C/C++ compiler.  Any flags set by the configure
-    script are prepended, which means explicitly set flags generally take
-    precedence.  Take care when specifying flags such as -Werror, because
-    configure tests may be affected in undesirable ways.
-
-EXTRA_CFLAGS="?"
-EXTRA_CXXFLAGS="?"
-    Append these flags to CFLAGS/CXXFLAGS, without passing them to the
-    compiler(s) during configuration.  This makes it possible to add flags such
-    as -Werror, while allowing the configure script to determine what other
-    flags are appropriate for the specified configuration.
-
-CPPFLAGS="?"
-    Pass these flags to the C preprocessor.  Note that CFLAGS is not passed to
-    'cpp' when 'configure' is looking for include files, so you must use
-    CPPFLAGS instead if you need to help 'configure' find header files.
-
-LD_LIBRARY_PATH="?"
-    'ld' uses this colon-separated list to find libraries.
-
-LDFLAGS="?"
-    Pass these flags when linking.
-
-PATH="?"
-    'configure' uses this to find programs.
-
-In some cases it may be necessary to work around configuration results that do
-not match reality.  For example, Linux 4.5 added support for the MADV_FREE flag
-to madvise(2), which can cause problems if building on a host with MADV_FREE
-support and deploying to a target without.  To work around this, use a cache
-file to override the relevant configuration variable defined in configure.ac,
-e.g.:
-
-    echo "je_cv_madv_free=no" > config.cache && ./configure -C
-
-=== Advanced compilation =======================================================
-
-To build only parts of jemalloc, use the following targets:
-
-    build_lib_shared
-    build_lib_static
-    build_lib
-    build_doc_html
-    build_doc_man
-    build_doc
-
-To install only parts of jemalloc, use the following targets:
-
-    install_bin
-    install_include
-    install_lib_shared
-    install_lib_static
-    install_lib
-    install_doc_html
-    install_doc_man
-    install_doc
-
-To clean up build results to varying degrees, use the following make targets:
-
-    clean
-    distclean
-    relclean
-
-=== Advanced installation ======================================================
-
-Optionally, define make variables when invoking make, including (not
-exclusively):
-
-INCLUDEDIR="?"
-    Use this as the installation prefix for header files.
-
-LIBDIR="?"
-    Use this as the installation prefix for libraries.
-
-MANDIR="?"
-    Use this as the installation prefix for man pages.
-
-DESTDIR="?"
-    Prepend DESTDIR to INCLUDEDIR, LIBDIR, DATADIR, and MANDIR.  This is useful
-    when installing to a different path than was specified via --prefix.
-
-CC="?"
-    Use this to invoke the C compiler.
-
-CFLAGS="?"
-    Pass these flags to the compiler.
-
-CPPFLAGS="?"
-    Pass these flags to the C preprocessor.
-
-LDFLAGS="?"
-    Pass these flags when linking.
-
-PATH="?"
-    Use this to search for programs used during configuration and building.
-
-=== Development ================================================================
-
-If you intend to make non-trivial changes to jemalloc, use the 'autogen.sh'
-script rather than 'configure'.  This re-generates 'configure', enables
-configuration dependency rules, and enables re-generation of automatically
-generated source files.
-
-The build system supports using an object directory separate from the source
-tree.  For example, you can create an 'obj' directory, and from within that
-directory, issue configuration and build commands:
-
-    autoconf
-    mkdir obj
-    cd obj
-    ../configure --enable-autogen
-    make
-
-=== Documentation ==============================================================
-
-The manual page is generated in both html and roff formats.  Any web browser
-can be used to view the html manual.  The roff manual page can be formatted
-prior to installation via the following command:
-
-    nroff -man -t doc/jemalloc.3
diff --git a/zircon/third_party/ulib/jemalloc/Makefile.in b/zircon/third_party/ulib/jemalloc/Makefile.in
deleted file mode 100644
index acd31f7..0000000
--- a/zircon/third_party/ulib/jemalloc/Makefile.in
+++ /dev/null
@@ -1,563 +0,0 @@
-# Clear out all vpaths, then set just one (default vpath) for the main build
-# directory.
-vpath
-vpath % .
-
-# Clear the default suffixes, so that built-in rules are not used.
-.SUFFIXES :
-
-SHELL := /bin/sh
-
-CC := @CC@
-CXX := @CXX@
-
-# Configuration parameters.
-DESTDIR =
-BINDIR := $(DESTDIR)@BINDIR@
-INCLUDEDIR := $(DESTDIR)@INCLUDEDIR@
-LIBDIR := $(DESTDIR)@LIBDIR@
-DATADIR := $(DESTDIR)@DATADIR@
-MANDIR := $(DESTDIR)@MANDIR@
-srcroot := @srcroot@
-objroot := @objroot@
-abs_srcroot := @abs_srcroot@
-abs_objroot := @abs_objroot@
-
-# Build parameters.
-CPPFLAGS := @CPPFLAGS@ -I$(srcroot)include -I$(objroot)include
-CONFIGURE_CFLAGS := @CONFIGURE_CFLAGS@
-SPECIFIED_CFLAGS := @SPECIFIED_CFLAGS@
-EXTRA_CFLAGS := @EXTRA_CFLAGS@
-CFLAGS := $(strip $(CONFIGURE_CFLAGS) $(SPECIFIED_CFLAGS) $(EXTRA_CFLAGS))
-CONFIGURE_CXXFLAGS := @CONFIGURE_CXXFLAGS@
-SPECIFIED_CXXFLAGS := @SPECIFIED_CXXFLAGS@
-EXTRA_CXXFLAGS := @EXTRA_CXXFLAGS@
-CXXFLAGS := $(strip $(CONFIGURE_CXXFLAGS) $(SPECIFIED_CXXFLAGS) $(EXTRA_CXXFLAGS))
-LDFLAGS := @LDFLAGS@
-EXTRA_LDFLAGS := @EXTRA_LDFLAGS@
-LIBS := @LIBS@
-RPATH_EXTRA := @RPATH_EXTRA@
-SO := @so@
-IMPORTLIB := @importlib@
-O := @o@
-A := @a@
-EXE := @exe@
-LIBPREFIX := @libprefix@
-REV := @rev@
-install_suffix := @install_suffix@
-ABI := @abi@
-XSLTPROC := @XSLTPROC@
-AUTOCONF := @AUTOCONF@
-_RPATH = @RPATH@
-RPATH = $(if $(1),$(call _RPATH,$(1)))
-cfghdrs_in := $(addprefix $(srcroot),@cfghdrs_in@)
-cfghdrs_out := @cfghdrs_out@
-cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@)
-cfgoutputs_out := @cfgoutputs_out@
-enable_autogen := @enable_autogen@
-enable_code_coverage := @enable_code_coverage@
-enable_prof := @enable_prof@
-enable_zone_allocator := @enable_zone_allocator@
-MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF
-link_whole_archive := @link_whole_archive@
-DSO_LDFLAGS = @DSO_LDFLAGS@
-SOREV = @SOREV@
-PIC_CFLAGS = @PIC_CFLAGS@
-CTARGET = @CTARGET@
-LDTARGET = @LDTARGET@
-TEST_LD_MODE = @TEST_LD_MODE@
-MKLIB = @MKLIB@
-AR = @AR@
-ARFLAGS = @ARFLAGS@
-CC_MM = @CC_MM@
-LM := @LM@
-INSTALL = @INSTALL@
-
-ifeq (macho, $(ABI))
-TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib"
-else
-ifeq (pecoff, $(ABI))
-TEST_LIBRARY_PATH := PATH="$(PATH):$(objroot)lib"
-else
-TEST_LIBRARY_PATH :=
-endif
-endif
-
-LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix)
-
-# Lists of files.
-BINS := $(objroot)bin/jemalloc-config $(objroot)bin/jemalloc.sh $(objroot)bin/jeprof
-C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h
-C_SRCS := $(srcroot)src/jemalloc.c \
-	$(srcroot)src/arena.c \
-	$(srcroot)src/atomic.c \
-	$(srcroot)src/base.c \
-	$(srcroot)src/bitmap.c \
-	$(srcroot)src/ckh.c \
-	$(srcroot)src/ctl.c \
-	$(srcroot)src/extent.c \
-	$(srcroot)src/extent_dss.c \
-	$(srcroot)src/extent_mmap.c \
-	$(srcroot)src/hash.c \
-	$(srcroot)src/large.c \
-	$(srcroot)src/mb.c \
-	$(srcroot)src/mutex.c \
-	$(srcroot)src/nstime.c \
-	$(srcroot)src/pages.c \
-	$(srcroot)src/prng.c \
-	$(srcroot)src/prof.c \
-	$(srcroot)src/rtree.c \
-	$(srcroot)src/stats.c \
-	$(srcroot)src/spin.c \
-	$(srcroot)src/tcache.c \
-	$(srcroot)src/ticker.c \
-	$(srcroot)src/tsd.c \
-	$(srcroot)src/util.c \
-	$(srcroot)src/witness.c
-ifeq ($(enable_zone_allocator), 1)
-C_SRCS += $(srcroot)src/zone.c
-endif
-ifeq ($(IMPORTLIB),$(SO))
-STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A)
-endif
-ifdef PIC_CFLAGS
-STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_pic.$(A)
-else
-STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_s.$(A)
-endif
-DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV)
-ifneq ($(SOREV),$(SO))
-DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO)
-endif
-ifeq (1, $(link_whole_archive))
-LJEMALLOC := -Wl,--whole-archive -L$(objroot)lib -l$(LIBJEMALLOC) -Wl,--no-whole-archive
-else
-LJEMALLOC := $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
-endif
-PC := $(objroot)jemalloc.pc
-MAN3 := $(objroot)doc/jemalloc$(install_suffix).3
-DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml
-DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.html)
-DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.3)
-DOCS := $(DOCS_HTML) $(DOCS_MAN3)
-C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \
-	$(srcroot)test/src/btalloc_1.c $(srcroot)test/src/math.c \
-	$(srcroot)test/src/mtx.c $(srcroot)test/src/mq.c \
-	$(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \
-	$(srcroot)test/src/thd.c $(srcroot)test/src/timer.c
-ifeq (1, $(link_whole_archive))
-C_UTIL_INTEGRATION_SRCS :=
-C_UTIL_CPP_SRCS :=
-else
-C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/util.c
-C_UTIL_CPP_SRCS := $(srcroot)src/nstime.c $(srcroot)src/util.c
-endif
-TESTS_UNIT := \
-	$(srcroot)test/unit/a0.c \
-	$(srcroot)test/unit/arena_reset.c \
-	$(srcroot)test/unit/atomic.c \
-	$(srcroot)test/unit/base.c \
-	$(srcroot)test/unit/bitmap.c \
-	$(srcroot)test/unit/ckh.c \
-	$(srcroot)test/unit/decay.c \
-	$(srcroot)test/unit/extent_quantize.c \
-	$(srcroot)test/unit/fork.c \
-	$(srcroot)test/unit/hash.c \
-	$(srcroot)test/unit/junk.c \
-	$(srcroot)test/unit/junk_alloc.c \
-	$(srcroot)test/unit/junk_free.c \
-	$(srcroot)test/unit/mallctl.c \
-	$(srcroot)test/unit/math.c \
-	$(srcroot)test/unit/mq.c \
-	$(srcroot)test/unit/mtx.c \
-	$(srcroot)test/unit/pack.c \
-	$(srcroot)test/unit/pages.c \
-	$(srcroot)test/unit/ph.c \
-	$(srcroot)test/unit/prng.c \
-	$(srcroot)test/unit/prof_accum.c \
-	$(srcroot)test/unit/prof_active.c \
-	$(srcroot)test/unit/prof_gdump.c \
-	$(srcroot)test/unit/prof_idump.c \
-	$(srcroot)test/unit/prof_reset.c \
-	$(srcroot)test/unit/prof_tctx.c \
-	$(srcroot)test/unit/prof_thread_name.c \
-	$(srcroot)test/unit/ql.c \
-	$(srcroot)test/unit/qr.c \
-	$(srcroot)test/unit/rb.c \
-	$(srcroot)test/unit/rtree.c \
-	$(srcroot)test/unit/SFMT.c \
-	$(srcroot)test/unit/size_classes.c \
-	$(srcroot)test/unit/slab.c \
-	$(srcroot)test/unit/smoothstep.c \
-	$(srcroot)test/unit/stats.c \
-	$(srcroot)test/unit/stats_print.c \
-	$(srcroot)test/unit/ticker.c \
-	$(srcroot)test/unit/nstime.c \
-	$(srcroot)test/unit/tsd.c \
-	$(srcroot)test/unit/util.c \
-	$(srcroot)test/unit/witness.c \
-	$(srcroot)test/unit/zero.c
-ifeq (@enable_prof@, 1)
-TESTS_UNIT += \
-	$(srcroot)test/unit/arena_reset_prof.c
-endif
-TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
-	$(srcroot)test/integration/allocated.c \
-	$(srcroot)test/integration/extent.c \
-	$(srcroot)test/integration/mallocx.c \
-	$(srcroot)test/integration/MALLOCX_ARENA.c \
-	$(srcroot)test/integration/overflow.c \
-	$(srcroot)test/integration/posix_memalign.c \
-	$(srcroot)test/integration/rallocx.c \
-	$(srcroot)test/integration/sdallocx.c \
-	$(srcroot)test/integration/thread_arena.c \
-	$(srcroot)test/integration/thread_tcache_enabled.c \
-	$(srcroot)test/integration/xallocx.c
-ifeq (@enable_cxx@, 1)
-CPP_SRCS := $(srcroot)src/jemalloc_cpp.cpp
-TESTS_INTEGRATION_CPP := $(srcroot)test/integration/cpp/basic.cpp
-else
-CPP_SRCS :=
-TESTS_INTEGRATION_CPP :=
-endif
-TESTS_STRESS := $(srcroot)test/stress/microbench.c
-
-TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_INTEGRATION_CPP) $(TESTS_STRESS)
-
-C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O))
-CPP_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.$(O))
-C_PIC_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.pic.$(O))
-CPP_PIC_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.pic.$(O))
-C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O))
-C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O))
-C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O))
-C_UTIL_INTEGRATION_OBJS := $(C_UTIL_INTEGRATION_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O))
-C_TESTLIB_STRESS_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.stress.$(O))
-C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_STRESS_OBJS)
-
-TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O))
-TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O))
-TESTS_INTEGRATION_CPP_OBJS := $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%.$(O))
-TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O))
-TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_STRESS_OBJS)
-TESTS_CPP_OBJS := $(TESTS_INTEGRATION_CPP_OBJS)
-
-.PHONY: all dist build_doc_html build_doc_man build_doc
-.PHONY: install_bin install_include install_lib
-.PHONY: install_doc_html install_doc_man install_doc install
-.PHONY: tests check clean distclean relclean
-
-.SECONDARY : $(TESTS_OBJS) $(TESTS_CPP_OBJS)
-
-# Default target.
-all: build_lib
-
-dist: build_doc
-
-$(objroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl
-	$(XSLTPROC) -o $@ $(objroot)doc/html.xsl $<
-
-$(objroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl
-	$(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $<
-
-build_doc_html: $(DOCS_HTML)
-build_doc_man: $(DOCS_MAN3)
-build_doc: $(DOCS)
-
-#
-# Include generated dependency files.
-#
-ifdef CC_MM
--include $(C_OBJS:%.$(O)=%.d)
--include $(CPP_OBJS:%.$(O)=%.d)
--include $(C_PIC_OBJS:%.$(O)=%.d)
--include $(CPP_PIC_OBJS:%.$(O)=%.d)
--include $(C_JET_OBJS:%.$(O)=%.d)
--include $(C_TESTLIB_OBJS:%.$(O)=%.d)
--include $(TESTS_OBJS:%.$(O)=%.d)
--include $(TESTS_CPP_OBJS:%.$(O)=%.d)
-endif
-
-$(C_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c
-$(CPP_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.cpp
-$(C_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c
-$(C_PIC_OBJS): CFLAGS += $(PIC_CFLAGS)
-$(CPP_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.cpp
-$(CPP_PIC_OBJS): CXXFLAGS += $(PIC_CFLAGS)
-$(C_JET_OBJS): $(objroot)src/%.jet.$(O): $(srcroot)src/%.c
-$(C_JET_OBJS): CFLAGS += -DJEMALLOC_JET
-$(C_TESTLIB_UNIT_OBJS): $(objroot)test/src/%.unit.$(O): $(srcroot)test/src/%.c
-$(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
-$(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c
-$(C_TESTLIB_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
-$(C_UTIL_INTEGRATION_OBJS): $(objroot)src/%.integration.$(O): $(srcroot)src/%.c
-$(C_TESTLIB_STRESS_OBJS): $(objroot)test/src/%.stress.$(O): $(srcroot)test/src/%.c
-$(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_TESTLIB
-$(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
-$(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
-$(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
-$(TESTS_INTEGRATION_CPP_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_CPP_TEST
-$(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST
-$(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c
-$(TESTS_CPP_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.cpp
-$(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
-$(TESTS_CPP_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
-ifneq ($(IMPORTLIB),$(SO))
-$(CPP_OBJS) $(C_OBJS) $(C_JET_OBJS): CPPFLAGS += -DDLLEXPORT
-endif
-
-ifndef CC_MM
-# Dependencies.
-HEADER_DIRS = $(srcroot)include/jemalloc/internal \
-	$(objroot)include/jemalloc $(objroot)include/jemalloc/internal
-HEADERS = $(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h))
-$(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): $(HEADERS)
-$(TESTS_OBJS) $(TESTS_CPP_OBJS): $(objroot)test/include/test/jemalloc_test.h
-endif
-
-$(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O):
-	@mkdir -p $(@D)
-	$(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $<
-ifdef CC_MM
-	@$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $<
-endif
-
-$(CPP_OBJS) $(CPP_PIC_OBJS) $(TESTS_CPP_OBJS): %.$(O):
-	@mkdir -p $(@D)
-	$(CXX) $(CXXFLAGS) -c $(CPPFLAGS) $(CTARGET) $<
-ifdef CC_MM
-	@$(CXX) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $<
-endif
-
-ifneq ($(SOREV),$(SO))
-%.$(SO) : %.$(SOREV)
-	@mkdir -p $(@D)
-	ln -sf $(<F) $@
-endif
-
-$(objroot)lib/$(LIBJEMALLOC).$(SOREV) : $(if $(PIC_CFLAGS),$(C_PIC_OBJS),$(C_OBJS)) $(if $(PIC_CFLAGS),$(CPP_PIC_OBJS),$(CPP_OBJS))
-	@mkdir -p $(@D)
-	$(CC) $(DSO_LDFLAGS) $(call RPATH,$(RPATH_EXTRA)) $(LDTARGET) $+ $(LDFLAGS) $(LIBS) $(EXTRA_LDFLAGS)
-
-$(objroot)lib/$(LIBJEMALLOC)_pic.$(A) : $(C_PIC_OBJS) $(CPP_PIC_OBJS)
-$(objroot)lib/$(LIBJEMALLOC).$(A) : $(C_OBJS) $(CPP_OBJS)
-$(objroot)lib/$(LIBJEMALLOC)_s.$(A) : $(C_OBJS) $(CPP_OBJS)
-
-$(STATIC_LIBS):
-	@mkdir -p $(@D)
-	$(AR) $(ARFLAGS)@AROUT@ $+
-
-$(objroot)test/unit/%$(EXE): $(objroot)test/unit/%.$(O) $(TESTS_UNIT_LINK_OBJS) $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS)
-	@mkdir -p $(@D)
-	$(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
-
-$(objroot)test/integration/%$(EXE): $(objroot)test/integration/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
-	@mkdir -p $(@D)
-	$(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LJEMALLOC) $(LDFLAGS) $(filter-out -lm,$(filter -lrt -lpthread -lstdc++,$(LIBS))) $(LM) $(EXTRA_LDFLAGS)
-
-$(objroot)test/integration/cpp/%$(EXE): $(objroot)test/integration/cpp/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
-	@mkdir -p $(@D)
-	$(CXX) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(EXTRA_LDFLAGS)
-
-$(objroot)test/stress/%$(EXE): $(objroot)test/stress/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_STRESS_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
-	@mkdir -p $(@D)
-	$(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
-
-build_lib_shared: $(DSOS)
-build_lib_static: $(STATIC_LIBS)
-build_lib: build_lib_shared build_lib_static
-
-install_bin:
-	$(INSTALL) -d $(BINDIR)
-	@for b in $(BINS); do \
-	echo "$(INSTALL) -m 755 $$b $(BINDIR)"; \
-	$(INSTALL) -m 755 $$b $(BINDIR); \
-done
-
-install_include:
-	$(INSTALL) -d $(INCLUDEDIR)/jemalloc
-	@for h in $(C_HDRS); do \
-	echo "$(INSTALL) -m 644 $$h $(INCLUDEDIR)/jemalloc"; \
-	$(INSTALL) -m 644 $$h $(INCLUDEDIR)/jemalloc; \
-done
-
-install_lib_shared: $(DSOS)
-	$(INSTALL) -d $(LIBDIR)
-	$(INSTALL) -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR)
-ifneq ($(SOREV),$(SO))
-	ln -sf $(LIBJEMALLOC).$(SOREV) $(LIBDIR)/$(LIBJEMALLOC).$(SO)
-endif
-
-install_lib_static: $(STATIC_LIBS)
-	$(INSTALL) -d $(LIBDIR)
-	@for l in $(STATIC_LIBS); do \
-	echo "$(INSTALL) -m 755 $$l $(LIBDIR)"; \
-	$(INSTALL) -m 755 $$l $(LIBDIR); \
-done
-
-install_lib_pc: $(PC)
-	$(INSTALL) -d $(LIBDIR)/pkgconfig
-	@for l in $(PC); do \
-	echo "$(INSTALL) -m 644 $$l $(LIBDIR)/pkgconfig"; \
-	$(INSTALL) -m 644 $$l $(LIBDIR)/pkgconfig; \
-done
-
-install_lib: install_lib_shared install_lib_static install_lib_pc
-
-install_doc_html:
-	$(INSTALL) -d $(DATADIR)/doc/jemalloc$(install_suffix)
-	@for d in $(DOCS_HTML); do \
-	echo "$(INSTALL) -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix)"; \
-	$(INSTALL) -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix); \
-done
-
-install_doc_man:
-	$(INSTALL) -d $(MANDIR)/man3
-	@for d in $(DOCS_MAN3); do \
-	echo "$(INSTALL) -m 644 $$d $(MANDIR)/man3"; \
-	$(INSTALL) -m 644 $$d $(MANDIR)/man3; \
-done
-
-install_doc: install_doc_html install_doc_man
-
-install: install_bin install_include install_lib install_doc
-
-tests_unit: $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%$(EXE))
-tests_integration: $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%$(EXE)) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%$(EXE))
-tests_stress: $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%$(EXE))
-tests: tests_unit tests_integration tests_stress
-
-check_unit_dir:
-	@mkdir -p $(objroot)test/unit
-check_integration_dir:
-	@mkdir -p $(objroot)test/integration
-stress_dir:
-	@mkdir -p $(objroot)test/stress
-check_dir: check_unit_dir check_integration_dir
-
-check_unit: tests_unit check_unit_dir
-	$(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
-check_integration_prof: tests_integration check_integration_dir
-ifeq ($(enable_prof), 1)
-	$(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
-	$(MALLOC_CONF)="prof:true,prof_active:false" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
-endif
-check_integration_decay: tests_integration check_integration_dir
-	$(MALLOC_CONF)="decay_time:-1" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
-	$(MALLOC_CONF)="decay_time:0" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
-check_integration: tests_integration check_integration_dir
-	$(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
-stress: tests_stress stress_dir
-	$(SHELL) $(objroot)test/test.sh $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%)
-check: check_unit check_integration check_integration_decay check_integration_prof
-
-ifeq ($(enable_code_coverage), 1)
-coverage_unit: check_unit
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src unit $(C_TESTLIB_UNIT_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/unit unit $(TESTS_UNIT_OBJS)
-
-coverage_integration: check_integration
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src integration $(C_UTIL_INTEGRATION_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src integration $(C_TESTLIB_INTEGRATION_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/integration integration $(TESTS_INTEGRATION_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/integration/cpp integration $(TESTS_INTEGRATION_CPP_OBJS)
-
-coverage_stress: stress
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src stress $(C_TESTLIB_STRESS_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/stress stress $(TESTS_STRESS_OBJS)
-
-coverage: check
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src integration $(C_UTIL_INTEGRATION_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src unit $(C_TESTLIB_UNIT_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src integration $(C_TESTLIB_INTEGRATION_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src stress $(C_TESTLIB_STRESS_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/unit unit $(TESTS_UNIT_OBJS) $(TESTS_UNIT_AUX_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/integration integration $(TESTS_INTEGRATION_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/integration/cpp integration $(TESTS_INTEGRATION_CPP_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/stress integration $(TESTS_STRESS_OBJS)
-endif
-
-clean:
-	rm -f $(C_OBJS)
-	rm -f $(CPP_OBJS)
-	rm -f $(C_PIC_OBJS)
-	rm -f $(CPP_PIC_OBJS)
-	rm -f $(C_JET_OBJS)
-	rm -f $(C_TESTLIB_OBJS)
-	rm -f $(C_OBJS:%.$(O)=%.d)
-	rm -f $(C_OBJS:%.$(O)=%.gcda)
-	rm -f $(C_OBJS:%.$(O)=%.gcno)
-	rm -f $(CPP_OBJS:%.$(O)=%.d)
-	rm -f $(CPP_OBJS:%.$(O)=%.gcda)
-	rm -f $(CPP_OBJS:%.$(O)=%.gcno)
-	rm -f $(C_PIC_OBJS:%.$(O)=%.d)
-	rm -f $(C_PIC_OBJS:%.$(O)=%.gcda)
-	rm -f $(C_PIC_OBJS:%.$(O)=%.gcno)
-	rm -f $(CPP_PIC_OBJS:%.$(O)=%.d)
-	rm -f $(CPP_PIC_OBJS:%.$(O)=%.gcda)
-	rm -f $(CPP_PIC_OBJS:%.$(O)=%.gcno)
-	rm -f $(C_JET_OBJS:%.$(O)=%.d)
-	rm -f $(C_JET_OBJS:%.$(O)=%.gcda)
-	rm -f $(C_JET_OBJS:%.$(O)=%.gcno)
-	rm -f $(C_TESTLIB_OBJS:%.$(O)=%.d)
-	rm -f $(C_TESTLIB_OBJS:%.$(O)=%.gcda)
-	rm -f $(C_TESTLIB_OBJS:%.$(O)=%.gcno)
-	rm -f $(TESTS_OBJS:%.$(O)=%$(EXE))
-	rm -f $(TESTS_OBJS)
-	rm -f $(TESTS_OBJS:%.$(O)=%.d)
-	rm -f $(TESTS_OBJS:%.$(O)=%.gcda)
-	rm -f $(TESTS_OBJS:%.$(O)=%.gcno)
-	rm -f $(TESTS_OBJS:%.$(O)=%.out)
-	rm -f $(TESTS_CPP_OBJS:%.$(O)=%$(EXE))
-	rm -f $(TESTS_CPP_OBJS)
-	rm -f $(TESTS_CPP_OBJS:%.$(O)=%.d)
-	rm -f $(TESTS_CPP_OBJS:%.$(O)=%.gcda)
-	rm -f $(TESTS_CPP_OBJS:%.$(O)=%.gcno)
-	rm -f $(TESTS_CPP_OBJS:%.$(O)=%.out)
-	rm -f $(DSOS) $(STATIC_LIBS)
-	rm -f $(objroot)*.gcov.*
-
-distclean: clean
-	rm -f $(objroot)bin/jemalloc-config
-	rm -f $(objroot)bin/jemalloc.sh
-	rm -f $(objroot)bin/jeprof
-	rm -f $(objroot)config.log
-	rm -f $(objroot)config.status
-	rm -f $(objroot)config.stamp
-	rm -f $(cfghdrs_out)
-	rm -f $(cfgoutputs_out)
-
-relclean: distclean
-	rm -f $(objroot)configure
-	rm -f $(objroot)VERSION
-	rm -f $(DOCS_HTML)
-	rm -f $(DOCS_MAN3)
-
-#===============================================================================
-# Re-configuration rules.
-
-ifeq ($(enable_autogen), 1)
-$(srcroot)configure : $(srcroot)configure.ac
-	cd ./$(srcroot) && $(AUTOCONF)
-
-$(objroot)config.status : $(srcroot)configure
-	./$(objroot)config.status --recheck
-
-$(srcroot)config.stamp.in : $(srcroot)configure.ac
-	echo stamp > $(srcroot)config.stamp.in
-
-$(objroot)config.stamp : $(cfgoutputs_in) $(cfghdrs_in) $(srcroot)configure
-	./$(objroot)config.status
-	@touch $@
-
-# There must be some action in order for make to re-read Makefile when it is
-# out of date.
-$(cfgoutputs_out) $(cfghdrs_out) : $(objroot)config.stamp
-	@true
-endif
diff --git a/zircon/third_party/ulib/jemalloc/README b/zircon/third_party/ulib/jemalloc/README
deleted file mode 100644
index 3a6e0d2..0000000
--- a/zircon/third_party/ulib/jemalloc/README
+++ /dev/null
@@ -1,20 +0,0 @@
-jemalloc is a general purpose malloc(3) implementation that emphasizes
-fragmentation avoidance and scalable concurrency support.  jemalloc first came
-into use as the FreeBSD libc allocator in 2005, and since then it has found its
-way into numerous applications that rely on its predictable behavior.  In 2010
-jemalloc development efforts broadened to include developer support features
-such as heap profiling and extensive monitoring/tuning hooks.  Modern jemalloc
-releases continue to be integrated back into FreeBSD, and therefore versatility
-remains critical.  Ongoing development efforts trend toward making jemalloc
-among the best allocators for a broad range of demanding applications, and
-eliminating/mitigating weaknesses that have practical repercussions for real
-world applications.
-
-The COPYING file contains copyright and licensing information.
-
-The INSTALL file contains information on how to configure, build, and install
-jemalloc.
-
-The ChangeLog file contains a brief summary of changes for each release.
-
-URL: http://jemalloc.net/
diff --git a/zircon/third_party/ulib/jemalloc/README.fuchsia b/zircon/third_party/ulib/jemalloc/README.fuchsia
deleted file mode 100644
index c8a3d51..0000000
--- a/zircon/third_party/ulib/jemalloc/README.fuchsia
+++ /dev/null
@@ -1,3 +0,0 @@
-Source: https://github.com/jemalloc/jemalloc
-Git Commit: 5154ff32ee8c37bacb6afd8a07b923eb33228357
-License: 2-Clause BSD
diff --git a/zircon/third_party/ulib/jemalloc/autogen.sh b/zircon/third_party/ulib/jemalloc/autogen.sh
deleted file mode 100755
index 75f32da..0000000
--- a/zircon/third_party/ulib/jemalloc/autogen.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-
-for i in autoconf; do
-    echo "$i"
-    $i
-    st=$?
-    if [ $st -ne 0 ]; then
-	echo "Error $st in $i"
-	exit 1
-    fi
-done
-
-echo "./configure --enable-autogen $@"
-./configure --enable-autogen "$@"
-st=$?
-if [ $st -ne 0 ]; then
-    echo "Error $st in ./configure"
-    exit 1
-fi
diff --git a/zircon/third_party/ulib/jemalloc/bin/jemalloc-config.in b/zircon/third_party/ulib/jemalloc/bin/jemalloc-config.in
deleted file mode 100644
index 80eca2e..0000000
--- a/zircon/third_party/ulib/jemalloc/bin/jemalloc-config.in
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/bin/sh
-
-usage() {
-	cat <<EOF
-Usage:
-  @BINDIR@/jemalloc-config <option>
-Options:
-  --help | -h  : Print usage.
-  --version    : Print jemalloc version.
-  --revision   : Print shared library revision number.
-  --config     : Print configure options used to build jemalloc.
-  --prefix     : Print installation directory prefix.
-  --bindir     : Print binary installation directory.
-  --datadir    : Print data installation directory.
-  --includedir : Print include installation directory.
-  --libdir     : Print library installation directory.
-  --mandir     : Print manual page installation directory.
-  --cc         : Print compiler used to build jemalloc.
-  --cflags     : Print compiler flags used to build jemalloc.
-  --cppflags   : Print preprocessor flags used to build jemalloc.
-  --cxxflags   : Print C++ compiler flags used to build jemalloc.
-  --ldflags    : Print library flags used to build jemalloc.
-  --libs       : Print libraries jemalloc was linked against.
-EOF
-}
-
-prefix="@prefix@"
-exec_prefix="@exec_prefix@"
-
-case "$1" in
---help | -h)
-	usage
-	exit 0
-	;;
---version)
-	echo "@jemalloc_version@"
-	;;
---revision)
-	echo "@rev@"
-	;;
---config)
-	echo "@CONFIG@"
-	;;
---prefix)
-	echo "@PREFIX@"
-	;;
---bindir)
-	echo "@BINDIR@"
-	;;
---datadir)
-	echo "@DATADIR@"
-	;;
---includedir)
-	echo "@INCLUDEDIR@"
-	;;
---libdir)
-	echo "@LIBDIR@"
-	;;
---mandir)
-	echo "@MANDIR@"
-	;;
---cc)
-	echo "@CC@"
-	;;
---cflags)
-	echo "@CFLAGS@"
-	;;
---cppflags)
-	echo "@CPPFLAGS@"
-	;;
---cxxflags)
-	echo "@CXXFLAGS@"
-	;;
---ldflags)
-	echo "@LDFLAGS@ @EXTRA_LDFLAGS@"
-	;;
---libs)
-	echo "@LIBS@"
-	;;
-*)
-	usage
-	exit 1
-esac
diff --git a/zircon/third_party/ulib/jemalloc/bin/jemalloc.sh.in b/zircon/third_party/ulib/jemalloc/bin/jemalloc.sh.in
deleted file mode 100644
index cdf3673..0000000
--- a/zircon/third_party/ulib/jemalloc/bin/jemalloc.sh.in
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh
-
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-
-@LD_PRELOAD_VAR@=${libdir}/libjemalloc.@SOREV@
-export @LD_PRELOAD_VAR@
-exec "$@"
diff --git a/zircon/third_party/ulib/jemalloc/bin/jeprof.in b/zircon/third_party/ulib/jemalloc/bin/jeprof.in
deleted file mode 100644
index 42087fc..0000000
--- a/zircon/third_party/ulib/jemalloc/bin/jeprof.in
+++ /dev/null
@@ -1,5611 +0,0 @@
-#! /usr/bin/env perl
-
-# Copyright (c) 1998-2007, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# ---
-# Program for printing the profile generated by common/profiler.cc,
-# or by the heap profiler (common/debugallocation.cc)
-#
-# The profile contains a sequence of entries of the form:
-#       <count> <stack trace>
-# This program parses the profile, and generates user-readable
-# output.
-#
-# Examples:
-#
-# % tools/jeprof "program" "profile"
-#   Enters "interactive" mode
-#
-# % tools/jeprof --text "program" "profile"
-#   Generates one line per procedure
-#
-# % tools/jeprof --gv "program" "profile"
-#   Generates annotated call-graph and displays via "gv"
-#
-# % tools/jeprof --gv --focus=Mutex "program" "profile"
-#   Restrict to code paths that involve an entry that matches "Mutex"
-#
-# % tools/jeprof --gv --focus=Mutex --ignore=string "program" "profile"
-#   Restrict to code paths that involve an entry that matches "Mutex"
-#   and does not match "string"
-#
-# % tools/jeprof --list=IBF_CheckDocid "program" "profile"
-#   Generates disassembly listing of all routines with at least one
-#   sample that match the --list=<regexp> pattern.  The listing is
-#   annotated with the flat and cumulative sample counts at each line.
-#
-# % tools/jeprof --disasm=IBF_CheckDocid "program" "profile"
-#   Generates disassembly listing of all routines with at least one
-#   sample that match the --disasm=<regexp> pattern.  The listing is
-#   annotated with the flat and cumulative sample counts at each PC value.
-#
-# TODO: Use color to indicate files?
-
-use strict;
-use warnings;
-use Getopt::Long;
-
-my $JEPROF_VERSION = "@jemalloc_version@";
-my $PPROF_VERSION = "2.0";
-
-# These are the object tools we use, which can come from a
-# user-specified location using --tools, from the JEPROF_TOOLS
-# environment variable, or from the environment.
-my %obj_tool_map = (
-  "objdump" => "objdump",
-  "nm" => "nm",
-  "addr2line" => "addr2line",
-  "c++filt" => "c++filt",
-  ## ConfigureObjTools may add architecture-specific entries:
-  #"nm_pdb" => "nm-pdb",       # for reading windows (PDB-format) executables
-  #"addr2line_pdb" => "addr2line-pdb",                                # ditto
-  #"otool" => "otool",         # equivalent of objdump on OS X
-);
-# NOTE: these are lists, so you can put in commandline flags if you want.
-my @DOT = ("dot");          # leave non-absolute, since it may be in /usr/local
-my @GV = ("gv");
-my @EVINCE = ("evince");    # could also be xpdf or perhaps acroread
-my @KCACHEGRIND = ("kcachegrind");
-my @PS2PDF = ("ps2pdf");
-# These are used for dynamic profiles
-my @URL_FETCHER = ("curl", "-s", "--fail");
-
-# These are the web pages that servers need to support for dynamic profiles
-my $HEAP_PAGE = "/pprof/heap";
-my $PROFILE_PAGE = "/pprof/profile";   # must support cgi-param "?seconds=#"
-my $PMUPROFILE_PAGE = "/pprof/pmuprofile(?:\\?.*)?"; # must support cgi-param
-                                                # ?seconds=#&event=x&period=n
-my $GROWTH_PAGE = "/pprof/growth";
-my $CONTENTION_PAGE = "/pprof/contention";
-my $WALL_PAGE = "/pprof/wall(?:\\?.*)?";  # accepts options like namefilter
-my $FILTEREDPROFILE_PAGE = "/pprof/filteredprofile(?:\\?.*)?";
-my $CENSUSPROFILE_PAGE = "/pprof/censusprofile(?:\\?.*)?"; # must support cgi-param
-                                                       # "?seconds=#",
-                                                       # "?tags_regexp=#" and
-                                                       # "?type=#".
-my $SYMBOL_PAGE = "/pprof/symbol";     # must support symbol lookup via POST
-my $PROGRAM_NAME_PAGE = "/pprof/cmdline";
-
-# These are the web pages that can be named on the command line.
-# All the alternatives must begin with /.
-my $PROFILES = "($HEAP_PAGE|$PROFILE_PAGE|$PMUPROFILE_PAGE|" .
-               "$GROWTH_PAGE|$CONTENTION_PAGE|$WALL_PAGE|" .
-               "$FILTEREDPROFILE_PAGE|$CENSUSPROFILE_PAGE)";
-
-# default binary name
-my $UNKNOWN_BINARY = "(unknown)";
-
-# There is a pervasive dependency on the length (in hex characters,
-# i.e., nibbles) of an address, distinguishing between 32-bit and
-# 64-bit profiles.  To err on the safe side, default to 64-bit here:
-my $address_length = 16;
-
-my $dev_null = "/dev/null";
-if (! -e $dev_null && $^O =~ /MSWin/) {    # $^O is the OS perl was built for
-  $dev_null = "nul";
-}
-
-# A list of paths to search for shared object files
-my @prefix_list = ();
-
-# Special routine name that should not have any symbols.
-# Used as separator to parse "addr2line -i" output.
-my $sep_symbol = '_fini';
-my $sep_address = undef;
-
-##### Argument parsing #####
-
-sub usage_string {
-  return <<EOF;
-Usage:
-jeprof [options] <program> <profiles>
-   <profiles> is a space separated list of profile names.
-jeprof [options] <symbolized-profiles>
-   <symbolized-profiles> is a list of profile files where each file contains
-   the necessary symbol mappings as well as profile data (likely generated
-   with --raw).
-jeprof [options] <profile>
-   <profile> is a remote form.  Symbols are obtained from host:port$SYMBOL_PAGE
-
-   Each name can be:
-   /path/to/profile        - a path to a profile file
-   host:port[/<service>]   - a location of a service to get profile from
-
-   The /<service> can be $HEAP_PAGE, $PROFILE_PAGE, /pprof/pmuprofile,
-                         $GROWTH_PAGE, $CONTENTION_PAGE, /pprof/wall,
-                         $CENSUSPROFILE_PAGE, or /pprof/filteredprofile.
-   For instance:
-     jeprof http://myserver.com:80$HEAP_PAGE
-   If /<service> is omitted, the service defaults to $PROFILE_PAGE (cpu profiling).
-jeprof --symbols <program>
-   Maps addresses to symbol names.  In this mode, stdin should be a
-   list of library mappings, in the same format as is found in the heap-
-   and cpu-profile files (this loosely matches that of /proc/self/maps
-   on linux), followed by a list of hex addresses to map, one per line.
-
-   For more help with querying remote servers, including how to add the
-   necessary server-side support code, see this filename (or one like it):
-
-   /usr/doc/gperftools-$PPROF_VERSION/pprof_remote_servers.html
-
-Options:
-   --cum               Sort by cumulative data
-   --base=<base>       Subtract <base> from <profile> before display
-   --interactive       Run in interactive mode (interactive "help" gives help) [default]
-   --seconds=<n>       Length of time for dynamic profiles [default=30 secs]
-   --add_lib=<file>    Read additional symbols and line info from the given library
-   --lib_prefix=<dir>  Comma separated list of library path prefixes
-
-Reporting Granularity:
-   --addresses         Report at address level
-   --lines             Report at source line level
-   --functions         Report at function level [default]
-   --files             Report at source file level
-
-Output type:
-   --text              Generate text report
-   --callgrind         Generate callgrind format to stdout
-   --gv                Generate Postscript and display
-   --evince            Generate PDF and display
-   --web               Generate SVG and display
-   --list=<regexp>     Generate source listing of matching routines
-   --disasm=<regexp>   Generate disassembly of matching routines
-   --symbols           Print demangled symbol names found at given addresses
-   --dot               Generate DOT file to stdout
-   --ps                Generate Postscript to stdout
-   --pdf               Generate PDF to stdout
-   --svg               Generate SVG to stdout
-   --gif               Generate GIF to stdout
-   --raw               Generate symbolized jeprof data (useful with remote fetch)
-
-Heap-Profile Options:
-   --inuse_space       Display in-use (mega)bytes [default]
-   --inuse_objects     Display in-use objects
-   --alloc_space       Display allocated (mega)bytes
-   --alloc_objects     Display allocated objects
-   --show_bytes        Display space in bytes
-   --drop_negative     Ignore negative differences
-
-Contention-profile options:
-   --total_delay       Display total delay at each region [default]
-   --contentions       Display number of delays at each region
-   --mean_delay        Display mean delay at each region
-
-Call-graph Options:
-   --nodecount=<n>     Show at most so many nodes [default=80]
-   --nodefraction=<f>  Hide nodes below <f>*total [default=.005]
-   --edgefraction=<f>  Hide edges below <f>*total [default=.001]
-   --maxdegree=<n>     Max incoming/outgoing edges per node [default=8]
-   --focus=<regexp>    Focus on backtraces with nodes matching <regexp>
-   --thread=<n>        Show profile for thread <n>
-   --ignore=<regexp>   Ignore backtraces with nodes matching <regexp>
-   --scale=<n>         Set GV scaling [default=0]
-   --heapcheck         Make nodes with non-0 object counts
-                       (i.e. direct leak generators) more visible
-   --retain=<regexp>   Retain only nodes that match <regexp>
-   --exclude=<regexp>  Exclude all nodes that match <regexp>
-
-Miscellaneous:
-   --tools=<prefix or binary:fullpath>[,...]   \$PATH for object tool pathnames
-   --test              Run unit tests
-   --help              This message
-   --version           Version information
-
-Environment Variables:
-   JEPROF_TMPDIR        Profiles directory. Defaults to \$HOME/jeprof
-   JEPROF_TOOLS         Prefix for object tools pathnames
-
-Examples:
-
-jeprof /bin/ls ls.prof
-                       Enters "interactive" mode
-jeprof --text /bin/ls ls.prof
-                       Outputs one line per procedure
-jeprof --web /bin/ls ls.prof
-                       Displays annotated call-graph in web browser
-jeprof --gv /bin/ls ls.prof
-                       Displays annotated call-graph via 'gv'
-jeprof --gv --focus=Mutex /bin/ls ls.prof
-                       Restricts to code paths including a .*Mutex.* entry
-jeprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof
-                       Code paths including Mutex but not string
-jeprof --list=getdir /bin/ls ls.prof
-                       (Per-line) annotated source listing for getdir()
-jeprof --disasm=getdir /bin/ls ls.prof
-                       (Per-PC) annotated disassembly for getdir()
-
-jeprof http://localhost:1234/
-                       Enters "interactive" mode
-jeprof --text localhost:1234
-                       Outputs one line per procedure for localhost:1234
-jeprof --raw localhost:1234 > ./local.raw
-jeprof --text ./local.raw
-                       Fetches a remote profile for later analysis and then
-                       analyzes it in text mode.
-EOF
-}
-
-sub version_string {
-  return <<EOF
-jeprof (part of jemalloc $JEPROF_VERSION)
-based on pprof (part of gperftools $PPROF_VERSION)
-
-Copyright 1998-2007 Google Inc.
-
-This is BSD licensed software; see the source for copying conditions
-and license information.
-There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A
-PARTICULAR PURPOSE.
-EOF
-}
-
-sub usage {
-  my $msg = shift;
-  print STDERR "$msg\n\n";
-  print STDERR usage_string();
-  print STDERR "\nFATAL ERROR: $msg\n";    # just as a reminder
-  exit(1);
-}
-
-sub Init() {
-  # Setup tmp-file name and handler to clean it up.
-  # We do this in the very beginning so that we can use
-  # the error() and cleanup() functions anytime hereafter.
-  $main::tmpfile_sym = "/tmp/jeprof$$.sym";
-  $main::tmpfile_ps = "/tmp/jeprof$$";
-  $main::next_tmpfile = 0;
-  $SIG{'INT'} = \&sighandler;
-
-  # Cache from filename/linenumber to source code
-  %main::source_cache = ();
-
-  $main::opt_help = 0;
-  $main::opt_version = 0;
-
-  $main::opt_cum = 0;
-  $main::opt_base = '';
-  $main::opt_addresses = 0;
-  $main::opt_lines = 0;
-  $main::opt_functions = 0;
-  $main::opt_files = 0;
-  $main::opt_lib_prefix = "";
-
-  $main::opt_text = 0;
-  $main::opt_callgrind = 0;
-  $main::opt_list = "";
-  $main::opt_disasm = "";
-  $main::opt_symbols = 0;
-  $main::opt_gv = 0;
-  $main::opt_evince = 0;
-  $main::opt_web = 0;
-  $main::opt_dot = 0;
-  $main::opt_ps = 0;
-  $main::opt_pdf = 0;
-  $main::opt_gif = 0;
-  $main::opt_svg = 0;
-  $main::opt_raw = 0;
-
-  $main::opt_nodecount = 80;
-  $main::opt_nodefraction = 0.005;
-  $main::opt_edgefraction = 0.001;
-  $main::opt_maxdegree = 8;
-  $main::opt_focus = '';
-  $main::opt_thread = undef;
-  $main::opt_ignore = '';
-  $main::opt_scale = 0;
-  $main::opt_heapcheck = 0;
-  $main::opt_retain = '';
-  $main::opt_exclude = '';
-  $main::opt_seconds = 30;
-  $main::opt_lib = "";
-
-  $main::opt_inuse_space   = 0;
-  $main::opt_inuse_objects = 0;
-  $main::opt_alloc_space   = 0;
-  $main::opt_alloc_objects = 0;
-  $main::opt_show_bytes    = 0;
-  $main::opt_drop_negative = 0;
-  $main::opt_interactive   = 0;
-
-  $main::opt_total_delay = 0;
-  $main::opt_contentions = 0;
-  $main::opt_mean_delay = 0;
-
-  $main::opt_tools   = "";
-  $main::opt_debug   = 0;
-  $main::opt_test    = 0;
-
-  # These are undocumented flags used only by unittests.
-  $main::opt_test_stride = 0;
-
-  # Are we using $SYMBOL_PAGE?
-  $main::use_symbol_page = 0;
-
-  # Files returned by TempName.
-  %main::tempnames = ();
-
-  # Type of profile we are dealing with
-  # Supported types:
-  #     cpu
-  #     heap
-  #     growth
-  #     contention
-  $main::profile_type = '';     # Empty type means "unknown"
-
-  GetOptions("help!"          => \$main::opt_help,
-             "version!"       => \$main::opt_version,
-             "cum!"           => \$main::opt_cum,
-             "base=s"         => \$main::opt_base,
-             "seconds=i"      => \$main::opt_seconds,
-             "add_lib=s"      => \$main::opt_lib,
-             "lib_prefix=s"   => \$main::opt_lib_prefix,
-             "functions!"     => \$main::opt_functions,
-             "lines!"         => \$main::opt_lines,
-             "addresses!"     => \$main::opt_addresses,
-             "files!"         => \$main::opt_files,
-             "text!"          => \$main::opt_text,
-             "callgrind!"     => \$main::opt_callgrind,
-             "list=s"         => \$main::opt_list,
-             "disasm=s"       => \$main::opt_disasm,
-             "symbols!"       => \$main::opt_symbols,
-             "gv!"            => \$main::opt_gv,
-             "evince!"        => \$main::opt_evince,
-             "web!"           => \$main::opt_web,
-             "dot!"           => \$main::opt_dot,
-             "ps!"            => \$main::opt_ps,
-             "pdf!"           => \$main::opt_pdf,
-             "svg!"           => \$main::opt_svg,
-             "gif!"           => \$main::opt_gif,
-             "raw!"           => \$main::opt_raw,
-             "interactive!"   => \$main::opt_interactive,
-             "nodecount=i"    => \$main::opt_nodecount,
-             "nodefraction=f" => \$main::opt_nodefraction,
-             "edgefraction=f" => \$main::opt_edgefraction,
-             "maxdegree=i"    => \$main::opt_maxdegree,
-             "focus=s"        => \$main::opt_focus,
-             "thread=s"       => \$main::opt_thread,
-             "ignore=s"       => \$main::opt_ignore,
-             "scale=i"        => \$main::opt_scale,
-             "heapcheck"      => \$main::opt_heapcheck,
-             "retain=s"       => \$main::opt_retain,
-             "exclude=s"      => \$main::opt_exclude,
-             "inuse_space!"   => \$main::opt_inuse_space,
-             "inuse_objects!" => \$main::opt_inuse_objects,
-             "alloc_space!"   => \$main::opt_alloc_space,
-             "alloc_objects!" => \$main::opt_alloc_objects,
-             "show_bytes!"    => \$main::opt_show_bytes,
-             "drop_negative!" => \$main::opt_drop_negative,
-             "total_delay!"   => \$main::opt_total_delay,
-             "contentions!"   => \$main::opt_contentions,
-             "mean_delay!"    => \$main::opt_mean_delay,
-             "tools=s"        => \$main::opt_tools,
-             "test!"          => \$main::opt_test,
-             "debug!"         => \$main::opt_debug,
-             # Undocumented flags used only by unittests:
-             "test_stride=i"  => \$main::opt_test_stride,
-      ) || usage("Invalid option(s)");
-
-  # Deal with the standard --help and --version
-  if ($main::opt_help) {
-    print usage_string();
-    exit(0);
-  }
-
-  if ($main::opt_version) {
-    print version_string();
-    exit(0);
-  }
-
-  # Disassembly/listing/symbols mode requires address-level info
-  if ($main::opt_disasm || $main::opt_list || $main::opt_symbols) {
-    $main::opt_functions = 0;
-    $main::opt_lines = 0;
-    $main::opt_addresses = 1;
-    $main::opt_files = 0;
-  }
-
-  # Check heap-profiling flags
-  if ($main::opt_inuse_space +
-      $main::opt_inuse_objects +
-      $main::opt_alloc_space +
-      $main::opt_alloc_objects > 1) {
-    usage("Specify at most on of --inuse/--alloc options");
-  }
-
-  # Check output granularities
-  my $grains =
-      $main::opt_functions +
-      $main::opt_lines +
-      $main::opt_addresses +
-      $main::opt_files +
-      0;
-  if ($grains > 1) {
-    usage("Only specify one output granularity option");
-  }
-  if ($grains == 0) {
-    $main::opt_functions = 1;
-  }
-
-  # Check output modes
-  my $modes =
-      $main::opt_text +
-      $main::opt_callgrind +
-      ($main::opt_list eq '' ? 0 : 1) +
-      ($main::opt_disasm eq '' ? 0 : 1) +
-      ($main::opt_symbols == 0 ? 0 : 1) +
-      $main::opt_gv +
-      $main::opt_evince +
-      $main::opt_web +
-      $main::opt_dot +
-      $main::opt_ps +
-      $main::opt_pdf +
-      $main::opt_svg +
-      $main::opt_gif +
-      $main::opt_raw +
-      $main::opt_interactive +
-      0;
-  if ($modes > 1) {
-    usage("Only specify one output mode");
-  }
-  if ($modes == 0) {
-    if (-t STDOUT) {  # If STDOUT is a tty, activate interactive mode
-      $main::opt_interactive = 1;
-    } else {
-      $main::opt_text = 1;
-    }
-  }
-
-  if ($main::opt_test) {
-    RunUnitTests();
-    # Should not return
-    exit(1);
-  }
-
-  # Binary name and profile arguments list
-  $main::prog = "";
-  @main::pfile_args = ();
-
-  # Remote profiling without a binary (using $SYMBOL_PAGE instead)
-  if (@ARGV > 0) {
-    if (IsProfileURL($ARGV[0])) {
-      $main::use_symbol_page = 1;
-    } elsif (IsSymbolizedProfileFile($ARGV[0])) {
-      $main::use_symbolized_profile = 1;
-      $main::prog = $UNKNOWN_BINARY;  # will be set later from the profile file
-    }
-  }
-
-  if ($main::use_symbol_page || $main::use_symbolized_profile) {
-    # We don't need a binary!
-    my %disabled = ('--lines' => $main::opt_lines,
-                    '--disasm' => $main::opt_disasm);
-    for my $option (keys %disabled) {
-      usage("$option cannot be used without a binary") if $disabled{$option};
-    }
-    # Set $main::prog later...
-    scalar(@ARGV) || usage("Did not specify profile file");
-  } elsif ($main::opt_symbols) {
-    # --symbols needs a binary-name (to run nm on, etc) but not profiles
-    $main::prog = shift(@ARGV) || usage("Did not specify program");
-  } else {
-    $main::prog = shift(@ARGV) || usage("Did not specify program");
-    scalar(@ARGV) || usage("Did not specify profile file");
-  }
-
-  # Parse profile file/location arguments
-  foreach my $farg (@ARGV) {
-    if ($farg =~ m/(.*)\@([0-9]+)(|\/.*)$/ ) {
-      my $machine = $1;
-      my $num_machines = $2;
-      my $path = $3;
-      for (my $i = 0; $i < $num_machines; $i++) {
-        unshift(@main::pfile_args, "$i.$machine$path");
-      }
-    } else {
-      unshift(@main::pfile_args, $farg);
-    }
-  }
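-  # Illustrative note: a "host@N" argument is expanded into N per-machine
-  # profile paths.  For example, "myhost@3/profile" would be expanded to
-  # "0.myhost/profile", "1.myhost/profile" and "2.myhost/profile".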
-
-  if ($main::use_symbol_page) {
-    unless (IsProfileURL($main::pfile_args[0])) {
-      error("The first profile should be a remote form to use $SYMBOL_PAGE\n");
-    }
-    CheckSymbolPage();
-    $main::prog = FetchProgramName();
-  } elsif (!$main::use_symbolized_profile) {  # may not need objtools!
-    ConfigureObjTools($main::prog)
-  }
-
-  # Break the opt_lib_prefix into the prefix_list array
-  @prefix_list = split (',', $main::opt_lib_prefix);
-
-  # Remove trailing / from the prefixes in the list, to prevent
-  # searching things like /my/path//lib/mylib.so
-  foreach (@prefix_list) {
-    s|/+$||;
-  }
-}
-
-sub FilterAndPrint {
-  my ($profile, $symbols, $libs, $thread) = @_;
-
-  # Get total data in profile
-  my $total = TotalProfile($profile);
-
-  # Remove uninteresting stack items
-  $profile = RemoveUninterestingFrames($symbols, $profile);
-
-  # Focus?
-  if ($main::opt_focus ne '') {
-    $profile = FocusProfile($symbols, $profile, $main::opt_focus);
-  }
-
-  # Ignore?
-  if ($main::opt_ignore ne '') {
-    $profile = IgnoreProfile($symbols, $profile, $main::opt_ignore);
-  }
-
-  my $calls = ExtractCalls($symbols, $profile);
-
-  # Reduce profiles to required output granularity, and also clean
-  # each stack trace so a given entry exists at most once.
-  my $reduced = ReduceProfile($symbols, $profile);
-
-  # Get derived profiles
-  my $flat = FlatProfile($reduced);
-  my $cumulative = CumulativeProfile($reduced);
-
-  # Print
-  if (!$main::opt_interactive) {
-    if ($main::opt_disasm) {
-      PrintDisassembly($libs, $flat, $cumulative, $main::opt_disasm);
-    } elsif ($main::opt_list) {
-      PrintListing($total, $libs, $flat, $cumulative, $main::opt_list, 0);
-    } elsif ($main::opt_text) {
-      # Make sure the output is empty when we have nothing to report
-      # (only matters when --heapcheck is given but we must be
-      # compatible with old branches that did not pass --heapcheck always):
-      if ($total != 0) {
-        printf("Total%s: %s %s\n",
-               (defined($thread) ? " (t$thread)" : ""),
-               Unparse($total), Units());
-      }
-      PrintText($symbols, $flat, $cumulative, -1);
-    } elsif ($main::opt_raw) {
-      PrintSymbolizedProfile($symbols, $profile, $main::prog);
-    } elsif ($main::opt_callgrind) {
-      PrintCallgrind($calls);
-    } else {
-      if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
-        if ($main::opt_gv) {
-          RunGV(TempName($main::next_tmpfile, "ps"), "");
-        } elsif ($main::opt_evince) {
-          RunEvince(TempName($main::next_tmpfile, "pdf"), "");
-        } elsif ($main::opt_web) {
-          my $tmp = TempName($main::next_tmpfile, "svg");
-          RunWeb($tmp);
-          # The command we run might hand the file name off
-          # to an already running browser instance and then exit.
-          # Normally, we'd remove $tmp on exit (right now), but instead
-          # we fork a child to remove $tmp a little later, so that the
-          # browser has time to load it first.
-          delete $main::tempnames{$tmp};
-          if (fork() == 0) {
-            sleep 5;
-            unlink($tmp);
-            exit(0);
-          }
-        }
-      } else {
-        cleanup();
-        exit(1);
-      }
-    }
-  } else {
-    InteractiveMode($profile, $symbols, $libs, $total);
-  }
-}
-
-sub Main() {
-  Init();
-  $main::collected_profile = undef;
-  @main::profile_files = ();
-  $main::op_time = time();
-
-  # Printing symbols is special and requires a lot less info than most.
-  if ($main::opt_symbols) {
-    PrintSymbols(*STDIN);   # Get /proc/maps and symbols output from stdin
-    return;
-  }
-
-  # Fetch all profile data
-  FetchDynamicProfiles();
-
-  # this will hold symbols that we read from the profile files
-  my $symbol_map = {};
-
-  # Read one profile, pick the last item on the list
-  my $data = ReadProfile($main::prog, pop(@main::profile_files));
-  my $profile = $data->{profile};
-  my $pcs = $data->{pcs};
-  my $libs = $data->{libs};   # Info about main program and shared libraries
-  $symbol_map = MergeSymbols($symbol_map, $data->{symbols});
-
-  # Add additional profiles, if available.
-  if (scalar(@main::profile_files) > 0) {
-    foreach my $pname (@main::profile_files) {
-      my $data2 = ReadProfile($main::prog, $pname);
-      $profile = AddProfile($profile, $data2->{profile});
-      $pcs = AddPcs($pcs, $data2->{pcs});
-      $symbol_map = MergeSymbols($symbol_map, $data2->{symbols});
-    }
-  }
-
-  # Subtract base from profile, if specified
-  if ($main::opt_base ne '') {
-    my $base = ReadProfile($main::prog, $main::opt_base);
-    $profile = SubtractProfile($profile, $base->{profile});
-    $pcs = AddPcs($pcs, $base->{pcs});
-    $symbol_map = MergeSymbols($symbol_map, $base->{symbols});
-  }
-
-  # Collect symbols
-  my $symbols;
-  if ($main::use_symbolized_profile) {
-    $symbols = FetchSymbols($pcs, $symbol_map);
-  } elsif ($main::use_symbol_page) {
-    $symbols = FetchSymbols($pcs);
-  } else {
-    # TODO(csilvers): $libs uses the /proc/self/maps data from profile1,
-    # which may differ from the data from subsequent profiles, especially
-    # if they were run on different machines.  Use appropriate libs for
-    # each pc somehow.
-    $symbols = ExtractSymbols($libs, $pcs);
-  }
-
-  if (!defined($main::opt_thread)) {
-    FilterAndPrint($profile, $symbols, $libs);
-  }
-  if (defined($data->{threads})) {
-    foreach my $thread (sort { $a <=> $b } keys(%{$data->{threads}})) {
-      if (defined($main::opt_thread) &&
-          ($main::opt_thread eq '*' || $main::opt_thread == $thread)) {
-        my $thread_profile = $data->{threads}{$thread};
-        FilterAndPrint($thread_profile, $symbols, $libs, $thread);
-      }
-    }
-  }
-
-  cleanup();
-  exit(0);
-}
-
-##### Entry Point #####
-
-Main();
-
-# Temporary code to detect if we're running on a Goobuntu system.
-# These systems don't have the right stuff installed for the special
-# Readline libraries to work, so as a temporary workaround, we default
-# to using the normal stdio code, rather than the fancier readline-based
-# code
-sub ReadlineMightFail {
-  if (-e '/lib/libtermcap.so.2') {
-    return 0;  # libtermcap exists, so readline should be okay
-  } else {
-    return 1;
-  }
-}
-
-sub RunGV {
-  my $fname = shift;
-  my $bg = shift;       # "" or " &" if we should run in background
-  if (!system(ShellEscape(@GV, "--version") . " >$dev_null 2>&1")) {
-    # Options using double dash are supported by this gv version.
-    # Also, turn on noantialias to better handle bug in gv for
-    # postscript files with large dimensions.
-    # TODO: Maybe we should not pass the --noantialias flag
-    # if the gv version is known to work properly without the flag.
-    system(ShellEscape(@GV, "--scale=$main::opt_scale", "--noantialias", $fname)
-           . $bg);
-  } else {
-    # Old gv version - only supports options that use single dash.
-    print STDERR ShellEscape(@GV, "-scale", $main::opt_scale) . "\n";
-    system(ShellEscape(@GV, "-scale", "$main::opt_scale", $fname) . $bg);
-  }
-}
-
-sub RunEvince {
-  my $fname = shift;
-  my $bg = shift;       # "" or " &" if we should run in background
-  system(ShellEscape(@EVINCE, $fname) . $bg);
-}
-
-sub RunWeb {
-  my $fname = shift;
-  print STDERR "Loading web page file:///$fname\n";
-
-  if (`uname` =~ /Darwin/) {
-    # OS X: open will use standard preference for SVG files.
-    system("/usr/bin/open", $fname);
-    return;
-  }
-
-  # Some kind of Unix; try generic symlinks, then specific browsers.
-  # (Stop once we find one.)
-  # Works best if the browser is already running.
-  my @alt = (
-    "/etc/alternatives/gnome-www-browser",
-    "/etc/alternatives/x-www-browser",
-    "google-chrome",
-    "firefox",
-  );
-  foreach my $b (@alt) {
-    if (system($b, $fname) == 0) {
-      return;
-    }
-  }
-
-  print STDERR "Could not load web browser.\n";
-}
-
-sub RunKcachegrind {
-  my $fname = shift;
-  my $bg = shift;       # "" or " &" if we should run in background
-  print STDERR "Starting '@KCACHEGRIND " . $fname . $bg . "'\n";
-  system(ShellEscape(@KCACHEGRIND, $fname) . $bg);
-}
-
-
-##### Interactive helper routines #####
-
-sub InteractiveMode {
-  $| = 1;  # Make output unbuffered for interactive mode
-  my ($orig_profile, $symbols, $libs, $total) = @_;
-
-  print STDERR "Welcome to jeprof!  For help, type 'help'.\n";
-
-  # Use ReadLine if it's installed and input comes from a console.
-  if ( -t STDIN &&
-       !ReadlineMightFail() &&
-       defined(eval {require Term::ReadLine}) ) {
-    my $term = new Term::ReadLine 'jeprof';
-    while ( defined ($_ = $term->readline('(jeprof) '))) {
-      $term->addhistory($_) if /\S/;
-      if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
-        last;    # exit when we get an interactive command to quit
-      }
-    }
-  } else {       # don't have readline
-    while (1) {
-      print STDERR "(jeprof) ";
-      $_ = <STDIN>;
-      last if ! defined $_ ;
-      s/\r//g;         # turn windows-looking lines into unix-looking lines
-
-      # Save some flags that might be reset by InteractiveCommand()
-      my $save_opt_lines = $main::opt_lines;
-
-      if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
-        last;    # exit when we get an interactive command to quit
-      }
-
-      # Restore flags
-      $main::opt_lines = $save_opt_lines;
-    }
-  }
-}
-
-# Takes the original profile, symbols, libs, total count, and the command to run.
-# Returns 1 if we should keep going, or 0 if we were asked to quit
-sub InteractiveCommand {
-  my($orig_profile, $symbols, $libs, $total, $command) = @_;
-  $_ = $command;                # just to make future m//'s easier
-  if (!defined($_)) {
-    print STDERR "\n";
-    return 0;
-  }
-  if (m/^\s*quit/) {
-    return 0;
-  }
-  if (m/^\s*help/) {
-    InteractiveHelpMessage();
-    return 1;
-  }
-  # Clear all the mode options -- mode is controlled by "$command"
-  $main::opt_text = 0;
-  $main::opt_callgrind = 0;
-  $main::opt_disasm = 0;
-  $main::opt_list = 0;
-  $main::opt_gv = 0;
-  $main::opt_evince = 0;
-  $main::opt_cum = 0;
-
-  if (m/^\s*(text|top)(\d*)\s*(.*)/) {
-    $main::opt_text = 1;
-
-    my $line_limit = ($2 ne "") ? int($2) : 10;
-
-    my $routine;
-    my $ignore;
-    ($routine, $ignore) = ParseInteractiveArgs($3);
-
-    my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
-    my $reduced = ReduceProfile($symbols, $profile);
-
-    # Get derived profiles
-    my $flat = FlatProfile($reduced);
-    my $cumulative = CumulativeProfile($reduced);
-
-    PrintText($symbols, $flat, $cumulative, $line_limit);
-    return 1;
-  }
-  if (m/^\s*callgrind\s*([^ \n]*)/) {
-    $main::opt_callgrind = 1;
-
-    # Get derived profiles
-    my $calls = ExtractCalls($symbols, $orig_profile);
-    my $filename = $1;
-    if ( $1 eq '' ) {
-      $filename = TempName($main::next_tmpfile, "callgrind");
-    }
-    PrintCallgrind($calls, $filename);
-    if ( $1 eq '' ) {
-      RunKcachegrind($filename, " & ");
-      $main::next_tmpfile++;
-    }
-
-    return 1;
-  }
-  if (m/^\s*(web)?list\s*(.+)/) {
-    my $html = (defined($1) && ($1 eq "web"));
-    $main::opt_list = 1;
-
-    my $routine;
-    my $ignore;
-    ($routine, $ignore) = ParseInteractiveArgs($2);
-
-    my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
-    my $reduced = ReduceProfile($symbols, $profile);
-
-    # Get derived profiles
-    my $flat = FlatProfile($reduced);
-    my $cumulative = CumulativeProfile($reduced);
-
-    PrintListing($total, $libs, $flat, $cumulative, $routine, $html);
-    return 1;
-  }
-  if (m/^\s*disasm\s*(.+)/) {
-    $main::opt_disasm = 1;
-
-    my $routine;
-    my $ignore;
-    ($routine, $ignore) = ParseInteractiveArgs($1);
-
-    # Process current profile to account for various settings
-    my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
-    my $reduced = ReduceProfile($symbols, $profile);
-
-    # Get derived profiles
-    my $flat = FlatProfile($reduced);
-    my $cumulative = CumulativeProfile($reduced);
-
-    PrintDisassembly($libs, $flat, $cumulative, $routine);
-    return 1;
-  }
-  if (m/^\s*(gv|web|evince)\s*(.*)/) {
-    $main::opt_gv = 0;
-    $main::opt_evince = 0;
-    $main::opt_web = 0;
-    if ($1 eq "gv") {
-      $main::opt_gv = 1;
-    } elsif ($1 eq "evince") {
-      $main::opt_evince = 1;
-    } elsif ($1 eq "web") {
-      $main::opt_web = 1;
-    }
-
-    my $focus;
-    my $ignore;
-    ($focus, $ignore) = ParseInteractiveArgs($2);
-
-    # Process current profile to account for various settings
-    my $profile = ProcessProfile($total, $orig_profile, $symbols,
-                                 $focus, $ignore);
-    my $reduced = ReduceProfile($symbols, $profile);
-
-    # Get derived profiles
-    my $flat = FlatProfile($reduced);
-    my $cumulative = CumulativeProfile($reduced);
-
-    if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
-      if ($main::opt_gv) {
-        RunGV(TempName($main::next_tmpfile, "ps"), " &");
-      } elsif ($main::opt_evince) {
-        RunEvince(TempName($main::next_tmpfile, "pdf"), " &");
-      } elsif ($main::opt_web) {
-        RunWeb(TempName($main::next_tmpfile, "svg"));
-      }
-      $main::next_tmpfile++;
-    }
-    return 1;
-  }
-  if (m/^\s*$/) {
-    return 1;
-  }
-  print STDERR "Unknown command: try 'help'.\n";
-  return 1;
-}
-
-
-sub ProcessProfile {
-  my $total_count = shift;
-  my $orig_profile = shift;
-  my $symbols = shift;
-  my $focus = shift;
-  my $ignore = shift;
-
-  # Process current profile to account for various settings
-  my $profile = $orig_profile;
-  printf("Total: %s %s\n", Unparse($total_count), Units());
-  if ($focus ne '') {
-    $profile = FocusProfile($symbols, $profile, $focus);
-    my $focus_count = TotalProfile($profile);
-    printf("After focusing on '%s': %s %s of %s (%0.1f%%)\n",
-           $focus,
-           Unparse($focus_count), Units(),
-           Unparse($total_count), ($focus_count*100.0) / $total_count);
-  }
-  if ($ignore ne '') {
-    $profile = IgnoreProfile($symbols, $profile, $ignore);
-    my $ignore_count = TotalProfile($profile);
-    printf("After ignoring '%s': %s %s of %s (%0.1f%%)\n",
-           $ignore,
-           Unparse($ignore_count), Units(),
-           Unparse($total_count),
-           ($ignore_count*100.0) / $total_count);
-  }
-
-  return $profile;
-}
-
-sub InteractiveHelpMessage {
-  print STDERR <<ENDOFHELP;
-Interactive jeprof mode
-
-Commands:
-  gv
-  gv [focus] [-ignore1] [-ignore2]
-      Show graphical hierarchical display of current profile.  Without
-      any arguments, shows all samples in the profile.  With the optional
-      "focus" argument, restricts the samples shown to just those where
-      the "focus" regular expression matches a routine name on the stack
-      trace.
-
-  web
-  web [focus] [-ignore1] [-ignore2]
-      Like GV, but displays profile in your web browser instead of using
-      Ghostview. Works best if your web browser is already running.
-      To change the browser that gets used:
-      On Linux, set the /etc/alternatives/gnome-www-browser symlink.
-      On OS X, change the Finder association for SVG files.
-
-  list [routine_regexp] [-ignore1] [-ignore2]
-      Show source listing of routines whose names match "routine_regexp"
-
-  weblist [routine_regexp] [-ignore1] [-ignore2]
-     Displays a source listing of routines whose names match "routine_regexp"
-     in a web browser.  You can click on source lines to view the
-     corresponding disassembly.
-
-  top [--cum] [-ignore1] [-ignore2]
-  top20 [--cum] [-ignore1] [-ignore2]
-  top37 [--cum] [-ignore1] [-ignore2]
-      Show top lines ordered by flat profile count, or cumulative count
-      if --cum is specified.  If a number is present after 'top', the
-      top K routines will be shown (defaults to showing the top 10)
-
-  disasm [routine_regexp] [-ignore1] [-ignore2]
-      Show disassembly of routines whose names match "routine_regexp",
-      annotated with sample counts.
-
-  callgrind
-  callgrind [filename]
-      Generates callgrind file. If no filename is given, kcachegrind is called.
-
-  help - This listing
-  quit or ^D - End jeprof
-
-For commands that accept optional -ignore tags, samples where any routine in
-the stack trace matches the regular expression in any of the -ignore
-parameters will be ignored.
-
-Further pprof details are available at this location (or one similar):
-
- /usr/doc/gperftools-$PPROF_VERSION/cpu_profiler.html
- /usr/doc/gperftools-$PPROF_VERSION/heap_profiler.html
-
-ENDOFHELP
-}
-sub ParseInteractiveArgs {
-  my $args = shift;
-  my $focus = "";
-  my $ignore = "";
-  my @x = split(/ +/, $args);
-  foreach $a (@x) {
-    if ($a =~ m/^(--|-)lines$/) {
-      $main::opt_lines = 1;
-    } elsif ($a =~ m/^(--|-)cum$/) {
-      $main::opt_cum = 1;
-    } elsif ($a =~ m/^-(.*)/) {
-      $ignore .= (($ignore ne "") ? "|" : "" ) . $1;
-    } else {
-      $focus .= (($focus ne "") ? "|" : "" ) . $a;
-    }
-  }
-  if ($ignore ne "") {
-    print STDERR "Ignoring samples in call stacks that match '$ignore'\n";
-  }
-  return ($focus, $ignore);
-}
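-# Illustrative example (a sketch of the parsing above): for an interactive
-# command such as "top Mutex -string --cum", the argument string would parse as
-#   my ($focus, $ignore) = ParseInteractiveArgs("Mutex -string --cum");
-#   # $focus eq "Mutex", $ignore eq "string", and $main::opt_cum is set to 1.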
-
-##### Output code #####
-
-sub TempName {
-  my $fnum = shift;
-  my $ext = shift;
-  my $file = "$main::tmpfile_ps.$fnum.$ext";
-  $main::tempnames{$file} = 1;
-  return $file;
-}
-
-# Print profile data in packed binary format (64-bit) to standard out
-sub PrintProfileData {
-  my $profile = shift;
-
-  # print header (64-bit style)
-  # (zero) (header-size) (version) (sample-period) (zero)
-  print pack('L*', 0, 0, 3, 0, 0, 0, 1, 0, 0, 0);
-
-  foreach my $k (keys(%{$profile})) {
-    my $count = $profile->{$k};
-    my @addrs = split(/\n/, $k);
-    if ($#addrs >= 0) {
-      my $depth = $#addrs + 1;
-      # int(foo / 2**32) is the only reliable way to get rid of bottom
-      # 32 bits on both 32- and 64-bit systems.
-      print pack('L*', $count & 0xFFFFFFFF, int($count / 2**32));
-      print pack('L*', $depth & 0xFFFFFFFF, int($depth / 2**32));
-
-      foreach my $full_addr (@addrs) {
-        my $addr = $full_addr;
-        $addr =~ s/0x0*//;  # strip off leading 0x, zeroes
-        if (length($addr) > 16) {
-          print STDERR "Invalid address in profile: $full_addr\n";
-          next;
-        }
-        my $low_addr = substr($addr, -8);       # get last 8 hex chars
-        my $high_addr = substr($addr, -16, 8);  # get up to 8 more hex chars
-        print pack('L*', hex('0x' . $low_addr), hex('0x' . $high_addr));
-      }
-    }
-  }
-}
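-# Layout note (a sketch of the encoding above): pack('L*', ...) emits each
-# 64-bit value as two 32-bit words, low half first, so the header encodes the
-# five 64-bit words 0, 3, 0, 1, 0.  Each stack is then emitted as <count>
-# <depth> followed by <depth> addresses in the same low-word/high-word form.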
-
-# Print symbols and profile data
-sub PrintSymbolizedProfile {
-  my $symbols = shift;
-  my $profile = shift;
-  my $prog = shift;
-
-  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-  my $symbol_marker = $&;
-
-  print '--- ', $symbol_marker, "\n";
-  if (defined($prog)) {
-    print 'binary=', $prog, "\n";
-  }
-  while (my ($pc, $name) = each(%{$symbols})) {
-    my $sep = ' ';
-    print '0x', $pc;
-    # We have a list of function names, which include the inlined
-    # calls.  They are separated (and terminated) by --, which is
-    # illegal in function names.
-    for (my $j = 2; $j <= $#{$name}; $j += 3) {
-      print $sep, $name->[$j];
-      $sep = '--';
-    }
-    print "\n";
-  }
-  print '---', "\n";
-
-  my $profile_marker;
-  if ($main::profile_type eq 'heap') {
-    $HEAP_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-    $profile_marker = $&;
-  } elsif ($main::profile_type eq 'growth') {
-    $GROWTH_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-    $profile_marker = $&;
-  } elsif ($main::profile_type eq 'contention') {
-    $CONTENTION_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-    $profile_marker = $&;
-  } else { # elsif ($main::profile_type eq 'cpu')
-    $PROFILE_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-    $profile_marker = $&;
-  }
-
-  print '--- ', $profile_marker, "\n";
-  if (defined($main::collected_profile)) {
-    # if used with remote fetch, simply dump the collected profile to output.
-    open(SRC, "<$main::collected_profile");
-    while (<SRC>) {
-      print $_;
-    }
-    close(SRC);
-  } else {
-    # --raw/http: For everything to work correctly for non-remote profiles, we
-    # would need to extend PrintProfileData() to handle all possible profile
-    # types, re-enable the code that is currently disabled in ReadCPUProfile()
-    # and FixCallerAddresses(), and remove the remote profile dumping code in
-    # the block above.
-    die "--raw/http: jeprof can only dump remote profiles for --raw\n";
-    # dump a cpu-format profile to standard out
-    PrintProfileData($profile);
-  }
-}
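-# Format sketch of the symbolized output produced above: a "--- <symbol
-# marker>" header, an optional "binary=<prog>" line, one
-# "0x<pc> <name>[--<inlined name>...]" line per symbol, a "---" separator,
-# and then "--- <profile marker>" followed by the raw collected profile data.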
-
-# Print text output
-sub PrintText {
-  my $symbols = shift;
-  my $flat = shift;
-  my $cumulative = shift;
-  my $line_limit = shift;
-
-  my $total = TotalProfile($flat);
-
-  # Which profile to sort by?
-  my $s = $main::opt_cum ? $cumulative : $flat;
-
-  my $running_sum = 0;
-  my $lines = 0;
-  foreach my $k (sort { GetEntry($s, $b) <=> GetEntry($s, $a) || $a cmp $b }
-                 keys(%{$cumulative})) {
-    my $f = GetEntry($flat, $k);
-    my $c = GetEntry($cumulative, $k);
-    $running_sum += $f;
-
-    my $sym = $k;
-    if (exists($symbols->{$k})) {
-      $sym = $symbols->{$k}->[0] . " " . $symbols->{$k}->[1];
-      if ($main::opt_addresses) {
-        $sym = $k . " " . $sym;
-      }
-    }
-
-    if ($f != 0 || $c != 0) {
-      printf("%8s %6s %6s %8s %6s %s\n",
-             Unparse($f),
-             Percent($f, $total),
-             Percent($running_sum, $total),
-             Unparse($c),
-             Percent($c, $total),
-             $sym);
-    }
-    $lines++;
-    last if ($line_limit >= 0 && $lines >= $line_limit);
-  }
-}
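-# Column note: each line printed above shows, in order, the flat count, flat
-# percentage, running sum of flat counts as a percentage of the total,
-# cumulative count, cumulative percentage, and the symbol name.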
-
-# Callgrind format has a compression for repeated function and file
-# names.  You show the name the first time, and just use its number
-# subsequently.  This can cut down the file to about a third or a
-# quarter of its uncompressed size.  $key and $val are the key/value
-# pair that would normally be printed by callgrind; $map is a map from
-# value to number.
-sub CompressedCGName {
-  my($key, $val, $map) = @_;
-  my $idx = $map->{$val};
-  # For very short keys, providing an index hurts rather than helps.
-  if (length($val) <= 3) {
-    return "$key=$val\n";
-  } elsif (defined($idx)) {
-    return "$key=($idx)\n";
-  } else {
-    # scalar(keys(%{$map})) gives the number of items in the map.
-    $idx = scalar(keys(%{$map})) + 1;
-    $map->{$val} = $idx;
-    return "$key=($idx) $val\n";
-  }
-}
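-# Illustrative example: the first time a long name is seen it is emitted with
-# its index, e.g. "fl=(1) /path/to/foo.cc"; later occurrences of the same name
-# emit just "fl=(1)".  Names of three characters or fewer are always emitted
-# verbatim, e.g. "fn=foo".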
-
-# Print the call graph in a way that's suitable for callgrind.
-sub PrintCallgrind {
-  my $calls = shift;
-  my $filename;
-  my %filename_to_index_map;
-  my %fnname_to_index_map;
-
-  if ($main::opt_interactive) {
-    $filename = shift;
-    print STDERR "Writing callgrind file to '$filename'.\n"
-  } else {
-    $filename = "&STDOUT";
-  }
-  open(CG, ">$filename");
-  printf CG ("events: Hits\n\n");
-  foreach my $call ( map { $_->[0] }
-                     sort { $a->[1] cmp $b ->[1] ||
-                            $a->[2] <=> $b->[2] }
-                     map { /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
-                           [$_, $1, $2] }
-                     keys %$calls ) {
-    my $count = int($calls->{$call});
-    $call =~ /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
-    my ( $caller_file, $caller_line, $caller_function,
-         $callee_file, $callee_line, $callee_function ) =
-       ( $1, $2, $3, $5, $6, $7 );
-
-    # TODO(csilvers): for better compression, collect all the
-    # caller/callee_files and functions first, before printing
-    # anything, and only compress those referenced more than once.
-    printf CG CompressedCGName("fl", $caller_file, \%filename_to_index_map);
-    printf CG CompressedCGName("fn", $caller_function, \%fnname_to_index_map);
-    if (defined $6) {
-      printf CG CompressedCGName("cfl", $callee_file, \%filename_to_index_map);
-      printf CG CompressedCGName("cfn", $callee_function, \%fnname_to_index_map);
-      printf CG ("calls=$count $callee_line\n");
-    }
-    printf CG ("$caller_line $count\n\n");
-  }
-}
-
-# Print disassembly for all routines that match $main::opt_disasm
-sub PrintDisassembly {
-  my $libs = shift;
-  my $flat = shift;
-  my $cumulative = shift;
-  my $disasm_opts = shift;
-
-  my $total = TotalProfile($flat);
-
-  foreach my $lib (@{$libs}) {
-    my $symbol_table = GetProcedureBoundaries($lib->[0], $disasm_opts);
-    my $offset = AddressSub($lib->[1], $lib->[3]);
-    foreach my $routine (sort ByName keys(%{$symbol_table})) {
-      my $start_addr = $symbol_table->{$routine}->[0];
-      my $end_addr = $symbol_table->{$routine}->[1];
-      # See if there are any samples in this routine
-      my $length = hex(AddressSub($end_addr, $start_addr));
-      my $addr = AddressAdd($start_addr, $offset);
-      for (my $i = 0; $i < $length; $i++) {
-        if (defined($cumulative->{$addr})) {
-          PrintDisassembledFunction($lib->[0], $offset,
-                                    $routine, $flat, $cumulative,
-                                    $start_addr, $end_addr, $total);
-          last;
-        }
-        $addr = AddressInc($addr);
-      }
-    }
-  }
-}
-
-# Return an array of tuples of the form:
-#       [start_address, filename, linenumber, instruction, limit_address]
-# E.g.,
-#       ["0x806c43d", "/foo/bar.cc", 131, "ret", "0x806c440"]
-sub Disassemble {
-  my $prog = shift;
-  my $offset = shift;
-  my $start_addr = shift;
-  my $end_addr = shift;
-
-  my $objdump = $obj_tool_map{"objdump"};
-  my $cmd = ShellEscape($objdump, "-C", "-d", "-l", "--no-show-raw-insn",
-                        "--start-address=0x$start_addr",
-                        "--stop-address=0x$end_addr", $prog);
-  open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
-  my @result = ();
-  my $filename = "";
-  my $linenumber = -1;
-  my $last = ["", "", "", ""];
-  while (<OBJDUMP>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    chop;
-    if (m|\s*([^:\s]+):(\d+)\s*$|) {
-      # Location line of the form:
-      #   <filename>:<linenumber>
-      $filename = $1;
-      $linenumber = $2;
-    } elsif (m/^ +([0-9a-f]+):\s*(.*)/) {
-      # Disassembly line -- zero-extend address to full length
-      my $addr = HexExtend($1);
-      my $k = AddressAdd($addr, $offset);
-      $last->[4] = $k;   # Store ending address for previous instruction
-      $last = [$k, $filename, $linenumber, $2, $end_addr];
-      push(@result, $last);
-    }
-  }
-  close(OBJDUMP);
-  return @result;
-}
-
-# The input file should contain lines of the form /proc/maps-like
-# output (same format as expected from the profiles) or that look
-# like hex addresses (like "0xDEADBEEF").  We will parse all
-# /proc/maps output, and for all the hex addresses, we will output
-# "short" symbol names, one per line, in the same order as the input.
-sub PrintSymbols {
-  my $maps_and_symbols_file = shift;
-
-  # ParseLibraries expects pcs to be in a set.  Fine by us...
-  my @pclist = ();   # pcs in sorted order
-  my $pcs = {};
-  my $map = "";
-  foreach my $line (<$maps_and_symbols_file>) {
-    $line =~ s/\r//g;    # turn windows-looking lines into unix-looking lines
-    if ($line =~ /\b(0x[0-9a-f]+)\b/i) {
-      push(@pclist, HexExtend($1));
-      $pcs->{$pclist[-1]} = 1;
-    } else {
-      $map .= $line;
-    }
-  }
-
-  my $libs = ParseLibraries($main::prog, $map, $pcs);
-  my $symbols = ExtractSymbols($libs, $pcs);
-
-  foreach my $pc (@pclist) {
-    # ->[0] is the shortname, ->[2] is the full name
-    print(($symbols->{$pc}->[0] || "??") . "\n");
-  }
-}
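-# Usage sketch: feeding this routine a file containing /proc/maps output
-# followed by lines such as "0x400123" would print one short symbol name per
-# address, in input order, with "??" for addresses that cannot be resolved.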
-
-
-# For sorting functions by name
-sub ByName {
-  return ShortFunctionName($a) cmp ShortFunctionName($b);
-}
-
-# Print source-listing for all routines that match $list_opts
-sub PrintListing {
-  my $total = shift;
-  my $libs = shift;
-  my $flat = shift;
-  my $cumulative = shift;
-  my $list_opts = shift;
-  my $html = shift;
-
-  my $output = \*STDOUT;
-  my $fname = "";
-
-  if ($html) {
-    # Arrange to write the output to a temporary file
-    $fname = TempName($main::next_tmpfile, "html");
-    $main::next_tmpfile++;
-    if (!open(TEMP, ">$fname")) {
-      print STDERR "$fname: $!\n";
-      return;
-    }
-    $output = \*TEMP;
-    print $output HtmlListingHeader();
-    printf $output ("<div class=\"legend\">%s<br>Total: %s %s</div>\n",
-                    $main::prog, Unparse($total), Units());
-  }
-
-  my $listed = 0;
-  foreach my $lib (@{$libs}) {
-    my $symbol_table = GetProcedureBoundaries($lib->[0], $list_opts);
-    my $offset = AddressSub($lib->[1], $lib->[3]);
-    foreach my $routine (sort ByName keys(%{$symbol_table})) {
-      # Print if there are any samples in this routine
-      my $start_addr = $symbol_table->{$routine}->[0];
-      my $end_addr = $symbol_table->{$routine}->[1];
-      my $length = hex(AddressSub($end_addr, $start_addr));
-      my $addr = AddressAdd($start_addr, $offset);
-      for (my $i = 0; $i < $length; $i++) {
-        if (defined($cumulative->{$addr})) {
-          $listed += PrintSource(
-            $lib->[0], $offset,
-            $routine, $flat, $cumulative,
-            $start_addr, $end_addr,
-            $html,
-            $output);
-          last;
-        }
-        $addr = AddressInc($addr);
-      }
-    }
-  }
-
-  if ($html) {
-    if ($listed > 0) {
-      print $output HtmlListingFooter();
-      close($output);
-      RunWeb($fname);
-    } else {
-      close($output);
-      unlink($fname);
-    }
-  }
-}
-
-sub HtmlListingHeader {
-  return <<'EOF';
-<!DOCTYPE html>
-<html>
-<head>
-<title>Pprof listing</title>
-<style type="text/css">
-body {
-  font-family: sans-serif;
-}
-h1 {
-  font-size: 1.5em;
-  margin-bottom: 4px;
-}
-.legend {
-  font-size: 1.25em;
-}
-.line {
-  color: #aaaaaa;
-}
-.nop {
-  color: #aaaaaa;
-}
-.unimportant {
-  color: #cccccc;
-}
-.disasmloc {
-  color: #000000;
-}
-.deadsrc {
-  cursor: pointer;
-}
-.deadsrc:hover {
-  background-color: #eeeeee;
-}
-.livesrc {
-  color: #0000ff;
-  cursor: pointer;
-}
-.livesrc:hover {
-  background-color: #eeeeee;
-}
-.asm {
-  color: #008800;
-  display: none;
-}
-</style>
-<script type="text/javascript">
-function jeprof_toggle_asm(e) {
-  var target;
-  if (!e) e = window.event;
-  if (e.target) target = e.target;
-  else if (e.srcElement) target = e.srcElement;
-
-  if (target) {
-    var asm = target.nextSibling;
-    if (asm && asm.className == "asm") {
-      asm.style.display = (asm.style.display == "block" ? "" : "block");
-      e.preventDefault();
-      return false;
-    }
-  }
-}
-</script>
-</head>
-<body>
-EOF
-}
-
-sub HtmlListingFooter {
-  return <<'EOF';
-</body>
-</html>
-EOF
-}
-
-sub HtmlEscape {
-  my $text = shift;
-  $text =~ s/&/&amp;/g;
-  $text =~ s/</&lt;/g;
-  $text =~ s/>/&gt;/g;
-  return $text;
-}
-
-# Returns the indentation of the line, if it has any non-whitespace
-# characters.  Otherwise, returns -1.
-sub Indentation {
-  my $line = shift;
-  if ($line =~ m/^(\s*)\S/) {
-    return length($1);
-  } else {
-    return -1;
-  }
-}
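-# Example: Indentation("    foo();") returns 4, while a blank or
-# whitespace-only line returns -1.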
-
-# If the symbol table contains inlining info, Disassemble() may tag an
-# instruction with a location inside an inlined function.  But for
-# source listings, we prefer to use the location in the function we
-# are listing.  So use MapToSymbols() to fetch full location
-# information for each instruction and then pick out the first
-# location from a location list (location list contains callers before
-# callees in case of inlining).
-#
-# After this routine has run, each entry in $instructions contains:
-#   [0] start address
-#   [1] filename for function we are listing
-#   [2] line number for function we are listing
-#   [3] disassembly
-#   [4] limit address
-#   [5] most specific filename (may be different from [1] due to inlining)
-#   [6] most specific line number (may be different from [2] due to inlining)
-sub GetTopLevelLineNumbers {
-  my ($lib, $offset, $instructions) = @_;
-  my $pcs = [];
-  for (my $i = 0; $i <= $#{$instructions}; $i++) {
-    push(@{$pcs}, $instructions->[$i]->[0]);
-  }
-  my $symbols = {};
-  MapToSymbols($lib, $offset, $pcs, $symbols);
-  for (my $i = 0; $i <= $#{$instructions}; $i++) {
-    my $e = $instructions->[$i];
-    push(@{$e}, $e->[1]);
-    push(@{$e}, $e->[2]);
-    my $addr = $e->[0];
-    my $sym = $symbols->{$addr};
-    if (defined($sym)) {
-      if ($#{$sym} >= 2 && $sym->[1] =~ m/^(.*):(\d+)$/) {
-        $e->[1] = $1;  # File name
-        $e->[2] = $2;  # Line number
-      }
-    }
-  }
-}
-
-# Print source-listing for one routine
-sub PrintSource {
-  my $prog = shift;
-  my $offset = shift;
-  my $routine = shift;
-  my $flat = shift;
-  my $cumulative = shift;
-  my $start_addr = shift;
-  my $end_addr = shift;
-  my $html = shift;
-  my $output = shift;
-
-  # Disassemble all instructions (just to get line numbers)
-  my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr);
-  GetTopLevelLineNumbers($prog, $offset, \@instructions);
-
-  # Hack 1: assume that the first source file encountered in the
-  # disassembly contains the routine
-  my $filename = undef;
-  for (my $i = 0; $i <= $#instructions; $i++) {
-    if ($instructions[$i]->[2] >= 0) {
-      $filename = $instructions[$i]->[1];
-      last;
-    }
-  }
-  if (!defined($filename)) {
-    print STDERR "no filename found in $routine\n";
-    return 0;
-  }
-
-  # Hack 2: assume that the largest line number from $filename is the
-  # end of the procedure.  This is typically safe since if P1 contains
-  # an inlined call to P2, then P2 usually occurs earlier in the
-  # source file.  If this does not work, we might have to compute a
-  # density profile or just print all regions we find.
-  my $lastline = 0;
-  for (my $i = 0; $i <= $#instructions; $i++) {
-    my $f = $instructions[$i]->[1];
-    my $l = $instructions[$i]->[2];
-    if (($f eq $filename) && ($l > $lastline)) {
-      $lastline = $l;
-    }
-  }
-
-  # Hack 3: assume the first source location from "filename" is the start of
-  # the source code.
-  my $firstline = 1;
-  for (my $i = 0; $i <= $#instructions; $i++) {
-    if ($instructions[$i]->[1] eq $filename) {
-      $firstline = $instructions[$i]->[2];
-      last;
-    }
-  }
-
-  # Hack 4: Extend last line forward until its indentation is less than
-  # the indentation we saw on $firstline
-  my $oldlastline = $lastline;
-  {
-    if (!open(FILE, "<$filename")) {
-      print STDERR "$filename: $!\n";
-      return 0;
-    }
-    my $l = 0;
-    my $first_indentation = -1;
-    while (<FILE>) {
-      s/\r//g;         # turn windows-looking lines into unix-looking lines
-      $l++;
-      my $indent = Indentation($_);
-      if ($l >= $firstline) {
-        if ($first_indentation < 0 && $indent >= 0) {
-          $first_indentation = $indent;
-          last if ($first_indentation == 0);
-        }
-      }
-      if ($l >= $lastline && $indent >= 0) {
-        if ($indent >= $first_indentation) {
-          $lastline = $l+1;
-        } else {
-          last;
-        }
-      }
-    }
-    close(FILE);
-  }
-
-  # Assign all samples to the range $firstline,$lastline.
-  # Hack 5: If an instruction does not occur in the range, its samples
-  # are moved to the next instruction that occurs in the range.
-  my $samples1 = {};        # Map from line number to flat count
-  my $samples2 = {};        # Map from line number to cumulative count
-  my $running1 = 0;         # Unassigned flat counts
-  my $running2 = 0;         # Unassigned cumulative counts
-  my $total1 = 0;           # Total flat counts
-  my $total2 = 0;           # Total cumulative counts
-  my %disasm = ();          # Map from line number to disassembly
-  my $running_disasm = "";  # Unassigned disassembly
-  my $skip_marker = "---\n";
-  if ($html) {
-    $skip_marker = "";
-    for (my $l = $firstline; $l <= $lastline; $l++) {
-      $disasm{$l} = "";
-    }
-  }
-  my $last_dis_filename = '';
-  my $last_dis_linenum = -1;
-  my $last_touched_line = -1;  # To detect gaps in disassembly for a line
-  foreach my $e (@instructions) {
-    # Add up counts for all addresses that fall inside this instruction
-    my $c1 = 0;
-    my $c2 = 0;
-    for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
-      $c1 += GetEntry($flat, $a);
-      $c2 += GetEntry($cumulative, $a);
-    }
-
-    if ($html) {
-      my $dis = sprintf("      %6s %6s \t\t%8s: %s ",
-                        HtmlPrintNumber($c1),
-                        HtmlPrintNumber($c2),
-                        UnparseAddress($offset, $e->[0]),
-                        CleanDisassembly($e->[3]));
-
-      # Append the most specific source line associated with this instruction
-      if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) };
-      $dis = HtmlEscape($dis);
-      my $f = $e->[5];
-      my $l = $e->[6];
-      if ($f ne $last_dis_filename) {
-        $dis .= sprintf("<span class=disasmloc>%s:%d</span>",
-                        HtmlEscape(CleanFileName($f)), $l);
-      } elsif ($l ne $last_dis_linenum) {
-        # De-emphasize the unchanged file name portion
-        $dis .= sprintf("<span class=unimportant>%s</span>" .
-                        "<span class=disasmloc>:%d</span>",
-                        HtmlEscape(CleanFileName($f)), $l);
-      } else {
-        # De-emphasize the entire location
-        $dis .= sprintf("<span class=unimportant>%s:%d</span>",
-                        HtmlEscape(CleanFileName($f)), $l);
-      }
-      $last_dis_filename = $f;
-      $last_dis_linenum = $l;
-      $running_disasm .= $dis;
-      $running_disasm .= "\n";
-    }
-
-    $running1 += $c1;
-    $running2 += $c2;
-    $total1 += $c1;
-    $total2 += $c2;
-    my $file = $e->[1];
-    my $line = $e->[2];
-    if (($file eq $filename) &&
-        ($line >= $firstline) &&
-        ($line <= $lastline)) {
-      # Assign all accumulated samples to this line
-      AddEntry($samples1, $line, $running1);
-      AddEntry($samples2, $line, $running2);
-      $running1 = 0;
-      $running2 = 0;
-      if ($html) {
-        if ($line != $last_touched_line && $disasm{$line} ne '') {
-          $disasm{$line} .= "\n";
-        }
-        $disasm{$line} .= $running_disasm;
-        $running_disasm = '';
-        $last_touched_line = $line;
-      }
-    }
-  }
-
-  # Assign any leftover samples to $lastline
-  AddEntry($samples1, $lastline, $running1);
-  AddEntry($samples2, $lastline, $running2);
-  if ($html) {
-    if ($lastline != $last_touched_line && $disasm{$lastline} ne '') {
-      $disasm{$lastline} .= "\n";
-    }
-    $disasm{$lastline} .= $running_disasm;
-  }
-
-  if ($html) {
-    printf $output (
-      "<h1>%s</h1>%s\n<pre onClick=\"jeprof_toggle_asm()\">\n" .
-      "Total:%6s %6s (flat / cumulative %s)\n",
-      HtmlEscape(ShortFunctionName($routine)),
-      HtmlEscape(CleanFileName($filename)),
-      Unparse($total1),
-      Unparse($total2),
-      Units());
-  } else {
-    printf $output (
-      "ROUTINE ====================== %s in %s\n" .
-      "%6s %6s Total %s (flat / cumulative)\n",
-      ShortFunctionName($routine),
-      CleanFileName($filename),
-      Unparse($total1),
-      Unparse($total2),
-      Units());
-  }
-  if (!open(FILE, "<$filename")) {
-    print STDERR "$filename: $!\n";
-    return 0;
-  }
-  my $l = 0;
-  while (<FILE>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    $l++;
-    if ($l >= $firstline - 5 &&
-        (($l <= $oldlastline + 5) || ($l <= $lastline))) {
-      chop;
-      my $text = $_;
-      if ($l == $firstline) { print $output $skip_marker; }
-      my $n1 = GetEntry($samples1, $l);
-      my $n2 = GetEntry($samples2, $l);
-      if ($html) {
-        # Emit a span that has one of the following classes:
-        #    livesrc -- has samples
-        #    deadsrc -- has disassembly, but with no samples
-        #    nop     -- has no matching disassembly
-        # Also emit an optional span containing disassembly.
-        my $dis = $disasm{$l};
-        my $asm = "";
-        if (defined($dis) && $dis ne '') {
-          $asm = "<span class=\"asm\">" . $dis . "</span>";
-        }
-        my $source_class = (($n1 + $n2 > 0)
-                            ? "livesrc"
-                            : (($asm ne "") ? "deadsrc" : "nop"));
-        printf $output (
-          "<span class=\"line\">%5d</span> " .
-          "<span class=\"%s\">%6s %6s %s</span>%s\n",
-          $l, $source_class,
-          HtmlPrintNumber($n1),
-          HtmlPrintNumber($n2),
-          HtmlEscape($text),
-          $asm);
-      } else {
-        printf $output(
-          "%6s %6s %4d: %s\n",
-          UnparseAlt($n1),
-          UnparseAlt($n2),
-          $l,
-          $text);
-      }
-      if ($l == $lastline)  { print $output $skip_marker; }
-    };
-  }
-  close(FILE);
-  if ($html) {
-    print $output "</pre>\n";
-  }
-  return 1;
-}
-
-# Return the source line for the specified file/linenumber.
-# Returns undef if not found.
-sub SourceLine {
-  my $file = shift;
-  my $line = shift;
-
-  # Look in cache
-  if (!defined($main::source_cache{$file})) {
-    if (100 < scalar keys(%main::source_cache)) {
-      # Clear the cache when it gets too big
-      %main::source_cache = ();
-    }
-
-    # Read all lines from the file
-    if (!open(FILE, "<$file")) {
-      print STDERR "$file: $!\n";
-      $main::source_cache{$file} = [];  # Cache the negative result
-      return undef;
-    }
-    my $lines = [];
-    push(@{$lines}, "");        # So we can use 1-based line numbers as indices
-    while (<FILE>) {
-      push(@{$lines}, $_);
-    }
-    close(FILE);
-
-    # Save the lines in the cache
-    $main::source_cache{$file} = $lines;
-  }
-
-  my $lines = $main::source_cache{$file};
-  if (($line < 0) || ($line > $#{$lines})) {
-    return undef;
-  } else {
-    return $lines->[$line];
-  }
-}
-
-# Print disassembly for one routine with interspersed source if available
-sub PrintDisassembledFunction {
-  my $prog = shift;
-  my $offset = shift;
-  my $routine = shift;
-  my $flat = shift;
-  my $cumulative = shift;
-  my $start_addr = shift;
-  my $end_addr = shift;
-  my $total = shift;
-
-  # Disassemble all instructions
-  my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr);
-
-  # Make array of counts per instruction
-  my @flat_count = ();
-  my @cum_count = ();
-  my $flat_total = 0;
-  my $cum_total = 0;
-  foreach my $e (@instructions) {
-    # Add up counts for all addresses that fall inside this instruction
-    my $c1 = 0;
-    my $c2 = 0;
-    for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
-      $c1 += GetEntry($flat, $a);
-      $c2 += GetEntry($cumulative, $a);
-    }
-    push(@flat_count, $c1);
-    push(@cum_count, $c2);
-    $flat_total += $c1;
-    $cum_total += $c2;
-  }
-
-  # Print header with total counts
-  printf("ROUTINE ====================== %s\n" .
-         "%6s %6s %s (flat, cumulative) %.1f%% of total\n",
-         ShortFunctionName($routine),
-         Unparse($flat_total),
-         Unparse($cum_total),
-         Units(),
-         ($cum_total * 100.0) / $total);
-
-  # Process instructions in order
-  my $current_file = "";
-  for (my $i = 0; $i <= $#instructions; ) {
-    my $e = $instructions[$i];
-
-    # Print the new file name whenever we switch files
-    if ($e->[1] ne $current_file) {
-      $current_file = $e->[1];
-      my $fname = $current_file;
-      $fname =~ s|^\./||;   # Trim leading "./"
-
-      # Shorten long file names
-      if (length($fname) >= 58) {
-        $fname = "..." . substr($fname, -55);
-      }
-      printf("-------------------- %s\n", $fname);
-    }
-
-    # TODO: Compute range of lines to print together to deal with
-    # small reorderings.
-    my $first_line = $e->[2];
-    my $last_line = $first_line;
-    my %flat_sum = ();
-    my %cum_sum = ();
-    for (my $l = $first_line; $l <= $last_line; $l++) {
-      $flat_sum{$l} = 0;
-      $cum_sum{$l} = 0;
-    }
-
-    # Find run of instructions for this range of source lines
-    my $first_inst = $i;
-    while (($i <= $#instructions) &&
-           ($instructions[$i]->[2] >= $first_line) &&
-           ($instructions[$i]->[2] <= $last_line)) {
-      $e = $instructions[$i];
-      $flat_sum{$e->[2]} += $flat_count[$i];
-      $cum_sum{$e->[2]} += $cum_count[$i];
-      $i++;
-    }
-    my $last_inst = $i - 1;
-
-    # Print source lines
-    for (my $l = $first_line; $l <= $last_line; $l++) {
-      my $line = SourceLine($current_file, $l);
-      if (!defined($line)) {
-        $line = "?\n";
-        next;
-      } else {
-        $line =~ s/^\s+//;
-      }
-      printf("%6s %6s %5d: %s",
-             UnparseAlt($flat_sum{$l}),
-             UnparseAlt($cum_sum{$l}),
-             $l,
-             $line);
-    }
-
-    # Print disassembly
-    for (my $x = $first_inst; $x <= $last_inst; $x++) {
-      my $e = $instructions[$x];
-      printf("%6s %6s    %8s: %6s\n",
-             UnparseAlt($flat_count[$x]),
-             UnparseAlt($cum_count[$x]),
-             UnparseAddress($offset, $e->[0]),
-             CleanDisassembly($e->[3]));
-    }
-  }
-}
-
-# Print DOT graph
-sub PrintDot {
-  my $prog = shift;
-  my $symbols = shift;
-  my $raw = shift;
-  my $flat = shift;
-  my $cumulative = shift;
-  my $overall_total = shift;
-
-  # Get total
-  my $local_total = TotalProfile($flat);
-  my $nodelimit = int($main::opt_nodefraction * $local_total);
-  my $edgelimit = int($main::opt_edgefraction * $local_total);
-  my $nodecount = $main::opt_nodecount;
-
-  # Find nodes to include
-  my @list = (sort { abs(GetEntry($cumulative, $b)) <=>
-                     abs(GetEntry($cumulative, $a))
-                     || $a cmp $b }
-              keys(%{$cumulative}));
-  my $last = $nodecount - 1;
-  if ($last > $#list) {
-    $last = $#list;
-  }
-  while (($last >= 0) &&
-         (abs(GetEntry($cumulative, $list[$last])) <= $nodelimit)) {
-    $last--;
-  }
-  if ($last < 0) {
-    print STDERR "No nodes to print\n";
-    return 0;
-  }
-
-  if ($nodelimit > 0 || $edgelimit > 0) {
-    printf STDERR ("Dropping nodes with <= %s %s; edges with <= %s abs(%s)\n",
-                   Unparse($nodelimit), Units(),
-                   Unparse($edgelimit), Units());
-  }
-
-  # Open DOT output file
-  my $output;
-  my $escaped_dot = ShellEscape(@DOT);
-  my $escaped_ps2pdf = ShellEscape(@PS2PDF);
-  if ($main::opt_gv) {
-    my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "ps"));
-    $output = "| $escaped_dot -Tps2 >$escaped_outfile";
-  } elsif ($main::opt_evince) {
-    my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "pdf"));
-    $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - $escaped_outfile";
-  } elsif ($main::opt_ps) {
-    $output = "| $escaped_dot -Tps2";
-  } elsif ($main::opt_pdf) {
-    $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - -";
-  } elsif ($main::opt_web || $main::opt_svg) {
-    # We need to post-process the SVG, so write to a temporary file always.
-    my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "svg"));
-    $output = "| $escaped_dot -Tsvg >$escaped_outfile";
-  } elsif ($main::opt_gif) {
-    $output = "| $escaped_dot -Tgif";
-  } else {
-    $output = ">&STDOUT";
-  }
-  open(DOT, $output) || error("$output: $!\n");
-
-  # Title
-  printf DOT ("digraph \"%s; %s %s\" {\n",
-              $prog,
-              Unparse($overall_total),
-              Units());
-  if ($main::opt_pdf) {
-    # The output is more printable if we set the page size for dot.
-    printf DOT ("size=\"8,11\"\n");
-  }
-  printf DOT ("node [width=0.375,height=0.25];\n");
-
-  # Print legend
-  printf DOT ("Legend [shape=box,fontsize=24,shape=plaintext," .
-              "label=\"%s\\l%s\\l%s\\l%s\\l%s\\l\"];\n",
-              $prog,
-              sprintf("Total %s: %s", Units(), Unparse($overall_total)),
-              sprintf("Focusing on: %s", Unparse($local_total)),
-              sprintf("Dropped nodes with <= %s abs(%s)",
-                      Unparse($nodelimit), Units()),
-              sprintf("Dropped edges with <= %s %s",
-                      Unparse($edgelimit), Units())
-              );
-
-  # Print nodes
-  my %node = ();
-  my $nextnode = 1;
-  foreach my $a (@list[0..$last]) {
-    # Pick font size
-    my $f = GetEntry($flat, $a);
-    my $c = GetEntry($cumulative, $a);
-
-    my $fs = 8;
-    if ($local_total > 0) {
-      $fs = 8 + (50.0 * sqrt(abs($f * 1.0 / $local_total)));
-    }
-
-    $node{$a} = $nextnode++;
-    my $sym = $a;
-    $sym =~ s/\s+/\\n/g;
-    $sym =~ s/::/\\n/g;
-
-    # Extra cumulative info to print for non-leaves
-    my $extra = "";
-    if ($f != $c) {
-      $extra = sprintf("\\rof %s (%s)",
-                       Unparse($c),
-                       Percent($c, $local_total));
-    }
-    my $style = "";
-    if ($main::opt_heapcheck) {
-      if ($f > 0) {
-        # make leak-causing nodes more visible (add a background)
-        $style = ",style=filled,fillcolor=gray"
-      } elsif ($f < 0) {
-        # make anti-leak-causing nodes (which almost never occur)
-        # stand out as well (triple border)
-        $style = ",peripheries=3"
-      }
-    }
-
-    printf DOT ("N%d [label=\"%s\\n%s (%s)%s\\r" .
-                "\",shape=box,fontsize=%.1f%s];\n",
-                $node{$a},
-                $sym,
-                Unparse($f),
-                Percent($f, $local_total),
-                $extra,
-                $fs,
-                $style,
-               );
-  }
-
-  # Get edges and counts per edge
-  my %edge = ();
-  my $n;
-  my $fullname_to_shortname_map = {};
-  FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map);
-  foreach my $k (keys(%{$raw})) {
-    # TODO: omit low %age edges
-    $n = $raw->{$k};
-    my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k);
-    for (my $i = 1; $i <= $#translated; $i++) {
-      my $src = $translated[$i];
-      my $dst = $translated[$i-1];
-      #next if ($src eq $dst);  # Avoid self-edges?
-      if (exists($node{$src}) && exists($node{$dst})) {
-        my $edge_label = "$src\001$dst";
-        if (!exists($edge{$edge_label})) {
-          $edge{$edge_label} = 0;
-        }
-        $edge{$edge_label} += $n;
-      }
-    }
-  }
-
-  # Print edges (process in order of decreasing counts)
-  my %indegree = ();   # Number of incoming edges added per node so far
-  my %outdegree = ();  # Number of outgoing edges added per node so far
-  foreach my $e (sort { $edge{$b} <=> $edge{$a} } keys(%edge)) {
-    my @x = split(/\001/, $e);
-    $n = $edge{$e};
-
-    # Initialize degree of kept incoming and outgoing edges if necessary
-    my $src = $x[0];
-    my $dst = $x[1];
-    if (!exists($outdegree{$src})) { $outdegree{$src} = 0; }
-    if (!exists($indegree{$dst})) { $indegree{$dst} = 0; }
-
-    my $keep;
-    if ($indegree{$dst} == 0) {
-      # Keep edge if needed for reachability
-      $keep = 1;
-    } elsif (abs($n) <= $edgelimit) {
-      # Drop if we are below --edgefraction
-      $keep = 0;
-    } elsif ($outdegree{$src} >= $main::opt_maxdegree ||
-             $indegree{$dst} >= $main::opt_maxdegree) {
-      # Keep limited number of in/out edges per node
-      $keep = 0;
-    } else {
-      $keep = 1;
-    }
-
-    if ($keep) {
-      $outdegree{$src}++;
-      $indegree{$dst}++;
-
-      # Compute line width based on edge count
-      my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0);
-      if ($fraction > 1) { $fraction = 1; }
-      my $w = $fraction * 2;
-      if ($w < 1 && ($main::opt_web || $main::opt_svg)) {
-        # SVG output treats line widths < 1 poorly.
-        $w = 1;
-      }
-
-      # Dot sometimes segfaults if given edge weights that are too large, so
-      # we cap the weights at a large value
-      my $edgeweight = abs($n) ** 0.7;
-      if ($edgeweight > 100000) { $edgeweight = 100000; }
-      $edgeweight = int($edgeweight);
-
-      my $style = sprintf("setlinewidth(%f)", $w);
-      if ($x[1] =~ m/\(inline\)/) {
-        $style .= ",dashed";
-      }
-
-      # Use a slightly squashed function of the edge count as the weight
-      printf DOT ("N%s -> N%s [label=%s, weight=%d, style=\"%s\"];\n",
-                  $node{$x[0]},
-                  $node{$x[1]},
-                  Unparse($n),
-                  $edgeweight,
-                  $style);
-    }
-  }
-
-  print DOT ("}\n");
-  close(DOT);
-
-  if ($main::opt_web || $main::opt_svg) {
-    # Rewrite SVG to be more usable inside web browser.
-    RewriteSvg(TempName($main::next_tmpfile, "svg"));
-  }
-
-  return 1;
-}
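#!/usr/bin/env perl
# Editorial sketch, not part of the deleted jeprof source: the edge-styling
# arithmetic used by PrintDot above (line width and dot edge weight), pulled
# out so it can be run standalone.  The per-edge counts below are invented.
use strict;
use warnings;

my $local_total = 1000;          # total count in the focused profile
foreach my $n (5, 120, 900) {    # hypothetical per-edge counts
  # Line width: 3*(n/total), clamped to [0,1], then scaled to at most 2 points.
  my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0);
  $fraction = 1 if $fraction > 1;
  my $w = $fraction * 2;

  # Edge weight: a squashed n**0.7, capped so dot does not choke on huge values.
  my $edgeweight = abs($n) ** 0.7;
  $edgeweight = 100000 if $edgeweight > 100000;
  $edgeweight = int($edgeweight);

  printf("count=%4d  width=%.2f  weight=%d\n", $n, $w, $edgeweight);
}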
-
-sub RewriteSvg {
-  my $svgfile = shift;
-
-  open(SVG, $svgfile) || die "open temp svg: $!";
-  my @svg = <SVG>;
-  close(SVG);
-  unlink $svgfile;
-  my $svg = join('', @svg);
-
-  # Dot's SVG output is
-  #
-  #    <svg width="___" height="___"
-  #     viewBox="___" xmlns=...>
-  #    <g id="graph0" transform="...">
-  #    ...
-  #    </g>
-  #    </svg>
-  #
-  # Change it to
-  #
-  #    <svg width="100%" height="100%"
-  #     xmlns=...>
-  #    $svg_javascript
-  #    <g id="viewport" transform="translate(0,0)">
-  #    <g id="graph0" transform="...">
-  #    ...
-  #    </g>
-  #    </g>
-  #    </svg>
-
-  # Fix width, height; drop viewBox.
-  $svg =~ s/(?s)<svg width="[^"]+" height="[^"]+"(.*?)viewBox="[^"]+"/<svg width="100%" height="100%"$1/;
-
-  # Insert script, viewport <g> above first <g>
-  my $svg_javascript = SvgJavascript();
-  my $viewport = "<g id=\"viewport\" transform=\"translate(0,0)\">\n";
-  $svg =~ s/<g id="graph\d"/$svg_javascript$viewport$&/;
-
-  # Insert final </g> above </svg>.
-  $svg =~ s/(.*)(<\/svg>)/$1<\/g>$2/;
-  $svg =~ s/<g id="graph\d"(.*?)/<g id="viewport"$1/;
-
-  if ($main::opt_svg) {
-    # --svg: write to standard output.
-    print $svg;
-  } else {
-    # Write back to temporary file.
-    open(SVG, ">$svgfile") || die "open $svgfile: $!";
-    print SVG $svg;
-    close(SVG);
-  }
-}
-
-sub SvgJavascript {
-  return <<'EOF';
-<script type="text/ecmascript"><![CDATA[
-// SVGPan
-// http://www.cyberz.org/blog/2009/12/08/svgpan-a-javascript-svg-panzoomdrag-library/
-// Local modification: if(true || ...) below to force panning, never moving.
-
-/**
- *  SVGPan library 1.2
- * ====================
- *
- * Given a unique existing element with id "viewport", including
- * the library into any SVG adds the following capabilities:
- *
- *  - Mouse panning
- *  - Mouse zooming (using the wheel)
- *  - Object dragging
- *
- * Known issues:
- *
- *  - Zooming (while panning) on Safari has still some issues
- *
- * Releases:
- *
- * 1.2, Sat Mar 20 08:42:50 GMT 2010, Zeng Xiaohui
- *	Fixed a bug with browser mouse handler interaction
- *
- * 1.1, Wed Feb  3 17:39:33 GMT 2010, Zeng Xiaohui
- *	Updated the zoom code to support the mouse wheel on Safari/Chrome
- *
- * 1.0, Andrea Leofreddi
- *	First release
- *
- * This code is licensed under the following BSD license:
- *
- * Copyright 2009-2010 Andrea Leofreddi <a.leofreddi@itcharm.com>. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without modification, are
- * permitted provided that the following conditions are met:
- *
- *    1. Redistributions of source code must retain the above copyright notice, this list of
- *       conditions and the following disclaimer.
- *
- *    2. Redistributions in binary form must reproduce the above copyright notice, this list
- *       of conditions and the following disclaimer in the documentation and/or other materials
- *       provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY Andrea Leofreddi ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
- * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Andrea Leofreddi OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are those of the
- * authors and should not be interpreted as representing official policies, either expressed
- * or implied, of Andrea Leofreddi.
- */
-
-var root = document.documentElement;
-
-var state = 'none', stateTarget, stateOrigin, stateTf;
-
-setupHandlers(root);
-
-/**
- * Register handlers
- */
-function setupHandlers(root){
-	setAttributes(root, {
-		"onmouseup" : "add(evt)",
-		"onmousedown" : "handleMouseDown(evt)",
-		"onmousemove" : "handleMouseMove(evt)",
-		"onmouseup" : "handleMouseUp(evt)",
-		//"onmouseout" : "handleMouseUp(evt)", // Uncomment this to stop the pan functionality when dragging out of the SVG element
-	});
-
-	if(navigator.userAgent.toLowerCase().indexOf('webkit') >= 0)
-		window.addEventListener('mousewheel', handleMouseWheel, false); // Chrome/Safari
-	else
-		window.addEventListener('DOMMouseScroll', handleMouseWheel, false); // Others
-
-	var g = svgDoc.getElementById("svg");
-	g.width = "100%";
-	g.height = "100%";
-}
-
-/**
- * Instantiate an SVGPoint object with the given event coordinates.
- */
-function getEventPoint(evt) {
-	var p = root.createSVGPoint();
-
-	p.x = evt.clientX;
-	p.y = evt.clientY;
-
-	return p;
-}
-
-/**
- * Sets the current transform matrix of an element.
- */
-function setCTM(element, matrix) {
-	var s = "matrix(" + matrix.a + "," + matrix.b + "," + matrix.c + "," + matrix.d + "," + matrix.e + "," + matrix.f + ")";
-
-	element.setAttribute("transform", s);
-}
-
-/**
- * Dumps a matrix to a string (useful for debug).
- */
-function dumpMatrix(matrix) {
-	var s = "[ " + matrix.a + ", " + matrix.c + ", " + matrix.e + "\n  " + matrix.b + ", " + matrix.d + ", " + matrix.f + "\n  0, 0, 1 ]";
-
-	return s;
-}
-
-/**
- * Sets attributes of an element.
- */
-function setAttributes(element, attributes){
-	for (i in attributes)
-		element.setAttributeNS(null, i, attributes[i]);
-}
-
-/**
- * Handle mouse wheel event.
- */
-function handleMouseWheel(evt) {
-	if(evt.preventDefault)
-		evt.preventDefault();
-
-	evt.returnValue = false;
-
-	var svgDoc = evt.target.ownerDocument;
-
-	var delta;
-
-	if(evt.wheelDelta)
-		delta = evt.wheelDelta / 3600; // Chrome/Safari
-	else
-		delta = evt.detail / -90; // Mozilla
-
-	var z = 1 + delta; // Zoom factor: 0.9/1.1
-
-	var g = svgDoc.getElementById("viewport");
-
-	var p = getEventPoint(evt);
-
-	p = p.matrixTransform(g.getCTM().inverse());
-
-	// Compute new scale matrix in current mouse position
-	var k = root.createSVGMatrix().translate(p.x, p.y).scale(z).translate(-p.x, -p.y);
-
-        setCTM(g, g.getCTM().multiply(k));
-
-	stateTf = stateTf.multiply(k.inverse());
-}
-
-/**
- * Handle mouse move event.
- */
-function handleMouseMove(evt) {
-	if(evt.preventDefault)
-		evt.preventDefault();
-
-	evt.returnValue = false;
-
-	var svgDoc = evt.target.ownerDocument;
-
-	var g = svgDoc.getElementById("viewport");
-
-	if(state == 'pan') {
-		// Pan mode
-		var p = getEventPoint(evt).matrixTransform(stateTf);
-
-		setCTM(g, stateTf.inverse().translate(p.x - stateOrigin.x, p.y - stateOrigin.y));
-	} else if(state == 'move') {
-		// Move mode
-		var p = getEventPoint(evt).matrixTransform(g.getCTM().inverse());
-
-		setCTM(stateTarget, root.createSVGMatrix().translate(p.x - stateOrigin.x, p.y - stateOrigin.y).multiply(g.getCTM().inverse()).multiply(stateTarget.getCTM()));
-
-		stateOrigin = p;
-	}
-}
-
-/**
- * Handle click event.
- */
-function handleMouseDown(evt) {
-	if(evt.preventDefault)
-		evt.preventDefault();
-
-	evt.returnValue = false;
-
-	var svgDoc = evt.target.ownerDocument;
-
-	var g = svgDoc.getElementById("viewport");
-
-	if(true || evt.target.tagName == "svg") {
-		// Pan mode
-		state = 'pan';
-
-		stateTf = g.getCTM().inverse();
-
-		stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
-	} else {
-		// Move mode
-		state = 'move';
-
-		stateTarget = evt.target;
-
-		stateTf = g.getCTM().inverse();
-
-		stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
-	}
-}
-
-/**
- * Handle mouse button release event.
- */
-function handleMouseUp(evt) {
-	if(evt.preventDefault)
-		evt.preventDefault();
-
-	evt.returnValue = false;
-
-	var svgDoc = evt.target.ownerDocument;
-
-	if(state == 'pan' || state == 'move') {
-		// Quit pan mode
-		state = '';
-	}
-}
-
-]]></script>
-EOF
-}
-
-# Provides a map from fullname to shortname for cases where the
-# shortname is ambiguous.  The symlist has both the fullname and
-# shortname for all symbols, which is usually fine, but sometimes --
-# as with overloaded functions -- two different fullnames can map to
-# the same shortname.  In that case, we use the address of the
-# function to disambiguate the two.  This function fills in a map that
-# maps fullnames to modified shortnames in such cases.  If a fullname
-# is not present in the map, the 'normal' shortname provided by the
-# symlist is the appropriate one to use.
-sub FillFullnameToShortnameMap {
-  my $symbols = shift;
-  my $fullname_to_shortname_map = shift;
-  my $shortnames_seen_once = {};
-  my $shortnames_seen_more_than_once = {};
-
-  foreach my $symlist (values(%{$symbols})) {
-    # TODO(csilvers): deal with inlined symbols too.
-    my $shortname = $symlist->[0];
-    my $fullname = $symlist->[2];
-    if ($fullname !~ /<[0-9a-fA-F]+>$/) {  # fullname doesn't end in an address
-      next;       # the only collisions we care about are when addresses differ
-    }
-    if (defined($shortnames_seen_once->{$shortname}) &&
-        $shortnames_seen_once->{$shortname} ne $fullname) {
-      $shortnames_seen_more_than_once->{$shortname} = 1;
-    } else {
-      $shortnames_seen_once->{$shortname} = $fullname;
-    }
-  }
-
-  foreach my $symlist (values(%{$symbols})) {
-    my $shortname = $symlist->[0];
-    my $fullname = $symlist->[2];
-    # TODO(csilvers): take in a list of addresses we care about, and only
-    # store in the map if $symlist->[1] is in that list.  Saves space.
-    next if defined($fullname_to_shortname_map->{$fullname});
-    if (defined($shortnames_seen_more_than_once->{$shortname})) {
-      if ($fullname =~ /<0*([^>]*)>$/) {   # fullname has address at end of it
-        $fullname_to_shortname_map->{$fullname} = "$shortname\@$1";
-      }
-    }
-  }
-}
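# Editorial sketch, not part of the deleted jeprof source: a minimal,
# self-contained demo of the shortname-disambiguation idea used by
# FillFullnameToShortnameMap above.  The symbol data is invented; each entry
# is [shortname, fileline, fullname], and the fullname is assumed to end in
# an address in angle brackets.
use strict;
use warnings;

my $symbols = {
  '4005d0' => ['Foo::bar', 'foo.cc:10', 'Foo::bar(int)<00000000004005d0>'],
  '400640' => ['Foo::bar', 'foo.cc:20', 'Foo::bar(double)<0000000000400640>'],
};

my (%seen_once, %seen_twice, %map);
for my $sym (values %$symbols) {
  my ($short, $full) = ($sym->[0], $sym->[2]);
  next unless $full =~ /<[0-9a-fA-F]+>$/;        # only address-suffixed names collide
  if (defined $seen_once{$short} && $seen_once{$short} ne $full) {
    $seen_twice{$short} = 1;
  } else {
    $seen_once{$short} = $full;
  }
}
for my $sym (values %$symbols) {
  my ($short, $full) = ($sym->[0], $sym->[2]);
  if ($seen_twice{$short} && $full =~ /<0*([^>]*)>$/) {
    $map{$full} = "$short\@$1";                  # e.g. "Foo::bar@4005d0"
  }
}
print "$_ => $map{$_}\n" for sort keys %map;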
-
-# Return a small number that identifies the argument.
-# Multiple calls with the same argument will return the same number.
-# Calls with different arguments will return different numbers.
-sub ShortIdFor {
-  my $key = shift;
-  my $id = $main::uniqueid{$key};
-  if (!defined($id)) {
-    $id = keys(%main::uniqueid) + 1;
-    $main::uniqueid{$key} = $id;
-  }
-  return $id;
-}
-
-# Translate a stack of addresses into a stack of symbols
-sub TranslateStack {
-  my $symbols = shift;
-  my $fullname_to_shortname_map = shift;
-  my $k = shift;
-
-  my @addrs = split(/\n/, $k);
-  my @result = ();
-  for (my $i = 0; $i <= $#addrs; $i++) {
-    my $a = $addrs[$i];
-
-    # Skip large addresses since they sometimes show up as fake entries on RH9
-    if (length($a) > 8 && $a gt "7fffffffffffffff") {
-      next;
-    }
-
-    if ($main::opt_disasm || $main::opt_list) {
-      # We want just the address for the key
-      push(@result, $a);
-      next;
-    }
-
-    my $symlist = $symbols->{$a};
-    if (!defined($symlist)) {
-      $symlist = [$a, "", $a];
-    }
-
-    # We can have a sequence of symbols for a particular entry
-    # (more than one symbol in the case of inlining).  Callers
-    # come before callees in symlist, so walk backwards since
-    # the translated stack should contain callees before callers.
-    for (my $j = $#{$symlist}; $j >= 2; $j -= 3) {
-      my $func = $symlist->[$j-2];
-      my $fileline = $symlist->[$j-1];
-      my $fullfunc = $symlist->[$j];
-      if (defined($fullname_to_shortname_map->{$fullfunc})) {
-        $func = $fullname_to_shortname_map->{$fullfunc};
-      }
-      if ($j > 2) {
-        $func = "$func (inline)";
-      }
-
-      # Do not merge nodes corresponding to Callback::Run since that
-      # causes confusing cycles in dot display.  Instead, we synthesize
-      # a unique name for this frame per caller.
-      if ($func =~ m/Callback.*::Run$/) {
-        my $caller = ($i > 0) ? $addrs[$i-1] : 0;
-        $func = "Run#" . ShortIdFor($caller);
-      }
-
-      if ($main::opt_addresses) {
-        push(@result, "$a $func $fileline");
-      } elsif ($main::opt_lines) {
-        if ($func eq '??' && $fileline eq '??:0') {
-          push(@result, "$a");
-        } else {
-          push(@result, "$func $fileline");
-        }
-      } elsif ($main::opt_functions) {
-        if ($func eq '??') {
-          push(@result, "$a");
-        } else {
-          push(@result, $func);
-        }
-      } elsif ($main::opt_files) {
-        if ($fileline eq '??:0' || $fileline eq '') {
-          push(@result, "$a");
-        } else {
-          my $f = $fileline;
-          $f =~ s/:\d+$//;
-          push(@result, $f);
-        }
-      } else {
-        push(@result, $a);
-        last;  # Do not print inlined info
-      }
-    }
-  }
-
-  # print join(",", @addrs), " => ", join(",", @result), "\n";
-  return @result;
-}
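# Editorial sketch, not part of the deleted jeprof source: how a symlist with
# an inlined frame is unwound, mirroring the backwards walk in TranslateStack
# above.  The triplets are [shortname, fileline, fullname] with callers before
# callees; all names and filelines here are invented.
use strict;
use warnings;

my $symlist = [
  'outer', 'a.cc:12', 'outer()',   # caller
  'inner', 'b.cc:34', 'inner()',   # callee; the code above marks this frame as inlined
];

my @frames;
for (my $j = $#{$symlist}; $j >= 2; $j -= 3) {
  my $func = $symlist->[$j - 2];
  $func = "$func (inline)" if $j > 2;   # only the outermost triplet escapes the suffix
  push @frames, $func;
}
print join(" <- ", @frames), "\n";      # "inner (inline) <- outer"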
-
-# Generate percent string for a number and a total
-sub Percent {
-  my $num = shift;
-  my $tot = shift;
-  if ($tot != 0) {
-    return sprintf("%.1f%%", $num * 100.0 / $tot);
-  } else {
-    return ($num == 0) ? "nan" : (($num > 0) ? "+inf" : "-inf");
-  }
-}
-
-# Generate pretty-printed form of number
-sub Unparse {
-  my $num = shift;
-  if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
-    if ($main::opt_inuse_objects || $main::opt_alloc_objects) {
-      return sprintf("%d", $num);
-    } else {
-      if ($main::opt_show_bytes) {
-        return sprintf("%d", $num);
-      } else {
-        return sprintf("%.1f", $num / 1048576.0);
-      }
-    }
-  } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) {
-    return sprintf("%.3f", $num / 1e9); # Convert nanoseconds to seconds
-  } else {
-    return sprintf("%d", $num);
-  }
-}
-
-# Alternate pretty-printed form: 0 maps to "."
-sub UnparseAlt {
-  my $num = shift;
-  if ($num == 0) {
-    return ".";
-  } else {
-    return Unparse($num);
-  }
-}
-
-# Alternate pretty-printed form: 0 maps to ""
-sub HtmlPrintNumber {
-  my $num = shift;
-  if ($num == 0) {
-    return "";
-  } else {
-    return Unparse($num);
-  }
-}
-
-# Return output units
-sub Units {
-  if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
-    if ($main::opt_inuse_objects || $main::opt_alloc_objects) {
-      return "objects";
-    } else {
-      if ($main::opt_show_bytes) {
-        return "B";
-      } else {
-        return "MB";
-      }
-    }
-  } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) {
-    return "seconds";
-  } else {
-    return "samples";
-  }
-}
-
-##### Profile manipulation code #####
-
-# Generate flattened profile:
-# If count is charged to stack [a,b,c,d], in generated profile,
-# it will be charged to [a]
-sub FlatProfile {
-  my $profile = shift;
-  my $result = {};
-  foreach my $k (keys(%{$profile})) {
-    my $count = $profile->{$k};
-    my @addrs = split(/\n/, $k);
-    if ($#addrs >= 0) {
-      AddEntry($result, $addrs[0], $count);
-    }
-  }
-  return $result;
-}
-
-# Generate cumulative profile:
-# If count is charged to stack [a,b,c,d], in generated profile,
-# it will be charged to [a], [b], [c], [d]
-sub CumulativeProfile {
-  my $profile = shift;
-  my $result = {};
-  foreach my $k (keys(%{$profile})) {
-    my $count = $profile->{$k};
-    my @addrs = split(/\n/, $k);
-    foreach my $a (@addrs) {
-      AddEntry($result, $a, $count);
-    }
-  }
-  return $result;
-}
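# Editorial sketch, not part of the deleted jeprof source: flat vs. cumulative
# charging side by side.  The profile key is a newline-joined stack with the
# leaf first, exactly as FlatProfile/CumulativeProfile above expect; the
# addresses and count are invented.
use strict;
use warnings;

my $profile = { join("\n", qw(a b c d)) => 7 };   # 7 samples on stack [a,b,c,d]

my (%flat, %cum);
while (my ($stack, $count) = each %$profile) {
  my @addrs = split(/\n/, $stack);
  $flat{$addrs[0]} += $count;          # leaf only
  $cum{$_} += $count for @addrs;       # every frame on the stack
}
print "flat: ", join(", ", map { "$_=$flat{$_}" } sort keys %flat), "\n";
print "cum:  ", join(", ", map { "$_=$cum{$_}"  } sort keys %cum),  "\n";
# flat: a=7
# cum:  a=7, b=7, c=7, d=7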
-
-# If the second-youngest PC on the stack is always the same, returns
-# that pc.  Otherwise, returns undef.
-sub IsSecondPcAlwaysTheSame {
-  my $profile = shift;
-
-  my $second_pc = undef;
-  foreach my $k (keys(%{$profile})) {
-    my @addrs = split(/\n/, $k);
-    if ($#addrs < 1) {
-      return undef;
-    }
-    if (not defined $second_pc) {
-      $second_pc = $addrs[1];
-    } else {
-      if ($second_pc ne $addrs[1]) {
-        return undef;
-      }
-    }
-  }
-  return $second_pc;
-}
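# Editorial sketch, not part of the deleted jeprof source: a profile in which
# every stack's second-youngest PC is the same (here "deadbeef"), as a CPU
# profiler signal-handler artifact would produce.  Addresses and counts are
# invented.
use strict;
use warnings;

my $profile = {
  join("\n", qw(aaaa deadbeef cccc)) => 3,
  join("\n", qw(bbbb deadbeef dddd)) => 5,
};

my $second;
for my $stack (keys %$profile) {
  my @addrs = split(/\n/, $stack);
  if    (!defined $second)        { $second = $addrs[1]; }
  elsif ($second ne $addrs[1])    { $second = undef; last; }
}
print defined $second ? "second PC always $second; strip it\n"
                      : "second PCs differ; keep stacks as-is\n";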
-
-sub ExtractSymbolLocation {
-  my $symbols = shift;
-  my $address = shift;
-  # 'addr2line' outputs "??:0" for unknown locations; we do the
-  # same to be consistent.
-  my $location = "??:0:unknown";
-  if (exists $symbols->{$address}) {
-    my $file = $symbols->{$address}->[1];
-    if ($file eq "?") {
-      $file = "??:0"
-    }
-    $location = $file . ":" . $symbols->{$address}->[0];
-  }
-  return $location;
-}
-
-# Extracts a graph of calls.
-sub ExtractCalls {
-  my $symbols = shift;
-  my $profile = shift;
-
-  my $calls = {};
-  while( my ($stack_trace, $count) = each %$profile ) {
-    my @address = split(/\n/, $stack_trace);
-    my $destination = ExtractSymbolLocation($symbols, $address[0]);
-    AddEntry($calls, $destination, $count);
-    for (my $i = 1; $i <= $#address; $i++) {
-      my $source = ExtractSymbolLocation($symbols, $address[$i]);
-      my $call = "$source -> $destination";
-      AddEntry($calls, $call, $count);
-      $destination = $source;
-    }
-  }
-
-  return $calls;
-}
-
-sub FilterFrames {
-  my $symbols = shift;
-  my $profile = shift;
-
-  if ($main::opt_retain eq '' && $main::opt_exclude eq '') {
-    return $profile;
-  }
-
-  my $result = {};
-  foreach my $k (keys(%{$profile})) {
-    my $count = $profile->{$k};
-    my @addrs = split(/\n/, $k);
-    my @path = ();
-    foreach my $a (@addrs) {
-      my $sym;
-      if (exists($symbols->{$a})) {
-        $sym = $symbols->{$a}->[0];
-      } else {
-        $sym = $a;
-      }
-      if ($main::opt_retain ne '' && $sym !~ m/$main::opt_retain/) {
-        next;
-      }
-      if ($main::opt_exclude ne '' && $sym =~ m/$main::opt_exclude/) {
-        next;
-      }
-      push(@path, $a);
-    }
-    if (scalar(@path) > 0) {
-      my $reduced_path = join("\n", @path);
-      AddEntry($result, $reduced_path, $count);
-    }
-  }
-
-  return $result;
-}
-
-sub RemoveUninterestingFrames {
-  my $symbols = shift;
-  my $profile = shift;
-
-  # List of function names to skip
-  my %skip = ();
-  my $skip_regexp = 'NOMATCH';
-  if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
-    foreach my $name ('calloc',
-                      'cfree',
-                      'malloc',
-                      'free',
-                      'memalign',
-                      'posix_memalign',
-                      'aligned_alloc',
-                      'pvalloc',
-                      'valloc',
-                      'realloc',
-                      'mallocx', # jemalloc
-                      'rallocx', # jemalloc
-                      'xallocx', # jemalloc
-                      'dallocx', # jemalloc
-                      'sdallocx', # jemalloc
-                      'tc_calloc',
-                      'tc_cfree',
-                      'tc_malloc',
-                      'tc_free',
-                      'tc_memalign',
-                      'tc_posix_memalign',
-                      'tc_pvalloc',
-                      'tc_valloc',
-                      'tc_realloc',
-                      'tc_new',
-                      'tc_delete',
-                      'tc_newarray',
-                      'tc_deletearray',
-                      'tc_new_nothrow',
-                      'tc_newarray_nothrow',
-                      'do_malloc',
-                      '::do_malloc',   # new name -- got moved to an unnamed ns
-                      '::do_malloc_or_cpp_alloc',
-                      'DoSampledAllocation',
-                      'simple_alloc::allocate',
-                      '__malloc_alloc_template::allocate',
-                      '__builtin_delete',
-                      '__builtin_new',
-                      '__builtin_vec_delete',
-                      '__builtin_vec_new',
-                      'operator new',
-                      'operator new[]',
-                      # The entry to our memory-allocation routines on OS X
-                      'malloc_zone_malloc',
-                      'malloc_zone_calloc',
-                      'malloc_zone_valloc',
-                      'malloc_zone_realloc',
-                      'malloc_zone_memalign',
-                      'malloc_zone_free',
-                      # These mark the beginning/end of our custom sections
-                      '__start_google_malloc',
-                      '__stop_google_malloc',
-                      '__start_malloc_hook',
-                      '__stop_malloc_hook') {
-      $skip{$name} = 1;
-      $skip{"_" . $name} = 1;   # Mach (OS X) adds a _ prefix to everything
-    }
-    # TODO: Remove TCMalloc once everything has been
-    # moved into the tcmalloc:: namespace and we have flushed
-    # old code out of the system.
-    $skip_regexp = "TCMalloc|^tcmalloc::";
-  } elsif ($main::profile_type eq 'contention') {
-    foreach my $vname ('base::RecordLockProfileData',
-                       'base::SubmitMutexProfileData',
-                       'base::SubmitSpinLockProfileData',
-                       'Mutex::Unlock',
-                       'Mutex::UnlockSlow',
-                       'Mutex::ReaderUnlock',
-                       'MutexLock::~MutexLock',
-                       'SpinLock::Unlock',
-                       'SpinLock::SlowUnlock',
-                       'SpinLockHolder::~SpinLockHolder') {
-      $skip{$vname} = 1;
-    }
-  } elsif ($main::profile_type eq 'cpu') {
-    # Drop signal handlers used for CPU profile collection
-    # TODO(dpeng): this should not be necessary; it's taken
-    # care of by the general 2nd-pc mechanism below.
-    foreach my $name ('ProfileData::Add',           # historical
-                      'ProfileData::prof_handler',  # historical
-                      'CpuProfiler::prof_handler',
-                      '__FRAME_END__',
-                      '__pthread_sighandler',
-                      '__restore') {
-      $skip{$name} = 1;
-    }
-  } else {
-    # Nothing skipped for unknown types
-  }
-
-  if ($main::profile_type eq 'cpu') {
-    # If all the second-youngest program counters are the same,
-    # this STRONGLY suggests that it is an artifact of measurement,
-    # i.e., stack frames pushed by the CPU profiler signal handler.
-    # Hence, we delete them.
-    # (The topmost PC is read from the signal structure, not from
-    # the stack, so it does not get involved.)
-    while (my $second_pc = IsSecondPcAlwaysTheSame($profile)) {
-      my $result = {};
-      my $func = '';
-      if (exists($symbols->{$second_pc})) {
-        $second_pc = $symbols->{$second_pc}->[0];
-      }
-      print STDERR "Removing $second_pc from all stack traces.\n";
-      foreach my $k (keys(%{$profile})) {
-        my $count = $profile->{$k};
-        my @addrs = split(/\n/, $k);
-        splice @addrs, 1, 1;
-        my $reduced_path = join("\n", @addrs);
-        AddEntry($result, $reduced_path, $count);
-      }
-      $profile = $result;
-    }
-  }
-
-  my $result = {};
-  foreach my $k (keys(%{$profile})) {
-    my $count = $profile->{$k};
-    my @addrs = split(/\n/, $k);
-    my @path = ();
-    foreach my $a (@addrs) {
-      if (exists($symbols->{$a})) {
-        my $func = $symbols->{$a}->[0];
-        if ($skip{$func} || ($func =~ m/$skip_regexp/)) {
-          # Throw away the portion of the backtrace seen so far, under the
-          # assumption that previous frames were for functions internal to the
-          # allocator.
-          @path = ();
-          next;
-        }
-      }
-      push(@path, $a);
-    }
-    my $reduced_path = join("\n", @path);
-    AddEntry($result, $reduced_path, $count);
-  }
-
-  $result = FilterFrames($symbols, $result);
-
-  return $result;
-}
-
-# Reduce profile to granularity given by user
-sub ReduceProfile {
-  my $symbols = shift;
-  my $profile = shift;
-  my $result = {};
-  my $fullname_to_shortname_map = {};
-  FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map);
-  foreach my $k (keys(%{$profile})) {
-    my $count = $profile->{$k};
-    my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k);
-    my @path = ();
-    my %seen = ();
-    $seen{''} = 1;      # So that empty keys are skipped
-    foreach my $e (@translated) {
-      # To avoid double-counting due to recursion, skip a stack-trace
-      # entry if it has already been seen
-      if (!$seen{$e}) {
-        $seen{$e} = 1;
-        push(@path, $e);
-      }
-    }
-    my $reduced_path = join("\n", @path);
-    AddEntry($result, $reduced_path, $count);
-  }
-  return $result;
-}
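# Editorial sketch, not part of the deleted jeprof source: the dedup step in
# ReduceProfile above, which keeps only the first occurrence of a symbol in a
# stack so recursive frames are not double-counted.  Names are invented.
use strict;
use warnings;

my @translated = ('leaf', 'recurse', 'recurse', 'recurse', 'main');
my (%seen, @path);
$seen{''} = 1;                       # so empty entries are skipped
for my $e (@translated) {
  next if $seen{$e}++;
  push @path, $e;
}
print join(" <- ", @path), "\n";     # "leaf <- recurse <- main"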
-
-# Does the specified symbol array match the regexp?
-sub SymbolMatches {
-  my $sym = shift;
-  my $re = shift;
-  if (defined($sym)) {
-    for (my $i = 0; $i < $#{$sym}; $i += 3) {
-      if ($sym->[$i] =~ m/$re/ || $sym->[$i+1] =~ m/$re/) {
-        return 1;
-      }
-    }
-  }
-  return 0;
-}
-
-# Focus only on paths involving specified regexps
-sub FocusProfile {
-  my $symbols = shift;
-  my $profile = shift;
-  my $focus = shift;
-  my $result = {};
-  foreach my $k (keys(%{$profile})) {
-    my $count = $profile->{$k};
-    my @addrs = split(/\n/, $k);
-    foreach my $a (@addrs) {
-      # Keep this stack if the frame matches the address, shortname, or fileline
-      if (($a =~ m/$focus/) || SymbolMatches($symbols->{$a}, $focus)) {
-        AddEntry($result, $k, $count);
-        last;
-      }
-    }
-  }
-  return $result;
-}
-
-# Focus only on paths not involving specified regexps
-sub IgnoreProfile {
-  my $symbols = shift;
-  my $profile = shift;
-  my $ignore = shift;
-  my $result = {};
-  foreach my $k (keys(%{$profile})) {
-    my $count = $profile->{$k};
-    my @addrs = split(/\n/, $k);
-    my $matched = 0;
-    foreach my $a (@addrs) {
-      # Flag the stack for removal if the frame matches the address, shortname, or fileline
-      if (($a =~ m/$ignore/) || SymbolMatches($symbols->{$a}, $ignore)) {
-        $matched = 1;
-        last;
-      }
-    }
-    if (!$matched) {
-      AddEntry($result, $k, $count);
-    }
-  }
-  return $result;
-}
-
-# Get total count in profile
-sub TotalProfile {
-  my $profile = shift;
-  my $result = 0;
-  foreach my $k (keys(%{$profile})) {
-    $result += $profile->{$k};
-  }
-  return $result;
-}
-
-# Add A to B
-sub AddProfile {
-  my $A = shift;
-  my $B = shift;
-
-  my $R = {};
-  # add all keys in A
-  foreach my $k (keys(%{$A})) {
-    my $v = $A->{$k};
-    AddEntry($R, $k, $v);
-  }
-  # add all keys in B
-  foreach my $k (keys(%{$B})) {
-    my $v = $B->{$k};
-    AddEntry($R, $k, $v);
-  }
-  return $R;
-}
-
-# Merges symbol maps
-sub MergeSymbols {
-  my $A = shift;
-  my $B = shift;
-
-  my $R = {};
-  foreach my $k (keys(%{$A})) {
-    $R->{$k} = $A->{$k};
-  }
-  if (defined($B)) {
-    foreach my $k (keys(%{$B})) {
-      $R->{$k} = $B->{$k};
-    }
-  }
-  return $R;
-}
-
-
-# Add A to B
-sub AddPcs {
-  my $A = shift;
-  my $B = shift;
-
-  my $R = {};
-  # add all keys in A
-  foreach my $k (keys(%{$A})) {
-    $R->{$k} = 1
-  }
-  # add all keys in B
-  foreach my $k (keys(%{$B})) {
-    $R->{$k} = 1
-  }
-  return $R;
-}
-
-# Subtract B from A
-sub SubtractProfile {
-  my $A = shift;
-  my $B = shift;
-
-  my $R = {};
-  foreach my $k (keys(%{$A})) {
-    my $v = $A->{$k} - GetEntry($B, $k);
-    if ($v < 0 && $main::opt_drop_negative) {
-      $v = 0;
-    }
-    AddEntry($R, $k, $v);
-  }
-  if (!$main::opt_drop_negative) {
-    # Take care of when subtracted profile has more entries
-    foreach my $k (keys(%{$B})) {
-      if (!exists($A->{$k})) {
-        AddEntry($R, $k, 0 - $B->{$k});
-      }
-    }
-  }
-  return $R;
-}
-
-# Get entry from profile; zero if not present
-sub GetEntry {
-  my $profile = shift;
-  my $k = shift;
-  if (exists($profile->{$k})) {
-    return $profile->{$k};
-  } else {
-    return 0;
-  }
-}
-
-# Add entry to specified profile
-sub AddEntry {
-  my $profile = shift;
-  my $k = shift;
-  my $n = shift;
-  if (!exists($profile->{$k})) {
-    $profile->{$k} = 0;
-  }
-  $profile->{$k} += $n;
-}
-
-# Add a stack of entries to specified profile, and add them to the $pcs
-# list.
-sub AddEntries {
-  my $profile = shift;
-  my $pcs = shift;
-  my $stack = shift;
-  my $count = shift;
-  my @k = ();
-
-  foreach my $e (split(/\s+/, $stack)) {
-    my $pc = HexExtend($e);
-    $pcs->{$pc} = 1;
-    push @k, $pc;
-  }
-  AddEntry($profile, (join "\n", @k), $count);
-}
-
-##### Code to profile a server dynamically #####
-
-sub CheckSymbolPage {
-  my $url = SymbolPageURL();
-  my $command = ShellEscape(@URL_FETCHER, $url);
-  open(SYMBOL, "$command |") or error($command);
-  my $line = <SYMBOL>;
-  $line =~ s/\r//g;         # turn windows-looking lines into unix-looking lines
-  close(SYMBOL);
-  unless (defined($line)) {
-    error("$url doesn't exist\n");
-  }
-
-  if ($line =~ /^num_symbols:\s+(\d+)$/) {
-    if ($1 == 0) {
-      error("Stripped binary. No symbols available.\n");
-    }
-  } else {
-    error("Failed to get the number of symbols from $url\n");
-  }
-}
-
-sub IsProfileURL {
-  my $profile_name = shift;
-  if (-f $profile_name) {
-    printf STDERR "Using local file $profile_name.\n";
-    return 0;
-  }
-  return 1;
-}
-
-sub ParseProfileURL {
-  my $profile_name = shift;
-
-  if (!defined($profile_name) || $profile_name eq "") {
-    return ();
-  }
-
-  # Split profile URL - matches all non-empty strings, so no test.
-  $profile_name =~ m,^(https?://)?([^/]+)(.*?)(/|$PROFILES)?$,;
-
-  my $proto = $1 || "http://";
-  my $hostport = $2;
-  my $prefix = $3;
-  my $profile = $4 || "/";
-
-  my $host = $hostport;
-  $host =~ s/:.*//;
-
-  my $baseurl = "$proto$hostport$prefix";
-  return ($host, $baseurl, $profile);
-}
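# Editorial sketch, not part of the deleted jeprof source: what the URL split
# in ParseProfileURL above produces for a typical argument.  The host, port
# and path are invented, and the $PROFILES alternation is approximated here by
# a single fixed /pprof/heap path.
use strict;
use warnings;

my $profile_name = "http://myhost:8080/service/pprof/heap";
$profile_name =~ m,^(https?://)?([^/]+)(.*?)(/pprof/heap)?$,;
my ($proto, $hostport, $prefix, $profile) = ($1 || "http://", $2, $3, $4 || "/");
(my $host = $hostport) =~ s/:.*//;
print "host=$host baseurl=$proto$hostport$prefix profile=$profile\n";
# host=myhost baseurl=http://myhost:8080/service profile=/pprof/heap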
-
-# We fetch symbols from the first profile argument.
-sub SymbolPageURL {
-  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
-  return "$baseURL$SYMBOL_PAGE";
-}
-
-sub FetchProgramName() {
-  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
-  my $url = "$baseURL$PROGRAM_NAME_PAGE";
-  my $command_line = ShellEscape(@URL_FETCHER, $url);
-  open(CMDLINE, "$command_line |") or error($command_line);
-  my $cmdline = <CMDLINE>;
-  $cmdline =~ s/\r//g;   # turn windows-looking lines into unix-looking lines
-  close(CMDLINE);
-  error("Failed to get program name from $url\n") unless defined($cmdline);
-  $cmdline =~ s/\x00.+//;  # Remove argv[1] and later arguments.
-  $cmdline =~ s!\n!!g;  # Remove LFs.
-  return $cmdline;
-}
-
-# curl's -L (--location) option isn't reliable, at least as of
-# version 7.12.3: curl forgets to post data when it follows a
-# redirection.  This function works around that by resolving the
-# redirection ourselves.  Redirection happens on borg hosts.
-sub ResolveRedirectionForCurl {
-  my $url = shift;
-  my $command_line = ShellEscape(@URL_FETCHER, "--head", $url);
-  open(CMDLINE, "$command_line |") or error($command_line);
-  while (<CMDLINE>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    if (/^Location: (.*)/) {
-      $url = $1;
-    }
-  }
-  close(CMDLINE);
-  return $url;
-}
-
-# Add a timeout flag to URL_FETCHER.  Returns a new list.
-sub AddFetchTimeout {
-  my $timeout = shift;
-  my @fetcher = @_;
-  if (defined($timeout)) {
-    if (join(" ", @fetcher) =~ m/\bcurl -s/) {
-      push(@fetcher, "--max-time", sprintf("%d", $timeout));
-    } elsif (join(" ", @fetcher) =~ m/\brpcget\b/) {
-      push(@fetcher, sprintf("--deadline=%d", $timeout));
-    }
-  }
-  return @fetcher;
-}
-
-# Reads a symbol map from the file handle given as the first argument,
-# returning the resulting symbol map.  Also processes variables relating
-# to symbols.
-# Currently, the only variable processed is 'binary=<value>' which updates
-# $main::prog to have the correct program name.
-sub ReadSymbols {
-  my $in = shift;
-  my $map = {};
-  while (<$in>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    # Removes all the leading zeroes from the symbols, see comment below.
-    if (m/^0x0*([0-9a-f]+)\s+(.+)/) {
-      $map->{$1} = $2;
-    } elsif (m/^---/) {
-      last;
-    } elsif (m/^([a-z][^=]*)=(.*)$/ ) {
-      my ($variable, $value) = ($1, $2);
-      for ($variable, $value) {
-        s/^\s+//;
-        s/\s+$//;
-      }
-      if ($variable eq "binary") {
-        if ($main::prog ne $UNKNOWN_BINARY && $main::prog ne $value) {
-          printf STDERR ("Warning: Mismatched binary name '%s', using '%s'.\n",
-                         $main::prog, $value);
-        }
-        $main::prog = $value;
-      } else {
-        printf STDERR ("Ignoring unknown variable in symbols list: " .
-            "'%s' = '%s'\n", $variable, $value);
-      }
-    }
-  }
-  return $map;
-}
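# Editorial sketch, not part of the deleted jeprof source: the shape of input
# ReadSymbols above accepts -- optional "variable=value" lines, then
# "0x<addr> <name>" lines, terminated by a line of dashes.  The contents of
# the sample block are invented.
use strict;
use warnings;

my $sample = <<'END';
binary=/bin/demo
0x00000000004005d0 Foo::bar(int)
0x0000000000400640 main
---
END

open(my $in, '<', \$sample) or die "open in-memory handle: $!";
my %map;
while (<$in>) {
  if    (m/^0x0*([0-9a-f]+)\s+(.+)/) { $map{$1} = $2; }   # strip leading zeroes
  elsif (m/^---/)                    { last; }
  # (variable lines such as "binary=..." are ignored in this sketch)
}
close($in);
print "$_ => $map{$_}\n" for sort keys %map;
# 4005d0 => Foo::bar(int)
# 400640 => main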
-
-sub URLEncode {
-  my $str = shift;
-  $str =~ s/([^A-Za-z0-9\-_.!~*'()])/ sprintf "%%%02x", ord $1 /eg;
-  return $str;
-}
-
-sub AppendSymbolFilterParams {
-  my $url = shift;
-  my @params = ();
-  if ($main::opt_retain ne '') {
-    push(@params, sprintf("retain=%s", URLEncode($main::opt_retain)));
-  }
-  if ($main::opt_exclude ne '') {
-    push(@params, sprintf("exclude=%s", URLEncode($main::opt_exclude)));
-  }
-  if (scalar @params > 0) {
-    $url = sprintf("%s?%s", $url, join("&", @params));
-  }
-  return $url;
-}
-
-# Fetches and processes symbols to prepare them for use in the profile output
-# code.  If the optional 'symbol_map' arg is not given, fetches symbols from
-# $SYMBOL_PAGE for all PC values found in profile.  Otherwise, the raw symbols
-# are assumed to have already been fetched into 'symbol_map' and are simply
-# extracted and processed.
-sub FetchSymbols {
-  my $pcset = shift;
-  my $symbol_map = shift;
-
-  my %seen = ();
-  my @pcs = grep { !$seen{$_}++ } keys(%$pcset);  # uniq
-
-  if (!defined($symbol_map)) {
-    my $post_data = join("+", sort((map {"0x" . "$_"} @pcs)));
-
-    open(POSTFILE, ">$main::tmpfile_sym");
-    print POSTFILE $post_data;
-    close(POSTFILE);
-
-    my $url = SymbolPageURL();
-
-    my $command_line;
-    if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) {
-      $url = ResolveRedirectionForCurl($url);
-      $url = AppendSymbolFilterParams($url);
-      $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym",
-                                  $url);
-    } else {
-      $url = AppendSymbolFilterParams($url);
-      $command_line = (ShellEscape(@URL_FETCHER, "--post", $url)
-                       . " < " . ShellEscape($main::tmpfile_sym));
-    }
-    # We use c++filt in case $SYMBOL_PAGE gives us mangled symbols.
-    my $escaped_cppfilt = ShellEscape($obj_tool_map{"c++filt"});
-    open(SYMBOL, "$command_line | $escaped_cppfilt |") or error($command_line);
-    $symbol_map = ReadSymbols(*SYMBOL{IO});
-    close(SYMBOL);
-  }
-
-  my $symbols = {};
-  foreach my $pc (@pcs) {
-    my $fullname;
-    # For 64-bit binaries, symbols are extracted with 8 leading zeroes.
-    # Then /symbol reads the long symbols in as uint64, and outputs
-    # the result with a "0x%08llx" format which gets rid of the zeroes.
-    # By removing all the leading zeroes in both $pc and the symbols from
-    # /symbol, the symbols match and are retrievable from the map.
-    my $shortpc = $pc;
-    $shortpc =~ s/^0*//;
-    # Each line may have a list of names, which includes the function
-    # and also other functions it has inlined.  They are separated (in
-    # PrintSymbolizedProfile) by "--", which is illegal in function names.
-    my $fullnames;
-    if (defined($symbol_map->{$shortpc})) {
-      $fullnames = $symbol_map->{$shortpc};
-    } else {
-      $fullnames = "0x" . $pc;  # Just use addresses
-    }
-    my $sym = [];
-    $symbols->{$pc} = $sym;
-    foreach my $fullname (split("--", $fullnames)) {
-      my $name = ShortFunctionName($fullname);
-      push(@{$sym}, $name, "?", $fullname);
-    }
-  }
-  return $symbols;
-}
-
-sub BaseName {
-  my $file_name = shift;
-  $file_name =~ s!^.*/!!;  # Remove directory name
-  return $file_name;
-}
-
-sub MakeProfileBaseName {
-  my ($binary_name, $profile_name) = @_;
-  my ($host, $baseURL, $path) = ParseProfileURL($profile_name);
-  my $binary_shortname = BaseName($binary_name);
-  return sprintf("%s.%s.%s",
-                 $binary_shortname, $main::op_time, $host);
-}
-
-sub FetchDynamicProfile {
-  my $binary_name = shift;
-  my $profile_name = shift;
-  my $fetch_name_only = shift;
-  my $encourage_patience = shift;
-
-  if (!IsProfileURL($profile_name)) {
-    return $profile_name;
-  } else {
-    my ($host, $baseURL, $path) = ParseProfileURL($profile_name);
-    if ($path eq "" || $path eq "/") {
-      # Missing type specifier defaults to cpu-profile
-      $path = $PROFILE_PAGE;
-    }
-
-    my $profile_file = MakeProfileBaseName($binary_name, $profile_name);
-
-    my $url = "$baseURL$path";
-    my $fetch_timeout = undef;
-    if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE/) {
-      if ($path =~ m/[?]/) {
-        $url .= "&";
-      } else {
-        $url .= "?";
-      }
-      $url .= sprintf("seconds=%d", $main::opt_seconds);
-      $fetch_timeout = $main::opt_seconds * 1.01 + 60;
-      # Set $profile_type for consumption by PrintSymbolizedProfile.
-      $main::profile_type = 'cpu';
-    } else {
-      # For non-CPU profiles, we add a type-extension to
-      # the target profile file name.
-      my $suffix = $path;
-      $suffix =~ s,/,.,g;
-      $profile_file .= $suffix;
-      # Set $profile_type for consumption by PrintSymbolizedProfile.
-      if ($path =~ m/$HEAP_PAGE/) {
-        $main::profile_type = 'heap';
-      } elsif ($path =~ m/$GROWTH_PAGE/) {
-        $main::profile_type = 'growth';
-      } elsif ($path =~ m/$CONTENTION_PAGE/) {
-        $main::profile_type = 'contention';
-      }
-    }
-
-    my $profile_dir = $ENV{"JEPROF_TMPDIR"} || ($ENV{HOME} . "/jeprof");
-    if (! -d $profile_dir) {
-      mkdir($profile_dir)
-          || die("Unable to create profile directory $profile_dir: $!\n");
-    }
-    my $tmp_profile = "$profile_dir/.tmp.$profile_file";
-    my $real_profile = "$profile_dir/$profile_file";
-
-    if ($fetch_name_only > 0) {
-      return $real_profile;
-    }
-
-    my @fetcher = AddFetchTimeout($fetch_timeout, @URL_FETCHER);
-    my $cmd = ShellEscape(@fetcher, $url) . " > " . ShellEscape($tmp_profile);
-    if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE|$CENSUSPROFILE_PAGE/){
-      print STDERR "Gathering CPU profile from $url for $main::opt_seconds seconds to\n  ${real_profile}\n";
-      if ($encourage_patience) {
-        print STDERR "Be patient...\n";
-      }
-    } else {
-      print STDERR "Fetching $path profile from $url to\n  ${real_profile}\n";
-    }
-
-    (system($cmd) == 0) || error("Failed to get profile: $cmd: $!\n");
-    (system("mv", $tmp_profile, $real_profile) == 0) || error("Unable to rename profile\n");
-    print STDERR "Wrote profile to $real_profile\n";
-    $main::collected_profile = $real_profile;
-    return $main::collected_profile;
-  }
-}
-
-# Collect profiles in parallel
-sub FetchDynamicProfiles {
-  my $items = scalar(@main::pfile_args);
-  my $levels = log($items) / log(2);
-
-  if ($items == 1) {
-    $main::profile_files[0] = FetchDynamicProfile($main::prog, $main::pfile_args[0], 0, 1);
-  } else {
-    # guard against rounding error from the log() above
-    if ((2 ** $levels) < $items) {
-     $levels++;
-    }
-    my $count = scalar(@main::pfile_args);
-    for (my $i = 0; $i < $count; $i++) {
-      $main::profile_files[$i] = FetchDynamicProfile($main::prog, $main::pfile_args[$i], 1, 0);
-    }
-    print STDERR "Fetching $count profiles, Be patient...\n";
-    FetchDynamicProfilesRecurse($levels, 0, 0);
-    $main::collected_profile = join(" \\\n    ", @main::profile_files);
-  }
-}
-
-# Recursively fork a process to get enough processes
-# collecting profiles
-sub FetchDynamicProfilesRecurse {
-  my $maxlevel = shift;
-  my $level = shift;
-  my $position = shift;
-
-  if (my $pid = fork()) {
-    $position = 0 | ($position << 1);
-    TryCollectProfile($maxlevel, $level, $position);
-    wait;
-  } else {
-    $position = 1 | ($position << 1);
-    TryCollectProfile($maxlevel, $level, $position);
-    cleanup();
-    exit(0);
-  }
-}
-
-# Collect a single profile
-sub TryCollectProfile {
-  my $maxlevel = shift;
-  my $level = shift;
-  my $position = shift;
-
-  if ($level >= ($maxlevel - 1)) {
-    if ($position < scalar(@main::pfile_args)) {
-      FetchDynamicProfile($main::prog, $main::pfile_args[$position], 0, 0);
-    }
-  } else {
-    FetchDynamicProfilesRecurse($maxlevel, $level+1, $position);
-  }
-}
-
-##### Parsing code #####
-
-# Provide a small streaming-read module to handle very large
-# cpu-profile files.  Stream in chunks along a sliding window.
-# Provides an interface to get one 'slot', correctly handling
-# endian-ness differences.  A slot is one 32-bit or 64-bit word
-# (depending on the input profile).  We tell endianness and bit-size
-# for the profile by looking at the first 8 bytes: in cpu profiles,
-# the second slot is always 3 (we'll accept anything that's not 0).
-BEGIN {
-  package CpuProfileStream;
-
-  sub new {
-    my ($class, $file, $fname) = @_;
-    my $self = { file        => $file,
-                 base        => 0,
-                 stride      => 512 * 1024,   # must be a multiple of bitsize/8
-                 slots       => [],
-                 unpack_code => "",           # N for big-endian, V for little
-                 perl_is_64bit => 1,          # matters if profile is 64-bit
-    };
-    bless $self, $class;
-    # Let unittests adjust the stride
-    if ($main::opt_test_stride > 0) {
-      $self->{stride} = $main::opt_test_stride;
-    }
-    # Read the first two slots to figure out bitsize and endianness.
-    my $slots = $self->{slots};
-    my $str;
-    read($self->{file}, $str, 8);
-    # Set the global $address_length based on what we see here.
-    # 8 is 32-bit (8 hexadecimal chars); 16 is 64-bit (16 hexadecimal chars).
-    $address_length = ($str eq (chr(0)x8)) ? 16 : 8;
-    if ($address_length == 8) {
-      if (substr($str, 6, 2) eq chr(0)x2) {
-        $self->{unpack_code} = 'V';  # Little-endian.
-      } elsif (substr($str, 4, 2) eq chr(0)x2) {
-        $self->{unpack_code} = 'N';  # Big-endian
-      } else {
-        ::error("$fname: header size >= 2**16\n");
-      }
-      @$slots = unpack($self->{unpack_code} . "*", $str);
-    } else {
-      # If we're a 64-bit profile, check if we're a 64-bit-capable
-      # perl.  Otherwise, each slot will be represented as a float
-      # instead of an int64, losing precision and making all the
-      # 64-bit addresses wrong.  We won't complain yet, but will
-      # later if we ever see a value that doesn't fit in 32 bits.
-      my $has_q = 0;
-      eval { $has_q = pack("Q", "1") ? 1 : 1; };
-      if (!$has_q) {
-        $self->{perl_is_64bit} = 0;
-      }
-      read($self->{file}, $str, 8);
-      if (substr($str, 4, 4) eq chr(0)x4) {
-        # We'd love to use 'Q', but it's a) not universal, b) not endian-proof.
-        $self->{unpack_code} = 'V';  # Little-endian.
-      } elsif (substr($str, 0, 4) eq chr(0)x4) {
-        $self->{unpack_code} = 'N';  # Big-endian
-      } else {
-        ::error("$fname: header size >= 2**32\n");
-      }
-      my @pair = unpack($self->{unpack_code} . "*", $str);
-      # Since we know one of the pair is 0, it's fine to just add them.
-      @$slots = (0, $pair[0] + $pair[1]);
-    }
-    return $self;
-  }
-
-  # Load more data when we access slots->get(X) which is not yet in memory.
-  sub overflow {
-    my ($self) = @_;
-    my $slots = $self->{slots};
-    $self->{base} += $#$slots + 1;   # skip over data we're replacing
-    my $str;
-    read($self->{file}, $str, $self->{stride});
-    if ($address_length == 8) {      # the 32-bit case
-      # This is the easy case: unpack provides 32-bit unpacking primitives.
-      @$slots = unpack($self->{unpack_code} . "*", $str);
-    } else {
-      # We need to unpack 32 bits at a time and combine.
-      my @b32_values = unpack($self->{unpack_code} . "*", $str);
-      my @b64_values = ();
-      for (my $i = 0; $i < $#b32_values; $i += 2) {
-        # TODO(csilvers): if this is a 32-bit perl, the math below
-        #    could end up in a too-large int, which perl will promote
-        #    to a double, losing necessary precision.  Deal with that.
-        #    Right now, we just die.
-        my ($lo, $hi) = ($b32_values[$i], $b32_values[$i+1]);
-        if ($self->{unpack_code} eq 'N') {    # big-endian
-          ($lo, $hi) = ($hi, $lo);
-        }
-        my $value = $lo + $hi * (2**32);
-        if (!$self->{perl_is_64bit} &&   # check value is exactly represented
-            (($value % (2**32)) != $lo || int($value / (2**32)) != $hi)) {
-          ::error("Need a 64-bit perl to process this 64-bit profile.\n");
-        }
-        push(@b64_values, $value);
-      }
-      @$slots = @b64_values;
-    }
-  }
-
-  # Access the i-th long in the file (logically), or -1 at EOF.
-  sub get {
-    my ($self, $idx) = @_;
-    my $slots = $self->{slots};
-    while ($#$slots >= 0) {
-      if ($idx < $self->{base}) {
-        # The only time we expect a reference to $slots[$i - something]
-        # after referencing $slots[$i] is reading the very first header.
-        # Since $stride > |header|, that shouldn't cause any lookback
-        # errors.  And everything after the header is sequential.
-        print STDERR "Unexpected look-back reading CPU profile";
-        return -1;   # shrug, don't know what better to return
-      } elsif ($idx > $self->{base} + $#$slots) {
-        $self->overflow();
-      } else {
-        return $slots->[$idx - $self->{base}];
-      }
-    }
-    # If we get here, $slots is [], which means we've reached EOF
-    return -1;  # unique since slots is supposed to hold unsigned numbers
-  }
-}
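# Editorial sketch, not part of the deleted jeprof source: how the first 8
# bytes of a CPU profile reveal word size and endianness, mirroring the checks
# in CpuProfileStream::new above.  The sample headers are fabricated: a valid
# CPU profile starts with a zero slot followed by a small non-zero slot.
use strict;
use warnings;

for my $case (["little-endian 32-bit", pack("V2", 0, 3)],
              ["big-endian 32-bit",    pack("N2", 0, 3)],
              ["64-bit (either end)",  pack("x8")]) {
  my ($label, $str) = @$case;
  my $address_length = ($str eq (chr(0) x 8)) ? 16 : 8;   # all-zero first word => 64-bit slots
  my $unpack_code = '?';
  if ($address_length == 8) {
    # For 32-bit profiles the second slot (value 3) sits in bytes 4..7; the
    # zero half of that word tells us which end holds the significant byte.
    $unpack_code = (substr($str, 6, 2) eq chr(0) x 2) ? 'V' : 'N';
  }
  printf("%-22s -> %d hex digits per slot, unpack '%s'\n",
         $label, $address_length, $unpack_code);
}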
-
-# Reads the top, 'header' section of a profile, and returns the last
-# line of the header, commonly called a 'header line'.  The header
-# section of a profile consists of zero or more 'command' lines that
-# are instructions to jeprof, which jeprof executes when reading the
-# header.  All 'command' lines start with a %.  After the command
-# lines is the 'header line', which is a profile-specific line that
-# indicates what type of profile it is, and perhaps other global
-# information about the profile.  For instance, here's a header line
-# for a heap profile:
-#   heap profile:     53:    38236 [  5525:  1284029] @ heapprofile
-# For historical reasons, the CPU profile does not contain a text-
-# readable header line.  If the profile looks like a CPU profile,
-# this function returns "".  If no header line could be found, this
-# function returns undef.
-#
-# The following commands are recognized:
-#   %warn -- emit the rest of this line to stderr, prefixed by 'WARNING:'
-#
-# The input file should be in binmode.
-sub ReadProfileHeader {
-  local *PROFILE = shift;
-  my $firstchar = "";
-  my $line = "";
-  read(PROFILE, $firstchar, 1);
-  seek(PROFILE, -1, 1);                    # unread the firstchar
-  if ($firstchar !~ /[[:print:]]/) {       # is not a text character
-    return "";
-  }
-  while (defined($line = <PROFILE>)) {
-    $line =~ s/\r//g;   # turn windows-looking lines into unix-looking lines
-    if ($line =~ /^%warn\s+(.*)/) {        # 'warn' command
-      # Note this matches both '%warn blah\n' and '%warn\n'.
-      print STDERR "WARNING: $1\n";        # print the rest of the line
-    } elsif ($line =~ /^%/) {
-      print STDERR "Ignoring unknown command from profile header: $line";
-    } else {
-      # End of commands, must be the header line.
-      return $line;
-    }
-  }
-  return undef;     # got to EOF without seeing a header line
-}
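# Editorial sketch, not part of the deleted jeprof source: the header layout
# ReadProfileHeader above expects -- zero or more %-command lines, then the
# header line itself.  The sample text is invented except for the heap header
# line, which follows the format quoted in the comment above.
use strict;
use warnings;

my $sample = <<'END';
%warn profile taken during startup
heap profile:     53:    38236 [  5525:  1284029] @ heapprofile
0x4005d0 0x400640
END

open(my $fh, '<', \$sample) or die "open in-memory handle: $!";
my $header;
while (my $line = <$fh>) {
  if    ($line =~ /^%warn\s+(.*)/) { print STDERR "WARNING: $1\n"; }
  elsif ($line =~ /^%/)            { print STDERR "Ignoring unknown command: $line"; }
  else                             { $header = $line; last; }
}
close($fh);
print "header line: $header";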
-
-sub IsSymbolizedProfileFile {
-  my $file_name = shift;
-  if (!(-e $file_name) || !(-r $file_name)) {
-    return 0;
-  }
-  # Check if the file contains a symbol-section marker.
-  open(TFILE, "<$file_name");
-  binmode TFILE;
-  my $firstline = ReadProfileHeader(*TFILE);
-  close(TFILE);
-  if (!$firstline) {
-    return 0;
-  }
-  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-  my $symbol_marker = $&;
-  return $firstline =~ /^--- *$symbol_marker/;
-}
-
-# Parse profile generated by common/profiler.cc and return a reference
-# to a map:
-#      $result->{version}     Version number of profile file
-#      $result->{period}      Sampling period (in microseconds)
-#      $result->{profile}     Profile object
-#      $result->{threads}     Map of thread IDs to profile objects
-#      $result->{map}         Memory map info from profile
-#      $result->{pcs}         Hash of all PC values seen, key is hex address
-sub ReadProfile {
-  my $prog = shift;
-  my $fname = shift;
-  my $result;            # return value
-
-  $CONTENTION_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-  my $contention_marker = $&;
-  $GROWTH_PAGE  =~ m,[^/]+$,;    # matches everything after the last slash
-  my $growth_marker = $&;
-  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-  my $symbol_marker = $&;
-  $PROFILE_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-  my $profile_marker = $&;
-  $HEAP_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
-  my $heap_marker = $&;
-
-  # Look at first line to see if it is a heap or a CPU profile.
-  # CPU profile may start with no header at all, and just binary data
-  # (starting with \0\0\0\0) -- in that case, don't try to read the
-  # whole firstline, since it may be gigabytes(!) of data.
-  open(PROFILE, "<$fname") || error("$fname: $!\n");
-  binmode PROFILE;      # New perls do UTF-8 processing
-  my $header = ReadProfileHeader(*PROFILE);
-  if (!defined($header)) {   # means "at EOF"
-    error("Profile is empty.\n");
-  }
-
-  my $symbols;
-  if ($header =~ m/^--- *$symbol_marker/o) {
-    # Verify that the user asked for a symbolized profile
-    if (!$main::use_symbolized_profile) {
-      # we have both a binary and a symbolized profile; abort
-      error("FATAL ERROR: Symbolized profile\n   $fname\ncannot be used with " .
-            "a binary arg. Try again without passing\n   $prog\n");
-    }
-    # Read the symbol section of the symbolized profile file.
-    $symbols = ReadSymbols(*PROFILE{IO});
-    # Read the next line to get the header for the remaining profile.
-    $header = ReadProfileHeader(*PROFILE) || "";
-  }
-
-  if ($header =~ m/^--- *($heap_marker|$growth_marker)/o) {
-    # Skip "--- ..." line for profile types that have their own headers.
-    $header = ReadProfileHeader(*PROFILE) || "";
-  }
-
-  $main::profile_type = '';
-
-  if ($header =~ m/^heap profile:.*$growth_marker/o) {
-    $main::profile_type = 'growth';
-    $result =  ReadHeapProfile($prog, *PROFILE, $header);
-  } elsif ($header =~ m/^heap profile:/) {
-    $main::profile_type = 'heap';
-    $result =  ReadHeapProfile($prog, *PROFILE, $header);
-  } elsif ($header =~ m/^heap/) {
-    $main::profile_type = 'heap';
-    $result = ReadThreadedHeapProfile($prog, $fname, $header);
-  } elsif ($header =~ m/^--- *$contention_marker/o) {
-    $main::profile_type = 'contention';
-    $result = ReadSynchProfile($prog, *PROFILE);
-  } elsif ($header =~ m/^--- *Stacks:/) {
-    print STDERR
-      "Old format contention profile: mistakenly reports " .
-      "condition variable signals as lock contentions.\n";
-    $main::profile_type = 'contention';
-    $result = ReadSynchProfile($prog, *PROFILE);
-  } elsif ($header =~ m/^--- *$profile_marker/) {
-    # the binary cpu profile data starts immediately after this line
-    $main::profile_type = 'cpu';
-    $result = ReadCPUProfile($prog, $fname, *PROFILE);
-  } else {
-    if (defined($symbols)) {
-      # a symbolized profile contains a format we don't recognize, bail out
-      error("$fname: Cannot recognize profile section after symbols.\n");
-    }
-    # no ascii header present -- must be a CPU profile
-    $main::profile_type = 'cpu';
-    $result = ReadCPUProfile($prog, $fname, *PROFILE);
-  }
-
-  close(PROFILE);
-
-  # if we got symbols along with the profile, return those as well
-  if (defined($symbols)) {
-    $result->{symbols} = $symbols;
-  }
-
-  return $result;
-}
-
-# Subtract one from caller pc so we map back to call instr.
-# However, don't do this if we're reading a symbolized profile
-# file, in which case the subtract-one was done when the file
-# was written.
-#
-# We apply the same logic to all readers, though ReadCPUProfile uses an
-# independent implementation.
-sub FixCallerAddresses {
-  my $stack = shift;
-  # --raw/http: Always subtract one from pc's, because PrintSymbolizedProfile()
-  # dumps unadjusted profiles.
-  {
-    $stack =~ /(\s)/;
-    my $delimiter = $1;
-    my @addrs = split(' ', $stack);
-    my @fixedaddrs;
-    $#fixedaddrs = $#addrs;
-    if ($#addrs >= 0) {
-      $fixedaddrs[0] = $addrs[0];
-    }
-    for (my $i = 1; $i <= $#addrs; $i++) {
-      $fixedaddrs[$i] = AddressSub($addrs[$i], "0x1");
-    }
-    return join $delimiter, @fixedaddrs;
-  }
-}
-
-# CPU profile reader
-sub ReadCPUProfile {
-  my $prog = shift;
-  my $fname = shift;       # just used for logging
-  local *PROFILE = shift;
-  my $version;
-  my $period;
-  my $i;
-  my $profile = {};
-  my $pcs = {};
-
-  # Parse string into array of slots.
-  my $slots = CpuProfileStream->new(*PROFILE, $fname);
-
-  # Read header.  The current header version is a 5-element structure
-  # containing:
-  #   0: header count (always 0)
-  #   1: header "words" (after this one: 3)
-  #   2: format version (0)
-  #   3: sampling period (usec)
-  #   4: unused padding (always 0)
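-  # As a worked example (illustrative, not from an actual profile): a CPU
-  # profile captured with a 10ms sampling period would begin with the five
-  # words 0, 3, 0, 10000, 0, so the sample records start at slot index
-  # 2 + 3 = 5.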
-  if ($slots->get(0) != 0 ) {
-    error("$fname: not a profile file, or old format profile file\n");
-  }
-  $i = 2 + $slots->get(1);
-  $version = $slots->get(2);
-  $period = $slots->get(3);
-  # Do some sanity checking on these header values.
-  if ($version > (2**32) || $period > (2**32) || $i > (2**32) || $i < 5) {
-    error("$fname: not a profile file, or corrupted profile file\n");
-  }
-
-  # Parse profile
-  while ($slots->get($i) != -1) {
-    my $n = $slots->get($i++);
-    my $d = $slots->get($i++);
-    if ($d > (2**16)) {  # TODO(csilvers): what's a reasonable max-stack-depth?
-      my $addr = sprintf("0%o", $i * ($address_length == 8 ? 4 : 8));
-      print STDERR "At index $i (address $addr):\n";
-      error("$fname: stack trace depth >= 2**32\n");
-    }
-    if ($slots->get($i) == 0) {
-      # End of profile data marker
-      $i += $d;
-      last;
-    }
-
-    # Make key out of the stack entries
-    my @k = ();
-    for (my $j = 0; $j < $d; $j++) {
-      my $pc = $slots->get($i+$j);
-      # Subtract one from caller pc so we map back to call instr.
-      $pc--;
-      $pc = sprintf("%0*x", $address_length, $pc);
-      $pcs->{$pc} = 1;
-      push @k, $pc;
-    }
-
-    AddEntry($profile, (join "\n", @k), $n);
-    $i += $d;
-  }
-
-  # Parse map
-  my $map = '';
-  seek(PROFILE, $i * 4, 0);
-  read(PROFILE, $map, (stat PROFILE)[7]);
-
-  my $r = {};
-  $r->{version} = $version;
-  $r->{period} = $period;
-  $r->{profile} = $profile;
-  $r->{libs} = ParseLibraries($prog, $map, $pcs);
-  $r->{pcs} = $pcs;
-
-  return $r;
-}
-
-sub HeapProfileIndex {
-  my $index = 1;
-  if ($main::opt_inuse_space) {
-    $index = 1;
-  } elsif ($main::opt_inuse_objects) {
-    $index = 0;
-  } elsif ($main::opt_alloc_space) {
-    $index = 3;
-  } elsif ($main::opt_alloc_objects) {
-    $index = 2;
-  }
-  return $index;
-}
-
-sub ReadMappedLibraries {
-  my $fh = shift;
-  my $map = "";
-  # Read the /proc/self/maps data
-  while (<$fh>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    $map .= $_;
-  }
-  return $map;
-}
-
-sub ReadMemoryMap {
-  my $fh = shift;
-  my $map = "";
-  # Read /proc/self/maps data as formatted by DumpAddressMap()
-  my $buildvar = "";
-  while (<$fh>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    # Parse "build=<dir>" specification if supplied
-    if (m/^\s*build=(.*)\n/) {
-      $buildvar = $1;
-    }
-
-    # Expand "$build" variable if available
-    $_ =~ s/\$build\b/$buildvar/g;
-
-    $map .= $_;
-  }
-  return $map;
-}
-
-sub AdjustSamples {
-  my ($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2) = @_;
-  if ($sample_adjustment) {
-    if ($sampling_algorithm == 2) {
-      # Remote-heap version 2
-      # The sampling frequency is the rate of a Poisson process.
-      # This means that the probability of sampling an allocation of
-      # size X with sampling rate Y is 1 - exp(-X/Y)
-      if ($n1 != 0) {
-        my $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
-        my $scale_factor = 1/(1 - exp(-$ratio));
-        $n1 *= $scale_factor;
-        $s1 *= $scale_factor;
-      }
-      if ($n2 != 0) {
-        my $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
-        my $scale_factor = 1/(1 - exp(-$ratio));
-        $n2 *= $scale_factor;
-        $s2 *= $scale_factor;
-      }
-    } else {
-      # Remote-heap version 1
-      my $ratio;
-      $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
-      if ($ratio < 1) {
-        $n1 /= $ratio;
-        $s1 /= $ratio;
-      }
-      $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
-      if ($ratio < 1) {
-        $n2 /= $ratio;
-        $s2 /= $ratio;
-      }
-    }
-  }
-  return ($n1, $s1, $n2, $s2);
-}
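-
-# A rough worked example of the version-2 adjustment above (illustrative
-# only; the numbers are made up): with a Poisson sampling rate of 524288
-# bytes and a bucket of 10 sampled allocations totalling 81920 bytes,
-#
-#   my $ratio = ((81920 * 1.0) / 10) / 524288;   # 0.015625
-#   my $scale = 1 / (1 - exp(-$ratio));          # roughly 64.5
-#
-# so the bucket is scaled up to roughly 645 allocations and about 5.3e6
-# bytes, compensating for allocations too small to have been sampled.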
-
-sub ReadHeapProfile {
-  my $prog = shift;
-  local *PROFILE = shift;
-  my $header = shift;
-
-  my $index = HeapProfileIndex();
-
-  # Find the type of this profile.  The header line looks like:
-  #    heap profile:   1246:  8800744 [  1246:  8800744] @ <heap-url>/266053
-  # There are two pairs <count: size>, the first inuse objects/space, and the
-  # second allocated objects/space.  This is followed optionally by a profile
-  # type, and if that is present, optionally by a sampling frequency.
-  # For remote heap profiles (v1):
-  # The interpretation of the sampling frequency is that the profiler, for
-  # each sample, calculates a uniformly distributed random integer less than
-  # the given value, and records the next sample after that many bytes have
-  # been allocated.  Therefore, the expected sample interval is half of the
-  # given frequency.  By default, if not specified, the expected sample
-  # interval is 128KB.  Only remote-heap-page profiles are adjusted for
-  # sample size.
-  # For remote heap profiles (v2):
-  # The sampling frequency is the rate of a Poisson process. This means that
-  # the probability of sampling an allocation of size X with sampling rate Y
-  # is 1 - exp(-X/Y)
-  # For version 2, a typical header line might look like this:
-  # heap profile:   1922: 127792360 [  1922: 127792360] @ <heap-url>_v2/524288
-  # the trailing number (524288) is the sampling rate. (Version 1 showed
-  # double the 'rate' here)
-  my $sampling_algorithm = 0;
-  my $sample_adjustment = 0;
-  chomp($header);
-  my $type = "unknown";
-  if ($header =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") {
-    if (defined($6) && ($6 ne '')) {
-      $type = $6;
-      my $sample_period = $8;
-      # $type is "heapprofile" for profiles generated by the
-      # heap-profiler, and either "heap" or "heap_v2" for profiles
-      # generated by sampling directly within tcmalloc.  It can also
-      # be "growth" for heap-growth profiles.  The first is typically
-      # found for profiles generated locally, and the others for
-      # remote profiles.
-      if (($type eq "heapprofile") || ($type !~ /heap/) ) {
-        # No need to adjust for the sampling rate with heap-profiler-derived data
-        $sampling_algorithm = 0;
-      } elsif ($type =~ /_v2/) {
-        $sampling_algorithm = 2;     # version 2 sampling
-        if (defined($sample_period) && ($sample_period ne '')) {
-          $sample_adjustment = int($sample_period);
-        }
-      } else {
-        $sampling_algorithm = 1;     # version 1 sampling
-        if (defined($sample_period) && ($sample_period ne '')) {
-          $sample_adjustment = int($sample_period)/2;
-        }
-      }
-    } else {
-      # We detect whether or not this is a remote-heap profile by checking
-      # that the total-allocated stats ($n2,$s2) are exactly the
-      # same as the in-use stats ($n1,$s1).  It is remotely conceivable
-      # that a non-remote-heap profile may pass this check, but it is hard
-      # to imagine how that could happen.
-      # In this case it's so old it's guaranteed to be remote-heap version 1.
-      my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
-      if (($n1 == $n2) && ($s1 == $s2)) {
-        # This is likely to be a remote-heap based sample profile
-        $sampling_algorithm = 1;
-      }
-    }
-  }
-
-  if ($sampling_algorithm > 0) {
-    # For remote-heap generated profiles, adjust the counts and sizes to
-    # account for the sample rate (we sample once every 128KB by default).
-    if ($sample_adjustment == 0) {
-      # Turn on profile adjustment.
-      $sample_adjustment = 128*1024;
-      print STDERR "Adjusting heap profiles for 1-in-128KB sampling rate\n";
-    } else {
-      printf STDERR ("Adjusting heap profiles for 1-in-%d sampling rate\n",
-                     $sample_adjustment);
-    }
-    if ($sampling_algorithm > 1) {
-      # We don't bother printing anything for the original version (version 1)
-      printf STDERR "Heap version $sampling_algorithm\n";
-    }
-  }
-
-  my $profile = {};
-  my $pcs = {};
-  my $map = "";
-
-  while (<PROFILE>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    if (/^MAPPED_LIBRARIES:/) {
-      $map .= ReadMappedLibraries(*PROFILE);
-      last;
-    }
-
-    if (/^--- Memory map:/) {
-      $map .= ReadMemoryMap(*PROFILE);
-      last;
-    }
-
-    # Read entry of the form:
-    #  <count1>: <bytes1> [<count2>: <bytes2>] @ a1 a2 a3 ... an
-    s/^\s*//;
-    s/\s*$//;
-    if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) {
-      my $stack = $5;
-      my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
-      my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
-                                 $n1, $s1, $n2, $s2);
-      AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
-    }
-  }
-
-  my $r = {};
-  $r->{version} = "heap";
-  $r->{period} = 1;
-  $r->{profile} = $profile;
-  $r->{libs} = ParseLibraries($prog, $map, $pcs);
-  $r->{pcs} = $pcs;
-  return $r;
-}
-
-sub ReadThreadedHeapProfile {
-  my ($prog, $fname, $header) = @_;
-
-  my $index = HeapProfileIndex();
-  my $sampling_algorithm = 0;
-  my $sample_adjustment = 0;
-  chomp($header);
-  my $type = "unknown";
-  # Assuming a very specific type of header for now.
-  if ($header =~ m"^heap_v2/(\d+)") {
-    $type = "_v2";
-    $sampling_algorithm = 2;
-    $sample_adjustment = int($1);
-  }
-  if ($type ne "_v2" || !defined($sample_adjustment)) {
-    die "Threaded heap profiles require v2 sampling with a sample rate\n";
-  }
-
-  my $profile = {};
-  my $thread_profiles = {};
-  my $pcs = {};
-  my $map = "";
-  my $stack = "";
-
-  while (<PROFILE>) {
-    s/\r//g;
-    if (/^MAPPED_LIBRARIES:/) {
-      $map .= ReadMappedLibraries(*PROFILE);
-      last;
-    }
-
-    if (/^--- Memory map:/) {
-      $map .= ReadMemoryMap(*PROFILE);
-      last;
-    }
-
-    # Read entry of the form:
-    # @ a1 a2 ... an
-    #   t*: <count1>: <bytes1> [<count2>: <bytes2>]
-    #   t1: <count1>: <bytes1> [<count2>: <bytes2>]
-    #     ...
-    #   tn: <count1>: <bytes1> [<count2>: <bytes2>]
-    s/^\s*//;
-    s/\s*$//;
-    if (m/^@\s+(.*)$/) {
-      $stack = $1;
-    } elsif (m/^\s*(t(\*|\d+)):\s+(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]$/) {
-      if ($stack eq "") {
-        # Still in the header, so this is just a per-thread summary.
-        next;
-      }
-      my $thread = $2;
-      my ($n1, $s1, $n2, $s2) = ($3, $4, $5, $6);
-      my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
-                                 $n1, $s1, $n2, $s2);
-      if ($thread eq "*") {
-        AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
-      } else {
-        if (!exists($thread_profiles->{$thread})) {
-          $thread_profiles->{$thread} = {};
-        }
-        AddEntries($thread_profiles->{$thread}, $pcs,
-                   FixCallerAddresses($stack), $counts[$index]);
-      }
-    }
-  }
-
-  my $r = {};
-  $r->{version} = "heap";
-  $r->{period} = 1;
-  $r->{profile} = $profile;
-  $r->{threads} = $thread_profiles;
-  $r->{libs} = ParseLibraries($prog, $map, $pcs);
-  $r->{pcs} = $pcs;
-  return $r;
-}
-
-sub ReadSynchProfile {
-  my $prog = shift;
-  local *PROFILE = shift;
-  my $header = shift;
-
-  my $map = '';
-  my $profile = {};
-  my $pcs = {};
-  my $sampling_period = 1;
-  my $cyclespernanosec = 2.8;   # Default assumption for old binaries
-  my $seen_clockrate = 0;
-  my $line;
-
-  my $index = 0;
-  if ($main::opt_total_delay) {
-    $index = 0;
-  } elsif ($main::opt_contentions) {
-    $index = 1;
-  } elsif ($main::opt_mean_delay) {
-    $index = 2;
-  }
-
-  while ( $line = <PROFILE> ) {
-    $line =~ s/\r//g;      # turn windows-looking lines into unix-looking lines
-    if ( $line =~ /^\s*(\d+)\s+(\d+) \@\s*(.*?)\s*$/ ) {
-      my ($cycles, $count, $stack) = ($1, $2, $3);
-
-      # Convert cycles to nanoseconds
-      $cycles /= $cyclespernanosec;
-
-      # Adjust for sampling done by application
-      $cycles *= $sampling_period;
-      $count *= $sampling_period;
-
-      my @values = ($cycles, $count, $cycles / $count);
-      AddEntries($profile, $pcs, FixCallerAddresses($stack), $values[$index]);
-
-    } elsif ( $line =~ /^(slow release).*thread \d+  \@\s*(.*?)\s*$/ ||
-              $line =~ /^\s*(\d+) \@\s*(.*?)\s*$/ ) {
-      my ($cycles, $stack) = ($1, $2);
-      if ($cycles !~ /^\d+$/) {
-        next;
-      }
-
-      # Convert cycles to nanoseconds
-      $cycles /= $cyclespernanosec;
-
-      # Adjust for sampling done by application
-      $cycles *= $sampling_period;
-
-      AddEntries($profile, $pcs, FixCallerAddresses($stack), $cycles);
-
-    } elsif ( $line =~ m/^([a-z][^=]*)=(.*)$/ ) {
-      my ($variable, $value) = ($1,$2);
-      for ($variable, $value) {
-        s/^\s+//;
-        s/\s+$//;
-      }
-      if ($variable eq "cycles/second") {
-        $cyclespernanosec = $value / 1e9;
-        $seen_clockrate = 1;
-      } elsif ($variable eq "sampling period") {
-        $sampling_period = $value;
-      } elsif ($variable eq "ms since reset") {
-        # Currently nothing is done with this value in jeprof
-        # So we just silently ignore it for now
-      } elsif ($variable eq "discarded samples") {
-        # Currently nothing is done with this value in jeprof
-        # So we just silently ignore it for now
-      } else {
-        printf STDERR ("Ignoring unnknown variable in /contention output: " .
-                       "'%s' = '%s'\n",$variable,$value);
-      }
-    } else {
-      # Memory map entry
-      $map .= $line;
-    }
-  }
-
-  if (!$seen_clockrate) {
-    printf STDERR ("No cycles/second entry in profile; Guessing %.1f GHz\n",
-                   $cyclespernanosec);
-  }
-
-  my $r = {};
-  $r->{version} = 0;
-  $r->{period} = $sampling_period;
-  $r->{profile} = $profile;
-  $r->{libs} = ParseLibraries($prog, $map, $pcs);
-  $r->{pcs} = $pcs;
-  return $r;
-}
-
-# Given a hex value in the form "0x1abcd" or "1abcd", return either
-# "0001abcd" or "000000000001abcd", depending on the current (global)
-# address length.
-sub HexExtend {
-  my $addr = shift;
-
-  $addr =~ s/^(0x)?0*//;
-  my $zeros_needed = $address_length - length($addr);
-  if ($zeros_needed < 0) {
-    printf STDERR "Warning: address $addr is longer than address length $address_length\n";
-    return $addr;
-  }
-  return ("0" x $zeros_needed) . $addr;
-}
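-
-# Usage sketch for HexExtend (illustrative only):
-#   $address_length = 8;
-#   HexExtend("0x1abcd");    # returns "0001abcd"
-#   $address_length = 16;
-#   HexExtend("1abcd");      # returns "000000000001abcd"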
-
-##### Symbol extraction #####
-
-# Aggressively search the lib_prefix values for the given library
-# If all else fails, just return the name of the library unmodified.
-# If the lib_prefix is "/my/path,/other/path" and $file is "/lib/dir/mylib.so"
-# it will search the following locations in this order, until it finds a file:
-#   /my/path/lib/dir/mylib.so
-#   /other/path/lib/dir/mylib.so
-#   /my/path/dir/mylib.so
-#   /other/path/dir/mylib.so
-#   /my/path/mylib.so
-#   /other/path/mylib.so
-#   /lib/dir/mylib.so              (returned as last resort)
-sub FindLibrary {
-  my $file = shift;
-  my $suffix = $file;
-
-  # Search for the library as described above
-  do {
-    foreach my $prefix (@prefix_list) {
-      my $fullpath = $prefix . $suffix;
-      if (-e $fullpath) {
-        return $fullpath;
-      }
-    }
-  } while ($suffix =~ s|^/[^/]+/|/|);
-  return $file;
-}
-
-# Return path to library with debugging symbols.
-# For libc libraries, the copy in /usr/lib/debug contains debugging symbols
-sub DebuggingLibrary {
-  my $file = shift;
-  if ($file =~ m|^/|) {
-      if (-f "/usr/lib/debug$file") {
-        return "/usr/lib/debug$file";
-      } elsif (-f "/usr/lib/debug$file.debug") {
-        return "/usr/lib/debug$file.debug";
-      }
-  }
-  return undef;
-}
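-
-# For example (illustrative only): DebuggingLibrary("/lib/libc-2.19.so")
-# returns "/usr/lib/debug/lib/libc-2.19.so" if that file exists, the same
-# path with a ".debug" suffix if only that exists, and undef otherwise.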
-
-# Parse text section header of a library using objdump
-sub ParseTextSectionHeaderFromObjdump {
-  my $lib = shift;
-
-  my $size = undef;
-  my $vma;
-  my $file_offset;
-  # Get objdump output from the library file to figure out how to
-  # map between mapped addresses and addresses in the library.
-  my $cmd = ShellEscape($obj_tool_map{"objdump"}, "-h", $lib);
-  open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
-  while (<OBJDUMP>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    # Idx Name          Size      VMA       LMA       File off  Algn
-    #  10 .text         00104b2c  420156f0  420156f0  000156f0  2**4
-    # For 64-bit objects, VMA and LMA will be 16 hex digits, size and file
-    # offset may still be 8.  But AddressSub below will still handle that.
-    my @x = split;
-    if (($#x >= 6) && ($x[1] eq '.text')) {
-      $size = $x[2];
-      $vma = $x[3];
-      $file_offset = $x[5];
-      last;
-    }
-  }
-  close(OBJDUMP);
-
-  if (!defined($size)) {
-    return undef;
-  }
-
-  my $r = {};
-  $r->{size} = $size;
-  $r->{vma} = $vma;
-  $r->{file_offset} = $file_offset;
-
-  return $r;
-}
-
-# Parse text section header of a library using otool (on OS X)
-sub ParseTextSectionHeaderFromOtool {
-  my $lib = shift;
-
-  my $size = undef;
-  my $vma = undef;
-  my $file_offset = undef;
-  # Get otool output from the library file to figure out how to
-  # map between mapped addresses and addresses in the library.
-  my $command = ShellEscape($obj_tool_map{"otool"}, "-l", $lib);
-  open(OTOOL, "$command |") || error("$command: $!\n");
-  my $cmd = "";
-  my $sectname = "";
-  my $segname = "";
-  foreach my $line (<OTOOL>) {
-    $line =~ s/\r//g;      # turn windows-looking lines into unix-looking lines
-    # Load command <#>
-    #       cmd LC_SEGMENT
-    # [...]
-    # Section
-    #   sectname __text
-    #    segname __TEXT
-    #       addr 0x000009f8
-    #       size 0x00018b9e
-    #     offset 2552
-    #      align 2^2 (4)
-    # We will need to strip off the leading 0x from the hex addresses,
-    # and convert the offset into hex.
-    if ($line =~ /Load command/) {
-      $cmd = "";
-      $sectname = "";
-      $segname = "";
-    } elsif ($line =~ /Section/) {
-      $sectname = "";
-      $segname = "";
-    } elsif ($line =~ /cmd (\w+)/) {
-      $cmd = $1;
-    } elsif ($line =~ /sectname (\w+)/) {
-      $sectname = $1;
-    } elsif ($line =~ /segname (\w+)/) {
-      $segname = $1;
-    } elsif (!(($cmd eq "LC_SEGMENT" || $cmd eq "LC_SEGMENT_64") &&
-               $sectname eq "__text" &&
-               $segname eq "__TEXT")) {
-      next;
-    } elsif ($line =~ /\baddr 0x([0-9a-fA-F]+)/) {
-      $vma = $1;
-    } elsif ($line =~ /\bsize 0x([0-9a-fA-F]+)/) {
-      $size = $1;
-    } elsif ($line =~ /\boffset ([0-9]+)/) {
-      $file_offset = sprintf("%016x", $1);
-    }
-    if (defined($vma) && defined($size) && defined($file_offset)) {
-      last;
-    }
-  }
-  close(OTOOL);
-
-  if (!defined($vma) || !defined($size) || !defined($file_offset)) {
-     return undef;
-  }
-
-  my $r = {};
-  $r->{size} = $size;
-  $r->{vma} = $vma;
-  $r->{file_offset} = $file_offset;
-
-  return $r;
-}
-
-sub ParseTextSectionHeader {
-  # obj_tool_map("otool") is only defined if we're in a Mach-O environment
-  if (defined($obj_tool_map{"otool"})) {
-    my $r = ParseTextSectionHeaderFromOtool(@_);
-    if (defined($r)){
-      return $r;
-    }
-  }
-  # If otool doesn't work, or we don't have it, fall back to objdump
-  return ParseTextSectionHeaderFromObjdump(@_);
-}
-
-# Split /proc/pid/maps dump into a list of libraries
-sub ParseLibraries {
-  return if $main::use_symbol_page;  # We don't need libraries info.
-  my $prog = shift;
-  my $map = shift;
-  my $pcs = shift;
-
-  my $result = [];
-  my $h = "[a-f0-9]+";
-  my $zero_offset = HexExtend("0");
-
-  my $buildvar = "";
-  foreach my $l (split("\n", $map)) {
-    if ($l =~ m/^\s*build=(.*)$/) {
-      $buildvar = $1;
-    }
-
-    my $start;
-    my $finish;
-    my $offset;
-    my $lib;
-    if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.(so|dll|dylib|bundle)((\.\d+)+\w*(\.\d+){0,3})?)$/i) {
-      # Full line from /proc/self/maps.  Example:
-      #   40000000-40015000 r-xp 00000000 03:01 12845071   /lib/ld-2.3.2.so
-      $start = HexExtend($1);
-      $finish = HexExtend($2);
-      $offset = HexExtend($3);
-      $lib = $4;
-      $lib =~ s|\\|/|g;     # turn windows-style paths into unix-style paths
-    } elsif ($l =~ /^\s*($h)-($h):\s*(\S+\.so(\.\d+)*)/) {
-      # Cooked line from DumpAddressMap.  Example:
-      #   40000000-40015000: /lib/ld-2.3.2.so
-      $start = HexExtend($1);
-      $finish = HexExtend($2);
-      $offset = $zero_offset;
-      $lib = $3;
-    }
-    # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in
-    # function procfs_doprocmap (sys/fs/procfs/procfs_map.c)
-    #
-    # Example:
-    # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.s
-    # o.1 NCH -1
-    elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) {
-      $start = HexExtend($1);
-      $finish = HexExtend($2);
-      $offset = $zero_offset;
-      $lib = FindLibrary($5);
-
-    } else {
-      next;
-    }
-
-    # Expand "$build" variable if available
-    $lib =~ s/\$build\b/$buildvar/g;
-
-    $lib = FindLibrary($lib);
-
-    # Check for pre-relocated libraries, which use pre-relocated symbol tables
-    # and thus require adjusting the offset that we'll use to translate
-    # VM addresses into symbol table addresses.
-    # Only do this if we're not going to fetch the symbol table from a
-    # debugging copy of the library.
-    if (!DebuggingLibrary($lib)) {
-      my $text = ParseTextSectionHeader($lib);
-      if (defined($text)) {
-         my $vma_offset = AddressSub($text->{vma}, $text->{file_offset});
-         $offset = AddressAdd($offset, $vma_offset);
-      }
-    }
-
-    if($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; }
-    push(@{$result}, [$lib, $start, $finish, $offset]);
-  }
-
-  # Append special entry for additional library (not relocated)
-  if ($main::opt_lib ne "") {
-    my $text = ParseTextSectionHeader($main::opt_lib);
-    if (defined($text)) {
-       my $start = $text->{vma};
-       my $finish = AddressAdd($start, $text->{size});
-
-       push(@{$result}, [$main::opt_lib, $start, $finish, $start]);
-    }
-  }
-
-  # Append special entry for the main program.  This covers
-  # 0..max_pc_value_seen, so that we assume pc values not found in one
-  # of the library ranges will be treated as coming from the main
-  # program binary.
-  my $min_pc = HexExtend("0");
-  my $max_pc = $min_pc;          # find the maximal PC value in any sample
-  foreach my $pc (keys(%{$pcs})) {
-    if (HexExtend($pc) gt $max_pc) { $max_pc = HexExtend($pc); }
-  }
-  push(@{$result}, [$prog, $min_pc, $max_pc, $zero_offset]);
-
-  return $result;
-}
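-
-# Each entry pushed above is a [libname, start, finish, offset] tuple; for
-# the /proc/self/maps example line shown earlier, that would be
-# (illustrative only, before any text-section offset adjustment):
-#   ["/lib/ld-2.3.2.so", "40000000", "40015000", "00000000"]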
-
-# Add two hex addresses of length $address_length.
-# Run jeprof --test for unit test if this is changed.
-sub AddressAdd {
-  my $addr1 = shift;
-  my $addr2 = shift;
-  my $sum;
-
-  if ($address_length == 8) {
-    # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
-    $sum = (hex($addr1)+hex($addr2)) % (0x10000000 * 16);
-    return sprintf("%08x", $sum);
-
-  } else {
-    # Do the addition in 7-nibble chunks to trivialize carry handling.
-
-    if ($main::opt_debug and $main::opt_test) {
-      print STDERR "AddressAdd $addr1 + $addr2 = ";
-    }
-
-    my $a1 = substr($addr1,-7);
-    $addr1 = substr($addr1,0,-7);
-    my $a2 = substr($addr2,-7);
-    $addr2 = substr($addr2,0,-7);
-    $sum = hex($a1) + hex($a2);
-    my $c = 0;
-    if ($sum > 0xfffffff) {
-      $c = 1;
-      $sum -= 0x10000000;
-    }
-    my $r = sprintf("%07x", $sum);
-
-    $a1 = substr($addr1,-7);
-    $addr1 = substr($addr1,0,-7);
-    $a2 = substr($addr2,-7);
-    $addr2 = substr($addr2,0,-7);
-    $sum = hex($a1) + hex($a2) + $c;
-    $c = 0;
-    if ($sum > 0xfffffff) {
-      $c = 1;
-      $sum -= 0x10000000;
-    }
-    $r = sprintf("%07x", $sum) . $r;
-
-    $sum = hex($addr1) + hex($addr2) + $c;
-    if ($sum > 0xff) { $sum -= 0x100; }
-    $r = sprintf("%02x", $sum) . $r;
-
-    if ($main::opt_debug and $main::opt_test) { print STDERR "$r\n"; }
-
-    return $r;
-  }
-}
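-
-# Usage sketch for AddressAdd (illustrative only):
-#   $address_length = 8;
-#   AddressAdd("ffffffff", "00000001");                  # "00000000" (wraps)
-#   $address_length = 16;
-#   AddressAdd("00000000ffffffff", "0000000000000001");  # "0000000100000000"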
-
-
-# Subtract two hex addresses of length $address_length.
-# Run jeprof --test for unit test if this is changed.
-sub AddressSub {
-  my $addr1 = shift;
-  my $addr2 = shift;
-  my $diff;
-
-  if ($address_length == 8) {
-    # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
-    $diff = (hex($addr1)-hex($addr2)) % (0x10000000 * 16);
-    return sprintf("%08x", $diff);
-
-  } else {
-    # Do the subtraction in 7-nibble chunks to trivialize borrow handling.
-    # if ($main::opt_debug) { print STDERR "AddressSub $addr1 - $addr2 = "; }
-
-    my $a1 = hex(substr($addr1,-7));
-    $addr1 = substr($addr1,0,-7);
-    my $a2 = hex(substr($addr2,-7));
-    $addr2 = substr($addr2,0,-7);
-    my $b = 0;
-    if ($a2 > $a1) {
-      $b = 1;
-      $a1 += 0x10000000;
-    }
-    $diff = $a1 - $a2;
-    my $r = sprintf("%07x", $diff);
-
-    $a1 = hex(substr($addr1,-7));
-    $addr1 = substr($addr1,0,-7);
-    $a2 = hex(substr($addr2,-7)) + $b;
-    $addr2 = substr($addr2,0,-7);
-    $b = 0;
-    if ($a2 > $a1) {
-      $b = 1;
-      $a1 += 0x10000000;
-    }
-    $diff = $a1 - $a2;
-    $r = sprintf("%07x", $diff) . $r;
-
-    $a1 = hex($addr1);
-    $a2 = hex($addr2) + $b;
-    if ($a2 > $a1) { $a1 += 0x100; }
-    $diff = $a1 - $a2;
-    $r = sprintf("%02x", $diff) . $r;
-
-    # if ($main::opt_debug) { print STDERR "$r\n"; }
-
-    return $r;
-  }
-}
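-
-# Usage sketch for AddressSub (illustrative only):
-#   $address_length = 16;
-#   AddressSub("0000000100000000", "0000000000000001");  # "00000000ffffffff"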
-
-# Increment a hex address of length $address_length.
-# Run jeprof --test for unit test if this is changed.
-sub AddressInc {
-  my $addr = shift;
-  my $sum;
-
-  if ($address_length == 8) {
-    # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
-    $sum = (hex($addr)+1) % (0x10000000 * 16);
-    return sprintf("%08x", $sum);
-
-  } else {
-    # Do the addition in 7-nibble chunks to trivialize carry handling.
-    # We are always doing this to step through the addresses in a function,
-    # and will almost never overflow the first chunk, so we check for this
-    # case and exit early.
-
-    # if ($main::opt_debug) { print STDERR "AddressInc $addr1 = "; }
-
-    my $a1 = substr($addr,-7);
-    $addr = substr($addr,0,-7);
-    $sum = hex($a1) + 1;
-    my $r = sprintf("%07x", $sum);
-    if ($sum <= 0xfffffff) {
-      $r = $addr . $r;
-      # if ($main::opt_debug) { print STDERR "$r\n"; }
-      return HexExtend($r);
-    } else {
-      $r = "0000000";
-    }
-
-    $a1 = substr($addr,-7);
-    $addr = substr($addr,0,-7);
-    $sum = hex($a1) + 1;
-    $r = sprintf("%07x", $sum) . $r;
-    if ($sum <= 0xfffffff) {
-      $r = $addr . $r;
-      # if ($main::opt_debug) { print STDERR "$r\n"; }
-      return HexExtend($r);
-    } else {
-      $r = "00000000000000";
-    }
-
-    $sum = hex($addr) + 1;
-    if ($sum > 0xff) { $sum -= 0x100; }
-    $r = sprintf("%02x", $sum) . $r;
-
-    # if ($main::opt_debug) { print STDERR "$r\n"; }
-    return $r;
-  }
-}
-
-# Extract symbols for all PC values found in profile
-sub ExtractSymbols {
-  my $libs = shift;
-  my $pcset = shift;
-
-  my $symbols = {};
-
-  # Map each PC value to the containing library.  To make this faster,
-  # we sort libraries by their starting pc value (highest first), and
-  # advance through the libraries as we advance the pc.  Sometimes the
-  # addresses of libraries may overlap with the addresses of the main
-  # binary, so to make sure the libraries 'win', we iterate over the
-  # libraries in reverse order (which assumes the binary doesn't start
-  # in the middle of a library, which seems a fair assumption).
-  my @pcs = (sort { $a cmp $b } keys(%{$pcset}));  # pcset is 0-extended strings
-  foreach my $lib (sort {$b->[1] cmp $a->[1]} @{$libs}) {
-    my $libname = $lib->[0];
-    my $start = $lib->[1];
-    my $finish = $lib->[2];
-    my $offset = $lib->[3];
-
-    # Use debug library if it exists
-    my $debug_libname = DebuggingLibrary($libname);
-    if ($debug_libname) {
-        $libname = $debug_libname;
-    }
-
-    # Get list of pcs that belong in this library.
-    my $contained = [];
-    my ($start_pc_index, $finish_pc_index);
-    # Find smallest finish_pc_index such that $finish < $pc[$finish_pc_index].
-    for ($finish_pc_index = $#pcs + 1; $finish_pc_index > 0;
-         $finish_pc_index--) {
-      last if $pcs[$finish_pc_index - 1] le $finish;
-    }
-    # Find smallest start_pc_index such that $start <= $pc[$start_pc_index].
-    for ($start_pc_index = $finish_pc_index; $start_pc_index > 0;
-         $start_pc_index--) {
-      last if $pcs[$start_pc_index - 1] lt $start;
-    }
-    # This keeps PC values higher than $pc[$finish_pc_index] in @pcs,
-    # in case there are overlaps in libraries and the main binary.
-    @{$contained} = splice(@pcs, $start_pc_index,
-                           $finish_pc_index - $start_pc_index);
-    # Map to symbols
-    MapToSymbols($libname, AddressSub($start, $offset), $contained, $symbols);
-  }
-
-  return $symbols;
-}
-
-# Map list of PC values to symbols for a given image
-sub MapToSymbols {
-  my $image = shift;
-  my $offset = shift;
-  my $pclist = shift;
-  my $symbols = shift;
-
-  my $debug = 0;
-
-  # Ignore empty binaries
-  if ($#{$pclist} < 0) { return; }
-
-  # Figure out the addr2line command to use
-  my $addr2line = $obj_tool_map{"addr2line"};
-  my $cmd = ShellEscape($addr2line, "-f", "-C", "-e", $image);
-  if (exists $obj_tool_map{"addr2line_pdb"}) {
-    $addr2line = $obj_tool_map{"addr2line_pdb"};
-    $cmd = ShellEscape($addr2line, "--demangle", "-f", "-C", "-e", $image);
-  }
-
-  # If "addr2line" isn't installed on the system at all, just use
-  # nm to get what info we can (function names, but not line numbers).
-  if (system(ShellEscape($addr2line, "--help") . " >$dev_null 2>&1") != 0) {
-    MapSymbolsWithNM($image, $offset, $pclist, $symbols);
-    return;
-  }
-
-  # "addr2line -i" can produce a variable number of lines per input
-  # address, with no separator that allows us to tell when data for
-  # the next address starts.  So we find the address for a special
-  # symbol (_fini) and interleave this address between all real
-  # addresses passed to addr2line.  The name of this special symbol
-  # can then be used as a separator.
-  $sep_address = undef;  # May be filled in by MapSymbolsWithNM()
-  my $nm_symbols = {};
-  MapSymbolsWithNM($image, $offset, $pclist, $nm_symbols);
-  if (defined($sep_address)) {
-    # Only add " -i" to addr2line if the binary supports it.
-    # addr2line --help returns 0, but not if it sees an unknown flag first.
-    if (system("$cmd -i --help >$dev_null 2>&1") == 0) {
-      $cmd .= " -i";
-    } else {
-      $sep_address = undef;   # no need for sep_address if we don't support -i
-    }
-  }
-
-  # Make file with all PC values with intervening 'sep_address' so
-  # that we can reliably detect the end of inlined function list
-  open(ADDRESSES, ">$main::tmpfile_sym") || error("$main::tmpfile_sym: $!\n");
-  if ($debug) { print("---- $image ---\n"); }
-  for (my $i = 0; $i <= $#{$pclist}; $i++) {
-    # addr2line always reads hex addresses, and does not need '0x' prefix.
-    if ($debug) { printf STDERR ("%s\n", $pclist->[$i]); }
-    printf ADDRESSES ("%s\n", AddressSub($pclist->[$i], $offset));
-    if (defined($sep_address)) {
-      printf ADDRESSES ("%s\n", $sep_address);
-    }
-  }
-  close(ADDRESSES);
-  if ($debug) {
-    print("----\n");
-    system("cat", $main::tmpfile_sym);
-    print("----\n");
-    system("$cmd < " . ShellEscape($main::tmpfile_sym));
-    print("----\n");
-  }
-
-  open(SYMBOLS, "$cmd <" . ShellEscape($main::tmpfile_sym) . " |")
-      || error("$cmd: $!\n");
-  my $count = 0;   # Index in pclist
-  while (<SYMBOLS>) {
-    # Read fullfunction and filelineinfo from next pair of lines
-    s/\r?\n$//g;
-    my $fullfunction = $_;
-    $_ = <SYMBOLS>;
-    s/\r?\n$//g;
-    my $filelinenum = $_;
-
-    if (defined($sep_address) && $fullfunction eq $sep_symbol) {
-      # Terminating marker for data for this address
-      $count++;
-      next;
-    }
-
-    $filelinenum =~ s|\\|/|g; # turn windows-style paths into unix-style paths
-
-    my $pcstr = $pclist->[$count];
-    my $function = ShortFunctionName($fullfunction);
-    my $nms = $nm_symbols->{$pcstr};
-    if (defined($nms)) {
-      if ($fullfunction eq '??') {
-        # nm found a symbol for us.
-        $function = $nms->[0];
-        $fullfunction = $nms->[2];
-      } else {
-	# MapSymbolsWithNM tags each routine with its starting address,
-	# useful in case the image has multiple occurrences of this
-	# routine.  (It uses a syntax that resembles template parameters,
-	# that are automatically stripped out by ShortFunctionName().)
-	# addr2line does not provide the same information.  So we check
-	# if nm disambiguated our symbol, and if so take the annotated
-	# (nm) version of the routine-name.  TODO(csilvers): this won't
-	# catch overloaded, inlined symbols, which nm doesn't see.
-	# Better would be to do a check similar to nm's, in this fn.
-	if ($nms->[2] =~ m/^\Q$function\E/) {  # sanity check it's the right fn
-	  $function = $nms->[0];
-	  $fullfunction = $nms->[2];
-	}
-      }
-    }
-
-    # Prepend to accumulated symbols for pcstr
-    # (so that caller comes before callee)
-    my $sym = $symbols->{$pcstr};
-    if (!defined($sym)) {
-      $sym = [];
-      $symbols->{$pcstr} = $sym;
-    }
-    unshift(@{$sym}, $function, $filelinenum, $fullfunction);
-    if ($debug) { printf STDERR ("%s => [%s]\n", $pcstr, join(" ", @{$sym})); }
-    if (!defined($sep_address)) {
-      # Inlining is off, so this entry ends immediately
-      $count++;
-    }
-  }
-  close(SYMBOLS);
-}
-
-# Use nm to map the list of referenced PCs to symbols.  Return true iff we
-# are able to read procedure information via nm.
-sub MapSymbolsWithNM {
-  my $image = shift;
-  my $offset = shift;
-  my $pclist = shift;
-  my $symbols = shift;
-
-  # Get nm output sorted by increasing address
-  my $symbol_table = GetProcedureBoundaries($image, ".");
-  if (!%{$symbol_table}) {
-    return 0;
-  }
-  # Start addresses are already the right length (8 or 16 hex digits).
-  my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] }
-    keys(%{$symbol_table});
-
-  if ($#names < 0) {
-    # No symbols: just use addresses
-    foreach my $pc (@{$pclist}) {
-      my $pcstr = "0x" . $pc;
-      $symbols->{$pc} = [$pcstr, "?", $pcstr];
-    }
-    return 0;
-  }
-
-  # Sort addresses so we can do a join against nm output
-  my $index = 0;
-  my $fullname = $names[0];
-  my $name = ShortFunctionName($fullname);
-  foreach my $pc (sort { $a cmp $b } @{$pclist}) {
-    # Adjust for mapped offset
-    my $mpc = AddressSub($pc, $offset);
-    while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])){
-      $index++;
-      $fullname = $names[$index];
-      $name = ShortFunctionName($fullname);
-    }
-    if ($mpc lt $symbol_table->{$fullname}->[1]) {
-      $symbols->{$pc} = [$name, "?", $fullname];
-    } else {
-      my $pcstr = "0x" . $pc;
-      $symbols->{$pc} = [$pcstr, "?", $pcstr];
-    }
-  }
-  return 1;
-}
-
-sub ShortFunctionName {
-  my $function = shift;
-  while ($function =~ s/\([^()]*\)(\s*const)?//g) { }   # Argument types
-  while ($function =~ s/<[^<>]*>//g)  { }    # Remove template arguments
-  $function =~ s/^.*\s+(\w+::)/$1/;          # Remove leading type
-  return $function;
-}
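-
-# Usage sketch for ShortFunctionName (illustrative only):
-#   ShortFunctionName("void Foo::bar(int)");    # returns "Foo::bar"
-#   ShortFunctionName("std::vector<int>::push_back(int const&)");
-#                                    # returns "std::vector::push_back"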
-
-# Trim overly long symbols found in disassembler output
-sub CleanDisassembly {
-  my $d = shift;
-  while ($d =~ s/\([^()%]*\)(\s*const)?//g) { } # Argument types, not (%rax)
-  while ($d =~ s/(\w+)<[^<>]*>/$1/g)  { }       # Remove template arguments
-  return $d;
-}
-
-# Clean file name for display
-sub CleanFileName {
-  my ($f) = @_;
-  $f =~ s|^/proc/self/cwd/||;
-  $f =~ s|^\./||;
-  return $f;
-}
-
-# Make address relative to section and clean up for display
-sub UnparseAddress {
-  my ($offset, $address) = @_;
-  $address = AddressSub($address, $offset);
-  $address =~ s/^0x//;
-  $address =~ s/^0*//;
-  return $address;
-}
-
-##### Miscellaneous #####
-
-# Find the right versions of the above object tools to use.  The
-# argument is the program file being analyzed, and should be an ELF
-# 32-bit or ELF 64-bit executable file.  The location of the tools
-# is determined by considering the following options in this order:
-#   1) --tools option, if set
-#   2) JEPROF_TOOLS environment variable, if set
-#   3) the environment
-sub ConfigureObjTools {
-  my $prog_file = shift;
-
-  # Check for the existence of $prog_file because /usr/bin/file does not
-  # predictably return error status in prod.
-  (-e $prog_file)  || error("$prog_file does not exist.\n");
-
-  my $file_type = undef;
-  if (-e "/usr/bin/file") {
-    # Follow symlinks (at least for systems where "file" supports that).
-    my $escaped_prog_file = ShellEscape($prog_file);
-    $file_type = `/usr/bin/file -L $escaped_prog_file 2>$dev_null ||
-                  /usr/bin/file $escaped_prog_file`;
-  } elsif ($^O == "MSWin32") {
-    $file_type = "MS Windows";
-  } else {
-    print STDERR "WARNING: Can't determine the file type of $prog_file";
-  }
-
-  if ($file_type =~ /64-bit/) {
-    # Change $address_length to 16 if the program file is ELF 64-bit.
-    # We can't detect this from many (most?) heap or lock contention
-    # profiles, since the actual addresses referenced are generally in low
-    # memory even for 64-bit programs.
-    $address_length = 16;
-  }
-
-  if ($file_type =~ /MS Windows/) {
-    # For windows, we provide a version of nm and addr2line as part of
-    # the opensource release, which is capable of parsing
-    # Windows-style PDB executables.  It should live in the path, or
-    # in the same directory as jeprof.
-    $obj_tool_map{"nm_pdb"} = "nm-pdb";
-    $obj_tool_map{"addr2line_pdb"} = "addr2line-pdb";
-  }
-
-  if ($file_type =~ /Mach-O/) {
-    # OS X uses otool to examine Mach-O files, rather than objdump.
-    $obj_tool_map{"otool"} = "otool";
-    $obj_tool_map{"addr2line"} = "false";  # no addr2line
-    $obj_tool_map{"objdump"} = "false";  # no objdump
-  }
-
-  # Go fill in %obj_tool_map with the pathnames to use:
-  foreach my $tool (keys %obj_tool_map) {
-    $obj_tool_map{$tool} = ConfigureTool($obj_tool_map{$tool});
-  }
-}
-
-# Returns the path of a caller-specified object tool.  If --tools or
-# JEPROF_TOOLS are specified, then returns the full path to the tool
-# with that prefix.  Otherwise, returns the path unmodified (which
-# means we will look for it on PATH).
-sub ConfigureTool {
-  my $tool = shift;
-  my $path;
-
-  # --tools (or $JEPROF_TOOLS) is a comma separated list, where each
-  # item is either a) a pathname prefix, or b) a map of the form
-  # <tool>:<path>.  First we look for an entry of type (b) for our
-  # tool.  If one is found, we use it.  Otherwise, we consider all the
-  # pathname prefixes in turn, until one yields an existing file.  If
-  # none does, we use a default path.
-  my $tools = $main::opt_tools || $ENV{"JEPROF_TOOLS"} || "";
-  if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) {
-    $path = $2;
-    # TODO(csilvers): sanity-check that $path exists?  Hard if it's relative.
-  } elsif ($tools ne '') {
-    foreach my $prefix (split(',', $tools)) {
-      next if ($prefix =~ /:/);    # ignore "tool:fullpath" entries in the list
-      if (-x $prefix . $tool) {
-        $path = $prefix . $tool;
-        last;
-      }
-    }
-    if (!$path) {
-      error("No '$tool' found with prefix specified by " .
-            "--tools (or \$JEPROF_TOOLS) '$tools'\n");
-    }
-  } else {
-    # ... otherwise use the version that exists in the same directory as
-    # jeprof.  If there's nothing there, use $PATH.
-    $0 =~ m,[^/]*$,;     # this is everything after the last slash
-    my $dirname = $`;    # this is everything up to and including the last slash
-    if (-x "$dirname$tool") {
-      $path = "$dirname$tool";
-    } else {
-      $path = $tool;
-    }
-  }
-  if ($main::opt_debug) { print STDERR "Using '$path' for '$tool'.\n"; }
-  return $path;
-}
-
-sub ShellEscape {
-  my @escaped_words = ();
-  foreach my $word (@_) {
-    my $escaped_word = $word;
-    if ($word =~ m![^a-zA-Z0-9/.,_=-]!) {  # check for anything not in whitelist
-      $escaped_word =~ s/'/'\\''/g;
-      $escaped_word = "'$escaped_word'";
-    }
-    push(@escaped_words, $escaped_word);
-  }
-  return join(" ", @escaped_words);
-}
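-
-# Usage sketch for ShellEscape (illustrative only):
-#   ShellEscape("nm", "-n", "/tmp/my lib.so");
-#   # returns the string: nm -n '/tmp/my lib.so'
-# Only words containing characters outside the whitelist are quoted.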
-
-sub cleanup {
-  unlink($main::tmpfile_sym);
-  unlink(keys %main::tempnames);
-
-  # We leave any collected profiles in $HOME/jeprof in case the user wants
-  # to look at them later.  We print a message informing them of this.
-  if ((scalar(@main::profile_files) > 0) &&
-      defined($main::collected_profile)) {
-    if (scalar(@main::profile_files) == 1) {
-      print STDERR "Dynamically gathered profile is in $main::collected_profile\n";
-    }
-    print STDERR "If you want to investigate this profile further, you can do:\n";
-    print STDERR "\n";
-    print STDERR "  jeprof \\\n";
-    print STDERR "    $main::prog \\\n";
-    print STDERR "    $main::collected_profile\n";
-    print STDERR "\n";
-  }
-}
-
-sub sighandler {
-  cleanup();
-  exit(1);
-}
-
-sub error {
-  my $msg = shift;
-  print STDERR $msg;
-  cleanup();
-  exit(1);
-}
-
-
-# Run $nm_command and get all the resulting procedure boundaries whose
-# names match "$regexp" and returns them in a hashtable mapping from
-# procedure name to a two-element vector of [start address, end address]
-sub GetProcedureBoundariesViaNm {
-  my $escaped_nm_command = shift;    # shell-escaped
-  my $regexp = shift;
-
-  my $symbol_table = {};
-  open(NM, "$escaped_nm_command |") || error("$escaped_nm_command: $!\n");
-  my $last_start = "0";
-  my $routine = "";
-  while (<NM>) {
-    s/\r//g;         # turn windows-looking lines into unix-looking lines
-    if (m/^\s*([0-9a-f]+) (.) (..*)/) {
-      my $start_val = $1;
-      my $type = $2;
-      my $this_routine = $3;
-
-      # It's possible for two symbols to share the same address, if
-      # one is a zero-length variable (like __start_google_malloc) or
-      # one symbol is a weak alias to another (like __libc_malloc).
-      # In such cases, we want to ignore all values except for the
-      # actual symbol, which in nm-speak has type "T".  The logic
-      # below does this, though it's a bit tricky: what happens when
-      # we have a series of lines with the same address, is the first
-      # one gets queued up to be processed.  However, it won't
-      # *actually* be processed until later, when we read a line with
-      # a different address.  That means that as long as we're reading
-      # lines with the same address, we have a chance to replace that
-      # item in the queue, which we do whenever we see a 'T' entry --
-      # that is, a line with type 'T'.  If we never see a 'T' entry,
-      # we'll just go ahead and process the first entry (which never
-      # got touched in the queue), and ignore the others.
-      if ($start_val eq $last_start && $type =~ /t/i) {
-        # We are the 'T' symbol at this address, replace previous symbol.
-        $routine = $this_routine;
-        next;
-      } elsif ($start_val eq $last_start) {
-        # We're not the 'T' symbol at this address, so ignore us.
-        next;
-      }
-
-      if ($this_routine eq $sep_symbol) {
-        $sep_address = HexExtend($start_val);
-      }
-
-      # Tag this routine with the starting address in case the image
-      # has multiple occurrences of this routine.  We use a syntax
-      # that resembles template parameters that are automatically
-      # stripped out by ShortFunctionName()
-      $this_routine .= "<$start_val>";
-
-      if (defined($routine) && $routine =~ m/$regexp/) {
-        $symbol_table->{$routine} = [HexExtend($last_start),
-                                     HexExtend($start_val)];
-      }
-      $last_start = $start_val;
-      $routine = $this_routine;
-    } elsif (m/^Loaded image name: (.+)/) {
-      # The win32 nm workalike emits information about the binary it is using.
-      if ($main::opt_debug) { print STDERR "Using Image $1\n"; }
-    } elsif (m/^PDB file name: (.+)/) {
-      # The win32 nm workalike emits information about the pdb it is using.
-      if ($main::opt_debug) { print STDERR "Using PDB $1\n"; }
-    }
-  }
-  close(NM);
-  # Handle the last line in the nm output.  Unfortunately, we don't know
-  # how big this last symbol is, because we don't know how big the file
-  # is.  For now, we just give it a size of 0.
-  # TODO(csilvers): do better here.
-  if (defined($routine) && $routine =~ m/$regexp/) {
-    $symbol_table->{$routine} = [HexExtend($last_start),
-                                 HexExtend($last_start)];
-  }
-  return $symbol_table;
-}
-
-# Gets the procedure boundaries for all routines in "$image" whose names
-# match "$regexp" and returns them in a hashtable mapping from procedure
-# name to a two-element vector of [start address, end address].
-# Will return an empty map if nm is not installed or not working properly.
-sub GetProcedureBoundaries {
-  my $image = shift;
-  my $regexp = shift;
-
-  # If $image doesn't start with /, then put ./ in front of it.  This works
-  # around an obnoxious bug in our probing of nm -f behavior.
-  # "nm -f $image" is supposed to fail on GNU nm, but if:
-  #
-  # a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND
-  # b. you have a.out in your current directory (a not uncommon occurrence)
-  #
-  # then "nm -f $image" succeeds because -f only looks at the first letter of
-  # the argument, which looks valid because it's [BbSsPp], and then since
-  # there's no image provided, it looks for a.out and finds it.
-  #
-  # This regex makes sure that $image starts with . or /, forcing the -f
-  # parsing to fail since . and / are not valid formats.
-  $image =~ s#^[^/]#./$&#;
-
-  # For libc libraries, the copy in /usr/lib/debug contains debugging symbols
-  my $debugging = DebuggingLibrary($image);
-  if ($debugging) {
-    $image = $debugging;
-  }
-
-  my $nm = $obj_tool_map{"nm"};
-  my $cppfilt = $obj_tool_map{"c++filt"};
-
-  # nm can fail for two reasons: 1) $image isn't a debug library; 2) nm
-  # binary doesn't support --demangle.  In addition, for OS X we need
-  # to use the -f flag to get 'flat' nm output (otherwise we don't sort
-  # properly and get incorrect results).  Unfortunately, GNU nm uses -f
-  # in an incompatible way.  So first we test whether our nm supports
-  # --demangle and -f.
-  my $demangle_flag = "";
-  my $cppfilt_flag = "";
-  my $to_devnull = ">$dev_null 2>&1";
-  if (system(ShellEscape($nm, "--demangle", "image") . $to_devnull) == 0) {
-    # In this mode, we do "nm --demangle <foo>"
-    $demangle_flag = "--demangle";
-    $cppfilt_flag = "";
-  } elsif (system(ShellEscape($cppfilt, $image) . $to_devnull) == 0) {
-    # In this mode, we do "nm <foo> | c++filt"
-    $cppfilt_flag = " | " . ShellEscape($cppfilt);
-  };
-  my $flatten_flag = "";
-  if (system(ShellEscape($nm, "-f", $image) . $to_devnull) == 0) {
-    $flatten_flag = "-f";
-  }
-
-  # Finally, in the case $image isn't a debug library, we try again with
-  # -D to at least get *exported* symbols.  If we can't use --demangle,
-  # we use c++filt instead, if it exists on this system.
-  my @nm_commands = (ShellEscape($nm, "-n", $flatten_flag, $demangle_flag,
-                                 $image) . " 2>$dev_null $cppfilt_flag",
-                     ShellEscape($nm, "-D", "-n", $flatten_flag, $demangle_flag,
-                                 $image) . " 2>$dev_null $cppfilt_flag",
-                     # 6nm is for Go binaries
-                     ShellEscape("6nm", "$image") . " 2>$dev_null | sort",
-                     );
-
-  # If the executable is an MS Windows PDB-format executable, we'll
-  # have set up obj_tool_map("nm_pdb").  In this case, we actually
-  # want to use both unix nm and windows-specific nm_pdb, since
-  # PDB-format executables can apparently include dwarf .o files.
-  if (exists $obj_tool_map{"nm_pdb"}) {
-    push(@nm_commands,
-         ShellEscape($obj_tool_map{"nm_pdb"}, "--demangle", $image)
-         . " 2>$dev_null");
-  }
-
-  foreach my $nm_command (@nm_commands) {
-    my $symbol_table = GetProcedureBoundariesViaNm($nm_command, $regexp);
-    return $symbol_table if (%{$symbol_table});
-  }
-  my $symbol_table = {};
-  return $symbol_table;
-}
-
-
-# The test vectors for AddressAdd/Sub/Inc are 8-16-nibble hex strings.
-# To make them more readable, we add underscores at interesting places.
-# This routine removes the underscores, producing the canonical representation
-# used by jeprof to represent addresses, particularly in the tested routines.
-sub CanonicalHex {
-  my $arg = shift;
-  return join '', (split '_',$arg);
-}
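-
-# For example (illustrative only):
-#   CanonicalHex("00_000000f_afafafa");   # returns "00000000fafafafa"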
-
-
-# Unit test for AddressAdd:
-sub AddressAddUnitTest {
-  my $test_data_8 = shift;
-  my $test_data_16 = shift;
-  my $error_count = 0;
-  my $fail_count = 0;
-  my $pass_count = 0;
-  # print STDERR "AddressAddUnitTest: ", 1+$#{$test_data_8}, " tests\n";
-
-  # First a few 8-nibble addresses.  Note that this implementation uses
-  # plain old arithmetic, so a quick sanity check along with verifying what
-  # happens to overflow (we want it to wrap):
-  $address_length = 8;
-  foreach my $row (@{$test_data_8}) {
-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
-    my $sum = AddressAdd ($row->[0], $row->[1]);
-    if ($sum ne $row->[2]) {
-      printf STDERR "ERROR: %s != %s + %s = %s\n", $sum,
-             $row->[0], $row->[1], $row->[2];
-      ++$fail_count;
-    } else {
-      ++$pass_count;
-    }
-  }
-  printf STDERR "AddressAdd 32-bit tests: %d passes, %d failures\n",
-         $pass_count, $fail_count;
-  $error_count = $fail_count;
-  $fail_count = 0;
-  $pass_count = 0;
-
-  # Now 16-nibble addresses.
-  $address_length = 16;
-  foreach my $row (@{$test_data_16}) {
-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
-    my $sum = AddressAdd (CanonicalHex($row->[0]), CanonicalHex($row->[1]));
-    my $expected = join '', (split '_',$row->[2]);
-    if ($sum ne CanonicalHex($row->[2])) {
-      printf STDERR "ERROR: %s != %s + %s = %s\n", $sum,
-             $row->[0], $row->[1], $row->[2];
-      ++$fail_count;
-    } else {
-      ++$pass_count;
-    }
-  }
-  printf STDERR "AddressAdd 64-bit tests: %d passes, %d failures\n",
-         $pass_count, $fail_count;
-  $error_count += $fail_count;
-
-  return $error_count;
-}
-
-
-# Unit test for AddressSub:
-sub AddressSubUnitTest {
-  my $test_data_8 = shift;
-  my $test_data_16 = shift;
-  my $error_count = 0;
-  my $fail_count = 0;
-  my $pass_count = 0;
-  # print STDERR "AddressSubUnitTest: ", 1+$#{$test_data_8}, " tests\n";
-
-  # First a few 8-nibble addresses.  Note that this implementation uses
-  # plain old arithmetic, so a quick sanity check along with verifying what
-  # happens to overflow (we want it to wrap):
-  $address_length = 8;
-  foreach my $row (@{$test_data_8}) {
-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
-    my $sum = AddressSub ($row->[0], $row->[1]);
-    if ($sum ne $row->[3]) {
-      printf STDERR "ERROR: %s != %s - %s = %s\n", $sum,
-             $row->[0], $row->[1], $row->[3];
-      ++$fail_count;
-    } else {
-      ++$pass_count;
-    }
-  }
-  printf STDERR "AddressSub 32-bit tests: %d passes, %d failures\n",
-         $pass_count, $fail_count;
-  $error_count = $fail_count;
-  $fail_count = 0;
-  $pass_count = 0;
-
-  # Now 16-nibble addresses.
-  $address_length = 16;
-  foreach my $row (@{$test_data_16}) {
-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
-    my $sum = AddressSub (CanonicalHex($row->[0]), CanonicalHex($row->[1]));
-    if ($sum ne CanonicalHex($row->[3])) {
-      printf STDERR "ERROR: %s != %s - %s = %s\n", $sum,
-             $row->[0], $row->[1], $row->[3];
-      ++$fail_count;
-    } else {
-      ++$pass_count;
-    }
-  }
-  printf STDERR "AddressSub 64-bit tests: %d passes, %d failures\n",
-         $pass_count, $fail_count;
-  $error_count += $fail_count;
-
-  return $error_count;
-}
-
-
-# Unit test for AddressInc:
-sub AddressIncUnitTest {
-  my $test_data_8 = shift;
-  my $test_data_16 = shift;
-  my $error_count = 0;
-  my $fail_count = 0;
-  my $pass_count = 0;
-  # print STDERR "AddressIncUnitTest: ", 1+$#{$test_data_8}, " tests\n";
-
-  # First a few 8-nibble addresses.  Note that this implementation uses
-  # plain old arithmetic, so a quick sanity check along with verifying what
-  # happens to overflow (we want it to wrap):
-  $address_length = 8;
-  foreach my $row (@{$test_data_8}) {
-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
-    my $sum = AddressInc ($row->[0]);
-    if ($sum ne $row->[4]) {
-      printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum,
-             $row->[0], $row->[4];
-      ++$fail_count;
-    } else {
-      ++$pass_count;
-    }
-  }
-  printf STDERR "AddressInc 32-bit tests: %d passes, %d failures\n",
-         $pass_count, $fail_count;
-  $error_count = $fail_count;
-  $fail_count = 0;
-  $pass_count = 0;
-
-  # Now 16-nibble addresses.
-  $address_length = 16;
-  foreach my $row (@{$test_data_16}) {
-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
-    my $sum = AddressInc (CanonicalHex($row->[0]));
-    if ($sum ne CanonicalHex($row->[4])) {
-      printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum,
-             $row->[0], $row->[4];
-      ++$fail_count;
-    } else {
-      ++$pass_count;
-    }
-  }
-  printf STDERR "AddressInc 64-bit tests: %d passes, %d failures\n",
-         $pass_count, $fail_count;
-  $error_count += $fail_count;
-
-  return $error_count;
-}
-
-
-# Driver for unit tests.
-# Currently just the address add/subtract/increment routines for 64-bit.
-sub RunUnitTests {
-  my $error_count = 0;
-
-  # This is a list of tuples [a, b, a+b, a-b, a+1]
-  my $unit_test_data_8 = [
-    [qw(aaaaaaaa 50505050 fafafafa 5a5a5a5a aaaaaaab)],
-    [qw(50505050 aaaaaaaa fafafafa a5a5a5a6 50505051)],
-    [qw(ffffffff aaaaaaaa aaaaaaa9 55555555 00000000)],
-    [qw(00000001 ffffffff 00000000 00000002 00000002)],
-    [qw(00000001 fffffff0 fffffff1 00000011 00000002)],
-  ];
-  my $unit_test_data_16 = [
-    # The implementation handles data in 7-nibble chunks, so those are the
-    # interesting boundaries.
-    [qw(aaaaaaaa 50505050
-        00_000000f_afafafa 00_0000005_a5a5a5a 00_000000a_aaaaaab)],
-    [qw(50505050 aaaaaaaa
-        00_000000f_afafafa ff_ffffffa_5a5a5a6 00_0000005_0505051)],
-    [qw(ffffffff aaaaaaaa
-        00_000001a_aaaaaa9 00_0000005_5555555 00_0000010_0000000)],
-    [qw(00000001 ffffffff
-        00_0000010_0000000 ff_ffffff0_0000002 00_0000000_0000002)],
-    [qw(00000001 fffffff0
-        00_000000f_ffffff1 ff_ffffff0_0000011 00_0000000_0000002)],
-
-    [qw(00_a00000a_aaaaaaa 50505050
-        00_a00000f_afafafa 00_a000005_a5a5a5a 00_a00000a_aaaaaab)],
-    [qw(0f_fff0005_0505050 aaaaaaaa
-        0f_fff000f_afafafa 0f_ffefffa_5a5a5a6 0f_fff0005_0505051)],
-    [qw(00_000000f_fffffff 01_800000a_aaaaaaa
-        01_800001a_aaaaaa9 fe_8000005_5555555 00_0000010_0000000)],
-    [qw(00_0000000_0000001 ff_fffffff_fffffff
-        00_0000000_0000000 00_0000000_0000002 00_0000000_0000002)],
-    [qw(00_0000000_0000001 ff_fffffff_ffffff0
-        ff_fffffff_ffffff1 00_0000000_0000011 00_0000000_0000002)],
-  ];
-
-  $error_count += AddressAddUnitTest($unit_test_data_8, $unit_test_data_16);
-  $error_count += AddressSubUnitTest($unit_test_data_8, $unit_test_data_16);
-  $error_count += AddressIncUnitTest($unit_test_data_8, $unit_test_data_16);
-  if ($error_count > 0) {
-    print STDERR $error_count, " errors: FAILED\n";
-  } else {
-    print STDERR "PASS\n";
-  }
-  exit ($error_count);
-}
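(Not part of the deleted file: the unit tests above exercise jeprof's string-based address arithmetic on 8- and 16-nibble hex addresses, and the 16-nibble test data notes that the implementation works in 7-nibble chunks so each chunk fits in a native integer. The following is a minimal, hypothetical Perl sketch of that chunked scheme for the increment case only, using a plain 16-character address instead of jeprof's underscore-separated canonical form.)

#!/usr/bin/perl -w
# Hypothetical sketch (not the deleted jeprof code): increment a 16-nibble
# hex address by one using 2+7+7 nibble chunks, so each hex() call stays
# within a native integer even on 32-bit perls.  Wraps at 64 bits.
use strict;

sub AddressIncSketch {
  my ($addr) = @_;                        # e.g. "ffffffffffffffff"
  my $high = hex(substr($addr, 0, 2));    # top 2 nibbles
  my $mid  = hex(substr($addr, 2, 7));    # middle 7 nibbles
  my $low  = hex(substr($addr, 9, 7));    # low 7 nibbles
  $low += 1;
  if ($low > 0xfffffff) { $low = 0; $mid  += 1; }
  if ($mid > 0xfffffff) { $mid = 0; $high += 1; }
  $high &= 0xff;                          # wrap on 64-bit overflow
  return sprintf("%02x%07x%07x", $high, $mid, $low);
}

print AddressIncSketch("ffffffffffffffff"), "\n";   # 0000000000000000 (wraps)
print AddressIncSketch("00a00000aaaaaaaa"), "\n";   # 00a00000aaaaaaab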
diff --git a/zircon/third_party/ulib/jemalloc/config.stamp.in b/zircon/third_party/ulib/jemalloc/config.stamp.in
deleted file mode 100644
index e69de29..0000000
--- a/zircon/third_party/ulib/jemalloc/config.stamp.in
+++ /dev/null
diff --git a/zircon/third_party/ulib/jemalloc/configure.ac b/zircon/third_party/ulib/jemalloc/configure.ac
deleted file mode 100644
index 4a1168b..0000000
--- a/zircon/third_party/ulib/jemalloc/configure.ac
+++ /dev/null
@@ -1,2096 +0,0 @@
-dnl Process this file with autoconf to produce a configure script.
-AC_INIT([Makefile.in])
-
-AC_CONFIG_AUX_DIR([build-aux])
-
-dnl ============================================================================
-dnl Custom macro definitions.
-
-dnl JE_CONCAT_VVV(r, a, b)
-dnl 
-dnl Set $r to the concatenation of $a and $b, with a space separating them iff
-dnl both $a and $b are non-empty.
-AC_DEFUN([JE_CONCAT_VVV],
-if test "x[$]{$2}" = "x" -o "x[$]{$3}" = "x" ; then
-  $1="[$]{$2}[$]{$3}"
-else
-  $1="[$]{$2} [$]{$3}"
-fi
-)
-
-dnl JE_APPEND_VS(a, b)
-dnl 
-dnl Set $a to the concatenation of $a and b, with a space separating them iff
-dnl both $a and b are non-empty.
-AC_DEFUN([JE_APPEND_VS],
-  T_APPEND_V=$2
-  JE_CONCAT_VVV($1, $1, T_APPEND_V)
-)
-
-CONFIGURE_CFLAGS=
-SPECIFIED_CFLAGS="${CFLAGS}"
-dnl JE_CFLAGS_ADD(cflag)
-dnl 
-dnl CFLAGS is the concatenation of CONFIGURE_CFLAGS and SPECIFIED_CFLAGS
-dnl (ignoring EXTRA_CFLAGS, which does not impact configure tests).  This macro
-dnl appends to CONFIGURE_CFLAGS and regenerates CFLAGS.
-AC_DEFUN([JE_CFLAGS_ADD],
-[
-AC_MSG_CHECKING([whether compiler supports $1])
-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
-JE_APPEND_VS(CONFIGURE_CFLAGS, $1)
-JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
-AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
-[[
-]], [[
-    return 0;
-]])],
-              [je_cv_cflags_added=$1]
-              AC_MSG_RESULT([yes]),
-              [je_cv_cflags_added=]
-              AC_MSG_RESULT([no])
-              [CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"]
-)
-JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
-])
-
-dnl JE_CFLAGS_SAVE()
-dnl JE_CFLAGS_RESTORE()
-dnl 
-dnl Save/restore CFLAGS.  Nesting is not supported.
-AC_DEFUN([JE_CFLAGS_SAVE],
-SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
-)
-AC_DEFUN([JE_CFLAGS_RESTORE],
-CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
-JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
-)
-
-CONFIGURE_CXXFLAGS=
-SPECIFIED_CXXFLAGS="${CXXFLAGS}"
-dnl JE_CXXFLAGS_ADD(cxxflag)
-AC_DEFUN([JE_CXXFLAGS_ADD],
-[
-AC_MSG_CHECKING([whether compiler supports $1])
-T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
-JE_APPEND_VS(CONFIGURE_CXXFLAGS, $1)
-JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
-AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
-[[
-]], [[
-    return 0;
-]])],
-              [je_cv_cxxflags_added=$1]
-              AC_MSG_RESULT([yes]),
-              [je_cv_cxxflags_added=]
-              AC_MSG_RESULT([no])
-              [CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"]
-)
-JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
-])
-
-dnl JE_COMPILABLE(label, hcode, mcode, rvar)
-dnl 
-dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors
-dnl cause failure.
-AC_DEFUN([JE_COMPILABLE],
-[
-AC_CACHE_CHECK([whether $1 is compilable],
-               [$4],
-               [AC_LINK_IFELSE([AC_LANG_PROGRAM([$2],
-                                                [$3])],
-                               [$4=yes],
-                               [$4=no])])
-])
-
-dnl ============================================================================
-
-CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'`
-AC_SUBST([CONFIG])
-
-dnl Library revision.
-rev=2
-AC_SUBST([rev])
-
-srcroot=$srcdir
-if test "x${srcroot}" = "x." ; then
-  srcroot=""
-else
-  srcroot="${srcroot}/"
-fi
-AC_SUBST([srcroot])
-abs_srcroot="`cd \"${srcdir}\"; pwd`/"
-AC_SUBST([abs_srcroot])
-
-objroot=""
-AC_SUBST([objroot])
-abs_objroot="`pwd`/"
-AC_SUBST([abs_objroot])
-
-dnl Munge install path variables.
-if test "x$prefix" = "xNONE" ; then
-  prefix="/usr/local"
-fi
-if test "x$exec_prefix" = "xNONE" ; then
-  exec_prefix=$prefix
-fi
-PREFIX=$prefix
-AC_SUBST([PREFIX])
-BINDIR=`eval echo $bindir`
-BINDIR=`eval echo $BINDIR`
-AC_SUBST([BINDIR])
-INCLUDEDIR=`eval echo $includedir`
-INCLUDEDIR=`eval echo $INCLUDEDIR`
-AC_SUBST([INCLUDEDIR])
-LIBDIR=`eval echo $libdir`
-LIBDIR=`eval echo $LIBDIR`
-AC_SUBST([LIBDIR])
-DATADIR=`eval echo $datadir`
-DATADIR=`eval echo $DATADIR`
-AC_SUBST([DATADIR])
-MANDIR=`eval echo $mandir`
-MANDIR=`eval echo $MANDIR`
-AC_SUBST([MANDIR])
-
-dnl Support for building documentation.
-AC_PATH_PROG([XSLTPROC], [xsltproc], [false], [$PATH])
-if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then
-  DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl"
-elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then
-  DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets"
-else
-  dnl Documentation building will fail if this default gets used.
-  DEFAULT_XSLROOT=""
-fi
-AC_ARG_WITH([xslroot],
-  [AS_HELP_STRING([--with-xslroot=<path>], [XSL stylesheet root path])], [
-if test "x$with_xslroot" = "xno" ; then
-  XSLROOT="${DEFAULT_XSLROOT}"
-else
-  XSLROOT="${with_xslroot}"
-fi
-],
-  XSLROOT="${DEFAULT_XSLROOT}"
-)
-AC_SUBST([XSLROOT])
-
-dnl If CFLAGS isn't defined, set CFLAGS to something reasonable.  Otherwise,
-dnl just prevent autoconf from molesting CFLAGS.
-CFLAGS=$CFLAGS
-AC_PROG_CC
-
-if test "x$GCC" != "xyes" ; then
-  AC_CACHE_CHECK([whether compiler is MSVC],
-                 [je_cv_msvc],
-                 [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
-                                                     [
-#ifndef _MSC_VER
-  int fail[-1];
-#endif
-])],
-                               [je_cv_msvc=yes],
-                               [je_cv_msvc=no])])
-fi
-
-dnl check if a cray prgenv wrapper compiler is being used
-je_cv_cray_prgenv_wrapper=""
-if test "x${PE_ENV}" != "x" ; then
-  case "${CC}" in
-    CC|cc)
-	je_cv_cray_prgenv_wrapper="yes"
-	;;
-    *)
-       ;;
-  esac
-fi
-
-AC_CACHE_CHECK([whether compiler is cray],
-              [je_cv_cray],
-              [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
-                                                  [
-#ifndef _CRAYC
-  int fail[-1];
-#endif
-])],
-                            [je_cv_cray=yes],
-                            [je_cv_cray=no])])
-
-if test "x${je_cv_cray}" = "xyes" ; then
-  AC_CACHE_CHECK([whether cray compiler version is 8.4],
-                [je_cv_cray_84],
-                [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
-                                                      [
-#if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4)
-  int fail[-1];
-#endif
-])],
-                              [je_cv_cray_84=yes],
-                              [je_cv_cray_84=no])])
-fi
-
-if test "x$GCC" = "xyes" ; then
-  JE_CFLAGS_ADD([-std=gnu11])
-  if test "x$je_cv_cflags_added" = "x-std=gnu11" ; then
-    AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
-  else
-    JE_CFLAGS_ADD([-std=gnu99])
-    if test "x$je_cv_cflags_added" = "x-std=gnu99" ; then
-      AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
-    fi
-  fi
-  JE_CFLAGS_ADD([-Wall])
-  JE_CFLAGS_ADD([-Wshorten-64-to-32])
-  JE_CFLAGS_ADD([-Wsign-compare])
-  JE_CFLAGS_ADD([-pipe])
-  JE_CFLAGS_ADD([-g3])
-elif test "x$je_cv_msvc" = "xyes" ; then
-  CC="$CC -nologo"
-  JE_CFLAGS_ADD([-Zi])
-  JE_CFLAGS_ADD([-MT])
-  JE_CFLAGS_ADD([-W3])
-  JE_CFLAGS_ADD([-FS])
-  JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat)
-fi
-if test "x$je_cv_cray" = "xyes" ; then
-  dnl cray compiler 8.4 has an inlining bug
-  if test "x$je_cv_cray_84" = "xyes" ; then
-    JE_CFLAGS_ADD([-hipa2])
-    JE_CFLAGS_ADD([-hnognu])
-  fi
-  if test "x$enable_cc_silence" != "xno" ; then
-    dnl ignore unreachable code warning
-    JE_CFLAGS_ADD([-hnomessage=128])
-    dnl ignore redefinition of "malloc", "free", etc warning
-    JE_CFLAGS_ADD([-hnomessage=1357])
-  fi
-fi
-AC_SUBST([CONFIGURE_CFLAGS])
-AC_SUBST([SPECIFIED_CFLAGS])
-AC_SUBST([EXTRA_CFLAGS])
-AC_PROG_CPP
-
-AC_ARG_ENABLE([cxx],
-  [AS_HELP_STRING([--disable-cxx], [Disable C++ integration])],
-if test "x$enable_cxx" = "xno" ; then
-  enable_cxx="0"
-else
-  enable_cxx="1"
-fi
-,
-enable_cxx="1"
-)
-if test "x$enable_cxx" = "x1" ; then
-  dnl Require at least c++14, which is the first version to support sized
-  dnl deallocation.  C++ support is not compiled otherwise.
-  m4_include([m4/ax_cxx_compile_stdcxx.m4])
-  AX_CXX_COMPILE_STDCXX([14], [noext], [optional])
-  if test "x${HAVE_CXX14}" = "x1" ; then
-    JE_CXXFLAGS_ADD([-Wall])
-    JE_CXXFLAGS_ADD([-g3])
-
-    SAVED_LIBS="${LIBS}"
-    JE_APPEND_VS(LIBS, -lstdc++)
-    JE_COMPILABLE([libstdc++ linkage], [
-#include <stdlib.h>
-], [[
-	int *arr = (int *)malloc(sizeof(int) * 42);
-	if (arr == NULL)
-		return (1);
-]], [je_cv_libstdcxx])
-    if test "x${je_cv_libstdcxx}" = "xno" ; then
-      LIBS="${SAVED_LIBS}"
-    fi
-  else
-    enable_cxx="0"
-  fi
-fi
-AC_SUBST([enable_cxx])
-AC_SUBST([CONFIGURE_CXXFLAGS])
-AC_SUBST([SPECIFIED_CXXFLAGS])
-AC_SUBST([EXTRA_CXXFLAGS])
-
-AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0])
-if test "x${ac_cv_big_endian}" = "x1" ; then
-  AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ])
-fi
-
-if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then
-  JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat/C99)
-fi
-
-if test "x${je_cv_msvc}" = "xyes" ; then
-  LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN
-  AC_MSG_RESULT([Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit])
-else
-  AC_CHECK_SIZEOF([void *])
-  if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
-    LG_SIZEOF_PTR=3
-  elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
-    LG_SIZEOF_PTR=2
-  else
-    AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}])
-  fi
-fi
-AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR])
-
-AC_CHECK_SIZEOF([int])
-if test "x${ac_cv_sizeof_int}" = "x8" ; then
-  LG_SIZEOF_INT=3
-elif test "x${ac_cv_sizeof_int}" = "x4" ; then
-  LG_SIZEOF_INT=2
-else
-  AC_MSG_ERROR([Unsupported int size: ${ac_cv_sizeof_int}])
-fi
-AC_DEFINE_UNQUOTED([LG_SIZEOF_INT], [$LG_SIZEOF_INT])
-
-AC_CHECK_SIZEOF([long])
-if test "x${ac_cv_sizeof_long}" = "x8" ; then
-  LG_SIZEOF_LONG=3
-elif test "x${ac_cv_sizeof_long}" = "x4" ; then
-  LG_SIZEOF_LONG=2
-else
-  AC_MSG_ERROR([Unsupported long size: ${ac_cv_sizeof_long}])
-fi
-AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG], [$LG_SIZEOF_LONG])
-
-AC_CHECK_SIZEOF([long long])
-if test "x${ac_cv_sizeof_long_long}" = "x8" ; then
-  LG_SIZEOF_LONG_LONG=3
-elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then
-  LG_SIZEOF_LONG_LONG=2
-else
-  AC_MSG_ERROR([Unsupported long long size: ${ac_cv_sizeof_long_long}])
-fi
-AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG_LONG], [$LG_SIZEOF_LONG_LONG])
-
-AC_CHECK_SIZEOF([intmax_t])
-if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then
-  LG_SIZEOF_INTMAX_T=4
-elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then
-  LG_SIZEOF_INTMAX_T=3
-elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then
-  LG_SIZEOF_INTMAX_T=2
-else
-  AC_MSG_ERROR([Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}])
-fi
-AC_DEFINE_UNQUOTED([LG_SIZEOF_INTMAX_T], [$LG_SIZEOF_INTMAX_T])
-
-AC_CANONICAL_HOST
-dnl CPU-specific settings.
-CPU_SPINWAIT=""
-case "${host_cpu}" in
-  i686|x86_64)
-	if test "x${je_cv_msvc}" = "xyes" ; then
-	    AC_CACHE_VAL([je_cv_pause_msvc],
-	      [JE_COMPILABLE([pause instruction MSVC], [],
-					[[_mm_pause(); return 0;]],
-					[je_cv_pause_msvc])])
-	    if test "x${je_cv_pause_msvc}" = "xyes" ; then
-		CPU_SPINWAIT='_mm_pause()'
-	    fi
-	else
-	    AC_CACHE_VAL([je_cv_pause],
-	      [JE_COMPILABLE([pause instruction], [],
-					[[__asm__ volatile("pause"); return 0;]],
-					[je_cv_pause])])
-	    if test "x${je_cv_pause}" = "xyes" ; then
-		CPU_SPINWAIT='__asm__ volatile("pause")'
-	    fi
-	fi
-	;;
-  powerpc)
-	AC_DEFINE_UNQUOTED([HAVE_ALTIVEC], [ ])
-	;;
-  *)
-	;;
-esac
-AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT])
-
-LD_PRELOAD_VAR="LD_PRELOAD"
-so="so"
-importlib="${so}"
-o="$ac_objext"
-a="a"
-exe="$ac_exeext"
-libprefix="lib"
-link_whole_archive="0"
-DSO_LDFLAGS='-shared -Wl,-soname,$(@F)'
-RPATH='-Wl,-rpath,$(1)'
-SOREV="${so}.${rev}"
-PIC_CFLAGS='-fPIC -DPIC'
-CTARGET='-o $@'
-LDTARGET='-o $@'
-TEST_LD_MODE=
-EXTRA_LDFLAGS=
-ARFLAGS='crus'
-AROUT=' $@'
-CC_MM=1
-
-if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
-  TEST_LD_MODE='-dynamic'
-fi
-
-if test "x${je_cv_cray}" = "xyes" ; then
-  CC_MM=
-fi
-
-AN_MAKEVAR([AR], [AC_PROG_AR])
-AN_PROGRAM([ar], [AC_PROG_AR])
-AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)])
-AC_PROG_AR
-
-dnl Platform-specific settings.  abi and RPATH can probably be determined
-dnl programmatically, but doing so is error-prone, which makes it generally
-dnl not worth the trouble.
-dnl 
-dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
-dnl definitions need to be seen before any headers are included, which is a pain
-dnl to make happen otherwise.
-default_munmap="1"
-maps_coalesce="1"
-case "${host}" in
-  *-*-darwin* | *-*-ios*)
-	abi="macho"
-	RPATH=""
-	LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES"
-	so="dylib"
-	importlib="${so}"
-	force_tls="0"
-	DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)'
-	SOREV="${rev}.${so}"
-	sbrk_deprecated="1"
-	;;
-  *-*-freebsd*)
-	abi="elf"
-	AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ])
-	force_lazy_lock="1"
-	;;
-  *-*-dragonfly*)
-	abi="elf"
-	;;
-  *-*-openbsd*)
-	abi="elf"
-	force_tls="0"
-	;;
-  *-*-bitrig*)
-	abi="elf"
-	;;
-  *-*-linux-android)
-	dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
-	JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
-	abi="elf"
-	AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
-	AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
-	AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
-	AC_DEFINE([JEMALLOC_C11ATOMICS])
-	force_tls="0"
-	default_munmap="0"
-	;;
-  *-*-linux* | *-*-kfreebsd*)
-	dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
-	JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
-	abi="elf"
-	AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
-	AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
-	AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
-	AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ])
-	default_munmap="0"
-	;;
-  *-*-netbsd*)
-	AC_MSG_CHECKING([ABI])
-        AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
-[[#ifdef __ELF__
-/* ELF */
-#else
-#error aout
-#endif
-]])],
-                          [abi="elf"],
-                          [abi="aout"])
-	AC_MSG_RESULT([$abi])
-	;;
-  *-*-solaris2*)
-	abi="elf"
-	RPATH='-Wl,-R,$(1)'
-	dnl Solaris needs this for sigwait().
-	JE_APPEND_VS(CPPFLAGS, -D_POSIX_PTHREAD_SEMANTICS)
-	JE_APPEND_VS(LIBS, -lposix4 -lsocket -lnsl)
-	;;
-  *-ibm-aix*)
-	if test "$LG_SIZEOF_PTR" = "8"; then
-	  dnl 64bit AIX
-	  LD_PRELOAD_VAR="LDR_PRELOAD64"
-	else
-	  dnl 32bit AIX
-	  LD_PRELOAD_VAR="LDR_PRELOAD"
-	fi
-	abi="xcoff"
-	;;
-  *-*-mingw* | *-*-cygwin*)
-	abi="pecoff"
-	force_tls="0"
-	maps_coalesce="0"
-	RPATH=""
-	so="dll"
-	if test "x$je_cv_msvc" = "xyes" ; then
-	  importlib="lib"
-	  DSO_LDFLAGS="-LD"
-	  EXTRA_LDFLAGS="-link -DEBUG"
-	  CTARGET='-Fo$@'
-	  LDTARGET='-Fe$@'
-	  AR='lib'
-	  ARFLAGS='-nologo -out:'
-	  AROUT='$@'
-	  CC_MM=
-        else
-	  importlib="${so}"
-	  DSO_LDFLAGS="-shared"
-	  link_whole_archive="1"
-	fi
-	a="lib"
-	libprefix=""
-	SOREV="${so}"
-	PIC_CFLAGS=""
-	;;
-  *)
-	AC_MSG_RESULT([Unsupported operating system: ${host}])
-	abi="elf"
-	;;
-esac
-
-JEMALLOC_USABLE_SIZE_CONST=const
-AC_CHECK_HEADERS([malloc.h], [
-  AC_MSG_CHECKING([whether malloc_usable_size definition can use const argument])
-  AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
-    [#include <malloc.h>
-     #include <stddef.h>
-    size_t malloc_usable_size(const void *ptr);
-    ],
-    [])],[
-                AC_MSG_RESULT([yes])
-         ],[
-                JEMALLOC_USABLE_SIZE_CONST=
-                AC_MSG_RESULT([no])
-         ])
-])
-AC_DEFINE_UNQUOTED([JEMALLOC_USABLE_SIZE_CONST], [$JEMALLOC_USABLE_SIZE_CONST])
-AC_SUBST([abi])
-AC_SUBST([RPATH])
-AC_SUBST([LD_PRELOAD_VAR])
-AC_SUBST([so])
-AC_SUBST([importlib])
-AC_SUBST([o])
-AC_SUBST([a])
-AC_SUBST([exe])
-AC_SUBST([libprefix])
-AC_SUBST([link_whole_archive])
-AC_SUBST([DSO_LDFLAGS])
-AC_SUBST([EXTRA_LDFLAGS])
-AC_SUBST([SOREV])
-AC_SUBST([PIC_CFLAGS])
-AC_SUBST([CTARGET])
-AC_SUBST([LDTARGET])
-AC_SUBST([TEST_LD_MODE])
-AC_SUBST([MKLIB])
-AC_SUBST([ARFLAGS])
-AC_SUBST([AROUT])
-AC_SUBST([CC_MM])
-
-dnl Determine whether libm must be linked to use e.g. log(3).
-AC_SEARCH_LIBS([log], [m], , [AC_MSG_ERROR([Missing math functions])])
-if test "x$ac_cv_search_log" != "xnone required" ; then
-  LM="$ac_cv_search_log"
-else
-  LM=
-fi
-AC_SUBST(LM)
-
-JE_COMPILABLE([__attribute__ syntax],
-              [static __attribute__((unused)) void foo(void){}],
-              [],
-              [je_cv_attribute])
-if test "x${je_cv_attribute}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ])
-  if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then
-    JE_CFLAGS_ADD([-fvisibility=hidden])
-    JE_CXXFLAGS_ADD([-fvisibility=hidden])
-  fi
-fi
-dnl Check for tls_model attribute support (clang 3.0 still lacks support).
-JE_CFLAGS_SAVE()
-JE_CFLAGS_ADD([-Werror])
-JE_CFLAGS_ADD([-herror_on_warning])
-JE_COMPILABLE([tls_model attribute], [],
-              [static __thread int
-               __attribute__((tls_model("initial-exec"), unused)) foo;
-               foo = 0;],
-              [je_cv_tls_model])
-JE_CFLAGS_RESTORE()
-if test "x${je_cv_tls_model}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_TLS_MODEL],
-            [__attribute__((tls_model("initial-exec")))])
-else
-  AC_DEFINE([JEMALLOC_TLS_MODEL], [ ])
-fi
-dnl Check for alloc_size attribute support.
-JE_CFLAGS_SAVE()
-JE_CFLAGS_ADD([-Werror])
-JE_CFLAGS_ADD([-herror_on_warning])
-JE_COMPILABLE([alloc_size attribute], [#include <stdlib.h>],
-              [void *foo(size_t size) __attribute__((alloc_size(1)));],
-              [je_cv_alloc_size])
-JE_CFLAGS_RESTORE()
-if test "x${je_cv_alloc_size}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_HAVE_ATTR_ALLOC_SIZE], [ ])
-fi
-dnl Check for format(gnu_printf, ...) attribute support.
-JE_CFLAGS_SAVE()
-JE_CFLAGS_ADD([-Werror])
-JE_CFLAGS_ADD([-herror_on_warning])
-JE_COMPILABLE([format(gnu_printf, ...) attribute], [#include <stdlib.h>],
-              [void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));],
-              [je_cv_format_gnu_printf])
-JE_CFLAGS_RESTORE()
-if test "x${je_cv_format_gnu_printf}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF], [ ])
-fi
-dnl Check for format(printf, ...) attribute support.
-JE_CFLAGS_SAVE()
-JE_CFLAGS_ADD([-Werror])
-JE_CFLAGS_ADD([-herror_on_warning])
-JE_COMPILABLE([format(printf, ...) attribute], [#include <stdlib.h>],
-              [void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));],
-              [je_cv_format_printf])
-JE_CFLAGS_RESTORE()
-if test "x${je_cv_format_printf}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ])
-fi
-
-dnl Support optional additions to rpath.
-AC_ARG_WITH([rpath],
-  [AS_HELP_STRING([--with-rpath=<rpath>], [Colon-separated rpath (ELF systems only)])],
-if test "x$with_rpath" = "xno" ; then
-  RPATH_EXTRA=
-else
-  RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`"
-fi,
-  RPATH_EXTRA=
-)
-AC_SUBST([RPATH_EXTRA])
-
-dnl Disable rules that do automatic regeneration of configure output by default.
-AC_ARG_ENABLE([autogen],
-  [AS_HELP_STRING([--enable-autogen], [Automatically regenerate configure output])],
-if test "x$enable_autogen" = "xno" ; then
-  enable_autogen="0"
-else
-  enable_autogen="1"
-fi
-,
-enable_autogen="0"
-)
-AC_SUBST([enable_autogen])
-
-AC_PROG_INSTALL
-AC_PROG_RANLIB
-AC_PATH_PROG([LD], [ld], [false], [$PATH])
-AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH])
-
-public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx sdallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size"
-
-dnl Check for allocator-related functions that should be wrapped.
-AC_CHECK_FUNC([memalign],
-	      [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ])
-	       public_syms="${public_syms} memalign"])
-AC_CHECK_FUNC([valloc],
-	      [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ])
-	       public_syms="${public_syms} valloc"])
-
-dnl Do not compute test code coverage by default.
-GCOV_FLAGS=
-AC_ARG_ENABLE([code-coverage],
-  [AS_HELP_STRING([--enable-code-coverage],
-   [Enable code coverage])],
-[if test "x$enable_code_coverage" = "xno" ; then
-  enable_code_coverage="0"
-else
-  enable_code_coverage="1"
-fi
-],
-[enable_code_coverage="0"]
-)
-if test "x$enable_code_coverage" = "x1" ; then
-  deoptimize="no"
-  echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || deoptimize="yes"
-  if test "x${deoptimize}" = "xyes" ; then
-    JE_CFLAGS_ADD([-O0])
-  fi
-  JE_CFLAGS_ADD([-fprofile-arcs -ftest-coverage])
-  EXTRA_LDFLAGS="$EXTRA_LDFLAGS -fprofile-arcs -ftest-coverage"
-  AC_DEFINE([JEMALLOC_CODE_COVERAGE], [ ])
-fi
-AC_SUBST([enable_code_coverage])
-
-dnl Perform no name mangling by default.
-AC_ARG_WITH([mangling],
-  [AS_HELP_STRING([--with-mangling=<map>], [Mangle symbols in <map>])],
-  [mangling_map="$with_mangling"], [mangling_map=""])
-
-dnl Do not prefix public APIs by default.
-AC_ARG_WITH([jemalloc_prefix],
-  [AS_HELP_STRING([--with-jemalloc-prefix=<prefix>], [Prefix to prepend to all public APIs])],
-  [JEMALLOC_PREFIX="$with_jemalloc_prefix"],
-  [if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then
-  JEMALLOC_PREFIX=""
-else
-  JEMALLOC_PREFIX="je_"
-fi]
-)
-if test "x$JEMALLOC_PREFIX" != "x" ; then
-  JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"`
-  AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"])
-  AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"])
-fi
-AC_SUBST([JEMALLOC_CPREFIX])
-
-AC_ARG_WITH([export],
-  [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])],
-  [if test "x$with_export" = "xno"; then
-  AC_DEFINE([JEMALLOC_EXPORT],[])
-fi]
-)
-
-dnl Mangle library-private APIs.
-AC_ARG_WITH([private_namespace],
-  [AS_HELP_STRING([--with-private-namespace=<prefix>], [Prefix to prepend to all library-private APIs])],
-  [JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"],
-  [JEMALLOC_PRIVATE_NAMESPACE="je_"]
-)
-AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], [$JEMALLOC_PRIVATE_NAMESPACE])
-private_namespace="$JEMALLOC_PRIVATE_NAMESPACE"
-AC_SUBST([private_namespace])
-
-dnl Do not add suffix to installed files by default.
-AC_ARG_WITH([install_suffix],
-  [AS_HELP_STRING([--with-install-suffix=<suffix>], [Suffix to append to all installed files])],
-  [INSTALL_SUFFIX="$with_install_suffix"],
-  [INSTALL_SUFFIX=]
-)
-install_suffix="$INSTALL_SUFFIX"
-AC_SUBST([install_suffix])
-
-dnl Specify default malloc_conf.
-AC_ARG_WITH([malloc_conf],
-  [AS_HELP_STRING([--with-malloc-conf=<malloc_conf>], [config.malloc_conf options string])],
-  [JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf"],
-  [JEMALLOC_CONFIG_MALLOC_CONF=""]
-)
-config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF"
-AC_DEFINE_UNQUOTED([JEMALLOC_CONFIG_MALLOC_CONF], ["$config_malloc_conf"])
-
-dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of
-dnl jemalloc_protos_jet.h easy.
-je_="je_"
-AC_SUBST([je_])
-
-cfgoutputs_in="Makefile.in"
-cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in"
-cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in"
-cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in"
-cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in"
-cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in"
-cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in"
-cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in"
-cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_internal.h.in"
-cfgoutputs_in="${cfgoutputs_in} test/test.sh.in"
-cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in"
-
-cfgoutputs_out="Makefile"
-cfgoutputs_out="${cfgoutputs_out} jemalloc.pc"
-cfgoutputs_out="${cfgoutputs_out} doc/html.xsl"
-cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl"
-cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml"
-cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h"
-cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h"
-cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h"
-cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h"
-cfgoutputs_out="${cfgoutputs_out} test/test.sh"
-cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h"
-
-cfgoutputs_tup="Makefile"
-cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in"
-cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in"
-cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in"
-cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in"
-cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in"
-cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in"
-cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in"
-cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h"
-cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in"
-cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in"
-
-cfghdrs_in="include/jemalloc/jemalloc_defs.h.in"
-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in"
-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh"
-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_unnamespace.sh"
-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.txt"
-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh"
-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh"
-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/size_classes.sh"
-cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh"
-cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh"
-cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh"
-cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in"
-
-cfghdrs_out="include/jemalloc/jemalloc_defs.h"
-cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h"
-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_namespace.h"
-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_unnamespace.h"
-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt"
-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h"
-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h"
-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/size_classes.h"
-cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h"
-cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h"
-cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h"
-cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h"
-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h"
-cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h"
-
-cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in"
-cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in"
-cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in"
-
-dnl Silence irrelevant compiler warnings by default.
-AC_ARG_ENABLE([cc-silence],
-  [AS_HELP_STRING([--disable-cc-silence],
-                  [Do not silence irrelevant compiler warnings])],
-[if test "x$enable_cc_silence" = "xno" ; then
-  enable_cc_silence="0"
-else
-  enable_cc_silence="1"
-fi
-],
-[enable_cc_silence="1"]
-)
-if test "x$enable_cc_silence" = "x1" ; then
-  AC_DEFINE([JEMALLOC_CC_SILENCE], [ ])
-fi
-
-dnl Do not compile with debugging by default.
-AC_ARG_ENABLE([debug],
-  [AS_HELP_STRING([--enable-debug],
-                  [Build debugging code (implies --enable-ivsalloc)])],
-[if test "x$enable_debug" = "xno" ; then
-  enable_debug="0"
-else
-  enable_debug="1"
-fi
-],
-[enable_debug="0"]
-)
-if test "x$enable_debug" = "x1" ; then
-  AC_DEFINE([JEMALLOC_DEBUG], [ ])
-  enable_ivsalloc="1"
-fi
-AC_SUBST([enable_debug])
-
-dnl Do not validate pointers by default.
-AC_ARG_ENABLE([ivsalloc],
-  [AS_HELP_STRING([--enable-ivsalloc],
-                  [Validate pointers passed through the public API])],
-[if test "x$enable_ivsalloc" = "xno" ; then
-  enable_ivsalloc="0"
-else
-  enable_ivsalloc="1"
-fi
-],
-[enable_ivsalloc="0"]
-)
-if test "x$enable_ivsalloc" = "x1" ; then
-  AC_DEFINE([JEMALLOC_IVSALLOC], [ ])
-fi
-
-dnl Only optimize if not debugging.
-if test "x$enable_debug" = "x0" ; then
-  if test "x$GCC" = "xyes" ; then
-    JE_CFLAGS_ADD([-O3])
-    JE_CXXFLAGS_ADD([-O3])
-    JE_CFLAGS_ADD([-funroll-loops])
-  elif test "x$je_cv_msvc" = "xyes" ; then
-    JE_CFLAGS_ADD([-O2])
-    JE_CXXFLAGS_ADD([-O2])
-  else
-    JE_CFLAGS_ADD([-O])
-    JE_CXXFLAGS_ADD([-O])
-  fi
-fi
-
-dnl Enable statistics calculation by default.
-AC_ARG_ENABLE([stats],
-  [AS_HELP_STRING([--disable-stats],
-                  [Disable statistics calculation/reporting])],
-[if test "x$enable_stats" = "xno" ; then
-  enable_stats="0"
-else
-  enable_stats="1"
-fi
-],
-[enable_stats="1"]
-)
-if test "x$enable_stats" = "x1" ; then
-  AC_DEFINE([JEMALLOC_STATS], [ ])
-fi
-AC_SUBST([enable_stats])
-
-dnl Do not enable profiling by default.
-AC_ARG_ENABLE([prof],
-  [AS_HELP_STRING([--enable-prof], [Enable allocation profiling])],
-[if test "x$enable_prof" = "xno" ; then
-  enable_prof="0"
-else
-  enable_prof="1"
-fi
-],
-[enable_prof="0"]
-)
-if test "x$enable_prof" = "x1" ; then
-  backtrace_method=""
-else
-  backtrace_method="N/A"
-fi
-
-AC_ARG_ENABLE([prof-libunwind],
-  [AS_HELP_STRING([--enable-prof-libunwind], [Use libunwind for backtracing])],
-[if test "x$enable_prof_libunwind" = "xno" ; then
-  enable_prof_libunwind="0"
-else
-  enable_prof_libunwind="1"
-fi
-],
-[enable_prof_libunwind="0"]
-)
-AC_ARG_WITH([static_libunwind],
-  [AS_HELP_STRING([--with-static-libunwind=<libunwind.a>],
-  [Path to static libunwind library; use rather than dynamically linking])],
-if test "x$with_static_libunwind" = "xno" ; then
-  LUNWIND="-lunwind"
-else
-  if test ! -f "$with_static_libunwind" ; then
-    AC_MSG_ERROR([Static libunwind not found: $with_static_libunwind])
-  fi
-  LUNWIND="$with_static_libunwind"
-fi,
-  LUNWIND="-lunwind"
-)
-if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then
-  AC_CHECK_HEADERS([libunwind.h], , [enable_prof_libunwind="0"])
-  if test "x$LUNWIND" = "x-lunwind" ; then
-    AC_CHECK_LIB([unwind], [unw_backtrace], [JE_APPEND_VS(LIBS, $LUNWIND)],
-                 [enable_prof_libunwind="0"])
-  else
-    JE_APPEND_VS(LIBS, $LUNWIND)
-  fi
-  if test "x${enable_prof_libunwind}" = "x1" ; then
-    backtrace_method="libunwind"
-    AC_DEFINE([JEMALLOC_PROF_LIBUNWIND], [ ])
-  fi
-fi
-
-AC_ARG_ENABLE([prof-libgcc],
-  [AS_HELP_STRING([--disable-prof-libgcc],
-  [Do not use libgcc for backtracing])],
-[if test "x$enable_prof_libgcc" = "xno" ; then
-  enable_prof_libgcc="0"
-else
-  enable_prof_libgcc="1"
-fi
-],
-[enable_prof_libgcc="1"]
-)
-if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \
-     -a "x$GCC" = "xyes" ; then
-  AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"])
-  AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [JE_APPEND_VS(LIBS, -lgcc)], [enable_prof_libgcc="0"])
-  if test "x${enable_prof_libgcc}" = "x1" ; then
-    backtrace_method="libgcc"
-    AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ])
-  fi
-else
-  enable_prof_libgcc="0"
-fi
-
-AC_ARG_ENABLE([prof-gcc],
-  [AS_HELP_STRING([--disable-prof-gcc],
-  [Do not use gcc intrinsics for backtracing])],
-[if test "x$enable_prof_gcc" = "xno" ; then
-  enable_prof_gcc="0"
-else
-  enable_prof_gcc="1"
-fi
-],
-[enable_prof_gcc="1"]
-)
-if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \
-     -a "x$GCC" = "xyes" ; then
-  JE_CFLAGS_ADD([-fno-omit-frame-pointer])
-  backtrace_method="gcc intrinsics"
-  AC_DEFINE([JEMALLOC_PROF_GCC], [ ])
-else
-  enable_prof_gcc="0"
-fi
-
-if test "x$backtrace_method" = "x" ; then
-  backtrace_method="none (disabling profiling)"
-  enable_prof="0"
-fi
-AC_MSG_CHECKING([configured backtracing method])
-AC_MSG_RESULT([$backtrace_method])
-if test "x$enable_prof" = "x1" ; then
-  dnl Heap profiling uses the log(3) function.
-  JE_APPEND_VS(LIBS, $LM)
-
-  AC_DEFINE([JEMALLOC_PROF], [ ])
-fi
-AC_SUBST([enable_prof])
-
-dnl Enable thread-specific caching by default.
-AC_ARG_ENABLE([tcache],
-  [AS_HELP_STRING([--disable-tcache], [Disable per thread caches])],
-[if test "x$enable_tcache" = "xno" ; then
-  enable_tcache="0"
-else
-  enable_tcache="1"
-fi
-],
-[enable_tcache="1"]
-)
-if test "x$enable_tcache" = "x1" ; then
-  AC_DEFINE([JEMALLOC_TCACHE], [ ])
-fi
-AC_SUBST([enable_tcache])
-
-dnl Indicate whether adjacent virtual memory mappings automatically coalesce
-dnl (and fragment on demand).
-if test "x${maps_coalesce}" = "x1" ; then
-  AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ])
-fi
-
-dnl Enable VM deallocation via munmap() by default.
-AC_ARG_ENABLE([munmap],
-  [AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])],
-[if test "x$enable_munmap" = "xno" ; then
-  enable_munmap="0"
-else
-  enable_munmap="1"
-fi
-],
-[enable_munmap="${default_munmap}"]
-)
-if test "x$enable_munmap" = "x1" ; then
-  AC_DEFINE([JEMALLOC_MUNMAP], [ ])
-fi
-AC_SUBST([enable_munmap])
-
-dnl Enable allocation from DSS if supported by the OS.
-have_dss="1"
-dnl Check whether the BSD/SUSv1 sbrk() exists.  If not, disable DSS support.
-AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"])
-if test "x$have_sbrk" = "x1" ; then
-  if test "x$sbrk_deprecated" = "x1" ; then
-    AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated])
-    have_dss="0"
-  fi
-else
-  have_dss="0"
-fi
-
-if test "x$have_dss" = "x1" ; then
-  AC_DEFINE([JEMALLOC_DSS], [ ])
-fi
-
-dnl Support the junk/zero filling option by default.
-AC_ARG_ENABLE([fill],
-  [AS_HELP_STRING([--disable-fill], [Disable support for junk/zero filling])],
-[if test "x$enable_fill" = "xno" ; then
-  enable_fill="0"
-else
-  enable_fill="1"
-fi
-],
-[enable_fill="1"]
-)
-if test "x$enable_fill" = "x1" ; then
-  AC_DEFINE([JEMALLOC_FILL], [ ])
-fi
-AC_SUBST([enable_fill])
-
-dnl Disable utrace(2)-based tracing by default.
-AC_ARG_ENABLE([utrace],
-  [AS_HELP_STRING([--enable-utrace], [Enable utrace(2)-based tracing])],
-[if test "x$enable_utrace" = "xno" ; then
-  enable_utrace="0"
-else
-  enable_utrace="1"
-fi
-],
-[enable_utrace="0"]
-)
-JE_COMPILABLE([utrace(2)], [
-#include <sys/types.h>
-#include <sys/param.h>
-#include <sys/time.h>
-#include <sys/uio.h>
-#include <sys/ktrace.h>
-], [
-	utrace((void *)0, 0);
-], [je_cv_utrace])
-if test "x${je_cv_utrace}" = "xno" ; then
-  enable_utrace="0"
-fi
-if test "x$enable_utrace" = "x1" ; then
-  AC_DEFINE([JEMALLOC_UTRACE], [ ])
-fi
-AC_SUBST([enable_utrace])
-
-dnl Do not support the xmalloc option by default.
-AC_ARG_ENABLE([xmalloc],
-  [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])],
-[if test "x$enable_xmalloc" = "xno" ; then
-  enable_xmalloc="0"
-else
-  enable_xmalloc="1"
-fi
-],
-[enable_xmalloc="0"]
-)
-if test "x$enable_xmalloc" = "x1" ; then
-  AC_DEFINE([JEMALLOC_XMALLOC], [ ])
-fi
-AC_SUBST([enable_xmalloc])
-
-dnl Support cache-oblivious allocation alignment by default.
-AC_ARG_ENABLE([cache-oblivious],
-  [AS_HELP_STRING([--disable-cache-oblivious],
-                  [Disable support for cache-oblivious allocation alignment])],
-[if test "x$enable_cache_oblivious" = "xno" ; then
-  enable_cache_oblivious="0"
-else
-  enable_cache_oblivious="1"
-fi
-],
-[enable_cache_oblivious="1"]
-)
-if test "x$enable_cache_oblivious" = "x1" ; then
-  AC_DEFINE([JEMALLOC_CACHE_OBLIVIOUS], [ ])
-fi
-AC_SUBST([enable_cache_oblivious])
-
-
-
-JE_COMPILABLE([a program using __builtin_unreachable], [
-void foo (void) {
-  __builtin_unreachable();
-}
-], [
-	{
-		foo();
-	}
-], [je_cv_gcc_builtin_unreachable])
-if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [__builtin_unreachable])
-else
-  AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [abort])
-fi
-
-dnl ============================================================================
-dnl Check for __builtin_ffsl(), then ffsl(3), and fail if neither is found.
-dnl One of those two functions should (theoretically) exist on all platforms
-dnl that jemalloc currently has a chance of functioning on without modification.
-dnl We additionally assume ffs[ll]() or __builtin_ffs[ll]() are defined if
-dnl ffsl() or __builtin_ffsl() are defined, respectively.
-JE_COMPILABLE([a program using __builtin_ffsl], [
-#include <stdio.h>
-#include <strings.h>
-#include <string.h>
-], [
-	{
-		int rv = __builtin_ffsl(0x08);
-		printf("%d\n", rv);
-	}
-], [je_cv_gcc_builtin_ffsl])
-if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [__builtin_ffsll])
-  AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [__builtin_ffsl])
-  AC_DEFINE([JEMALLOC_INTERNAL_FFS], [__builtin_ffs])
-else
-  JE_COMPILABLE([a program using ffsl], [
-  #include <stdio.h>
-  #include <strings.h>
-  #include <string.h>
-  ], [
-	{
-		int rv = ffsl(0x08);
-		printf("%d\n", rv);
-	}
-  ], [je_cv_function_ffsl])
-  if test "x${je_cv_function_ffsl}" = "xyes" ; then
-    AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [ffsll])
-    AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [ffsl])
-    AC_DEFINE([JEMALLOC_INTERNAL_FFS], [ffs])
-  else
-    AC_MSG_ERROR([Cannot build without ffsl(3) or __builtin_ffsl()])
-  fi
-fi
-
-AC_ARG_WITH([lg_tiny_min],
-  [AS_HELP_STRING([--with-lg-tiny-min=<lg-tiny-min>],
-   [Base 2 log of minimum tiny size class to support])],
-  [LG_TINY_MIN="$with_lg_tiny_min"],
-  [LG_TINY_MIN="3"])
-AC_DEFINE_UNQUOTED([LG_TINY_MIN], [$LG_TINY_MIN])
-
-AC_ARG_WITH([lg_quantum],
-  [AS_HELP_STRING([--with-lg-quantum=<lg-quantum>],
-   [Base 2 log of minimum allocation alignment])],
-  [LG_QUANTA="$with_lg_quantum"],
-  [LG_QUANTA="3 4"])
-if test "x$with_lg_quantum" != "x" ; then
-  AC_DEFINE_UNQUOTED([LG_QUANTUM], [$with_lg_quantum])
-fi
-
-AC_ARG_WITH([lg_page],
-  [AS_HELP_STRING([--with-lg-page=<lg-page>], [Base 2 log of system page size])],
-  [LG_PAGE="$with_lg_page"], [LG_PAGE="detect"])
-if test "x$LG_PAGE" = "xdetect"; then
-  AC_CACHE_CHECK([LG_PAGE],
-               [je_cv_lg_page],
-               AC_RUN_IFELSE([AC_LANG_PROGRAM(
-[[
-#include <strings.h>
-#ifdef _WIN32
-#include <windows.h>
-#else
-#include <unistd.h>
-#endif
-#include <stdio.h>
-]],
-[[
-    int result;
-    FILE *f;
-
-#ifdef _WIN32
-    SYSTEM_INFO si;
-    GetSystemInfo(&si);
-    result = si.dwPageSize;
-#else
-    result = sysconf(_SC_PAGESIZE);
-#endif
-    if (result == -1) {
-	return 1;
-    }
-    result = JEMALLOC_INTERNAL_FFSL(result) - 1;
-
-    f = fopen("conftest.out", "w");
-    if (f == NULL) {
-	return 1;
-    }
-    fprintf(f, "%d", result);
-    fclose(f);
-
-    return 0;
-]])],
-                             [je_cv_lg_page=`cat conftest.out`],
-                             [je_cv_lg_page=undefined],
-                             [je_cv_lg_page=12]))
-fi
-if test "x${je_cv_lg_page}" != "x" ; then
-  LG_PAGE="${je_cv_lg_page}"
-fi
-if test "x${LG_PAGE}" != "xundefined" ; then
-   AC_DEFINE_UNQUOTED([LG_PAGE], [$LG_PAGE])
-else
-   AC_MSG_ERROR([cannot determine value for LG_PAGE])
-fi
-
-AC_ARG_WITH([lg_hugepage],
-  [AS_HELP_STRING([--with-lg-hugepage=<lg-hugepage>],
-   [Base 2 log of system huge page size])],
-  [je_cv_lg_hugepage="${with_lg_hugepage}"],
-  [je_cv_lg_hugepage=""])
-if test "x${je_cv_lg_hugepage}" = "x" ; then
-  dnl Look in /proc/meminfo (Linux-specific) for information on the default huge
-  dnl page size, if any.  The relevant line looks like:
-  dnl
-  dnl   Hugepagesize:       2048 kB
-  if test -e "/proc/meminfo" ; then
-    hpsk=[`cat /proc/meminfo 2>/dev/null | \
-          grep -e '^Hugepagesize:[[:space:]]\+[0-9]\+[[:space:]]kB$' | \
-          awk '{print $2}'`]
-    if test "x${hpsk}" != "x" ; then
-      je_cv_lg_hugepage=10
-      while test "${hpsk}" -gt 1 ; do
-        hpsk="$((hpsk / 2))"
-        je_cv_lg_hugepage="$((je_cv_lg_hugepage + 1))"
-      done
-    fi
-  fi
-
-  dnl Set default if unable to automatically configure.
-  if test "x${je_cv_lg_hugepage}" = "x" ; then
-    je_cv_lg_hugepage=21
-  fi
-fi
-AC_DEFINE_UNQUOTED([LG_HUGEPAGE], [${je_cv_lg_hugepage}])
-
-AC_ARG_WITH([lg_page_sizes],
-  [AS_HELP_STRING([--with-lg-page-sizes=<lg-page-sizes>],
-   [Base 2 logs of system page sizes to support])],
-  [LG_PAGE_SIZES="$with_lg_page_sizes"], [LG_PAGE_SIZES="$LG_PAGE"])
-
-AC_ARG_WITH([lg_size_class_group],
-  [AS_HELP_STRING([--with-lg-size-class-group=<lg-size-class-group>],
-   [Base 2 log of size classes per doubling])],
-  [LG_SIZE_CLASS_GROUP="$with_lg_size_class_group"],
-  [LG_SIZE_CLASS_GROUP="2"])
-
-dnl ============================================================================
-dnl jemalloc configuration.
-dnl 
-
-AC_ARG_WITH([version],
-  [AS_HELP_STRING([--with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>],
-   [Version string])],
-  [
-    echo "${with_version}" | grep ['^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$'] 2>&1 1>/dev/null
-    if test $? -ne 0 ; then
-      AC_MSG_ERROR([${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid>])
-    fi
-    echo "$with_version" > "${objroot}VERSION"
-  ], [
-    dnl Set VERSION if source directory is inside a git repository.
-    if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
-      dnl Pattern globs aren't powerful enough to match both single- and
-      dnl double-digit version numbers, so iterate over patterns to support up
-      dnl to version 99.99.99 without any accidental matches.
-      for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \
-                     '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \
-                     '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \
-                     '[0-9][0-9].[0-9][0-9].[0-9]' \
-                     '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do
-        (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null
-        if test $? -eq 0 ; then
-          mv "${objroot}VERSION.tmp" "${objroot}VERSION"
-          break
-        fi
-      done
-    fi
-    rm -f "${objroot}VERSION.tmp"
-  ])
-
-if test ! -e "${objroot}VERSION" ; then
-  if test ! -e "${srcroot}VERSION" ; then
-    AC_MSG_RESULT(
-      [Missing VERSION file, and unable to generate it; creating bogus VERSION])
-    echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION"
-  else
-    cp ${srcroot}VERSION ${objroot}VERSION
-  fi
-fi
-jemalloc_version=`cat "${objroot}VERSION"`
-jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]1}'`
-jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]2}'`
-jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]3}'`
-jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]4}'`
-jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]5}'`
-AC_SUBST([jemalloc_version])
-AC_SUBST([jemalloc_version_major])
-AC_SUBST([jemalloc_version_minor])
-AC_SUBST([jemalloc_version_bugfix])
-AC_SUBST([jemalloc_version_nrev])
-AC_SUBST([jemalloc_version_gid])
-
-dnl ============================================================================
-dnl Configure pthreads.
-
-if test "x$abi" != "xpecoff" ; then
-  AC_CHECK_HEADERS([pthread.h], , [AC_MSG_ERROR([pthread.h is missing])])
-  dnl Some systems may embed pthreads functionality in libc; check for libpthread
-  dnl first, but try libc too before failing.
-  AC_CHECK_LIB([pthread], [pthread_create], [JE_APPEND_VS(LIBS, -lpthread)],
-               [AC_SEARCH_LIBS([pthread_create], , ,
-                               AC_MSG_ERROR([libpthread is missing]))])
-  JE_COMPILABLE([pthread_atfork(3)], [
-#include <pthread.h>
-], [
-  pthread_atfork((void *)0, (void *)0, (void *)0);
-], [je_cv_pthread_atfork])
-  if test "x${je_cv_pthread_atfork}" = "xyes" ; then
-    AC_DEFINE([JEMALLOC_HAVE_PTHREAD_ATFORK], [ ])
-  fi
-fi
-
-JE_APPEND_VS(CFLAGS, -D_REENTRANT)
-
-dnl Check whether clock_gettime(2) is in libc or librt.
-AC_SEARCH_LIBS([clock_gettime], [rt])
-
-dnl Cray wrapper compiler often adds `-lrt` when using `-static`. Check with
-dnl `-dynamic` as well in case a user tries to dynamically link in jemalloc
-if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
-  if test "$ac_cv_search_clock_gettime" != "-lrt"; then
-    JE_CFLAGS_SAVE()
-
-    unset ac_cv_search_clock_gettime
-    JE_CFLAGS_ADD([-dynamic])
-    AC_SEARCH_LIBS([clock_gettime], [rt])
-
-    JE_CFLAGS_RESTORE()
-  fi
-fi
-
-dnl check for CLOCK_MONOTONIC_COARSE (Linux-specific).
-JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC_COARSE, ...)], [
-#include <time.h>
-], [
-	struct timespec ts;
-
-	clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
-], [je_cv_clock_monotonic_coarse])
-if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE])
-fi
-
-dnl check for CLOCK_MONOTONIC.
-JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC, ...)], [
-#include <unistd.h>
-#include <time.h>
-], [
-	struct timespec ts;
-
-	clock_gettime(CLOCK_MONOTONIC, &ts);
-#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0
-#  error _POSIX_MONOTONIC_CLOCK missing/invalid
-#endif
-], [je_cv_clock_monotonic])
-if test "x${je_cv_clock_monotonic}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC])
-fi
-
-dnl Check for mach_absolute_time().
-JE_COMPILABLE([mach_absolute_time()], [
-#include <mach/mach_time.h>
-], [
-	mach_absolute_time();
-], [je_cv_mach_absolute_time])
-if test "x${je_cv_mach_absolute_time}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_HAVE_MACH_ABSOLUTE_TIME])
-fi
-
-dnl Use syscall(2) (if available) by default.
-AC_ARG_ENABLE([syscall],
-  [AS_HELP_STRING([--disable-syscall], [Disable use of syscall(2)])],
-[if test "x$enable_syscall" = "xno" ; then
-  enable_syscall="0"
-else
-  enable_syscall="1"
-fi
-],
-[enable_syscall="1"]
-)
-if test "x$enable_syscall" = "x1" ; then
-  dnl Check if syscall(2) is usable.  Treat warnings as errors, so that e.g. OS
-  dnl X 10.12's deprecation warning prevents use.
-  JE_CFLAGS_SAVE()
-  JE_CFLAGS_ADD([-Werror])
-  JE_COMPILABLE([syscall(2)], [
-#include <sys/syscall.h>
-#include <unistd.h>
-], [
-	syscall(SYS_write, 2, "hello", 5);
-],
-                [je_cv_syscall])
-  JE_CFLAGS_RESTORE()
-  if test "x$je_cv_syscall" = "xyes" ; then
-    AC_DEFINE([JEMALLOC_USE_SYSCALL], [ ])
-  fi
-fi
-
-dnl Check if the GNU-specific secure_getenv function exists.
-AC_CHECK_FUNC([secure_getenv],
-              [have_secure_getenv="1"],
-              [have_secure_getenv="0"]
-             )
-if test "x$have_secure_getenv" = "x1" ; then
-  AC_DEFINE([JEMALLOC_HAVE_SECURE_GETENV], [ ])
-fi
-
-dnl Check if the Solaris/BSD issetugid function exists.
-AC_CHECK_FUNC([issetugid],
-              [have_issetugid="1"],
-              [have_issetugid="0"]
-             )
-if test "x$have_issetugid" = "x1" ; then
-  AC_DEFINE([JEMALLOC_HAVE_ISSETUGID], [ ])
-fi
-
-dnl Check whether the BSD-specific _malloc_thread_cleanup() exists.  If so, use
-dnl it rather than pthreads TSD cleanup functions to support cleanup during
-dnl thread exit, in order to avoid pthreads library recursion during
-dnl bootstrapping.
-AC_CHECK_FUNC([_malloc_thread_cleanup],
-              [have__malloc_thread_cleanup="1"],
-              [have__malloc_thread_cleanup="0"]
-             )
-if test "x$have__malloc_thread_cleanup" = "x1" ; then
-  AC_DEFINE([JEMALLOC_MALLOC_THREAD_CLEANUP], [ ])
-  force_tls="1"
-fi
-
-dnl Check whether the BSD-specific _pthread_mutex_init_calloc_cb() exists.  If
-dnl so, mutex initialization causes allocation, and we need to implement this
-dnl callback function in order to prevent recursive allocation.
-AC_CHECK_FUNC([_pthread_mutex_init_calloc_cb],
-              [have__pthread_mutex_init_calloc_cb="1"],
-              [have__pthread_mutex_init_calloc_cb="0"]
-             )
-if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then
-  AC_DEFINE([JEMALLOC_MUTEX_INIT_CB])
-fi
-
-dnl Disable lazy locking by default.
-AC_ARG_ENABLE([lazy_lock],
-  [AS_HELP_STRING([--enable-lazy-lock],
-  [Enable lazy locking (only lock when multi-threaded)])],
-[if test "x$enable_lazy_lock" = "xno" ; then
-  enable_lazy_lock="0"
-else
-  enable_lazy_lock="1"
-fi
-],
-[enable_lazy_lock=""]
-)
-if test "x${enable_lazy_lock}" = "x" ; then
-  if test "x${force_lazy_lock}" = "x1" ; then
-    AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues])
-    enable_lazy_lock="1"
-  else
-    enable_lazy_lock="0"
-  fi
-fi
-if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then
-  AC_MSG_RESULT([Forcing no lazy-lock because thread creation monitoring is unimplemented])
-  enable_lazy_lock="0"
-fi
-if test "x$enable_lazy_lock" = "x1" ; then
-  if test "x$abi" != "xpecoff" ; then
-    AC_CHECK_HEADERS([dlfcn.h], , [AC_MSG_ERROR([dlfcn.h is missing])])
-    AC_CHECK_FUNC([dlsym], [],
-      [AC_CHECK_LIB([dl], [dlsym], [JE_APPEND_VS(LIBS, -ldl)],
-                    [AC_MSG_ERROR([libdl is missing])])
-      ])
-  fi
-  AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ])
-fi
-AC_SUBST([enable_lazy_lock])
-
-AC_ARG_ENABLE([tls],
-  [AS_HELP_STRING([--disable-tls], [Disable thread-local storage (__thread keyword)])],
-if test "x$enable_tls" = "xno" ; then
-  enable_tls="0"
-else
-  enable_tls="1"
-fi
-,
-enable_tls=""
-)
-if test "x${enable_tls}" = "x" ; then
-  if test "x${force_tls}" = "x1" ; then
-    AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues])
-    enable_tls="1"
-  elif test "x${force_tls}" = "x0" ; then
-    AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues])
-    enable_tls="0"
-  else
-    enable_tls="1"
-  fi
-fi
-if test "x${enable_tls}" = "x1" ; then
-AC_MSG_CHECKING([for TLS])
-AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
-[[
-    __thread int x;
-]], [[
-    x = 42;
-
-    return 0;
-]])],
-              AC_MSG_RESULT([yes]),
-              AC_MSG_RESULT([no])
-              enable_tls="0")
-else
-  enable_tls="0"
-fi
-AC_SUBST([enable_tls])
-if test "x${enable_tls}" = "x1" ; then
-  if test "x${force_tls}" = "x0" ; then
-    AC_MSG_WARN([TLS enabled despite being marked unusable on this platform])
-  fi
-  AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ])
-elif test "x${force_tls}" = "x1" ; then
-  AC_MSG_WARN([TLS disabled despite being marked critical on this platform])
-fi
-
-dnl ============================================================================
-dnl Check for C11 atomics.
-
-JE_COMPILABLE([C11 atomics], [
-#include <stdint.h>
-#if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
-#include <stdatomic.h>
-#else
-#error Atomics not available
-#endif
-], [
-    uint64_t *p = (uint64_t *)0;
-    uint64_t x = 1;
-    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
-    uint64_t r = atomic_fetch_add(a, x) + x;
-    return (r == 0);
-], [je_cv_c11atomics])
-if test "x${je_cv_c11atomics}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_C11ATOMICS])
-fi
-
-dnl ============================================================================
-dnl Check for atomic(9) operations as provided on FreeBSD.
-
-JE_COMPILABLE([atomic(9)], [
-#include <sys/types.h>
-#include <machine/atomic.h>
-#include <inttypes.h>
-], [
-	{
-		uint32_t x32 = 0;
-		volatile uint32_t *x32p = &x32;
-		atomic_fetchadd_32(x32p, 1);
-	}
-	{
-		unsigned long xlong = 0;
-		volatile unsigned long *xlongp = &xlong;
-		atomic_fetchadd_long(xlongp, 1);
-	}
-], [je_cv_atomic9])
-if test "x${je_cv_atomic9}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_ATOMIC9])
-fi
-
-dnl ============================================================================
-dnl Check for atomic(3) operations as provided on Darwin.
-
-JE_COMPILABLE([Darwin OSAtomic*()], [
-#include <libkern/OSAtomic.h>
-#include <inttypes.h>
-], [
-	{
-		int32_t x32 = 0;
-		volatile int32_t *x32p = &x32;
-		OSAtomicAdd32(1, x32p);
-	}
-	{
-		int64_t x64 = 0;
-		volatile int64_t *x64p = &x64;
-		OSAtomicAdd64(1, x64p);
-	}
-], [je_cv_osatomic])
-if test "x${je_cv_osatomic}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_OSATOMIC], [ ])
-fi
-
-dnl ============================================================================
-dnl Check for madvise(2).
-
-JE_COMPILABLE([madvise(2)], [
-#include <sys/mman.h>
-], [
-	madvise((void *)0, 0, 0);
-], [je_cv_madvise])
-if test "x${je_cv_madvise}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_HAVE_MADVISE], [ ])
-
-  dnl Check for madvise(..., MADV_FREE).
-  JE_COMPILABLE([madvise(..., MADV_FREE)], [
-#include <sys/mman.h>
-], [
-	madvise((void *)0, 0, MADV_FREE);
-], [je_cv_madv_free])
-  if test "x${je_cv_madv_free}" = "xyes" ; then
-    AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
-  fi
-
-  dnl Check for madvise(..., MADV_DONTNEED).
-  JE_COMPILABLE([madvise(..., MADV_DONTNEED)], [
-#include <sys/mman.h>
-], [
-	madvise((void *)0, 0, MADV_DONTNEED);
-], [je_cv_madv_dontneed])
-  if test "x${je_cv_madv_dontneed}" = "xyes" ; then
-    AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ])
-  fi
-
-  dnl Check for madvise(..., MADV_[NO]HUGEPAGE).
-  JE_COMPILABLE([madvise(..., MADV_[[NO]]HUGEPAGE)], [
-#include <sys/mman.h>
-], [
-	madvise((void *)0, 0, MADV_HUGEPAGE);
-	madvise((void *)0, 0, MADV_NOHUGEPAGE);
-], [je_cv_thp])
-  if test "x${je_cv_thp}" = "xyes" ; then
-    AC_DEFINE([JEMALLOC_THP], [ ])
-  fi
-fi
-
-dnl ============================================================================
-dnl Check whether __sync_{add,sub}_and_fetch() are available despite
-dnl __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros being undefined.
-
-AC_DEFUN([JE_SYNC_COMPARE_AND_SWAP_CHECK],[
-  AC_CACHE_CHECK([whether to force $1-bit __sync_{add,sub}_and_fetch()],
-               [je_cv_sync_compare_and_swap_$2],
-               [AC_LINK_IFELSE([AC_LANG_PROGRAM([
-                                                 #include <stdint.h>
-                                                ],
-                                                [
-                                                 #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_$2
-                                                 {
-                                                    uint$1_t x$1 = 0;
-                                                    __sync_add_and_fetch(&x$1, 42);
-                                                    __sync_sub_and_fetch(&x$1, 1);
-                                                 }
-                                                 #else
-                                                 #error __GCC_HAVE_SYNC_COMPARE_AND_SWAP_$2 is defined, no need to force
-                                                 #endif
-                                                ])],
-                               [je_cv_sync_compare_and_swap_$2=yes],
-                               [je_cv_sync_compare_and_swap_$2=no])])
-
-  if test "x${je_cv_sync_compare_and_swap_$2}" = "xyes" ; then
-    AC_DEFINE([JE_FORCE_SYNC_COMPARE_AND_SWAP_$2], [ ])
-  fi
-])
-
-if test "x${je_cv_atomic9}" != "xyes" -a "x${je_cv_osatomic}" != "xyes" ; then
-  JE_SYNC_COMPARE_AND_SWAP_CHECK(32, 4)
-  JE_SYNC_COMPARE_AND_SWAP_CHECK(64, 8)
-fi
-
-dnl ============================================================================
-dnl Check for __builtin_clz() and __builtin_clzl().
-
-AC_CACHE_CHECK([for __builtin_clz],
-               [je_cv_builtin_clz],
-               [AC_LINK_IFELSE([AC_LANG_PROGRAM([],
-                                                [
-                                                {
-                                                        unsigned x = 0;
-                                                        int y = __builtin_clz(x);
-                                                }
-                                                {
-                                                        unsigned long x = 0;
-                                                        int y = __builtin_clzl(x);
-                                                }
-                                                ])],
-                               [je_cv_builtin_clz=yes],
-                               [je_cv_builtin_clz=no])])
-
-if test "x${je_cv_builtin_clz}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_HAVE_BUILTIN_CLZ], [ ])
-fi
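As a hedged aside on why these builtins matter: counting leading zeros yields a fast floor(log2), the kind of bit arithmetic allocators use for size-class computations. A minimal sketch (the function name is illustrative), keyed on the macro the check defines:

    #include <limits.h>

    /* floor(log2(x)) for x > 0. */
    static unsigned lg_floor(unsigned long x) {
    #ifdef JEMALLOC_HAVE_BUILTIN_CLZ
        return (unsigned)(sizeof(unsigned long) * CHAR_BIT - 1)
            - (unsigned)__builtin_clzl(x);
    #else
        unsigned lg = 0;
        while (x >>= 1) lg++;
        return lg;
    #endif
    }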
-
-dnl ============================================================================
-dnl Check for os_unfair_lock operations as provided on Darwin.
-
-JE_COMPILABLE([Darwin os_unfair_lock_*()], [
-#include <os/lock.h>
-#include <AvailabilityMacros.h>
-], [
-	#if MAC_OS_X_VERSION_MIN_REQUIRED < 101200
-	#error "os_unfair_lock is not supported"
-	#else
-	os_unfair_lock lock = OS_UNFAIR_LOCK_INIT;
-	os_unfair_lock_lock(&lock);
-	os_unfair_lock_unlock(&lock);
-	#endif
-], [je_cv_os_unfair_lock])
-if test "x${je_cv_os_unfair_lock}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_OS_UNFAIR_LOCK], [ ])
-fi
-
-dnl ============================================================================
-dnl Check for spinlock(3) operations as provided on Darwin.
-
-JE_COMPILABLE([Darwin OSSpin*()], [
-#include <libkern/OSAtomic.h>
-#include <inttypes.h>
-], [
-	OSSpinLock lock = 0;
-	OSSpinLockLock(&lock);
-	OSSpinLockUnlock(&lock);
-], [je_cv_osspin])
-if test "x${je_cv_osspin}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_OSSPIN], [ ])
-fi
-
-dnl ============================================================================
-dnl Darwin-related configuration.
-
-AC_ARG_ENABLE([zone-allocator],
-  [AS_HELP_STRING([--disable-zone-allocator],
-                  [Disable zone allocator for Darwin])],
-[if test "x$enable_zone_allocator" = "xno" ; then
-  enable_zone_allocator="0"
-else
-  enable_zone_allocator="1"
-fi
-],
-[if test "x${abi}" = "xmacho"; then
-  enable_zone_allocator="1"
-fi
-]
-)
-AC_SUBST([enable_zone_allocator])
-
-if test "x${enable_zone_allocator}" = "x1" ; then
-  if test "x${abi}" != "xmacho"; then
-    AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin])
-  fi
-  AC_DEFINE([JEMALLOC_ZONE], [ ])
-fi
-
-dnl ============================================================================
-dnl Check for glibc malloc hooks
-
-JE_COMPILABLE([glibc malloc hook], [
-#include <stddef.h>
-
-extern void (* __free_hook)(void *ptr);
-extern void *(* __malloc_hook)(size_t size);
-extern void *(* __realloc_hook)(void *ptr, size_t size);
-], [
-  void *ptr = 0L;
-  if (__malloc_hook) ptr = __malloc_hook(1);
-  if (__realloc_hook) ptr = __realloc_hook(ptr, 2);
-  if (__free_hook && ptr) __free_hook(ptr);
-], [je_cv_glibc_malloc_hook])
-if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_GLIBC_MALLOC_HOOK], [ ])
-fi
-
-JE_COMPILABLE([glibc memalign hook], [
-#include <stddef.h>
-
-extern void *(* __memalign_hook)(size_t alignment, size_t size);
-], [
-  void *ptr = 0L;
-  if (__memalign_hook) ptr = __memalign_hook(16, 7);
-], [je_cv_glibc_memalign_hook])
-if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_GLIBC_MEMALIGN_HOOK], [ ])
-fi
-
-JE_COMPILABLE([pthreads adaptive mutexes], [
-#include <pthread.h>
-], [
-  pthread_mutexattr_t attr;
-  pthread_mutexattr_init(&attr);
-  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
-  pthread_mutexattr_destroy(&attr);
-], [je_cv_pthread_mutex_adaptive_np])
-if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP], [ ])
-fi
-
-dnl ============================================================================
-dnl Check for typedefs, structures, and compiler characteristics.
-AC_HEADER_STDBOOL
-
-dnl ============================================================================
-dnl Define commands that generate output files.
-
-AC_CONFIG_COMMANDS([include/jemalloc/internal/private_namespace.h], [
-  mkdir -p "${objroot}include/jemalloc/internal"
-  "${srcdir}/include/jemalloc/internal/private_namespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_namespace.h"
-], [
-  srcdir="${srcdir}"
-  objroot="${objroot}"
-])
-AC_CONFIG_COMMANDS([include/jemalloc/internal/private_unnamespace.h], [
-  mkdir -p "${objroot}include/jemalloc/internal"
-  "${srcdir}/include/jemalloc/internal/private_unnamespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_unnamespace.h"
-], [
-  srcdir="${srcdir}"
-  objroot="${objroot}"
-])
-AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [
-  f="${objroot}include/jemalloc/internal/public_symbols.txt"
-  mkdir -p "${objroot}include/jemalloc/internal"
-  cp /dev/null "${f}"
-  for nm in `echo ${mangling_map} |tr ',' ' '` ; do
-    n=`echo ${nm} |tr ':' ' ' |awk '{print $[]1}'`
-    m=`echo ${nm} |tr ':' ' ' |awk '{print $[]2}'`
-    echo "${n}:${m}" >> "${f}"
-    dnl Remove name from public_syms so that it isn't redefined later.
-    public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '`
-  done
-  for sym in ${public_syms} ; do
-    n="${sym}"
-    m="${JEMALLOC_PREFIX}${sym}"
-    echo "${n}:${m}" >> "${f}"
-  done
-], [
-  srcdir="${srcdir}"
-  objroot="${objroot}"
-  mangling_map="${mangling_map}"
-  public_syms="${public_syms}"
-  JEMALLOC_PREFIX="${JEMALLOC_PREFIX}"
-])
-AC_CONFIG_COMMANDS([include/jemalloc/internal/public_namespace.h], [
-  mkdir -p "${objroot}include/jemalloc/internal"
-  "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h"
-], [
-  srcdir="${srcdir}"
-  objroot="${objroot}"
-])
-AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [
-  mkdir -p "${objroot}include/jemalloc/internal"
-  "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h"
-], [
-  srcdir="${srcdir}"
-  objroot="${objroot}"
-])
-AC_CONFIG_COMMANDS([include/jemalloc/internal/size_classes.h], [
-  mkdir -p "${objroot}include/jemalloc/internal"
-  "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" ${LG_TINY_MIN} "${LG_PAGE_SIZES}" ${LG_SIZE_CLASS_GROUP} > "${objroot}include/jemalloc/internal/size_classes.h"
-], [
-  SHELL="${SHELL}"
-  srcdir="${srcdir}"
-  objroot="${objroot}"
-  LG_QUANTA="${LG_QUANTA}"
-  LG_TINY_MIN=${LG_TINY_MIN}
-  LG_PAGE_SIZES="${LG_PAGE_SIZES}"
-  LG_SIZE_CLASS_GROUP=${LG_SIZE_CLASS_GROUP}
-])
-AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [
-  mkdir -p "${objroot}include/jemalloc"
-  cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h"
-], [
-  srcdir="${srcdir}"
-  objroot="${objroot}"
-])
-AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_rename.h], [
-  mkdir -p "${objroot}include/jemalloc"
-  "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h"
-], [
-  srcdir="${srcdir}"
-  objroot="${objroot}"
-])
-AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle.h], [
-  mkdir -p "${objroot}include/jemalloc"
-  "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h"
-], [
-  srcdir="${srcdir}"
-  objroot="${objroot}"
-])
-AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle_jet.h], [
-  mkdir -p "${objroot}include/jemalloc"
-  "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h"
-], [
-  srcdir="${srcdir}"
-  objroot="${objroot}"
-])
-AC_CONFIG_COMMANDS([include/jemalloc/jemalloc.h], [
-  mkdir -p "${objroot}include/jemalloc"
-  "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h"
-], [
-  srcdir="${srcdir}"
-  objroot="${objroot}"
-  install_suffix="${install_suffix}"
-])
-
-dnl Process .in files.
-AC_SUBST([cfghdrs_in])
-AC_SUBST([cfghdrs_out])
-AC_CONFIG_HEADERS([$cfghdrs_tup])
-
-dnl ============================================================================
-dnl Generate outputs.
-
-AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof])
-AC_SUBST([cfgoutputs_in])
-AC_SUBST([cfgoutputs_out])
-AC_OUTPUT
-
-dnl ============================================================================
-dnl Print out the results of configuration.
-AC_MSG_RESULT([===============================================================================])
-AC_MSG_RESULT([jemalloc version   : ${jemalloc_version}])
-AC_MSG_RESULT([library revision   : ${rev}])
-AC_MSG_RESULT([])
-AC_MSG_RESULT([CONFIG             : ${CONFIG}])
-AC_MSG_RESULT([CC                 : ${CC}])
-AC_MSG_RESULT([CONFIGURE_CFLAGS   : ${CONFIGURE_CFLAGS}])
-AC_MSG_RESULT([SPECIFIED_CFLAGS   : ${SPECIFIED_CFLAGS}])
-AC_MSG_RESULT([EXTRA_CFLAGS       : ${EXTRA_CFLAGS}])
-AC_MSG_RESULT([CPPFLAGS           : ${CPPFLAGS}])
-AC_MSG_RESULT([CXX                : ${CXX}])
-AC_MSG_RESULT([CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}])
-AC_MSG_RESULT([SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}])
-AC_MSG_RESULT([EXTRA_CXXFLAGS     : ${EXTRA_CXXFLAGS}])
-AC_MSG_RESULT([LDFLAGS            : ${LDFLAGS}])
-AC_MSG_RESULT([EXTRA_LDFLAGS      : ${EXTRA_LDFLAGS}])
-AC_MSG_RESULT([DSO_LDFLAGS        : ${DSO_LDFLAGS}])
-AC_MSG_RESULT([LIBS               : ${LIBS}])
-AC_MSG_RESULT([RPATH_EXTRA        : ${RPATH_EXTRA}])
-AC_MSG_RESULT([])
-AC_MSG_RESULT([XSLTPROC           : ${XSLTPROC}])
-AC_MSG_RESULT([XSLROOT            : ${XSLROOT}])
-AC_MSG_RESULT([])
-AC_MSG_RESULT([PREFIX             : ${PREFIX}])
-AC_MSG_RESULT([BINDIR             : ${BINDIR}])
-AC_MSG_RESULT([DATADIR            : ${DATADIR}])
-AC_MSG_RESULT([INCLUDEDIR         : ${INCLUDEDIR}])
-AC_MSG_RESULT([LIBDIR             : ${LIBDIR}])
-AC_MSG_RESULT([MANDIR             : ${MANDIR}])
-AC_MSG_RESULT([])
-AC_MSG_RESULT([srcroot            : ${srcroot}])
-AC_MSG_RESULT([abs_srcroot        : ${abs_srcroot}])
-AC_MSG_RESULT([objroot            : ${objroot}])
-AC_MSG_RESULT([abs_objroot        : ${abs_objroot}])
-AC_MSG_RESULT([])
-AC_MSG_RESULT([JEMALLOC_PREFIX    : ${JEMALLOC_PREFIX}])
-AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE])
-AC_MSG_RESULT([                   : ${JEMALLOC_PRIVATE_NAMESPACE}])
-AC_MSG_RESULT([install_suffix     : ${install_suffix}])
-AC_MSG_RESULT([malloc_conf        : ${config_malloc_conf}])
-AC_MSG_RESULT([autogen            : ${enable_autogen}])
-AC_MSG_RESULT([cc-silence         : ${enable_cc_silence}])
-AC_MSG_RESULT([debug              : ${enable_debug}])
-AC_MSG_RESULT([code-coverage      : ${enable_code_coverage}])
-AC_MSG_RESULT([stats              : ${enable_stats}])
-AC_MSG_RESULT([prof               : ${enable_prof}])
-AC_MSG_RESULT([prof-libunwind     : ${enable_prof_libunwind}])
-AC_MSG_RESULT([prof-libgcc        : ${enable_prof_libgcc}])
-AC_MSG_RESULT([prof-gcc           : ${enable_prof_gcc}])
-AC_MSG_RESULT([tcache             : ${enable_tcache}])
-AC_MSG_RESULT([fill               : ${enable_fill}])
-AC_MSG_RESULT([utrace             : ${enable_utrace}])
-AC_MSG_RESULT([xmalloc            : ${enable_xmalloc}])
-AC_MSG_RESULT([munmap             : ${enable_munmap}])
-AC_MSG_RESULT([lazy_lock          : ${enable_lazy_lock}])
-AC_MSG_RESULT([tls                : ${enable_tls}])
-AC_MSG_RESULT([cache-oblivious    : ${enable_cache_oblivious}])
-AC_MSG_RESULT([cxx                : ${enable_cxx}])
-AC_MSG_RESULT([===============================================================================])
diff --git a/zircon/third_party/ulib/jemalloc/coverage.sh b/zircon/third_party/ulib/jemalloc/coverage.sh
deleted file mode 100755
index 6d1362a..0000000
--- a/zircon/third_party/ulib/jemalloc/coverage.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh
-
-set -e
-
-objdir=$1
-suffix=$2
-shift 2
-objs=$@
-
-gcov -b -p -f -o "${objdir}" ${objs}
-
-# Move gcov outputs so that subsequent gcov invocations won't clobber results
-# for the same sources with different compilation flags.
-for f in `find . -maxdepth 1 -type f -name '*.gcov'` ; do
-  mv "${f}" "${f}.${suffix}"
-done
diff --git a/zircon/third_party/ulib/jemalloc/doc/html.xsl.in b/zircon/third_party/ulib/jemalloc/doc/html.xsl.in
deleted file mode 100644
index ec4fa65..0000000
--- a/zircon/third_party/ulib/jemalloc/doc/html.xsl.in
+++ /dev/null
@@ -1,5 +0,0 @@
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-  <xsl:import href="@XSLROOT@/html/docbook.xsl"/>
-  <xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/>
-  <xsl:output method="xml" encoding="utf-8"/>
-</xsl:stylesheet>
diff --git a/zircon/third_party/ulib/jemalloc/doc/jemalloc.xml.in b/zircon/third_party/ulib/jemalloc/doc/jemalloc.xml.in
deleted file mode 100644
index 36ec140..0000000
--- a/zircon/third_party/ulib/jemalloc/doc/jemalloc.xml.in
+++ /dev/null
@@ -1,2735 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<?xml-stylesheet type="text/xsl"
-        href="http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl"?>
-<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.4//EN"
-        "http://www.oasis-open.org/docbook/xml/4.4/docbookx.dtd" [
-]>
-
-<refentry>
-  <refentryinfo>
-    <title>User Manual</title>
-    <productname>jemalloc</productname>
-    <releaseinfo role="version">@jemalloc_version@</releaseinfo>
-    <authorgroup>
-      <author>
-        <firstname>Jason</firstname>
-        <surname>Evans</surname>
-        <personblurb>Author</personblurb>
-      </author>
-    </authorgroup>
-  </refentryinfo>
-  <refmeta>
-    <refentrytitle>JEMALLOC</refentrytitle>
-    <manvolnum>3</manvolnum>
-  </refmeta>
-  <refnamediv>
-    <refdescriptor>jemalloc</refdescriptor>
-    <refname>jemalloc</refname>
-    <!-- Each refname causes a man page file to be created.  Only if this were
-         the system malloc(3) implementation would these files be appropriate.
-    <refname>malloc</refname>
-    <refname>calloc</refname>
-    <refname>posix_memalign</refname>
-    <refname>aligned_alloc</refname>
-    <refname>realloc</refname>
-    <refname>free</refname>
-    <refname>mallocx</refname>
-    <refname>rallocx</refname>
-    <refname>xallocx</refname>
-    <refname>sallocx</refname>
-    <refname>dallocx</refname>
-    <refname>sdallocx</refname>
-    <refname>nallocx</refname>
-    <refname>mallctl</refname>
-    <refname>mallctlnametomib</refname>
-    <refname>mallctlbymib</refname>
-    <refname>malloc_stats_print</refname>
-    <refname>malloc_usable_size</refname>
-    -->
-    <refpurpose>general purpose memory allocation functions</refpurpose>
-  </refnamediv>
-  <refsect1 id="library">
-    <title>LIBRARY</title>
-    <para>This manual describes jemalloc @jemalloc_version@.  More information
-    can be found at the <ulink
-    url="http://jemalloc.net/">jemalloc website</ulink>.</para>
-  </refsect1>
-  <refsynopsisdiv>
-    <title>SYNOPSIS</title>
-    <funcsynopsis>
-      <funcsynopsisinfo>#include &lt;<filename class="headerfile">jemalloc/jemalloc.h</filename>&gt;</funcsynopsisinfo>
-      <refsect2>
-        <title>Standard API</title>
-        <funcprototype>
-          <funcdef>void *<function>malloc</function></funcdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>void *<function>calloc</function></funcdef>
-          <paramdef>size_t <parameter>number</parameter></paramdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>int <function>posix_memalign</function></funcdef>
-          <paramdef>void **<parameter>ptr</parameter></paramdef>
-          <paramdef>size_t <parameter>alignment</parameter></paramdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>void *<function>aligned_alloc</function></funcdef>
-          <paramdef>size_t <parameter>alignment</parameter></paramdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>void *<function>realloc</function></funcdef>
-          <paramdef>void *<parameter>ptr</parameter></paramdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>void <function>free</function></funcdef>
-          <paramdef>void *<parameter>ptr</parameter></paramdef>
-        </funcprototype>
-      </refsect2>
-      <refsect2>
-        <title>Non-standard API</title>
-        <funcprototype>
-          <funcdef>void *<function>mallocx</function></funcdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-          <paramdef>int <parameter>flags</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>void *<function>rallocx</function></funcdef>
-          <paramdef>void *<parameter>ptr</parameter></paramdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-          <paramdef>int <parameter>flags</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>size_t <function>xallocx</function></funcdef>
-          <paramdef>void *<parameter>ptr</parameter></paramdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-          <paramdef>size_t <parameter>extra</parameter></paramdef>
-          <paramdef>int <parameter>flags</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>size_t <function>sallocx</function></funcdef>
-          <paramdef>void *<parameter>ptr</parameter></paramdef>
-          <paramdef>int <parameter>flags</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>void <function>dallocx</function></funcdef>
-          <paramdef>void *<parameter>ptr</parameter></paramdef>
-          <paramdef>int <parameter>flags</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>void <function>sdallocx</function></funcdef>
-          <paramdef>void *<parameter>ptr</parameter></paramdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-          <paramdef>int <parameter>flags</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>size_t <function>nallocx</function></funcdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-          <paramdef>int <parameter>flags</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>int <function>mallctl</function></funcdef>
-          <paramdef>const char *<parameter>name</parameter></paramdef>
-          <paramdef>void *<parameter>oldp</parameter></paramdef>
-          <paramdef>size_t *<parameter>oldlenp</parameter></paramdef>
-          <paramdef>void *<parameter>newp</parameter></paramdef>
-          <paramdef>size_t <parameter>newlen</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>int <function>mallctlnametomib</function></funcdef>
-          <paramdef>const char *<parameter>name</parameter></paramdef>
-          <paramdef>size_t *<parameter>mibp</parameter></paramdef>
-          <paramdef>size_t *<parameter>miblenp</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>int <function>mallctlbymib</function></funcdef>
-          <paramdef>const size_t *<parameter>mib</parameter></paramdef>
-          <paramdef>size_t <parameter>miblen</parameter></paramdef>
-          <paramdef>void *<parameter>oldp</parameter></paramdef>
-          <paramdef>size_t *<parameter>oldlenp</parameter></paramdef>
-          <paramdef>void *<parameter>newp</parameter></paramdef>
-          <paramdef>size_t <parameter>newlen</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>void <function>malloc_stats_print</function></funcdef>
-          <paramdef>void <parameter>(*write_cb)</parameter>
-            <funcparams>void *, const char *</funcparams>
-          </paramdef>
-          <paramdef>void *<parameter>cbopaque</parameter></paramdef>
-          <paramdef>const char *<parameter>opts</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>size_t <function>malloc_usable_size</function></funcdef>
-          <paramdef>const void *<parameter>ptr</parameter></paramdef>
-        </funcprototype>
-        <funcprototype>
-          <funcdef>void <function>(*malloc_message)</function></funcdef>
-          <paramdef>void *<parameter>cbopaque</parameter></paramdef>
-          <paramdef>const char *<parameter>s</parameter></paramdef>
-        </funcprototype>
-        <para><type>const char *</type><varname>malloc_conf</varname>;</para>
-      </refsect2>
-    </funcsynopsis>
-  </refsynopsisdiv>
-  <refsect1 id="description">
-    <title>DESCRIPTION</title>
-    <refsect2>
-      <title>Standard API</title>
-
-      <para>The <function>malloc()</function> function allocates
-      <parameter>size</parameter> bytes of uninitialized memory.  The allocated
-      space is suitably aligned (after possible pointer coercion) for storage
-      of any type of object.</para>
-
-      <para>The <function>calloc()</function> function allocates
-      space for <parameter>number</parameter> objects, each
-      <parameter>size</parameter> bytes in length.  The result is identical to
-      calling <function>malloc()</function> with an argument of
-      <parameter>number</parameter> * <parameter>size</parameter>, with the
-      exception that the allocated memory is explicitly initialized to zero
-      bytes.</para>
-
-      <para>The <function>posix_memalign()</function> function
-      allocates <parameter>size</parameter> bytes of memory such that the
-      allocation's base address is a multiple of
-      <parameter>alignment</parameter>, and returns the allocation in the value
-      pointed to by <parameter>ptr</parameter>.  The requested
-      <parameter>alignment</parameter> must be a power of 2 at least as large as
-      <code language="C">sizeof(<type>void *</type>)</code>.</para>
-
-      <para>The <function>aligned_alloc()</function> function
-      allocates <parameter>size</parameter> bytes of memory such that the
-      allocation's base address is a multiple of
-      <parameter>alignment</parameter>.  The requested
-      <parameter>alignment</parameter> must be a power of 2.  Behavior is
-      undefined if <parameter>size</parameter> is not an integral multiple of
-      <parameter>alignment</parameter>.</para>
-
-      <para>The <function>realloc()</function> function changes the
-      size of the previously allocated memory referenced by
-      <parameter>ptr</parameter> to <parameter>size</parameter> bytes.  The
-      contents of the memory are unchanged up to the lesser of the new and old
-      sizes.  If the new size is larger, the contents of the newly allocated
-      portion of the memory are undefined.  Upon success, the memory referenced
-      by <parameter>ptr</parameter> is freed and a pointer to the newly
-      allocated memory is returned.  Note that
-      <function>realloc()</function> may move the memory allocation,
-      resulting in a different return value than <parameter>ptr</parameter>.
-      If <parameter>ptr</parameter> is <constant>NULL</constant>, the
-      <function>realloc()</function> function behaves identically to
-      <function>malloc()</function> for the specified size.</para>
-
-      <para>The <function>free()</function> function causes the
-      allocated memory referenced by <parameter>ptr</parameter> to be made
-      available for future allocations.  If <parameter>ptr</parameter> is
-      <constant>NULL</constant>, no action occurs.</para>
-    </refsect2>
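To make the alignment rules above concrete, a minimal sketch of the two aligned-allocation entry points (standard C/POSIX usage, not specific to this deleted copy of the manual):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        void *p = NULL;
        /* Alignment must be a power of 2 and at least sizeof(void *). */
        if (posix_memalign(&p, 64, 1024) != 0) {
            return 1;
        }
        /* aligned_alloc() additionally expects size to be a multiple of
         * the alignment. */
        void *q = aligned_alloc(64, 1024);
        printf("%p %p\n", p, q);
        free(p);
        free(q);
        return 0;
    }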
-    <refsect2>
-      <title>Non-standard API</title>
-      <para>The <function>mallocx()</function>,
-      <function>rallocx()</function>,
-      <function>xallocx()</function>,
-      <function>sallocx()</function>,
-      <function>dallocx()</function>,
-      <function>sdallocx()</function>, and
-      <function>nallocx()</function> functions all have a
-      <parameter>flags</parameter> argument that can be used to specify
-      options.  The functions only check the options that are contextually
-      relevant.  Use bitwise or (<code language="C">|</code>) operations to
-      specify one or more of the following:
-        <variablelist>
-          <varlistentry id="MALLOCX_LG_ALIGN">
-            <term><constant>MALLOCX_LG_ALIGN(<parameter>la</parameter>)
-            </constant></term>
-
-            <listitem><para>Align the memory allocation to start at an address
-            that is a multiple of <code language="C">(1 &lt;&lt;
-            <parameter>la</parameter>)</code>.  This macro does not validate
-            that <parameter>la</parameter> is within the valid
-            range.</para></listitem>
-          </varlistentry>
-          <varlistentry id="MALLOCX_ALIGN">
-            <term><constant>MALLOCX_ALIGN(<parameter>a</parameter>)
-            </constant></term>
-
-            <listitem><para>Align the memory allocation to start at an address
-            that is a multiple of <parameter>a</parameter>, where
-            <parameter>a</parameter> is a power of two.  This macro does not
-            validate that <parameter>a</parameter> is a power of 2.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry id="MALLOCX_ZERO">
-            <term><constant>MALLOCX_ZERO</constant></term>
-
-            <listitem><para>Initialize newly allocated memory to contain zero
-            bytes.  In the growing reallocation case, the real size prior to
-            reallocation defines the boundary between untouched bytes and those
-            that are initialized to contain zero bytes.  If this macro is
-            absent, newly allocated memory is uninitialized.</para></listitem>
-          </varlistentry>
-          <varlistentry id="MALLOCX_TCACHE">
-            <term><constant>MALLOCX_TCACHE(<parameter>tc</parameter>)
-            </constant></term>
-
-            <listitem><para>Use the thread-specific cache (tcache) specified by
-            the identifier <parameter>tc</parameter>, which must have been
-            acquired via the <link
-            linkend="tcache.create"><mallctl>tcache.create</mallctl></link>
-            mallctl.  This macro does not validate that
-            <parameter>tc</parameter> specifies a valid
-            identifier.</para></listitem>
-          </varlistentry>
-          <varlistentry id="MALLOC_TCACHE_NONE">
-            <term><constant>MALLOCX_TCACHE_NONE</constant></term>
-
-            <listitem><para>Do not use a thread-specific cache (tcache).  Unless
-            <constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant> or
-            <constant>MALLOCX_TCACHE_NONE</constant> is specified, an
-            automatically managed tcache will be used under many circumstances.
-            This macro cannot be used in the same <parameter>flags</parameter>
-            argument as
-            <constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant>.</para></listitem>
-          </varlistentry>
-          <varlistentry id="MALLOCX_ARENA">
-            <term><constant>MALLOCX_ARENA(<parameter>a</parameter>)
-            </constant></term>
-
-            <listitem><para>Use the arena specified by the index
-            <parameter>a</parameter>.  This macro has no effect for regions that
-            were allocated via an arena other than the one specified.  This
-            macro does not validate that <parameter>a</parameter> specifies an
-            arena index in the valid range.</para></listitem>
-          </varlistentry>
-        </variablelist>
-      </para>
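A minimal sketch of combining the flags described above with mallocx(), using only macros and functions documented in this manual:

    #include <jemalloc/jemalloc.h>
    #include <stdio.h>

    int main(void) {
        /* 4 KiB-aligned, zero-filled allocation (size must be non-zero). */
        void *p = mallocx(4096, MALLOCX_LG_ALIGN(12) | MALLOCX_ZERO);
        if (p == NULL) {
            return 1;
        }
        printf("real size: %zu\n", sallocx(p, 0));
        dallocx(p, 0);
        return 0;
    }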
-
-      <para>The <function>mallocx()</function> function allocates at
-      least <parameter>size</parameter> bytes of memory, and returns a pointer
-      to the base address of the allocation.  Behavior is undefined if
-      <parameter>size</parameter> is <constant>0</constant>.</para>
-
-      <para>The <function>rallocx()</function> function resizes the
-      allocation at <parameter>ptr</parameter> to be at least
-      <parameter>size</parameter> bytes, and returns a pointer to the base
-      address of the resulting allocation, which may or may not have moved from
-      its original location.  Behavior is undefined if
-      <parameter>size</parameter> is <constant>0</constant>.</para>
-
-      <para>The <function>xallocx()</function> function resizes the
-      allocation at <parameter>ptr</parameter> in place to be at least
-      <parameter>size</parameter> bytes, and returns the real size of the
-      allocation.  If <parameter>extra</parameter> is non-zero, an attempt is
-      made to resize the allocation to be at least <code
-      language="C">(<parameter>size</parameter> +
-      <parameter>extra</parameter>)</code> bytes, though inability to allocate
-      the extra byte(s) will not by itself result in failure to resize.
-      Behavior is undefined if <parameter>size</parameter> is
-      <constant>0</constant>, or if <code
-      language="C">(<parameter>size</parameter> + <parameter>extra</parameter>
-      &gt; <constant>SIZE_T_MAX</constant>)</code>.</para>
-
-      <para>The <function>sallocx()</function> function returns the
-      real size of the allocation at <parameter>ptr</parameter>.</para>
-
-      <para>The <function>dallocx()</function> function causes the
-      memory referenced by <parameter>ptr</parameter> to be made available for
-      future allocations.</para>
-
-      <para>The <function>sdallocx()</function> function is an
-      extension of <function>dallocx()</function> with a
-      <parameter>size</parameter> parameter to allow the caller to pass in the
-      allocation size as an optimization.  The minimum valid input size is the
-      original requested size of the allocation, and the maximum valid input
-      size is the corresponding value returned by
-      <function>nallocx()</function> or
-      <function>sallocx()</function>.</para>
-
-      <para>The <function>nallocx()</function> function allocates no
-      memory, but it performs the same size computation as the
-      <function>mallocx()</function> function, and returns the real
-      size of the allocation that would result from the equivalent
-      <function>mallocx()</function> function call, or
-      <constant>0</constant> if the inputs exceed the maximum supported size
-      class and/or alignment.  Behavior is undefined if
-      <parameter>size</parameter> is <constant>0</constant>.</para>
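Putting nallocx() and sdallocx() together, a minimal sketch of the sized deallocation described above:

    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    void sized_free_example(void) {
        size_t req = 100;
        size_t real = nallocx(req, 0);   /* size class the request maps to */
        void *p = mallocx(req, 0);
        if (p != NULL) {
            /* Any size between the original request and nallocx()'s result
             * is a valid argument to sdallocx(). */
            sdallocx(p, real, 0);
        }
    }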
-
-      <para>The <function>mallctl()</function> function provides a
-      general interface for introspecting the memory allocator, as well as
-      setting modifiable parameters and triggering actions.  The
-      period-separated <parameter>name</parameter> argument specifies a
-      location in a tree-structured namespace; see the <xref
-      linkend="mallctl_namespace" xrefstyle="template:%t"/> section for
-      documentation on the tree contents.  To read a value, pass a pointer via
-      <parameter>oldp</parameter> to adequate space to contain the value, and a
-      pointer to its length via <parameter>oldlenp</parameter>; otherwise pass
-      <constant>NULL</constant> and <constant>NULL</constant>.  Similarly, to
-      write a value, pass a pointer to the value via
-      <parameter>newp</parameter>, and its length via
-      <parameter>newlen</parameter>; otherwise pass <constant>NULL</constant>
-      and <constant>0</constant>.</para>
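A minimal read-only sketch of the mallctl() calling convention described above, using names documented later in this manual:

    #include <jemalloc/jemalloc.h>
    #include <stdio.h>

    void print_allocator_info(void) {
        const char *version;
        unsigned narenas;
        size_t len;

        len = sizeof(version);
        mallctl("version", &version, &len, NULL, 0);      /* r- string */

        len = sizeof(narenas);
        mallctl("opt.narenas", &narenas, &len, NULL, 0);  /* r- unsigned */

        printf("jemalloc %s, opt.narenas = %u\n", version, narenas);
    }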
-
-      <para>The <function>mallctlnametomib()</function> function
-      provides a way to avoid repeated name lookups for applications that
-      repeatedly query the same portion of the namespace, by translating a name
-      to a <quote>Management Information Base</quote> (MIB) that can be passed
-      repeatedly to <function>mallctlbymib()</function>.  Upon
-      successful return from <function>mallctlnametomib()</function>,
-      <parameter>mibp</parameter> contains an array of
-      <parameter>*miblenp</parameter> integers, where
-      <parameter>*miblenp</parameter> is the lesser of the number of components
-      in <parameter>name</parameter> and the input value of
-      <parameter>*miblenp</parameter>.  Thus it is possible to pass a
-      <parameter>*miblenp</parameter> that is smaller than the number of
-      period-separated name components, which results in a partial MIB that can
-      be used as the basis for constructing a complete MIB.  For name
-      components that are integers (e.g. the 2 in
-      <link
-      linkend="arenas.bin.i.size"><mallctl>arenas.bin.2.size</mallctl></link>),
-      the corresponding MIB component will always be that integer.  Therefore,
-      it is legitimate to construct code like the following: <programlisting
-      language="C"><![CDATA[
-unsigned nbins, i;
-size_t mib[4];
-size_t len, miblen;
-
-len = sizeof(nbins);
-mallctl("arenas.nbins", &nbins, &len, NULL, 0);
-
-miblen = 4;
-mallctlnametomib("arenas.bin.0.size", mib, &miblen);
-for (i = 0; i < nbins; i++) {
-	size_t bin_size;
-
-	mib[2] = i;
-	len = sizeof(bin_size);
-	mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0);
-	/* Do something with bin_size... */
-}]]></programlisting></para>
-
-      <para>The <function>malloc_stats_print()</function> function writes
-      summary statistics via the <parameter>write_cb</parameter> callback
-      function pointer and <parameter>cbopaque</parameter> data passed to
-      <parameter>write_cb</parameter>, or <function>malloc_message()</function>
-      if <parameter>write_cb</parameter> is <constant>NULL</constant>.  The
-      statistics are presented in human-readable form unless <quote>J</quote> is
-      specified as a character within the <parameter>opts</parameter> string, in
-      which case the statistics are presented in <ulink
-      url="http://www.json.org/">JSON format</ulink>.  This function can be
-      called repeatedly.  General information that never changes during
-      execution can be omitted by specifying <quote>g</quote> as a character
-      within the <parameter>opts</parameter> string.  Note that
-      <function>malloc_stats_print()</function> uses the
-      <function>mallctl*()</function> functions internally, so inconsistent
-      statistics can be reported if multiple threads use these functions
-      simultaneously.  If <option>--enable-stats</option> is specified during
-      configuration, <quote>m</quote>, <quote>d</quote>, and <quote>a</quote>
-      can be specified to omit merged arena, destroyed merged arena, and per
-      arena statistics, respectively; <quote>b</quote> and <quote>l</quote> can
-      be specified to omit per size class statistics for bins and large objects,
-      respectively.  Unrecognized characters are silently ignored.  Note that
-      thread caching may prevent some statistics from being completely up to
-      date, since extra locking would be required to merge counters that track
-      thread cache operations.</para>
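For example, a minimal sketch using the opts characters described above:

    #include <jemalloc/jemalloc.h>

    void dump_stats(void) {
        /* NULL write_cb routes output through malloc_message(); "Jg"
         * requests JSON output and omits the general information block. */
        malloc_stats_print(NULL, NULL, "Jg");
    }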
-
-      <para>The <function>malloc_usable_size()</function> function
-      returns the usable size of the allocation pointed to by
-      <parameter>ptr</parameter>.  The return value may be larger than the size
-      that was requested during allocation.  The
-      <function>malloc_usable_size()</function> function is not a
-      mechanism for in-place <function>realloc()</function>; rather
-      it is provided solely as a tool for introspection purposes.  Any
-      discrepancy between the requested allocation size and the size reported
-      by <function>malloc_usable_size()</function> should not be
-      depended on, since such behavior is entirely implementation-dependent.
-      </para>
-    </refsect2>
-  </refsect1>
-  <refsect1 id="tuning">
-    <title>TUNING</title>
-    <para>The allocator initializes its internals once, when the first call is
-    made to one of the memory allocation routines, based in part on various
-    options that can be specified at compile- or run-time.</para>
-
-    <para>The string specified via <option>--with-malloc-conf</option>, the
-    string pointed to by the global variable <varname>malloc_conf</varname>, the
-    <quote>name</quote> of the file referenced by the symbolic link named
-    <filename class="symlink">/etc/malloc.conf</filename>, and the value of the
-    environment variable <envar>MALLOC_CONF</envar>, will be interpreted, in
-    that order, from left to right as options.  Note that
-    <varname>malloc_conf</varname> may be read before
-    <function>main()</function> is entered, so the declaration of
-    <varname>malloc_conf</varname> should specify an initializer that contains
-    the final value to be read by jemalloc.  <option>--with-malloc-conf</option>
-    and <varname>malloc_conf</varname> are compile-time mechanisms, whereas
-    <filename class="symlink">/etc/malloc.conf</filename> and
-    <envar>MALLOC_CONF</envar> can be safely set any time prior to program
-    invocation.</para>
-
-    <para>An options string is a comma-separated list of option:value pairs.
-    There is one key corresponding to each <link
-    linkend="opt.abort"><mallctl>opt.*</mallctl></link> mallctl (see the <xref
-    linkend="mallctl_namespace" xrefstyle="template:%t"/> section for options
-    documentation).  For example, <literal>abort:true,narenas:1</literal> sets
-    the <link linkend="opt.abort"><mallctl>opt.abort</mallctl></link> and <link
-    linkend="opt.narenas"><mallctl>opt.narenas</mallctl></link> options.  Some
-    options have boolean values (true/false), others have integer values (base
-    8, 10, or 16, depending on prefix), and yet others have raw string
-    values.</para>
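A minimal sketch of the compile-time mechanism described above, reusing the example option string from this section (the same string could equally be supplied via the MALLOC_CONF environment variable at run-time):

    #include <jemalloc/jemalloc.h>

    /* Read before main() is entered, so it must be a constant initializer. */
    const char *malloc_conf = "abort:true,narenas:1";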
-  </refsect1>
-  <refsect1 id="implementation_notes">
-    <title>IMPLEMENTATION NOTES</title>
-    <para>Traditionally, allocators have used
-    <citerefentry><refentrytitle>sbrk</refentrytitle>
-    <manvolnum>2</manvolnum></citerefentry> to obtain memory, which is
-    suboptimal for several reasons, including race conditions, increased
-    fragmentation, and artificial limitations on maximum usable memory.  If
-    <citerefentry><refentrytitle>sbrk</refentrytitle>
-    <manvolnum>2</manvolnum></citerefentry> is supported by the operating
-    system, this allocator uses both
-    <citerefentry><refentrytitle>mmap</refentrytitle>
-    <manvolnum>2</manvolnum></citerefentry> and
-    <citerefentry><refentrytitle>sbrk</refentrytitle>
-    <manvolnum>2</manvolnum></citerefentry>, in that order of preference;
-    otherwise only <citerefentry><refentrytitle>mmap</refentrytitle>
-    <manvolnum>2</manvolnum></citerefentry> is used.</para>
-
-    <para>This allocator uses multiple arenas in order to reduce lock
-    contention for threaded programs on multi-processor systems.  This works
-    well with regard to threading scalability, but incurs some costs.  There is
-    a small fixed per-arena overhead, and additionally, arenas manage memory
-    completely independently of each other, which means a small fixed increase
-    in overall memory fragmentation.  These overheads are not generally an
-    issue, given the number of arenas normally used.  Note that using
-    substantially more arenas than the default is not likely to improve
-    performance, mainly due to reduced cache performance.  However, it may make
-    sense to reduce the number of arenas if an application does not make much
-    use of the allocation functions.</para>
-
-    <para>In addition to multiple arenas, unless
-    <option>--disable-tcache</option> is specified during configuration, this
-    allocator supports thread-specific caching, in order to make it possible to
-    completely avoid synchronization for most allocation requests.  Such caching
-    allows very fast allocation in the common case, but it increases memory
-    usage and fragmentation, since a bounded number of objects can remain
-    allocated in each thread cache.</para>
-
-    <para>Memory is conceptually broken into extents.  Extents are always
-    aligned to multiples of the page size.  This alignment makes it possible to
-    find metadata for user objects quickly.  User objects are broken into two
-    categories according to size: small and large.  Contiguous small objects
-    comprise a slab, which resides within a single extent, whereas large objects
-    each have their own extents backing them.</para>
-
-    <para>Small objects are managed in groups by slabs.  Each slab maintains
-    a bitmap to track which regions are in use.  Allocation requests that are no
-    more than half the quantum (8 or 16, depending on architecture) are rounded
-    up to the nearest power of two that is at least <code
-    language="C">sizeof(<type>double</type>)</code>.  All other object size
-    classes are multiples of the quantum, spaced such that there are four size
-    classes for each doubling in size, which limits internal fragmentation to
-    approximately 20% for all but the smallest size classes.  Small size classes
-    are smaller than four times the page size, and large size classes extend
-    from four times the page size up to the largest size class that does not
-    exceed <constant>PTRDIFF_MAX</constant>.</para>
-
-    <para>Allocations are packed tightly together, which can be an issue for
-    multi-threaded applications.  If you need to assure that allocations do not
-    suffer from cacheline sharing, round your allocation requests up to the
-    nearest multiple of the cacheline size, or specify cacheline alignment when
-    allocating.</para>
-
-    <para>The <function>realloc()</function>,
-    <function>rallocx()</function>, and
-    <function>xallocx()</function> functions may resize allocations
-    without moving them under limited circumstances.  Unlike the
-    <function>*allocx()</function> API, the standard API does not
-    officially round up the usable size of an allocation to the nearest size
-    class, so technically it is necessary to call
-    <function>realloc()</function> to grow e.g. a 9-byte allocation to
-    16 bytes, or shrink a 16-byte allocation to 9 bytes.  Growth and shrinkage
-    trivially succeeds in place as long as the pre-size and post-size both round
-    up to the same size class.  No other API guarantees are made regarding
-    in-place resizing, but the current implementation also tries to resize large
-    allocations in place, as long as the pre-size and post-size are both large.
-    For shrinkage to succeed, the extent allocator must support splitting (see
-    <link
-    linkend="arena.i.extent_hooks"><mallctl>arena.&lt;i&gt;.extent_hooks</mallctl></link>).
-    Growth only succeeds if the trailing memory is currently available, and the
-    extent allocator supports merging.</para>
-
-    <para>Assuming 4 KiB pages and a 16-byte quantum on a 64-bit system, the
-    size classes in each category are as shown in <xref linkend="size_classes"
-    xrefstyle="template:Table %n"/>.</para>
-
-    <table xml:id="size_classes" frame="all">
-      <title>Size classes</title>
-      <tgroup cols="3" colsep="1" rowsep="1">
-      <colspec colname="c1" align="left"/>
-      <colspec colname="c2" align="right"/>
-      <colspec colname="c3" align="left"/>
-      <thead>
-        <row>
-          <entry>Category</entry>
-          <entry>Spacing</entry>
-          <entry>Size</entry>
-        </row>
-      </thead>
-      <tbody>
-        <row>
-          <entry morerows="8">Small</entry>
-          <entry>lg</entry>
-          <entry>[8]</entry>
-        </row>
-        <row>
-          <entry>16</entry>
-          <entry>[16, 32, 48, 64, 80, 96, 112, 128]</entry>
-        </row>
-        <row>
-          <entry>32</entry>
-          <entry>[160, 192, 224, 256]</entry>
-        </row>
-        <row>
-          <entry>64</entry>
-          <entry>[320, 384, 448, 512]</entry>
-        </row>
-        <row>
-          <entry>128</entry>
-          <entry>[640, 768, 896, 1024]</entry>
-        </row>
-        <row>
-          <entry>256</entry>
-          <entry>[1280, 1536, 1792, 2048]</entry>
-        </row>
-        <row>
-          <entry>512</entry>
-          <entry>[2560, 3072, 3584, 4096]</entry>
-        </row>
-        <row>
-          <entry>1 KiB</entry>
-          <entry>[5 KiB, 6 KiB, 7 KiB, 8 KiB]</entry>
-        </row>
-        <row>
-          <entry>2 KiB</entry>
-          <entry>[10 KiB, 12 KiB, 14 KiB]</entry>
-        </row>
-        <row>
-          <entry morerows="15">Large</entry>
-          <entry>2 KiB</entry>
-          <entry>[16 KiB]</entry>
-        </row>
-        <row>
-          <entry>4 KiB</entry>
-          <entry>[20 KiB, 24 KiB, 28 KiB, 32 KiB]</entry>
-        </row>
-        <row>
-          <entry>8 KiB</entry>
-          <entry>[40 KiB, 48 KiB, 56 KiB, 64 KiB]</entry>
-        </row>
-        <row>
-          <entry>16 KiB</entry>
-          <entry>[80 KiB, 96 KiB, 112 KiB, 128 KiB]</entry>
-        </row>
-        <row>
-          <entry>32 KiB</entry>
-          <entry>[160 KiB, 192 KiB, 224 KiB, 256 KiB]</entry>
-        </row>
-        <row>
-          <entry>64 KiB</entry>
-          <entry>[320 KiB, 384 KiB, 448 KiB, 512 KiB]</entry>
-        </row>
-        <row>
-          <entry>128 KiB</entry>
-          <entry>[640 KiB, 768 KiB, 896 KiB, 1 MiB]</entry>
-        </row>
-        <row>
-          <entry>256 KiB</entry>
-          <entry>[1280 KiB, 1536 KiB, 1792 KiB, 2 MiB]</entry>
-        </row>
-        <row>
-          <entry>512 KiB</entry>
-          <entry>[2560 KiB, 3 MiB, 3584 KiB, 4 MiB]</entry>
-        </row>
-        <row>
-          <entry>1 MiB</entry>
-          <entry>[5 MiB, 6 MiB, 7 MiB, 8 MiB]</entry>
-        </row>
-        <row>
-          <entry>2 MiB</entry>
-          <entry>[10 MiB, 12 MiB, 14 MiB, 16 MiB]</entry>
-        </row>
-        <row>
-          <entry>4 MiB</entry>
-          <entry>[20 MiB, 24 MiB, 28 MiB, 32 MiB]</entry>
-        </row>
-        <row>
-          <entry>8 MiB</entry>
-          <entry>[40 MiB, 48 MiB, 56 MiB, 64 MiB]</entry>
-        </row>
-        <row>
-          <entry>...</entry>
-          <entry>...</entry>
-        </row>
-        <row>
-          <entry>512 PiB</entry>
-          <entry>[2560 PiB, 3 EiB, 3584 PiB, 4 EiB]</entry>
-        </row>
-        <row>
-          <entry>1 EiB</entry>
-          <entry>[5 EiB, 6 EiB, 7 EiB]</entry>
-        </row>
-      </tbody>
-      </tgroup>
-    </table>
-  </refsect1>
-  <refsect1 id="mallctl_namespace">
-    <title>MALLCTL NAMESPACE</title>
-    <para>The following names are defined in the namespace accessible via the
-    <function>mallctl*()</function> functions.  Value types are specified in
-    parentheses, their readable/writable statuses are encoded as
-    <literal>rw</literal>, <literal>r-</literal>, <literal>-w</literal>, or
-    <literal>--</literal>, and required build configuration flags follow, if
-    any.  A name element encoded as <literal>&lt;i&gt;</literal> or
-    <literal>&lt;j&gt;</literal> indicates an integer component, where the
-    integer varies from 0 to some upper value that must be determined via
-    introspection.  In the case of <mallctl>stats.arenas.&lt;i&gt;.*</mallctl>
-    and <mallctl>arena.&lt;i&gt;.{initialized,purge,decay,dss}</mallctl>,
-    <literal>&lt;i&gt;</literal> equal to
-    <constant>MALLCTL_ARENAS_ALL</constant> can be used to operate on all arenas
-    or access the summation of statistics from all arenas; similarly
-    <literal>&lt;i&gt;</literal> equal to
-    <constant>MALLCTL_ARENAS_DESTROYED</constant> can be used to access the
-    summation of statistics from all destroyed arenas.  These constants can be
-    utilized either via <function>mallctlnametomib()</function> followed by
-    <function>mallctlbymib()</function>, or via code such as the following:
-    <programlisting language="C"><![CDATA[
-#define STRINGIFY_HELPER(x) #x
-#define STRINGIFY(x) STRINGIFY_HELPER(x)
-
-mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
-    NULL, NULL, NULL, 0);]]></programlisting>
-    Take special note of the <link
-    linkend="epoch"><mallctl>epoch</mallctl></link> mallctl, which controls
-    refreshing of cached dynamic statistics.</para>
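A minimal sketch of refreshing cached statistics via the epoch mallctl mentioned above, before reading other values:

    #include <stdint.h>
    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    void refresh_stats(void) {
        uint64_t epoch = 1;
        size_t len = sizeof(epoch);
        /* Writing any value advances the epoch and refreshes the data that
         * subsequent mallctl*() reads report. */
        mallctl("epoch", &epoch, &len, &epoch, len);
    }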
-
-    <variablelist>
-      <varlistentry id="version">
-        <term>
-          <mallctl>version</mallctl>
-          (<type>const char *</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Return the jemalloc version string.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="epoch">
-        <term>
-          <mallctl>epoch</mallctl>
-          (<type>uint64_t</type>)
-          <literal>rw</literal>
-        </term>
-        <listitem><para>If a value is passed in, refresh the data from which
-        the <function>mallctl*()</function> functions report values,
-        and increment the epoch.  Return the current epoch.  This is useful for
-        detecting whether another thread caused a refresh.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="config.cache_oblivious">
-        <term>
-          <mallctl>config.cache_oblivious</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--enable-cache-oblivious</option> was specified
-        during build configuration.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="config.debug">
-        <term>
-          <mallctl>config.debug</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--enable-debug</option> was specified during
-        build configuration.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="config.fill">
-        <term>
-          <mallctl>config.fill</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--enable-fill</option> was specified during
-        build configuration.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="config.lazy_lock">
-        <term>
-          <mallctl>config.lazy_lock</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--enable-lazy-lock</option> was specified
-        during build configuration.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="config.malloc_conf">
-        <term>
-          <mallctl>config.malloc_conf</mallctl>
-          (<type>const char *</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Embedded configure-time-specified run-time options
-        string, empty unless <option>--with-malloc-conf</option> was specified
-        during build configuration.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="config.munmap">
-        <term>
-          <mallctl>config.munmap</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--enable-munmap</option> was specified during
-        build configuration.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="config.prof">
-        <term>
-          <mallctl>config.prof</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--enable-prof</option> was specified during
-        build configuration.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="config.prof_libgcc">
-        <term>
-          <mallctl>config.prof_libgcc</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--disable-prof-libgcc</option> was not
-        specified during build configuration.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="config.prof_libunwind">
-        <term>
-          <mallctl>config.prof_libunwind</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--enable-prof-libunwind</option> was specified
-        during build configuration.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="config.stats">
-        <term>
-          <mallctl>config.stats</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--enable-stats</option> was specified during
-        build configuration.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="config.tcache">
-        <term>
-          <mallctl>config.tcache</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--disable-tcache</option> was not specified
-        during build configuration.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="config.tls">
-        <term>
-          <mallctl>config.tls</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--disable-tls</option> was not specified during
-        build configuration.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="config.utrace">
-        <term>
-          <mallctl>config.utrace</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--enable-utrace</option> was specified during
-        build configuration.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="config.xmalloc">
-        <term>
-          <mallctl>config.xmalloc</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--enable-xmalloc</option> was specified during
-        build configuration.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.abort">
-        <term>
-          <mallctl>opt.abort</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Abort-on-warning enabled/disabled.  If true, most
-        warnings are fatal.  The process will call
-        <citerefentry><refentrytitle>abort</refentrytitle>
-        <manvolnum>3</manvolnum></citerefentry> in these cases.  This option is
-        disabled by default unless <option>--enable-debug</option> is
-        specified during configuration, in which case it is enabled by default.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.dss">
-        <term>
-          <mallctl>opt.dss</mallctl>
-          (<type>const char *</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle>
-        <manvolnum>2</manvolnum></citerefentry>) allocation precedence as
-        related to <citerefentry><refentrytitle>mmap</refentrytitle>
-        <manvolnum>2</manvolnum></citerefentry> allocation.  The following
-        settings are supported if
-        <citerefentry><refentrytitle>sbrk</refentrytitle>
-        <manvolnum>2</manvolnum></citerefentry> is supported by the operating
-        system: <quote>disabled</quote>, <quote>primary</quote>, and
-        <quote>secondary</quote>; otherwise only <quote>disabled</quote> is
-        supported.  The default is <quote>secondary</quote> if
-        <citerefentry><refentrytitle>sbrk</refentrytitle>
-        <manvolnum>2</manvolnum></citerefentry> is supported by the operating
-        system; <quote>disabled</quote> otherwise.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.narenas">
-        <term>
-          <mallctl>opt.narenas</mallctl>
-          (<type>unsigned</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Maximum number of arenas to use for automatic
-        multiplexing of threads and arenas.  The default is four times the
-        number of CPUs, or one if there is a single CPU.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.decay_time">
-        <term>
-          <mallctl>opt.decay_time</mallctl>
-          (<type>ssize_t</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Approximate time in seconds from the creation of a set
-        of unused dirty pages until an equivalent set of unused dirty pages is
-        purged and/or reused.  The pages are incrementally purged according to a
-        sigmoidal decay curve that starts and ends with zero purge rate.  A
-        decay time of 0 causes all unused dirty pages to be purged immediately
-        upon creation.  A decay time of -1 disables purging.  The default decay
-        time is 10 seconds.  See <link
-        linkend="arenas.decay_time"><mallctl>arenas.decay_time</mallctl></link>
-        and <link
-        linkend="arena.i.decay_time"><mallctl>arena.&lt;i&gt;.decay_time</mallctl></link>
-        for related dynamic control options.
-        </para></listitem>
-      </varlistentry>
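-
-      <para>As an illustration only, the decay time could be raised at compile
-      time via the same <literal>malloc_conf</literal> mechanism used
-      elsewhere in this manual; the value shown is a hypothetical choice, not
-      a recommendation:</para>
-      <programlisting language="C"><![CDATA[
-/* Purge unused dirty pages after roughly 30 seconds rather than the
- * 10-second default; -1 would disable purging entirely. */
-const char *malloc_conf = "decay_time:30";]]></programlisting>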
-
-      <varlistentry id="opt.stats_print">
-        <term>
-          <mallctl>opt.stats_print</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Enable/disable statistics printing at exit.  If
-        enabled, the <function>malloc_stats_print()</function>
-        function is called at program exit via an
-        <citerefentry><refentrytitle>atexit</refentrytitle>
-        <manvolnum>3</manvolnum></citerefentry> function.  If
-        <option>--enable-stats</option> is specified during configuration, this
-        has the potential to cause deadlock for a multi-threaded process that
-        exits while one or more threads are executing in the memory allocation
-        functions.  Furthermore, <function>atexit()</function> may
-        allocate memory during application initialization and then deadlock
-        internally when jemalloc in turn calls
-        <function>atexit()</function>, so this option is not
-        universally usable (though the application can register its own
-        <function>atexit()</function> function with equivalent
-        functionality).  Therefore, this option should only be used with care;
-        it is primarily intended as a performance tuning aid during application
-        development.  This option is disabled by default.</para></listitem>
-      </varlistentry>
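-
-      <para>A minimal sketch of requesting the same report on demand instead
-      of at exit, assuming the standard
-      <filename>jemalloc/jemalloc.h</filename> header:</para>
-      <programlisting language="C"><![CDATA[
-#include <jemalloc/jemalloc.h>
-
-/* Print allocator statistics immediately; NULL arguments select the
- * default output callback (stderr) and unfiltered output. */
-void
-report_stats(void)
-{
-	malloc_stats_print(NULL, NULL, NULL);
-}]]></programlisting>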
-
-      <varlistentry id="opt.junk">
-        <term>
-          <mallctl>opt.junk</mallctl>
-          (<type>const char *</type>)
-          <literal>r-</literal>
-          [<option>--enable-fill</option>]
-        </term>
-        <listitem><para>Junk filling.  If set to <quote>alloc</quote>, each byte
-        of uninitialized allocated memory will be initialized to
-        <literal>0xa5</literal>.  If set to <quote>free</quote>, all deallocated
-        memory will be initialized to <literal>0x5a</literal>.  If set to
-        <quote>true</quote>, both allocated and deallocated memory will be
-        initialized, and if set to <quote>false</quote>, junk filling will be
-        disabled entirely.  This is intended for debugging and will impact
-        performance negatively.  This option is <quote>false</quote> by default
-        unless <option>--enable-debug</option> is specified during
-        configuration, in which case it is <quote>true</quote> by
-        default.</para></listitem>
-      </varlistentry>
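-
-      <para>For example (a debugging configuration, not a default to ship
-      with), junk filling can be enabled at compile time when the build was
-      configured with <option>--enable-fill</option>:</para>
-      <programlisting language="C"><![CDATA[
-/* Fill new allocations with 0xa5 and freed memory with 0x5a. */
-const char *malloc_conf = "junk:true";]]></programlisting>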
-
-      <varlistentry id="opt.zero">
-        <term>
-          <mallctl>opt.zero</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-          [<option>--enable-fill</option>]
-        </term>
-        <listitem><para>Zero filling enabled/disabled.  If enabled, each byte
-        of uninitialized allocated memory will be initialized to 0.  Note that
-        this initialization only happens once for each byte, so
-        <function>realloc()</function> and
-        <function>rallocx()</function> calls do not zero memory that
-        was previously allocated.  This is intended for debugging and will
-        impact performance negatively.  This option is disabled by default.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.utrace">
-        <term>
-          <mallctl>opt.utrace</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-          [<option>--enable-utrace</option>]
-        </term>
-        <listitem><para>Allocation tracing based on
-        <citerefentry><refentrytitle>utrace</refentrytitle>
-        <manvolnum>2</manvolnum></citerefentry> enabled/disabled.  This option
-        is disabled by default.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.xmalloc">
-        <term>
-          <mallctl>opt.xmalloc</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-          [<option>--enable-xmalloc</option>]
-        </term>
-        <listitem><para>Abort-on-out-of-memory enabled/disabled.  If enabled,
-        rather than returning failure for any allocation function, display a
-        diagnostic message on <constant>STDERR_FILENO</constant> and cause the
-        program to drop core (using
-        <citerefentry><refentrytitle>abort</refentrytitle>
-        <manvolnum>3</manvolnum></citerefentry>).  If an application is
-        designed to depend on this behavior, set the option at compile time by
-        including the following in the source code:
-        <programlisting language="C"><![CDATA[
-malloc_conf = "xmalloc:true";]]></programlisting>
-        This option is disabled by default.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.tcache">
-        <term>
-          <mallctl>opt.tcache</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-          [<option>--enable-tcache</option>]
-        </term>
-        <listitem><para>Thread-specific caching (tcache) enabled/disabled.  When
-        there are multiple threads, each thread uses a tcache for objects up to
-        a certain size.  Thread-specific caching allows many allocations to be
-        satisfied without performing any thread synchronization, at the cost of
-        increased memory use.  See the <link
-        linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link>
-        option for related tuning information.  This option is enabled by
-        default.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.lg_tcache_max">
-        <term>
-          <mallctl>opt.lg_tcache_max</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-tcache</option>]
-        </term>
-        <listitem><para>Maximum size class (log base 2) to cache in the
-        thread-specific cache (tcache).  At a minimum, all small size classes
-        are cached, and at a maximum all large size classes are cached.  The
-        default maximum is 32 KiB (2^15).</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.prof">
-        <term>
-          <mallctl>opt.prof</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Memory profiling enabled/disabled.  If enabled, profile
-        memory allocation activity.  See the <link
-        linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link>
-        option for on-the-fly activation/deactivation.  See the <link
-        linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>
-        option for probabilistic sampling control.  See the <link
-        linkend="opt.prof_accum"><mallctl>opt.prof_accum</mallctl></link>
-        option for control of cumulative sample reporting.  See the <link
-        linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link>
-        option for information on interval-triggered profile dumping, the <link
-        linkend="opt.prof_gdump"><mallctl>opt.prof_gdump</mallctl></link>
-        option for information on high-water-triggered profile dumping, and the
-        <link linkend="opt.prof_final"><mallctl>opt.prof_final</mallctl></link>
-        option for final profile dumping.  Profile output is compatible with
-        the <command>jeprof</command> command, which is based on the
-        <command>pprof</command> that is developed as part of the <ulink
-        url="http://code.google.com/p/gperftools/">gperftools
-        package</ulink>.  See <link linkend="heap_profile_format">HEAP PROFILE
-        FORMAT</link> for heap profile format documentation.</para></listitem>
-      </varlistentry>
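-
-      <para>As a sketch (assuming a build configured with
-      <option>--enable-prof</option>), profiling could be enabled from program
-      start with a final dump at exit; the sample interval shown is simply the
-      documented default:</para>
-      <programlisting language="C"><![CDATA[
-/* Sample roughly every 512 KiB (2^19 B) of allocation activity and dump a
- * final profile via atexit(3); analyze the output with jeprof. */
-const char *malloc_conf = "prof:true,lg_prof_sample:19,prof_final:true";]]></programlisting>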
-
-      <varlistentry id="opt.prof_prefix">
-        <term>
-          <mallctl>opt.prof_prefix</mallctl>
-          (<type>const char *</type>)
-          <literal>r-</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Filename prefix for profile dumps.  If the prefix is
-        set to the empty string, no automatic dumps will occur; this is
-        primarily useful for disabling the automatic final heap dump (which
-        also disables leak reporting, if enabled).  The default prefix is
-        <filename>jeprof</filename>.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.prof_active">
-        <term>
-          <mallctl>opt.prof_active</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Profiling activated/deactivated.  This is a secondary
-        control mechanism that makes it possible to start the application with
-        profiling enabled (see the <link
-        linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option) but
-        inactive, then toggle profiling at any time during program execution
-        with the <link
-        linkend="prof.active"><mallctl>prof.active</mallctl></link> mallctl.
-        This option is enabled by default.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.prof_thread_active_init">
-        <term>
-          <mallctl>opt.prof_thread_active_init</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Initial setting for <link
-        linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
-        in newly created threads.  The initial setting for newly created threads
-        can also be changed during execution via the <link
-        linkend="prof.thread_active_init"><mallctl>prof.thread_active_init</mallctl></link>
-        mallctl.  This option is enabled by default.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.lg_prof_sample">
-        <term>
-          <mallctl>opt.lg_prof_sample</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Average interval (log base 2) between allocation
-        samples, as measured in bytes of allocation activity.  Increasing the
-        sampling interval decreases profile fidelity, but also decreases the
-        computational overhead.  The default sample interval is 512 KiB (2^19
-        B).</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.prof_accum">
-        <term>
-          <mallctl>opt.prof_accum</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Reporting of cumulative object/byte counts in profile
-        dumps enabled/disabled.  If this option is enabled, every unique
-        backtrace must be stored for the duration of execution.  Depending on
-        the application, this can impose a large memory overhead, and the
-        cumulative counts are not always of interest.  This option is disabled
-        by default.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.lg_prof_interval">
-        <term>
-          <mallctl>opt.lg_prof_interval</mallctl>
-          (<type>ssize_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Average interval (log base 2) between memory profile
-        dumps, as measured in bytes of allocation activity.  The actual
-        interval between dumps may be sporadic because decentralized allocation
-        counters are used to avoid synchronization bottlenecks.  Profiles are
-        dumped to files named according to the pattern
-        <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.i&lt;iseq&gt;.heap</filename>,
-        where <literal>&lt;prefix&gt;</literal> is controlled by the
-        <link
-        linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
-        option.  By default, interval-triggered profile dumping is disabled
-        (encoded as -1).
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.prof_gdump">
-        <term>
-          <mallctl>opt.prof_gdump</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Set the initial state of <link
-        linkend="prof.gdump"><mallctl>prof.gdump</mallctl></link>, which when
-        enabled triggers a memory profile dump every time the total virtual
-        memory exceeds the previous maximum.  This option is disabled by
-        default.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.prof_final">
-        <term>
-          <mallctl>opt.prof_final</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Use an
-        <citerefentry><refentrytitle>atexit</refentrytitle>
-        <manvolnum>3</manvolnum></citerefentry> function to dump final memory
-        usage to a file named according to the pattern
-        <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.f.heap</filename>,
-        where <literal>&lt;prefix&gt;</literal> is controlled by the <link
-        linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
-        option.  Note that <function>atexit()</function> may allocate
-        memory during application initialization and then deadlock internally
-        when jemalloc in turn calls <function>atexit()</function>, so
-        this option is not universally usable (though the application can
-        register its own <function>atexit()</function> function with
-        equivalent functionality).  This option is disabled by
-        default.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="opt.prof_leak">
-        <term>
-          <mallctl>opt.prof_leak</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Leak reporting enabled/disabled.  If enabled, use an
-        <citerefentry><refentrytitle>atexit</refentrytitle>
-        <manvolnum>3</manvolnum></citerefentry> function to report memory leaks
-        detected by allocation sampling.  See the
-        <link linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option for
-        information on analyzing heap profile output.  This option is disabled
-        by default.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="thread.arena">
-        <term>
-          <mallctl>thread.arena</mallctl>
-          (<type>unsigned</type>)
-          <literal>rw</literal>
-        </term>
-        <listitem><para>Get or set the arena associated with the calling
-        thread.  If the specified arena was not initialized beforehand (see the
-        <link
-        linkend="arena.i.initialized"><mallctl>arena.i.initialized</mallctl></link>
-        mallctl), it will be automatically initialized as a side effect of
-        calling this interface.</para></listitem>
-      </varlistentry>
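-
-      <para>A minimal sketch of reading and then changing the calling thread's
-      arena binding via <function>mallctl()</function> (error handling
-      elided):</para>
-      <programlisting language="C"><![CDATA[
-#include <jemalloc/jemalloc.h>
-
-void
-rebind_thread(unsigned new_arena)
-{
-	unsigned cur;
-	size_t sz = sizeof(cur);
-
-	/* Read the arena currently associated with this thread. */
-	mallctl("thread.arena", &cur, &sz, NULL, 0);
-	/* Associate the calling thread with new_arena instead. */
-	mallctl("thread.arena", NULL, NULL, &new_arena, sizeof(new_arena));
-}]]></programlisting>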
-
-      <varlistentry id="thread.allocated">
-        <term>
-          <mallctl>thread.allocated</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Get the total number of bytes ever allocated by the
-        calling thread.  This counter has the potential to wrap around; it is
-        up to the application to appropriately interpret the counter in such
-        cases.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="thread.allocatedp">
-        <term>
-          <mallctl>thread.allocatedp</mallctl>
-          (<type>uint64_t *</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Get a pointer to the value that is returned by the
-        <link
-        linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link>
-        mallctl.  This is useful for avoiding the overhead of repeated
-        <function>mallctl*()</function> calls.</para></listitem>
-      </varlistentry>
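-
-      <para>For illustration, the pointer can be fetched once and then
-      dereferenced directly in hot paths (requires
-      <option>--enable-stats</option>; error handling elided):</para>
-      <programlisting language="C"><![CDATA[
-#include <stdint.h>
-#include <jemalloc/jemalloc.h>
-
-/* Returns a pointer to this thread's cumulative allocation counter, or
- * NULL on failure; subsequent reads avoid further mallctl() calls. */
-static uint64_t *
-thread_allocated_ptr(void)
-{
-	uint64_t *p;
-	size_t sz = sizeof(p);
-
-	if (mallctl("thread.allocatedp", &p, &sz, NULL, 0) != 0)
-		return NULL;
-	return p;
-}]]></programlisting>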
-
-      <varlistentry id="thread.deallocated">
-        <term>
-          <mallctl>thread.deallocated</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Get the total number of bytes ever deallocated by the
-        calling thread.  This counter has the potential to wrap around; it is
-        up to the application to appropriately interpret the counter in such
-        cases.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="thread.deallocatedp">
-        <term>
-          <mallctl>thread.deallocatedp</mallctl>
-          (<type>uint64_t *</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Get a pointer to the value that is returned by the
-        <link
-        linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link>
-        mallctl.  This is useful for avoiding the overhead of repeated
-        <function>mallctl*()</function> calls.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="thread.tcache.enabled">
-        <term>
-          <mallctl>thread.tcache.enabled</mallctl>
-          (<type>bool</type>)
-          <literal>rw</literal>
-          [<option>--enable-tcache</option>]
-        </term>
-        <listitem><para>Enable/disable calling thread's tcache.  The tcache is
-        implicitly flushed as a side effect of becoming
-        disabled (see <link
-        linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>).
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="thread.tcache.flush">
-        <term>
-          <mallctl>thread.tcache.flush</mallctl>
-          (<type>void</type>)
-          <literal>--</literal>
-          [<option>--enable-tcache</option>]
-        </term>
-        <listitem><para>Flush calling thread's thread-specific cache (tcache).
-        This interface releases all cached objects and internal data structures
-        associated with the calling thread's tcache.  Ordinarily, this interface
-        need not be called, since automatic periodic incremental garbage
-        collection occurs, and the thread cache is automatically discarded when
-        a thread exits.  However, garbage collection is triggered by allocation
-        activity, so it is possible for a thread that stops
-        allocating/deallocating to retain its cache indefinitely, in which case
-        the developer may find manual flushing useful.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="thread.prof.name">
-        <term>
-          <mallctl>thread.prof.name</mallctl>
-          (<type>const char *</type>)
-          <literal>r-</literal> or
-          <literal>-w</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Get/set the descriptive name associated with the calling
-        thread in memory profile dumps.  An internal copy of the name string is
-        created, so the input string need not be maintained after this interface
-        completes execution.  The output string of this interface should be
-        copied for non-ephemeral uses, because multiple implementation details
-        can cause asynchronous string deallocation.  Furthermore, each
-        invocation of this interface can only read or write; simultaneous
-        read/write is not supported due to string lifetime limitations.  The
-        name string must be nil-terminated and comprised only of characters in
-        the sets recognized
-        by <citerefentry><refentrytitle>isgraph</refentrytitle>
-        <manvolnum>3</manvolnum></citerefentry> and
-        <citerefentry><refentrytitle>isblank</refentrytitle>
-        <manvolnum>3</manvolnum></citerefentry>.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="thread.prof.active">
-        <term>
-          <mallctl>thread.prof.active</mallctl>
-          (<type>bool</type>)
-          <literal>rw</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Control whether sampling is currently active for the
-        calling thread.  This is an activation mechanism in addition to <link
-        linkend="prof.active"><mallctl>prof.active</mallctl></link>; both must
-        be active for the calling thread to sample.  This flag is enabled by
-        default.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="tcache.create">
-        <term>
-          <mallctl>tcache.create</mallctl>
-          (<type>unsigned</type>)
-          <literal>r-</literal>
-          [<option>--enable-tcache</option>]
-        </term>
-        <listitem><para>Create an explicit thread-specific cache (tcache) and
-        return an identifier that can be passed to the <link
-        linkend="MALLOCX_TCACHE"><constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant></link>
-        macro to explicitly use the specified cache rather than the
-        automatically managed one that is used by default.  Each explicit cache
-        can be used by only one thread at a time; the application must assure
-        that this constraint holds.
-        </para></listitem>
-      </varlistentry>
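-
-      <para>A sketch of the full lifecycle of an explicit tcache, assuming a
-      build configured with <option>--enable-tcache</option> (error handling
-      elided):</para>
-      <programlisting language="C"><![CDATA[
-#include <jemalloc/jemalloc.h>
-
-void
-explicit_tcache_example(void)
-{
-	unsigned tc;
-	size_t sz = sizeof(tc);
-
-	/* Create the cache and obtain its identifier. */
-	mallctl("tcache.create", &tc, &sz, NULL, 0);
-
-	/* Allocate and deallocate through the explicit cache. */
-	void *p = mallocx(64, MALLOCX_TCACHE(tc));
-	dallocx(p, MALLOCX_TCACHE(tc));
-
-	/* Flush the cache and release its identifier for reuse. */
-	mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
-}]]></programlisting>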
-
-      <varlistentry id="tcache.flush">
-        <term>
-          <mallctl>tcache.flush</mallctl>
-          (<type>unsigned</type>)
-          <literal>-w</literal>
-          [<option>--enable-tcache</option>]
-        </term>
-        <listitem><para>Flush the specified thread-specific cache (tcache).  The
-        same considerations apply to this interface as to <link
-        linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>,
-        except that the tcache will never be automatically discarded.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="tcache.destroy">
-        <term>
-          <mallctl>tcache.destroy</mallctl>
-          (<type>unsigned</type>)
-          <literal>-w</literal>
-          [<option>--enable-tcache</option>]
-        </term>
-        <listitem><para>Flush the specified thread-specific cache (tcache) and
-        make the identifier available for use during a future tcache creation.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arena.i.initialized">
-        <term>
-          <mallctl>arena.&lt;i&gt;.initialized</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Get whether the specified arena's statistics are
-        initialized (i.e. the arena was initialized prior to the current epoch).
-        This interface can also be nominally used to query whether the merged
-        statistics corresponding to <constant>MALLCTL_ARENAS_ALL</constant> are
-        initialized (always true).</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arena.i.purge">
-        <term>
-          <mallctl>arena.&lt;i&gt;.purge</mallctl>
-          (<type>void</type>)
-          <literal>--</literal>
-        </term>
-        <listitem><para>Purge all unused dirty pages for arena &lt;i&gt;, or for
-        all arenas if &lt;i&gt; equals <constant>MALLCTL_ARENAS_ALL</constant>.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arena.i.decay">
-        <term>
-          <mallctl>arena.&lt;i&gt;.decay</mallctl>
-          (<type>void</type>)
-          <literal>--</literal>
-        </term>
-        <listitem><para>Trigger decay-based purging of unused dirty pages for
-        arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
-        <constant>MALLCTL_ARENAS_ALL</constant>.  The proportion of unused dirty
-        pages to be purged depends on the current time; see <link
-        linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
-        details.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arena.i.reset">
-        <term>
-          <mallctl>arena.&lt;i&gt;.reset</mallctl>
-          (<type>void</type>)
-          <literal>--</literal>
-        </term>
-        <listitem><para>Discard all of the arena's extant allocations.  This
-        interface can only be used with arenas explicitly created via <link
-        linkend="arenas.create"><mallctl>arenas.create</mallctl></link>.  None
-        of the arena's discarded/cached allocations may be accessed afterward.  As
-        part of this requirement, all thread caches which were used to
-        allocate/deallocate in conjunction with the arena must be flushed
-        beforehand.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arena.i.destroy">
-        <term>
-          <mallctl>arena.&lt;i&gt;.destroy</mallctl>
-          (<type>void</type>)
-          <literal>--</literal>
-        </term>
-        <listitem><para>Destroy the arena.  Discard all of the arena's extant
-        allocations using the same mechanism as for <link
-        linkend="arena.i.reset"><mallctl>arena.&lt;i&gt;.reset</mallctl></link>
-        (with all the same constraints and side effects), merge the arena stats
-        into those accessible at arena index
-        <constant>MALLCTL_ARENAS_DESTROYED</constant>, and then completely
-        discard all metadata associated with the arena.  Future calls to <link
-        linkend="arenas.create"><mallctl>arenas.create</mallctl></link> may
-        recycle the arena index.  Destruction will fail if any threads are
-        currently associated with the arena as a result of calls to <link
-        linkend="thread.arena"><mallctl>thread.arena</mallctl></link>.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arena.i.dss">
-        <term>
-          <mallctl>arena.&lt;i&gt;.dss</mallctl>
-          (<type>const char *</type>)
-          <literal>rw</literal>
-        </term>
-        <listitem><para>Set the precedence of dss allocation as related to mmap
-        allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
-        <constant>MALLCTL_ARENAS_ALL</constant>.  See <link
-        linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
-        settings.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arena.i.decay_time">
-        <term>
-          <mallctl>arena.&lt;i&gt;.decay_time</mallctl>
-          (<type>ssize_t</type>)
-          <literal>rw</literal>
-        </term>
-        <listitem><para>Current per-arena approximate time in seconds from the
-        creation of a set of unused dirty pages until an equivalent set of
-        unused dirty pages is purged and/or reused.  Each time this interface is
-        set, all currently unused dirty pages are considered to have fully
-        decayed, which causes immediate purging of all unused dirty pages unless
-        the decay time is set to -1 (i.e. purging disabled).  See <link
-        linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
-        additional information.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arena.i.extent_hooks">
-        <term>
-          <mallctl>arena.&lt;i&gt;.extent_hooks</mallctl>
-          (<type>extent_hooks_t *</type>)
-          <literal>rw</literal>
-        </term>
-        <listitem><para>Get or set the extent management hook functions for
-        arena &lt;i&gt;.  The functions must be capable of operating on all
-        extant extents associated with arena &lt;i&gt;, usually by passing
-        unknown extents to the replaced functions.  In practice, it is feasible
-        to control allocation for arenas explicitly created via <link
-        linkend="arenas.create"><mallctl>arenas.create</mallctl></link> such
-        that all extents originate from an application-supplied extent allocator
-        (by specifying the custom extent hook functions during arena creation),
-        but the automatically created arenas will have already created extents
-        prior to the application having an opportunity to take over extent
-        allocation.</para>
-
-        <programlisting language="C"><![CDATA[
-typedef extent_hooks_s extent_hooks_t;
-struct extent_hooks_s {
-	extent_alloc_t		*alloc;
-	extent_dalloc_t		*dalloc;
-	extent_commit_t		*commit;
-	extent_decommit_t	*decommit;
-	extent_purge_t		*purge_lazy;
-	extent_purge_t		*purge_forced;
-	extent_split_t		*split;
-	extent_merge_t		*merge;
-};]]></programlisting>
-        <para>The <type>extent_hooks_t</type> structure comprises function
-        pointers which are described individually below.  jemalloc uses these
-        functions to manage extent lifetime, which starts off with allocation of
-        mapped committed memory, in the simplest case followed by deallocation.
-        However, there are performance and platform reasons to retain extents
-        for later reuse.  Cleanup attempts cascade from deallocation to decommit
-        to lazy purging to forced purging, which gives the extent management
-        functions opportunities to reject the most permanent cleanup operations
-        in favor of less permanent (and often less costly) operations.  All
-        operations except allocation can be universally opted out of by setting
-        the hook pointers to <constant>NULL</constant>, or selectively opted out
-        of by returning failure.</para>
-
-        <funcsynopsis><funcprototype>
-          <funcdef>typedef void *<function>(extent_alloc_t)</function></funcdef>
-          <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
-          <paramdef>void *<parameter>new_addr</parameter></paramdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-          <paramdef>size_t <parameter>alignment</parameter></paramdef>
-          <paramdef>bool *<parameter>zero</parameter></paramdef>
-          <paramdef>bool *<parameter>commit</parameter></paramdef>
-          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
-        </funcprototype></funcsynopsis>
-        <literallayout></literallayout>
-        <para>An extent allocation function conforms to the
-        <type>extent_alloc_t</type> type and upon success returns a pointer to
-        <parameter>size</parameter> bytes of mapped memory on behalf of arena
-        <parameter>arena_ind</parameter> such that the extent's base address is
-        a multiple of <parameter>alignment</parameter>, as well as setting
-        <parameter>*zero</parameter> to indicate whether the extent is zeroed
-        and <parameter>*commit</parameter> to indicate whether the extent is
-        committed.  Upon error the function returns <constant>NULL</constant>
-        and leaves <parameter>*zero</parameter> and
-        <parameter>*commit</parameter> unmodified.  The
-        <parameter>size</parameter> parameter is always a multiple of the page
-        size.  The <parameter>alignment</parameter> parameter is always a power
-        of two at least as large as the page size.  Zeroing is mandatory if
-        <parameter>*zero</parameter> is true upon function entry.  Committing is
-        mandatory if <parameter>*commit</parameter> is true upon function entry.
-        If <parameter>new_addr</parameter> is not <constant>NULL</constant>, the
-        returned pointer must be <parameter>new_addr</parameter> on success or
-        <constant>NULL</constant> on error.  Committed memory may be committed
-        in absolute terms as on a system that does not overcommit, or in
-        implicit terms as on a system that overcommits and satisfies physical
-        memory needs on demand via soft page faults.  Note that replacing the
-        default extent allocation function makes the arena's <link
-        linkend="arena.i.dss"><mallctl>arena.&lt;i&gt;.dss</mallctl></link>
-        setting irrelevant.</para>
-
-        <funcsynopsis><funcprototype>
-          <funcdef>typedef bool <function>(extent_dalloc_t)</function></funcdef>
-          <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
-          <paramdef>void *<parameter>addr</parameter></paramdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-          <paramdef>bool <parameter>committed</parameter></paramdef>
-          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
-        </funcprototype></funcsynopsis>
-        <literallayout></literallayout>
-        <para>
-        An extent deallocation function conforms to the
-        <type>extent_dalloc_t</type> type and deallocates an extent at given
-        <parameter>addr</parameter> and <parameter>size</parameter> with
-        <parameter>committed</parameter>/decommitted memory as indicated, on
-        behalf of arena <parameter>arena_ind</parameter>, returning false upon
-        success.  If the function returns true, this indicates opt-out from
-        deallocation; the virtual memory mapping associated with the extent
-        remains mapped, in the same commit state, and available for future use,
-        in which case it will be automatically retained for later reuse.</para>
-
-        <funcsynopsis><funcprototype>
-          <funcdef>typedef bool <function>(extent_commit_t)</function></funcdef>
-          <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
-          <paramdef>void *<parameter>addr</parameter></paramdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-          <paramdef>size_t <parameter>offset</parameter></paramdef>
-          <paramdef>size_t <parameter>length</parameter></paramdef>
-          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
-        </funcprototype></funcsynopsis>
-        <literallayout></literallayout>
-        <para>An extent commit function conforms to the
-        <type>extent_commit_t</type> type and commits zeroed physical memory to
-        back pages within an extent at given <parameter>addr</parameter> and
-        <parameter>size</parameter> at <parameter>offset</parameter> bytes,
-        extending for <parameter>length</parameter> on behalf of arena
-        <parameter>arena_ind</parameter>, returning false upon success.
-        Committed memory may be committed in absolute terms as on a system that
-        does not overcommit, or in implicit terms as on a system that
-        overcommits and satisfies physical memory needs on demand via soft page
-        faults.  If the function returns true, this indicates insufficient
-        physical memory to satisfy the request.</para>
-
-        <funcsynopsis><funcprototype>
-          <funcdef>typedef bool <function>(extent_decommit_t)</function></funcdef>
-          <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
-          <paramdef>void *<parameter>addr</parameter></paramdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-          <paramdef>size_t <parameter>offset</parameter></paramdef>
-          <paramdef>size_t <parameter>length</parameter></paramdef>
-          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
-        </funcprototype></funcsynopsis>
-        <literallayout></literallayout>
-        <para>An extent decommit function conforms to the
-        <type>extent_decommit_t</type> type and decommits any physical memory
-        that is backing pages within an extent at given
-        <parameter>addr</parameter> and <parameter>size</parameter> at
-        <parameter>offset</parameter> bytes, extending for
-        <parameter>length</parameter> on behalf of arena
-        <parameter>arena_ind</parameter>, returning false upon success, in which
-        case the pages will be committed via the extent commit function before
-        being reused.  If the function returns true, this indicates opt-out from
-        decommit; the memory remains committed and available for future use, in
-        which case it will be automatically retained for later reuse.</para>
-
-        <funcsynopsis><funcprototype>
-          <funcdef>typedef bool <function>(extent_purge_t)</function></funcdef>
-          <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
-          <paramdef>void *<parameter>addr</parameter></paramdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-          <paramdef>size_t <parameter>offset</parameter></paramdef>
-          <paramdef>size_t <parameter>length</parameter></paramdef>
-          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
-        </funcprototype></funcsynopsis>
-        <literallayout></literallayout>
-        <para>An extent purge function conforms to the
-        <type>extent_purge_t</type> type and discards physical pages
-        within the virtual memory mapping associated with an extent at given
-        <parameter>addr</parameter> and <parameter>size</parameter> at
-        <parameter>offset</parameter> bytes, extending for
-        <parameter>length</parameter> on behalf of arena
-        <parameter>arena_ind</parameter>.  A lazy extent purge function can
-        delay purging indefinitely and leave the pages within the purged virtual
-        memory range in an indeterminate state, whereas a forced extent purge
-        function immediately purges, and the pages within the virtual memory
-        range will be zero-filled the next time they are accessed.  If the
-        function returns true, this indicates failure to purge.</para>
-
-        <funcsynopsis><funcprototype>
-          <funcdef>typedef bool <function>(extent_split_t)</function></funcdef>
-          <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
-          <paramdef>void *<parameter>addr</parameter></paramdef>
-          <paramdef>size_t <parameter>size</parameter></paramdef>
-          <paramdef>size_t <parameter>size_a</parameter></paramdef>
-          <paramdef>size_t <parameter>size_b</parameter></paramdef>
-          <paramdef>bool <parameter>committed</parameter></paramdef>
-          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
-        </funcprototype></funcsynopsis>
-        <literallayout></literallayout>
-        <para>An extent split function conforms to the
-        <type>extent_split_t</type> type and optionally splits an extent at
-        given <parameter>addr</parameter> and <parameter>size</parameter> into
-        two adjacent extents, the first of <parameter>size_a</parameter> bytes,
-        and the second of <parameter>size_b</parameter> bytes, operating on
-        <parameter>committed</parameter>/decommitted memory as indicated, on
-        behalf of arena <parameter>arena_ind</parameter>, returning false upon
-        success.  If the function returns true, this indicates that the extent
-        remains unsplit and therefore should continue to be operated on as a
-        whole.</para>
-
-        <funcsynopsis><funcprototype>
-          <funcdef>typedef bool <function>(extent_merge_t)</function></funcdef>
-          <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
-          <paramdef>void *<parameter>addr_a</parameter></paramdef>
-          <paramdef>size_t <parameter>size_a</parameter></paramdef>
-          <paramdef>void *<parameter>addr_b</parameter></paramdef>
-          <paramdef>size_t <parameter>size_b</parameter></paramdef>
-          <paramdef>bool <parameter>committed</parameter></paramdef>
-          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
-        </funcprototype></funcsynopsis>
-        <literallayout></literallayout>
-        <para>An extent merge function conforms to the
-        <type>extent_merge_t</type> type and optionally merges adjacent extents,
-        at given <parameter>addr_a</parameter> and <parameter>size_a</parameter>
-        with given <parameter>addr_b</parameter> and
-        <parameter>size_b</parameter> into one contiguous extent, operating on
-        <parameter>committed</parameter>/decommitted memory as indicated, on
-        behalf of arena <parameter>arena_ind</parameter>, returning false upon
-        success.  If the function returns true, this indicates that the extents
-        remain distinct mappings and therefore should continue to be operated on
-        independently.</para>
-        </listitem>
-      </varlistentry>
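-
-      <para>A minimal sketch of an application-supplied allocation hook that
-      opts out of every other operation by setting the remaining pointers to
-      <constant>NULL</constant>; the
-      <citerefentry><refentrytitle>mmap</refentrytitle>
-      <manvolnum>2</manvolnum></citerefentry>-based hook and its installation
-      via <link linkend="arenas.create"><mallctl>arenas.create</mallctl></link>
-      are illustrative, not a production implementation:</para>
-      <programlisting language="C"><![CDATA[
-#include <stdbool.h>
-#include <sys/mman.h>
-#include <unistd.h>
-#include <jemalloc/jemalloc.h>
-
-static void *
-my_extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
-    size_t alignment, bool *zero, bool *commit, unsigned arena_ind)
-{
-	/* This sketch only guarantees page alignment and cannot honor a
-	 * caller-specified address. */
-	if (new_addr != NULL || alignment > (size_t)sysconf(_SC_PAGESIZE))
-		return NULL;
-	void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
-	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-	if (addr == MAP_FAILED)
-		return NULL;
-	*zero = true;	/* Anonymous mappings start out zeroed. */
-	*commit = true;	/* Treat the mapping as committed. */
-	return addr;
-}
-
-static extent_hooks_t my_hooks = {
-	my_extent_alloc,
-	NULL, NULL, NULL, NULL, NULL, NULL	/* Opt out of all other operations. */
-};
-
-unsigned
-create_hooked_arena(void)
-{
-	unsigned arena_ind;
-	size_t sz = sizeof(arena_ind);
-	extent_hooks_t *hooks = &my_hooks;
-
-	mallctl("arenas.create", &arena_ind, &sz, &hooks, sizeof(hooks));
-	return arena_ind;
-}]]></programlisting>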
-
-      <varlistentry id="arenas.narenas">
-        <term>
-          <mallctl>arenas.narenas</mallctl>
-          (<type>unsigned</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Current limit on number of arenas.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arenas.decay_time">
-        <term>
-          <mallctl>arenas.decay_time</mallctl>
-          (<type>ssize_t</type>)
-          <literal>rw</literal>
-        </term>
-        <listitem><para>Current default per-arena approximate time in seconds
-        from the creation of a set of unused dirty pages until an equivalent set
-        of unused dirty pages is purged and/or reused, used to initialize <link
-        linkend="arena.i.decay_time"><mallctl>arena.&lt;i&gt;.decay_time</mallctl></link>
-        during arena creation.  See <link
-        linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
-        additional information.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arenas.quantum">
-        <term>
-          <mallctl>arenas.quantum</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Quantum size.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arenas.page">
-        <term>
-          <mallctl>arenas.page</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Page size.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arenas.tcache_max">
-        <term>
-          <mallctl>arenas.tcache_max</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-tcache</option>]
-        </term>
-        <listitem><para>Maximum thread-cached size class.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arenas.nbins">
-        <term>
-          <mallctl>arenas.nbins</mallctl>
-          (<type>unsigned</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Number of bin size classes.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arenas.nhbins">
-        <term>
-          <mallctl>arenas.nhbins</mallctl>
-          (<type>unsigned</type>)
-          <literal>r-</literal>
-          [<option>--enable-tcache</option>]
-        </term>
-        <listitem><para>Total number of thread cache bin size
-        classes.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arenas.bin.i.size">
-        <term>
-          <mallctl>arenas.bin.&lt;i&gt;.size</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Maximum size supported by size class.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arenas.bin.i.nregs">
-        <term>
-          <mallctl>arenas.bin.&lt;i&gt;.nregs</mallctl>
-          (<type>uint32_t</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Number of regions per slab.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arenas.bin.i.slab_size">
-        <term>
-          <mallctl>arenas.bin.&lt;i&gt;.slab_size</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Number of bytes per slab.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arenas.nlextents">
-        <term>
-          <mallctl>arenas.nlextents</mallctl>
-          (<type>unsigned</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Total number of large size classes.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arenas.lextent.i.size">
-        <term>
-          <mallctl>arenas.lextent.&lt;i&gt;.size</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Maximum size supported by this large size
-        class.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="arenas.create">
-        <term>
-          <mallctl>arenas.create</mallctl>
-          (<type>unsigned</type>, <type>extent_hooks_t *</type>)
-          <literal>rw</literal>
-        </term>
-        <listitem><para>Explicitly create a new arena outside the range of
-        automatically managed arenas, with optionally specified extent hooks,
-        and return the new arena index.</para></listitem>
-      </varlistentry>
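-
-      <para>For illustration, a manually created arena can be paired with the
-      <constant>MALLOCX_ARENA(<parameter>a</parameter>)</constant> macro so
-      that particular allocations bypass the automatically managed arenas
-      (sketch only; error handling elided):</para>
-      <programlisting language="C"><![CDATA[
-#include <jemalloc/jemalloc.h>
-
-void
-private_arena_example(void)
-{
-	unsigned arena_ind;
-	size_t sz = sizeof(arena_ind);
-
-	/* NULL newp requests the default extent hooks. */
-	mallctl("arenas.create", &arena_ind, &sz, NULL, 0);
-
-	void *p = mallocx(4096, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
-	dallocx(p, MALLOCX_TCACHE_NONE);
-}]]></programlisting>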
-
-      <varlistentry id="prof.thread_active_init">
-        <term>
-          <mallctl>prof.thread_active_init</mallctl>
-          (<type>bool</type>)
-          <literal>rw</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Control the initial setting for <link
-        linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
-        in newly created threads.  See the <link
-        linkend="opt.prof_thread_active_init"><mallctl>opt.prof_thread_active_init</mallctl></link>
-        option for additional information.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="prof.active">
-        <term>
-          <mallctl>prof.active</mallctl>
-          (<type>bool</type>)
-          <literal>rw</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Control whether sampling is currently active.  See the
-        <link
-        linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link>
-        option for additional information, as well as the interrelated <link
-        linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
-        mallctl.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="prof.dump">
-        <term>
-          <mallctl>prof.dump</mallctl>
-          (<type>const char *</type>)
-          <literal>-w</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Dump a memory profile to the specified file, or if NULL
-        is specified, to a file according to the pattern
-        <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.m&lt;mseq&gt;.heap</filename>,
-        where <literal>&lt;prefix&gt;</literal> is controlled by the
-        <link
-        linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
-        option.</para></listitem>
-      </varlistentry>
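-
-      <para>A sketch of triggering an on-demand dump to an explicit filename
-      (requires <option>--enable-prof</option> and profiling enabled at
-      startup; error handling elided):</para>
-      <programlisting language="C"><![CDATA[
-#include <jemalloc/jemalloc.h>
-
-void
-dump_heap_profile(void)
-{
-	const char *filename = "myapp.heap";
-
-	/* Passing the filename as newp selects it instead of the
-	 * opt.prof_prefix-based pattern. */
-	mallctl("prof.dump", NULL, NULL, &filename, sizeof(filename));
-}]]></programlisting>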
-
-      <varlistentry id="prof.gdump">
-        <term>
-          <mallctl>prof.gdump</mallctl>
-          (<type>bool</type>)
-          <literal>rw</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>When enabled, trigger a memory profile dump every time
-        the total virtual memory exceeds the previous maximum.  Profiles are
-        dumped to files named according to the pattern
-        <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</filename>,
-        where <literal>&lt;prefix&gt;</literal> is controlled by the <link
-        linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
-        option.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="prof.reset">
-        <term>
-          <mallctl>prof.reset</mallctl>
-          (<type>size_t</type>)
-          <literal>-w</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Reset all memory profile statistics, and optionally
-        update the sample rate (see <link
-        linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>
-        and <link
-        linkend="prof.lg_sample"><mallctl>prof.lg_sample</mallctl></link>).
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="prof.lg_sample">
-        <term>
-          <mallctl>prof.lg_sample</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Get the current sample rate (see <link
-        linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>).
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="prof.interval">
-        <term>
-          <mallctl>prof.interval</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-prof</option>]
-        </term>
-        <listitem><para>Average number of bytes allocated between
-        interval-based profile dumps.  See the
-        <link
-        linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link>
-        option for additional information.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.allocated">
-        <term>
-          <mallctl>stats.allocated</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Total number of bytes allocated by the
-        application.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.active">
-        <term>
-          <mallctl>stats.active</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Total number of bytes in active pages allocated by the
-        application.  This is a multiple of the page size, and greater than or
-        equal to <link
-        linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link>.
-        This does not include <link linkend="stats.arenas.i.pdirty">
-        <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl></link>, nor pages
-        entirely devoted to allocator metadata.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.metadata">
-        <term>
-          <mallctl>stats.metadata</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Total number of bytes dedicated to metadata, which
-        comprise base allocations used for bootstrap-sensitive allocator
-        metadata structures (see <link
-        linkend="stats.arenas.i.base"><mallctl>stats.arenas.&lt;i&gt;.base</mallctl></link>)
-        and internal allocations (see <link
-        linkend="stats.arenas.i.internal"><mallctl>stats.arenas.&lt;i&gt;.internal</mallctl></link>).</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.resident">
-        <term>
-          <mallctl>stats.resident</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Maximum number of bytes in physically resident data
-        pages mapped by the allocator, comprising all pages dedicated to
-        allocator metadata, pages backing active allocations, and unused dirty
-        pages.  This is a maximum rather than precise because pages may not
-        actually be physically resident if they correspond to demand-zeroed
-        virtual memory that has not yet been touched.  This is a multiple of the
-        page size, and is larger than <link
-        linkend="stats.active"><mallctl>stats.active</mallctl></link>.</para></listitem>
-      </varlistentry>
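-
-      <para>A minimal sketch of sampling these top-level counters; the
-      <mallctl>epoch</mallctl> mallctl must be advanced first so that the
-      cached statistics are refreshed (requires
-      <option>--enable-stats</option>; error handling elided):</para>
-      <programlisting language="C"><![CDATA[
-#include <stdint.h>
-#include <stdio.h>
-#include <jemalloc/jemalloc.h>
-
-void
-log_memory_stats(void)
-{
-	uint64_t epoch = 1;
-	size_t esz = sizeof(epoch);
-	size_t allocated, active, resident, sz = sizeof(size_t);
-
-	/* Refresh the snapshot that the stats.* mallctls report. */
-	mallctl("epoch", &epoch, &esz, &epoch, sizeof(epoch));
-
-	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
-	mallctl("stats.active", &active, &sz, NULL, 0);
-	mallctl("stats.resident", &resident, &sz, NULL, 0);
-
-	printf("allocated=%zu active=%zu resident=%zu\n",
-	    allocated, active, resident);
-}]]></programlisting>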
-
-      <varlistentry id="stats.mapped">
-        <term>
-          <mallctl>stats.mapped</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Total number of bytes in active extents mapped by the
-        allocator.  This is larger than <link
-        linkend="stats.active"><mallctl>stats.active</mallctl></link>.  This
-        does not include inactive extents, even those that contain unused dirty
-        pages, which means that there is no strict ordering between this and
-        <link
-        linkend="stats.resident"><mallctl>stats.resident</mallctl></link>.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.retained">
-        <term>
-          <mallctl>stats.retained</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Total number of bytes in virtual memory mappings that
-        were retained rather than being returned to the operating system via
-        e.g. <citerefentry><refentrytitle>munmap</refentrytitle>
-        <manvolnum>2</manvolnum></citerefentry>.  Retained virtual memory is
-        typically untouched, decommitted, or purged, so it has no strongly
-        associated physical memory (see <link
-        linkend="arena.i.extent_hooks">extent hooks</link> for details).
-        Retained memory is excluded from mapped memory statistics, e.g. <link
-        linkend="stats.mapped"><mallctl>stats.mapped</mallctl></link>.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.dss">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.dss</mallctl>
-          (<type>const char *</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle>
-        <manvolnum>2</manvolnum></citerefentry>) allocation precedence as
-        related to <citerefentry><refentrytitle>mmap</refentrytitle>
-        <manvolnum>2</manvolnum></citerefentry> allocation.  See <link
-        linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for details.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.decay_time">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.decay_time</mallctl>
-          (<type>ssize_t</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Approximate time in seconds from the creation of a set
-        of unused dirty pages until an equivalent set of unused dirty pages is
-        purged and/or reused.  See <link
-        linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link>
-        for details.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.nthreads">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.nthreads</mallctl>
-          (<type>unsigned</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Number of threads currently assigned to the
-        arena.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.pactive">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.pactive</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Number of pages in active extents.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.pdirty">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Number of pages within unused extents that are
-        potentially dirty, and for which
-        <function>madvise(<parameter>...</parameter>
-        <parameter><constant>MADV_DONTNEED</constant></parameter>)</function> or
-        similar has not been called.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.mapped">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.mapped</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Number of mapped bytes.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.retained">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.retained</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Number of retained bytes.  See <link
-        linkend="stats.retained"><mallctl>stats.retained</mallctl></link> for
-        details.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.base">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.base</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>
-        Number of bytes dedicated to bootstrap-sensitive allocator metadata
-        structures.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.internal">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.internal</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Number of bytes dedicated to internal allocations.
-        Internal allocations differ from application-originated allocations in
-        that they are for internal use, and that they are omitted from heap
-        profiles.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.resident">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.resident</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Maximum number of bytes in physically resident data
-        pages mapped by the arena, comprising all pages dedicated to allocator
-        metadata, pages backing active allocations, and unused dirty pages.
-        This is a maximum rather than a precise value because pages may not actually be
-        physically resident if they correspond to demand-zeroed virtual memory
-        that has not yet been touched.  This is a multiple of the page
-        size.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.npurge">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.npurge</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Number of dirty page purge sweeps performed.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.nmadvise">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.nmadvise</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Number of <function>madvise(<parameter>...</parameter>
-        <parameter><constant>MADV_DONTNEED</constant></parameter>)</function> or
-        similar calls made to purge dirty pages.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.purged">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.purged</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Number of pages purged.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.small.allocated">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.small.allocated</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Number of bytes currently allocated by small objects.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.small.nmalloc">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.small.nmalloc</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of allocation requests served by
-        small bins.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.small.ndalloc">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.small.ndalloc</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of small objects returned to bins.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.small.nrequests">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.small.nrequests</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of small allocation requests.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.large.allocated">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.large.allocated</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Number of bytes currently allocated by large objects.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.large.nmalloc">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.large.nmalloc</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of large allocation requests served
-        directly by the arena.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.large.ndalloc">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.large.ndalloc</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of large deallocation requests served
-        directly by the arena.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.large.nrequests">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.large.nrequests</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of large allocation requests.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.bins.j.nmalloc">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of allocations served by bin.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.bins.j.ndalloc">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.ndalloc</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of allocations returned to bin.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.bins.j.nrequests">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nrequests</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of allocation
-        requests.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.bins.j.curregs">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curregs</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Current number of regions for this size
-        class.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.bins.j.nfills">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option> <option>--enable-tcache</option>]
-        </term>
-        <listitem><para>Cumulative number of tcache fills.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.bins.j.nflushes">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nflushes</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option> <option>--enable-tcache</option>]
-        </term>
-        <listitem><para>Cumulative number of tcache flushes.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.bins.j.nslabs">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nslabs</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of slabs created.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.bins.j.nreslabs">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nreslabs</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of times the current slab (the slab
-        from which new allocations are served) changed.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.bins.j.curslabs">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curslabs</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Current number of slabs.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.lextents.j.nmalloc">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.nmalloc</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of allocation requests for this size
-        class served directly by the arena.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.lextents.j.ndalloc">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.ndalloc</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of deallocation requests for this
-        size class served directly by the arena.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.lextents.j.nrequests">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.nrequests</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of allocation requests for this size
-        class.</para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.arenas.i.lextents.j.curlextents">
-        <term>
-          <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.curlextents</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Current number of large allocations for this size class.
-        </para></listitem>
-      </varlistentry>
-    </variablelist>
-  </refsect1>
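A small sketch of how the per-arena statistics above are typically read: translate the name to a MIB once with mallctlnametomib(), then substitute the arena index (component 2) before each mallctlbymib() call. The helper is illustrative and assumes --enable-stats:

#include <stddef.h>
#include <jemalloc/jemalloc.h>

static size_t
arena_pactive(unsigned arena_ind)
{
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	size_t pactive = 0;
	size_t sz = sizeof(pactive);

	/* "stats.arenas.0.pactive" -> {stats, arenas, <i>, pactive}. */
	if (mallctlnametomib("stats.arenas.0.pactive", mib, &miblen) != 0)
		return (0);
	mib[2] = (size_t)arena_ind;	/* Select the arena of interest. */
	if (mallctlbymib(mib, miblen, &pactive, &sz, NULL, 0) != 0)
		return (0);
	return (pactive);
}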
-  <refsect1 id="heap_profile_format">
-    <title>HEAP PROFILE FORMAT</title>
-    <para>Although the heap profiling functionality was originally designed to
-    be compatible with the
-    <command>pprof</command> command that is developed as part of the <ulink
-    url="http://code.google.com/p/gperftools/">gperftools
-    package</ulink>, the addition of per thread heap profiling functionality
-    required a different heap profile format.  The <command>jeprof</command>
-    command is derived from <command>pprof</command>, with enhancements to
-    support the heap profile format described here.</para>
-
-    <para>In the following hypothetical heap profile, <constant>[...]</constant>
-    indicates elision for the sake of compactness.  <programlisting><![CDATA[
-heap_v2/524288
-  t*: 28106: 56637512 [0: 0]
-  [...]
-  t3: 352: 16777344 [0: 0]
-  [...]
-  t99: 17754: 29341640 [0: 0]
-  [...]
-@ 0x5f86da8 0x5f5a1dc [...] 0x29e4d4e 0xa200316 0xabb2988 [...]
-  t*: 13: 6688 [0: 0]
-  t3: 12: 6496 [0: 0]
-  t99: 1: 192 [0: 0]
-[...]
-
-MAPPED_LIBRARIES:
-[...]]]></programlisting> The following matches the above heap profile, but most
-tokens are replaced with <constant>&lt;description&gt;</constant> to indicate
-descriptions of the corresponding fields.  <programlisting><![CDATA[
-<heap_profile_format_version>/<mean_sample_interval>
-  <aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
-  [...]
-  <thread_3_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
-  [...]
-  <thread_99_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
-  [...]
-@ <top_frame> <frame> [...] <frame> <frame> <frame> [...]
-  <backtrace_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
-  <backtrace_thread_3>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
-  <backtrace_thread_99>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
-[...]
-
-MAPPED_LIBRARIES:
-</proc/<pid>/maps>]]></programlisting></para>
-  </refsect1>
-
-  <refsect1 id="debugging_malloc_problems">
-    <title>DEBUGGING MALLOC PROBLEMS</title>
-    <para>When debugging, it is a good idea to configure/build jemalloc with
-    the <option>--enable-debug</option> and <option>--enable-fill</option>
-    options, and recompile the program with suitable options and symbols for
-    debugger support.  When so configured, jemalloc incorporates a wide variety
-    of run-time assertions that catch application errors such as double-free,
-    write-after-free, etc.</para>
-
-    <para>Programs often accidentally depend on <quote>uninitialized</quote>
-    memory actually being filled with zero bytes.  Junk filling
-    (see the <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link>
-    option) tends to expose such bugs in the form of obviously incorrect
-    results and/or coredumps.  Conversely, zero
-    filling (see the <link
-    linkend="opt.zero"><mallctl>opt.zero</mallctl></link> option) eliminates
-    the symptoms of such bugs.  Between these two options, it is usually
-    possible to quickly detect, diagnose, and eliminate such bugs.</para>
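A minimal sketch of opting into junk filling from the application source, in the same way the narenas example later in this page sets malloc_conf; the option string assumes a build with --enable-fill:

#include <jemalloc/jemalloc.h>

/*
 * Read by jemalloc at initialization, before the first allocation.
 * "junk:true" fills new allocations with 0xa5 and freed memory with 0x5a;
 * "abort:true" turns most warnings into aborts for easier debugging.
 */
const char *malloc_conf = "junk:true,abort:true";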
-
-    <para>This implementation does not provide much detail about the problems
-    it detects, because the performance impact for storing such information
-    would be prohibitive.</para>
-  </refsect1>
-  <refsect1 id="diagnostic_messages">
-    <title>DIAGNOSTIC MESSAGES</title>
-    <para>If any of the memory allocation/deallocation functions detect an
-    error or warning condition, a message will be printed to file descriptor
-    <constant>STDERR_FILENO</constant>.  Errors will result in the process
-    dumping core.  If the <link
-    linkend="opt.abort"><mallctl>opt.abort</mallctl></link> option is set, most
-    warnings are treated as errors.</para>
-
-    <para>The <varname>malloc_message</varname> variable allows the programmer
-    to override the function which emits the text strings forming the errors
-    and warnings when the <constant>STDERR_FILENO</constant> file
-    descriptor is not suitable for this purpose.
-    <function>malloc_message()</function> takes the
-    <parameter>cbopaque</parameter> pointer argument that is
-    <constant>NULL</constant> unless overridden by the arguments in a call to
-    <function>malloc_stats_print()</function>, followed by a string
-    pointer.  Please note that doing anything which tries to allocate memory in
-    this function is likely to result in a crash or deadlock.</para>
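A sketch of overriding malloc_message(); the replacement writes to an already-open descriptor (fd 3 here is just an assumption for illustration) and deliberately avoids allocating:

#include <string.h>
#include <unistd.h>
#include <jemalloc/jemalloc.h>

static void
log_malloc_message(void *cbopaque, const char *s)
{
	(void)cbopaque;	/* NULL unless set via malloc_stats_print(). */
	/* write(2) allocates nothing, so it is safe to call here. */
	(void)write(3, s, strlen(s));
}

int
main(void)
{
	malloc_message = log_malloc_message;	/* Install before heavy allocation. */
	/* ... rest of the program ... */
	return (0);
}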
-
-    <para>All messages are prefixed by
-    <quote><computeroutput>&lt;jemalloc&gt;: </computeroutput></quote>.</para>
-  </refsect1>
-  <refsect1 id="return_values">
-    <title>RETURN VALUES</title>
-    <refsect2>
-      <title>Standard API</title>
-      <para>The <function>malloc()</function> and
-      <function>calloc()</function> functions return a pointer to the
-      allocated memory if successful; otherwise a <constant>NULL</constant>
-      pointer is returned and <varname>errno</varname> is set to
-      <errorname>ENOMEM</errorname>.</para>
-
-      <para>The <function>posix_memalign()</function> function
-      returns the value 0 if successful; otherwise it returns an error value.
-      The <function>posix_memalign()</function> function will fail
-      if:
-        <variablelist>
-          <varlistentry>
-            <term><errorname>EINVAL</errorname></term>
-
-            <listitem><para>The <parameter>alignment</parameter> parameter is
-            not a power of 2 at least as large as
-            <code language="C">sizeof(<type>void *</type>)</code>.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term><errorname>ENOMEM</errorname></term>
-
-            <listitem><para>Memory allocation error.</para></listitem>
-          </varlistentry>
-        </variablelist>
-      </para>
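A small sketch of checking the posix_memalign() result; unlike malloc(), failures are reported through the return value rather than errno:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *
alloc_aligned_checked(size_t alignment, size_t size)
{
	void *p = NULL;
	int err = posix_memalign(&p, alignment, size);

	if (err == EINVAL) {
		fprintf(stderr,
		    "alignment %zu is not a power of 2 >= sizeof(void *)\n",
		    alignment);
		return (NULL);
	}
	if (err == ENOMEM) {
		fprintf(stderr, "out of memory\n");
		return (NULL);
	}
	return (p);
}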
-
-      <para>The <function>aligned_alloc()</function> function returns
-      a pointer to the allocated memory if successful; otherwise a
-      <constant>NULL</constant> pointer is returned and
-      <varname>errno</varname> is set.  The
-      <function>aligned_alloc()</function> function will fail if:
-        <variablelist>
-          <varlistentry>
-            <term><errorname>EINVAL</errorname></term>
-
-            <listitem><para>The <parameter>alignment</parameter> parameter is
-            not a power of 2.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term><errorname>ENOMEM</errorname></term>
-
-            <listitem><para>Memory allocation error.</para></listitem>
-          </varlistentry>
-        </variablelist>
-      </para>
-
-      <para>The <function>realloc()</function> function returns a
-      pointer, possibly identical to <parameter>ptr</parameter>, to the
-      allocated memory if successful; otherwise a <constant>NULL</constant>
-      pointer is returned, and <varname>errno</varname> is set to
-      <errorname>ENOMEM</errorname> if the error was the result of an
-      allocation failure.  The <function>realloc()</function>
-      function always leaves the original buffer intact when an error occurs.
-      </para>
-
-      <para>The <function>free()</function> function returns no
-      value.</para>
-    </refsect2>
-    <refsect2>
-      <title>Non-standard API</title>
-      <para>The <function>mallocx()</function> and
-      <function>rallocx()</function> functions return a pointer to
-      the allocated memory if successful; otherwise a <constant>NULL</constant>
-      pointer is returned to indicate insufficient contiguous memory was
-      available to service the allocation request.  </para>
-
-      <para>The <function>xallocx()</function> function returns the
-      real size of the resulting resized allocation pointed to by
-      <parameter>ptr</parameter>, which is a value less than
-      <parameter>size</parameter> if the allocation could not be adequately
-      grown in place.  </para>
-
-      <para>The <function>sallocx()</function> function returns the
-      real size of the allocation pointed to by <parameter>ptr</parameter>.
-      </para>
-
-      <para>The <function>nallocx()</function> function returns the real size
-      that would result from a successful equivalent
-      <function>mallocx()</function> function call, or zero if
-      insufficient memory is available to perform the size computation.  </para>
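A sketch tying these functions together: nallocx() predicts the real size a mallocx() call would return, and sallocx() confirms it on the live allocation (MALLOCX_ZERO requests zero-initialized memory):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t request = 100;
	size_t predicted = nallocx(request, MALLOCX_ZERO);
	void *p = mallocx(request, MALLOCX_ZERO);

	if (p == NULL)
		return (1);
	printf("requested %zu, real size %zu (sallocx: %zu)\n",
	    request, predicted, sallocx(p, 0));
	dallocx(p, 0);
	return (0);
}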
-
-      <para>The <function>mallctl()</function>,
-      <function>mallctlnametomib()</function>, and
-      <function>mallctlbymib()</function> functions return 0 on
-      success; otherwise they return an error value.  The functions will fail
-      if:
-        <variablelist>
-          <varlistentry>
-            <term><errorname>EINVAL</errorname></term>
-
-            <listitem><para><parameter>newp</parameter> is not
-            <constant>NULL</constant>, and <parameter>newlen</parameter> is too
-            large or too small.  Alternatively, <parameter>*oldlenp</parameter>
-            is too large or too small; in this case as much data as possible
-            are read despite the error.</para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term><errorname>ENOENT</errorname></term>
-
-            <listitem><para><parameter>name</parameter> or
-            <parameter>mib</parameter> specifies an unknown/invalid
-            value.</para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term><errorname>EPERM</errorname></term>
-
-            <listitem><para>Attempt to read or write a void value, or attempt
-            to write a read-only value.</para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term><errorname>EAGAIN</errorname></term>
-
-            <listitem><para>A memory allocation failure
-            occurred.</para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term><errorname>EFAULT</errorname></term>
-
-            <listitem><para>An interface with side effects failed in some way
-            not directly related to <function>mallctl*()</function>
-            read/write processing.</para></listitem>
-          </varlistentry>
-        </variablelist>
-      </para>
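A sketch of a mallctl() write that maps the error codes above to messages; it assumes the writable arenas.decay_time control present in this version of jemalloc:

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

static int
set_default_decay_time(ssize_t seconds)
{
	int err = mallctl("arenas.decay_time", NULL, NULL, &seconds,
	    sizeof(seconds));

	switch (err) {
	case 0:
		break;
	case EINVAL:
		fprintf(stderr, "bad newlen or value\n");
		break;
	case ENOENT:
		fprintf(stderr, "unknown mallctl name\n");
		break;
	case EPERM:
		fprintf(stderr, "value is read-only or void\n");
		break;
	case EAGAIN:
		fprintf(stderr, "memory allocation failure\n");
		break;
	case EFAULT:
		fprintf(stderr, "side-effecting control failed\n");
		break;
	}
	return (err);
}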
-
-      <para>The <function>malloc_usable_size()</function> function
-      returns the usable size of the allocation pointed to by
-      <parameter>ptr</parameter>.  </para>
-    </refsect2>
-  </refsect1>
-  <refsect1 id="environment">
-    <title>ENVIRONMENT</title>
-    <para>The following environment variable affects the execution of the
-    allocation functions:
-      <variablelist>
-        <varlistentry>
-          <term><envar>MALLOC_CONF</envar></term>
-
-          <listitem><para>If the environment variable
-          <envar>MALLOC_CONF</envar> is set, the characters it contains
-          will be interpreted as options.</para></listitem>
-        </varlistentry>
-      </variablelist>
-    </para>
-  </refsect1>
-  <refsect1 id="examples">
-    <title>EXAMPLES</title>
-    <para>To dump core whenever a problem occurs:
-      <screen>ln -s 'abort:true' /etc/malloc.conf</screen>
-    </para>
-    <para>To specify in the source that only one arena should be automatically
-    created:
-      <programlisting language="C"><![CDATA[
-malloc_conf = "narenas:1";]]></programlisting></para>
-  </refsect1>
-  <refsect1 id="see_also">
-    <title>SEE ALSO</title>
-    <para><citerefentry><refentrytitle>madvise</refentrytitle>
-    <manvolnum>2</manvolnum></citerefentry>,
-    <citerefentry><refentrytitle>mmap</refentrytitle>
-    <manvolnum>2</manvolnum></citerefentry>,
-    <citerefentry><refentrytitle>sbrk</refentrytitle>
-    <manvolnum>2</manvolnum></citerefentry>,
-    <citerefentry><refentrytitle>utrace</refentrytitle>
-    <manvolnum>2</manvolnum></citerefentry>,
-    <citerefentry><refentrytitle>alloca</refentrytitle>
-    <manvolnum>3</manvolnum></citerefentry>,
-    <citerefentry><refentrytitle>atexit</refentrytitle>
-    <manvolnum>3</manvolnum></citerefentry>,
-    <citerefentry><refentrytitle>getpagesize</refentrytitle>
-    <manvolnum>3</manvolnum></citerefentry></para>
-  </refsect1>
-  <refsect1 id="standards">
-    <title>STANDARDS</title>
-    <para>The <function>malloc()</function>,
-    <function>calloc()</function>,
-    <function>realloc()</function>, and
-    <function>free()</function> functions conform to ISO/IEC
-    9899:1990 (<quote>ISO C90</quote>).</para>
-
-    <para>The <function>posix_memalign()</function> function conforms
-    to IEEE Std 1003.1-2001 (<quote>POSIX.1</quote>).</para>
-  </refsect1>
-</refentry>
diff --git a/zircon/third_party/ulib/jemalloc/doc/manpages.xsl.in b/zircon/third_party/ulib/jemalloc/doc/manpages.xsl.in
deleted file mode 100644
index 88b2626..0000000
--- a/zircon/third_party/ulib/jemalloc/doc/manpages.xsl.in
+++ /dev/null
@@ -1,4 +0,0 @@
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-  <xsl:import href="@XSLROOT@/manpages/docbook.xsl"/>
-  <xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/>
-</xsl:stylesheet>
diff --git a/zircon/third_party/ulib/jemalloc/doc/stylesheet.xsl b/zircon/third_party/ulib/jemalloc/doc/stylesheet.xsl
deleted file mode 100644
index 619365d..0000000
--- a/zircon/third_party/ulib/jemalloc/doc/stylesheet.xsl
+++ /dev/null
@@ -1,10 +0,0 @@
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-  <xsl:param name="funcsynopsis.style">ansi</xsl:param>
-  <xsl:param name="function.parens" select="0"/>
-  <xsl:template match="function">
-    <xsl:call-template name="inline.monoseq"/>
-  </xsl:template>
-  <xsl:template match="mallctl">
-    <quote><xsl:call-template name="inline.monoseq"/></quote>
-  </xsl:template>
-</xsl:stylesheet>
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_externs.h
deleted file mode 100644
index f99521e..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_externs.h
+++ /dev/null
@@ -1,96 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
-#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-static const size_t	large_pad =
-#ifdef JEMALLOC_CACHE_OBLIVIOUS
-    PAGE
-#else
-    0
-#endif
-    ;
-
-extern ssize_t		opt_decay_time;
-
-extern const arena_bin_info_t	arena_bin_info[NBINS];
-
-extent_t	*arena_extent_cache_alloc(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size,
-    size_t alignment, bool *zero);
-void	arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent);
-void	arena_extent_cache_maybe_insert(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent, bool cache);
-void	arena_extent_cache_maybe_remove(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent, bool cache);
-#ifdef JEMALLOC_JET
-size_t	arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
-#endif
-extent_t	*arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
-    size_t usize, size_t alignment, bool *zero);
-void	arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent, bool locked);
-void	arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent, size_t oldsize);
-void	arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent, size_t oldsize);
-ssize_t	arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
-bool	arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
-void	arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
-void	arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
-void	arena_reset(tsd_t *tsd, arena_t *arena);
-void	arena_destroy(tsd_t *tsd, arena_t *arena);
-void	arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
-    tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
-void	arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info,
-    bool zero);
-#ifdef JEMALLOC_JET
-typedef void (arena_dalloc_junk_small_t)(void *, const arena_bin_info_t *);
-extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
-#else
-void	arena_dalloc_junk_small(void *ptr, const arena_bin_info_t *bin_info);
-#endif
-void	*arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
-    szind_t ind, bool zero);
-void	*arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool zero, tcache_t *tcache);
-void	arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-    size_t usize);
-void	arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr,
-    tcache_t *tcache, bool slow_path);
-void	arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent, void *ptr);
-void	arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
-    void *ptr);
-bool	arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr,
-    size_t oldsize, size_t size, size_t extra, bool zero);
-void	*arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
-    size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache);
-dss_prec_t	arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
-bool	arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
-ssize_t	arena_decay_time_default_get(void);
-bool	arena_decay_time_default_set(ssize_t decay_time);
-void	arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
-    unsigned *nthreads, const char **dss, ssize_t *decay_time, size_t *nactive,
-    size_t *ndirty);
-void	arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
-    const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty,
-    arena_stats_t *astats, malloc_bin_stats_t *bstats,
-    malloc_large_stats_t *lstats);
-unsigned	arena_nthreads_get(arena_t *arena, bool internal);
-void	arena_nthreads_inc(arena_t *arena, bool internal);
-void	arena_nthreads_dec(arena_t *arena, bool internal);
-size_t	arena_extent_sn_next(arena_t *arena);
-arena_t	*arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
-void	arena_boot(void);
-void	arena_prefork0(tsdn_t *tsdn, arena_t *arena);
-void	arena_prefork1(tsdn_t *tsdn, arena_t *arena);
-void	arena_prefork2(tsdn_t *tsdn, arena_t *arena);
-void	arena_prefork3(tsdn_t *tsdn, arena_t *arena);
-void	arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
-void	arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_inlines_a.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_inlines_a.h
deleted file mode 100644
index d241b8a1..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_inlines_a.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
-#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H
-
-#ifndef JEMALLOC_ENABLE_INLINE
-unsigned	arena_ind_get(const arena_t *arena);
-void	arena_internal_add(arena_t *arena, size_t size);
-void	arena_internal_sub(arena_t *arena, size_t size);
-size_t	arena_internal_get(arena_t *arena);
-bool	arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
-bool	arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
-bool	arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
-#endif /* JEMALLOC_ENABLE_INLINE */
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
-
-JEMALLOC_INLINE unsigned
-arena_ind_get(const arena_t *arena)
-{
-	return (base_ind_get(arena->base));
-}
-
-JEMALLOC_INLINE void
-arena_internal_add(arena_t *arena, size_t size)
-{
-	atomic_add_zu(&arena->stats.internal, size);
-}
-
-JEMALLOC_INLINE void
-arena_internal_sub(arena_t *arena, size_t size)
-{
-	atomic_sub_zu(&arena->stats.internal, size);
-}
-
-JEMALLOC_INLINE size_t
-arena_internal_get(arena_t *arena)
-{
-	return (atomic_read_zu(&arena->stats.internal));
-}
-
-JEMALLOC_INLINE bool
-arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
-{
-	cassert(config_prof);
-	assert(prof_interval != 0);
-
-	arena->prof_accumbytes += accumbytes;
-	if (arena->prof_accumbytes >= prof_interval) {
-		arena->prof_accumbytes %= prof_interval;
-		return (true);
-	}
-	return (false);
-}
-
-JEMALLOC_INLINE bool
-arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
-{
-	cassert(config_prof);
-
-	if (likely(prof_interval == 0))
-		return (false);
-	return (arena_prof_accum_impl(arena, accumbytes));
-}
-
-JEMALLOC_INLINE bool
-arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
-{
-	cassert(config_prof);
-
-	if (likely(prof_interval == 0))
-		return (false);
-
-	{
-		bool ret;
-
-		malloc_mutex_lock(tsdn, &arena->lock);
-		ret = arena_prof_accum_impl(arena, accumbytes);
-		malloc_mutex_unlock(tsdn, &arena->lock);
-		return (ret);
-	}
-}
-
-#endif /* (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) */
-
-#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_inlines_b.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_inlines_b.h
deleted file mode 100644
index 9461466..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_inlines_b.h
+++ /dev/null
@@ -1,201 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
-#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
-
-#ifndef JEMALLOC_ENABLE_INLINE
-szind_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
-prof_tctx_t	*arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent,
-    const void *ptr);
-void	arena_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-    size_t usize, prof_tctx_t *tctx);
-void	arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-    prof_tctx_t *tctx);
-void	arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
-void	arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
-void	*arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
-    bool zero, tcache_t *tcache, bool slow_path);
-arena_t	*arena_aalloc(tsdn_t *tsdn, const void *ptr);
-size_t	arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr);
-void	arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr,
-    tcache_t *tcache, bool slow_path);
-void	arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
-    tcache_t *tcache, bool slow_path);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
-JEMALLOC_INLINE szind_t
-arena_bin_index(arena_t *arena, arena_bin_t *bin)
-{
-	szind_t binind = (szind_t)(bin - arena->bins);
-	assert(binind < NBINS);
-	return (binind);
-}
-
-JEMALLOC_INLINE prof_tctx_t *
-arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
-{
-	cassert(config_prof);
-	assert(ptr != NULL);
-
-	if (unlikely(!extent_slab_get(extent)))
-		return (large_prof_tctx_get(tsdn, extent));
-	return ((prof_tctx_t *)(uintptr_t)1U);
-}
-
-JEMALLOC_INLINE void
-arena_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-    size_t usize, prof_tctx_t *tctx)
-{
-	cassert(config_prof);
-	assert(ptr != NULL);
-
-	if (unlikely(!extent_slab_get(extent)))
-		large_prof_tctx_set(tsdn, extent, tctx);
-}
-
-JEMALLOC_INLINE void
-arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-    prof_tctx_t *tctx)
-{
-	cassert(config_prof);
-	assert(ptr != NULL);
-	assert(!extent_slab_get(extent));
-
-	large_prof_tctx_reset(tsdn, extent);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
-{
-	tsd_t *tsd;
-	ticker_t *decay_ticker;
-
-	if (unlikely(tsdn_null(tsdn)))
-		return;
-	tsd = tsdn_tsd(tsdn);
-	decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
-	if (unlikely(decay_ticker == NULL))
-		return;
-	if (unlikely(ticker_ticks(decay_ticker, nticks)))
-		arena_purge(tsdn, arena, false);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
-{
-	malloc_mutex_assert_not_owner(tsdn, &arena->lock);
-
-	arena_decay_ticks(tsdn, arena, 1);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
-    tcache_t *tcache, bool slow_path)
-{
-	assert(!tsdn_null(tsdn) || tcache == NULL);
-	assert(size != 0);
-
-	if (likely(tcache != NULL)) {
-		if (likely(size <= SMALL_MAXCLASS)) {
-			return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
-			    tcache, size, ind, zero, slow_path));
-		}
-		if (likely(size <= tcache_maxclass)) {
-			return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
-			    tcache, size, ind, zero, slow_path));
-		}
-		/* (size > tcache_maxclass) case falls through. */
-		assert(size > tcache_maxclass);
-	}
-
-	return (arena_malloc_hard(tsdn, arena, size, ind, zero));
-}
-
-JEMALLOC_ALWAYS_INLINE arena_t *
-arena_aalloc(tsdn_t *tsdn, const void *ptr)
-{
-	return (extent_arena_get(iealloc(tsdn, ptr)));
-}
-
-/* Return the size of the allocation pointed to by ptr. */
-JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
-{
-	size_t ret;
-
-	assert(ptr != NULL);
-
-	if (likely(extent_slab_get(extent)))
-		ret = index2size(extent_slab_data_get_const(extent)->binind);
-	else
-		ret = large_salloc(tsdn, extent);
-
-	return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
-    bool slow_path)
-{
-	assert(!tsdn_null(tsdn) || tcache == NULL);
-	assert(ptr != NULL);
-
-	if (likely(extent_slab_get(extent))) {
-		/* Small allocation. */
-		if (likely(tcache != NULL)) {
-			szind_t binind = extent_slab_data_get(extent)->binind;
-			tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, binind,
-			    slow_path);
-		} else {
-			arena_dalloc_small(tsdn, extent_arena_get(extent),
-			    extent, ptr);
-		}
-	} else {
-		size_t usize = extent_usize_get(extent);
-
-		if (likely(tcache != NULL) && usize <= tcache_maxclass) {
-			if (config_prof && unlikely(usize <= SMALL_MAXCLASS)) {
-				arena_dalloc_promoted(tsdn, extent, ptr,
-				    tcache, slow_path);
-			} else {
-				tcache_dalloc_large(tsdn_tsd(tsdn), tcache,
-				    ptr, usize, slow_path);
-			}
-		} else
-			large_dalloc(tsdn, extent);
-	}
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
-    tcache_t *tcache, bool slow_path)
-{
-	assert(!tsdn_null(tsdn) || tcache == NULL);
-	assert(ptr != NULL);
-
-	if (likely(extent_slab_get(extent))) {
-		/* Small allocation. */
-		if (likely(tcache != NULL)) {
-			szind_t binind = size2index(size);
-			assert(binind == extent_slab_data_get(extent)->binind);
-			tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, binind,
-			    slow_path);
-		} else {
-			arena_dalloc_small(tsdn, extent_arena_get(extent),
-			    extent, ptr);
-		}
-	} else {
-		if (likely(tcache != NULL) && size <= tcache_maxclass) {
-			if (config_prof && unlikely(size <= SMALL_MAXCLASS)) {
-				arena_dalloc_promoted(tsdn, extent, ptr,
-				    tcache, slow_path);
-			} else {
-				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
-				    size, slow_path);
-			}
-		} else
-			large_dalloc(tsdn, extent);
-	}
-}
-
-#endif /* (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) */
-#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_structs_a.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_structs_a.h
deleted file mode 100644
index ccb3b05..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_structs_a.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
-#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
-
-struct arena_slab_data_s {
-	/* Index of bin this slab is associated with. */
-	szind_t		binind;
-
-	/* Number of free regions in slab. */
-	unsigned	nfree;
-
-	/* Per region allocated/deallocated bitmap. */
-	bitmap_t	bitmap[BITMAP_GROUPS_MAX];
-};
-
-#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_structs_b.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_structs_b.h
deleted file mode 100644
index c1c2073..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_structs_b.h
+++ /dev/null
@@ -1,214 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
-#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
-/*
- * Read-only information associated with each element of arena_t's bins array
- * is stored separately, partly to reduce memory usage (only one copy, rather
- * than one per arena), but mainly to avoid false cacheline sharing.
- *
- * Each slab has the following layout:
- *
- *   /--------------------\
- *   | region 0           |
- *   |--------------------|
- *   | region 1           |
- *   |--------------------|
- *   | ...                |
- *   | ...                |
- *   | ...                |
- *   |--------------------|
- *   | region nregs-1     |
- *   \--------------------/
- */
-struct arena_bin_info_s {
-	/* Size of regions in a slab for this bin's size class. */
-	size_t			reg_size;
-
-	/* Total size of a slab for this bin's size class. */
-	size_t			slab_size;
-
-	/* Total number of regions in a slab for this bin's size class. */
-	uint32_t		nregs;
-
-	/*
-	 * Metadata used to manipulate bitmaps for slabs associated with this
-	 * bin.
-	 */
-	bitmap_info_t		bitmap_info;
-};
-
-struct arena_decay_s {
-	/*
-	 * Approximate time in seconds from the creation of a set of unused
-	 * dirty pages until an equivalent set of unused dirty pages is purged
-	 * and/or reused.
-	 */
-	ssize_t			time;
-	/* time / SMOOTHSTEP_NSTEPS. */
-	nstime_t		interval;
-	/*
-	 * Time at which the current decay interval logically started.  We do
-	 * not actually advance to a new epoch until sometime after it starts
-	 * because of scheduling and computation delays, and it is even possible
-	 * to completely skip epochs.  In all cases, during epoch advancement we
-	 * merge all relevant activity into the most recently recorded epoch.
-	 */
-	nstime_t		epoch;
-	/* Deadline randomness generator. */
-	uint64_t		jitter_state;
-	/*
-	 * Deadline for current epoch.  This is the sum of interval and per
-	 * epoch jitter which is a uniform random variable in [0..interval).
-	 * Epochs always advance by precise multiples of interval, but we
-	 * randomize the deadline to reduce the likelihood of arenas purging in
-	 * lockstep.
-	 */
-	nstime_t		deadline;
-	/*
-	 * Number of dirty pages at beginning of current epoch.  During epoch
-	 * advancement we use the delta between arena->decay.ndirty and
-	 * arena->ndirty to determine how many dirty pages, if any, were
-	 * generated.
-	 */
-	size_t			nunpurged;
-	/*
-	 * Trailing log of how many unused dirty pages were generated during
-	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
-	 * element is the most recent epoch.  Corresponding epoch times are
-	 * relative to epoch.
-	 */
-	size_t			backlog[SMOOTHSTEP_NSTEPS];
-};
-
-struct arena_bin_s {
-	/* All operations on arena_bin_t fields require lock ownership. */
-	malloc_mutex_t		lock;
-
-	/*
-	 * Current slab being used to service allocations of this bin's size
-	 * class.  slabcur is independent of slabs_{nonfull,full}; whenever
-	 * slabcur is reassigned, the previous slab must be deallocated or
-	 * inserted into slabs_{nonfull,full}.
-	 */
-	extent_t		*slabcur;
-
-	/*
-	 * Heap of non-full slabs.  This heap is used to assure that new
-	 * allocations come from the non-full slab that is oldest/lowest in
-	 * memory.
-	 */
-	extent_heap_t		slabs_nonfull;
-
-	/* Ring sentinel used to track full slabs. */
-	extent_t		slabs_full;
-
-	/* Bin statistics. */
-	malloc_bin_stats_t	stats;
-};
-
-struct arena_s {
-	/*
-	 * Number of threads currently assigned to this arena, synchronized via
-	 * atomic operations.  Each thread has two distinct assignments, one for
-	 * application-serving allocation, and the other for internal metadata
-	 * allocation.  Internal metadata must not be allocated from arenas
-	 * explicitly created via the arenas.create mallctl, because the
-	 * arena.<i>.reset mallctl indiscriminately discards all allocations for
-	 * the affected arena.
-	 *
-	 *   0: Application allocation.
-	 *   1: Internal metadata allocation.
-	 */
-	unsigned		nthreads[2];
-
-	/*
-	 * There are three classes of arena operations from a locking
-	 * perspective:
-	 * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
-	 * 2) Bin-related operations are protected by bin locks.
-	 * 3) Extent-related operations are protected by this mutex.
-	 */
-	malloc_mutex_t		lock;
-
-	arena_stats_t		stats;
-	/*
-	 * List of tcaches for extant threads associated with this arena.
-	 * Stats from these are merged incrementally, and at exit if
-	 * opt_stats_print is enabled.
-	 */
-	ql_head(tcache_t)	tcache_ql;
-
-	uint64_t		prof_accumbytes;
-
-	/*
-	 * PRNG state for cache index randomization of large allocation base
-	 * pointers.
-	 */
-	size_t			offset_state;
-
-	/* Extent serial number generator state. */
-	size_t			extent_sn_next;
-
-	dss_prec_t		dss_prec;
-
-	/* True if a thread is currently executing arena_purge_to_limit(). */
-	bool			purging;
-
-	/* Number of pages in active extents. */
-	size_t			nactive;
-
-	/*
-	 * Current count of pages within unused extents that are potentially
-	 * dirty, and for which pages_purge_*() has not been called.  By
-	 * tracking this, we can institute a limit on how much dirty unused
-	 * memory is mapped for each arena.
-	 */
-	size_t			ndirty;
-
-	/* Decay-based purging state. */
-	arena_decay_t		decay;
-
-	/* Extant large allocations. */
-	ql_head(extent_t)	large;
-	/* Synchronizes all large allocation/update/deallocation. */
-	malloc_mutex_t		large_mtx;
-
-	/*
-	 * Heaps of extents that were previously allocated.  These are used when
-	 * allocating extents, in an attempt to re-use address space.
-	 */
-	extent_heap_t		extents_cached[NPSIZES+1];
-	extent_heap_t		extents_retained[NPSIZES+1];
-	/*
-	 * Ring sentinel used to track unused dirty memory.  Dirty memory is
-	 * managed as an LRU of cached extents.
-	 */
-	extent_t		extents_dirty;
-	/* Protects extents_{cached,retained,dirty}. */
-	malloc_mutex_t		extents_mtx;
-
-	/*
-	 * Next extent size class in a growing series to use when satisfying a
-	 * request via the extent hooks (only if !config_munmap).  This limits
-	 * the number of disjoint virtual memory ranges so that extent merging
-	 * can be effective even if multiple arenas' extent allocation requests
-	 * are highly interleaved.
-	 */
-	pszind_t		extent_grow_next;
-
-	/* Cache of extent structures that were allocated via base_alloc(). */
-	ql_head(extent_t)	extent_cache;
-	malloc_mutex_t		extent_cache_mtx;
-
-	/* bins is used to store heaps of free regions. */
-	arena_bin_t		bins[NBINS];
-
-	/* Base allocator, from which arena metadata are allocated. */
-	base_t			*base;
-};
-
-/* Used in conjunction with tsd for fast arena-related context lookup. */
-struct arena_tdata_s {
-	ticker_t		decay_ticker;
-};
-
-#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_types.h
deleted file mode 100644
index a13a1b6..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/arena_types.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
-#define JEMALLOC_INTERNAL_ARENA_TYPES_H
-
-#define	LARGE_MINCLASS		(ZU(1) << LG_LARGE_MINCLASS)
-
-/* Maximum number of regions in one slab. */
-#define	LG_SLAB_MAXREGS		(LG_PAGE - LG_TINY_MIN)
-#define	SLAB_MAXREGS		(1U << LG_SLAB_MAXREGS)
-
-/* Default decay time in seconds. */
-#define	DECAY_TIME_DEFAULT	10
-/* Number of event ticks between time checks. */
-#define	DECAY_NTICKS_PER_UPDATE	1000
-
-typedef struct arena_slab_data_s arena_slab_data_t;
-typedef struct arena_bin_info_s arena_bin_info_t;
-typedef struct arena_decay_s arena_decay_t;
-typedef struct arena_bin_s arena_bin_t;
-typedef struct arena_s arena_t;
-typedef struct arena_tdata_s arena_tdata_t;
-
-#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/assert.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/assert.h
deleted file mode 100644
index 6f8f7eb..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/assert.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Define a custom assert() in order to reduce the chances of deadlock during
- * assertion failure.
- */
-#ifndef assert
-#define	assert(e) do {							\
-	if (unlikely(config_debug && !(e))) {				\
-		malloc_printf(						\
-		    "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n",	\
-		    __FILE__, __LINE__, #e);				\
-		abort();						\
-	}								\
-} while (0)
-#endif
-
-#ifndef not_reached
-#define	not_reached() do {						\
-	if (config_debug) {						\
-		malloc_printf(						\
-		    "<jemalloc>: %s:%d: Unreachable code reached\n",	\
-		    __FILE__, __LINE__);				\
-		abort();						\
-	}								\
-	unreachable();							\
-} while (0)
-#endif
-
-#ifndef not_implemented
-#define	not_implemented() do {						\
-	if (config_debug) {						\
-		malloc_printf("<jemalloc>: %s:%d: Not implemented\n",	\
-		    __FILE__, __LINE__);				\
-		abort();						\
-	}								\
-} while (0)
-#endif
-
-#ifndef assert_not_implemented
-#define	assert_not_implemented(e) do {					\
-	if (unlikely(config_debug && !(e)))				\
-		not_implemented();					\
-} while (0)
-#endif
-
-
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/atomic_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/atomic_externs.h
deleted file mode 100644
index 598eec3f..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/atomic_externs.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ATOMIC_EXTERNS_H
-#define JEMALLOC_INTERNAL_ATOMIC_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
-#define	atomic_read_u64(p)	atomic_add_u64(p, 0)
-#endif
-#define	atomic_read_u32(p)	atomic_add_u32(p, 0)
-#define	atomic_read_p(p)	atomic_add_p(p, NULL)
-#define	atomic_read_zu(p)	atomic_add_zu(p, 0)
-#define	atomic_read_u(p)	atomic_add_u(p, 0)
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_ATOMIC_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/atomic_inlines.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/atomic_inlines.h
deleted file mode 100644
index a47a46e..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/atomic_inlines.h
+++ /dev/null
@@ -1,587 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ATOMIC_INLINES_H
-#define JEMALLOC_INTERNAL_ATOMIC_INLINES_H
-
-/*
- * All arithmetic functions return the arithmetic result of the atomic
- * operation.  Some atomic operation APIs return the value prior to mutation, in
- * which case the following functions must redundantly compute the result so
- * that it can be returned.  These functions are normally inlined, so the extra
- * operations can be optimized away if the return values aren't used by the
- * callers.
- *
- *   <t> atomic_read_<t>(<t> *p) { return (*p); }
- *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
- *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
- *   bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
- *   {
- *     if (*p != c)
- *       return (true);
- *     *p = s;
- *     return (false);
- *   }
- *   void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
- */
-
-#ifndef JEMALLOC_ENABLE_INLINE
-#  if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
-uint64_t	atomic_add_u64(uint64_t *p, uint64_t x);
-uint64_t	atomic_sub_u64(uint64_t *p, uint64_t x);
-bool	atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s);
-void	atomic_write_u64(uint64_t *p, uint64_t x);
-#  endif
-uint32_t	atomic_add_u32(uint32_t *p, uint32_t x);
-uint32_t	atomic_sub_u32(uint32_t *p, uint32_t x);
-bool	atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s);
-void	atomic_write_u32(uint32_t *p, uint32_t x);
-void	*atomic_add_p(void **p, void *x);
-void	*atomic_sub_p(void **p, void *x);
-bool	atomic_cas_p(void **p, void *c, void *s);
-void	atomic_write_p(void **p, const void *x);
-size_t	atomic_add_zu(size_t *p, size_t x);
-size_t	atomic_sub_zu(size_t *p, size_t x);
-bool	atomic_cas_zu(size_t *p, size_t c, size_t s);
-void	atomic_write_zu(size_t *p, size_t x);
-unsigned	atomic_add_u(unsigned *p, unsigned x);
-unsigned	atomic_sub_u(unsigned *p, unsigned x);
-bool	atomic_cas_u(unsigned *p, unsigned c, unsigned s);
-void	atomic_write_u(unsigned *p, unsigned x);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
-/******************************************************************************/
-/* 64-bit operations. */
-#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
-#  if (defined(__amd64__) || defined(__x86_64__))
-JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
-	uint64_t t = x;
-
-	__asm__ volatile (
-	    "lock; xaddq %0, %1;"
-	    : "+r" (t), "=m" (*p) /* Outputs. */
-	    : "m" (*p) /* Inputs. */
-	    );
-
-	return (t + x);
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
-	uint64_t t;
-
-	x = (uint64_t)(-(int64_t)x);
-	t = x;
-	__asm__ volatile (
-	    "lock; xaddq %0, %1;"
-	    : "+r" (t), "=m" (*p) /* Outputs. */
-	    : "m" (*p) /* Inputs. */
-	    );
-
-	return (t + x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
-	uint8_t success;
-
-	__asm__ volatile (
-	    "lock; cmpxchgq %4, %0;"
-	    "sete %1;"
-	    : "=m" (*p), "=a" (success) /* Outputs. */
-	    : "m" (*p), "a" (c), "r" (s) /* Inputs. */
-	    : "memory" /* Clobbers. */
-	    );
-
-	return (!(bool)success);
-}
-
-JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
-	__asm__ volatile (
-	    "xchgq %1, %0;" /* Lock is implied by xchgq. */
-	    : "=m" (*p), "+r" (x) /* Outputs. */
-	    : "m" (*p) /* Inputs. */
-	    : "memory" /* Clobbers. */
-	    );
-}
-#  elif (defined(JEMALLOC_C11ATOMICS))
-JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
-	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
-	return (atomic_fetch_add(a, x) + x);
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
-	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
-	return (atomic_fetch_sub(a, x) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
-	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
-	return (!atomic_compare_exchange_strong(a, &c, s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
-	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
-	atomic_store(a, x);
-}
-#  elif (defined(JEMALLOC_ATOMIC9))
-JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
-	/*
-	 * atomic_fetchadd_64() doesn't exist, but we only ever use this
-	 * function on LP64 systems, so atomic_fetchadd_long() will do.
-	 */
-	assert(sizeof(uint64_t) == sizeof(unsigned long));
-
-	return (atomic_fetchadd_long(p, (unsigned long)x) + x);
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
-	assert(sizeof(uint64_t) == sizeof(unsigned long));
-
-	return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
-	assert(sizeof(uint64_t) == sizeof(unsigned long));
-
-	return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
-	assert(sizeof(uint64_t) == sizeof(unsigned long));
-
-	atomic_store_rel_long(p, x);
-}
-#  elif (defined(JEMALLOC_OSATOMIC))
-JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
-	return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
-	return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
-	return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
-}
-
-JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
-	uint64_t o;
-
-	/* The documented OSAtomic*() API does not expose an atomic exchange. */
-	do {
-		o = atomic_read_u64(p);
-	} while (atomic_cas_u64(p, o, x));
-}
-#  elif (defined(_MSC_VER))
-JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
-	return (InterlockedExchangeAdd64(p, x) + x);
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
-	return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
-	uint64_t o;
-
-	o = InterlockedCompareExchange64(p, s, c);
-	return (o != c);
-}
-
-JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
-	InterlockedExchange64(p, x);
-}
-#  elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
-    defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
-JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
-	return (__sync_add_and_fetch(p, x));
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
-	return (__sync_sub_and_fetch(p, x));
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
-	return (!__sync_bool_compare_and_swap(p, c, s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
-	__sync_lock_test_and_set(p, x);
-}
-#  else
-#    error "Missing implementation for 64-bit atomic operations"
-#  endif
-#endif
-
-/******************************************************************************/
-/* 32-bit operations. */
-#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
-JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
-	uint32_t t = x;
-
-	__asm__ volatile (
-	    "lock; xaddl %0, %1;"
-	    : "+r" (t), "=m" (*p) /* Outputs. */
-	    : "m" (*p) /* Inputs. */
-	    );
-
-	return (t + x);
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
-	uint32_t t;
-
-	x = (uint32_t)(-(int32_t)x);
-	t = x;
-	__asm__ volatile (
-	    "lock; xaddl %0, %1;"
-	    : "+r" (t), "=m" (*p) /* Outputs. */
-	    : "m" (*p) /* Inputs. */
-	    );
-
-	return (t + x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
-	uint8_t success;
-
-	__asm__ volatile (
-	    "lock; cmpxchgl %4, %0;"
-	    "sete %1;"
-	    : "=m" (*p), "=a" (success) /* Outputs. */
-	    : "m" (*p), "a" (c), "r" (s) /* Inputs. */
-	    : "memory"
-	    );
-
-	return (!(bool)success);
-}
-
-JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
-	__asm__ volatile (
-	    "xchgl %1, %0;" /* Lock is implied by xchgl. */
-	    : "=m" (*p), "+r" (x) /* Outputs. */
-	    : "m" (*p) /* Inputs. */
-	    : "memory" /* Clobbers. */
-	    );
-}
-#elif (defined(JEMALLOC_C11ATOMICS))
-JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
-	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
-	return (atomic_fetch_add(a, x) + x);
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
-	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
-	return (atomic_fetch_sub(a, x) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
-	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
-	return (!atomic_compare_exchange_strong(a, &c, s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
-	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
-	atomic_store(a, x);
-}
-#elif (defined(JEMALLOC_ATOMIC9))
-JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
-	return (atomic_fetchadd_32(p, x) + x);
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
-	return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
-	return (!atomic_cmpset_32(p, c, s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
-	atomic_store_rel_32(p, x);
-}
-#elif (defined(JEMALLOC_OSATOMIC))
-JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
-	return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
-	return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
-	return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
-}
-
-JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
-	uint32_t o;
-
-	/* The documented OSAtomic*() API does not expose an atomic exchange. */
-	do {
-		o = atomic_read_u32(p);
-	} while (atomic_cas_u32(p, o, x));
-}
-#elif (defined(_MSC_VER))
-JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
-	return (InterlockedExchangeAdd(p, x) + x);
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
-	return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
-	uint32_t o;
-
-	o = InterlockedCompareExchange(p, s, c);
-	return (o != c);
-}
-
-JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
-	InterlockedExchange(p, x);
-}
-#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
-    defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
-JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
-	return (__sync_add_and_fetch(p, x));
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
-	return (__sync_sub_and_fetch(p, x));
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
-	return (!__sync_bool_compare_and_swap(p, c, s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
-	__sync_lock_test_and_set(p, x);
-}
-#else
-#  error "Missing implementation for 32-bit atomic operations"
-#endif
-
-/******************************************************************************/
-/* Pointer operations. */
-JEMALLOC_INLINE void *
-atomic_add_p(void **p, void *x)
-{
-#if (LG_SIZEOF_PTR == 3)
-	return ((void *)atomic_add_u64((uint64_t *)p, (uint64_t)x));
-#elif (LG_SIZEOF_PTR == 2)
-	return ((void *)atomic_add_u32((uint32_t *)p, (uint32_t)x));
-#endif
-}
-
-JEMALLOC_INLINE void *
-atomic_sub_p(void **p, void *x)
-{
-#if (LG_SIZEOF_PTR == 3)
-	return ((void *)atomic_add_u64((uint64_t *)p, (uint64_t)-((int64_t)x)));
-#elif (LG_SIZEOF_PTR == 2)
-	return ((void *)atomic_add_u32((uint32_t *)p, (uint32_t)-((int32_t)x)));
-#endif
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_p(void **p, void *c, void *s)
-{
-#if (LG_SIZEOF_PTR == 3)
-	return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
-#elif (LG_SIZEOF_PTR == 2)
-	return (atomic_cas_u32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
-#endif
-}
-
-JEMALLOC_INLINE void
-atomic_write_p(void **p, const void *x)
-{
-#if (LG_SIZEOF_PTR == 3)
-	atomic_write_u64((uint64_t *)p, (uint64_t)x);
-#elif (LG_SIZEOF_PTR == 2)
-	atomic_write_u32((uint32_t *)p, (uint32_t)x);
-#endif
-}
-
-/******************************************************************************/
-/* size_t operations. */
-JEMALLOC_INLINE size_t
-atomic_add_zu(size_t *p, size_t x)
-{
-#if (LG_SIZEOF_PTR == 3)
-	return ((size_t)atomic_add_u64((uint64_t *)p, (uint64_t)x));
-#elif (LG_SIZEOF_PTR == 2)
-	return ((size_t)atomic_add_u32((uint32_t *)p, (uint32_t)x));
-#endif
-}
-
-JEMALLOC_INLINE size_t
-atomic_sub_zu(size_t *p, size_t x)
-{
-#if (LG_SIZEOF_PTR == 3)
-	return ((size_t)atomic_add_u64((uint64_t *)p, (uint64_t)-((int64_t)x)));
-#elif (LG_SIZEOF_PTR == 2)
-	return ((size_t)atomic_add_u32((uint32_t *)p, (uint32_t)-((int32_t)x)));
-#endif
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_zu(size_t *p, size_t c, size_t s)
-{
-#if (LG_SIZEOF_PTR == 3)
-	return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
-#elif (LG_SIZEOF_PTR == 2)
-	return (atomic_cas_u32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
-#endif
-}
-
-JEMALLOC_INLINE void
-atomic_write_zu(size_t *p, size_t x)
-{
-#if (LG_SIZEOF_PTR == 3)
-	atomic_write_u64((uint64_t *)p, (uint64_t)x);
-#elif (LG_SIZEOF_PTR == 2)
-	atomic_write_u32((uint32_t *)p, (uint32_t)x);
-#endif
-}
-
-/******************************************************************************/
-/* unsigned operations. */
-JEMALLOC_INLINE unsigned
-atomic_add_u(unsigned *p, unsigned x)
-{
-#if (LG_SIZEOF_INT == 3)
-	return ((unsigned)atomic_add_u64((uint64_t *)p, (uint64_t)x));
-#elif (LG_SIZEOF_INT == 2)
-	return ((unsigned)atomic_add_u32((uint32_t *)p, (uint32_t)x));
-#endif
-}
-
-JEMALLOC_INLINE unsigned
-atomic_sub_u(unsigned *p, unsigned x)
-{
-#if (LG_SIZEOF_INT == 3)
-	return ((unsigned)atomic_add_u64((uint64_t *)p,
-	    (uint64_t)-((int64_t)x)));
-#elif (LG_SIZEOF_INT == 2)
-	return ((unsigned)atomic_add_u32((uint32_t *)p,
-	    (uint32_t)-((int32_t)x)));
-#endif
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_u(unsigned *p, unsigned c, unsigned s)
-{
-#if (LG_SIZEOF_INT == 3)
-	return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
-#elif (LG_SIZEOF_INT == 2)
-	return (atomic_cas_u32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
-#endif
-}
-
-JEMALLOC_INLINE void
-atomic_write_u(unsigned *p, unsigned x)
-{
-#if (LG_SIZEOF_INT == 3)
-	atomic_write_u64((uint64_t *)p, (uint64_t)x);
-#elif (LG_SIZEOF_INT == 2)
-	atomic_write_u32((uint32_t *)p, (uint32_t)x);
-#endif
-}
-
-/******************************************************************************/
-#endif
-#endif /* JEMALLOC_INTERNAL_ATOMIC_INLINES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/base_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/base_externs.h
deleted file mode 100644
index 43fd76b..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/base_externs.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
-#define JEMALLOC_INTERNAL_BASE_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-base_t	*b0get(void);
-base_t	*base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
-void	base_delete(base_t *base);
-extent_hooks_t	*base_extent_hooks_get(base_t *base);
-extent_hooks_t	*base_extent_hooks_set(base_t *base,
-    extent_hooks_t *extent_hooks);
-void	*base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
-void	base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
-    size_t *resident, size_t *mapped);
-void	base_prefork(tsdn_t *tsdn, base_t *base);
-void	base_postfork_parent(tsdn_t *tsdn, base_t *base);
-void	base_postfork_child(tsdn_t *tsdn, base_t *base);
-bool	base_boot(tsdn_t *tsdn);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/base_inlines.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/base_inlines.h
deleted file mode 100644
index 63547d6..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/base_inlines.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
-#define JEMALLOC_INTERNAL_BASE_INLINES_H
-
-#ifndef JEMALLOC_ENABLE_INLINE
-unsigned	base_ind_get(const base_t *base);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BASE_C_))
-JEMALLOC_INLINE unsigned
-base_ind_get(const base_t *base)
-{
-	return (base->ind);
-}
-#endif
-
-#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/base_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/base_structs.h
deleted file mode 100644
index bad37c0..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/base_structs.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H
-#define JEMALLOC_INTERNAL_BASE_STRUCTS_H
-
-/* Embedded at the beginning of every block of base-managed virtual memory. */
-struct base_block_s {
-	/* Total size of block's virtual memory mapping. */
-	size_t		size;
-
-	/* Next block in list of base's blocks. */
-	base_block_t	*next;
-
-	/* Tracks unused trailing space. */
-	extent_t	extent;
-};
-
-struct base_s {
-	/* Associated arena's index within the arenas array. */
-	unsigned	ind;
-
-	/* User-configurable extent hook functions. */
-	union {
-		extent_hooks_t	*extent_hooks;
-		void		*extent_hooks_pun;
-	};
-
-	/* Protects base_alloc() and base_stats_get() operations. */
-	malloc_mutex_t	mtx;
-
-	/* Serial number generation state. */
-	size_t		extent_sn_next;
-
-	/* Chain of all blocks associated with base. */
-	base_block_t	*blocks;
-
-	/* Heap of extents that track unused trailing space within blocks. */
-	extent_heap_t	avail[NSIZES];
-
-	/* Stats, only maintained if config_stats. */
-	size_t		allocated;
-	size_t		resident;
-	size_t		mapped;
-};
-
-#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/base_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/base_types.h
deleted file mode 100644
index be7ee82..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/base_types.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
-#define JEMALLOC_INTERNAL_BASE_TYPES_H
-
-typedef struct base_block_s base_block_t;
-typedef struct base_s base_t;
-
-#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/bitmap_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/bitmap_externs.h
deleted file mode 100644
index 1b130aa..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/bitmap_externs.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BITMAP_EXTERNS_H
-#define JEMALLOC_INTERNAL_BITMAP_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-void	bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
-void	bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
-size_t	bitmap_size(const bitmap_info_t *binfo);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_BITMAP_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/bitmap_inlines.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/bitmap_inlines.h
deleted file mode 100644
index 5400f9d..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/bitmap_inlines.h
+++ /dev/null
@@ -1,152 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BITMAP_INLINES_H
-#define JEMALLOC_INTERNAL_BITMAP_INLINES_H
-
-#ifndef JEMALLOC_ENABLE_INLINE
-bool	bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
-bool	bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
-void	bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
-size_t	bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
-void	bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
-JEMALLOC_INLINE bool
-bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
-#ifdef BITMAP_USE_TREE
-	size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
-	bitmap_t rg = bitmap[rgoff];
-	/* The bitmap is full iff the root group is 0. */
-	return (rg == 0);
-#else
-	size_t i;
-
-	for (i = 0; i < binfo->ngroups; i++) {
-		if (bitmap[i] != 0)
-			return (false);
-	}
-	return (true);
-#endif
-}
-
-JEMALLOC_INLINE bool
-bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
-{
-	size_t goff;
-	bitmap_t g;
-
-	assert(bit < binfo->nbits);
-	goff = bit >> LG_BITMAP_GROUP_NBITS;
-	g = bitmap[goff];
-	return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))));
-}
-
-JEMALLOC_INLINE void
-bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
-{
-	size_t goff;
-	bitmap_t *gp;
-	bitmap_t g;
-
-	assert(bit < binfo->nbits);
-	assert(!bitmap_get(bitmap, binfo, bit));
-	goff = bit >> LG_BITMAP_GROUP_NBITS;
-	gp = &bitmap[goff];
-	g = *gp;
-	assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
-	g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
-	*gp = g;
-	assert(bitmap_get(bitmap, binfo, bit));
-#ifdef BITMAP_USE_TREE
-	/* Propagate group state transitions up the tree. */
-	if (g == 0) {
-		unsigned i;
-		for (i = 1; i < binfo->nlevels; i++) {
-			bit = goff;
-			goff = bit >> LG_BITMAP_GROUP_NBITS;
-			gp = &bitmap[binfo->levels[i].group_offset + goff];
-			g = *gp;
-			assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
-			g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
-			*gp = g;
-			if (g != 0)
-				break;
-		}
-	}
-#endif
-}
-
-/* sfu: set first unset. */
-JEMALLOC_INLINE size_t
-bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
-	size_t bit;
-	bitmap_t g;
-	unsigned i;
-
-	assert(!bitmap_full(bitmap, binfo));
-
-#ifdef BITMAP_USE_TREE
-	i = binfo->nlevels - 1;
-	g = bitmap[binfo->levels[i].group_offset];
-	bit = ffs_lu(g) - 1;
-	while (i > 0) {
-		i--;
-		g = bitmap[binfo->levels[i].group_offset + bit];
-		bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
-	}
-#else
-	i = 0;
-	g = bitmap[0];
-	while ((bit = ffs_lu(g)) == 0) {
-		i++;
-		g = bitmap[i];
-	}
-	bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
-#endif
-	bitmap_set(bitmap, binfo, bit);
-	return (bit);
-}
-
-JEMALLOC_INLINE void
-bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
-{
-	size_t goff;
-	bitmap_t *gp;
-	bitmap_t g;
-	UNUSED bool propagate;
-
-	assert(bit < binfo->nbits);
-	assert(bitmap_get(bitmap, binfo, bit));
-	goff = bit >> LG_BITMAP_GROUP_NBITS;
-	gp = &bitmap[goff];
-	g = *gp;
-	propagate = (g == 0);
-	assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
-	g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
-	*gp = g;
-	assert(!bitmap_get(bitmap, binfo, bit));
-#ifdef BITMAP_USE_TREE
-	/* Propagate group state transitions up the tree. */
-	if (propagate) {
-		unsigned i;
-		for (i = 1; i < binfo->nlevels; i++) {
-			bit = goff;
-			goff = bit >> LG_BITMAP_GROUP_NBITS;
-			gp = &bitmap[binfo->levels[i].group_offset + goff];
-			g = *gp;
-			propagate = (g == 0);
-			assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
-			    == 0);
-			g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
-			*gp = g;
-			if (!propagate)
-				break;
-		}
-	}
-#endif /* BITMAP_USE_TREE */
-}
-
-#endif
-
-#endif /* JEMALLOC_INTERNAL_BITMAP_INLINES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/bitmap_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/bitmap_structs.h
deleted file mode 100644
index 297ae66..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/bitmap_structs.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BITMAP_STRUCTS_H
-#define JEMALLOC_INTERNAL_BITMAP_STRUCTS_H
-
-struct bitmap_level_s {
-	/* Offset of this level's groups within the array of groups. */
-	size_t group_offset;
-};
-
-struct bitmap_info_s {
-	/* Logical number of bits in bitmap (stored at bottom level). */
-	size_t nbits;
-
-#ifdef BITMAP_USE_TREE
-	/* Number of levels necessary for nbits. */
-	unsigned nlevels;
-
-	/*
-	 * Only the first (nlevels+1) elements are used, and levels are ordered
-	 * bottom to top (e.g. the bottom level is stored in levels[0]).
-	 */
-	bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
-#else /* BITMAP_USE_TREE */
-	/* Number of groups necessary for nbits. */
-	size_t ngroups;
-#endif /* BITMAP_USE_TREE */
-};
-
-#endif /* JEMALLOC_INTERNAL_BITMAP_STRUCTS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/bitmap_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/bitmap_types.h
deleted file mode 100644
index d823186..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/bitmap_types.h
+++ /dev/null
@@ -1,133 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BITMAP_TYPES_H
-#define JEMALLOC_INTERNAL_BITMAP_TYPES_H
-
-/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
-#define	LG_BITMAP_MAXBITS	LG_SLAB_MAXREGS
-#define	BITMAP_MAXBITS		(ZU(1) << LG_BITMAP_MAXBITS)
-
-typedef struct bitmap_level_s bitmap_level_t;
-typedef struct bitmap_info_s bitmap_info_t;
-typedef unsigned long bitmap_t;
-#define	LG_SIZEOF_BITMAP	LG_SIZEOF_LONG
-
-/* Number of bits per group. */
-#define	LG_BITMAP_GROUP_NBITS		(LG_SIZEOF_BITMAP + 3)
-#define	BITMAP_GROUP_NBITS		(1U << LG_BITMAP_GROUP_NBITS)
-#define	BITMAP_GROUP_NBITS_MASK		(BITMAP_GROUP_NBITS-1)
-
-/*
- * Do some analysis on how big the bitmap is before we use a tree.  For a brute
- * force linear search, if we would have to call ffs_lu() more than 2^3 times,
- * use a tree instead.
- */
-#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
-#  define BITMAP_USE_TREE
-#endif
-
-/* Number of groups required to store a given number of bits. */
-#define	BITMAP_BITS2GROUPS(nbits)					\
-    (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
-
-/*
- * Number of groups required at a particular level for a given number of bits.
- */
-#define	BITMAP_GROUPS_L0(nbits)						\
-    BITMAP_BITS2GROUPS(nbits)
-#define	BITMAP_GROUPS_L1(nbits)						\
-    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
-#define	BITMAP_GROUPS_L2(nbits)						\
-    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
-#define	BITMAP_GROUPS_L3(nbits)						\
-    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(		\
-	BITMAP_BITS2GROUPS((nbits)))))
-#define	BITMAP_GROUPS_L4(nbits)						\
-    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(		\
-	BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))
-
-/*
- * Assuming the number of levels, number of groups required for a given number
- * of bits.
- */
-#define	BITMAP_GROUPS_1_LEVEL(nbits)					\
-    BITMAP_GROUPS_L0(nbits)
-#define	BITMAP_GROUPS_2_LEVEL(nbits)					\
-    (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
-#define	BITMAP_GROUPS_3_LEVEL(nbits)					\
-    (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
-#define	BITMAP_GROUPS_4_LEVEL(nbits)					\
-    (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
-#define	BITMAP_GROUPS_5_LEVEL(nbits)					\
-    (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))
-
-/*
- * Maximum number of groups required to support LG_BITMAP_MAXBITS.
- */
-#ifdef BITMAP_USE_TREE
-
-#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
-#  define BITMAP_GROUPS_MAX	BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
-#  define BITMAP_GROUPS_MAX	BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
-#  define BITMAP_GROUPS_MAX	BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
-#  define BITMAP_GROUPS_MAX	BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
-#  define BITMAP_GROUPS_MAX	BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
-#else
-#  error "Unsupported bitmap size"
-#endif
-
-/*
- * Maximum number of levels possible.  This could be statically computed based
- * on LG_BITMAP_MAXBITS:
- *
- * #define BITMAP_MAX_LEVELS \
- *     (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
- *     + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
- *
- * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so
- * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the
- * various cascading macros.  The only additional cost this incurs is some
- * unused trailing entries in bitmap_info_t structures; the bitmaps themselves
- * are not impacted.
- */
-#define	BITMAP_MAX_LEVELS	5
-
-#define	BITMAP_INFO_INITIALIZER(nbits) {				\
-	/* nbits. */							\
-	nbits,								\
-	/* nlevels. */							\
-	(BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) +		\
-	    (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) +	\
-	    (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) +	\
-	    (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1,	\
-	/* levels. */							\
-	{								\
-		{0},							\
-		{BITMAP_GROUPS_L0(nbits)},				\
-		{BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)},	\
-		{BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) +	\
-		    BITMAP_GROUPS_L0(nbits)},				\
-		{BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) +	\
-		    BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)},	\
-		{BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) +	\
-		     BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits)	\
-		     + BITMAP_GROUPS_L0(nbits)}				\
-	}								\
-}
-
-#else /* BITMAP_USE_TREE */
-
-#define	BITMAP_GROUPS_MAX	BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
-
-#define	BITMAP_INFO_INITIALIZER(nbits) {				\
-	/* nbits. */							\
-	nbits,								\
-	/* ngroups. */							\
-	BITMAP_BITS2GROUPS(nbits)					\
-}
-
-#endif /* BITMAP_USE_TREE */
-
-#endif /* JEMALLOC_INTERNAL_BITMAP_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ckh_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ckh_externs.h
deleted file mode 100644
index 3f31e23..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ckh_externs.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_CKH_EXTERNS_H
-#define JEMALLOC_INTERNAL_CKH_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-bool	ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
-    ckh_keycomp_t *keycomp);
-void	ckh_delete(tsd_t *tsd, ckh_t *ckh);
-size_t	ckh_count(ckh_t *ckh);
-bool	ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
-bool	ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
-bool	ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
-    void **data);
-bool	ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
-void	ckh_string_hash(const void *key, size_t r_hash[2]);
-bool	ckh_string_keycomp(const void *k1, const void *k2);
-void	ckh_pointer_hash(const void *key, size_t r_hash[2]);
-bool	ckh_pointer_keycomp(const void *k1, const void *k2);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_CKH_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ckh_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ckh_structs.h
deleted file mode 100644
index a800cbc..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ckh_structs.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_CKH_STRUCTS_H
-#define JEMALLOC_INTERNAL_CKH_STRUCTS_H
-
-/* Hash table cell. */
-struct ckhc_s {
-	const void	*key;
-	const void	*data;
-};
-
-struct ckh_s {
-#ifdef CKH_COUNT
-	/* Counters used to get an idea of performance. */
-	uint64_t	ngrows;
-	uint64_t	nshrinks;
-	uint64_t	nshrinkfails;
-	uint64_t	ninserts;
-	uint64_t	nrelocs;
-#endif
-
-	/* Used for pseudo-random number generation. */
-	uint64_t	prng_state;
-
-	/* Total number of items. */
-	size_t		count;
-
-	/*
-	 * Minimum and current number of hash table buckets.  There are
-	 * 2^LG_CKH_BUCKET_CELLS cells per bucket.
-	 */
-	unsigned	lg_minbuckets;
-	unsigned	lg_curbuckets;
-
-	/* Hash and comparison functions. */
-	ckh_hash_t	*hash;
-	ckh_keycomp_t	*keycomp;
-
-	/* Hash table with 2^lg_curbuckets buckets. */
-	ckhc_t		*tab;
-};
-
-#endif /* JEMALLOC_INTERNAL_CKH_STRUCTS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ckh_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ckh_types.h
deleted file mode 100644
index 9a1d8d4..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ckh_types.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_CKH_TYPES_H
-#define JEMALLOC_INTERNAL_CKH_TYPES_H
-
-typedef struct ckh_s ckh_t;
-typedef struct ckhc_s ckhc_t;
-
-/* Typedefs to allow easy function pointer passing. */
-typedef void ckh_hash_t (const void *, size_t[2]);
-typedef bool ckh_keycomp_t (const void *, const void *);
-
-/* Maintain counters used to get an idea of performance. */
-/* #define	CKH_COUNT */
-/* Print counter values in ckh_delete() (requires CKH_COUNT). */
-/* #define	CKH_VERBOSE */
-
-/*
- * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket.  Try to fit
- * one bucket per L1 cache line.
- */
-#define	LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
-
-#endif /* JEMALLOC_INTERNAL_CKH_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ctl_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ctl_externs.h
deleted file mode 100644
index 9e9d5f3..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ctl_externs.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_CTL_EXTERNS_H
-#define JEMALLOC_INTERNAL_CTL_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-int	ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
-    void *newp, size_t newlen);
-int	ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
-    size_t *miblenp);
-
-int	ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen);
-bool	ctl_boot(void);
-void	ctl_prefork(tsdn_t *tsdn);
-void	ctl_postfork_parent(tsdn_t *tsdn);
-void	ctl_postfork_child(tsdn_t *tsdn);
-
-#define	xmallctl(name, oldp, oldlenp, newp, newlen) do {		\
-	if (je_mallctl(name, oldp, oldlenp, newp, newlen)		\
-	    != 0) {							\
-		malloc_printf(						\
-		    "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n",	\
-		    name);						\
-		abort();						\
-	}								\
-} while (0)
-
-#define	xmallctlnametomib(name, mibp, miblenp) do {			\
-	if (je_mallctlnametomib(name, mibp, miblenp) != 0) {		\
-		malloc_printf("<jemalloc>: Failure in "			\
-		    "xmallctlnametomib(\"%s\", ...)\n", name);		\
-		abort();						\
-	}								\
-} while (0)
-
-#define	xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do {	\
-	if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp,		\
-	    newlen) != 0) {						\
-		malloc_write(						\
-		    "<jemalloc>: Failure in xmallctlbymib()\n");	\
-		abort();						\
-	}								\
-} while (0)
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_CTL_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ctl_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ctl_structs.h
deleted file mode 100644
index 18806a5..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ctl_structs.h
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_CTL_STRUCTS_H
-#define JEMALLOC_INTERNAL_CTL_STRUCTS_H
-
-struct ctl_node_s {
-	bool			named;
-};
-
-struct ctl_named_node_s {
-	struct ctl_node_s	node;
-	const char		*name;
-	/* If (nchildren == 0), this is a terminal node. */
-	size_t			nchildren;
-	const			ctl_node_t *children;
-	int			(*ctl)(tsd_t *, const size_t *, size_t, void *,
-	    size_t *, void *, size_t);
-};
-
-struct ctl_indexed_node_s {
-	struct ctl_node_s	node;
-	const ctl_named_node_t	*(*index)(tsdn_t *, const size_t *, size_t,
-	    size_t);
-};
-
-struct ctl_arena_stats_s {
-	arena_stats_t		astats;
-
-	/* Aggregate stats for small size classes, based on bin stats. */
-	size_t			allocated_small;
-	uint64_t		nmalloc_small;
-	uint64_t		ndalloc_small;
-	uint64_t		nrequests_small;
-
-	malloc_bin_stats_t	bstats[NBINS];
-	malloc_large_stats_t	lstats[NSIZES - NBINS];
-};
-
-struct ctl_stats_s {
-	size_t			allocated;
-	size_t			active;
-	size_t			metadata;
-	size_t			resident;
-	size_t			mapped;
-	size_t			retained;
-};
-
-struct ctl_arena_s {
-	unsigned		arena_ind;
-	bool			initialized;
-	ql_elm(ctl_arena_t)	destroyed_link;
-
-	/* Basic stats, supported even if !config_stats. */
-	unsigned		nthreads;
-	const char		*dss;
-	ssize_t			decay_time;
-	size_t			pactive;
-	size_t			pdirty;
-
-	/* NULL if !config_stats. */
-	ctl_arena_stats_t	*astats;
-};
-
-struct ctl_arenas_s {
-	uint64_t		epoch;
-	unsigned		narenas;
-	ql_head(ctl_arena_t)	destroyed;
-
-	/*
-	 * Element 0 corresponds to merged stats for extant arenas (accessed via
-	 * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
-	 * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
-	 * remaining MALLOCX_ARENA_MAX+1 elements correspond to arenas.
-	 */
-	ctl_arena_t		*arenas[MALLOCX_ARENA_MAX + 3];
-};
-
-#endif /* JEMALLOC_INTERNAL_CTL_STRUCTS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ctl_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ctl_types.h
deleted file mode 100644
index 7853a4b..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ctl_types.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_CTL_TYPES_H
-#define JEMALLOC_INTERNAL_CTL_TYPES_H
-
-typedef struct ctl_node_s ctl_node_t;
-typedef struct ctl_named_node_s ctl_named_node_t;
-typedef struct ctl_indexed_node_s ctl_indexed_node_t;
-typedef struct ctl_arena_stats_s ctl_arena_stats_t;
-typedef struct ctl_stats_s ctl_stats_t;
-typedef struct ctl_arena_s ctl_arena_t;
-typedef struct ctl_arenas_s ctl_arenas_t;
-
-#endif /* JEMALLOC_INTERNAL_CTL_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_dss_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_dss_externs.h
deleted file mode 100644
index 5ac2a3e..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_dss_externs.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_EXTERNS_H
-#define JEMALLOC_INTERNAL_EXTENT_DSS_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-extern const char	*opt_dss;
-
-dss_prec_t	extent_dss_prec_get(void);
-bool	extent_dss_prec_set(dss_prec_t dss_prec);
-void	*extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
-    size_t size, size_t alignment, bool *zero, bool *commit);
-bool	extent_in_dss(void *addr);
-bool	extent_dss_mergeable(void *addr_a, void *addr_b);
-void	extent_dss_boot(void);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_dss_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_dss_structs.h
deleted file mode 100644
index ca15bab..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_dss_structs.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_STRUCTS_H
-#define JEMALLOC_INTERNAL_EXTENT_DSS_STRUCTS_H
-
-#pragma GCC visibility push(hidden)
-
-extern const char *dss_prec_names[];
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_STRUCTS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_dss_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_dss_types.h
deleted file mode 100644
index 2839757..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_dss_types.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_TYPES_H
-#define JEMALLOC_INTERNAL_EXTENT_DSS_TYPES_H
-
-typedef enum {
-	dss_prec_disabled  = 0,
-	dss_prec_primary   = 1,
-	dss_prec_secondary = 2,
-
-	dss_prec_limit     = 3
-} dss_prec_t;
-#define	DSS_PREC_DEFAULT	dss_prec_secondary
-#define	DSS_DEFAULT		"secondary"
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_externs.h
deleted file mode 100644
index 9761a469..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_externs.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
-#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-extern rtree_t			extents_rtree;
-extern const extent_hooks_t	extent_hooks_default;
-
-extent_t	*extent_alloc(tsdn_t *tsdn, arena_t *arena);
-void	extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
-
-extent_hooks_t	*extent_hooks_get(arena_t *arena);
-extent_hooks_t	*extent_hooks_set(arena_t *arena, extent_hooks_t *extent_hooks);
-
-#ifdef JEMALLOC_JET
-typedef size_t (extent_size_quantize_t)(size_t);
-extern extent_size_quantize_t *extent_size_quantize_floor;
-extern extent_size_quantize_t *extent_size_quantize_ceil;
-#else
-size_t	extent_size_quantize_floor(size_t size);
-size_t	extent_size_quantize_ceil(size_t size);
-#endif
-
-ph_proto(, extent_heap_, extent_heap_t, extent_t)
-
-extent_t	*extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
-    size_t alignment, bool *zero, bool *commit, bool slab);
-extent_t	*extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
-    size_t alignment, bool *zero, bool *commit, bool slab);
-extent_t	*extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
-    size_t alignment, bool *zero, bool *commit, bool slab);
-void	extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
-void	extent_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent);
-bool	extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent);
-void	extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent);
-bool	extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
-    size_t length);
-bool	extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
-    size_t length);
-bool	extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
-    size_t length);
-bool	extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
-    size_t length);
-extent_t	*extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
-    size_t usize_a, size_t size_b, size_t usize_b);
-bool	extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);
-
-bool	extent_boot(void);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_inlines.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_inlines.h
deleted file mode 100644
index 87e0bcd..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_inlines.h
+++ /dev/null
@@ -1,312 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
-#define JEMALLOC_INTERNAL_EXTENT_INLINES_H
-
-#ifndef JEMALLOC_ENABLE_INLINE
-extent_t	*extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent);
-arena_t	*extent_arena_get(const extent_t *extent);
-void	*extent_base_get(const extent_t *extent);
-void	*extent_addr_get(const extent_t *extent);
-size_t	extent_size_get(const extent_t *extent);
-size_t	extent_usize_get(const extent_t *extent);
-void	*extent_before_get(const extent_t *extent);
-void	*extent_last_get(const extent_t *extent);
-void	*extent_past_get(const extent_t *extent);
-size_t	extent_sn_get(const extent_t *extent);
-bool	extent_active_get(const extent_t *extent);
-bool	extent_retained_get(const extent_t *extent);
-bool	extent_zeroed_get(const extent_t *extent);
-bool	extent_committed_get(const extent_t *extent);
-bool	extent_slab_get(const extent_t *extent);
-arena_slab_data_t	*extent_slab_data_get(extent_t *extent);
-const arena_slab_data_t	*extent_slab_data_get_const(const extent_t *extent);
-prof_tctx_t	*extent_prof_tctx_get(const extent_t *extent);
-void	extent_arena_set(extent_t *extent, arena_t *arena);
-void	extent_addr_set(extent_t *extent, void *addr);
-void	extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment);
-void	extent_size_set(extent_t *extent, size_t size);
-void	extent_usize_set(extent_t *extent, size_t usize);
-void	extent_sn_set(extent_t *extent, size_t sn);
-void	extent_active_set(extent_t *extent, bool active);
-void	extent_zeroed_set(extent_t *extent, bool zeroed);
-void	extent_committed_set(extent_t *extent, bool committed);
-void	extent_slab_set(extent_t *extent, bool slab);
-void	extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
-void	extent_init(extent_t *extent, arena_t *arena, void *addr,
-    size_t size, size_t usize, size_t sn, bool active, bool zeroed,
-    bool committed, bool slab);
-void	extent_ring_insert(extent_t *sentinel, extent_t *extent);
-void	extent_ring_remove(extent_t *extent);
-int	extent_sn_comp(const extent_t *a, const extent_t *b);
-int	extent_ad_comp(const extent_t *a, const extent_t *b);
-int	extent_snad_comp(const extent_t *a, const extent_t *b);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
-JEMALLOC_INLINE extent_t *
-extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent)
-{
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
-	return (rtree_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
-	    dependent));
-}
-
-JEMALLOC_INLINE arena_t *
-extent_arena_get(const extent_t *extent)
-{
-	return (extent->e_arena);
-}
-
-JEMALLOC_INLINE void *
-extent_base_get(const extent_t *extent)
-{
-	assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
-	    !extent->e_slab);
-	return (PAGE_ADDR2BASE(extent->e_addr));
-}
-
-JEMALLOC_INLINE void *
-extent_addr_get(const extent_t *extent)
-{
-	assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
-	    !extent->e_slab);
-	return (extent->e_addr);
-}
-
-JEMALLOC_INLINE size_t
-extent_size_get(const extent_t *extent)
-{
-	return (extent->e_size);
-}
-
-JEMALLOC_INLINE size_t
-extent_usize_get(const extent_t *extent)
-{
-	assert(!extent->e_slab);
-	return (extent->e_usize);
-}
-
-JEMALLOC_INLINE void *
-extent_before_get(const extent_t *extent)
-{
-	return ((void *)((uintptr_t)extent_base_get(extent) - PAGE));
-}
-
-JEMALLOC_INLINE void *
-extent_last_get(const extent_t *extent)
-{
-	return ((void *)((uintptr_t)extent_base_get(extent) +
-	    extent_size_get(extent) - PAGE));
-}
-
-JEMALLOC_INLINE void *
-extent_past_get(const extent_t *extent)
-{
-	return ((void *)((uintptr_t)extent_base_get(extent) +
-	    extent_size_get(extent)));
-}
-
-JEMALLOC_INLINE size_t
-extent_sn_get(const extent_t *extent)
-{
-	return (extent->e_sn);
-}
-
-JEMALLOC_INLINE bool
-extent_active_get(const extent_t *extent)
-{
-	return (extent->e_active);
-}
-
-JEMALLOC_INLINE bool
-extent_retained_get(const extent_t *extent)
-{
-	return (qr_next(extent, qr_link) == extent);
-}
-
-JEMALLOC_INLINE bool
-extent_zeroed_get(const extent_t *extent)
-{
-	return (extent->e_zeroed);
-}
-
-JEMALLOC_INLINE bool
-extent_committed_get(const extent_t *extent)
-{
-	return (extent->e_committed);
-}
-
-JEMALLOC_INLINE bool
-extent_slab_get(const extent_t *extent)
-{
-	return (extent->e_slab);
-}
-
-JEMALLOC_INLINE arena_slab_data_t *
-extent_slab_data_get(extent_t *extent)
-{
-	assert(extent->e_slab);
-	return (&extent->e_slab_data);
-}
-
-JEMALLOC_INLINE const arena_slab_data_t *
-extent_slab_data_get_const(const extent_t *extent)
-{
-	assert(extent->e_slab);
-	return (&extent->e_slab_data);
-}
-
-JEMALLOC_INLINE prof_tctx_t *
-extent_prof_tctx_get(const extent_t *extent)
-{
-	return ((prof_tctx_t *)atomic_read_p(
-	    &((extent_t *)extent)->e_prof_tctx_pun));
-}
-
-JEMALLOC_INLINE void
-extent_arena_set(extent_t *extent, arena_t *arena)
-{
-	extent->e_arena = arena;
-}
-
-JEMALLOC_INLINE void
-extent_addr_set(extent_t *extent, void *addr)
-{
-	extent->e_addr = addr;
-}
-
-JEMALLOC_INLINE void
-extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment)
-{
-	assert(extent_base_get(extent) == extent_addr_get(extent));
-
-	if (alignment < PAGE) {
-		unsigned lg_range = LG_PAGE -
-		    lg_floor(CACHELINE_CEILING(alignment));
-		size_t r =
-		    prng_lg_range_zu(&extent_arena_get(extent)->offset_state,
-		    lg_range, true);
-		uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
-		    lg_range);
-		extent->e_addr = (void *)((uintptr_t)extent->e_addr +
-		    random_offset);
-		assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
-		    extent->e_addr);
-	}
-}
-
-JEMALLOC_INLINE void
-extent_size_set(extent_t *extent, size_t size)
-{
-	extent->e_size = size;
-}
-
-JEMALLOC_INLINE void
-extent_usize_set(extent_t *extent, size_t usize)
-{
-	extent->e_usize = usize;
-}
-
-JEMALLOC_INLINE void
-extent_sn_set(extent_t *extent, size_t sn)
-{
-	extent->e_sn = sn;
-}
-
-JEMALLOC_INLINE void
-extent_active_set(extent_t *extent, bool active)
-{
-	extent->e_active = active;
-}
-
-JEMALLOC_INLINE void
-extent_zeroed_set(extent_t *extent, bool zeroed)
-{
-	extent->e_zeroed = zeroed;
-}
-
-JEMALLOC_INLINE void
-extent_committed_set(extent_t *extent, bool committed)
-{
-	extent->e_committed = committed;
-}
-
-JEMALLOC_INLINE void
-extent_slab_set(extent_t *extent, bool slab)
-{
-	extent->e_slab = slab;
-}
-
-JEMALLOC_INLINE void
-extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx)
-{
-	atomic_write_p(&extent->e_prof_tctx_pun, tctx);
-}
-
-JEMALLOC_INLINE void
-extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
-    size_t usize, size_t sn, bool active, bool zeroed, bool committed,
-    bool slab)
-{
-	assert(addr == PAGE_ADDR2BASE(addr) || !slab);
-
-	extent_arena_set(extent, arena);
-	extent_addr_set(extent, addr);
-	extent_size_set(extent, size);
-	extent_usize_set(extent, usize);
-	extent_sn_set(extent, sn);
-	extent_active_set(extent, active);
-	extent_zeroed_set(extent, zeroed);
-	extent_committed_set(extent, committed);
-	extent_slab_set(extent, slab);
-	if (config_prof)
-		extent_prof_tctx_set(extent, NULL);
-	qr_new(extent, qr_link);
-}
-
-JEMALLOC_INLINE void
-extent_ring_insert(extent_t *sentinel, extent_t *extent)
-{
-	qr_meld(sentinel, extent, extent_t, qr_link);
-}
-
-JEMALLOC_INLINE void
-extent_ring_remove(extent_t *extent)
-{
-	qr_remove(extent, qr_link);
-}
-
-JEMALLOC_INLINE int
-extent_sn_comp(const extent_t *a, const extent_t *b)
-{
-	size_t a_sn = extent_sn_get(a);
-	size_t b_sn = extent_sn_get(b);
-
-	return ((a_sn > b_sn) - (a_sn < b_sn));
-}
-
-JEMALLOC_INLINE int
-extent_ad_comp(const extent_t *a, const extent_t *b)
-{
-	uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
-	uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
-
-	return ((a_addr > b_addr) - (a_addr < b_addr));
-}
-
-JEMALLOC_INLINE int
-extent_snad_comp(const extent_t *a, const extent_t *b)
-{
-	int ret;
-
-	ret = extent_sn_comp(a, b);
-	if (ret != 0)
-		return (ret);
-
-	ret = extent_ad_comp(a, b);
-	return (ret);
-}
-#endif
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_mmap_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_mmap_externs.h
deleted file mode 100644
index a48cdbe..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_mmap_externs.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
-#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-void	*extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool *commit);
-bool	extent_dalloc_mmap(void *addr, size_t size);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_structs.h
deleted file mode 100644
index de31317..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_structs.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
-#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
-
-/* Extent (span of pages).  Use accessor functions for e_* fields. */
-struct extent_s {
-	/* Arena from which this extent came, if any. */
-	arena_t			*e_arena;
-
-	/* Pointer to the extent that this structure is responsible for. */
-	void			*e_addr;
-
-	/* Extent size. */
-	size_t			e_size;
-
-	/*
-	 * Usable size, typically smaller than extent size due to large_pad or
-	 * promotion of sampled small regions.
-	 */
-	size_t			e_usize;
-
-	/*
-	 * Serial number (potentially non-unique).
-	 *
-	 * In principle serial numbers can wrap around on 32-bit systems if
-	 * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
-	 * back on address comparison for equal serial numbers, stable (if
-	 * imperfect) ordering is maintained.
-	 *
-	 * Serial numbers may not be unique even in the absence of wrap-around,
-	 * e.g. when splitting an extent and assigning the same serial number to
-	 * both resulting adjacent extents.
-	 */
-	size_t			e_sn;
-
-	/* True if extent is active (in use). */
-	bool			e_active;
-
-	/*
-	 * The zeroed flag is used by extent recycling code to track whether
-	 * memory is zero-filled.
-	 */
-	bool			e_zeroed;
-
-	/*
-	 * True if physical memory is committed to the extent, whether
-	 * explicitly or implicitly as on a system that overcommits and
-	 * satisfies physical memory needs on demand via soft page faults.
-	 */
-	bool			e_committed;
-
-	/*
-	 * The slab flag indicates whether the extent is used for a slab of
-	 * small regions.  This helps differentiate small size classes, and it
-	 * indicates whether interior pointers can be looked up via iealloc().
-	 */
-	bool			e_slab;
-
-	union {
-		/* Small region slab metadata. */
-		arena_slab_data_t	e_slab_data;
-
-		/* Profile counters, used for large objects. */
-		union {
-			void		*e_prof_tctx_pun;
-			prof_tctx_t	*e_prof_tctx;
-		};
-	};
-
-	/*
-	 * Linkage for arena's extents_dirty and arena_bin_t's slabs_full rings.
-	 */
-	qr(extent_t)		qr_link;
-
-	union {
-		/* Linkage for per size class sn/address-ordered heaps. */
-		phn(extent_t)		ph_link;
-
-		/* Linkage for arena's large and extent_cache lists. */
-		ql_elm(extent_t)	ql_link;
-	};
-};
-typedef ph(extent_t) extent_heap_t;
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_types.h
deleted file mode 100644
index 4873dc5..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/extent_types.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
-#define JEMALLOC_INTERNAL_EXTENT_TYPES_H
-
-typedef struct extent_s extent_t;
-
-#define	EXTENT_HOOKS_INITIALIZER	NULL
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/hash_inlines.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/hash_inlines.h
deleted file mode 100644
index 4bb7850..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/hash_inlines.h
+++ /dev/null
@@ -1,338 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_HASH_INLINES_H
-#define JEMALLOC_INTERNAL_HASH_INLINES_H
-
-/*
- * The following hash function is based on MurmurHash3, placed into the public
- * domain by Austin Appleby.  See https://github.com/aappleby/smhasher for
- * details.
- */
-
-#ifndef JEMALLOC_ENABLE_INLINE
-uint32_t	hash_x86_32(const void *key, int len, uint32_t seed);
-void	hash_x86_128(const void *key, const int len, uint32_t seed,
-    uint64_t r_out[2]);
-void	hash_x64_128(const void *key, const int len, const uint32_t seed,
-    uint64_t r_out[2]);
-void	hash(const void *key, size_t len, const uint32_t seed,
-    size_t r_hash[2]);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
-/******************************************************************************/
-/* Internal implementation. */
-JEMALLOC_INLINE uint32_t
-hash_rotl_32(uint32_t x, int8_t r)
-{
-	return ((x << r) | (x >> (32 - r)));
-}
-
-JEMALLOC_INLINE uint64_t
-hash_rotl_64(uint64_t x, int8_t r)
-{
-	return ((x << r) | (x >> (64 - r)));
-}
-
-JEMALLOC_INLINE uint32_t
-hash_get_block_32(const uint32_t *p, int i)
-{
-	/* Handle unaligned read. */
-	if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
-		uint32_t ret;
-
-		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
-		return (ret);
-	}
-
-	return (p[i]);
-}
-
-JEMALLOC_INLINE uint64_t
-hash_get_block_64(const uint64_t *p, int i)
-{
-	/* Handle unaligned read. */
-	if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
-		uint64_t ret;
-
-		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
-		return (ret);
-	}
-
-	return (p[i]);
-}
-
-JEMALLOC_INLINE uint32_t
-hash_fmix_32(uint32_t h)
-{
-	h ^= h >> 16;
-	h *= 0x85ebca6b;
-	h ^= h >> 13;
-	h *= 0xc2b2ae35;
-	h ^= h >> 16;
-
-	return (h);
-}
-
-JEMALLOC_INLINE uint64_t
-hash_fmix_64(uint64_t k)
-{
-	k ^= k >> 33;
-	k *= KQU(0xff51afd7ed558ccd);
-	k ^= k >> 33;
-	k *= KQU(0xc4ceb9fe1a85ec53);
-	k ^= k >> 33;
-
-	return (k);
-}
-
-JEMALLOC_INLINE uint32_t
-hash_x86_32(const void *key, int len, uint32_t seed)
-{
-	const uint8_t *data = (const uint8_t *) key;
-	const int nblocks = len / 4;
-
-	uint32_t h1 = seed;
-
-	const uint32_t c1 = 0xcc9e2d51;
-	const uint32_t c2 = 0x1b873593;
-
-	/* body */
-	{
-		const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
-		int i;
-
-		for (i = -nblocks; i; i++) {
-			uint32_t k1 = hash_get_block_32(blocks, i);
-
-			k1 *= c1;
-			k1 = hash_rotl_32(k1, 15);
-			k1 *= c2;
-
-			h1 ^= k1;
-			h1 = hash_rotl_32(h1, 13);
-			h1 = h1*5 + 0xe6546b64;
-		}
-	}
-
-	/* tail */
-	{
-		const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
-
-		uint32_t k1 = 0;
-
-		switch (len & 3) {
-		case 3: k1 ^= tail[2] << 16;
-		case 2: k1 ^= tail[1] << 8;
-		case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
-			k1 *= c2; h1 ^= k1;
-		}
-	}
-
-	/* finalization */
-	h1 ^= len;
-
-	h1 = hash_fmix_32(h1);
-
-	return (h1);
-}
-
-UNUSED JEMALLOC_INLINE void
-hash_x86_128(const void *key, const int len, uint32_t seed,
-    uint64_t r_out[2])
-{
-	const uint8_t * data = (const uint8_t *) key;
-	const int nblocks = len / 16;
-
-	uint32_t h1 = seed;
-	uint32_t h2 = seed;
-	uint32_t h3 = seed;
-	uint32_t h4 = seed;
-
-	const uint32_t c1 = 0x239b961b;
-	const uint32_t c2 = 0xab0e9789;
-	const uint32_t c3 = 0x38b34ae5;
-	const uint32_t c4 = 0xa1e38b93;
-
-	/* body */
-	{
-		const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
-		int i;
-
-		for (i = -nblocks; i; i++) {
-			uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
-			uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
-			uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
-			uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
-
-			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
-
-			h1 = hash_rotl_32(h1, 19); h1 += h2;
-			h1 = h1*5 + 0x561ccd1b;
-
-			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
-
-			h2 = hash_rotl_32(h2, 17); h2 += h3;
-			h2 = h2*5 + 0x0bcaa747;
-
-			k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
-
-			h3 = hash_rotl_32(h3, 15); h3 += h4;
-			h3 = h3*5 + 0x96cd1c35;
-
-			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
-
-			h4 = hash_rotl_32(h4, 13); h4 += h1;
-			h4 = h4*5 + 0x32ac3b17;
-		}
-	}
-
-	/* tail */
-	{
-		const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
-		uint32_t k1 = 0;
-		uint32_t k2 = 0;
-		uint32_t k3 = 0;
-		uint32_t k4 = 0;
-
-		switch (len & 15) {
-		case 15: k4 ^= tail[14] << 16;
-		case 14: k4 ^= tail[13] << 8;
-		case 13: k4 ^= tail[12] << 0;
-			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
-
-		case 12: k3 ^= tail[11] << 24;
-		case 11: k3 ^= tail[10] << 16;
-		case 10: k3 ^= tail[ 9] << 8;
-		case  9: k3 ^= tail[ 8] << 0;
-		     k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
-
-		case  8: k2 ^= tail[ 7] << 24;
-		case  7: k2 ^= tail[ 6] << 16;
-		case  6: k2 ^= tail[ 5] << 8;
-		case  5: k2 ^= tail[ 4] << 0;
-			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
-
-		case  4: k1 ^= tail[ 3] << 24;
-		case  3: k1 ^= tail[ 2] << 16;
-		case  2: k1 ^= tail[ 1] << 8;
-		case  1: k1 ^= tail[ 0] << 0;
-			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
-		}
-	}
-
-	/* finalization */
-	h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
-
-	h1 += h2; h1 += h3; h1 += h4;
-	h2 += h1; h3 += h1; h4 += h1;
-
-	h1 = hash_fmix_32(h1);
-	h2 = hash_fmix_32(h2);
-	h3 = hash_fmix_32(h3);
-	h4 = hash_fmix_32(h4);
-
-	h1 += h2; h1 += h3; h1 += h4;
-	h2 += h1; h3 += h1; h4 += h1;
-
-	r_out[0] = (((uint64_t) h2) << 32) | h1;
-	r_out[1] = (((uint64_t) h4) << 32) | h3;
-}
-
-UNUSED JEMALLOC_INLINE void
-hash_x64_128(const void *key, const int len, const uint32_t seed,
-    uint64_t r_out[2])
-{
-	const uint8_t *data = (const uint8_t *) key;
-	const int nblocks = len / 16;
-
-	uint64_t h1 = seed;
-	uint64_t h2 = seed;
-
-	const uint64_t c1 = KQU(0x87c37b91114253d5);
-	const uint64_t c2 = KQU(0x4cf5ad432745937f);
-
-	/* body */
-	{
-		const uint64_t *blocks = (const uint64_t *) (data);
-		int i;
-
-		for (i = 0; i < nblocks; i++) {
-			uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
-			uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
-
-			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
-
-			h1 = hash_rotl_64(h1, 27); h1 += h2;
-			h1 = h1*5 + 0x52dce729;
-
-			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
-
-			h2 = hash_rotl_64(h2, 31); h2 += h1;
-			h2 = h2*5 + 0x38495ab5;
-		}
-	}
-
-	/* tail */
-	{
-		const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
-		uint64_t k1 = 0;
-		uint64_t k2 = 0;
-
-		switch (len & 15) {
-		case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
-		case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
-		case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
-		case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
-		case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
-		case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
-		case  9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
-			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
-
-		case  8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
-		case  7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
-		case  6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
-		case  5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
-		case  4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
-		case  3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
-		case  2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
-		case  1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
-			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
-		}
-	}
-
-	/* finalization */
-	h1 ^= len; h2 ^= len;
-
-	h1 += h2;
-	h2 += h1;
-
-	h1 = hash_fmix_64(h1);
-	h2 = hash_fmix_64(h2);
-
-	h1 += h2;
-	h2 += h1;
-
-	r_out[0] = h1;
-	r_out[1] = h2;
-}
-
-/******************************************************************************/
-/* API. */
-JEMALLOC_INLINE void
-hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
-{
-	assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
-
-#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
-	hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
-#else
-	{
-		uint64_t hashes[2];
-		hash_x86_128(key, (int)len, seed, hashes);
-		r_hash[0] = (size_t)hashes[0];
-		r_hash[1] = (size_t)hashes[1];
-	}
-#endif
-}
-#endif
-
-#endif /* JEMALLOC_INTERNAL_HASH_INLINES_H */
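The file above is jemalloc's bundled copy of the MurmurHash3-style routines (hash_x86_32, hash_x86_128, hash_x64_128 and their helpers), all of which go away with the rest of the library. As a minimal standalone sketch of what the deleted hash_fmix_64 finalizer does (the names fmix64 and main below are illustrative and not part of the removed sources):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Standalone copy of the 64-bit avalanche finalizer deleted above
     * (hash_fmix_64).  Same xor-shift/multiply steps and constants.
     */
    static uint64_t
    fmix64(uint64_t k)
    {
        k ^= k >> 33;
        k *= UINT64_C(0xff51afd7ed558ccd);
        k ^= k >> 33;
        k *= UINT64_C(0xc4ceb9fe1a85ec53);
        k ^= k >> 33;
        return k;
    }

    int
    main(void)
    {
        /* Flipping one input bit should change roughly half the output bits. */
        printf("%016" PRIx64 "\n", fmix64(0x1234));
        printf("%016" PRIx64 "\n", fmix64(0x1235));
        return 0;
    }
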
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
deleted file mode 100644
index 242d11b..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
+++ /dev/null
@@ -1,1188 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_H
-#define	JEMALLOC_INTERNAL_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "jemalloc_internal_defs.h"
-#include "jemalloc/internal/jemalloc_internal_decls.h"
-
-#ifdef JEMALLOC_UTRACE
-#include <sys/ktrace.h>
-#endif
-
-#define	JEMALLOC_NO_DEMANGLE
-#ifdef JEMALLOC_JET
-#  define JEMALLOC_N(n) jet_##n
-#  include "jemalloc/internal/public_namespace.h"
-#  define JEMALLOC_NO_RENAME
-#  include "../jemalloc.h"
-#  undef JEMALLOC_NO_RENAME
-#else
-#  define JEMALLOC_N(n) je_##n
-#  include "../jemalloc.h"
-#endif
-#include "jemalloc/internal/private_namespace.h"
-
-static const bool config_debug =
-#ifdef JEMALLOC_DEBUG
-    true
-#else
-    false
-#endif
-    ;
-static const bool have_dss =
-#ifdef JEMALLOC_DSS
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_fill =
-#ifdef JEMALLOC_FILL
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_lazy_lock =
-#ifdef JEMALLOC_LAZY_LOCK
-    true
-#else
-    false
-#endif
-    ;
-static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
-static const bool config_prof =
-#ifdef JEMALLOC_PROF
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_prof_libgcc =
-#ifdef JEMALLOC_PROF_LIBGCC
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_prof_libunwind =
-#ifdef JEMALLOC_PROF_LIBUNWIND
-    true
-#else
-    false
-#endif
-    ;
-static const bool maps_coalesce =
-#ifdef JEMALLOC_MAPS_COALESCE
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_munmap =
-#ifdef JEMALLOC_MUNMAP
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_stats =
-#ifdef JEMALLOC_STATS
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_tcache =
-#ifdef JEMALLOC_TCACHE
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_tls =
-#ifdef JEMALLOC_TLS
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_utrace =
-#ifdef JEMALLOC_UTRACE
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_xmalloc =
-#ifdef JEMALLOC_XMALLOC
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_ivsalloc =
-#ifdef JEMALLOC_IVSALLOC
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_cache_oblivious =
-#ifdef JEMALLOC_CACHE_OBLIVIOUS
-    true
-#else
-    false
-#endif
-    ;
-static const bool have_thp =
-#ifdef JEMALLOC_THP
-    true
-#else
-    false
-#endif
-    ;
-
-#if defined(JEMALLOC_C11ATOMICS) && !defined(__cplusplus)
-#include <stdatomic.h>
-#endif
-
-#ifdef JEMALLOC_ATOMIC9
-#include <machine/atomic.h>
-#endif
-
-#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
-#include <libkern/OSAtomic.h>
-#endif
-
-#ifdef JEMALLOC_ZONE
-#include <mach/mach_error.h>
-#include <mach/mach_init.h>
-#include <mach/vm_map.h>
-#endif
-
-#include "jemalloc/internal/ph.h"
-#ifndef __PGI
-#define	RB_COMPACT
-#endif
-#include "jemalloc/internal/rb.h"
-#include "jemalloc/internal/qr.h"
-#include "jemalloc/internal/ql.h"
-
-/*
- * jemalloc can conceptually be broken into components (arena, tcache, etc.),
- * but there are circular dependencies that cannot be broken without
- * substantial performance degradation.
- *
- * Historically, we dealt with this by splitting each header into four
- * sections (types, structs, externs, and inlines), and included each header
- * file multiple times
- * in this file, picking out the portion we want on each pass using the
- * following #defines:
- *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
- *                        types.
- *   JEMALLOC_H_STRUCTS : Data structures.
- *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
- *   JEMALLOC_H_INLINES : Inline functions.
- *
- * We're moving toward a world in which the dependencies are explicit; each file
- * will #include the headers it depends on (rather than relying on them being
- * implicitly available via this file including every header file in the
- * project).
- *
- * We're now in an intermediate state: we've broken up the header files to avoid
- * having to include each one multiple times, but have not yet moved the
- * dependency information into the header files (i.e. we still rely on the
- * ordering in this file to ensure all a header's dependencies are available in
- * its translation unit).  Each component is now broken up into multiple header
- * files, corresponding to the sections above (e.g. instead of "tsd.h", we now
- * have "tsd_types.h", "tsd_structs.h", "tsd_externs.h", "tsd_inlines.h").
- */
-
-#include "jemalloc/internal/jemalloc_internal_macros.h"
-
-/******************************************************************************/
-/* TYPES */
-/******************************************************************************/
-
-/* Page size index type. */
-typedef unsigned pszind_t;
-
-/* Size class index type. */
-typedef unsigned szind_t;
-
-/*
- * Flags bits:
- *
- * a: arena
- * t: tcache
- * 0: unused
- * z: zero
- * n: alignment
- *
- * aaaaaaaa aaaatttt tttttttt 0znnnnnn
- */
-#define	MALLOCX_ARENA_BITS	12
-#define	MALLOCX_TCACHE_BITS	12
-#define	MALLOCX_LG_ALIGN_BITS	6
-#define	MALLOCX_ARENA_SHIFT	20
-#define	MALLOCX_TCACHE_SHIFT	8
-#define	MALLOCX_ARENA_MASK \
-    (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
-/* NB: Arena index bias decreases the maximum number of arenas by 1. */
-#define	MALLOCX_ARENA_MAX	((1 << MALLOCX_ARENA_BITS) - 2)
-#define	MALLOCX_TCACHE_MASK \
-    (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
-#define	MALLOCX_TCACHE_MAX	((1 << MALLOCX_TCACHE_BITS) - 3)
-#define	MALLOCX_LG_ALIGN_MASK	((1 << MALLOCX_LG_ALIGN_BITS) - 1)
-/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
-#define	MALLOCX_ALIGN_GET_SPECIFIED(flags)				\
-    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
-#define	MALLOCX_ALIGN_GET(flags)					\
-    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
-#define	MALLOCX_ZERO_GET(flags)						\
-    ((bool)(flags & MALLOCX_ZERO))
-
-#define	MALLOCX_TCACHE_GET(flags)					\
-    (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
-#define	MALLOCX_ARENA_GET(flags)					\
-    (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
-
-/* Smallest size class to support. */
-#define	TINY_MIN		(1U << LG_TINY_MIN)
-
-/*
- * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
- * classes).
- */
-#ifndef LG_QUANTUM
-#  if (defined(__i386__) || defined(_M_IX86))
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __ia64__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __alpha__
-#    define LG_QUANTUM		4
-#  endif
-#  if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
-#    define LG_QUANTUM		4
-#  endif
-#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __arm__
-#    define LG_QUANTUM		3
-#  endif
-#  ifdef __aarch64__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __hppa__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __mips__
-#    define LG_QUANTUM		3
-#  endif
-#  ifdef __or1k__
-#    define LG_QUANTUM		3
-#  endif
-#  ifdef __powerpc__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __riscv__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __s390__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __SH4__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __tile__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __le32__
-#    define LG_QUANTUM		4
-#  endif
-#  ifndef LG_QUANTUM
-#    error "Unknown minimum alignment for architecture; specify via "
-	 "--with-lg-quantum"
-#  endif
-#endif
-
-#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
-#define	QUANTUM_MASK		(QUANTUM - 1)
-
-/* Return the smallest quantum multiple that is >= a. */
-#define	QUANTUM_CEILING(a)						\
-	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
-
-#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
-#define	LONG_MASK		(LONG - 1)
-
-/* Return the smallest long multiple that is >= a. */
-#define	LONG_CEILING(a)							\
-	(((a) + LONG_MASK) & ~LONG_MASK)
-
-#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
-#define	PTR_MASK		(SIZEOF_PTR - 1)
-
-/* Return the smallest (void *) multiple that is >= a. */
-#define	PTR_CEILING(a)							\
-	(((a) + PTR_MASK) & ~PTR_MASK)
-
-/*
- * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
- * In addition, this controls the spacing of cacheline-spaced size classes.
- *
- * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
- * only handle raw constants.
- */
-#define	LG_CACHELINE		6
-#define	CACHELINE		64
-#define	CACHELINE_MASK		(CACHELINE - 1)
-
-/* Return the smallest cacheline multiple that is >= s. */
-#define	CACHELINE_CEILING(s)						\
-	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
-
-/* Return the nearest aligned address at or below a. */
-#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
-	((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
-
-/* Return the offset between a and the nearest aligned address at or below a. */
-#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
-	((size_t)((uintptr_t)(a) & (alignment - 1)))
-
-/* Return the smallest alignment multiple that is >= s. */
-#define	ALIGNMENT_CEILING(s, alignment)					\
-	(((s) + (alignment - 1)) & ((~(alignment)) + 1))
-
-/* Declare a variable-length array. */
-#if __STDC_VERSION__ < 199901L
-#  ifdef _MSC_VER
-#    include <malloc.h>
-#    define alloca _alloca
-#  else
-#    ifdef JEMALLOC_HAS_ALLOCA_H
-#      include <alloca.h>
-#    else
-#      include <stdlib.h>
-#    endif
-#  endif
-#  define VARIABLE_ARRAY(type, name, count) \
-	type *name = alloca(sizeof(type) * (count))
-#else
-#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
-#endif
-
-#include "jemalloc/internal/nstime_types.h"
-#include "jemalloc/internal/util_types.h"
-#include "jemalloc/internal/spin_types.h"
-#include "jemalloc/internal/prng_types.h"
-#include "jemalloc/internal/ticker_types.h"
-#include "jemalloc/internal/ckh_types.h"
-#include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
-#include "jemalloc/internal/stats_types.h"
-#include "jemalloc/internal/ctl_types.h"
-#include "jemalloc/internal/witness_types.h"
-#include "jemalloc/internal/mutex_types.h"
-#include "jemalloc/internal/tsd_types.h"
-#include "jemalloc/internal/extent_types.h"
-#include "jemalloc/internal/extent_dss_types.h"
-#include "jemalloc/internal/base_types.h"
-#include "jemalloc/internal/arena_types.h"
-#include "jemalloc/internal/bitmap_types.h"
-#include "jemalloc/internal/rtree_types.h"
-#include "jemalloc/internal/pages_types.h"
-#include "jemalloc/internal/tcache_types.h"
-#include "jemalloc/internal/prof_types.h"
-
-
-/******************************************************************************/
-/* STRUCTS */
-/******************************************************************************/
-
-#include "jemalloc/internal/nstime_structs.h"
-#include "jemalloc/internal/spin_structs.h"
-#include "jemalloc/internal/ticker_structs.h"
-#include "jemalloc/internal/ckh_structs.h"
-#include "jemalloc/internal/stats_structs.h"
-#include "jemalloc/internal/ctl_structs.h"
-#include "jemalloc/internal/witness_structs.h"
-#include "jemalloc/internal/mutex_structs.h"
-#include "jemalloc/internal/bitmap_structs.h"
-#include "jemalloc/internal/arena_structs_a.h"
-#include "jemalloc/internal/extent_structs.h"
-#include "jemalloc/internal/extent_dss_structs.h"
-#include "jemalloc/internal/base_structs.h"
-#include "jemalloc/internal/arena_structs_b.h"
-#include "jemalloc/internal/rtree_structs.h"
-#include "jemalloc/internal/tcache_structs.h"
-#include "jemalloc/internal/prof_structs.h"
-#include "jemalloc/internal/tsd_structs.h"
-
-
-/******************************************************************************/
-/* EXTERNS */
-/******************************************************************************/
-
-#pragma GCC visibility push(hidden)
-
-extern bool	opt_abort;
-extern const char	*opt_junk;
-extern bool	opt_junk_alloc;
-extern bool	opt_junk_free;
-extern bool	opt_utrace;
-extern bool	opt_xmalloc;
-extern bool	opt_zero;
-extern unsigned	opt_narenas;
-
-/* Number of CPUs. */
-extern unsigned	ncpus;
-
-/* Number of arenas used for automatic multiplexing of threads and arenas. */
-extern unsigned	narenas_auto;
-
-/*
- * Arenas that are used to service external requests.  Not all elements of the
- * arenas array are necessarily used; arenas are created lazily as needed.
- */
-extern arena_t	**arenas;
-
-/*
- * pind2sz_tab encodes the same information as could be computed by
- * pind2sz_compute().
- */
-extern size_t const	pind2sz_tab[NPSIZES+1];
-/*
- * index2size_tab encodes the same information as could be computed (at
- * unacceptable cost in some code paths) by index2size_compute().
- */
-extern size_t const	index2size_tab[NSIZES];
-/*
- * size2index_tab is a compact lookup table that rounds request sizes up to
- * size classes.  In order to reduce cache footprint, the table is compressed,
- * and all accesses are via size2index().
- */
-extern uint8_t const	size2index_tab[];
-
-void	*a0malloc(size_t size);
-void	a0dalloc(void *ptr);
-void	*bootstrap_malloc(size_t size);
-void	*bootstrap_calloc(size_t num, size_t size);
-void	bootstrap_free(void *ptr);
-void	arena_set(unsigned ind, arena_t *arena);
-unsigned	narenas_total_get(void);
-arena_t	*arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
-arena_tdata_t	*arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
-arena_t	*arena_choose_hard(tsd_t *tsd, bool internal);
-void	arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
-void	iarena_cleanup(tsd_t *tsd);
-void	arena_cleanup(tsd_t *tsd);
-void	arenas_tdata_cleanup(tsd_t *tsd);
-void	jemalloc_prefork(void);
-void	jemalloc_postfork_parent(void);
-void	jemalloc_postfork_child(void);
-
-#include "jemalloc/internal/nstime_externs.h"
-#include "jemalloc/internal/util_externs.h"
-#include "jemalloc/internal/atomic_externs.h"
-#include "jemalloc/internal/ckh_externs.h"
-#include "jemalloc/internal/stats_externs.h"
-#include "jemalloc/internal/ctl_externs.h"
-#include "jemalloc/internal/witness_externs.h"
-#include "jemalloc/internal/mutex_externs.h"
-#include "jemalloc/internal/bitmap_externs.h"
-#include "jemalloc/internal/extent_externs.h"
-#include "jemalloc/internal/extent_dss_externs.h"
-#include "jemalloc/internal/extent_mmap_externs.h"
-#include "jemalloc/internal/base_externs.h"
-#include "jemalloc/internal/arena_externs.h"
-#include "jemalloc/internal/rtree_externs.h"
-#include "jemalloc/internal/pages_externs.h"
-#include "jemalloc/internal/large_externs.h"
-#include "jemalloc/internal/tcache_externs.h"
-#include "jemalloc/internal/prof_externs.h"
-#include "jemalloc/internal/tsd_externs.h"
-
-/******************************************************************************/
-/* INLINES */
-/******************************************************************************/
-
-#include "jemalloc/internal/util_inlines.h"
-#include "jemalloc/internal/atomic_inlines.h"
-#include "jemalloc/internal/spin_inlines.h"
-#include "jemalloc/internal/prng_inlines.h"
-#include "jemalloc/internal/ticker_inlines.h"
-#include "jemalloc/internal/tsd_inlines.h"
-#include "jemalloc/internal/witness_inlines.h"
-#include "jemalloc/internal/mutex_inlines.h"
-#include "jemalloc/internal/rtree_inlines.h"
-#include "jemalloc/internal/extent_inlines.h"
-#include "jemalloc/internal/base_inlines.h"
-
-#ifndef JEMALLOC_ENABLE_INLINE
-pszind_t	psz2ind(size_t psz);
-size_t	pind2sz_compute(pszind_t pind);
-size_t	pind2sz_lookup(pszind_t pind);
-size_t	pind2sz(pszind_t pind);
-size_t	psz2u(size_t psz);
-szind_t	size2index_compute(size_t size);
-szind_t	size2index_lookup(size_t size);
-szind_t	size2index(size_t size);
-size_t	index2size_compute(szind_t index);
-size_t	index2size_lookup(szind_t index);
-size_t	index2size(szind_t index);
-size_t	s2u_compute(size_t size);
-size_t	s2u_lookup(size_t size);
-size_t	s2u(size_t size);
-size_t	sa2u(size_t size, size_t alignment);
-arena_t	*arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
-arena_t	*arena_choose(tsd_t *tsd, arena_t *arena);
-arena_t	*arena_ichoose(tsd_t *tsd, arena_t *arena);
-arena_tdata_t	*arena_tdata_get(tsd_t *tsd, unsigned ind,
-    bool refresh_if_missing);
-arena_t	*arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
-ticker_t	*decay_ticker_get(tsd_t *tsd, unsigned ind);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_ALWAYS_INLINE pszind_t
-psz2ind(size_t psz)
-{
-	if (unlikely(psz > LARGE_MAXCLASS))
-		return (NPSIZES);
-	{
-		pszind_t x = lg_floor((psz<<1)-1);
-		pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
-		    (LG_SIZE_CLASS_GROUP + LG_PAGE);
-		pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
-
-		pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
-		    LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
-
-		size_t delta_inverse_mask = ZU(-1) << lg_delta;
-		pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
-		    ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
-		pszind_t ind = grp + mod;
-		return (ind);
-	}
-}
-
-JEMALLOC_INLINE size_t
-pind2sz_compute(pszind_t pind)
-{
-	if (unlikely(pind == NPSIZES))
-		return (LARGE_MAXCLASS + PAGE);
-	{
-		size_t grp = pind >> LG_SIZE_CLASS_GROUP;
-		size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
-		size_t grp_size_mask = ~((!!grp)-1);
-		size_t grp_size = ((ZU(1) << (LG_PAGE +
-		    (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
-
-		size_t shift = (grp == 0) ? 1 : grp;
-		size_t lg_delta = shift + (LG_PAGE-1);
-		size_t mod_size = (mod+1) << lg_delta;
-
-		size_t sz = grp_size + mod_size;
-		return (sz);
-	}
-}
-
-JEMALLOC_INLINE size_t
-pind2sz_lookup(pszind_t pind)
-{
-	size_t ret = (size_t)pind2sz_tab[pind];
-	assert(ret == pind2sz_compute(pind));
-	return (ret);
-}
-
-JEMALLOC_INLINE size_t
-pind2sz(pszind_t pind)
-{
-	assert(pind < NPSIZES+1);
-	return (pind2sz_lookup(pind));
-}
-
-JEMALLOC_INLINE size_t
-psz2u(size_t psz)
-{
-	if (unlikely(psz > LARGE_MAXCLASS))
-		return (LARGE_MAXCLASS + PAGE);
-	{
-		size_t x = lg_floor((psz<<1)-1);
-		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
-		    LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
-		size_t delta = ZU(1) << lg_delta;
-		size_t delta_mask = delta - 1;
-		size_t usize = (psz + delta_mask) & ~delta_mask;
-		return (usize);
-	}
-}
-
-JEMALLOC_INLINE szind_t
-size2index_compute(size_t size)
-{
-	if (unlikely(size > LARGE_MAXCLASS))
-		return (NSIZES);
-#if (NTBINS != 0)
-	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
-		szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
-		szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
-		return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
-	}
-#endif
-	{
-		szind_t x = lg_floor((size<<1)-1);
-		szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
-		    x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
-		szind_t grp = shift << LG_SIZE_CLASS_GROUP;
-
-		szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
-		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
-
-		size_t delta_inverse_mask = ZU(-1) << lg_delta;
-		szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
-		    ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
-		szind_t index = NTBINS + grp + mod;
-		return (index);
-	}
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-size2index_lookup(size_t size)
-{
-	assert(size <= LOOKUP_MAXCLASS);
-	{
-		szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
-		assert(ret == size2index_compute(size));
-		return (ret);
-	}
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-size2index(size_t size)
-{
-	assert(size > 0);
-	if (likely(size <= LOOKUP_MAXCLASS))
-		return (size2index_lookup(size));
-	return (size2index_compute(size));
-}
-
-JEMALLOC_INLINE size_t
-index2size_compute(szind_t index)
-{
-#if (NTBINS > 0)
-	if (index < NTBINS)
-		return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
-#endif
-	{
-		size_t reduced_index = index - NTBINS;
-		size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
-		size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
-		    1);
-
-		size_t grp_size_mask = ~((!!grp)-1);
-		size_t grp_size = ((ZU(1) << (LG_QUANTUM +
-		    (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
-
-		size_t shift = (grp == 0) ? 1 : grp;
-		size_t lg_delta = shift + (LG_QUANTUM-1);
-		size_t mod_size = (mod+1) << lg_delta;
-
-		size_t usize = grp_size + mod_size;
-		return (usize);
-	}
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-index2size_lookup(szind_t index)
-{
-	size_t ret = (size_t)index2size_tab[index];
-	assert(ret == index2size_compute(index));
-	return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-index2size(szind_t index)
-{
-	assert(index < NSIZES);
-	return (index2size_lookup(index));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-s2u_compute(size_t size)
-{
-	if (unlikely(size > LARGE_MAXCLASS))
-		return (0);
-#if (NTBINS > 0)
-	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
-		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
-		size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
-		return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
-		    (ZU(1) << lg_ceil));
-	}
-#endif
-	{
-		size_t x = lg_floor((size<<1)-1);
-		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
-		    ?  LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
-		size_t delta = ZU(1) << lg_delta;
-		size_t delta_mask = delta - 1;
-		size_t usize = (size + delta_mask) & ~delta_mask;
-		return (usize);
-	}
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-s2u_lookup(size_t size)
-{
-	size_t ret = index2size_lookup(size2index_lookup(size));
-
-	assert(ret == s2u_compute(size));
-	return (ret);
-}
-
-/*
- * Compute usable size that would result from allocating an object with the
- * specified size.
- */
-JEMALLOC_ALWAYS_INLINE size_t
-s2u(size_t size)
-{
-	assert(size > 0);
-	if (likely(size <= LOOKUP_MAXCLASS))
-		return (s2u_lookup(size));
-	return (s2u_compute(size));
-}
-
-/*
- * Compute usable size that would result from allocating an object with the
- * specified size and alignment.
- */
-JEMALLOC_ALWAYS_INLINE size_t
-sa2u(size_t size, size_t alignment)
-{
-	size_t usize;
-
-	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
-
-	/* Try for a small size class. */
-	if (size <= SMALL_MAXCLASS && alignment < PAGE) {
-		/*
-		 * Round size up to the nearest multiple of alignment.
-		 *
-		 * This done, we can take advantage of the fact that for each
-		 * small size class, every object is aligned at the smallest
-		 * power of two that is non-zero in the base two representation
-		 * of the size.  For example:
-		 *
-		 *   Size |   Base 2 | Minimum alignment
-		 *   -----+----------+------------------
-		 *     96 |  1100000 |  32
-		 *    144 | 10100000 |  32
-		 *    192 | 11000000 |  64
-		 */
-		usize = s2u(ALIGNMENT_CEILING(size, alignment));
-		if (usize < LARGE_MINCLASS)
-			return (usize);
-	}
-
-	/* Large size class.  Beware of overflow. */
-
-	if (unlikely(alignment > LARGE_MAXCLASS))
-		return (0);
-
-	/* Make sure result is a large size class. */
-	if (size <= LARGE_MINCLASS)
-		usize = LARGE_MINCLASS;
-	else {
-		usize = s2u(size);
-		if (usize < size) {
-			/* size_t overflow. */
-			return (0);
-		}
-	}
-
-	/*
-	 * Calculate the multi-page mapping that large_palloc() would need in
-	 * order to guarantee the alignment.
-	 */
-	if (usize + large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
-		/* size_t overflow. */
-		return (0);
-	}
-	return (usize);
-}
-
-/* Choose an arena based on a per-thread value. */
-JEMALLOC_INLINE arena_t *
-arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
-{
-	arena_t *ret;
-
-	if (arena != NULL)
-		return (arena);
-
-	ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
-	if (unlikely(ret == NULL))
-		ret = arena_choose_hard(tsd, internal);
-
-	return (ret);
-}
-
-JEMALLOC_INLINE arena_t *
-arena_choose(tsd_t *tsd, arena_t *arena)
-{
-	return (arena_choose_impl(tsd, arena, false));
-}
-
-JEMALLOC_INLINE arena_t *
-arena_ichoose(tsd_t *tsd, arena_t *arena)
-{
-	return (arena_choose_impl(tsd, arena, true));
-}
-
-JEMALLOC_INLINE arena_tdata_t *
-arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
-{
-	arena_tdata_t *tdata;
-	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
-
-	if (unlikely(arenas_tdata == NULL)) {
-		/* arenas_tdata hasn't been initialized yet. */
-		return (arena_tdata_get_hard(tsd, ind));
-	}
-	if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
-		/*
-		 * ind is invalid, cache is old (too small), or tdata to be
-		 * initialized.
-		 */
-		return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
-		    NULL);
-	}
-
-	tdata = &arenas_tdata[ind];
-	if (likely(tdata != NULL) || !refresh_if_missing)
-		return (tdata);
-	return (arena_tdata_get_hard(tsd, ind));
-}
-
-JEMALLOC_INLINE arena_t *
-arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
-{
-	arena_t *ret;
-
-	assert(ind <= MALLOCX_ARENA_MAX);
-
-	ret = arenas[ind];
-	if (unlikely(ret == NULL)) {
-		ret = (arena_t *)atomic_read_p((void **)&arenas[ind]);
-		if (init_if_missing && unlikely(ret == NULL)) {
-			ret = arena_init(tsdn, ind,
-			    (extent_hooks_t *)&extent_hooks_default);
-		}
-	}
-	return (ret);
-}
-
-JEMALLOC_INLINE ticker_t *
-decay_ticker_get(tsd_t *tsd, unsigned ind)
-{
-	arena_tdata_t *tdata;
-
-	tdata = arena_tdata_get(tsd, ind, true);
-	if (unlikely(tdata == NULL))
-		return (NULL);
-	return (&tdata->decay_ticker);
-}
-#endif
-
-#include "jemalloc/internal/bitmap_inlines.h"
-/*
- * Include portions of arena code interleaved with tcache code in order to
- * resolve circular dependencies.
- */
-#include "jemalloc/internal/arena_inlines_a.h"
-
-#ifndef JEMALLOC_ENABLE_INLINE
-extent_t	*iealloc(tsdn_t *tsdn, const void *ptr);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_ALWAYS_INLINE extent_t *
-iealloc(tsdn_t *tsdn, const void *ptr)
-{
-	return (extent_lookup(tsdn, ptr, true));
-}
-#endif
-
-#include "jemalloc/internal/tcache_inlines.h"
-#include "jemalloc/internal/arena_inlines_b.h"
-#include "jemalloc/internal/hash_inlines.h"
-
-#ifndef JEMALLOC_ENABLE_INLINE
-arena_t	*iaalloc(tsdn_t *tsdn, const void *ptr);
-size_t	isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr);
-void	*iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
-    tcache_t *tcache, bool is_internal, arena_t *arena, bool slow_path);
-void	*ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
-    bool slow_path);
-void	*ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
-    tcache_t *tcache, bool is_internal, arena_t *arena);
-void	*ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
-    tcache_t *tcache, arena_t *arena);
-void	*ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
-size_t	ivsalloc(tsdn_t *tsdn, const void *ptr);
-void	idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
-    bool is_internal, bool slow_path);
-void	idalloc(tsd_t *tsd, extent_t *extent, void *ptr);
-void	isdalloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
-    tcache_t *tcache, bool slow_path);
-void	*iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr,
-    size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero,
-    tcache_t *tcache, arena_t *arena);
-void	*iralloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
-    size_t size, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
-void	*iralloc(tsd_t *tsd, extent_t *extent, void *ptr, size_t oldsize,
-    size_t size, size_t alignment, bool zero);
-bool	ixalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
-    size_t size, size_t extra, size_t alignment, bool zero);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_ALWAYS_INLINE arena_t *
-iaalloc(tsdn_t *tsdn, const void *ptr)
-{
-	assert(ptr != NULL);
-
-	return (arena_aalloc(tsdn, ptr));
-}
-
-/*
- * Typical usage:
- *   tsdn_t *tsdn = [...]
- *   void *ptr = [...]
- *   extent_t *extent = iealloc(tsdn, ptr);
- *   size_t sz = isalloc(tsdn, extent, ptr);
- */
-JEMALLOC_ALWAYS_INLINE size_t
-isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
-{
-	assert(ptr != NULL);
-
-	return (arena_salloc(tsdn, extent, ptr));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
-    bool is_internal, arena_t *arena, bool slow_path)
-{
-	void *ret;
-
-	assert(size != 0);
-	assert(!is_internal || tcache == NULL);
-	assert(!is_internal || arena == NULL || arena_ind_get(arena) <
-	    narenas_auto);
-
-	ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
-	if (config_stats && is_internal && likely(ret != NULL)) {
-		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn,
-		    iealloc(tsdn, ret), ret));
-	}
-	return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
-{
-	return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
-	    false, NULL, slow_path));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
-    tcache_t *tcache, bool is_internal, arena_t *arena)
-{
-	void *ret;
-
-	assert(usize != 0);
-	assert(usize == sa2u(usize, alignment));
-	assert(!is_internal || tcache == NULL);
-	assert(!is_internal || arena == NULL || arena_ind_get(arena) <
-	    narenas_auto);
-
-	ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
-	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
-	if (config_stats && is_internal && likely(ret != NULL)) {
-		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn,
-		    iealloc(tsdn, ret), ret));
-	}
-	return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
-    tcache_t *tcache, arena_t *arena)
-{
-	return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
-{
-	return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
-	    tcache_get(tsd, true), false, NULL));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-ivsalloc(tsdn_t *tsdn, const void *ptr)
-{
-	extent_t *extent;
-
-	/*
-	 * Return 0 if ptr is not within an extent managed by jemalloc.  This
-	 * function has two extra costs relative to isalloc():
-	 * - The extent_lookup() call cannot claim to be a dependent lookup,
-	 *   which induces rtree lookup load dependencies.
-	 * - The lookup may fail, so there is an extra branch to check for
-	 *   failure.
-	 * */
-	extent = extent_lookup(tsdn, ptr, false);
-	if (extent == NULL)
-		return (0);
-	assert(extent_active_get(extent));
-	/* Only slab members should be looked up via interior pointers. */
-	assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
-
-	return (isalloc(tsdn, extent, ptr));
-}
-
-JEMALLOC_ALWAYS_INLINE void
-idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
-    bool is_internal, bool slow_path)
-{
-	assert(ptr != NULL);
-	assert(!is_internal || tcache == NULL);
-	assert(!is_internal || arena_ind_get(iaalloc(tsdn, ptr)) <
-	    narenas_auto);
-	if (config_stats && is_internal) {
-		arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, extent,
-		    ptr));
-	}
-
-	arena_dalloc(tsdn, extent, ptr, tcache, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-idalloc(tsd_t *tsd, extent_t *extent, void *ptr)
-{
-	idalloctm(tsd_tsdn(tsd), extent, ptr, tcache_get(tsd, false), false,
-	    true);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-isdalloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
-    tcache_t *tcache, bool slow_path)
-{
-	arena_sdalloc(tsdn, extent, ptr, size, tcache, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
-    size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache,
-    arena_t *arena)
-{
-	void *p;
-	size_t usize, copysize;
-
-	usize = sa2u(size + extra, alignment);
-	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
-		return (NULL);
-	p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
-	if (p == NULL) {
-		if (extra == 0)
-			return (NULL);
-		/* Try again, without extra this time. */
-		usize = sa2u(size, alignment);
-		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
-			return (NULL);
-		p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
-		if (p == NULL)
-			return (NULL);
-	}
-	/*
-	 * Copy at most size bytes (not size+extra), since the caller has no
-	 * expectation that the extra bytes will be reliably preserved.
-	 */
-	copysize = (size < oldsize) ? size : oldsize;
-	memcpy(p, ptr, copysize);
-	isdalloct(tsdn, extent, ptr, oldsize, tcache, true);
-	return (p);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iralloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
-    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
-{
-	assert(ptr != NULL);
-	assert(size != 0);
-
-	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
-	    != 0) {
-		/*
-		 * Existing object alignment is inadequate; allocate new space
-		 * and copy.
-		 */
-		return (iralloct_realign(tsdn, extent, ptr, oldsize, size, 0,
-		    alignment, zero, tcache, arena));
-	}
-
-	return (arena_ralloc(tsdn, arena, extent, ptr, oldsize, size, alignment,
-	    zero, tcache));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iralloc(tsd_t *tsd, extent_t *extent, void *ptr, size_t oldsize, size_t size,
-    size_t alignment, bool zero)
-{
-	return (iralloct(tsd_tsdn(tsd), extent, ptr, oldsize, size, alignment,
-	    zero, tcache_get(tsd, true), NULL));
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-ixalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero)
-{
-	assert(ptr != NULL);
-	assert(size != 0);
-
-	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
-	    != 0) {
-		/* Existing object alignment is inadequate. */
-		return (true);
-	}
-
-	return (arena_ralloc_no_move(tsdn, extent, ptr, oldsize, size, extra,
-	    zero));
-}
-#endif
-
-#include "jemalloc/internal/prof_inlines.h"
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_H */
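Among other things, the jemalloc_internal.h deleted above documents the MALLOCX flag layout (arena index in the top 12 bits, tcache index in bits 8..19, lg(alignment) in the low 6 bits: "aaaaaaaa aaaatttt tttttttt 0znnnnnn"). A minimal sketch of that packing, using hypothetical helper names rather than the removed macros:

    /*
     * Illustrative restatement of the MALLOCX flag layout documented in the
     * removed header.  Helper names are hypothetical; only the bit positions
     * come from the deleted macros.
     */
    #include <assert.h>
    #include <stddef.h>

    #define ARENA_SHIFT     20
    #define LG_ALIGN_MASK   ((1 << 6) - 1)

    /* Pack an arena index, biased by 1 so a zero field means "unspecified". */
    static int
    flags_with_arena(unsigned arena_ind)
    {
        return (int)((arena_ind + 1) << ARENA_SHIFT);
    }

    /* Recover the arena index, mirroring MALLOCX_ARENA_GET() above. */
    static unsigned
    arena_from_flags(int flags)
    {
        return ((unsigned)flags >> ARENA_SHIFT) - 1;
    }

    /* Recover the alignment, mirroring MALLOCX_ALIGN_GET_SPECIFIED() above. */
    static size_t
    align_from_flags(int flags)
    {
        return (size_t)1 << (flags & LG_ALIGN_MASK);
    }

    int
    main(void)
    {
        int flags = flags_with_arena(3) | 4;   /* arena 3, lg(alignment) = 4 */

        assert(arena_from_flags(flags) == 3);
        assert(align_from_flags(flags) == 16);
        return 0;
    }

The +1 bias on the arena index mirrors MALLOCX_ARENA_GET() above, which subtracts 1, and is what the "Arena index bias" note in the removed header refers to.
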
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
deleted file mode 100644
index dc9df35..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
+++ /dev/null
@@ -1,1184 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_H
-#define	JEMALLOC_INTERNAL_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "jemalloc_internal_defs.h"
-#include "jemalloc/internal/jemalloc_internal_decls.h"
-
-#ifdef JEMALLOC_UTRACE
-#include <sys/ktrace.h>
-#endif
-
-#define	JEMALLOC_NO_DEMANGLE
-#ifdef JEMALLOC_JET
-#  define JEMALLOC_N(n) jet_##n
-#  include "jemalloc/internal/public_namespace.h"
-#  define JEMALLOC_NO_RENAME
-#  include "../jemalloc@install_suffix@.h"
-#  undef JEMALLOC_NO_RENAME
-#else
-#  define JEMALLOC_N(n) @private_namespace@##n
-#  include "../jemalloc@install_suffix@.h"
-#endif
-#include "jemalloc/internal/private_namespace.h"
-
-static const bool config_debug =
-#ifdef JEMALLOC_DEBUG
-    true
-#else
-    false
-#endif
-    ;
-static const bool have_dss =
-#ifdef JEMALLOC_DSS
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_fill =
-#ifdef JEMALLOC_FILL
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_lazy_lock =
-#ifdef JEMALLOC_LAZY_LOCK
-    true
-#else
-    false
-#endif
-    ;
-static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
-static const bool config_prof =
-#ifdef JEMALLOC_PROF
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_prof_libgcc =
-#ifdef JEMALLOC_PROF_LIBGCC
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_prof_libunwind =
-#ifdef JEMALLOC_PROF_LIBUNWIND
-    true
-#else
-    false
-#endif
-    ;
-static const bool maps_coalesce =
-#ifdef JEMALLOC_MAPS_COALESCE
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_munmap =
-#ifdef JEMALLOC_MUNMAP
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_stats =
-#ifdef JEMALLOC_STATS
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_tcache =
-#ifdef JEMALLOC_TCACHE
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_tls =
-#ifdef JEMALLOC_TLS
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_utrace =
-#ifdef JEMALLOC_UTRACE
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_xmalloc =
-#ifdef JEMALLOC_XMALLOC
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_ivsalloc =
-#ifdef JEMALLOC_IVSALLOC
-    true
-#else
-    false
-#endif
-    ;
-static const bool config_cache_oblivious =
-#ifdef JEMALLOC_CACHE_OBLIVIOUS
-    true
-#else
-    false
-#endif
-    ;
-static const bool have_thp =
-#ifdef JEMALLOC_THP
-    true
-#else
-    false
-#endif
-    ;
-
-#if defined(JEMALLOC_C11ATOMICS) && !defined(__cplusplus)
-#include <stdatomic.h>
-#endif
-
-#ifdef JEMALLOC_ATOMIC9
-#include <machine/atomic.h>
-#endif
-
-#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
-#include <libkern/OSAtomic.h>
-#endif
-
-#ifdef JEMALLOC_ZONE
-#include <mach/mach_error.h>
-#include <mach/mach_init.h>
-#include <mach/vm_map.h>
-#endif
-
-#include "jemalloc/internal/ph.h"
-#ifndef __PGI
-#define	RB_COMPACT
-#endif
-#include "jemalloc/internal/rb.h"
-#include "jemalloc/internal/qr.h"
-#include "jemalloc/internal/ql.h"
-
-/*
- * jemalloc can conceptually be broken into components (arena, tcache, etc.),
- * but there are circular dependencies that cannot be broken without
- * substantial performance degradation.
- *
- * Historically, we dealt with this by splitting each header into four
- * sections (types, structs, externs, and inlines), and included each header
- * file multiple times
- * in this file, picking out the portion we want on each pass using the
- * following #defines:
- *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
- *                        types.
- *   JEMALLOC_H_STRUCTS : Data structures.
- *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
- *   JEMALLOC_H_INLINES : Inline functions.
- *
- * We're moving toward a world in which the dependencies are explicit; each file
- * will #include the headers it depends on (rather than relying on them being
- * implicitly available via this file including every header file in the
- * project).
- *
- * We're now in an intermediate state: we've broken up the header files to avoid
- * having to include each one multiple times, but have not yet moved the
- * dependency information into the header files (i.e. we still rely on the
- * ordering in this file to ensure all a header's dependencies are available in
- * its translation unit).  Each component is now broken up into multiple header
- * files, corresponding to the sections above (e.g. instead of "tsd.h", we now
- * have "tsd_types.h", "tsd_structs.h", "tsd_externs.h", "tsd_inlines.h").
- */
-
-#include "jemalloc/internal/jemalloc_internal_macros.h"
-
-/******************************************************************************/
-/* TYPES */
-/******************************************************************************/
-
-/* Page size index type. */
-typedef unsigned pszind_t;
-
-/* Size class index type. */
-typedef unsigned szind_t;
-
-/*
- * Flags bits:
- *
- * a: arena
- * t: tcache
- * 0: unused
- * z: zero
- * n: alignment
- *
- * aaaaaaaa aaaatttt tttttttt 0znnnnnn
- */
-#define	MALLOCX_ARENA_BITS	12
-#define	MALLOCX_TCACHE_BITS	12
-#define	MALLOCX_LG_ALIGN_BITS	6
-#define	MALLOCX_ARENA_SHIFT	20
-#define	MALLOCX_TCACHE_SHIFT	8
-#define	MALLOCX_ARENA_MASK \
-    (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
-/* NB: Arena index bias decreases the maximum number of arenas by 1. */
-#define	MALLOCX_ARENA_MAX	((1 << MALLOCX_ARENA_BITS) - 2)
-#define	MALLOCX_TCACHE_MASK \
-    (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
-#define	MALLOCX_TCACHE_MAX	((1 << MALLOCX_TCACHE_BITS) - 3)
-#define	MALLOCX_LG_ALIGN_MASK	((1 << MALLOCX_LG_ALIGN_BITS) - 1)
-/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
-#define	MALLOCX_ALIGN_GET_SPECIFIED(flags)				\
-    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
-#define	MALLOCX_ALIGN_GET(flags)					\
-    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
-#define	MALLOCX_ZERO_GET(flags)						\
-    ((bool)(flags & MALLOCX_ZERO))
-
-#define	MALLOCX_TCACHE_GET(flags)					\
-    (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
-#define	MALLOCX_ARENA_GET(flags)					\
-    (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
-
-/* Smallest size class to support. */
-#define	TINY_MIN		(1U << LG_TINY_MIN)
-
-/*
- * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
- * classes).
- */
-#ifndef LG_QUANTUM
-#  if (defined(__i386__) || defined(_M_IX86))
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __ia64__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __alpha__
-#    define LG_QUANTUM		4
-#  endif
-#  if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
-#    define LG_QUANTUM		4
-#  endif
-#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __arm__
-#    define LG_QUANTUM		3
-#  endif
-#  ifdef __aarch64__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __hppa__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __mips__
-#    define LG_QUANTUM		3
-#  endif
-#  ifdef __or1k__
-#    define LG_QUANTUM		3
-#  endif
-#  ifdef __powerpc__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __riscv__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __s390__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __SH4__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __tile__
-#    define LG_QUANTUM		4
-#  endif
-#  ifdef __le32__
-#    define LG_QUANTUM		4
-#  endif
-#  ifndef LG_QUANTUM
-#    error "Unknown minimum alignment for architecture; specify via "
-	 "--with-lg-quantum"
-#  endif
-#endif
-
-#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
-#define	QUANTUM_MASK		(QUANTUM - 1)
-
-/* Return the smallest quantum multiple that is >= a. */
-#define	QUANTUM_CEILING(a)						\
-	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
-
-#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
-#define	LONG_MASK		(LONG - 1)
-
-/* Return the smallest long multiple that is >= a. */
-#define	LONG_CEILING(a)							\
-	(((a) + LONG_MASK) & ~LONG_MASK)
-
-#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
-#define	PTR_MASK		(SIZEOF_PTR - 1)
-
-/* Return the smallest (void *) multiple that is >= a. */
-#define	PTR_CEILING(a)							\
-	(((a) + PTR_MASK) & ~PTR_MASK)
-
-/*
- * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
- * In addition, this controls the spacing of cacheline-spaced size classes.
- *
- * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
- * only handle raw constants.
- */
-#define	LG_CACHELINE		6
-#define	CACHELINE		64
-#define	CACHELINE_MASK		(CACHELINE - 1)
-
-/* Return the smallest cacheline multiple that is >= s. */
-#define	CACHELINE_CEILING(s)						\
-	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
-
-/* Return the nearest aligned address at or below a. */
-#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
-	((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
-
-/* Return the offset between a and the nearest aligned address at or below a. */
-#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
-	((size_t)((uintptr_t)(a) & (alignment - 1)))
-
-/* Return the smallest alignment multiple that is >= s. */
-#define	ALIGNMENT_CEILING(s, alignment)					\
-	(((s) + (alignment - 1)) & ((~(alignment)) + 1))
-
-/* Declare a variable-length array. */
-#if __STDC_VERSION__ < 199901L
-#  ifdef _MSC_VER
-#    include <malloc.h>
-#    define alloca _alloca
-#  else
-#    ifdef JEMALLOC_HAS_ALLOCA_H
-#      include <alloca.h>
-#    else
-#      include <stdlib.h>
-#    endif
-#  endif
-#  define VARIABLE_ARRAY(type, name, count) \
-	type *name = alloca(sizeof(type) * (count))
-#else
-#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
-#endif
-
-#include "jemalloc/internal/nstime_types.h"
-#include "jemalloc/internal/util_types.h"
-#include "jemalloc/internal/spin_types.h"
-#include "jemalloc/internal/prng_types.h"
-#include "jemalloc/internal/ticker_types.h"
-#include "jemalloc/internal/ckh_types.h"
-#include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
-#include "jemalloc/internal/stats_types.h"
-#include "jemalloc/internal/ctl_types.h"
-#include "jemalloc/internal/witness_types.h"
-#include "jemalloc/internal/mutex_types.h"
-#include "jemalloc/internal/tsd_types.h"
-#include "jemalloc/internal/extent_types.h"
-#include "jemalloc/internal/extent_dss_types.h"
-#include "jemalloc/internal/base_types.h"
-#include "jemalloc/internal/arena_types.h"
-#include "jemalloc/internal/bitmap_types.h"
-#include "jemalloc/internal/rtree_types.h"
-#include "jemalloc/internal/pages_types.h"
-#include "jemalloc/internal/tcache_types.h"
-#include "jemalloc/internal/prof_types.h"
-
-
-/******************************************************************************/
-/* STRUCTS */
-/******************************************************************************/
-
-#include "jemalloc/internal/nstime_structs.h"
-#include "jemalloc/internal/spin_structs.h"
-#include "jemalloc/internal/ticker_structs.h"
-#include "jemalloc/internal/ckh_structs.h"
-#include "jemalloc/internal/stats_structs.h"
-#include "jemalloc/internal/ctl_structs.h"
-#include "jemalloc/internal/witness_structs.h"
-#include "jemalloc/internal/mutex_structs.h"
-#include "jemalloc/internal/bitmap_structs.h"
-#include "jemalloc/internal/arena_structs_a.h"
-#include "jemalloc/internal/extent_structs.h"
-#include "jemalloc/internal/extent_dss_structs.h"
-#include "jemalloc/internal/base_structs.h"
-#include "jemalloc/internal/arena_structs_b.h"
-#include "jemalloc/internal/rtree_structs.h"
-#include "jemalloc/internal/tcache_structs.h"
-#include "jemalloc/internal/prof_structs.h"
-#include "jemalloc/internal/tsd_structs.h"
-
-
-/******************************************************************************/
-/* EXTERNS */
-/******************************************************************************/
-
-extern bool	opt_abort;
-extern const char	*opt_junk;
-extern bool	opt_junk_alloc;
-extern bool	opt_junk_free;
-extern bool	opt_utrace;
-extern bool	opt_xmalloc;
-extern bool	opt_zero;
-extern unsigned	opt_narenas;
-
-/* Number of CPUs. */
-extern unsigned	ncpus;
-
-/* Number of arenas used for automatic multiplexing of threads and arenas. */
-extern unsigned	narenas_auto;
-
-/*
- * Arenas that are used to service external requests.  Not all elements of the
- * arenas array are necessarily used; arenas are created lazily as needed.
- */
-extern arena_t	**arenas;
-
-/*
- * pind2sz_tab encodes the same information as could be computed by
- * pind2sz_compute().
- */
-extern size_t const	pind2sz_tab[NPSIZES+1];
-/*
- * index2size_tab encodes the same information as could be computed (at
- * unacceptable cost in some code paths) by index2size_compute().
- */
-extern size_t const	index2size_tab[NSIZES];
-/*
- * size2index_tab is a compact lookup table that rounds request sizes up to
- * size classes.  In order to reduce cache footprint, the table is compressed,
- * and all accesses are via size2index().
- */
-extern uint8_t const	size2index_tab[];
-
-void	*a0malloc(size_t size);
-void	a0dalloc(void *ptr);
-void	*bootstrap_malloc(size_t size);
-void	*bootstrap_calloc(size_t num, size_t size);
-void	bootstrap_free(void *ptr);
-void	arena_set(unsigned ind, arena_t *arena);
-unsigned	narenas_total_get(void);
-arena_t	*arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
-arena_tdata_t	*arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
-arena_t	*arena_choose_hard(tsd_t *tsd, bool internal);
-void	arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
-void	iarena_cleanup(tsd_t *tsd);
-void	arena_cleanup(tsd_t *tsd);
-void	arenas_tdata_cleanup(tsd_t *tsd);
-void	jemalloc_prefork(void);
-void	jemalloc_postfork_parent(void);
-void	jemalloc_postfork_child(void);
-
-#include "jemalloc/internal/nstime_externs.h"
-#include "jemalloc/internal/util_externs.h"
-#include "jemalloc/internal/atomic_externs.h"
-#include "jemalloc/internal/ckh_externs.h"
-#include "jemalloc/internal/stats_externs.h"
-#include "jemalloc/internal/ctl_externs.h"
-#include "jemalloc/internal/witness_externs.h"
-#include "jemalloc/internal/mutex_externs.h"
-#include "jemalloc/internal/bitmap_externs.h"
-#include "jemalloc/internal/extent_externs.h"
-#include "jemalloc/internal/extent_dss_externs.h"
-#include "jemalloc/internal/extent_mmap_externs.h"
-#include "jemalloc/internal/base_externs.h"
-#include "jemalloc/internal/arena_externs.h"
-#include "jemalloc/internal/rtree_externs.h"
-#include "jemalloc/internal/pages_externs.h"
-#include "jemalloc/internal/large_externs.h"
-#include "jemalloc/internal/tcache_externs.h"
-#include "jemalloc/internal/prof_externs.h"
-#include "jemalloc/internal/tsd_externs.h"
-
-/******************************************************************************/
-/* INLINES */
-/******************************************************************************/
-
-#include "jemalloc/internal/util_inlines.h"
-#include "jemalloc/internal/atomic_inlines.h"
-#include "jemalloc/internal/spin_inlines.h"
-#include "jemalloc/internal/prng_inlines.h"
-#include "jemalloc/internal/ticker_inlines.h"
-#include "jemalloc/internal/tsd_inlines.h"
-#include "jemalloc/internal/witness_inlines.h"
-#include "jemalloc/internal/mutex_inlines.h"
-#include "jemalloc/internal/rtree_inlines.h"
-#include "jemalloc/internal/extent_inlines.h"
-#include "jemalloc/internal/base_inlines.h"
-
-#ifndef JEMALLOC_ENABLE_INLINE
-pszind_t	psz2ind(size_t psz);
-size_t	pind2sz_compute(pszind_t pind);
-size_t	pind2sz_lookup(pszind_t pind);
-size_t	pind2sz(pszind_t pind);
-size_t	psz2u(size_t psz);
-szind_t	size2index_compute(size_t size);
-szind_t	size2index_lookup(size_t size);
-szind_t	size2index(size_t size);
-size_t	index2size_compute(szind_t index);
-size_t	index2size_lookup(szind_t index);
-size_t	index2size(szind_t index);
-size_t	s2u_compute(size_t size);
-size_t	s2u_lookup(size_t size);
-size_t	s2u(size_t size);
-size_t	sa2u(size_t size, size_t alignment);
-arena_t	*arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
-arena_t	*arena_choose(tsd_t *tsd, arena_t *arena);
-arena_t	*arena_ichoose(tsd_t *tsd, arena_t *arena);
-arena_tdata_t	*arena_tdata_get(tsd_t *tsd, unsigned ind,
-    bool refresh_if_missing);
-arena_t	*arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
-ticker_t	*decay_ticker_get(tsd_t *tsd, unsigned ind);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_ALWAYS_INLINE pszind_t
-psz2ind(size_t psz)
-{
-	if (unlikely(psz > LARGE_MAXCLASS))
-		return (NPSIZES);
-	{
-		pszind_t x = lg_floor((psz<<1)-1);
-		pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
-		    (LG_SIZE_CLASS_GROUP + LG_PAGE);
-		pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
-
-		pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
-		    LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
-
-		size_t delta_inverse_mask = ZI(-1) << lg_delta;
-		pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
-		    ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
-		pszind_t ind = grp + mod;
-		return (ind);
-	}
-}
-
-JEMALLOC_INLINE size_t
-pind2sz_compute(pszind_t pind)
-{
-	if (unlikely(pind == NPSIZES))
-		return (LARGE_MAXCLASS + PAGE);
-	{
-		size_t grp = pind >> LG_SIZE_CLASS_GROUP;
-		size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
-		size_t grp_size_mask = ~((!!grp)-1);
-		size_t grp_size = ((ZU(1) << (LG_PAGE +
-		    (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
-
-		size_t shift = (grp == 0) ? 1 : grp;
-		size_t lg_delta = shift + (LG_PAGE-1);
-		size_t mod_size = (mod+1) << lg_delta;
-
-		size_t sz = grp_size + mod_size;
-		return (sz);
-	}
-}
-
-JEMALLOC_INLINE size_t
-pind2sz_lookup(pszind_t pind)
-{
-	size_t ret = (size_t)pind2sz_tab[pind];
-	assert(ret == pind2sz_compute(pind));
-	return (ret);
-}
-
-JEMALLOC_INLINE size_t
-pind2sz(pszind_t pind)
-{
-	assert(pind < NPSIZES+1);
-	return (pind2sz_lookup(pind));
-}
-
-JEMALLOC_INLINE size_t
-psz2u(size_t psz)
-{
-	if (unlikely(psz > LARGE_MAXCLASS))
-		return (LARGE_MAXCLASS + PAGE);
-	{
-		size_t x = lg_floor((psz<<1)-1);
-		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
-		    LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
-		size_t delta = ZU(1) << lg_delta;
-		size_t delta_mask = delta - 1;
-		size_t usize = (psz + delta_mask) & ~delta_mask;
-		return (usize);
-	}
-}
-
-JEMALLOC_INLINE szind_t
-size2index_compute(size_t size)
-{
-	if (unlikely(size > LARGE_MAXCLASS))
-		return (NSIZES);
-#if (NTBINS != 0)
-	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
-		szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
-		szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
-		return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
-	}
-#endif
-	{
-		szind_t x = lg_floor((size<<1)-1);
-		szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
-		    x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
-		szind_t grp = shift << LG_SIZE_CLASS_GROUP;
-
-		szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
-		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
-
-		size_t delta_inverse_mask = ZI(-1) << lg_delta;
-		szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
-		    ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
-		szind_t index = NTBINS + grp + mod;
-		return (index);
-	}
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-size2index_lookup(size_t size)
-{
-	assert(size <= LOOKUP_MAXCLASS);
-	{
-		szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
-		assert(ret == size2index_compute(size));
-		return (ret);
-	}
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-size2index(size_t size)
-{
-	assert(size > 0);
-	if (likely(size <= LOOKUP_MAXCLASS))
-		return (size2index_lookup(size));
-	return (size2index_compute(size));
-}
-
-JEMALLOC_INLINE size_t
-index2size_compute(szind_t index)
-{
-#if (NTBINS > 0)
-	if (index < NTBINS)
-		return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
-#endif
-	{
-		size_t reduced_index = index - NTBINS;
-		size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
-		size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
-		    1);
-
-		size_t grp_size_mask = ~((!!grp)-1);
-		size_t grp_size = ((ZU(1) << (LG_QUANTUM +
-		    (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
-
-		size_t shift = (grp == 0) ? 1 : grp;
-		size_t lg_delta = shift + (LG_QUANTUM-1);
-		size_t mod_size = (mod+1) << lg_delta;
-
-		size_t usize = grp_size + mod_size;
-		return (usize);
-	}
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-index2size_lookup(szind_t index)
-{
-	size_t ret = (size_t)index2size_tab[index];
-	assert(ret == index2size_compute(index));
-	return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-index2size(szind_t index)
-{
-	assert(index < NSIZES);
-	return (index2size_lookup(index));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-s2u_compute(size_t size)
-{
-	if (unlikely(size > LARGE_MAXCLASS))
-		return (0);
-#if (NTBINS > 0)
-	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
-		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
-		size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
-		return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
-		    (ZU(1) << lg_ceil));
-	}
-#endif
-	{
-		size_t x = lg_floor((size<<1)-1);
-		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
-		    ?  LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
-		size_t delta = ZU(1) << lg_delta;
-		size_t delta_mask = delta - 1;
-		size_t usize = (size + delta_mask) & ~delta_mask;
-		return (usize);
-	}
-}
-
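For illustration, a minimal standalone sketch of the rounding that s2u_compute() performs for non-tiny sizes. It assumes LG_QUANTUM == 4 and LG_SIZE_CLASS_GROUP == 2 (both are configure-dependent) and omits the tiny-class branch; all ex_-prefixed names are hypothetical and not part of jemalloc.

#include <assert.h>
#include <stddef.h>

#define EX_LG_QUANTUM		4	/* assumed: 16-byte quantum */
#define EX_LG_SIZE_CLASS_GROUP	2	/* assumed: 4 size classes per doubling */

/* floor(log2(x)) for x >= 1. */
static size_t
ex_lg_floor(size_t x)
{
	size_t lg = 0;

	while (x >>= 1)
		lg++;
	return (lg);
}

/* Round size up to the next size class (non-tiny sizes only). */
static size_t
ex_s2u(size_t size)
{
	size_t x = ex_lg_floor((size << 1) - 1);
	size_t lg_delta = (x < EX_LG_SIZE_CLASS_GROUP + EX_LG_QUANTUM + 1) ?
	    EX_LG_QUANTUM : x - EX_LG_SIZE_CLASS_GROUP - 1;
	size_t delta_mask = ((size_t)1 << lg_delta) - 1;

	return ((size + delta_mask) & ~delta_mask);
}

int
main(void)
{
	/* Under the assumed parameters: 17 -> 32, 100 -> 112, 200 -> 224. */
	assert(ex_s2u(17) == 32);
	assert(ex_s2u(100) == 112);
	assert(ex_s2u(200) == 224);
	return (0);
}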
-JEMALLOC_ALWAYS_INLINE size_t
-s2u_lookup(size_t size)
-{
-	size_t ret = index2size_lookup(size2index_lookup(size));
-
-	assert(ret == s2u_compute(size));
-	return (ret);
-}
-
-/*
- * Compute usable size that would result from allocating an object with the
- * specified size.
- */
-JEMALLOC_ALWAYS_INLINE size_t
-s2u(size_t size)
-{
-	assert(size > 0);
-	if (likely(size <= LOOKUP_MAXCLASS))
-		return (s2u_lookup(size));
-	return (s2u_compute(size));
-}
-
-/*
- * Compute usable size that would result from allocating an object with the
- * specified size and alignment.
- */
-JEMALLOC_ALWAYS_INLINE size_t
-sa2u(size_t size, size_t alignment)
-{
-	size_t usize;
-
-	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
-
-	/* Try for a small size class. */
-	if (size <= SMALL_MAXCLASS && alignment < PAGE) {
-		/*
-		 * Round size up to the nearest multiple of alignment.
-		 *
-		 * This done, we can take advantage of the fact that for each
-		 * small size class, every object is aligned at the smallest
-		 * power of two that is non-zero in the base two representation
-		 * of the size.  For example:
-		 *
-		 *   Size |   Base 2 | Minimum alignment
-		 *   -----+----------+------------------
-		 *     96 |  1100000 |  32
-		 *    160 | 10100000 |  32
-		 *    192 | 11000000 |  64
-		 */
-		usize = s2u(ALIGNMENT_CEILING(size, alignment));
-		if (usize < LARGE_MINCLASS)
-			return (usize);
-	}
-
-	/* Large size class.  Beware of overflow. */
-
-	if (unlikely(alignment > LARGE_MAXCLASS))
-		return (0);
-
-	/* Make sure result is a large size class. */
-	if (size <= LARGE_MINCLASS)
-		usize = LARGE_MINCLASS;
-	else {
-		usize = s2u(size);
-		if (usize < size) {
-			/* size_t overflow. */
-			return (0);
-		}
-	}
-
-	/*
-	 * Calculate the multi-page mapping that large_palloc() would need in
-	 * order to guarantee the alignment.
-	 */
-	if (usize + large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
-		/* size_t overflow. */
-		return (0);
-	}
-	return (usize);
-}
-
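The small-size-class branch of sa2u() above depends on rounding size up to a multiple of a power-of-two alignment; a minimal sketch of that rounding, with a hypothetical helper standing in for jemalloc's ALIGNMENT_CEILING():

#include <assert.h>
#include <stddef.h>

/* Round s up to the nearest multiple of a power-of-two alignment. */
static size_t
ex_alignment_ceiling(size_t s, size_t alignment)
{
	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
	return ((s + alignment - 1) & ~(alignment - 1));
}

int
main(void)
{
	assert(ex_alignment_ceiling(96, 32) == 96);	/* already a multiple */
	assert(ex_alignment_ceiling(100, 64) == 128);	/* rounds up */
	return (0);
}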
-/* Choose an arena based on a per-thread value. */
-JEMALLOC_INLINE arena_t *
-arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
-{
-	arena_t *ret;
-
-	if (arena != NULL)
-		return (arena);
-
-	ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
-	if (unlikely(ret == NULL))
-		ret = arena_choose_hard(tsd, internal);
-
-	return (ret);
-}
-
-JEMALLOC_INLINE arena_t *
-arena_choose(tsd_t *tsd, arena_t *arena)
-{
-	return (arena_choose_impl(tsd, arena, false));
-}
-
-JEMALLOC_INLINE arena_t *
-arena_ichoose(tsd_t *tsd, arena_t *arena)
-{
-	return (arena_choose_impl(tsd, arena, true));
-}
-
-JEMALLOC_INLINE arena_tdata_t *
-arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
-{
-	arena_tdata_t *tdata;
-	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
-
-	if (unlikely(arenas_tdata == NULL)) {
-		/* arenas_tdata hasn't been initialized yet. */
-		return (arena_tdata_get_hard(tsd, ind));
-	}
-	if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
-		/*
-		 * ind is invalid, cache is old (too small), or tdata to be
-		 * initialized.
-		 */
-		return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
-		    NULL);
-	}
-
-	tdata = &arenas_tdata[ind];
-	if (likely(tdata != NULL) || !refresh_if_missing)
-		return (tdata);
-	return (arena_tdata_get_hard(tsd, ind));
-}
-
-JEMALLOC_INLINE arena_t *
-arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
-{
-	arena_t *ret;
-
-	assert(ind <= MALLOCX_ARENA_MAX);
-
-	ret = arenas[ind];
-	if (unlikely(ret == NULL)) {
-		ret = (arena_t *)atomic_read_p((void **)&arenas[ind]);
-		if (init_if_missing && unlikely(ret == NULL)) {
-			ret = arena_init(tsdn, ind,
-			    (extent_hooks_t *)&extent_hooks_default);
-		}
-	}
-	return (ret);
-}
-
-JEMALLOC_INLINE ticker_t *
-decay_ticker_get(tsd_t *tsd, unsigned ind)
-{
-	arena_tdata_t *tdata;
-
-	tdata = arena_tdata_get(tsd, ind, true);
-	if (unlikely(tdata == NULL))
-		return (NULL);
-	return (&tdata->decay_ticker);
-}
-#endif
-
-#include "jemalloc/internal/bitmap_inlines.h"
-/*
- * Include portions of arena code interleaved with tcache code in order to
- * resolve circular dependencies.
- */
-#include "jemalloc/internal/arena_inlines_a.h"
-
-#ifndef JEMALLOC_ENABLE_INLINE
-extent_t	*iealloc(tsdn_t *tsdn, const void *ptr);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_ALWAYS_INLINE extent_t *
-iealloc(tsdn_t *tsdn, const void *ptr)
-{
-	return (extent_lookup(tsdn, ptr, true));
-}
-#endif
-
-#include "jemalloc/internal/tcache_inlines.h"
-#include "jemalloc/internal/arena_inlines_b.h"
-#include "jemalloc/internal/hash_inlines.h"
-
-#ifndef JEMALLOC_ENABLE_INLINE
-arena_t	*iaalloc(tsdn_t *tsdn, const void *ptr);
-size_t	isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr);
-void	*iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
-    tcache_t *tcache, bool is_internal, arena_t *arena, bool slow_path);
-void	*ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
-    bool slow_path);
-void	*ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
-    tcache_t *tcache, bool is_internal, arena_t *arena);
-void	*ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
-    tcache_t *tcache, arena_t *arena);
-void	*ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
-size_t	ivsalloc(tsdn_t *tsdn, const void *ptr);
-void	idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
-    bool is_internal, bool slow_path);
-void	idalloc(tsd_t *tsd, extent_t *extent, void *ptr);
-void	isdalloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
-    tcache_t *tcache, bool slow_path);
-void	*iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr,
-    size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero,
-    tcache_t *tcache, arena_t *arena);
-void	*iralloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
-    size_t size, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
-void	*iralloc(tsd_t *tsd, extent_t *extent, void *ptr, size_t oldsize,
-    size_t size, size_t alignment, bool zero);
-bool	ixalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
-    size_t size, size_t extra, size_t alignment, bool zero);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_ALWAYS_INLINE arena_t *
-iaalloc(tsdn_t *tsdn, const void *ptr)
-{
-	assert(ptr != NULL);
-
-	return (arena_aalloc(tsdn, ptr));
-}
-
-/*
- * Typical usage:
- *   tsdn_t *tsdn = [...]
- *   void *ptr = [...]
- *   extent_t *extent = iealloc(tsdn, ptr);
- *   size_t sz = isalloc(tsdn, extent, ptr);
- */
-JEMALLOC_ALWAYS_INLINE size_t
-isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
-{
-	assert(ptr != NULL);
-
-	return (arena_salloc(tsdn, extent, ptr));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
-    bool is_internal, arena_t *arena, bool slow_path)
-{
-	void *ret;
-
-	assert(size != 0);
-	assert(!is_internal || tcache == NULL);
-	assert(!is_internal || arena == NULL || arena_ind_get(arena) <
-	    narenas_auto);
-
-	ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
-	if (config_stats && is_internal && likely(ret != NULL)) {
-		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn,
-		    iealloc(tsdn, ret), ret));
-	}
-	return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
-{
-	return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
-	    false, NULL, slow_path));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
-    tcache_t *tcache, bool is_internal, arena_t *arena)
-{
-	void *ret;
-
-	assert(usize != 0);
-	assert(usize == sa2u(usize, alignment));
-	assert(!is_internal || tcache == NULL);
-	assert(!is_internal || arena == NULL || arena_ind_get(arena) <
-	    narenas_auto);
-
-	ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
-	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
-	if (config_stats && is_internal && likely(ret != NULL)) {
-		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn,
-		    iealloc(tsdn, ret), ret));
-	}
-	return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
-    tcache_t *tcache, arena_t *arena)
-{
-	return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
-{
-	return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
-	    tcache_get(tsd, true), false, NULL));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-ivsalloc(tsdn_t *tsdn, const void *ptr)
-{
-	extent_t *extent;
-
-	/*
-	 * Return 0 if ptr is not within an extent managed by jemalloc.  This
-	 * function has two extra costs relative to isalloc():
-	 * - The extent_lookup() call cannot claim to be a dependent lookup,
-	 *   which induces rtree lookup load dependencies.
-	 * - The lookup may fail, so there is an extra branch to check for
-	 *   failure.
-	 */
-	extent = extent_lookup(tsdn, ptr, false);
-	if (extent == NULL)
-		return (0);
-	assert(extent_active_get(extent));
-	/* Only slab members should be looked up via interior pointers. */
-	assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
-
-	return (isalloc(tsdn, extent, ptr));
-}
-
-JEMALLOC_ALWAYS_INLINE void
-idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
-    bool is_internal, bool slow_path)
-{
-	assert(ptr != NULL);
-	assert(!is_internal || tcache == NULL);
-	assert(!is_internal || arena_ind_get(iaalloc(tsdn, ptr)) <
-	    narenas_auto);
-	if (config_stats && is_internal) {
-		arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, extent,
-		    ptr));
-	}
-
-	arena_dalloc(tsdn, extent, ptr, tcache, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-idalloc(tsd_t *tsd, extent_t *extent, void *ptr)
-{
-	idalloctm(tsd_tsdn(tsd), extent, ptr, tcache_get(tsd, false), false,
-	    true);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-isdalloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
-    tcache_t *tcache, bool slow_path)
-{
-	arena_sdalloc(tsdn, extent, ptr, size, tcache, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
-    size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache,
-    arena_t *arena)
-{
-	void *p;
-	size_t usize, copysize;
-
-	usize = sa2u(size + extra, alignment);
-	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
-		return (NULL);
-	p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
-	if (p == NULL) {
-		if (extra == 0)
-			return (NULL);
-		/* Try again, without extra this time. */
-		usize = sa2u(size, alignment);
-		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
-			return (NULL);
-		p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
-		if (p == NULL)
-			return (NULL);
-	}
-	/*
-	 * Copy at most size bytes (not size+extra), since the caller has no
-	 * expectation that the extra bytes will be reliably preserved.
-	 */
-	copysize = (size < oldsize) ? size : oldsize;
-	memcpy(p, ptr, copysize);
-	isdalloct(tsdn, extent, ptr, oldsize, tcache, true);
-	return (p);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iralloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
-    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
-{
-	assert(ptr != NULL);
-	assert(size != 0);
-
-	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
-	    != 0) {
-		/*
-		 * Existing object alignment is inadequate; allocate new space
-		 * and copy.
-		 */
-		return (iralloct_realign(tsdn, extent, ptr, oldsize, size, 0,
-		    alignment, zero, tcache, arena));
-	}
-
-	return (arena_ralloc(tsdn, arena, extent, ptr, oldsize, size, alignment,
-	    zero, tcache));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iralloc(tsd_t *tsd, extent_t *extent, void *ptr, size_t oldsize, size_t size,
-    size_t alignment, bool zero)
-{
-	return (iralloct(tsd_tsdn(tsd), extent, ptr, oldsize, size, alignment,
-	    zero, tcache_get(tsd, true), NULL));
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-ixalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero)
-{
-	assert(ptr != NULL);
-	assert(size != 0);
-
-	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
-	    != 0) {
-		/* Existing object alignment is inadequate. */
-		return (true);
-	}
-
-	return (arena_ralloc_no_move(tsdn, extent, ptr, oldsize, size, extra,
-	    zero));
-}
-#endif
-
-#include "jemalloc/internal/prof_inlines.h"
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* JEMALLOC_INTERNAL_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
deleted file mode 100644
index 37ffca8..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
+++ /dev/null
@@ -1,74 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_DECLS_H
-#define	JEMALLOC_INTERNAL_DECLS_H
-
-#include <math.h>
-#ifdef _WIN32
-#  include <windows.h>
-#  include "msvc_compat/windows_extra.h"
-
-#else
-#  include <sys/param.h>
-#  include <sys/mman.h>
-#  if !defined(__pnacl__) && !defined(__native_client__) && !defined(__Fuchsia__)
-#    include <sys/syscall.h>
-#    if !defined(SYS_write) && defined(__NR_write)
-#      define SYS_write __NR_write
-#    endif
-#    include <sys/uio.h>
-#  endif
-#  include <pthread.h>
-#  ifdef JEMALLOC_OS_UNFAIR_LOCK
-#    include <os/lock.h>
-#  endif
-#  ifdef JEMALLOC_GLIBC_MALLOC_HOOK
-#    include <sched.h>
-#  endif
-#  include <errno.h>
-#  include <sys/time.h>
-#  include <time.h>
-#  ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
-#    include <mach/mach_time.h>
-#  endif
-#endif
-#include <sys/types.h>
-
-#include <limits.h>
-#ifndef SIZE_T_MAX
-#  define SIZE_T_MAX	SIZE_MAX
-#endif
-#include <stdarg.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <stddef.h>
-#ifndef offsetof
-#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
-#endif
-#include <string.h>
-#include <strings.h>
-#include <ctype.h>
-#ifdef _MSC_VER
-#  include <io.h>
-typedef intptr_t ssize_t;
-#  define PATH_MAX 1024
-#  define STDERR_FILENO 2
-#  define __func__ __FUNCTION__
-#  ifdef JEMALLOC_HAS_RESTRICT
-#    define restrict __restrict
-#  endif
-/* Disable warnings about deprecated system functions. */
-#  pragma warning(disable: 4996)
-#if _MSC_VER < 1800
-static int
-isblank(int c)
-{
-	return (c == '\t' || c == ' ');
-}
-#endif
-#else
-#  include <unistd.h>
-#endif
-#include <fcntl.h>
-
-#endif /* JEMALLOC_INTERNAL_DECLS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
deleted file mode 100644
index ee8ec00..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
+++ /dev/null
@@ -1,321 +0,0 @@
-/* include/jemalloc/internal/jemalloc_internal_defs.h.  Generated from jemalloc_internal_defs.h.in by configure.  */
-#ifndef JEMALLOC_INTERNAL_DEFS_H_
-#define	JEMALLOC_INTERNAL_DEFS_H_
-/*
- * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
- * public APIs to be prefixed.  This makes it possible, with some care, to use
- * multiple allocators simultaneously.
- */
-/* #undef JEMALLOC_PREFIX */
-/* #undef JEMALLOC_CPREFIX */
-
-/*
- * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
- * For shared libraries, symbol visibility mechanisms prevent these symbols
- * from being exported, but for static libraries, naming collisions are a real
- * possibility.
- */
-#define JEMALLOC_PRIVATE_NAMESPACE je_
-
-/*
- * Hyper-threaded CPUs may need a special instruction inside spin loops in
- * order to yield to another virtual CPU.
- */
-#if defined(__x86_64__)
-#define CPU_SPINWAIT __asm__ volatile("pause")
-#else
-#define CPU_SPINWAIT do {} while(0)
-#endif
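A sketch (not jemalloc code) of how such a pause hint is typically issued inside a bounded spin loop before backing off to the scheduler; the flag and the retry threshold are hypothetical:

#include <sched.h>
#include <stdatomic.h>

static void
ex_spin_until_set(atomic_bool *flag)
{
	unsigned spins = 0;

	while (!atomic_load_explicit(flag, memory_order_acquire)) {
		if (spins++ < 1000)
			CPU_SPINWAIT;	/* let a sibling hyperthread run */
		else
			sched_yield();	/* spinning is futile; yield the CPU */
	}
}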
-
-/* Defined if C11 atomics are available. */
-#define JEMALLOC_C11ATOMICS 1
-
-/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
-/* #undef JEMALLOC_ATOMIC9 */
-
-/*
- * Defined if OSAtomic*() functions are available, as provided by Darwin, and
- * documented in the atomic(3) manual page.
- */
-/* #undef JEMALLOC_OSATOMIC */
-
-/*
- * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
- * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
- * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
- * functions are defined in libgcc instead of being inlines).
- */
-/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */
-
-/*
- * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
- * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
- * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
- * functions are defined in libgcc instead of being inlines).
- */
-/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */
-
-/*
- * Defined if __builtin_clz() and __builtin_clzl() are available.
- */
-#define JEMALLOC_HAVE_BUILTIN_CLZ 
-
-/*
- * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
- */
-/* #undef JEMALLOC_OS_UNFAIR_LOCK */
-
-/*
- * Defined if OSSpin*() functions are available, as provided by Darwin, and
- * documented in the spinlock(3) manual page.
- */
-/* #undef JEMALLOC_OSSPIN */
-
-/* Defined if syscall(2) is usable. */
-/* #undef JEMALLOC_USE_SYSCALL */
-
-/*
- * Defined if secure_getenv(3) is available.
- */
-/* #undef JEMALLOC_HAVE_SECURE_GETENV */
-
-/*
- * Defined if issetugid(2) is available.
- */
-/* #undef JEMALLOC_HAVE_ISSETUGID */
-
-/* Defined if pthread_atfork(3) is available. */
-/* #undef JEMALLOC_HAVE_PTHREAD_ATFORK */
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
- */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
- */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1
-
-/*
- * Defined if mach_absolute_time() is available.
- */
-/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
-
-/*
- * Defined if _malloc_thread_cleanup() exists.  At least in the case of
- * FreeBSD, pthread_key_create() allocates, which if used during malloc
- * bootstrapping will cause recursion into the pthreads library.  Therefore, if
- * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
- * malloc_tsd.
- */
-/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */
-
-/*
- * Defined if threaded initialization is known to be safe on this platform.
- * Among other things, it must be possible to initialize a mutex without
- * triggering allocation in order for threaded allocation to be safe.
- */
-#define JEMALLOC_THREADED_INIT 
-
-/*
- * Defined if the pthreads implementation defines
- * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
- * to avoid recursive allocation during mutex initialization.
- */
-/* #undef JEMALLOC_MUTEX_INIT_CB */
-
-/* Non-empty if the tls_model attribute is supported. */
-#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
-
-/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
-#define JEMALLOC_CC_SILENCE 
-
-/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
-/* #undef JEMALLOC_CODE_COVERAGE */
-
-/*
- * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
- * inline functions.
- */
-/* #undef JEMALLOC_DEBUG */
-
-/* JEMALLOC_STATS enables statistics calculation. */
-#define JEMALLOC_STATS 
-
-/* JEMALLOC_PROF enables allocation profiling. */
-/* #undef JEMALLOC_PROF */
-
-/* Use libunwind for profile backtracing if defined. */
-/* #undef JEMALLOC_PROF_LIBUNWIND */
-
-/* Use libgcc for profile backtracing if defined. */
-/* #undef JEMALLOC_PROF_LIBGCC */
-
-/* Use gcc intrinsics for profile backtracing if defined. */
-/* #undef JEMALLOC_PROF_GCC */
-
-/*
- * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
- * This makes it possible to allocate/deallocate objects without any locking
- * when the cache is in the steady state.
- */
-#define JEMALLOC_TCACHE 
-
-/*
- * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
- * segment (DSS).
- */
-/* #undef JEMALLOC_DSS */
-
-/* Support memory filling (junk/zero). */
-#define JEMALLOC_FILL 
-
-/* Support utrace(2)-based tracing. */
-/* #undef JEMALLOC_UTRACE */
-
-/* Support optional abort() on OOM. */
-/* #undef JEMALLOC_XMALLOC */
-
-/* Support lazy locking (avoid locking unless a second thread is launched). */
-/* #undef JEMALLOC_LAZY_LOCK */
-
-/* Minimum size class to support is 2^LG_TINY_MIN bytes. */
-#define LG_TINY_MIN 3
-
-/*
- * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
- * classes).
- */
-/* #undef LG_QUANTUM */
-
-/* One page is 2^LG_PAGE bytes. */
-#define LG_PAGE 12
-
-/*
- * One huge page is 2^LG_HUGEPAGE bytes.  Note that this is defined even if the
- * system does not explicitly support huge pages; system calls that require
- * explicit huge page support are separately configured.
- */
-#define LG_HUGEPAGE 21
-
-/*
- * If defined, adjacent virtual memory mappings with identical attributes
- * automatically coalesce, and they fragment when changes are made to subranges.
- * This is the normal order of things for mmap()/munmap(), but on Windows
- * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
- * mappings do *not* coalesce/fragment.
- */
-#define JEMALLOC_MAPS_COALESCE 
-
-/*
- * If defined, use munmap() to unmap freed extents, rather than storing them for
- * later reuse.  This is disabled by default on Linux because common sequences
- * of mmap()/munmap() calls will cause virtual memory map holes.
- */
-/* #undef JEMALLOC_MUNMAP */
-
-/* TLS is used to map arenas and magazine caches to threads. */
-/* #undef JEMALLOC_TLS */
-
-/*
- * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
- * Don't use this directly; instead use unreachable() from util.h
- */
-#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable
-
-/*
- * ffs*() functions to use for bitmapping.  Don't use these directly; instead,
- * use ffs_*() from util.h.
- */
-#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
-#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
-#define JEMALLOC_INTERNAL_FFS __builtin_ffs
-
-/*
- * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
- * within jemalloc-owned extents before dereferencing them.
- */
-/* #undef JEMALLOC_IVSALLOC */
-
-/*
- * If defined, explicitly attempt to more uniformly distribute large allocation
- * pointer alignments across all cache indices.
- */
-#define JEMALLOC_CACHE_OBLIVIOUS 
-
-/*
- * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
- */
-/* #undef JEMALLOC_ZONE */
-
-/*
- * Methods for determining whether the OS overcommits.
- * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
- *                                         /proc/sys/vm/overcommit_memory file.
- * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
- */
-/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
-/* #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY */
-
-/* Defined if madvise(2) is available. */
-/* #undef JEMALLOC_HAVE_MADVISE */
-
-/*
- * Methods for purging unused pages differ between operating systems.
- *
- *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
- *                             will be discarded rather than swapped out.
- *   madvise(..., MADV_DONTNEED) : This immediately discards pages, such that
- *                                 new pages will be demand-zeroed if the
- *                                 address region is later touched.
- */
-/* #undef JEMALLOC_PURGE_MADVISE_FREE */
-/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */
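For reference, a minimal sketch (not jemalloc code) of purging an unused page range on Linux-like systems, preferring the lazy MADV_FREE and falling back to the eager MADV_DONTNEED; error handling is omitted:

#include <stddef.h>
#include <sys/mman.h>

static void
ex_purge(void *addr, size_t size)
{
#if defined(MADV_FREE)
	(void)madvise(addr, size, MADV_FREE);		/* reclaimed only under memory pressure */
#elif defined(MADV_DONTNEED)
	(void)madvise(addr, size, MADV_DONTNEED);	/* discarded immediately */
#else
	(void)addr;
	(void)size;
#endif
}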
-
-/*
- * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
- * arguments to madvise(2).
- */
-/* #undef JEMALLOC_THP */
-
-/* Define if operating system has alloca.h header. */
-#define JEMALLOC_HAS_ALLOCA_H 1
-
-/* C99 restrict keyword supported. */
-#define JEMALLOC_HAS_RESTRICT 1
-
-/* For use by hash code. */
-/* #undef JEMALLOC_BIG_ENDIAN */
-
-/* sizeof(int) == 2^LG_SIZEOF_INT. */
-#define LG_SIZEOF_INT 2
-
-/* sizeof(long) == 2^LG_SIZEOF_LONG. */
-#define LG_SIZEOF_LONG 3
-
-/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
-#define LG_SIZEOF_LONG_LONG 3
-
-/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
-#define LG_SIZEOF_INTMAX_T 3
-
-/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
-/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */
-
-/* glibc memalign hook. */
-/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */
-
-/* Adaptive mutex support in pthreads. */
-/* #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP */
-
-/*
- * If defined, jemalloc symbols are not exported (doesn't work when
- * JEMALLOC_PREFIX is not defined).
- */
-/* #undef JEMALLOC_EXPORT */
-
-/* config.malloc_conf options string. */
-#define JEMALLOC_CONFIG_MALLOC_CONF ""
-
-#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
deleted file mode 100644
index c777ab0..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ /dev/null
@@ -1,316 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_DEFS_H_
-#define	JEMALLOC_INTERNAL_DEFS_H_
-/*
- * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
- * public APIs to be prefixed.  This makes it possible, with some care, to use
- * multiple allocators simultaneously.
- */
-#undef JEMALLOC_PREFIX
-#undef JEMALLOC_CPREFIX
-
-/*
- * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
- * For shared libraries, symbol visibility mechanisms prevent these symbols
- * from being exported, but for static libraries, naming collisions are a real
- * possibility.
- */
-#undef JEMALLOC_PRIVATE_NAMESPACE
-
-/*
- * Hyper-threaded CPUs may need a special instruction inside spin loops in
- * order to yield to another virtual CPU.
- */
-#undef CPU_SPINWAIT
-
-/* Defined if C11 atomics are available. */
-#undef JEMALLOC_C11ATOMICS
-
-/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
-#undef JEMALLOC_ATOMIC9
-
-/*
- * Defined if OSAtomic*() functions are available, as provided by Darwin, and
- * documented in the atomic(3) manual page.
- */
-#undef JEMALLOC_OSATOMIC
-
-/*
- * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
- * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
- * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
- * functions are defined in libgcc instead of being inlines).
- */
-#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
-
-/*
- * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
- * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
- * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
- * functions are defined in libgcc instead of being inlines).
- */
-#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
-
-/*
- * Defined if __builtin_clz() and __builtin_clzl() are available.
- */
-#undef JEMALLOC_HAVE_BUILTIN_CLZ
-
-/*
- * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
- */
-#undef JEMALLOC_OS_UNFAIR_LOCK
-
-/*
- * Defined if OSSpin*() functions are available, as provided by Darwin, and
- * documented in the spinlock(3) manual page.
- */
-#undef JEMALLOC_OSSPIN
-
-/* Defined if syscall(2) is usable. */
-#undef JEMALLOC_USE_SYSCALL
-
-/*
- * Defined if secure_getenv(3) is available.
- */
-#undef JEMALLOC_HAVE_SECURE_GETENV
-
-/*
- * Defined if issetugid(2) is available.
- */
-#undef JEMALLOC_HAVE_ISSETUGID
-
-/* Defined if pthread_atfork(3) is available. */
-#undef JEMALLOC_HAVE_PTHREAD_ATFORK
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
- */
-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
- */
-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
-
-/*
- * Defined if mach_absolute_time() is available.
- */
-#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
-
-/*
- * Defined if _malloc_thread_cleanup() exists.  At least in the case of
- * FreeBSD, pthread_key_create() allocates, which if used during malloc
- * bootstrapping will cause recursion into the pthreads library.  Therefore, if
- * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
- * malloc_tsd.
- */
-#undef JEMALLOC_MALLOC_THREAD_CLEANUP
-
-/*
- * Defined if threaded initialization is known to be safe on this platform.
- * Among other things, it must be possible to initialize a mutex without
- * triggering allocation in order for threaded allocation to be safe.
- */
-#undef JEMALLOC_THREADED_INIT
-
-/*
- * Defined if the pthreads implementation defines
- * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
- * to avoid recursive allocation during mutex initialization.
- */
-#undef JEMALLOC_MUTEX_INIT_CB
-
-/* Non-empty if the tls_model attribute is supported. */
-#undef JEMALLOC_TLS_MODEL
-
-/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
-#undef JEMALLOC_CC_SILENCE
-
-/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
-#undef JEMALLOC_CODE_COVERAGE
-
-/*
- * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
- * inline functions.
- */
-#undef JEMALLOC_DEBUG
-
-/* JEMALLOC_STATS enables statistics calculation. */
-#undef JEMALLOC_STATS
-
-/* JEMALLOC_PROF enables allocation profiling. */
-#undef JEMALLOC_PROF
-
-/* Use libunwind for profile backtracing if defined. */
-#undef JEMALLOC_PROF_LIBUNWIND
-
-/* Use libgcc for profile backtracing if defined. */
-#undef JEMALLOC_PROF_LIBGCC
-
-/* Use gcc intrinsics for profile backtracing if defined. */
-#undef JEMALLOC_PROF_GCC
-
-/*
- * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
- * This makes it possible to allocate/deallocate objects without any locking
- * when the cache is in the steady state.
- */
-#undef JEMALLOC_TCACHE
-
-/*
- * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
- * segment (DSS).
- */
-#undef JEMALLOC_DSS
-
-/* Support memory filling (junk/zero). */
-#undef JEMALLOC_FILL
-
-/* Support utrace(2)-based tracing. */
-#undef JEMALLOC_UTRACE
-
-/* Support optional abort() on OOM. */
-#undef JEMALLOC_XMALLOC
-
-/* Support lazy locking (avoid locking unless a second thread is launched). */
-#undef JEMALLOC_LAZY_LOCK
-
-/* Minimum size class to support is 2^LG_TINY_MIN bytes. */
-#undef LG_TINY_MIN
-
-/*
- * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
- * classes).
- */
-#undef LG_QUANTUM
-
-/* One page is 2^LG_PAGE bytes. */
-#undef LG_PAGE
-
-/*
- * One huge page is 2^LG_HUGEPAGE bytes.  Note that this is defined even if the
- * system does not explicitly support huge pages; system calls that require
- * explicit huge page support are separately configured.
- */
-#undef LG_HUGEPAGE
-
-/*
- * If defined, adjacent virtual memory mappings with identical attributes
- * automatically coalesce, and they fragment when changes are made to subranges.
- * This is the normal order of things for mmap()/munmap(), but on Windows
- * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
- * mappings do *not* coalesce/fragment.
- */
-#undef JEMALLOC_MAPS_COALESCE
-
-/*
- * If defined, use munmap() to unmap freed extents, rather than storing them for
- * later reuse.  This is disabled by default on Linux because common sequences
- * of mmap()/munmap() calls will cause virtual memory map holes.
- */
-#undef JEMALLOC_MUNMAP
-
-/* TLS is used to map arenas and magazine caches to threads. */
-#undef JEMALLOC_TLS
-
-/*
- * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
- * Don't use this directly; instead use unreachable() from util.h
- */
-#undef JEMALLOC_INTERNAL_UNREACHABLE
-
-/*
- * ffs*() functions to use for bitmapping.  Don't use these directly; instead,
- * use ffs_*() from util.h.
- */
-#undef JEMALLOC_INTERNAL_FFSLL
-#undef JEMALLOC_INTERNAL_FFSL
-#undef JEMALLOC_INTERNAL_FFS
-
-/*
- * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
- * within jemalloc-owned extents before dereferencing them.
- */
-#undef JEMALLOC_IVSALLOC
-
-/*
- * If defined, explicitly attempt to more uniformly distribute large allocation
- * pointer alignments across all cache indices.
- */
-#undef JEMALLOC_CACHE_OBLIVIOUS
-
-/*
- * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
- */
-#undef JEMALLOC_ZONE
-
-/*
- * Methods for determining whether the OS overcommits.
- * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
- *                                         /proc/sys/vm/overcommit_memory file.
- * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
- */
-#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
-#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
-
-/* Defined if madvise(2) is available. */
-#undef JEMALLOC_HAVE_MADVISE
-
-/*
- * Methods for purging unused pages differ between operating systems.
- *
- *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
- *                             will be discarded rather than swapped out.
- *   madvise(..., MADV_DONTNEED) : This immediately discards pages, such that
- *                                 new pages will be demand-zeroed if the
- *                                 address region is later touched.
- */
-#undef JEMALLOC_PURGE_MADVISE_FREE
-#undef JEMALLOC_PURGE_MADVISE_DONTNEED
-
-/*
- * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
- * arguments to madvise(2).
- */
-#undef JEMALLOC_THP
-
-/* Define if operating system has alloca.h header. */
-#undef JEMALLOC_HAS_ALLOCA_H
-
-/* C99 restrict keyword supported. */
-#undef JEMALLOC_HAS_RESTRICT
-
-/* For use by hash code. */
-#undef JEMALLOC_BIG_ENDIAN
-
-/* sizeof(int) == 2^LG_SIZEOF_INT. */
-#undef LG_SIZEOF_INT
-
-/* sizeof(long) == 2^LG_SIZEOF_LONG. */
-#undef LG_SIZEOF_LONG
-
-/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
-#undef LG_SIZEOF_LONG_LONG
-
-/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
-#undef LG_SIZEOF_INTMAX_T
-
-/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
-#undef JEMALLOC_GLIBC_MALLOC_HOOK
-
-/* glibc memalign hook. */
-#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
-
-/* Adaptive mutex support in pthreads. */
-#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
-
-/*
- * If defined, jemalloc symbols are not exported (doesn't work when
- * JEMALLOC_PREFIX is not defined).
- */
-#undef JEMALLOC_EXPORT
-
-/* config.malloc_conf options string. */
-#undef JEMALLOC_CONFIG_MALLOC_CONF
-
-#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
deleted file mode 100644
index 80820f8..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_MACROS_H
-#define JEMALLOC_INTERNAL_MACROS_H
-
-/*
- * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
- * functions that are static inline functions if inlining is enabled, and
- * single-definition library-private functions if inlining is disabled.
- *
- * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
- * which case the denoted functions are always static, regardless of whether
- * inlining is enabled.
- */
-#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
-   /* Disable inlining to make debugging/profiling easier. */
-#  define JEMALLOC_ALWAYS_INLINE
-#  define JEMALLOC_ALWAYS_INLINE_C static
-#  define JEMALLOC_INLINE
-#  define JEMALLOC_INLINE_C static
-#  define inline
-#else
-#  define JEMALLOC_ENABLE_INLINE
-#  ifdef JEMALLOC_HAVE_ATTR
-#    define JEMALLOC_ALWAYS_INLINE \
-	 static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
-#    define JEMALLOC_ALWAYS_INLINE_C \
-	 static inline JEMALLOC_ATTR(always_inline)
-#  else
-#    define JEMALLOC_ALWAYS_INLINE static inline
-#    define JEMALLOC_ALWAYS_INLINE_C static inline
-#  endif
-#  define JEMALLOC_INLINE static inline
-#  define JEMALLOC_INLINE_C static inline
-#  ifdef _MSC_VER
-#    define inline _inline
-#  endif
-#endif
-
-#ifdef JEMALLOC_CC_SILENCE
-#  define UNUSED JEMALLOC_ATTR(unused)
-#else
-#  define UNUSED
-#endif
-
-#define	ZU(z)	((size_t)z)
-#define	ZI(z)	((ssize_t)z)
-#define	QU(q)	((uint64_t)q)
-#define	QI(q)	((int64_t)q)
-
-#define	KZU(z)	ZU(z##ULL)
-#define	KZI(z)	ZI(z##LL)
-#define	KQU(q)	QU(q##ULL)
-#define	KQI(q)	QI(q##LL)
-
-#ifndef __DECONST
-#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
-#endif
-
-#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
-#  define restrict
-#endif
-
-#endif /* JEMALLOC_INTERNAL_MACROS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/large_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/large_externs.h
deleted file mode 100644
index 75a196c..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/large_externs.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H
-#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-void	*large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
-void	*large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool zero);
-bool	large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
-    size_t usize_max, bool zero);
-void	*large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
-    size_t usize, size_t alignment, bool zero, tcache_t *tcache);
-#ifdef JEMALLOC_JET
-typedef void (large_dalloc_junk_t)(void *, size_t);
-extern large_dalloc_junk_t *large_dalloc_junk;
-typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
-extern large_dalloc_maybe_junk_t *large_dalloc_maybe_junk;
-#else
-void	large_dalloc_junk(void *ptr, size_t usize);
-void	large_dalloc_maybe_junk(void *ptr, size_t usize);
-#endif
-void	large_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent);
-void	large_dalloc(tsdn_t *tsdn, extent_t *extent);
-size_t	large_salloc(tsdn_t *tsdn, const extent_t *extent);
-prof_tctx_t	*large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
-void	large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
-void	large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/mutex_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/mutex_externs.h
deleted file mode 100644
index 4897f4f..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/mutex_externs.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_MUTEX_EXTERNS_H
-#define JEMALLOC_INTERNAL_MUTEX_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-#ifdef JEMALLOC_LAZY_LOCK
-extern bool isthreaded;
-#else
-#  undef isthreaded /* Undo private_namespace.h definition. */
-#  define isthreaded true
-#endif
-
-bool	malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
-    witness_rank_t rank);
-void	malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void	malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void	malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
-bool	malloc_mutex_boot(void);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_MUTEX_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/mutex_inlines.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/mutex_inlines.h
deleted file mode 100644
index d65fa13..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/mutex_inlines.h
+++ /dev/null
@@ -1,70 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_MUTEX_INLINES_H
-#define JEMALLOC_INTERNAL_MUTEX_INLINES_H
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void	malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void	malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void	malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void	malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
-JEMALLOC_INLINE void
-malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-	if (isthreaded) {
-		witness_assert_not_owner(tsdn, &mutex->witness);
-#ifdef _WIN32
-#  if _WIN32_WINNT >= 0x0600
-		AcquireSRWLockExclusive(&mutex->lock);
-#  else
-		EnterCriticalSection(&mutex->lock);
-#  endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-		os_unfair_lock_lock(&mutex->lock);
-#elif (defined(JEMALLOC_OSSPIN))
-		OSSpinLockLock(&mutex->lock);
-#else
-		pthread_mutex_lock(&mutex->lock);
-#endif
-		witness_lock(tsdn, &mutex->witness);
-	}
-}
-
-JEMALLOC_INLINE void
-malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-	if (isthreaded) {
-		witness_unlock(tsdn, &mutex->witness);
-#ifdef _WIN32
-#  if _WIN32_WINNT >= 0x0600
-		ReleaseSRWLockExclusive(&mutex->lock);
-#  else
-		LeaveCriticalSection(&mutex->lock);
-#  endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-		os_unfair_lock_unlock(&mutex->lock);
-#elif (defined(JEMALLOC_OSSPIN))
-		OSSpinLockUnlock(&mutex->lock);
-#else
-		pthread_mutex_unlock(&mutex->lock);
-#endif
-	}
-}
-
-JEMALLOC_INLINE void
-malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-	if (isthreaded)
-		witness_assert_owner(tsdn, &mutex->witness);
-}
-
-JEMALLOC_INLINE void
-malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-	if (isthreaded)
-		witness_assert_not_owner(tsdn, &mutex->witness);
-}
-#endif
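An illustrative usage pattern for the inlines above; mtx stands for a hypothetical, already-initialized malloc_mutex_t:

static void
ex_with_lock(tsdn_t *tsdn, malloc_mutex_t *mtx)
{
	malloc_mutex_lock(tsdn, mtx);
	malloc_mutex_assert_owner(tsdn, mtx);	/* checked when witnesses are enabled */
	/* ... critical section ... */
	malloc_mutex_unlock(tsdn, mtx);
}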
-
-#endif /* JEMALLOC_INTERNAL_MUTEX_INLINES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/mutex_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/mutex_structs.h
deleted file mode 100644
index 4a18a07..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/mutex_structs.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_MUTEX_STRUCTS_H
-#define JEMALLOC_INTERNAL_MUTEX_STRUCTS_H
-
-struct malloc_mutex_s {
-#ifdef _WIN32
-#  if _WIN32_WINNT >= 0x0600
-	SRWLOCK         	lock;
-#  else
-	CRITICAL_SECTION	lock;
-#  endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-	os_unfair_lock		lock;
-#elif (defined(JEMALLOC_OSSPIN))
-	OSSpinLock		lock;
-#elif (defined(JEMALLOC_MUTEX_INIT_CB))
-	pthread_mutex_t		lock;
-	malloc_mutex_t		*postponed_next;
-#else
-	pthread_mutex_t		lock;
-#endif
-	witness_t		witness;
-};
-
-#endif /* JEMALLOC_INTERNAL_MUTEX_STRUCTS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/mutex_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/mutex_types.h
deleted file mode 100644
index 8c9f249..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/mutex_types.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_MUTEX_TYPES_H
-#define JEMALLOC_INTERNAL_MUTEX_TYPES_H
-
-typedef struct malloc_mutex_s malloc_mutex_t;
-
-#ifdef _WIN32
-#  define MALLOC_MUTEX_INITIALIZER
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-#  define MALLOC_MUTEX_INITIALIZER					\
-     {OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
-#elif (defined(JEMALLOC_OSSPIN))
-#  define MALLOC_MUTEX_INITIALIZER					\
-     {0, WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
-#elif (defined(JEMALLOC_MUTEX_INIT_CB))
-#  define MALLOC_MUTEX_INITIALIZER					\
-    {PTHREAD_MUTEX_INITIALIZER, NULL,					\
-     WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
-#else
-#  if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) &&		\
-       defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
-#    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
-#    define MALLOC_MUTEX_INITIALIZER					\
-       {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP,				\
-        WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
-#  else
-#    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
-#    define MALLOC_MUTEX_INITIALIZER					\
-       {PTHREAD_MUTEX_INITIALIZER,					\
-        WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
-#  endif
-#endif
-
-#endif /* JEMALLOC_INTERNAL_MUTEX_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/nstime_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/nstime_externs.h
deleted file mode 100644
index 4735e6d..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/nstime_externs.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_NSTIME_EXTERNS_H
-#define JEMALLOC_INTERNAL_NSTIME_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-void	nstime_init(nstime_t *time, uint64_t ns);
-void	nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
-uint64_t	nstime_ns(const nstime_t *time);
-uint64_t	nstime_sec(const nstime_t *time);
-uint64_t	nstime_nsec(const nstime_t *time);
-void	nstime_copy(nstime_t *time, const nstime_t *source);
-int	nstime_compare(const nstime_t *a, const nstime_t *b);
-void	nstime_add(nstime_t *time, const nstime_t *addend);
-void	nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
-void	nstime_imultiply(nstime_t *time, uint64_t multiplier);
-void	nstime_idivide(nstime_t *time, uint64_t divisor);
-uint64_t	nstime_divide(const nstime_t *time, const nstime_t *divisor);
-#ifdef JEMALLOC_JET
-typedef bool (nstime_monotonic_t)(void);
-extern nstime_monotonic_t *nstime_monotonic;
-typedef bool (nstime_update_t)(nstime_t *);
-extern nstime_update_t *nstime_update;
-#else
-bool	nstime_monotonic(void);
-bool	nstime_update(nstime_t *time);
-#endif
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_NSTIME_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/nstime_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/nstime_structs.h
deleted file mode 100644
index a637f61..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/nstime_structs.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_NSTIME_STRUCTS_H
-#define JEMALLOC_INTERNAL_NSTIME_STRUCTS_H
-
-struct nstime_s {
-	uint64_t	ns;
-};
-
-#endif /* JEMALLOC_INTERNAL_NSTIME_STRUCTS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/nstime_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/nstime_types.h
deleted file mode 100644
index 861c5a8..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/nstime_types.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_NSTIME_TYPES_H
-#define JEMALLOC_INTERNAL_NSTIME_TYPES_H
-
-typedef struct nstime_s nstime_t;
-
-/* Maximum supported number of seconds (~584 years). */
-#define	NSTIME_SEC_MAX	KQU(18446744072)
-
-#endif /* JEMALLOC_INTERNAL_NSTIME_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/pages_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/pages_externs.h
deleted file mode 100644
index c0c7fc1..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/pages_externs.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
-#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-static const bool pages_can_purge_lazy =
-#ifdef PAGES_CAN_PURGE_LAZY
-    true
-#else
-    false
-#endif
-    ;
-static const bool pages_can_purge_forced =
-#ifdef PAGES_CAN_PURGE_FORCED
-    true
-#else
-    false
-#endif
-    ;
-
-void	*pages_map(void *addr, size_t size, bool *commit);
-void	pages_unmap(void *addr, size_t size);
-void	*pages_trim(void *addr, size_t alloc_size, size_t leadsize,
-    size_t size, bool *commit);
-bool	pages_commit(void *addr, size_t size);
-bool	pages_decommit(void *addr, size_t size);
-bool	pages_purge_lazy(void *addr, size_t size);
-bool	pages_purge_forced(void *addr, size_t size);
-bool	pages_huge(void *addr, size_t size);
-bool	pages_nohuge(void *addr, size_t size);
-void	pages_boot(void);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/pages_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/pages_types.h
deleted file mode 100644
index 14a4c3b..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/pages_types.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PAGES_TYPES_H
-#define JEMALLOC_INTERNAL_PAGES_TYPES_H
-
-/* Page size.  LG_PAGE is determined by the configure script. */
-#ifdef PAGE_MASK
-#  undef PAGE_MASK
-#endif
-#define	PAGE		((size_t)(1U << LG_PAGE))
-#define	PAGE_MASK	((size_t)(PAGE - 1))
-/* Return the page base address for the page containing address a. */
-#define	PAGE_ADDR2BASE(a)						\
-	((void *)((uintptr_t)(a) & ~PAGE_MASK))
-/* Return the smallest pagesize multiple that is >= s. */
-#define	PAGE_CEILING(s)							\
-	(((s) + PAGE_MASK) & ~PAGE_MASK)
-
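For illustration, assuming LG_PAGE == 12 (4 KiB pages), the macros above behave as follows:

#include <assert.h>
#include <stdint.h>

static void
ex_page_macros(void)
{
	assert(PAGE == 4096);
	assert(PAGE_CEILING(5000) == 8192);
	assert(PAGE_ADDR2BASE((void *)(uintptr_t)0x12345abcU) ==
	    (void *)(uintptr_t)0x12345000U);
}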
-/* Huge page size.  LG_HUGEPAGE is determined by the configure script. */
-#define	HUGEPAGE	((size_t)(1U << LG_HUGEPAGE))
-#define	HUGEPAGE_MASK	((size_t)(HUGEPAGE - 1))
-/* Return the huge page base address for the huge page containing address a. */
-#define	HUGEPAGE_ADDR2BASE(a)						\
-	((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
-/* Return the smallest pagesize multiple that is >= s. */
-#define	HUGEPAGE_CEILING(s)						\
-	(((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
-
-/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
-#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
-#  define PAGES_CAN_PURGE_LAZY
-#endif
-/*
- * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
- *
- * The only supported way to hard-purge on Windows is to decommit and then
- * re-commit, but doing so is racy, and if re-commit fails it's a pain to
- * propagate the "poisoned" memory state.  Since we typically decommit as the
- * next step after purging on Windows anyway, there's no point in adding such
- * complexity.
- */
-#if (!defined(_WIN32) && defined(JEMALLOC_PURGE_MADVISE_DONTNEED)) || defined(__Fuchsia__)
-#  define PAGES_CAN_PURGE_FORCED
-#endif
-
-#endif /* JEMALLOC_INTERNAL_PAGES_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ph.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ph.h
deleted file mode 100644
index 9efb7b7..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ph.h
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * A Pairing Heap implementation.
- *
- * "The Pairing Heap: A New Form of Self-Adjusting Heap"
- * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
- *
- * With an auxiliary twopass list, as described in a follow-on paper.
- *
- * "Pairing Heaps: Experiments and Analysis"
- * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
- *
- *******************************************************************************
- */
-
-#ifndef PH_H_
-#define	PH_H_
-
-/* Node structure. */
-#define	phn(a_type)							\
-struct {								\
-	a_type	*phn_prev;						\
-	a_type	*phn_next;						\
-	a_type	*phn_lchild;						\
-}
-
-/* Root structure. */
-#define	ph(a_type)							\
-struct {								\
-	a_type	*ph_root;						\
-}
-
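A sketch of how a client typically instantiates these two macros; the node type and field names here are hypothetical:

#include <stdint.h>

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
	uint64_t	key;
	phn(ex_node_t)	ph_link;	/* embedded heap linkage */
};
typedef ph(ex_node_t) ex_node_heap_t;	/* heap root: a single ph_root pointer */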
-/* Internal utility macros. */
-#define	phn_lchild_get(a_type, a_field, a_phn)				\
-	(a_phn->a_field.phn_lchild)
-#define	phn_lchild_set(a_type, a_field, a_phn, a_lchild) do {		\
-	a_phn->a_field.phn_lchild = a_lchild;				\
-} while (0)
-
-#define	phn_next_get(a_type, a_field, a_phn)				\
-	(a_phn->a_field.phn_next)
-#define	phn_prev_set(a_type, a_field, a_phn, a_prev) do {		\
-	a_phn->a_field.phn_prev = a_prev;				\
-} while (0)
-
-#define	phn_prev_get(a_type, a_field, a_phn)				\
-	(a_phn->a_field.phn_prev)
-#define	phn_next_set(a_type, a_field, a_phn, a_next) do {		\
-	a_phn->a_field.phn_next = a_next;				\
-} while (0)
-
-#define	phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do {	\
-	a_type *phn0child;						\
-									\
-	assert(a_phn0 != NULL);						\
-	assert(a_phn1 != NULL);						\
-	assert(a_cmp(a_phn0, a_phn1) <= 0);				\
-									\
-	phn_prev_set(a_type, a_field, a_phn1, a_phn0);			\
-	phn0child = phn_lchild_get(a_type, a_field, a_phn0);		\
-	phn_next_set(a_type, a_field, a_phn1, phn0child);		\
-	if (phn0child != NULL)						\
-		phn_prev_set(a_type, a_field, phn0child, a_phn1);	\
-	phn_lchild_set(a_type, a_field, a_phn0, a_phn1);		\
-} while (0)
-
-#define	phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do {	\
-	if (a_phn0 == NULL)						\
-		r_phn = a_phn1;						\
-	else if (a_phn1 == NULL)					\
-		r_phn = a_phn0;						\
-	else if (a_cmp(a_phn0, a_phn1) < 0) {				\
-		phn_merge_ordered(a_type, a_field, a_phn0, a_phn1,	\
-		    a_cmp);						\
-		r_phn = a_phn0;						\
-	} else {							\
-		phn_merge_ordered(a_type, a_field, a_phn1, a_phn0,	\
-		    a_cmp);						\
-		r_phn = a_phn1;						\
-	}								\
-} while (0)
-
-#define	ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do {	\
-	a_type *head = NULL;						\
-	a_type *tail = NULL;						\
-	a_type *phn0 = a_phn;						\
-	a_type *phn1 = phn_next_get(a_type, a_field, phn0);		\
-									\
-	/*								\
-	 * Multipass merge, wherein the first two elements of a FIFO	\
-	 * are repeatedly merged, and each result is appended to the	\
-	 * singly linked FIFO, until the FIFO contains only a single	\
-	 * element.  We start with a sibling list but no reference to	\
-	 * its tail, so we do a single pass over the sibling list to	\
-	 * populate the FIFO.						\
-	 */								\
-	if (phn1 != NULL) {						\
-		a_type *phnrest = phn_next_get(a_type, a_field, phn1);	\
-		if (phnrest != NULL)					\
-			phn_prev_set(a_type, a_field, phnrest, NULL);	\
-		phn_prev_set(a_type, a_field, phn0, NULL);		\
-		phn_next_set(a_type, a_field, phn0, NULL);		\
-		phn_prev_set(a_type, a_field, phn1, NULL);		\
-		phn_next_set(a_type, a_field, phn1, NULL);		\
-		phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0);	\
-		head = tail = phn0;					\
-		phn0 = phnrest;						\
-		while (phn0 != NULL) {					\
-			phn1 = phn_next_get(a_type, a_field, phn0);	\
-			if (phn1 != NULL) {				\
-				phnrest = phn_next_get(a_type, a_field,	\
-				    phn1);				\
-				if (phnrest != NULL) {			\
-					phn_prev_set(a_type, a_field,	\
-					    phnrest, NULL);		\
-				}					\
-				phn_prev_set(a_type, a_field, phn0,	\
-				    NULL);				\
-				phn_next_set(a_type, a_field, phn0,	\
-				    NULL);				\
-				phn_prev_set(a_type, a_field, phn1,	\
-				    NULL);				\
-				phn_next_set(a_type, a_field, phn1,	\
-				    NULL);				\
-				phn_merge(a_type, a_field, phn0, phn1,	\
-				    a_cmp, phn0);			\
-				phn_next_set(a_type, a_field, tail,	\
-				    phn0);				\
-				tail = phn0;				\
-				phn0 = phnrest;				\
-			} else {					\
-				phn_next_set(a_type, a_field, tail,	\
-				    phn0);				\
-				tail = phn0;				\
-				phn0 = NULL;				\
-			}						\
-		}							\
-		phn0 = head;						\
-		phn1 = phn_next_get(a_type, a_field, phn0);		\
-		if (phn1 != NULL) {					\
-			while (true) {					\
-				head = phn_next_get(a_type, a_field,	\
-				    phn1);				\
-				assert(phn_prev_get(a_type, a_field,	\
-				    phn0) == NULL);			\
-				phn_next_set(a_type, a_field, phn0,	\
-				    NULL);				\
-				assert(phn_prev_get(a_type, a_field,	\
-				    phn1) == NULL);			\
-				phn_next_set(a_type, a_field, phn1,	\
-				    NULL);				\
-				phn_merge(a_type, a_field, phn0, phn1,	\
-				    a_cmp, phn0);			\
-				if (head == NULL)			\
-					break;				\
-				phn_next_set(a_type, a_field, tail,	\
-				    phn0);				\
-				tail = phn0;				\
-				phn0 = head;				\
-				phn1 = phn_next_get(a_type, a_field,	\
-				    phn0);				\
-			}						\
-		}							\
-	}								\
-	r_phn = phn0;							\
-} while (0)
-
-#define	ph_merge_aux(a_type, a_field, a_ph, a_cmp) do {			\
-	a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root);	\
-	if (phn != NULL) {						\
-		phn_prev_set(a_type, a_field, a_ph->ph_root, NULL);	\
-		phn_next_set(a_type, a_field, a_ph->ph_root, NULL);	\
-		phn_prev_set(a_type, a_field, phn, NULL);		\
-		ph_merge_siblings(a_type, a_field, phn, a_cmp, phn);	\
-		assert(phn_next_get(a_type, a_field, phn) == NULL);	\
-		phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp,	\
-		    a_ph->ph_root);					\
-	}								\
-} while (0)
-
-#define	ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do {	\
-	a_type *lchild = phn_lchild_get(a_type, a_field, a_phn);	\
-	if (lchild == NULL)						\
-		r_phn = NULL;						\
-	else {								\
-		ph_merge_siblings(a_type, a_field, lchild, a_cmp,	\
-		    r_phn);						\
-	}								\
-} while (0)
-
-/*
- * The ph_proto() macro generates function prototypes that correspond to the
- * functions generated by an equivalently parameterized call to ph_gen().
- */
-#define	ph_proto(a_attr, a_prefix, a_ph_type, a_type)			\
-a_attr void	a_prefix##new(a_ph_type *ph);				\
-a_attr bool	a_prefix##empty(a_ph_type *ph);				\
-a_attr a_type	*a_prefix##first(a_ph_type *ph);			\
-a_attr void	a_prefix##insert(a_ph_type *ph, a_type *phn);		\
-a_attr a_type	*a_prefix##remove_first(a_ph_type *ph);			\
-a_attr void	a_prefix##remove(a_ph_type *ph, a_type *phn);
-
-/*
- * The ph_gen() macro generates a type-specific pairing heap implementation,
- * based on the above cpp macros.
- */
-#define	ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp)	\
-a_attr void								\
-a_prefix##new(a_ph_type *ph)						\
-{									\
-	memset(ph, 0, sizeof(ph(a_type)));				\
-}									\
-a_attr bool								\
-a_prefix##empty(a_ph_type *ph)						\
-{									\
-	return (ph->ph_root == NULL);					\
-}									\
-a_attr a_type *								\
-a_prefix##first(a_ph_type *ph)						\
-{									\
-	if (ph->ph_root == NULL)					\
-		return (NULL);						\
-	ph_merge_aux(a_type, a_field, ph, a_cmp);			\
-	return (ph->ph_root);						\
-}									\
-a_attr void								\
-a_prefix##insert(a_ph_type *ph, a_type *phn)				\
-{									\
-	memset(&phn->a_field, 0, sizeof(phn(a_type)));			\
-									\
-	/*								\
-	 * Treat the root as an aux list during insertion, and lazily	\
-	 * merge during a_prefix##remove_first().  For elements that	\
-	 * are inserted, then removed via a_prefix##remove() before the	\
-	 * aux list is ever processed, this makes insert/remove		\
-	 * constant-time, whereas eager merging would make insert	\
-	 * O(log n).							\
-	 */								\
-	if (ph->ph_root == NULL)					\
-		ph->ph_root = phn;					\
-	else {								\
-		phn_next_set(a_type, a_field, phn, phn_next_get(a_type,	\
-		    a_field, ph->ph_root));				\
-		if (phn_next_get(a_type, a_field, ph->ph_root) !=	\
-		    NULL) {						\
-			phn_prev_set(a_type, a_field,			\
-			    phn_next_get(a_type, a_field, ph->ph_root),	\
-			    phn);					\
-		}							\
-		phn_prev_set(a_type, a_field, phn, ph->ph_root);	\
-		phn_next_set(a_type, a_field, ph->ph_root, phn);	\
-	}								\
-}									\
-a_attr a_type *								\
-a_prefix##remove_first(a_ph_type *ph)					\
-{									\
-	a_type *ret;							\
-									\
-	if (ph->ph_root == NULL)					\
-		return (NULL);						\
-	ph_merge_aux(a_type, a_field, ph, a_cmp);			\
-									\
-	ret = ph->ph_root;						\
-									\
-	ph_merge_children(a_type, a_field, ph->ph_root, a_cmp,		\
-	    ph->ph_root);						\
-									\
-	return (ret);							\
-}									\
-a_attr void								\
-a_prefix##remove(a_ph_type *ph, a_type *phn)				\
-{									\
-	a_type *replace, *parent;					\
-									\
-	/*								\
-	 * We can delete from aux list without merging it, but we need	\
-	 * to merge if we are dealing with the root node.		\
-	 */								\
-	if (ph->ph_root == phn) {					\
-		ph_merge_aux(a_type, a_field, ph, a_cmp);		\
-		if (ph->ph_root == phn) {				\
-			ph_merge_children(a_type, a_field, ph->ph_root,	\
-			    a_cmp, ph->ph_root);			\
-			return;						\
-		}							\
-	}								\
-									\
-	/* Get parent (if phn is leftmost child) before mutating. */	\
-	if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) {	\
-		if (phn_lchild_get(a_type, a_field, parent) != phn)	\
-			parent = NULL;					\
-	}								\
-	/* Find a possible replacement node, and link to parent. */	\
-	ph_merge_children(a_type, a_field, phn, a_cmp, replace);	\
-	/* Set next/prev for sibling linked list. */			\
-	if (replace != NULL) {						\
-		if (parent != NULL) {					\
-			phn_prev_set(a_type, a_field, replace, parent);	\
-			phn_lchild_set(a_type, a_field, parent,		\
-			    replace);					\
-		} else {						\
-			phn_prev_set(a_type, a_field, replace,		\
-			    phn_prev_get(a_type, a_field, phn));	\
-			if (phn_prev_get(a_type, a_field, phn) !=	\
-			    NULL) {					\
-				phn_next_set(a_type, a_field,		\
-				    phn_prev_get(a_type, a_field, phn),	\
-				    replace);				\
-			}						\
-		}							\
-		phn_next_set(a_type, a_field, replace,			\
-		    phn_next_get(a_type, a_field, phn));		\
-		if (phn_next_get(a_type, a_field, phn) != NULL) {	\
-			phn_prev_set(a_type, a_field,			\
-			    phn_next_get(a_type, a_field, phn),		\
-			    replace);					\
-		}							\
-	} else {							\
-		if (parent != NULL) {					\
-			a_type *next = phn_next_get(a_type, a_field,	\
-			    phn);					\
-			phn_lchild_set(a_type, a_field, parent, next);	\
-			if (next != NULL) {				\
-				phn_prev_set(a_type, a_field, next,	\
-				    parent);				\
-			}						\
-		} else {						\
-			assert(phn_prev_get(a_type, a_field, phn) !=	\
-			    NULL);					\
-			phn_next_set(a_type, a_field,			\
-			    phn_prev_get(a_type, a_field, phn),		\
-			    phn_next_get(a_type, a_field, phn));	\
-		}							\
-		if (phn_next_get(a_type, a_field, phn) != NULL) {	\
-			phn_prev_set(a_type, a_field,			\
-			    phn_next_get(a_type, a_field, phn),		\
-			    phn_prev_get(a_type, a_field, phn));	\
-		}							\
-	}								\
-}
-
-#endif /* PH_H_ */
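The ph.h header deleted above is purely macro-generated: phn()/ph() declare the intrusive linkage and the root, and ph_gen() expands to a full set of heap functions for a concrete node type. A minimal usage sketch follows; node_t, key, link, and the node_heap_ prefix are invented for illustration and do not come from the tree, while ph_gen()/ph_proto() behave as quoted in the header above.

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include "ph.h"	/* the header deleted above */

typedef struct node_s node_t;
struct node_s {
	uint64_t	key;
	phn(node_t)	link;	/* intrusive heap linkage */
};
typedef ph(node_t) node_heap_t;

static int
node_cmp(const node_t *a, const node_t *b) {
	return (a->key < b->key) ? -1 : (a->key > b->key);
}

/* Generates static node_heap_new/empty/first/insert/remove_first/remove. */
ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)

int main(void) {
	node_heap_t heap;
	node_t a = {.key = 3}, b = {.key = 1}, c = {.key = 2};

	node_heap_new(&heap);
	node_heap_insert(&heap, &a);
	node_heap_insert(&heap, &b);
	node_heap_insert(&heap, &c);
	node_heap_remove(&heap, &c);			/* arbitrary removal */
	assert(node_heap_first(&heap) == &b);		/* smallest key wins */
	assert(node_heap_remove_first(&heap) == &b);
	assert(node_heap_remove_first(&heap) == &a);
	assert(node_heap_empty(&heap));
	return 0;
}

As the deleted comments note, insert only appends to the aux list, so the logarithmic merging work is deferred until first()/remove_first() touch the heap.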
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/private_namespace.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/private_namespace.h
deleted file mode 100644
index 66395fe..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/private_namespace.h
+++ /dev/null
@@ -1,567 +0,0 @@
-#define	a0dalloc JEMALLOC_N(a0dalloc)
-#define	a0malloc JEMALLOC_N(a0malloc)
-#define	arena_aalloc JEMALLOC_N(arena_aalloc)
-#define	arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
-#define	arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge)
-#define	arena_bin_index JEMALLOC_N(arena_bin_index)
-#define	arena_bin_info JEMALLOC_N(arena_bin_info)
-#define	arena_boot JEMALLOC_N(arena_boot)
-#define	arena_choose JEMALLOC_N(arena_choose)
-#define	arena_choose_hard JEMALLOC_N(arena_choose_hard)
-#define	arena_choose_impl JEMALLOC_N(arena_choose_impl)
-#define	arena_cleanup JEMALLOC_N(arena_cleanup)
-#define	arena_dalloc JEMALLOC_N(arena_dalloc)
-#define	arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked)
-#define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
-#define	arena_dalloc_promoted JEMALLOC_N(arena_dalloc_promoted)
-#define	arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
-#define	arena_decay_tick JEMALLOC_N(arena_decay_tick)
-#define	arena_decay_ticks JEMALLOC_N(arena_decay_ticks)
-#define	arena_decay_time_default_get JEMALLOC_N(arena_decay_time_default_get)
-#define	arena_decay_time_default_set JEMALLOC_N(arena_decay_time_default_set)
-#define	arena_decay_time_get JEMALLOC_N(arena_decay_time_get)
-#define	arena_decay_time_set JEMALLOC_N(arena_decay_time_set)
-#define	arena_destroy JEMALLOC_N(arena_destroy)
-#define	arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
-#define	arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
-#define	arena_extent_alloc_large JEMALLOC_N(arena_extent_alloc_large)
-#define	arena_extent_cache_alloc JEMALLOC_N(arena_extent_cache_alloc)
-#define	arena_extent_cache_dalloc JEMALLOC_N(arena_extent_cache_dalloc)
-#define	arena_extent_cache_maybe_insert JEMALLOC_N(arena_extent_cache_maybe_insert)
-#define	arena_extent_cache_maybe_remove JEMALLOC_N(arena_extent_cache_maybe_remove)
-#define	arena_extent_dalloc_large JEMALLOC_N(arena_extent_dalloc_large)
-#define	arena_extent_ralloc_large_expand JEMALLOC_N(arena_extent_ralloc_large_expand)
-#define	arena_extent_ralloc_large_shrink JEMALLOC_N(arena_extent_ralloc_large_shrink)
-#define	arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next)
-#define	arena_get JEMALLOC_N(arena_get)
-#define	arena_ichoose JEMALLOC_N(arena_ichoose)
-#define	arena_ind_get JEMALLOC_N(arena_ind_get)
-#define	arena_init JEMALLOC_N(arena_init)
-#define	arena_internal_add JEMALLOC_N(arena_internal_add)
-#define	arena_internal_get JEMALLOC_N(arena_internal_get)
-#define	arena_internal_sub JEMALLOC_N(arena_internal_sub)
-#define	arena_malloc JEMALLOC_N(arena_malloc)
-#define	arena_malloc_hard JEMALLOC_N(arena_malloc_hard)
-#define	arena_maybe_purge JEMALLOC_N(arena_maybe_purge)
-#define	arena_migrate JEMALLOC_N(arena_migrate)
-#define	arena_new JEMALLOC_N(arena_new)
-#define	arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec)
-#define	arena_nthreads_get JEMALLOC_N(arena_nthreads_get)
-#define	arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc)
-#define	arena_palloc JEMALLOC_N(arena_palloc)
-#define	arena_postfork_child JEMALLOC_N(arena_postfork_child)
-#define	arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
-#define	arena_prefork0 JEMALLOC_N(arena_prefork0)
-#define	arena_prefork1 JEMALLOC_N(arena_prefork1)
-#define	arena_prefork2 JEMALLOC_N(arena_prefork2)
-#define	arena_prefork3 JEMALLOC_N(arena_prefork3)
-#define	arena_prof_accum JEMALLOC_N(arena_prof_accum)
-#define	arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl)
-#define	arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked)
-#define	arena_prof_promote JEMALLOC_N(arena_prof_promote)
-#define	arena_prof_tctx_get JEMALLOC_N(arena_prof_tctx_get)
-#define	arena_prof_tctx_reset JEMALLOC_N(arena_prof_tctx_reset)
-#define	arena_prof_tctx_set JEMALLOC_N(arena_prof_tctx_set)
-#define	arena_purge JEMALLOC_N(arena_purge)
-#define	arena_ralloc JEMALLOC_N(arena_ralloc)
-#define	arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
-#define	arena_reset JEMALLOC_N(arena_reset)
-#define	arena_salloc JEMALLOC_N(arena_salloc)
-#define	arena_sdalloc JEMALLOC_N(arena_sdalloc)
-#define	arena_set JEMALLOC_N(arena_set)
-#define	arena_slab_regind JEMALLOC_N(arena_slab_regind)
-#define	arena_stats_merge JEMALLOC_N(arena_stats_merge)
-#define	arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
-#define	arena_tdata_get JEMALLOC_N(arena_tdata_get)
-#define	arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard)
-#define	arenas JEMALLOC_N(arenas)
-#define	arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup)
-#define	atomic_add_p JEMALLOC_N(atomic_add_p)
-#define	atomic_add_u JEMALLOC_N(atomic_add_u)
-#define	atomic_add_u32 JEMALLOC_N(atomic_add_u32)
-#define	atomic_add_u64 JEMALLOC_N(atomic_add_u64)
-#define	atomic_add_zu JEMALLOC_N(atomic_add_zu)
-#define	atomic_cas_p JEMALLOC_N(atomic_cas_p)
-#define	atomic_cas_u JEMALLOC_N(atomic_cas_u)
-#define	atomic_cas_u32 JEMALLOC_N(atomic_cas_u32)
-#define	atomic_cas_u64 JEMALLOC_N(atomic_cas_u64)
-#define	atomic_cas_zu JEMALLOC_N(atomic_cas_zu)
-#define	atomic_sub_p JEMALLOC_N(atomic_sub_p)
-#define	atomic_sub_u JEMALLOC_N(atomic_sub_u)
-#define	atomic_sub_u32 JEMALLOC_N(atomic_sub_u32)
-#define	atomic_sub_u64 JEMALLOC_N(atomic_sub_u64)
-#define	atomic_sub_zu JEMALLOC_N(atomic_sub_zu)
-#define	atomic_write_p JEMALLOC_N(atomic_write_p)
-#define	atomic_write_u JEMALLOC_N(atomic_write_u)
-#define	atomic_write_u32 JEMALLOC_N(atomic_write_u32)
-#define	atomic_write_u64 JEMALLOC_N(atomic_write_u64)
-#define	atomic_write_zu JEMALLOC_N(atomic_write_zu)
-#define	b0get JEMALLOC_N(b0get)
-#define	base_alloc JEMALLOC_N(base_alloc)
-#define	base_boot JEMALLOC_N(base_boot)
-#define	base_delete JEMALLOC_N(base_delete)
-#define	base_extent_hooks_get JEMALLOC_N(base_extent_hooks_get)
-#define	base_extent_hooks_set JEMALLOC_N(base_extent_hooks_set)
-#define	base_ind_get JEMALLOC_N(base_ind_get)
-#define	base_new JEMALLOC_N(base_new)
-#define	base_postfork_child JEMALLOC_N(base_postfork_child)
-#define	base_postfork_parent JEMALLOC_N(base_postfork_parent)
-#define	base_prefork JEMALLOC_N(base_prefork)
-#define	base_stats_get JEMALLOC_N(base_stats_get)
-#define	bitmap_full JEMALLOC_N(bitmap_full)
-#define	bitmap_get JEMALLOC_N(bitmap_get)
-#define	bitmap_info_init JEMALLOC_N(bitmap_info_init)
-#define	bitmap_init JEMALLOC_N(bitmap_init)
-#define	bitmap_set JEMALLOC_N(bitmap_set)
-#define	bitmap_sfu JEMALLOC_N(bitmap_sfu)
-#define	bitmap_size JEMALLOC_N(bitmap_size)
-#define	bitmap_unset JEMALLOC_N(bitmap_unset)
-#define	bootstrap_calloc JEMALLOC_N(bootstrap_calloc)
-#define	bootstrap_free JEMALLOC_N(bootstrap_free)
-#define	bootstrap_malloc JEMALLOC_N(bootstrap_malloc)
-#define	bt_init JEMALLOC_N(bt_init)
-#define	buferror JEMALLOC_N(buferror)
-#define	ckh_count JEMALLOC_N(ckh_count)
-#define	ckh_delete JEMALLOC_N(ckh_delete)
-#define	ckh_insert JEMALLOC_N(ckh_insert)
-#define	ckh_iter JEMALLOC_N(ckh_iter)
-#define	ckh_new JEMALLOC_N(ckh_new)
-#define	ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
-#define	ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
-#define	ckh_remove JEMALLOC_N(ckh_remove)
-#define	ckh_search JEMALLOC_N(ckh_search)
-#define	ckh_string_hash JEMALLOC_N(ckh_string_hash)
-#define	ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
-#define	ctl_boot JEMALLOC_N(ctl_boot)
-#define	ctl_bymib JEMALLOC_N(ctl_bymib)
-#define	ctl_byname JEMALLOC_N(ctl_byname)
-#define	ctl_nametomib JEMALLOC_N(ctl_nametomib)
-#define	ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
-#define	ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
-#define	ctl_prefork JEMALLOC_N(ctl_prefork)
-#define	decay_ticker_get JEMALLOC_N(decay_ticker_get)
-#define	dss_prec_names JEMALLOC_N(dss_prec_names)
-#define	extent_active_get JEMALLOC_N(extent_active_get)
-#define	extent_active_set JEMALLOC_N(extent_active_set)
-#define	extent_ad_comp JEMALLOC_N(extent_ad_comp)
-#define	extent_addr_get JEMALLOC_N(extent_addr_get)
-#define	extent_addr_randomize JEMALLOC_N(extent_addr_randomize)
-#define	extent_addr_set JEMALLOC_N(extent_addr_set)
-#define	extent_alloc JEMALLOC_N(extent_alloc)
-#define	extent_alloc_cache JEMALLOC_N(extent_alloc_cache)
-#define	extent_alloc_cache_locked JEMALLOC_N(extent_alloc_cache_locked)
-#define	extent_alloc_dss JEMALLOC_N(extent_alloc_dss)
-#define	extent_alloc_mmap JEMALLOC_N(extent_alloc_mmap)
-#define	extent_alloc_wrapper JEMALLOC_N(extent_alloc_wrapper)
-#define	extent_arena_get JEMALLOC_N(extent_arena_get)
-#define	extent_arena_set JEMALLOC_N(extent_arena_set)
-#define	extent_base_get JEMALLOC_N(extent_base_get)
-#define	extent_before_get JEMALLOC_N(extent_before_get)
-#define	extent_boot JEMALLOC_N(extent_boot)
-#define	extent_commit_wrapper JEMALLOC_N(extent_commit_wrapper)
-#define	extent_committed_get JEMALLOC_N(extent_committed_get)
-#define	extent_committed_set JEMALLOC_N(extent_committed_set)
-#define	extent_dalloc JEMALLOC_N(extent_dalloc)
-#define	extent_dalloc_cache JEMALLOC_N(extent_dalloc_cache)
-#define	extent_dalloc_gap JEMALLOC_N(extent_dalloc_gap)
-#define	extent_dalloc_mmap JEMALLOC_N(extent_dalloc_mmap)
-#define	extent_dalloc_wrapper JEMALLOC_N(extent_dalloc_wrapper)
-#define	extent_dalloc_wrapper_try JEMALLOC_N(extent_dalloc_wrapper_try)
-#define	extent_decommit_wrapper JEMALLOC_N(extent_decommit_wrapper)
-#define	extent_dss_boot JEMALLOC_N(extent_dss_boot)
-#define	extent_dss_mergeable JEMALLOC_N(extent_dss_mergeable)
-#define	extent_dss_prec_get JEMALLOC_N(extent_dss_prec_get)
-#define	extent_dss_prec_set JEMALLOC_N(extent_dss_prec_set)
-#define	extent_heap_empty JEMALLOC_N(extent_heap_empty)
-#define	extent_heap_first JEMALLOC_N(extent_heap_first)
-#define	extent_heap_insert JEMALLOC_N(extent_heap_insert)
-#define	extent_heap_new JEMALLOC_N(extent_heap_new)
-#define	extent_heap_remove JEMALLOC_N(extent_heap_remove)
-#define	extent_heap_remove_first JEMALLOC_N(extent_heap_remove_first)
-#define	extent_hooks_default JEMALLOC_N(extent_hooks_default)
-#define	extent_hooks_get JEMALLOC_N(extent_hooks_get)
-#define	extent_hooks_set JEMALLOC_N(extent_hooks_set)
-#define	extent_in_dss JEMALLOC_N(extent_in_dss)
-#define	extent_init JEMALLOC_N(extent_init)
-#define	extent_last_get JEMALLOC_N(extent_last_get)
-#define	extent_lookup JEMALLOC_N(extent_lookup)
-#define	extent_merge_wrapper JEMALLOC_N(extent_merge_wrapper)
-#define	extent_past_get JEMALLOC_N(extent_past_get)
-#define	extent_prof_tctx_get JEMALLOC_N(extent_prof_tctx_get)
-#define	extent_prof_tctx_set JEMALLOC_N(extent_prof_tctx_set)
-#define	extent_purge_forced_wrapper JEMALLOC_N(extent_purge_forced_wrapper)
-#define	extent_purge_lazy_wrapper JEMALLOC_N(extent_purge_lazy_wrapper)
-#define	extent_retained_get JEMALLOC_N(extent_retained_get)
-#define	extent_ring_insert JEMALLOC_N(extent_ring_insert)
-#define	extent_ring_remove JEMALLOC_N(extent_ring_remove)
-#define	extent_size_get JEMALLOC_N(extent_size_get)
-#define	extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil)
-#define	extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor)
-#define	extent_size_set JEMALLOC_N(extent_size_set)
-#define	extent_slab_data_get JEMALLOC_N(extent_slab_data_get)
-#define	extent_slab_data_get_const JEMALLOC_N(extent_slab_data_get_const)
-#define	extent_slab_get JEMALLOC_N(extent_slab_get)
-#define	extent_slab_set JEMALLOC_N(extent_slab_set)
-#define	extent_sn_comp JEMALLOC_N(extent_sn_comp)
-#define	extent_sn_get JEMALLOC_N(extent_sn_get)
-#define	extent_sn_set JEMALLOC_N(extent_sn_set)
-#define	extent_snad_comp JEMALLOC_N(extent_snad_comp)
-#define	extent_split_wrapper JEMALLOC_N(extent_split_wrapper)
-#define	extent_usize_get JEMALLOC_N(extent_usize_get)
-#define	extent_usize_set JEMALLOC_N(extent_usize_set)
-#define	extent_zeroed_get JEMALLOC_N(extent_zeroed_get)
-#define	extent_zeroed_set JEMALLOC_N(extent_zeroed_set)
-#define	extents_rtree JEMALLOC_N(extents_rtree)
-#define	ffs_llu JEMALLOC_N(ffs_llu)
-#define	ffs_lu JEMALLOC_N(ffs_lu)
-#define	ffs_u JEMALLOC_N(ffs_u)
-#define	ffs_u32 JEMALLOC_N(ffs_u32)
-#define	ffs_u64 JEMALLOC_N(ffs_u64)
-#define	ffs_zu JEMALLOC_N(ffs_zu)
-#define	get_errno JEMALLOC_N(get_errno)
-#define	hash JEMALLOC_N(hash)
-#define	hash_fmix_32 JEMALLOC_N(hash_fmix_32)
-#define	hash_fmix_64 JEMALLOC_N(hash_fmix_64)
-#define	hash_get_block_32 JEMALLOC_N(hash_get_block_32)
-#define	hash_get_block_64 JEMALLOC_N(hash_get_block_64)
-#define	hash_rotl_32 JEMALLOC_N(hash_rotl_32)
-#define	hash_rotl_64 JEMALLOC_N(hash_rotl_64)
-#define	hash_x64_128 JEMALLOC_N(hash_x64_128)
-#define	hash_x86_128 JEMALLOC_N(hash_x86_128)
-#define	hash_x86_32 JEMALLOC_N(hash_x86_32)
-#define	iaalloc JEMALLOC_N(iaalloc)
-#define	ialloc JEMALLOC_N(ialloc)
-#define	iallocztm JEMALLOC_N(iallocztm)
-#define	iarena_cleanup JEMALLOC_N(iarena_cleanup)
-#define	idalloc JEMALLOC_N(idalloc)
-#define	idalloctm JEMALLOC_N(idalloctm)
-#define	iealloc JEMALLOC_N(iealloc)
-#define	index2size JEMALLOC_N(index2size)
-#define	index2size_compute JEMALLOC_N(index2size_compute)
-#define	index2size_lookup JEMALLOC_N(index2size_lookup)
-#define	index2size_tab JEMALLOC_N(index2size_tab)
-#define	ipalloc JEMALLOC_N(ipalloc)
-#define	ipalloct JEMALLOC_N(ipalloct)
-#define	ipallocztm JEMALLOC_N(ipallocztm)
-#define	iralloc JEMALLOC_N(iralloc)
-#define	iralloct JEMALLOC_N(iralloct)
-#define	iralloct_realign JEMALLOC_N(iralloct_realign)
-#define	isalloc JEMALLOC_N(isalloc)
-#define	isdalloct JEMALLOC_N(isdalloct)
-#define	isthreaded JEMALLOC_N(isthreaded)
-#define	ivsalloc JEMALLOC_N(ivsalloc)
-#define	ixalloc JEMALLOC_N(ixalloc)
-#define	jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
-#define	jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
-#define	jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
-#define	large_dalloc JEMALLOC_N(large_dalloc)
-#define	large_dalloc_junk JEMALLOC_N(large_dalloc_junk)
-#define	large_dalloc_junked_locked JEMALLOC_N(large_dalloc_junked_locked)
-#define	large_dalloc_maybe_junk JEMALLOC_N(large_dalloc_maybe_junk)
-#define	large_malloc JEMALLOC_N(large_malloc)
-#define	large_palloc JEMALLOC_N(large_palloc)
-#define	large_prof_tctx_get JEMALLOC_N(large_prof_tctx_get)
-#define	large_prof_tctx_reset JEMALLOC_N(large_prof_tctx_reset)
-#define	large_prof_tctx_set JEMALLOC_N(large_prof_tctx_set)
-#define	large_ralloc JEMALLOC_N(large_ralloc)
-#define	large_ralloc_no_move JEMALLOC_N(large_ralloc_no_move)
-#define	large_salloc JEMALLOC_N(large_salloc)
-#define	lg_floor JEMALLOC_N(lg_floor)
-#define	lg_prof_sample JEMALLOC_N(lg_prof_sample)
-#define	malloc_cprintf JEMALLOC_N(malloc_cprintf)
-#define	malloc_mutex_assert_not_owner JEMALLOC_N(malloc_mutex_assert_not_owner)
-#define	malloc_mutex_assert_owner JEMALLOC_N(malloc_mutex_assert_owner)
-#define	malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot)
-#define	malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
-#define	malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
-#define	malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
-#define	malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
-#define	malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
-#define	malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock)
-#define	malloc_printf JEMALLOC_N(malloc_printf)
-#define	malloc_snprintf JEMALLOC_N(malloc_snprintf)
-#define	malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
-#define	malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0)
-#define	malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1)
-#define	malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
-#define	malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
-#define	malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
-#define	malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup)
-#define	malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
-#define	malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
-#define	malloc_write JEMALLOC_N(malloc_write)
-#define	mb_write JEMALLOC_N(mb_write)
-#define	narenas_auto JEMALLOC_N(narenas_auto)
-#define	narenas_total_get JEMALLOC_N(narenas_total_get)
-#define	ncpus JEMALLOC_N(ncpus)
-#define	nhbins JEMALLOC_N(nhbins)
-#define	nstime_add JEMALLOC_N(nstime_add)
-#define	nstime_compare JEMALLOC_N(nstime_compare)
-#define	nstime_copy JEMALLOC_N(nstime_copy)
-#define	nstime_divide JEMALLOC_N(nstime_divide)
-#define	nstime_idivide JEMALLOC_N(nstime_idivide)
-#define	nstime_imultiply JEMALLOC_N(nstime_imultiply)
-#define	nstime_init JEMALLOC_N(nstime_init)
-#define	nstime_init2 JEMALLOC_N(nstime_init2)
-#define	nstime_monotonic JEMALLOC_N(nstime_monotonic)
-#define	nstime_ns JEMALLOC_N(nstime_ns)
-#define	nstime_nsec JEMALLOC_N(nstime_nsec)
-#define	nstime_sec JEMALLOC_N(nstime_sec)
-#define	nstime_subtract JEMALLOC_N(nstime_subtract)
-#define	nstime_update JEMALLOC_N(nstime_update)
-#define	opt_abort JEMALLOC_N(opt_abort)
-#define	opt_decay_time JEMALLOC_N(opt_decay_time)
-#define	opt_dss JEMALLOC_N(opt_dss)
-#define	opt_junk JEMALLOC_N(opt_junk)
-#define	opt_junk_alloc JEMALLOC_N(opt_junk_alloc)
-#define	opt_junk_free JEMALLOC_N(opt_junk_free)
-#define	opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
-#define	opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
-#define	opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
-#define	opt_narenas JEMALLOC_N(opt_narenas)
-#define	opt_prof JEMALLOC_N(opt_prof)
-#define	opt_prof_accum JEMALLOC_N(opt_prof_accum)
-#define	opt_prof_active JEMALLOC_N(opt_prof_active)
-#define	opt_prof_final JEMALLOC_N(opt_prof_final)
-#define	opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
-#define	opt_prof_leak JEMALLOC_N(opt_prof_leak)
-#define	opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
-#define	opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init)
-#define	opt_stats_print JEMALLOC_N(opt_stats_print)
-#define	opt_tcache JEMALLOC_N(opt_tcache)
-#define	opt_utrace JEMALLOC_N(opt_utrace)
-#define	opt_xmalloc JEMALLOC_N(opt_xmalloc)
-#define	opt_zero JEMALLOC_N(opt_zero)
-#define	pages_boot JEMALLOC_N(pages_boot)
-#define	pages_commit JEMALLOC_N(pages_commit)
-#define	pages_decommit JEMALLOC_N(pages_decommit)
-#define	pages_huge JEMALLOC_N(pages_huge)
-#define	pages_map JEMALLOC_N(pages_map)
-#define	pages_nohuge JEMALLOC_N(pages_nohuge)
-#define	pages_purge_forced JEMALLOC_N(pages_purge_forced)
-#define	pages_purge_lazy JEMALLOC_N(pages_purge_lazy)
-#define	pages_trim JEMALLOC_N(pages_trim)
-#define	pages_unmap JEMALLOC_N(pages_unmap)
-#define	pind2sz JEMALLOC_N(pind2sz)
-#define	pind2sz_compute JEMALLOC_N(pind2sz_compute)
-#define	pind2sz_lookup JEMALLOC_N(pind2sz_lookup)
-#define	pind2sz_tab JEMALLOC_N(pind2sz_tab)
-#define	pow2_ceil_u32 JEMALLOC_N(pow2_ceil_u32)
-#define	pow2_ceil_u64 JEMALLOC_N(pow2_ceil_u64)
-#define	pow2_ceil_zu JEMALLOC_N(pow2_ceil_zu)
-#define	prng_lg_range_u32 JEMALLOC_N(prng_lg_range_u32)
-#define	prng_lg_range_u64 JEMALLOC_N(prng_lg_range_u64)
-#define	prng_lg_range_zu JEMALLOC_N(prng_lg_range_zu)
-#define	prng_range_u32 JEMALLOC_N(prng_range_u32)
-#define	prng_range_u64 JEMALLOC_N(prng_range_u64)
-#define	prng_range_zu JEMALLOC_N(prng_range_zu)
-#define	prng_state_next_u32 JEMALLOC_N(prng_state_next_u32)
-#define	prng_state_next_u64 JEMALLOC_N(prng_state_next_u64)
-#define	prng_state_next_zu JEMALLOC_N(prng_state_next_zu)
-#define	prof_active JEMALLOC_N(prof_active)
-#define	prof_active_get JEMALLOC_N(prof_active_get)
-#define	prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked)
-#define	prof_active_set JEMALLOC_N(prof_active_set)
-#define	prof_alloc_prep JEMALLOC_N(prof_alloc_prep)
-#define	prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback)
-#define	prof_backtrace JEMALLOC_N(prof_backtrace)
-#define	prof_boot0 JEMALLOC_N(prof_boot0)
-#define	prof_boot1 JEMALLOC_N(prof_boot1)
-#define	prof_boot2 JEMALLOC_N(prof_boot2)
-#define	prof_bt_count JEMALLOC_N(prof_bt_count)
-#define	prof_cnt_all JEMALLOC_N(prof_cnt_all)
-#define	prof_dump_header JEMALLOC_N(prof_dump_header)
-#define	prof_dump_open JEMALLOC_N(prof_dump_open)
-#define	prof_free JEMALLOC_N(prof_free)
-#define	prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object)
-#define	prof_gdump JEMALLOC_N(prof_gdump)
-#define	prof_gdump_get JEMALLOC_N(prof_gdump_get)
-#define	prof_gdump_get_unlocked JEMALLOC_N(prof_gdump_get_unlocked)
-#define	prof_gdump_set JEMALLOC_N(prof_gdump_set)
-#define	prof_gdump_val JEMALLOC_N(prof_gdump_val)
-#define	prof_idump JEMALLOC_N(prof_idump)
-#define	prof_interval JEMALLOC_N(prof_interval)
-#define	prof_lookup JEMALLOC_N(prof_lookup)
-#define	prof_malloc JEMALLOC_N(prof_malloc)
-#define	prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object)
-#define	prof_mdump JEMALLOC_N(prof_mdump)
-#define	prof_postfork_child JEMALLOC_N(prof_postfork_child)
-#define	prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
-#define	prof_prefork0 JEMALLOC_N(prof_prefork0)
-#define	prof_prefork1 JEMALLOC_N(prof_prefork1)
-#define	prof_realloc JEMALLOC_N(prof_realloc)
-#define	prof_reset JEMALLOC_N(prof_reset)
-#define	prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
-#define	prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
-#define	prof_tctx_get JEMALLOC_N(prof_tctx_get)
-#define	prof_tctx_reset JEMALLOC_N(prof_tctx_reset)
-#define	prof_tctx_set JEMALLOC_N(prof_tctx_set)
-#define	prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
-#define	prof_tdata_count JEMALLOC_N(prof_tdata_count)
-#define	prof_tdata_get JEMALLOC_N(prof_tdata_get)
-#define	prof_tdata_init JEMALLOC_N(prof_tdata_init)
-#define	prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
-#define	prof_thread_active_get JEMALLOC_N(prof_thread_active_get)
-#define	prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get)
-#define	prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set)
-#define	prof_thread_active_set JEMALLOC_N(prof_thread_active_set)
-#define	prof_thread_name_get JEMALLOC_N(prof_thread_name_get)
-#define	prof_thread_name_set JEMALLOC_N(prof_thread_name_set)
-#define	psz2ind JEMALLOC_N(psz2ind)
-#define	psz2u JEMALLOC_N(psz2u)
-#define	rtree_child_read JEMALLOC_N(rtree_child_read)
-#define	rtree_child_read_hard JEMALLOC_N(rtree_child_read_hard)
-#define	rtree_child_tryread JEMALLOC_N(rtree_child_tryread)
-#define	rtree_clear JEMALLOC_N(rtree_clear)
-#define	rtree_ctx_start_level JEMALLOC_N(rtree_ctx_start_level)
-#define	rtree_delete JEMALLOC_N(rtree_delete)
-#define	rtree_elm_acquire JEMALLOC_N(rtree_elm_acquire)
-#define	rtree_elm_lookup JEMALLOC_N(rtree_elm_lookup)
-#define	rtree_elm_read JEMALLOC_N(rtree_elm_read)
-#define	rtree_elm_read_acquired JEMALLOC_N(rtree_elm_read_acquired)
-#define	rtree_elm_release JEMALLOC_N(rtree_elm_release)
-#define	rtree_elm_witness_access JEMALLOC_N(rtree_elm_witness_access)
-#define	rtree_elm_witness_acquire JEMALLOC_N(rtree_elm_witness_acquire)
-#define	rtree_elm_witness_release JEMALLOC_N(rtree_elm_witness_release)
-#define	rtree_elm_write JEMALLOC_N(rtree_elm_write)
-#define	rtree_elm_write_acquired JEMALLOC_N(rtree_elm_write_acquired)
-#define	rtree_new JEMALLOC_N(rtree_new)
-#define	rtree_node_alloc JEMALLOC_N(rtree_node_alloc)
-#define	rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc)
-#define	rtree_node_valid JEMALLOC_N(rtree_node_valid)
-#define	rtree_read JEMALLOC_N(rtree_read)
-#define	rtree_start_level JEMALLOC_N(rtree_start_level)
-#define	rtree_subkey JEMALLOC_N(rtree_subkey)
-#define	rtree_subtree_read JEMALLOC_N(rtree_subtree_read)
-#define	rtree_subtree_read_hard JEMALLOC_N(rtree_subtree_read_hard)
-#define	rtree_subtree_tryread JEMALLOC_N(rtree_subtree_tryread)
-#define	rtree_write JEMALLOC_N(rtree_write)
-#define	s2u JEMALLOC_N(s2u)
-#define	s2u_compute JEMALLOC_N(s2u_compute)
-#define	s2u_lookup JEMALLOC_N(s2u_lookup)
-#define	sa2u JEMALLOC_N(sa2u)
-#define	set_errno JEMALLOC_N(set_errno)
-#define	size2index JEMALLOC_N(size2index)
-#define	size2index_compute JEMALLOC_N(size2index_compute)
-#define	size2index_lookup JEMALLOC_N(size2index_lookup)
-#define	size2index_tab JEMALLOC_N(size2index_tab)
-#define	spin_adaptive JEMALLOC_N(spin_adaptive)
-#define	spin_init JEMALLOC_N(spin_init)
-#define	stats_print JEMALLOC_N(stats_print)
-#define	tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
-#define	tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
-#define	tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
-#define	tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
-#define	tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
-#define	tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
-#define	tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
-#define	tcache_bin_info JEMALLOC_N(tcache_bin_info)
-#define	tcache_boot JEMALLOC_N(tcache_boot)
-#define	tcache_cleanup JEMALLOC_N(tcache_cleanup)
-#define	tcache_create JEMALLOC_N(tcache_create)
-#define	tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
-#define	tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
-#define	tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
-#define	tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
-#define	tcache_event JEMALLOC_N(tcache_event)
-#define	tcache_event_hard JEMALLOC_N(tcache_event_hard)
-#define	tcache_flush JEMALLOC_N(tcache_flush)
-#define	tcache_get JEMALLOC_N(tcache_get)
-#define	tcache_get_hard JEMALLOC_N(tcache_get_hard)
-#define	tcache_maxclass JEMALLOC_N(tcache_maxclass)
-#define	tcache_salloc JEMALLOC_N(tcache_salloc)
-#define	tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
-#define	tcaches JEMALLOC_N(tcaches)
-#define	tcaches_create JEMALLOC_N(tcaches_create)
-#define	tcaches_destroy JEMALLOC_N(tcaches_destroy)
-#define	tcaches_flush JEMALLOC_N(tcaches_flush)
-#define	tcaches_get JEMALLOC_N(tcaches_get)
-#define	ticker_copy JEMALLOC_N(ticker_copy)
-#define	ticker_init JEMALLOC_N(ticker_init)
-#define	ticker_read JEMALLOC_N(ticker_read)
-#define	ticker_tick JEMALLOC_N(ticker_tick)
-#define	ticker_ticks JEMALLOC_N(ticker_ticks)
-#define	tsd_arena_get JEMALLOC_N(tsd_arena_get)
-#define	tsd_arena_set JEMALLOC_N(tsd_arena_set)
-#define	tsd_arenap_get JEMALLOC_N(tsd_arenap_get)
-#define	tsd_arenas_tdata_bypass_get JEMALLOC_N(tsd_arenas_tdata_bypass_get)
-#define	tsd_arenas_tdata_bypass_set JEMALLOC_N(tsd_arenas_tdata_bypass_set)
-#define	tsd_arenas_tdata_bypassp_get JEMALLOC_N(tsd_arenas_tdata_bypassp_get)
-#define	tsd_arenas_tdata_get JEMALLOC_N(tsd_arenas_tdata_get)
-#define	tsd_arenas_tdata_set JEMALLOC_N(tsd_arenas_tdata_set)
-#define	tsd_arenas_tdatap_get JEMALLOC_N(tsd_arenas_tdatap_get)
-#define	tsd_boot JEMALLOC_N(tsd_boot)
-#define	tsd_boot0 JEMALLOC_N(tsd_boot0)
-#define	tsd_boot1 JEMALLOC_N(tsd_boot1)
-#define	tsd_booted JEMALLOC_N(tsd_booted)
-#define	tsd_booted_get JEMALLOC_N(tsd_booted_get)
-#define	tsd_cleanup JEMALLOC_N(tsd_cleanup)
-#define	tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper)
-#define	tsd_fetch JEMALLOC_N(tsd_fetch)
-#define	tsd_fetch_impl JEMALLOC_N(tsd_fetch_impl)
-#define	tsd_get JEMALLOC_N(tsd_get)
-#define	tsd_get_allocates JEMALLOC_N(tsd_get_allocates)
-#define	tsd_iarena_get JEMALLOC_N(tsd_iarena_get)
-#define	tsd_iarena_set JEMALLOC_N(tsd_iarena_set)
-#define	tsd_iarenap_get JEMALLOC_N(tsd_iarenap_get)
-#define	tsd_initialized JEMALLOC_N(tsd_initialized)
-#define	tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
-#define	tsd_init_finish JEMALLOC_N(tsd_init_finish)
-#define	tsd_init_head JEMALLOC_N(tsd_init_head)
-#define	tsd_narenas_tdata_get JEMALLOC_N(tsd_narenas_tdata_get)
-#define	tsd_narenas_tdata_set JEMALLOC_N(tsd_narenas_tdata_set)
-#define	tsd_narenas_tdatap_get JEMALLOC_N(tsd_narenas_tdatap_get)
-#define	tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get)
-#define	tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set)
-#define	tsd_nominal JEMALLOC_N(tsd_nominal)
-#define	tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get)
-#define	tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set)
-#define	tsd_prof_tdatap_get JEMALLOC_N(tsd_prof_tdatap_get)
-#define	tsd_rtree_ctx_get JEMALLOC_N(tsd_rtree_ctx_get)
-#define	tsd_rtree_ctx_set JEMALLOC_N(tsd_rtree_ctx_set)
-#define	tsd_rtree_ctxp_get JEMALLOC_N(tsd_rtree_ctxp_get)
-#define	tsd_rtree_elm_witnesses_get JEMALLOC_N(tsd_rtree_elm_witnesses_get)
-#define	tsd_rtree_elm_witnesses_set JEMALLOC_N(tsd_rtree_elm_witnesses_set)
-#define	tsd_rtree_elm_witnessesp_get JEMALLOC_N(tsd_rtree_elm_witnessesp_get)
-#define	tsd_set JEMALLOC_N(tsd_set)
-#define	tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get)
-#define	tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set)
-#define	tsd_tcache_enabledp_get JEMALLOC_N(tsd_tcache_enabledp_get)
-#define	tsd_tcache_get JEMALLOC_N(tsd_tcache_get)
-#define	tsd_tcache_set JEMALLOC_N(tsd_tcache_set)
-#define	tsd_tcachep_get JEMALLOC_N(tsd_tcachep_get)
-#define	tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get)
-#define	tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set)
-#define	tsd_thread_allocatedp_get JEMALLOC_N(tsd_thread_allocatedp_get)
-#define	tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get)
-#define	tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set)
-#define	tsd_thread_deallocatedp_get JEMALLOC_N(tsd_thread_deallocatedp_get)
-#define	tsd_tls JEMALLOC_N(tsd_tls)
-#define	tsd_tsd JEMALLOC_N(tsd_tsd)
-#define	tsd_tsdn JEMALLOC_N(tsd_tsdn)
-#define	tsd_witness_fork_get JEMALLOC_N(tsd_witness_fork_get)
-#define	tsd_witness_fork_set JEMALLOC_N(tsd_witness_fork_set)
-#define	tsd_witness_forkp_get JEMALLOC_N(tsd_witness_forkp_get)
-#define	tsd_witnesses_get JEMALLOC_N(tsd_witnesses_get)
-#define	tsd_witnesses_set JEMALLOC_N(tsd_witnesses_set)
-#define	tsd_witnessesp_get JEMALLOC_N(tsd_witnessesp_get)
-#define	tsdn_fetch JEMALLOC_N(tsdn_fetch)
-#define	tsdn_null JEMALLOC_N(tsdn_null)
-#define	tsdn_rtree_ctx JEMALLOC_N(tsdn_rtree_ctx)
-#define	tsdn_tsd JEMALLOC_N(tsdn_tsd)
-#define	witness_assert_lockless JEMALLOC_N(witness_assert_lockless)
-#define	witness_assert_not_owner JEMALLOC_N(witness_assert_not_owner)
-#define	witness_assert_owner JEMALLOC_N(witness_assert_owner)
-#define	witness_init JEMALLOC_N(witness_init)
-#define	witness_lock JEMALLOC_N(witness_lock)
-#define	witness_lock_error JEMALLOC_N(witness_lock_error)
-#define	witness_lockless_error JEMALLOC_N(witness_lockless_error)
-#define	witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
-#define	witness_owner JEMALLOC_N(witness_owner)
-#define	witness_owner_error JEMALLOC_N(witness_owner_error)
-#define	witness_postfork_child JEMALLOC_N(witness_postfork_child)
-#define	witness_postfork_parent JEMALLOC_N(witness_postfork_parent)
-#define	witness_prefork JEMALLOC_N(witness_prefork)
-#define	witness_unlock JEMALLOC_N(witness_unlock)
-#define	witnesses_cleanup JEMALLOC_N(witnesses_cleanup)
-#define	zone_register JEMALLOC_N(zone_register)
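Each deleted #define above only renames a symbol once the build supplies JEMALLOC_N; a hedged illustration (the real definition lives in jemalloc's configured internal headers, and "je_" is a hypothetical prefix):

#define JEMALLOC_N(n) je_##n
#define arena_malloc JEMALLOC_N(arena_malloc)	/* arena_malloc -> je_arena_malloc */

With that in effect, every internal reference to arena_malloc resolves to je_arena_malloc, keeping the library's private symbols out of the global namespace; private_unnamespace.h (deleted further below) #undefs the same set to restore the plain names.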
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/private_namespace.sh b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/private_namespace.sh
deleted file mode 100755
index cd25eb3..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/private_namespace.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh
-
-for symbol in `cat $1` ; do
-  echo "#define	${symbol} JEMALLOC_N(${symbol})"
-done
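The five-line script deleted above is the generator for private_namespace.h: it reads a symbol list, one name per line (such as the private_symbols.txt deleted next), and emits one "#define symbol JEMALLOC_N(symbol)" line per entry. It would typically be invoked along the lines of "sh private_namespace.sh private_symbols.txt > private_namespace.h"; the exact paths and build wiring are not shown in this diff.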
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/private_symbols.txt b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/private_symbols.txt
deleted file mode 100644
index 745220e..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/private_symbols.txt
+++ /dev/null
@@ -1,567 +0,0 @@
-a0dalloc
-a0malloc
-arena_aalloc
-arena_alloc_junk_small
-arena_basic_stats_merge
-arena_bin_index
-arena_bin_info
-arena_boot
-arena_choose
-arena_choose_hard
-arena_choose_impl
-arena_cleanup
-arena_dalloc
-arena_dalloc_bin_junked_locked
-arena_dalloc_junk_small
-arena_dalloc_promoted
-arena_dalloc_small
-arena_decay_tick
-arena_decay_ticks
-arena_decay_time_default_get
-arena_decay_time_default_set
-arena_decay_time_get
-arena_decay_time_set
-arena_destroy
-arena_dss_prec_get
-arena_dss_prec_set
-arena_extent_alloc_large
-arena_extent_cache_alloc
-arena_extent_cache_dalloc
-arena_extent_cache_maybe_insert
-arena_extent_cache_maybe_remove
-arena_extent_dalloc_large
-arena_extent_ralloc_large_expand
-arena_extent_ralloc_large_shrink
-arena_extent_sn_next
-arena_get
-arena_ichoose
-arena_ind_get
-arena_init
-arena_internal_add
-arena_internal_get
-arena_internal_sub
-arena_malloc
-arena_malloc_hard
-arena_maybe_purge
-arena_migrate
-arena_new
-arena_nthreads_dec
-arena_nthreads_get
-arena_nthreads_inc
-arena_palloc
-arena_postfork_child
-arena_postfork_parent
-arena_prefork0
-arena_prefork1
-arena_prefork2
-arena_prefork3
-arena_prof_accum
-arena_prof_accum_impl
-arena_prof_accum_locked
-arena_prof_promote
-arena_prof_tctx_get
-arena_prof_tctx_reset
-arena_prof_tctx_set
-arena_purge
-arena_ralloc
-arena_ralloc_no_move
-arena_reset
-arena_salloc
-arena_sdalloc
-arena_set
-arena_slab_regind
-arena_stats_merge
-arena_tcache_fill_small
-arena_tdata_get
-arena_tdata_get_hard
-arenas
-arenas_tdata_cleanup
-atomic_add_p
-atomic_add_u
-atomic_add_u32
-atomic_add_u64
-atomic_add_zu
-atomic_cas_p
-atomic_cas_u
-atomic_cas_u32
-atomic_cas_u64
-atomic_cas_zu
-atomic_sub_p
-atomic_sub_u
-atomic_sub_u32
-atomic_sub_u64
-atomic_sub_zu
-atomic_write_p
-atomic_write_u
-atomic_write_u32
-atomic_write_u64
-atomic_write_zu
-b0get
-base_alloc
-base_boot
-base_delete
-base_extent_hooks_get
-base_extent_hooks_set
-base_ind_get
-base_new
-base_postfork_child
-base_postfork_parent
-base_prefork
-base_stats_get
-bitmap_full
-bitmap_get
-bitmap_info_init
-bitmap_init
-bitmap_set
-bitmap_sfu
-bitmap_size
-bitmap_unset
-bootstrap_calloc
-bootstrap_free
-bootstrap_malloc
-bt_init
-buferror
-ckh_count
-ckh_delete
-ckh_insert
-ckh_iter
-ckh_new
-ckh_pointer_hash
-ckh_pointer_keycomp
-ckh_remove
-ckh_search
-ckh_string_hash
-ckh_string_keycomp
-ctl_boot
-ctl_bymib
-ctl_byname
-ctl_nametomib
-ctl_postfork_child
-ctl_postfork_parent
-ctl_prefork
-decay_ticker_get
-dss_prec_names
-extent_active_get
-extent_active_set
-extent_ad_comp
-extent_addr_get
-extent_addr_randomize
-extent_addr_set
-extent_alloc
-extent_alloc_cache
-extent_alloc_cache_locked
-extent_alloc_dss
-extent_alloc_mmap
-extent_alloc_wrapper
-extent_arena_get
-extent_arena_set
-extent_base_get
-extent_before_get
-extent_boot
-extent_commit_wrapper
-extent_committed_get
-extent_committed_set
-extent_dalloc
-extent_dalloc_cache
-extent_dalloc_gap
-extent_dalloc_mmap
-extent_dalloc_wrapper
-extent_dalloc_wrapper_try
-extent_decommit_wrapper
-extent_dss_boot
-extent_dss_mergeable
-extent_dss_prec_get
-extent_dss_prec_set
-extent_heap_empty
-extent_heap_first
-extent_heap_insert
-extent_heap_new
-extent_heap_remove
-extent_heap_remove_first
-extent_hooks_default
-extent_hooks_get
-extent_hooks_set
-extent_in_dss
-extent_init
-extent_last_get
-extent_lookup
-extent_merge_wrapper
-extent_past_get
-extent_prof_tctx_get
-extent_prof_tctx_set
-extent_purge_forced_wrapper
-extent_purge_lazy_wrapper
-extent_retained_get
-extent_ring_insert
-extent_ring_remove
-extent_size_get
-extent_size_quantize_ceil
-extent_size_quantize_floor
-extent_size_set
-extent_slab_data_get
-extent_slab_data_get_const
-extent_slab_get
-extent_slab_set
-extent_sn_comp
-extent_sn_get
-extent_sn_set
-extent_snad_comp
-extent_split_wrapper
-extent_usize_get
-extent_usize_set
-extent_zeroed_get
-extent_zeroed_set
-extents_rtree
-ffs_llu
-ffs_lu
-ffs_u
-ffs_u32
-ffs_u64
-ffs_zu
-get_errno
-hash
-hash_fmix_32
-hash_fmix_64
-hash_get_block_32
-hash_get_block_64
-hash_rotl_32
-hash_rotl_64
-hash_x64_128
-hash_x86_128
-hash_x86_32
-iaalloc
-ialloc
-iallocztm
-iarena_cleanup
-idalloc
-idalloctm
-iealloc
-index2size
-index2size_compute
-index2size_lookup
-index2size_tab
-ipalloc
-ipalloct
-ipallocztm
-iralloc
-iralloct
-iralloct_realign
-isalloc
-isdalloct
-isthreaded
-ivsalloc
-ixalloc
-jemalloc_postfork_child
-jemalloc_postfork_parent
-jemalloc_prefork
-large_dalloc
-large_dalloc_junk
-large_dalloc_junked_locked
-large_dalloc_maybe_junk
-large_malloc
-large_palloc
-large_prof_tctx_get
-large_prof_tctx_reset
-large_prof_tctx_set
-large_ralloc
-large_ralloc_no_move
-large_salloc
-lg_floor
-lg_prof_sample
-malloc_cprintf
-malloc_mutex_assert_not_owner
-malloc_mutex_assert_owner
-malloc_mutex_boot
-malloc_mutex_init
-malloc_mutex_lock
-malloc_mutex_postfork_child
-malloc_mutex_postfork_parent
-malloc_mutex_prefork
-malloc_mutex_unlock
-malloc_printf
-malloc_snprintf
-malloc_strtoumax
-malloc_tsd_boot0
-malloc_tsd_boot1
-malloc_tsd_cleanup_register
-malloc_tsd_dalloc
-malloc_tsd_malloc
-malloc_tsd_no_cleanup
-malloc_vcprintf
-malloc_vsnprintf
-malloc_write
-mb_write
-narenas_auto
-narenas_total_get
-ncpus
-nhbins
-nstime_add
-nstime_compare
-nstime_copy
-nstime_divide
-nstime_idivide
-nstime_imultiply
-nstime_init
-nstime_init2
-nstime_monotonic
-nstime_ns
-nstime_nsec
-nstime_sec
-nstime_subtract
-nstime_update
-opt_abort
-opt_decay_time
-opt_dss
-opt_junk
-opt_junk_alloc
-opt_junk_free
-opt_lg_prof_interval
-opt_lg_prof_sample
-opt_lg_tcache_max
-opt_narenas
-opt_prof
-opt_prof_accum
-opt_prof_active
-opt_prof_final
-opt_prof_gdump
-opt_prof_leak
-opt_prof_prefix
-opt_prof_thread_active_init
-opt_stats_print
-opt_tcache
-opt_utrace
-opt_xmalloc
-opt_zero
-pages_boot
-pages_commit
-pages_decommit
-pages_huge
-pages_map
-pages_nohuge
-pages_purge_forced
-pages_purge_lazy
-pages_trim
-pages_unmap
-pind2sz
-pind2sz_compute
-pind2sz_lookup
-pind2sz_tab
-pow2_ceil_u32
-pow2_ceil_u64
-pow2_ceil_zu
-prng_lg_range_u32
-prng_lg_range_u64
-prng_lg_range_zu
-prng_range_u32
-prng_range_u64
-prng_range_zu
-prng_state_next_u32
-prng_state_next_u64
-prng_state_next_zu
-prof_active
-prof_active_get
-prof_active_get_unlocked
-prof_active_set
-prof_alloc_prep
-prof_alloc_rollback
-prof_backtrace
-prof_boot0
-prof_boot1
-prof_boot2
-prof_bt_count
-prof_cnt_all
-prof_dump_header
-prof_dump_open
-prof_free
-prof_free_sampled_object
-prof_gdump
-prof_gdump_get
-prof_gdump_get_unlocked
-prof_gdump_set
-prof_gdump_val
-prof_idump
-prof_interval
-prof_lookup
-prof_malloc
-prof_malloc_sample_object
-prof_mdump
-prof_postfork_child
-prof_postfork_parent
-prof_prefork0
-prof_prefork1
-prof_realloc
-prof_reset
-prof_sample_accum_update
-prof_sample_threshold_update
-prof_tctx_get
-prof_tctx_reset
-prof_tctx_set
-prof_tdata_cleanup
-prof_tdata_count
-prof_tdata_get
-prof_tdata_init
-prof_tdata_reinit
-prof_thread_active_get
-prof_thread_active_init_get
-prof_thread_active_init_set
-prof_thread_active_set
-prof_thread_name_get
-prof_thread_name_set
-psz2ind
-psz2u
-rtree_child_read
-rtree_child_read_hard
-rtree_child_tryread
-rtree_clear
-rtree_ctx_start_level
-rtree_delete
-rtree_elm_acquire
-rtree_elm_lookup
-rtree_elm_read
-rtree_elm_read_acquired
-rtree_elm_release
-rtree_elm_witness_access
-rtree_elm_witness_acquire
-rtree_elm_witness_release
-rtree_elm_write
-rtree_elm_write_acquired
-rtree_new
-rtree_node_alloc
-rtree_node_dalloc
-rtree_node_valid
-rtree_read
-rtree_start_level
-rtree_subkey
-rtree_subtree_read
-rtree_subtree_read_hard
-rtree_subtree_tryread
-rtree_write
-s2u
-s2u_compute
-s2u_lookup
-sa2u
-set_errno
-size2index
-size2index_compute
-size2index_lookup
-size2index_tab
-spin_adaptive
-spin_init
-stats_print
-tcache_alloc_easy
-tcache_alloc_large
-tcache_alloc_small
-tcache_alloc_small_hard
-tcache_arena_reassociate
-tcache_bin_flush_large
-tcache_bin_flush_small
-tcache_bin_info
-tcache_boot
-tcache_cleanup
-tcache_create
-tcache_dalloc_large
-tcache_dalloc_small
-tcache_enabled_get
-tcache_enabled_set
-tcache_event
-tcache_event_hard
-tcache_flush
-tcache_get
-tcache_get_hard
-tcache_maxclass
-tcache_salloc
-tcache_stats_merge
-tcaches
-tcaches_create
-tcaches_destroy
-tcaches_flush
-tcaches_get
-ticker_copy
-ticker_init
-ticker_read
-ticker_tick
-ticker_ticks
-tsd_arena_get
-tsd_arena_set
-tsd_arenap_get
-tsd_arenas_tdata_bypass_get
-tsd_arenas_tdata_bypass_set
-tsd_arenas_tdata_bypassp_get
-tsd_arenas_tdata_get
-tsd_arenas_tdata_set
-tsd_arenas_tdatap_get
-tsd_boot
-tsd_boot0
-tsd_boot1
-tsd_booted
-tsd_booted_get
-tsd_cleanup
-tsd_cleanup_wrapper
-tsd_fetch
-tsd_fetch_impl
-tsd_get
-tsd_get_allocates
-tsd_iarena_get
-tsd_iarena_set
-tsd_iarenap_get
-tsd_initialized
-tsd_init_check_recursion
-tsd_init_finish
-tsd_init_head
-tsd_narenas_tdata_get
-tsd_narenas_tdata_set
-tsd_narenas_tdatap_get
-tsd_wrapper_get
-tsd_wrapper_set
-tsd_nominal
-tsd_prof_tdata_get
-tsd_prof_tdata_set
-tsd_prof_tdatap_get
-tsd_rtree_ctx_get
-tsd_rtree_ctx_set
-tsd_rtree_ctxp_get
-tsd_rtree_elm_witnesses_get
-tsd_rtree_elm_witnesses_set
-tsd_rtree_elm_witnessesp_get
-tsd_set
-tsd_tcache_enabled_get
-tsd_tcache_enabled_set
-tsd_tcache_enabledp_get
-tsd_tcache_get
-tsd_tcache_set
-tsd_tcachep_get
-tsd_thread_allocated_get
-tsd_thread_allocated_set
-tsd_thread_allocatedp_get
-tsd_thread_deallocated_get
-tsd_thread_deallocated_set
-tsd_thread_deallocatedp_get
-tsd_tls
-tsd_tsd
-tsd_tsdn
-tsd_witness_fork_get
-tsd_witness_fork_set
-tsd_witness_forkp_get
-tsd_witnesses_get
-tsd_witnesses_set
-tsd_witnessesp_get
-tsdn_fetch
-tsdn_null
-tsdn_rtree_ctx
-tsdn_tsd
-witness_assert_lockless
-witness_assert_not_owner
-witness_assert_owner
-witness_init
-witness_lock
-witness_lock_error
-witness_lockless_error
-witness_not_owner_error
-witness_owner
-witness_owner_error
-witness_postfork_child
-witness_postfork_parent
-witness_prefork
-witness_unlock
-witnesses_cleanup
-zone_register
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/private_unnamespace.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/private_unnamespace.h
deleted file mode 100644
index 03cc1d4..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/private_unnamespace.h
+++ /dev/null
@@ -1,567 +0,0 @@
-#undef a0dalloc
-#undef a0malloc
-#undef arena_aalloc
-#undef arena_alloc_junk_small
-#undef arena_basic_stats_merge
-#undef arena_bin_index
-#undef arena_bin_info
-#undef arena_boot
-#undef arena_choose
-#undef arena_choose_hard
-#undef arena_choose_impl
-#undef arena_cleanup
-#undef arena_dalloc
-#undef arena_dalloc_bin_junked_locked
-#undef arena_dalloc_junk_small
-#undef arena_dalloc_promoted
-#undef arena_dalloc_small
-#undef arena_decay_tick
-#undef arena_decay_ticks
-#undef arena_decay_time_default_get
-#undef arena_decay_time_default_set
-#undef arena_decay_time_get
-#undef arena_decay_time_set
-#undef arena_destroy
-#undef arena_dss_prec_get
-#undef arena_dss_prec_set
-#undef arena_extent_alloc_large
-#undef arena_extent_cache_alloc
-#undef arena_extent_cache_dalloc
-#undef arena_extent_cache_maybe_insert
-#undef arena_extent_cache_maybe_remove
-#undef arena_extent_dalloc_large
-#undef arena_extent_ralloc_large_expand
-#undef arena_extent_ralloc_large_shrink
-#undef arena_extent_sn_next
-#undef arena_get
-#undef arena_ichoose
-#undef arena_ind_get
-#undef arena_init
-#undef arena_internal_add
-#undef arena_internal_get
-#undef arena_internal_sub
-#undef arena_malloc
-#undef arena_malloc_hard
-#undef arena_maybe_purge
-#undef arena_migrate
-#undef arena_new
-#undef arena_nthreads_dec
-#undef arena_nthreads_get
-#undef arena_nthreads_inc
-#undef arena_palloc
-#undef arena_postfork_child
-#undef arena_postfork_parent
-#undef arena_prefork0
-#undef arena_prefork1
-#undef arena_prefork2
-#undef arena_prefork3
-#undef arena_prof_accum
-#undef arena_prof_accum_impl
-#undef arena_prof_accum_locked
-#undef arena_prof_promote
-#undef arena_prof_tctx_get
-#undef arena_prof_tctx_reset
-#undef arena_prof_tctx_set
-#undef arena_purge
-#undef arena_ralloc
-#undef arena_ralloc_no_move
-#undef arena_reset
-#undef arena_salloc
-#undef arena_sdalloc
-#undef arena_set
-#undef arena_slab_regind
-#undef arena_stats_merge
-#undef arena_tcache_fill_small
-#undef arena_tdata_get
-#undef arena_tdata_get_hard
-#undef arenas
-#undef arenas_tdata_cleanup
-#undef atomic_add_p
-#undef atomic_add_u
-#undef atomic_add_u32
-#undef atomic_add_u64
-#undef atomic_add_zu
-#undef atomic_cas_p
-#undef atomic_cas_u
-#undef atomic_cas_u32
-#undef atomic_cas_u64
-#undef atomic_cas_zu
-#undef atomic_sub_p
-#undef atomic_sub_u
-#undef atomic_sub_u32
-#undef atomic_sub_u64
-#undef atomic_sub_zu
-#undef atomic_write_p
-#undef atomic_write_u
-#undef atomic_write_u32
-#undef atomic_write_u64
-#undef atomic_write_zu
-#undef b0get
-#undef base_alloc
-#undef base_boot
-#undef base_delete
-#undef base_extent_hooks_get
-#undef base_extent_hooks_set
-#undef base_ind_get
-#undef base_new
-#undef base_postfork_child
-#undef base_postfork_parent
-#undef base_prefork
-#undef base_stats_get
-#undef bitmap_full
-#undef bitmap_get
-#undef bitmap_info_init
-#undef bitmap_init
-#undef bitmap_set
-#undef bitmap_sfu
-#undef bitmap_size
-#undef bitmap_unset
-#undef bootstrap_calloc
-#undef bootstrap_free
-#undef bootstrap_malloc
-#undef bt_init
-#undef buferror
-#undef ckh_count
-#undef ckh_delete
-#undef ckh_insert
-#undef ckh_iter
-#undef ckh_new
-#undef ckh_pointer_hash
-#undef ckh_pointer_keycomp
-#undef ckh_remove
-#undef ckh_search
-#undef ckh_string_hash
-#undef ckh_string_keycomp
-#undef ctl_boot
-#undef ctl_bymib
-#undef ctl_byname
-#undef ctl_nametomib
-#undef ctl_postfork_child
-#undef ctl_postfork_parent
-#undef ctl_prefork
-#undef decay_ticker_get
-#undef dss_prec_names
-#undef extent_active_get
-#undef extent_active_set
-#undef extent_ad_comp
-#undef extent_addr_get
-#undef extent_addr_randomize
-#undef extent_addr_set
-#undef extent_alloc
-#undef extent_alloc_cache
-#undef extent_alloc_cache_locked
-#undef extent_alloc_dss
-#undef extent_alloc_mmap
-#undef extent_alloc_wrapper
-#undef extent_arena_get
-#undef extent_arena_set
-#undef extent_base_get
-#undef extent_before_get
-#undef extent_boot
-#undef extent_commit_wrapper
-#undef extent_committed_get
-#undef extent_committed_set
-#undef extent_dalloc
-#undef extent_dalloc_cache
-#undef extent_dalloc_gap
-#undef extent_dalloc_mmap
-#undef extent_dalloc_wrapper
-#undef extent_dalloc_wrapper_try
-#undef extent_decommit_wrapper
-#undef extent_dss_boot
-#undef extent_dss_mergeable
-#undef extent_dss_prec_get
-#undef extent_dss_prec_set
-#undef extent_heap_empty
-#undef extent_heap_first
-#undef extent_heap_insert
-#undef extent_heap_new
-#undef extent_heap_remove
-#undef extent_heap_remove_first
-#undef extent_hooks_default
-#undef extent_hooks_get
-#undef extent_hooks_set
-#undef extent_in_dss
-#undef extent_init
-#undef extent_last_get
-#undef extent_lookup
-#undef extent_merge_wrapper
-#undef extent_past_get
-#undef extent_prof_tctx_get
-#undef extent_prof_tctx_set
-#undef extent_purge_forced_wrapper
-#undef extent_purge_lazy_wrapper
-#undef extent_retained_get
-#undef extent_ring_insert
-#undef extent_ring_remove
-#undef extent_size_get
-#undef extent_size_quantize_ceil
-#undef extent_size_quantize_floor
-#undef extent_size_set
-#undef extent_slab_data_get
-#undef extent_slab_data_get_const
-#undef extent_slab_get
-#undef extent_slab_set
-#undef extent_sn_comp
-#undef extent_sn_get
-#undef extent_sn_set
-#undef extent_snad_comp
-#undef extent_split_wrapper
-#undef extent_usize_get
-#undef extent_usize_set
-#undef extent_zeroed_get
-#undef extent_zeroed_set
-#undef extents_rtree
-#undef ffs_llu
-#undef ffs_lu
-#undef ffs_u
-#undef ffs_u32
-#undef ffs_u64
-#undef ffs_zu
-#undef get_errno
-#undef hash
-#undef hash_fmix_32
-#undef hash_fmix_64
-#undef hash_get_block_32
-#undef hash_get_block_64
-#undef hash_rotl_32
-#undef hash_rotl_64
-#undef hash_x64_128
-#undef hash_x86_128
-#undef hash_x86_32
-#undef iaalloc
-#undef ialloc
-#undef iallocztm
-#undef iarena_cleanup
-#undef idalloc
-#undef idalloctm
-#undef iealloc
-#undef index2size
-#undef index2size_compute
-#undef index2size_lookup
-#undef index2size_tab
-#undef ipalloc
-#undef ipalloct
-#undef ipallocztm
-#undef iralloc
-#undef iralloct
-#undef iralloct_realign
-#undef isalloc
-#undef isdalloct
-#undef isthreaded
-#undef ivsalloc
-#undef ixalloc
-#undef jemalloc_postfork_child
-#undef jemalloc_postfork_parent
-#undef jemalloc_prefork
-#undef large_dalloc
-#undef large_dalloc_junk
-#undef large_dalloc_junked_locked
-#undef large_dalloc_maybe_junk
-#undef large_malloc
-#undef large_palloc
-#undef large_prof_tctx_get
-#undef large_prof_tctx_reset
-#undef large_prof_tctx_set
-#undef large_ralloc
-#undef large_ralloc_no_move
-#undef large_salloc
-#undef lg_floor
-#undef lg_prof_sample
-#undef malloc_cprintf
-#undef malloc_mutex_assert_not_owner
-#undef malloc_mutex_assert_owner
-#undef malloc_mutex_boot
-#undef malloc_mutex_init
-#undef malloc_mutex_lock
-#undef malloc_mutex_postfork_child
-#undef malloc_mutex_postfork_parent
-#undef malloc_mutex_prefork
-#undef malloc_mutex_unlock
-#undef malloc_printf
-#undef malloc_snprintf
-#undef malloc_strtoumax
-#undef malloc_tsd_boot0
-#undef malloc_tsd_boot1
-#undef malloc_tsd_cleanup_register
-#undef malloc_tsd_dalloc
-#undef malloc_tsd_malloc
-#undef malloc_tsd_no_cleanup
-#undef malloc_vcprintf
-#undef malloc_vsnprintf
-#undef malloc_write
-#undef mb_write
-#undef narenas_auto
-#undef narenas_total_get
-#undef ncpus
-#undef nhbins
-#undef nstime_add
-#undef nstime_compare
-#undef nstime_copy
-#undef nstime_divide
-#undef nstime_idivide
-#undef nstime_imultiply
-#undef nstime_init
-#undef nstime_init2
-#undef nstime_monotonic
-#undef nstime_ns
-#undef nstime_nsec
-#undef nstime_sec
-#undef nstime_subtract
-#undef nstime_update
-#undef opt_abort
-#undef opt_decay_time
-#undef opt_dss
-#undef opt_junk
-#undef opt_junk_alloc
-#undef opt_junk_free
-#undef opt_lg_prof_interval
-#undef opt_lg_prof_sample
-#undef opt_lg_tcache_max
-#undef opt_narenas
-#undef opt_prof
-#undef opt_prof_accum
-#undef opt_prof_active
-#undef opt_prof_final
-#undef opt_prof_gdump
-#undef opt_prof_leak
-#undef opt_prof_prefix
-#undef opt_prof_thread_active_init
-#undef opt_stats_print
-#undef opt_tcache
-#undef opt_utrace
-#undef opt_xmalloc
-#undef opt_zero
-#undef pages_boot
-#undef pages_commit
-#undef pages_decommit
-#undef pages_huge
-#undef pages_map
-#undef pages_nohuge
-#undef pages_purge_forced
-#undef pages_purge_lazy
-#undef pages_trim
-#undef pages_unmap
-#undef pind2sz
-#undef pind2sz_compute
-#undef pind2sz_lookup
-#undef pind2sz_tab
-#undef pow2_ceil_u32
-#undef pow2_ceil_u64
-#undef pow2_ceil_zu
-#undef prng_lg_range_u32
-#undef prng_lg_range_u64
-#undef prng_lg_range_zu
-#undef prng_range_u32
-#undef prng_range_u64
-#undef prng_range_zu
-#undef prng_state_next_u32
-#undef prng_state_next_u64
-#undef prng_state_next_zu
-#undef prof_active
-#undef prof_active_get
-#undef prof_active_get_unlocked
-#undef prof_active_set
-#undef prof_alloc_prep
-#undef prof_alloc_rollback
-#undef prof_backtrace
-#undef prof_boot0
-#undef prof_boot1
-#undef prof_boot2
-#undef prof_bt_count
-#undef prof_cnt_all
-#undef prof_dump_header
-#undef prof_dump_open
-#undef prof_free
-#undef prof_free_sampled_object
-#undef prof_gdump
-#undef prof_gdump_get
-#undef prof_gdump_get_unlocked
-#undef prof_gdump_set
-#undef prof_gdump_val
-#undef prof_idump
-#undef prof_interval
-#undef prof_lookup
-#undef prof_malloc
-#undef prof_malloc_sample_object
-#undef prof_mdump
-#undef prof_postfork_child
-#undef prof_postfork_parent
-#undef prof_prefork0
-#undef prof_prefork1
-#undef prof_realloc
-#undef prof_reset
-#undef prof_sample_accum_update
-#undef prof_sample_threshold_update
-#undef prof_tctx_get
-#undef prof_tctx_reset
-#undef prof_tctx_set
-#undef prof_tdata_cleanup
-#undef prof_tdata_count
-#undef prof_tdata_get
-#undef prof_tdata_init
-#undef prof_tdata_reinit
-#undef prof_thread_active_get
-#undef prof_thread_active_init_get
-#undef prof_thread_active_init_set
-#undef prof_thread_active_set
-#undef prof_thread_name_get
-#undef prof_thread_name_set
-#undef psz2ind
-#undef psz2u
-#undef rtree_child_read
-#undef rtree_child_read_hard
-#undef rtree_child_tryread
-#undef rtree_clear
-#undef rtree_ctx_start_level
-#undef rtree_delete
-#undef rtree_elm_acquire
-#undef rtree_elm_lookup
-#undef rtree_elm_read
-#undef rtree_elm_read_acquired
-#undef rtree_elm_release
-#undef rtree_elm_witness_access
-#undef rtree_elm_witness_acquire
-#undef rtree_elm_witness_release
-#undef rtree_elm_write
-#undef rtree_elm_write_acquired
-#undef rtree_new
-#undef rtree_node_alloc
-#undef rtree_node_dalloc
-#undef rtree_node_valid
-#undef rtree_read
-#undef rtree_start_level
-#undef rtree_subkey
-#undef rtree_subtree_read
-#undef rtree_subtree_read_hard
-#undef rtree_subtree_tryread
-#undef rtree_write
-#undef s2u
-#undef s2u_compute
-#undef s2u_lookup
-#undef sa2u
-#undef set_errno
-#undef size2index
-#undef size2index_compute
-#undef size2index_lookup
-#undef size2index_tab
-#undef spin_adaptive
-#undef spin_init
-#undef stats_print
-#undef tcache_alloc_easy
-#undef tcache_alloc_large
-#undef tcache_alloc_small
-#undef tcache_alloc_small_hard
-#undef tcache_arena_reassociate
-#undef tcache_bin_flush_large
-#undef tcache_bin_flush_small
-#undef tcache_bin_info
-#undef tcache_boot
-#undef tcache_cleanup
-#undef tcache_create
-#undef tcache_dalloc_large
-#undef tcache_dalloc_small
-#undef tcache_enabled_get
-#undef tcache_enabled_set
-#undef tcache_event
-#undef tcache_event_hard
-#undef tcache_flush
-#undef tcache_get
-#undef tcache_get_hard
-#undef tcache_maxclass
-#undef tcache_salloc
-#undef tcache_stats_merge
-#undef tcaches
-#undef tcaches_create
-#undef tcaches_destroy
-#undef tcaches_flush
-#undef tcaches_get
-#undef ticker_copy
-#undef ticker_init
-#undef ticker_read
-#undef ticker_tick
-#undef ticker_ticks
-#undef tsd_arena_get
-#undef tsd_arena_set
-#undef tsd_arenap_get
-#undef tsd_arenas_tdata_bypass_get
-#undef tsd_arenas_tdata_bypass_set
-#undef tsd_arenas_tdata_bypassp_get
-#undef tsd_arenas_tdata_get
-#undef tsd_arenas_tdata_set
-#undef tsd_arenas_tdatap_get
-#undef tsd_boot
-#undef tsd_boot0
-#undef tsd_boot1
-#undef tsd_booted
-#undef tsd_booted_get
-#undef tsd_cleanup
-#undef tsd_cleanup_wrapper
-#undef tsd_fetch
-#undef tsd_fetch_impl
-#undef tsd_get
-#undef tsd_get_allocates
-#undef tsd_iarena_get
-#undef tsd_iarena_set
-#undef tsd_iarenap_get
-#undef tsd_initialized
-#undef tsd_init_check_recursion
-#undef tsd_init_finish
-#undef tsd_init_head
-#undef tsd_narenas_tdata_get
-#undef tsd_narenas_tdata_set
-#undef tsd_narenas_tdatap_get
-#undef tsd_wrapper_get
-#undef tsd_wrapper_set
-#undef tsd_nominal
-#undef tsd_prof_tdata_get
-#undef tsd_prof_tdata_set
-#undef tsd_prof_tdatap_get
-#undef tsd_rtree_ctx_get
-#undef tsd_rtree_ctx_set
-#undef tsd_rtree_ctxp_get
-#undef tsd_rtree_elm_witnesses_get
-#undef tsd_rtree_elm_witnesses_set
-#undef tsd_rtree_elm_witnessesp_get
-#undef tsd_set
-#undef tsd_tcache_enabled_get
-#undef tsd_tcache_enabled_set
-#undef tsd_tcache_enabledp_get
-#undef tsd_tcache_get
-#undef tsd_tcache_set
-#undef tsd_tcachep_get
-#undef tsd_thread_allocated_get
-#undef tsd_thread_allocated_set
-#undef tsd_thread_allocatedp_get
-#undef tsd_thread_deallocated_get
-#undef tsd_thread_deallocated_set
-#undef tsd_thread_deallocatedp_get
-#undef tsd_tls
-#undef tsd_tsd
-#undef tsd_tsdn
-#undef tsd_witness_fork_get
-#undef tsd_witness_fork_set
-#undef tsd_witness_forkp_get
-#undef tsd_witnesses_get
-#undef tsd_witnesses_set
-#undef tsd_witnessesp_get
-#undef tsdn_fetch
-#undef tsdn_null
-#undef tsdn_rtree_ctx
-#undef tsdn_tsd
-#undef witness_assert_lockless
-#undef witness_assert_not_owner
-#undef witness_assert_owner
-#undef witness_init
-#undef witness_lock
-#undef witness_lock_error
-#undef witness_lockless_error
-#undef witness_not_owner_error
-#undef witness_owner
-#undef witness_owner_error
-#undef witness_postfork_child
-#undef witness_postfork_parent
-#undef witness_prefork
-#undef witness_unlock
-#undef witnesses_cleanup
-#undef zone_register
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/private_unnamespace.sh b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/private_unnamespace.sh
deleted file mode 100755
index 23fed8e..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/private_unnamespace.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh
-
-for symbol in `cat $1` ; do
-  echo "#undef ${symbol}"
-done
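
For reference, this five-line generator is what produced #undef blocks like the long one deleted above: given a file with one internal symbol name per line (presumably the private symbols list kept alongside it), it emits one #undef per name, e.g. an input line containing bitmap_init yields the output line

    #undef bitmap_init

which is exactly the form of the header being removed here.
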
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prng_inlines.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prng_inlines.h
deleted file mode 100644
index 8cc19ce..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prng_inlines.h
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PRNG_INLINES_H
-#define JEMALLOC_INTERNAL_PRNG_INLINES_H
-
-#ifndef JEMALLOC_ENABLE_INLINE
-uint32_t	prng_state_next_u32(uint32_t state);
-uint64_t	prng_state_next_u64(uint64_t state);
-size_t	prng_state_next_zu(size_t state);
-
-uint32_t	prng_lg_range_u32(uint32_t *state, unsigned lg_range,
-    bool atomic);
-uint64_t	prng_lg_range_u64(uint64_t *state, unsigned lg_range);
-size_t	prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic);
-
-uint32_t	prng_range_u32(uint32_t *state, uint32_t range, bool atomic);
-uint64_t	prng_range_u64(uint64_t *state, uint64_t range);
-size_t	prng_range_zu(size_t *state, size_t range, bool atomic);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
-JEMALLOC_ALWAYS_INLINE uint32_t
-prng_state_next_u32(uint32_t state)
-{
-	return ((state * PRNG_A_32) + PRNG_C_32);
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-prng_state_next_u64(uint64_t state)
-{
-	return ((state * PRNG_A_64) + PRNG_C_64);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prng_state_next_zu(size_t state)
-{
-#if LG_SIZEOF_PTR == 2
-	return ((state * PRNG_A_32) + PRNG_C_32);
-#elif LG_SIZEOF_PTR == 3
-	return ((state * PRNG_A_64) + PRNG_C_64);
-#else
-#error Unsupported pointer size
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE uint32_t
-prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic)
-{
-	uint32_t ret, state1;
-
-	assert(lg_range > 0);
-	assert(lg_range <= 32);
-
-	if (atomic) {
-		uint32_t state0;
-
-		do {
-			state0 = atomic_read_u32(state);
-			state1 = prng_state_next_u32(state0);
-		} while (atomic_cas_u32(state, state0, state1));
-	} else {
-		state1 = prng_state_next_u32(*state);
-		*state = state1;
-	}
-	ret = state1 >> (32 - lg_range);
-
-	return (ret);
-}
-
-/* 64-bit atomic operations cannot be supported on all relevant platforms. */
-JEMALLOC_ALWAYS_INLINE uint64_t
-prng_lg_range_u64(uint64_t *state, unsigned lg_range)
-{
-	uint64_t ret, state1;
-
-	assert(lg_range > 0);
-	assert(lg_range <= 64);
-
-	state1 = prng_state_next_u64(*state);
-	*state = state1;
-	ret = state1 >> (64 - lg_range);
-
-	return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic)
-{
-	size_t ret, state1;
-
-	assert(lg_range > 0);
-	assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
-
-	if (atomic) {
-		size_t state0;
-
-		do {
-			state0 = atomic_read_zu(state);
-			state1 = prng_state_next_zu(state0);
-		} while (atomic_cas_zu(state, state0, state1));
-	} else {
-		state1 = prng_state_next_zu(*state);
-		*state = state1;
-	}
-	ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
-
-	return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE uint32_t
-prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
-{
-	uint32_t ret;
-	unsigned lg_range;
-
-	assert(range > 1);
-
-	/* Compute the ceiling of lg(range). */
-	lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
-
-	/* Generate a result in [0..range) via repeated trial. */
-	do {
-		ret = prng_lg_range_u32(state, lg_range, atomic);
-	} while (ret >= range);
-
-	return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-prng_range_u64(uint64_t *state, uint64_t range)
-{
-	uint64_t ret;
-	unsigned lg_range;
-
-	assert(range > 1);
-
-	/* Compute the ceiling of lg(range). */
-	lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
-
-	/* Generate a result in [0..range) via repeated trial. */
-	do {
-		ret = prng_lg_range_u64(state, lg_range);
-	} while (ret >= range);
-
-	return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prng_range_zu(size_t *state, size_t range, bool atomic)
-{
-	size_t ret;
-	unsigned lg_range;
-
-	assert(range > 1);
-
-	/* Compute the ceiling of lg(range). */
-	lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
-
-	/* Generate a result in [0..range) via repeated trial. */
-	do {
-		ret = prng_lg_range_zu(state, lg_range, atomic);
-	} while (ret >= range);
-
-	return (ret);
-}
-#endif
-
-#endif /* JEMALLOC_INTERNAL_PRNG_INLINES_H */
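
The inlines above combine a linear-congruential step with rejection sampling: prng_lg_range_*() keeps only the top lg_range bits of the new state (the upper bits have the longest period), and prng_range_*() retries until the value falls inside [0, range). A minimal self-contained sketch of the same scheme, reusing the 32-bit constants defined in prng_types.h below (illustrative only, not a drop-in for the deleted inlines):

    #include <assert.h>
    #include <stdint.h>

    #define PRNG_A_32 UINT32_C(1103515241)
    #define PRNG_C_32 UINT32_C(12347)

    /* One LCG step: state' = (a * state + c) mod 2^32. */
    static uint32_t
    prng_step_u32(uint32_t state)
    {
        return (state * PRNG_A_32 + PRNG_C_32);
    }

    /* Uniform value in [0, range) via top bits plus rejection. */
    static uint32_t
    prng_range_u32_sketch(uint32_t *state, uint32_t range)
    {
        unsigned lg_range = 32;
        uint32_t ret;

        assert(range > 1);
        /* Compute the ceiling of lg(range). */
        while ((UINT32_C(1) << (lg_range - 1)) >= range)
            lg_range--;
        /* Generate a result in [0..range) via repeated trial. */
        do {
            *state = prng_step_u32(*state);
            ret = *state >> (32 - lg_range);
        } while (ret >= range);
        return (ret);
    }
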
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prng_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prng_types.h
deleted file mode 100644
index dec44c0..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prng_types.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PRNG_TYPES_H
-#define JEMALLOC_INTERNAL_PRNG_TYPES_H
-
-/*
- * Simple linear congruential pseudo-random number generator:
- *
- *   prng(y) = (a*x + c) % m
- *
- * where the following constants ensure maximal period:
- *
- *   a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
- *   c == Odd number (relatively prime to 2^n).
- *   m == 2^32
- *
- * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
- *
- * This choice of m has the disadvantage that the quality of the bits is
- * proportional to bit position.  For example, the lowest bit has a cycle of 2,
- * the next has a cycle of 4, etc.  For this reason, we prefer to use the upper
- * bits.
- */
-
-#define	PRNG_A_32	UINT32_C(1103515241)
-#define	PRNG_C_32	UINT32_C(12347)
-
-#define	PRNG_A_64	UINT64_C(6364136223846793005)
-#define	PRNG_C_64	UINT64_C(1442695040888963407)
-
-#endif /* JEMALLOC_INTERNAL_PRNG_TYPES_H */
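
As a quick check of the recurrence documented above: starting from state x = 1, the next 32-bit state is 1103515241 * 1 + 12347 = 1103527588 (mod 2^32). And because a and c are both odd, the low bit of the state simply alternates ((a*x + c) mod 2 == (x + 1) mod 2), which is the concrete reason the comment steers callers toward the upper bits.
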
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prof_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prof_externs.h
deleted file mode 100644
index 8f2ee664..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prof_externs.h
+++ /dev/null
@@ -1,88 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
-#define JEMALLOC_INTERNAL_PROF_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-extern bool	opt_prof;
-extern bool	opt_prof_active;
-extern bool	opt_prof_thread_active_init;
-extern size_t	opt_lg_prof_sample;   /* Mean bytes between samples. */
-extern ssize_t	opt_lg_prof_interval; /* lg(prof_interval). */
-extern bool	opt_prof_gdump;       /* High-water memory dumping. */
-extern bool	opt_prof_final;       /* Final profile dumping. */
-extern bool	opt_prof_leak;        /* Dump leak summary at exit. */
-extern bool	opt_prof_accum;       /* Report cumulative bytes. */
-extern char	opt_prof_prefix[
-    /* Minimize memory bloat for non-prof builds. */
-#ifdef JEMALLOC_PROF
-    PATH_MAX +
-#endif
-    1];
-
-/* Accessed via prof_active_[gs]et{_unlocked,}(). */
-extern bool	prof_active;
-
-/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
-extern bool	prof_gdump_val;
-
-/*
- * Profile dump interval, measured in bytes allocated.  Each arena triggers a
- * profile dump when it reaches this threshold.  The effect is that the
- * interval between profile dumps averages prof_interval, though the actual
- * interval between dumps will tend to be sporadic, and the interval will be a
- * maximum of approximately (prof_interval * narenas).
- */
-extern uint64_t	prof_interval;
-
-/*
- * Initialized as opt_lg_prof_sample, and potentially modified during profiling
- * resets.
- */
-extern size_t	lg_prof_sample;
-
-void	prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
-void	prof_malloc_sample_object(tsdn_t *tsdn, extent_t *extent,
-    const void *ptr, size_t usize, prof_tctx_t *tctx);
-void	prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
-void	bt_init(prof_bt_t *bt, void **vec);
-void	prof_backtrace(prof_bt_t *bt);
-prof_tctx_t	*prof_lookup(tsd_t *tsd, prof_bt_t *bt);
-#ifdef JEMALLOC_JET
-size_t	prof_tdata_count(void);
-size_t	prof_bt_count(void);
-typedef int (prof_dump_open_t)(bool, const char *);
-extern prof_dump_open_t *prof_dump_open;
-typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
-extern prof_dump_header_t *prof_dump_header;
-void	prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes,
-    uint64_t *accumobjs, uint64_t *accumbytes);
-#endif
-void	prof_idump(tsdn_t *tsdn);
-bool	prof_mdump(tsd_t *tsd, const char *filename);
-void	prof_gdump(tsdn_t *tsdn);
-prof_tdata_t	*prof_tdata_init(tsd_t *tsd);
-prof_tdata_t	*prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
-void	prof_reset(tsd_t *tsd, size_t lg_sample);
-void	prof_tdata_cleanup(tsd_t *tsd);
-bool	prof_active_get(tsdn_t *tsdn);
-bool	prof_active_set(tsdn_t *tsdn, bool active);
-const char	*prof_thread_name_get(tsd_t *tsd);
-int	prof_thread_name_set(tsd_t *tsd, const char *thread_name);
-bool	prof_thread_active_get(tsd_t *tsd);
-bool	prof_thread_active_set(tsd_t *tsd, bool active);
-bool	prof_thread_active_init_get(tsdn_t *tsdn);
-bool	prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
-bool	prof_gdump_get(tsdn_t *tsdn);
-bool	prof_gdump_set(tsdn_t *tsdn, bool active);
-void	prof_boot0(void);
-void	prof_boot1(void);
-bool	prof_boot2(tsd_t *tsd);
-void	prof_prefork0(tsdn_t *tsdn);
-void	prof_prefork1(tsdn_t *tsdn);
-void	prof_postfork_parent(tsdn_t *tsdn);
-void	prof_postfork_child(tsdn_t *tsdn);
-void	prof_sample_threshold_update(prof_tdata_t *tdata);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
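
To make the sampling knobs above concrete: opt_lg_prof_sample defaults to LG_PROF_SAMPLE_DEFAULT (19, see prof_types.h below), so allocations are sampled on average once every 2^19 = 524288 bytes (512 KiB) allocated. prof_interval is likewise derived from opt_lg_prof_interval as 2^opt_lg_prof_interval bytes; the default of -1 (LG_PROF_INTERVAL_DEFAULT) leaves interval-triggered dumps disabled, per jemalloc's documented behavior.
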
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prof_inlines.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prof_inlines.h
deleted file mode 100644
index 394b7b3..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prof_inlines.h
+++ /dev/null
@@ -1,245 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_INLINES_H
-#define JEMALLOC_INTERNAL_PROF_INLINES_H
-
-#ifndef JEMALLOC_ENABLE_INLINE
-bool	prof_active_get_unlocked(void);
-bool	prof_gdump_get_unlocked(void);
-prof_tdata_t	*prof_tdata_get(tsd_t *tsd, bool create);
-prof_tctx_t	*prof_tctx_get(tsdn_t *tsdn, const extent_t *extent,
-    const void *ptr);
-void	prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-    size_t usize, prof_tctx_t *tctx);
-void	prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-    prof_tctx_t *tctx);
-bool	prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
-    prof_tdata_t **tdata_out);
-prof_tctx_t	*prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
-    bool update);
-void	prof_malloc(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-    size_t usize, prof_tctx_t *tctx);
-void	prof_realloc(tsd_t *tsd, extent_t *extent, const void *ptr,
-    size_t usize, prof_tctx_t *tctx, bool prof_active, bool updated,
-    extent_t *old_extent, const void *old_ptr, size_t old_usize,
-    prof_tctx_t *old_tctx);
-void	prof_free(tsd_t *tsd, const extent_t *extent, const void *ptr,
-    size_t usize);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
-JEMALLOC_ALWAYS_INLINE bool
-prof_active_get_unlocked(void)
-{
-	/*
-	 * Even if opt_prof is true, sampling can be temporarily disabled by
-	 * setting prof_active to false.  No locking is used when reading
-	 * prof_active in the fast path, so there are no guarantees regarding
-	 * how long it will take for all threads to notice state changes.
-	 */
-	return (prof_active);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_gdump_get_unlocked(void)
-{
-	/*
-	 * No locking is used when reading prof_gdump_val in the fast path, so
-	 * there are no guarantees regarding how long it will take for all
-	 * threads to notice state changes.
-	 */
-	return (prof_gdump_val);
-}
-
-JEMALLOC_ALWAYS_INLINE prof_tdata_t *
-prof_tdata_get(tsd_t *tsd, bool create)
-{
-	prof_tdata_t *tdata;
-
-	cassert(config_prof);
-
-	tdata = tsd_prof_tdata_get(tsd);
-	if (create) {
-		if (unlikely(tdata == NULL)) {
-			if (tsd_nominal(tsd)) {
-				tdata = prof_tdata_init(tsd);
-				tsd_prof_tdata_set(tsd, tdata);
-			}
-		} else if (unlikely(tdata->expired)) {
-			tdata = prof_tdata_reinit(tsd, tdata);
-			tsd_prof_tdata_set(tsd, tdata);
-		}
-		assert(tdata == NULL || tdata->attached);
-	}
-
-	return (tdata);
-}
-
-JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
-{
-	cassert(config_prof);
-	assert(ptr != NULL);
-
-	return (arena_prof_tctx_get(tsdn, extent, ptr));
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
-    prof_tctx_t *tctx)
-{
-	cassert(config_prof);
-	assert(ptr != NULL);
-
-	arena_prof_tctx_set(tsdn, extent, ptr, usize, tctx);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-    prof_tctx_t *tctx)
-{
-	cassert(config_prof);
-	assert(ptr != NULL);
-
-	arena_prof_tctx_reset(tsdn, extent, ptr, tctx);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
-    prof_tdata_t **tdata_out)
-{
-	prof_tdata_t *tdata;
-
-	cassert(config_prof);
-
-	tdata = prof_tdata_get(tsd, true);
-	if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
-		tdata = NULL;
-
-	if (tdata_out != NULL)
-		*tdata_out = tdata;
-
-	if (unlikely(tdata == NULL))
-		return (true);
-
-	if (likely(tdata->bytes_until_sample >= usize)) {
-		if (update)
-			tdata->bytes_until_sample -= usize;
-		return (true);
-	} else {
-		/* Compute new sample threshold. */
-		if (update)
-			prof_sample_threshold_update(tdata);
-		return (!tdata->active);
-	}
-}
-
-JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
-{
-	prof_tctx_t *ret;
-	prof_tdata_t *tdata;
-	prof_bt_t bt;
-
-	assert(usize == s2u(usize));
-
-	if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
-	    &tdata)))
-		ret = (prof_tctx_t *)(uintptr_t)1U;
-	else {
-		bt_init(&bt, tdata->vec);
-		prof_backtrace(&bt);
-		ret = prof_lookup(tsd, &bt);
-	}
-
-	return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_malloc(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
-    prof_tctx_t *tctx)
-{
-	cassert(config_prof);
-	assert(ptr != NULL);
-	assert(usize == isalloc(tsdn, extent, ptr));
-
-	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
-		prof_malloc_sample_object(tsdn, extent, ptr, usize, tctx);
-	else {
-		prof_tctx_set(tsdn, extent, ptr, usize,
-		    (prof_tctx_t *)(uintptr_t)1U);
-	}
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_realloc(tsd_t *tsd, extent_t *extent, const void *ptr, size_t usize,
-    prof_tctx_t *tctx, bool prof_active, bool updated, extent_t *old_extent,
-    const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx)
-{
-	bool sampled, old_sampled, moved;
-
-	cassert(config_prof);
-	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
-
-	if (prof_active && !updated && ptr != NULL) {
-		assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
-		if (prof_sample_accum_update(tsd, usize, true, NULL)) {
-			/*
-			 * Don't sample.  The usize passed to prof_alloc_prep()
-			 * was larger than what actually got allocated, so a
-			 * backtrace was captured for this allocation, even
-			 * though its actual usize was insufficient to cross the
-			 * sample threshold.
-			 */
-			prof_alloc_rollback(tsd, tctx, true);
-			tctx = (prof_tctx_t *)(uintptr_t)1U;
-		}
-	}
-
-	sampled = ((uintptr_t)tctx > (uintptr_t)1U);
-	old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
-	moved = (ptr != old_ptr);
-
-	if (unlikely(sampled)) {
-		prof_malloc_sample_object(tsd_tsdn(tsd), extent, ptr, usize,
-		    tctx);
-	} else if (moved) {
-		prof_tctx_set(tsd_tsdn(tsd), extent, ptr, usize,
-		    (prof_tctx_t *)(uintptr_t)1U);
-	} else if (unlikely(old_sampled)) {
-		/*
-		 * prof_tctx_set() would work for the !moved case as well, but
-		 * prof_tctx_reset() is slightly cheaper, and the proper thing
-		 * to do here in the presence of explicit knowledge re: moved
-		 * state.
-		 */
-		prof_tctx_reset(tsd_tsdn(tsd), extent, ptr, tctx);
-	} else {
-		assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), extent, ptr) ==
-		    (uintptr_t)1U);
-	}
-
-	/*
-	 * The prof_free_sampled_object() call must come after the
-	 * prof_malloc_sample_object() call, because tctx and old_tctx may be
-	 * the same, in which case reversing the call order could cause the tctx
-	 * to be prematurely destroyed as a side effect of momentarily zeroed
-	 * counters.
-	 */
-	if (unlikely(old_sampled)) {
-		prof_free_sampled_object(tsd, old_usize, old_tctx);
-	}
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_free(tsd_t *tsd, const extent_t *extent, const void *ptr, size_t usize)
-{
-	prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), extent, ptr);
-
-	cassert(config_prof);
-	assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
-
-	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
-		prof_free_sampled_object(tsd, usize, tctx);
-}
-#endif
-
-#endif /* JEMALLOC_INTERNAL_PROF_INLINES_H */
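
The heart of the fast path above is prof_sample_accum_update(): each thread keeps a bytes_until_sample countdown, allocations are charged against it, and only when an allocation overruns the countdown does the slow path capture a backtrace and re-arm the counter via prof_sample_threshold_update(). A minimal sketch of that countdown pattern, with made-up standalone names rather than the deleted functions:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Stands in for the bytes_until_sample field of prof_tdata_t. */
    typedef struct {
        uint64_t bytes_until_sample;
    } sample_state_t;

    /* Charge an allocation of usize bytes against the countdown.
     * Returns true if this allocation should be sampled. */
    static bool
    should_sample(sample_state_t *s, size_t usize, uint64_t next_threshold)
    {
        if (s->bytes_until_sample >= usize) {
            /* Fast path: just consume part of the countdown. */
            s->bytes_until_sample -= usize;
            return false;
        }
        /* Slow path: re-arm the countdown (the real code draws the next
         * threshold from the PRNG in prof_sample_threshold_update()) and
         * tell the caller to capture a backtrace. */
        s->bytes_until_sample = next_threshold;
        return true;
    }
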
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prof_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prof_structs.h
deleted file mode 100644
index caae125..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prof_structs.h
+++ /dev/null
@@ -1,187 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
-#define JEMALLOC_INTERNAL_PROF_STRUCTS_H
-
-struct prof_bt_s {
-	/* Backtrace, stored as len program counters. */
-	void		**vec;
-	unsigned	len;
-};
-
-#ifdef JEMALLOC_PROF_LIBGCC
-/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
-typedef struct {
-	prof_bt_t	*bt;
-	unsigned	max;
-} prof_unwind_data_t;
-#endif
-
-struct prof_cnt_s {
-	/* Profiling counters. */
-	uint64_t	curobjs;
-	uint64_t	curbytes;
-	uint64_t	accumobjs;
-	uint64_t	accumbytes;
-};
-
-typedef enum {
-	prof_tctx_state_initializing,
-	prof_tctx_state_nominal,
-	prof_tctx_state_dumping,
-	prof_tctx_state_purgatory /* Dumper must finish destroying. */
-} prof_tctx_state_t;
-
-struct prof_tctx_s {
-	/* Thread data for thread that performed the allocation. */
-	prof_tdata_t		*tdata;
-
-	/*
-	 * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
-	 * defunct during teardown.
-	 */
-	uint64_t		thr_uid;
-	uint64_t		thr_discrim;
-
-	/* Profiling counters, protected by tdata->lock. */
-	prof_cnt_t		cnts;
-
-	/* Associated global context. */
-	prof_gctx_t		*gctx;
-
-	/*
-	 * UID that distinguishes multiple tctx's created by the same thread,
-	 * but coexisting in gctx->tctxs.  There are two ways that such
-	 * coexistence can occur:
-	 * - A dumper thread can cause a tctx to be retained in the purgatory
-	 *   state.
-	 * - Although a single "producer" thread must create all tctx's which
-	 *   share the same thr_uid, multiple "consumers" can each concurrently
-	 *   execute portions of prof_tctx_destroy().  prof_tctx_destroy() only
-	 *   gets called once each time cnts.cur{objs,bytes} drop to 0, but this
-	 *   threshold can be hit again before the first consumer finishes
-	 *   executing prof_tctx_destroy().
-	 */
-	uint64_t		tctx_uid;
-
-	/* Linkage into gctx's tctxs. */
-	rb_node(prof_tctx_t)	tctx_link;
-
-	/*
-	 * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
-	 * sample vs destroy race.
-	 */
-	bool			prepared;
-
-	/* Current dump-related state, protected by gctx->lock. */
-	prof_tctx_state_t	state;
-
-	/*
-	 * Copy of cnts snapshotted during early dump phase, protected by
-	 * dump_mtx.
-	 */
-	prof_cnt_t		dump_cnts;
-};
-typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
-
-struct prof_gctx_s {
-	/* Protects nlimbo, cnt_summed, and tctxs. */
-	malloc_mutex_t		*lock;
-
-	/*
-	 * Number of threads that currently cause this gctx to be in a state of
-	 * limbo due to one of:
-	 *   - Initializing this gctx.
-	 *   - Initializing per thread counters associated with this gctx.
-	 *   - Preparing to destroy this gctx.
-	 *   - Dumping a heap profile that includes this gctx.
-	 * nlimbo must be 1 (single destroyer) in order to safely destroy the
-	 * gctx.
-	 */
-	unsigned		nlimbo;
-
-	/*
-	 * Tree of profile counters, one for each thread that has allocated in
-	 * this context.
-	 */
-	prof_tctx_tree_t	tctxs;
-
-	/* Linkage for tree of contexts to be dumped. */
-	rb_node(prof_gctx_t)	dump_link;
-
-	/* Temporary storage for summation during dump. */
-	prof_cnt_t		cnt_summed;
-
-	/* Associated backtrace. */
-	prof_bt_t		bt;
-
-	/* Backtrace vector, variable size, referred to by bt. */
-	void			*vec[1];
-};
-typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
-
-struct prof_tdata_s {
-	malloc_mutex_t		*lock;
-
-	/* Monotonically increasing unique thread identifier. */
-	uint64_t		thr_uid;
-
-	/*
-	 * Monotonically increasing discriminator among tdata structures
-	 * associated with the same thr_uid.
-	 */
-	uint64_t		thr_discrim;
-
-	/* Included in heap profile dumps if non-NULL. */
-	char			*thread_name;
-
-	bool			attached;
-	bool			expired;
-
-	rb_node(prof_tdata_t)	tdata_link;
-
-	/*
-	 * Counter used to initialize prof_tctx_t's tctx_uid.  No locking is
-	 * necessary when incrementing this field, because only one thread ever
-	 * does so.
-	 */
-	uint64_t		tctx_uid_next;
-
-	/*
-	 * Hash of (prof_bt_t *)-->(prof_tctx_t *).  Each thread tracks
-	 * backtraces for which it has non-zero allocation/deallocation counters
-	 * associated with thread-specific prof_tctx_t objects.  Other threads
-	 * may write to prof_tctx_t contents when freeing associated objects.
-	 */
-	ckh_t			bt2tctx;
-
-	/* Sampling state. */
-	uint64_t		prng_state;
-	uint64_t		bytes_until_sample;
-
-	/* State used to avoid dumping while operating on prof internals. */
-	bool			enq;
-	bool			enq_idump;
-	bool			enq_gdump;
-
-	/*
-	 * Set to true during an early dump phase for tdata's which are
-	 * currently being dumped.  New threads' tdata's have this initialized
-	 * to false so that they aren't accidentally included in later dump
-	 * phases.
-	 */
-	bool			dumping;
-
-	/*
-	 * True if profiling is active for this tdata's thread
-	 * (thread.prof.active mallctl).
-	 */
-	bool			active;
-
-	/* Temporary storage for summation during dump. */
-	prof_cnt_t		cnt_summed;
-
-	/* Backtrace vector, used for calls to prof_backtrace(). */
-	void			*vec[PROF_BT_MAX];
-};
-typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
-
-#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prof_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prof_types.h
deleted file mode 100644
index e1eb7fb..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/prof_types.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H
-#define JEMALLOC_INTERNAL_PROF_TYPES_H
-
-typedef struct prof_bt_s prof_bt_t;
-typedef struct prof_cnt_s prof_cnt_t;
-typedef struct prof_tctx_s prof_tctx_t;
-typedef struct prof_gctx_s prof_gctx_t;
-typedef struct prof_tdata_s prof_tdata_t;
-
-/* Option defaults. */
-#ifdef JEMALLOC_PROF
-#  define PROF_PREFIX_DEFAULT		"jeprof"
-#else
-#  define PROF_PREFIX_DEFAULT		""
-#endif
-#define	LG_PROF_SAMPLE_DEFAULT		19
-#define	LG_PROF_INTERVAL_DEFAULT	-1
-
-/*
- * Hard limit on stack backtrace depth.  The version of prof_backtrace() that
- * is based on __builtin_return_address() necessarily has a hard-coded number
- * of backtrace frame handlers, and should be kept in sync with this setting.
- */
-#define	PROF_BT_MAX			128
-
-/* Initial hash table size. */
-#define	PROF_CKH_MINITEMS		64
-
-/* Size of memory buffer to use when writing dump files. */
-#define	PROF_DUMP_BUFSIZE		65536
-
-/* Size of stack-allocated buffer used by prof_printf(). */
-#define	PROF_PRINTF_BUFSIZE		128
-
-/*
- * Number of mutexes shared among all gctx's.  No space is allocated for these
- * unless profiling is enabled, so it's okay to over-provision.
- */
-#define	PROF_NCTX_LOCKS			1024
-
-/*
- * Number of mutexes shared among all tdata's.  No space is allocated for these
- * unless profiling is enabled, so it's okay to over-provision.
- */
-#define	PROF_NTDATA_LOCKS		256
-
-/*
- * prof_tdata pointers close to NULL are used to encode state information that
- * is used for cleaning up during thread shutdown.
- */
-#define	PROF_TDATA_STATE_REINCARNATED	((prof_tdata_t *)(uintptr_t)1)
-#define	PROF_TDATA_STATE_PURGATORY	((prof_tdata_t *)(uintptr_t)2)
-#define	PROF_TDATA_STATE_MAX		PROF_TDATA_STATE_PURGATORY
-
-#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
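
The PROF_TDATA_STATE_* values above use the familiar tagged-sentinel trick: on targets where addresses 1 and 2 can never be valid heap pointers, they can be stored in a prof_tdata_t * slot to encode teardown state, and callers such as prof_sample_accum_update() filter them out with a single comparison. A tiny illustration with generic names:

    #include <stdint.h>

    typedef struct tdata_s tdata_t;    /* stand-in for prof_tdata_t */

    #define STATE_REINCARNATED ((tdata_t *)(uintptr_t)1)
    #define STATE_PURGATORY    ((tdata_t *)(uintptr_t)2)
    #define STATE_MAX          STATE_PURGATORY

    /* A real pointer always compares above the sentinels. */
    static int
    is_usable(const tdata_t *tdata)
    {
        return ((uintptr_t)tdata > (uintptr_t)STATE_MAX);
    }
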
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/public_namespace.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/public_namespace.h
deleted file mode 100644
index c43cb615..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/public_namespace.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#define	je_malloc_conf JEMALLOC_N(malloc_conf)
-#define	je_malloc_message JEMALLOC_N(malloc_message)
-#define	je_malloc JEMALLOC_N(malloc)
-#define	je_calloc JEMALLOC_N(calloc)
-#define	je_posix_memalign JEMALLOC_N(posix_memalign)
-#define	je_aligned_alloc JEMALLOC_N(aligned_alloc)
-#define	je_realloc JEMALLOC_N(realloc)
-#define	je_free JEMALLOC_N(free)
-#define	je_mallocx JEMALLOC_N(mallocx)
-#define	je_rallocx JEMALLOC_N(rallocx)
-#define	je_xallocx JEMALLOC_N(xallocx)
-#define	je_sallocx JEMALLOC_N(sallocx)
-#define	je_dallocx JEMALLOC_N(dallocx)
-#define	je_sdallocx JEMALLOC_N(sdallocx)
-#define	je_nallocx JEMALLOC_N(nallocx)
-#define	je_mallctl JEMALLOC_N(mallctl)
-#define	je_mallctlnametomib JEMALLOC_N(mallctlnametomib)
-#define	je_mallctlbymib JEMALLOC_N(mallctlbymib)
-#define	je_malloc_stats_print JEMALLOC_N(malloc_stats_print)
-#define	je_malloc_usable_size JEMALLOC_N(malloc_usable_size)
-#define	je_memalign JEMALLOC_N(memalign)
-#define	je_valloc JEMALLOC_N(valloc)
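
These mappings route every public entry point through the JEMALLOC_N() name-mangling macro. The exact expansion is configure-dependent, but as a purely hypothetical example of the mechanism:

    /* Hypothetical definition; the real one comes from the configure step. */
    #define JEMALLOC_N(n) prefix_##n
    /* With that in place, internal references to je_malloc_conf resolve to
     * the exported symbol prefix_malloc_conf. */

This is how the same sources can expose either the plain libc names or a prefixed API, depending on how the prefix macro is configured.
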
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/public_namespace.sh b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/public_namespace.sh
deleted file mode 100755
index 362109f..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/public_namespace.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-
-for nm in `cat $1` ; do
-  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
-  echo "#define	je_${n} JEMALLOC_N(${n})"
-done
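
The generator above reads the name:mangled pairs from public_symbols.txt (deleted next) and emits exactly the lines seen in public_namespace.h; for example the input line mallctl:mallctl produces

    #define	je_mallctl JEMALLOC_N(mallctl)
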
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/public_symbols.txt b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/public_symbols.txt
deleted file mode 100644
index b999d8d..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/public_symbols.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-malloc_conf:malloc_conf
-malloc_message:malloc_message
-malloc:malloc
-calloc:calloc
-posix_memalign:posix_memalign
-aligned_alloc:aligned_alloc
-realloc:realloc
-free:free
-mallocx:mallocx
-rallocx:rallocx
-xallocx:xallocx
-sallocx:sallocx
-dallocx:dallocx
-sdallocx:sdallocx
-nallocx:nallocx
-mallctl:mallctl
-mallctlnametomib:mallctlnametomib
-mallctlbymib:mallctlbymib
-malloc_stats_print:malloc_stats_print
-malloc_usable_size:malloc_usable_size
-memalign:memalign
-valloc:valloc
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/public_unnamespace.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/public_unnamespace.h
deleted file mode 100644
index 46819485..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/public_unnamespace.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#undef je_malloc_conf
-#undef je_malloc_message
-#undef je_malloc
-#undef je_calloc
-#undef je_posix_memalign
-#undef je_aligned_alloc
-#undef je_realloc
-#undef je_free
-#undef je_mallocx
-#undef je_rallocx
-#undef je_xallocx
-#undef je_sallocx
-#undef je_dallocx
-#undef je_sdallocx
-#undef je_nallocx
-#undef je_mallctl
-#undef je_mallctlnametomib
-#undef je_mallctlbymib
-#undef je_malloc_stats_print
-#undef je_malloc_usable_size
-#undef je_memalign
-#undef je_valloc
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/public_unnamespace.sh b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/public_unnamespace.sh
deleted file mode 100755
index 4239d177..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/public_unnamespace.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-
-for nm in `cat $1` ; do
-  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
-  echo "#undef je_${n}"
-done
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ql.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ql.h
deleted file mode 100644
index 424485c..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ql.h
+++ /dev/null
@@ -1,86 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_QL_H
-#define JEMALLOC_INTERNAL_QL_H
-
-/* List definitions. */
-#define	ql_head(a_type)							\
-struct {								\
-	a_type *qlh_first;						\
-}
-
-#define	ql_head_initializer(a_head) {NULL}
-
-#define	ql_elm(a_type)	qr(a_type)
-
-/* List functions. */
-#define	ql_new(a_head) do {						\
-	(a_head)->qlh_first = NULL;					\
-} while (0)
-
-#define	ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
-
-#define	ql_first(a_head) ((a_head)->qlh_first)
-
-#define	ql_last(a_head, a_field)					\
-	((ql_first(a_head) != NULL)					\
-	    ? qr_prev(ql_first(a_head), a_field) : NULL)
-
-#define	ql_next(a_head, a_elm, a_field)					\
-	((ql_last(a_head, a_field) != (a_elm))				\
-	    ? qr_next((a_elm), a_field)	: NULL)
-
-#define	ql_prev(a_head, a_elm, a_field)					\
-	((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field)	\
-				       : NULL)
-
-#define	ql_before_insert(a_head, a_qlelm, a_elm, a_field) do {		\
-	qr_before_insert((a_qlelm), (a_elm), a_field);			\
-	if (ql_first(a_head) == (a_qlelm)) {				\
-		ql_first(a_head) = (a_elm);				\
-	}								\
-} while (0)
-
-#define	ql_after_insert(a_qlelm, a_elm, a_field)			\
-	qr_after_insert((a_qlelm), (a_elm), a_field)
-
-#define	ql_head_insert(a_head, a_elm, a_field) do {			\
-	if (ql_first(a_head) != NULL) {					\
-		qr_before_insert(ql_first(a_head), (a_elm), a_field);	\
-	}								\
-	ql_first(a_head) = (a_elm);					\
-} while (0)
-
-#define	ql_tail_insert(a_head, a_elm, a_field) do {			\
-	if (ql_first(a_head) != NULL) {					\
-		qr_before_insert(ql_first(a_head), (a_elm), a_field);	\
-	}								\
-	ql_first(a_head) = qr_next((a_elm), a_field);			\
-} while (0)
-
-#define	ql_remove(a_head, a_elm, a_field) do {				\
-	if (ql_first(a_head) == (a_elm)) {				\
-		ql_first(a_head) = qr_next(ql_first(a_head), a_field);	\
-	}								\
-	if (ql_first(a_head) != (a_elm)) {				\
-		qr_remove((a_elm), a_field);				\
-	} else {							\
-		ql_first(a_head) = NULL;				\
-	}								\
-} while (0)
-
-#define	ql_head_remove(a_head, a_type, a_field) do {			\
-	a_type *t = ql_first(a_head);					\
-	ql_remove((a_head), t, a_field);				\
-} while (0)
-
-#define	ql_tail_remove(a_head, a_type, a_field) do {			\
-	a_type *t = ql_last(a_head, a_field);				\
-	ql_remove((a_head), t, a_field);				\
-} while (0)
-
-#define	ql_foreach(a_var, a_head, a_field)				\
-	qr_foreach((a_var), ql_first(a_head), a_field)
-
-#define	ql_reverse_foreach(a_var, a_head, a_field)			\
-	qr_reverse_foreach((a_var), ql_first(a_head), a_field)
-
-#endif /* JEMALLOC_INTERNAL_QL_H */
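
ql.h layers a conventional head-pointer list API on top of the intrusive ring macros in qr.h (deleted next); nodes embed their own linkage, so the list code never allocates. A small usage sketch, assuming both deleted headers were still on the include path:

    #include <stdio.h>

    #include "qr.h"    /* ring primitives used by ql.h */
    #include "ql.h"

    typedef struct widget_s widget_t;
    struct widget_s {
        int id;
        ql_elm(widget_t) link;    /* embedded linkage */
    };

    int
    main(void)
    {
        ql_head(widget_t) list;
        widget_t a, b, *w;

        a.id = 1;
        b.id = 2;
        ql_new(&list);
        ql_elm_new(&a, link);
        ql_elm_new(&b, link);
        ql_tail_insert(&list, &a, link);
        ql_tail_insert(&list, &b, link);
        ql_foreach(w, &list, link) {
            printf("%d\n", w->id);    /* prints 1 then 2 */
        }
        return 0;
    }
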
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/qr.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/qr.h
deleted file mode 100644
index 06dfdafd..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/qr.h
+++ /dev/null
@@ -1,74 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_QR_H
-#define JEMALLOC_INTERNAL_QR_H
-
-/* Ring definitions. */
-#define	qr(a_type)							\
-struct {								\
-	a_type	*qre_next;						\
-	a_type	*qre_prev;						\
-}
-
-/* Ring functions. */
-#define	qr_new(a_qr, a_field) do {					\
-	(a_qr)->a_field.qre_next = (a_qr);				\
-	(a_qr)->a_field.qre_prev = (a_qr);				\
-} while (0)
-
-#define	qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
-
-#define	qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
-
-#define	qr_before_insert(a_qrelm, a_qr, a_field) do {			\
-	(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev;		\
-	(a_qr)->a_field.qre_next = (a_qrelm);				\
-	(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr);		\
-	(a_qrelm)->a_field.qre_prev = (a_qr);				\
-} while (0)
-
-#define	qr_after_insert(a_qrelm, a_qr, a_field)				\
-    do									\
-    {									\
-	(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next;		\
-	(a_qr)->a_field.qre_prev = (a_qrelm);				\
-	(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr);		\
-	(a_qrelm)->a_field.qre_next = (a_qr);				\
-    } while (0)
-
-#define	qr_meld(a_qr_a, a_qr_b, a_type, a_field) do {			\
-	a_type *t;							\
-	(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b);	\
-	(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a);	\
-	t = (a_qr_a)->a_field.qre_prev;					\
-	(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev;	\
-	(a_qr_b)->a_field.qre_prev = t;					\
-} while (0)
-
-/*
- * qr_meld() and qr_split() are functionally equivalent, so there's no need to
- * have two copies of the code.
- */
-#define	qr_split(a_qr_a, a_qr_b, a_type, a_field)			\
-	qr_meld((a_qr_a), (a_qr_b), a_type, a_field)
-
-#define	qr_remove(a_qr, a_field) do {					\
-	(a_qr)->a_field.qre_prev->a_field.qre_next			\
-	    = (a_qr)->a_field.qre_next;					\
-	(a_qr)->a_field.qre_next->a_field.qre_prev			\
-	    = (a_qr)->a_field.qre_prev;					\
-	(a_qr)->a_field.qre_next = (a_qr);				\
-	(a_qr)->a_field.qre_prev = (a_qr);				\
-} while (0)
-
-#define	qr_foreach(var, a_qr, a_field)					\
-	for ((var) = (a_qr);						\
-	    (var) != NULL;						\
-	    (var) = (((var)->a_field.qre_next != (a_qr))		\
-	    ? (var)->a_field.qre_next : NULL))
-
-#define	qr_reverse_foreach(var, a_qr, a_field)				\
-	for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL;	\
-	    (var) != NULL;						\
-	    (var) = (((var) != (a_qr))					\
-	    ? (var)->a_field.qre_prev : NULL))
-
-#endif /* JEMALLOC_INTERNAL_QR_H */
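
qr.h is the intrusive circular list underneath ql.h: every element is always on some ring (initially a ring of one), and qr_meld()/qr_split() splice rings in O(1) with no head structure at all. A small sketch, again assuming the deleted header were available:

    #include <stdio.h>

    #include "qr.h"

    typedef struct node_s node_t;
    struct node_s {
        char name;
        qr(node_t) link;
    };

    int
    main(void)
    {
        node_t a, b, c, *n;

        a.name = 'a';
        b.name = 'b';
        c.name = 'c';
        qr_new(&a, link);
        qr_new(&b, link);
        qr_new(&c, link);
        qr_after_insert(&a, &b, link);    /* ring: a -> b -> a */
        qr_meld(&a, &c, node_t, link);    /* splice in c: a -> b -> c -> a */
        qr_foreach(n, &a, link) {
            putchar(n->name);    /* prints "abc" */
        }
        putchar('\n');
        return 0;
    }
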
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/rb.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/rb.h
deleted file mode 100644
index 3770342..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/rb.h
+++ /dev/null
@@ -1,1003 +0,0 @@
-/*-
- *******************************************************************************
- *
- * cpp macro implementation of left-leaning 2-3 red-black trees.  Parent
- * pointers are not used, and color bits are stored in the least significant
- * bit of right-child pointers (if RB_COMPACT is defined), thus making node
- * linkage as compact as is possible for red-black trees.
- *
- * Usage:
- *
- *   #include <stdint.h>
- *   #include <stdbool.h>
- *   #define NDEBUG // (Optional, see assert(3).)
- *   #include <assert.h>
- *   #define RB_COMPACT // (Optional, embed color bits in right-child pointers.)
- *   #include <rb.h>
- *   ...
- *
- *******************************************************************************
- */
-
-#ifndef RB_H_
-#define	RB_H_
-
-#ifdef RB_COMPACT
-/* Node structure. */
-#define	rb_node(a_type)							\
-struct {								\
-    a_type *rbn_left;							\
-    a_type *rbn_right_red;						\
-}
-#else
-#define	rb_node(a_type)							\
-struct {								\
-    a_type *rbn_left;							\
-    a_type *rbn_right;							\
-    bool rbn_red;							\
-}
-#endif
-
-/* Root structure. */
-#define	rb_tree(a_type)							\
-struct {								\
-    a_type *rbt_root;							\
-}
-
-/* Left accessors. */
-#define	rbtn_left_get(a_type, a_field, a_node)				\
-    ((a_node)->a_field.rbn_left)
-#define	rbtn_left_set(a_type, a_field, a_node, a_left) do {		\
-    (a_node)->a_field.rbn_left = a_left;				\
-} while (0)
-
-#ifdef RB_COMPACT
-/* Right accessors. */
-#define	rbtn_right_get(a_type, a_field, a_node)				\
-    ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red)		\
-      & ((ssize_t)-2)))
-#define	rbtn_right_set(a_type, a_field, a_node, a_right) do {		\
-    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right)	\
-      | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1)));	\
-} while (0)
-
-/* Color accessors. */
-#define	rbtn_red_get(a_type, a_field, a_node)				\
-    ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red)		\
-      & ((size_t)1)))
-#define	rbtn_color_set(a_type, a_field, a_node, a_red) do {		\
-    (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t)		\
-      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2))			\
-      | ((ssize_t)a_red));						\
-} while (0)
-#define	rbtn_red_set(a_type, a_field, a_node) do {			\
-    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t)		\
-      (a_node)->a_field.rbn_right_red) | ((size_t)1));			\
-} while (0)
-#define	rbtn_black_set(a_type, a_field, a_node) do {			\
-    (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t)		\
-      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2));		\
-} while (0)
-
-/* Node initializer. */
-#define	rbt_node_new(a_type, a_field, a_rbt, a_node) do {		\
-    /* Bookkeeping bit cannot be used by node pointer. */		\
-    assert(((uintptr_t)(a_node) & 0x1) == 0);				\
-    rbtn_left_set(a_type, a_field, (a_node), NULL);	\
-    rbtn_right_set(a_type, a_field, (a_node), NULL);	\
-    rbtn_red_set(a_type, a_field, (a_node));				\
-} while (0)
-#else
-/* Right accessors. */
-#define	rbtn_right_get(a_type, a_field, a_node)				\
-    ((a_node)->a_field.rbn_right)
-#define	rbtn_right_set(a_type, a_field, a_node, a_right) do {		\
-    (a_node)->a_field.rbn_right = a_right;				\
-} while (0)
-
-/* Color accessors. */
-#define	rbtn_red_get(a_type, a_field, a_node)				\
-    ((a_node)->a_field.rbn_red)
-#define	rbtn_color_set(a_type, a_field, a_node, a_red) do {		\
-    (a_node)->a_field.rbn_red = (a_red);				\
-} while (0)
-#define	rbtn_red_set(a_type, a_field, a_node) do {			\
-    (a_node)->a_field.rbn_red = true;					\
-} while (0)
-#define	rbtn_black_set(a_type, a_field, a_node) do {			\
-    (a_node)->a_field.rbn_red = false;					\
-} while (0)
-
-/* Node initializer. */
-#define	rbt_node_new(a_type, a_field, a_rbt, a_node) do {		\
-    rbtn_left_set(a_type, a_field, (a_node), NULL);	\
-    rbtn_right_set(a_type, a_field, (a_node), NULL);	\
-    rbtn_red_set(a_type, a_field, (a_node));				\
-} while (0)
-#endif
-
-/* Tree initializer. */
-#define	rb_new(a_type, a_field, a_rbt) do {				\
-    (a_rbt)->rbt_root = NULL;						\
-} while (0)
-
-/* Internal utility macros. */
-#define	rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do {		\
-    (r_node) = (a_root);						\
-    if ((r_node) != NULL) {						\
-	for (;								\
-	  rbtn_left_get(a_type, a_field, (r_node)) != NULL;		\
-	  (r_node) = rbtn_left_get(a_type, a_field, (r_node))) {	\
-	}								\
-    }									\
-} while (0)
-
-#define	rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do {		\
-    (r_node) = (a_root);						\
-    if ((r_node) != NULL) {						\
-	for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL;	\
-	  (r_node) = rbtn_right_get(a_type, a_field, (r_node))) {	\
-	}								\
-    }									\
-} while (0)
-
-#define	rbtn_rotate_left(a_type, a_field, a_node, r_node) do {		\
-    (r_node) = rbtn_right_get(a_type, a_field, (a_node));		\
-    rbtn_right_set(a_type, a_field, (a_node),				\
-      rbtn_left_get(a_type, a_field, (r_node)));			\
-    rbtn_left_set(a_type, a_field, (r_node), (a_node));			\
-} while (0)
-
-#define	rbtn_rotate_right(a_type, a_field, a_node, r_node) do {		\
-    (r_node) = rbtn_left_get(a_type, a_field, (a_node));		\
-    rbtn_left_set(a_type, a_field, (a_node),				\
-      rbtn_right_get(a_type, a_field, (r_node)));			\
-    rbtn_right_set(a_type, a_field, (r_node), (a_node));		\
-} while (0)
-
-/*
- * The rb_proto() macro generates function prototypes that correspond to the
- * functions generated by an equivalently parameterized call to rb_gen().
- */
-
-#define	rb_proto(a_attr, a_prefix, a_rbt_type, a_type)			\
-a_attr void								\
-a_prefix##new(a_rbt_type *rbtree);					\
-a_attr bool								\
-a_prefix##empty(a_rbt_type *rbtree);					\
-a_attr a_type *								\
-a_prefix##first(a_rbt_type *rbtree);					\
-a_attr a_type *								\
-a_prefix##last(a_rbt_type *rbtree);					\
-a_attr a_type *								\
-a_prefix##next(a_rbt_type *rbtree, a_type *node);			\
-a_attr a_type *								\
-a_prefix##prev(a_rbt_type *rbtree, a_type *node);			\
-a_attr a_type *								\
-a_prefix##search(a_rbt_type *rbtree, const a_type *key);		\
-a_attr a_type *								\
-a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key);		\
-a_attr a_type *								\
-a_prefix##psearch(a_rbt_type *rbtree, const a_type *key);		\
-a_attr void								\
-a_prefix##insert(a_rbt_type *rbtree, a_type *node);			\
-a_attr void								\
-a_prefix##remove(a_rbt_type *rbtree, a_type *node);			\
-a_attr a_type *								\
-a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)(	\
-  a_rbt_type *, a_type *, void *), void *arg);				\
-a_attr a_type *								\
-a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start,		\
-  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);		\
-a_attr void								\
-a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *),	\
-  void *arg);
-
-/*
- * The rb_gen() macro generates a type-specific red-black tree implementation,
- * based on the above cpp macros.
- *
- * Arguments:
- *
- *   a_attr    : Function attribute for generated functions (ex: static).
- *   a_prefix  : Prefix for generated functions (ex: ex_).
- *   a_rb_type : Type for red-black tree data structure (ex: ex_t).
- *   a_type    : Type for red-black tree node data structure (ex: ex_node_t).
- *   a_field   : Name of red-black tree node linkage (ex: ex_link).
- *   a_cmp     : Node comparison function name, with the following prototype:
- *                 int (a_cmp *)(a_type *a_node, a_type *a_other);
- *                                       ^^^^^^
- *                                    or a_key
- *               Interpretation of comparison function return values:
- *                 -1 : a_node <  a_other
- *                  0 : a_node == a_other
- *                  1 : a_node >  a_other
- *               In all cases, the a_node or a_key macro argument is the first
- *               argument to the comparison function, which makes it possible
- *               to write comparison functions that treat the first argument
- *               specially.
- *
- * Assuming the following setup:
- *
- *   typedef struct ex_node_s ex_node_t;
- *   struct ex_node_s {
- *       rb_node(ex_node_t) ex_link;
- *   };
- *   typedef rb_tree(ex_node_t) ex_t;
- *   rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)
- *
- * The following API is generated:
- *
- *   static void
- *   ex_new(ex_t *tree);
- *       Description: Initialize a red-black tree structure.
- *       Args:
- *         tree: Pointer to an uninitialized red-black tree object.
- *
- *   static bool
- *   ex_empty(ex_t *tree);
- *       Description: Determine whether tree is empty.
- *       Args:
- *         tree: Pointer to an initialized red-black tree object.
- *       Ret: True if tree is empty, false otherwise.
- *
- *   static ex_node_t *
- *   ex_first(ex_t *tree);
- *   static ex_node_t *
- *   ex_last(ex_t *tree);
- *       Description: Get the first/last node in tree.
- *       Args:
- *         tree: Pointer to an initialized red-black tree object.
- *       Ret: First/last node in tree, or NULL if tree is empty.
- *
- *   static ex_node_t *
- *   ex_next(ex_t *tree, ex_node_t *node);
- *   static ex_node_t *
- *   ex_prev(ex_t *tree, ex_node_t *node);
- *       Description: Get node's successor/predecessor.
- *       Args:
- *         tree: Pointer to an initialized red-black tree object.
- *         node: A node in tree.
- *       Ret: node's successor/predecessor in tree, or NULL if node is
- *            last/first.
- *
- *   static ex_node_t *
- *   ex_search(ex_t *tree, const ex_node_t *key);
- *       Description: Search for node that matches key.
- *       Args:
- *         tree: Pointer to an initialized red-black tree object.
- *         key : Search key.
- *       Ret: Node in tree that matches key, or NULL if no match.
- *
- *   static ex_node_t *
- *   ex_nsearch(ex_t *tree, const ex_node_t *key);
- *   static ex_node_t *
- *   ex_psearch(ex_t *tree, const ex_node_t *key);
- *       Description: Search for node that matches key.  If no match is found,
- *                    return what would be key's successor/predecessor, were
- *                    key in tree.
- *       Args:
- *         tree: Pointer to an initialized red-black tree object.
- *         key : Search key.
- *       Ret: Node in tree that matches key, or if no match, hypothetical node's
- *            successor/predecessor (NULL if no successor/predecessor).
- *
- *   static void
- *   ex_insert(ex_t *tree, ex_node_t *node);
- *       Description: Insert node into tree.
- *       Args:
- *         tree: Pointer to an initialized red-black tree object.
- *         node: Node to be inserted into tree.
- *
- *   static void
- *   ex_remove(ex_t *tree, ex_node_t *node);
- *       Description: Remove node from tree.
- *       Args:
- *         tree: Pointer to an initialized red-black tree object.
- *         node: Node in tree to be removed.
- *
- *   static ex_node_t *
- *   ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
- *     ex_node_t *, void *), void *arg);
- *   static ex_node_t *
- *   ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *,
- *     ex_node_t *, void *), void *arg);
- *       Description: Iterate forward/backward over tree, starting at node.  If
- *                    tree is modified, iteration must be immediately
- *                    terminated by the callback function that causes the
- *                    modification.
- *       Args:
- *         tree : Pointer to an initialized red-black tree object.
- *         start: Node at which to start iteration, or NULL to start at
- *                first/last node.
- *         cb   : Callback function, which is called for each node during
- *                iteration.  Under normal circumstances the callback function
- *                should return NULL, which causes iteration to continue.  If a
- *                callback function returns non-NULL, iteration is immediately
- *                terminated and the non-NULL return value is returned by the
- *                iterator.  This is useful for re-starting iteration after
- *                modifying tree.
- *         arg  : Opaque pointer passed to cb().
- *       Ret: NULL if iteration completed, or the non-NULL callback return value
- *            that caused termination of the iteration.
- *
- *   static void
- *   ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
- *       Description: Iterate over the tree with post-order traversal, remove
- *                    each node, and run the callback if non-null.  This is
- *                    used for destroying a tree without paying the cost to
- *                    rebalance it.  The tree must not be otherwise altered
- *                    during traversal.
- *       Args:
- *         tree: Pointer to an initialized red-black tree object.
- *         cb  : Callback function, which, if non-null, is called for each node
- *               during iteration.  There is no way to stop iteration once it
- *               has begun.
- *         arg : Opaque pointer passed to cb().
- */
-#define	rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp)	\
-a_attr void								\
-a_prefix##new(a_rbt_type *rbtree) {					\
-    rb_new(a_type, a_field, rbtree);					\
-}									\
-a_attr bool								\
-a_prefix##empty(a_rbt_type *rbtree) {					\
-    return (rbtree->rbt_root == NULL);					\
-}									\
-a_attr a_type *								\
-a_prefix##first(a_rbt_type *rbtree) {					\
-    a_type *ret;							\
-    rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret);		\
-    return (ret);							\
-}									\
-a_attr a_type *								\
-a_prefix##last(a_rbt_type *rbtree) {					\
-    a_type *ret;							\
-    rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret);		\
-    return (ret);							\
-}									\
-a_attr a_type *								\
-a_prefix##next(a_rbt_type *rbtree, a_type *node) {			\
-    a_type *ret;							\
-    if (rbtn_right_get(a_type, a_field, node) != NULL) {		\
-	rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type,	\
-	  a_field, node), ret);						\
-    } else {								\
-	a_type *tnode = rbtree->rbt_root;				\
-	assert(tnode != NULL);						\
-	ret = NULL;							\
-	while (true) {							\
-	    int cmp = (a_cmp)(node, tnode);				\
-	    if (cmp < 0) {						\
-		ret = tnode;						\
-		tnode = rbtn_left_get(a_type, a_field, tnode);		\
-	    } else if (cmp > 0) {					\
-		tnode = rbtn_right_get(a_type, a_field, tnode);		\
-	    } else {							\
-		break;							\
-	    }								\
-	    assert(tnode != NULL);					\
-	}								\
-    }									\
-    return (ret);							\
-}									\
-a_attr a_type *								\
-a_prefix##prev(a_rbt_type *rbtree, a_type *node) {			\
-    a_type *ret;							\
-    if (rbtn_left_get(a_type, a_field, node) != NULL) {			\
-	rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type,	\
-	  a_field, node), ret);						\
-    } else {								\
-	a_type *tnode = rbtree->rbt_root;				\
-	assert(tnode != NULL);						\
-	ret = NULL;							\
-	while (true) {							\
-	    int cmp = (a_cmp)(node, tnode);				\
-	    if (cmp < 0) {						\
-		tnode = rbtn_left_get(a_type, a_field, tnode);		\
-	    } else if (cmp > 0) {					\
-		ret = tnode;						\
-		tnode = rbtn_right_get(a_type, a_field, tnode);		\
-	    } else {							\
-		break;							\
-	    }								\
-	    assert(tnode != NULL);					\
-	}								\
-    }									\
-    return (ret);							\
-}									\
-a_attr a_type *								\
-a_prefix##search(a_rbt_type *rbtree, const a_type *key) {		\
-    a_type *ret;							\
-    int cmp;								\
-    ret = rbtree->rbt_root;						\
-    while (ret != NULL							\
-      && (cmp = (a_cmp)(key, ret)) != 0) {				\
-	if (cmp < 0) {							\
-	    ret = rbtn_left_get(a_type, a_field, ret);			\
-	} else {							\
-	    ret = rbtn_right_get(a_type, a_field, ret);			\
-	}								\
-    }									\
-    return (ret);							\
-}									\
-a_attr a_type *								\
-a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) {		\
-    a_type *ret;							\
-    a_type *tnode = rbtree->rbt_root;					\
-    ret = NULL;								\
-    while (tnode != NULL) {						\
-	int cmp = (a_cmp)(key, tnode);					\
-	if (cmp < 0) {							\
-	    ret = tnode;						\
-	    tnode = rbtn_left_get(a_type, a_field, tnode);		\
-	} else if (cmp > 0) {						\
-	    tnode = rbtn_right_get(a_type, a_field, tnode);		\
-	} else {							\
-	    ret = tnode;						\
-	    break;							\
-	}								\
-    }									\
-    return (ret);							\
-}									\
-a_attr a_type *								\
-a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) {		\
-    a_type *ret;							\
-    a_type *tnode = rbtree->rbt_root;					\
-    ret = NULL;								\
-    while (tnode != NULL) {						\
-	int cmp = (a_cmp)(key, tnode);					\
-	if (cmp < 0) {							\
-	    tnode = rbtn_left_get(a_type, a_field, tnode);		\
-	} else if (cmp > 0) {						\
-	    ret = tnode;						\
-	    tnode = rbtn_right_get(a_type, a_field, tnode);		\
-	} else {							\
-	    ret = tnode;						\
-	    break;							\
-	}								\
-    }									\
-    return (ret);							\
-}									\
-a_attr void								\
-a_prefix##insert(a_rbt_type *rbtree, a_type *node) {			\
-    struct {								\
-	a_type *node;							\
-	int cmp;							\
-    } path[sizeof(void *) << 4], *pathp;				\
-    rbt_node_new(a_type, a_field, rbtree, node);			\
-    /* Wind. */								\
-    path->node = rbtree->rbt_root;					\
-    for (pathp = path; pathp->node != NULL; pathp++) {			\
-	int cmp = pathp->cmp = a_cmp(node, pathp->node);		\
-	assert(cmp != 0);						\
-	if (cmp < 0) {							\
-	    pathp[1].node = rbtn_left_get(a_type, a_field,		\
-	      pathp->node);						\
-	} else {							\
-	    pathp[1].node = rbtn_right_get(a_type, a_field,		\
-	      pathp->node);						\
-	}								\
-    }									\
-    pathp->node = node;							\
-    /* Unwind. */							\
-    for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) {	\
-	a_type *cnode = pathp->node;					\
-	if (pathp->cmp < 0) {						\
-	    a_type *left = pathp[1].node;				\
-	    rbtn_left_set(a_type, a_field, cnode, left);		\
-	    if (rbtn_red_get(a_type, a_field, left)) {			\
-		a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
-		if (leftleft != NULL && rbtn_red_get(a_type, a_field,	\
-		  leftleft)) {						\
-		    /* Fix up 4-node. */				\
-		    a_type *tnode;					\
-		    rbtn_black_set(a_type, a_field, leftleft);		\
-		    rbtn_rotate_right(a_type, a_field, cnode, tnode);	\
-		    cnode = tnode;					\
-		}							\
-	    } else {							\
-		return;							\
-	    }								\
-	} else {							\
-	    a_type *right = pathp[1].node;				\
-	    rbtn_right_set(a_type, a_field, cnode, right);		\
-	    if (rbtn_red_get(a_type, a_field, right)) {			\
-		a_type *left = rbtn_left_get(a_type, a_field, cnode);	\
-		if (left != NULL && rbtn_red_get(a_type, a_field,	\
-		  left)) {						\
-		    /* Split 4-node. */					\
-		    rbtn_black_set(a_type, a_field, left);		\
-		    rbtn_black_set(a_type, a_field, right);		\
-		    rbtn_red_set(a_type, a_field, cnode);		\
-		} else {						\
-		    /* Lean left. */					\
-		    a_type *tnode;					\
-		    bool tred = rbtn_red_get(a_type, a_field, cnode);	\
-		    rbtn_rotate_left(a_type, a_field, cnode, tnode);	\
-		    rbtn_color_set(a_type, a_field, tnode, tred);	\
-		    rbtn_red_set(a_type, a_field, cnode);		\
-		    cnode = tnode;					\
-		}							\
-	    } else {							\
-		return;							\
-	    }								\
-	}								\
-	pathp->node = cnode;						\
-    }									\
-    /* Set root, and make it black. */					\
-    rbtree->rbt_root = path->node;					\
-    rbtn_black_set(a_type, a_field, rbtree->rbt_root);			\
-}									\
-a_attr void								\
-a_prefix##remove(a_rbt_type *rbtree, a_type *node) {			\
-    struct {								\
-	a_type *node;							\
-	int cmp;							\
-    } *pathp, *nodep, path[sizeof(void *) << 4];			\
-    /* Wind. */								\
-    nodep = NULL; /* Silence compiler warning. */			\
-    path->node = rbtree->rbt_root;					\
-    for (pathp = path; pathp->node != NULL; pathp++) {			\
-	int cmp = pathp->cmp = a_cmp(node, pathp->node);		\
-	if (cmp < 0) {							\
-	    pathp[1].node = rbtn_left_get(a_type, a_field,		\
-	      pathp->node);						\
-	} else {							\
-	    pathp[1].node = rbtn_right_get(a_type, a_field,		\
-	      pathp->node);						\
-	    if (cmp == 0) {						\
-	        /* Find node's successor, in preparation for swap. */	\
-		pathp->cmp = 1;						\
-		nodep = pathp;						\
-		for (pathp++; pathp->node != NULL;			\
-		  pathp++) {						\
-		    pathp->cmp = -1;					\
-		    pathp[1].node = rbtn_left_get(a_type, a_field,	\
-		      pathp->node);					\
-		}							\
-		break;							\
-	    }								\
-	}								\
-    }									\
-    assert(nodep->node == node);					\
-    pathp--;								\
-    if (pathp->node != node) {						\
-	/* Swap node with its successor. */				\
-	bool tred = rbtn_red_get(a_type, a_field, pathp->node);		\
-	rbtn_color_set(a_type, a_field, pathp->node,			\
-	  rbtn_red_get(a_type, a_field, node));				\
-	rbtn_left_set(a_type, a_field, pathp->node,			\
-	  rbtn_left_get(a_type, a_field, node));			\
-	/* If node's successor is its right child, the following code */\
-	/* will do the wrong thing for the right child pointer.       */\
-	/* However, it doesn't matter, because the pointer will be    */\
-	/* properly set when the successor is pruned.                 */\
-	rbtn_right_set(a_type, a_field, pathp->node,			\
-	  rbtn_right_get(a_type, a_field, node));			\
-	rbtn_color_set(a_type, a_field, node, tred);			\
-	/* The pruned leaf node's child pointers are never accessed   */\
-	/* again, so don't bother setting them to nil.                */\
-	nodep->node = pathp->node;					\
-	pathp->node = node;						\
-	if (nodep == path) {						\
-	    rbtree->rbt_root = nodep->node;				\
-	} else {							\
-	    if (nodep[-1].cmp < 0) {					\
-		rbtn_left_set(a_type, a_field, nodep[-1].node,		\
-		  nodep->node);						\
-	    } else {							\
-		rbtn_right_set(a_type, a_field, nodep[-1].node,		\
-		  nodep->node);						\
-	    }								\
-	}								\
-    } else {								\
-	a_type *left = rbtn_left_get(a_type, a_field, node);		\
-	if (left != NULL) {						\
-	    /* node has no successor, but it has a left child.        */\
-	    /* Splice node out, without losing the left child.        */\
-	    assert(!rbtn_red_get(a_type, a_field, node));		\
-	    assert(rbtn_red_get(a_type, a_field, left));		\
-	    rbtn_black_set(a_type, a_field, left);			\
-	    if (pathp == path) {					\
-		rbtree->rbt_root = left;				\
-	    } else {							\
-		if (pathp[-1].cmp < 0) {				\
-		    rbtn_left_set(a_type, a_field, pathp[-1].node,	\
-		      left);						\
-		} else {						\
-		    rbtn_right_set(a_type, a_field, pathp[-1].node,	\
-		      left);						\
-		}							\
-	    }								\
-	    return;							\
-	} else if (pathp == path) {					\
-	    /* The tree only contained one node. */			\
-	    rbtree->rbt_root = NULL;					\
-	    return;							\
-	}								\
-    }									\
-    if (rbtn_red_get(a_type, a_field, pathp->node)) {			\
-	/* Prune red node, which requires no fixup. */			\
-	assert(pathp[-1].cmp < 0);					\
-	rbtn_left_set(a_type, a_field, pathp[-1].node, NULL);		\
-	return;								\
-    }									\
-    /* The node to be pruned is black, so unwind until balance is     */\
-    /* restored.                                                      */\
-    pathp->node = NULL;							\
-    for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) {	\
-	assert(pathp->cmp != 0);					\
-	if (pathp->cmp < 0) {						\
-	    rbtn_left_set(a_type, a_field, pathp->node,			\
-	      pathp[1].node);						\
-	    if (rbtn_red_get(a_type, a_field, pathp->node)) {		\
-		a_type *right = rbtn_right_get(a_type, a_field,		\
-		  pathp->node);						\
-		a_type *rightleft = rbtn_left_get(a_type, a_field,	\
-		  right);						\
-		a_type *tnode;						\
-		if (rightleft != NULL && rbtn_red_get(a_type, a_field,	\
-		  rightleft)) {						\
-		    /* In the following diagrams, ||, //, and \\      */\
-		    /* indicate the path to the removed node.         */\
-		    /*                                                */\
-		    /*      ||                                        */\
-		    /*    pathp(r)                                    */\
-		    /*  //        \                                   */\
-		    /* (b)        (b)                                 */\
-		    /*           /                                    */\
-		    /*          (r)                                   */\
-		    /*                                                */\
-		    rbtn_black_set(a_type, a_field, pathp->node);	\
-		    rbtn_rotate_right(a_type, a_field, right, tnode);	\
-		    rbtn_right_set(a_type, a_field, pathp->node, tnode);\
-		    rbtn_rotate_left(a_type, a_field, pathp->node,	\
-		      tnode);						\
-		} else {						\
-		    /*      ||                                        */\
-		    /*    pathp(r)                                    */\
-		    /*  //        \                                   */\
-		    /* (b)        (b)                                 */\
-		    /*           /                                    */\
-		    /*          (b)                                   */\
-		    /*                                                */\
-		    rbtn_rotate_left(a_type, a_field, pathp->node,	\
-		      tnode);						\
-		}							\
-		/* Balance restored, but rotation modified subtree    */\
-		/* root.                                              */\
-		assert((uintptr_t)pathp > (uintptr_t)path);		\
-		if (pathp[-1].cmp < 0) {				\
-		    rbtn_left_set(a_type, a_field, pathp[-1].node,	\
-		      tnode);						\
-		} else {						\
-		    rbtn_right_set(a_type, a_field, pathp[-1].node,	\
-		      tnode);						\
-		}							\
-		return;							\
-	    } else {							\
-		a_type *right = rbtn_right_get(a_type, a_field,		\
-		  pathp->node);						\
-		a_type *rightleft = rbtn_left_get(a_type, a_field,	\
-		  right);						\
-		if (rightleft != NULL && rbtn_red_get(a_type, a_field,	\
-		  rightleft)) {						\
-		    /*      ||                                        */\
-		    /*    pathp(b)                                    */\
-		    /*  //        \                                   */\
-		    /* (b)        (b)                                 */\
-		    /*           /                                    */\
-		    /*          (r)                                   */\
-		    a_type *tnode;					\
-		    rbtn_black_set(a_type, a_field, rightleft);		\
-		    rbtn_rotate_right(a_type, a_field, right, tnode);	\
-		    rbtn_right_set(a_type, a_field, pathp->node, tnode);\
-		    rbtn_rotate_left(a_type, a_field, pathp->node,	\
-		      tnode);						\
-		    /* Balance restored, but rotation modified        */\
-		    /* subtree root, which may actually be the tree   */\
-		    /* root.                                          */\
-		    if (pathp == path) {				\
-			/* Set root. */					\
-			rbtree->rbt_root = tnode;			\
-		    } else {						\
-			if (pathp[-1].cmp < 0) {			\
-			    rbtn_left_set(a_type, a_field,		\
-			      pathp[-1].node, tnode);			\
-			} else {					\
-			    rbtn_right_set(a_type, a_field,		\
-			      pathp[-1].node, tnode);			\
-			}						\
-		    }							\
-		    return;						\
-		} else {						\
-		    /*      ||                                        */\
-		    /*    pathp(b)                                    */\
-		    /*  //        \                                   */\
-		    /* (b)        (b)                                 */\
-		    /*           /                                    */\
-		    /*          (b)                                   */\
-		    a_type *tnode;					\
-		    rbtn_red_set(a_type, a_field, pathp->node);		\
-		    rbtn_rotate_left(a_type, a_field, pathp->node,	\
-		      tnode);						\
-		    pathp->node = tnode;				\
-		}							\
-	    }								\
-	} else {							\
-	    a_type *left;						\
-	    rbtn_right_set(a_type, a_field, pathp->node,		\
-	      pathp[1].node);						\
-	    left = rbtn_left_get(a_type, a_field, pathp->node);		\
-	    if (rbtn_red_get(a_type, a_field, left)) {			\
-		a_type *tnode;						\
-		a_type *leftright = rbtn_right_get(a_type, a_field,	\
-		  left);						\
-		a_type *leftrightleft = rbtn_left_get(a_type, a_field,	\
-		  leftright);						\
-		if (leftrightleft != NULL && rbtn_red_get(a_type,	\
-		  a_field, leftrightleft)) {				\
-		    /*      ||                                        */\
-		    /*    pathp(b)                                    */\
-		    /*   /        \\                                  */\
-		    /* (r)        (b)                                 */\
-		    /*   \                                            */\
-		    /*   (b)                                          */\
-		    /*   /                                            */\
-		    /* (r)                                            */\
-		    a_type *unode;					\
-		    rbtn_black_set(a_type, a_field, leftrightleft);	\
-		    rbtn_rotate_right(a_type, a_field, pathp->node,	\
-		      unode);						\
-		    rbtn_rotate_right(a_type, a_field, pathp->node,	\
-		      tnode);						\
-		    rbtn_right_set(a_type, a_field, unode, tnode);	\
-		    rbtn_rotate_left(a_type, a_field, unode, tnode);	\
-		} else {						\
-		    /*      ||                                        */\
-		    /*    pathp(b)                                    */\
-		    /*   /        \\                                  */\
-		    /* (r)        (b)                                 */\
-		    /*   \                                            */\
-		    /*   (b)                                          */\
-		    /*   /                                            */\
-		    /* (b)                                            */\
-		    assert(leftright != NULL);				\
-		    rbtn_red_set(a_type, a_field, leftright);		\
-		    rbtn_rotate_right(a_type, a_field, pathp->node,	\
-		      tnode);						\
-		    rbtn_black_set(a_type, a_field, tnode);		\
-		}							\
-		/* Balance restored, but rotation modified subtree    */\
-		/* root, which may actually be the tree root.         */\
-		if (pathp == path) {					\
-		    /* Set root. */					\
-		    rbtree->rbt_root = tnode;				\
-		} else {						\
-		    if (pathp[-1].cmp < 0) {				\
-			rbtn_left_set(a_type, a_field, pathp[-1].node,	\
-			  tnode);					\
-		    } else {						\
-			rbtn_right_set(a_type, a_field, pathp[-1].node,	\
-			  tnode);					\
-		    }							\
-		}							\
-		return;							\
-	    } else if (rbtn_red_get(a_type, a_field, pathp->node)) {	\
-		a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
-		if (leftleft != NULL && rbtn_red_get(a_type, a_field,	\
-		  leftleft)) {						\
-		    /*        ||                                      */\
-		    /*      pathp(r)                                  */\
-		    /*     /        \\                                */\
-		    /*   (b)        (b)                               */\
-		    /*   /                                            */\
-		    /* (r)                                            */\
-		    a_type *tnode;					\
-		    rbtn_black_set(a_type, a_field, pathp->node);	\
-		    rbtn_red_set(a_type, a_field, left);		\
-		    rbtn_black_set(a_type, a_field, leftleft);		\
-		    rbtn_rotate_right(a_type, a_field, pathp->node,	\
-		      tnode);						\
-		    /* Balance restored, but rotation modified        */\
-		    /* subtree root.                                  */\
-		    assert((uintptr_t)pathp > (uintptr_t)path);		\
-		    if (pathp[-1].cmp < 0) {				\
-			rbtn_left_set(a_type, a_field, pathp[-1].node,	\
-			  tnode);					\
-		    } else {						\
-			rbtn_right_set(a_type, a_field, pathp[-1].node,	\
-			  tnode);					\
-		    }							\
-		    return;						\
-		} else {						\
-		    /*        ||                                      */\
-		    /*      pathp(r)                                  */\
-		    /*     /        \\                                */\
-		    /*   (b)        (b)                               */\
-		    /*   /                                            */\
-		    /* (b)                                            */\
-		    rbtn_red_set(a_type, a_field, left);		\
-		    rbtn_black_set(a_type, a_field, pathp->node);	\
-		    /* Balance restored. */				\
-		    return;						\
-		}							\
-	    } else {							\
-		a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
-		if (leftleft != NULL && rbtn_red_get(a_type, a_field,	\
-		  leftleft)) {						\
-		    /*               ||                               */\
-		    /*             pathp(b)                           */\
-		    /*            /        \\                         */\
-		    /*          (b)        (b)                        */\
-		    /*          /                                     */\
-		    /*        (r)                                     */\
-		    a_type *tnode;					\
-		    rbtn_black_set(a_type, a_field, leftleft);		\
-		    rbtn_rotate_right(a_type, a_field, pathp->node,	\
-		      tnode);						\
-		    /* Balance restored, but rotation modified        */\
-		    /* subtree root, which may actually be the tree   */\
-		    /* root.                                          */\
-		    if (pathp == path) {				\
-			/* Set root. */					\
-			rbtree->rbt_root = tnode;			\
-		    } else {						\
-			if (pathp[-1].cmp < 0) {			\
-			    rbtn_left_set(a_type, a_field,		\
-			      pathp[-1].node, tnode);			\
-			} else {					\
-			    rbtn_right_set(a_type, a_field,		\
-			      pathp[-1].node, tnode);			\
-			}						\
-		    }							\
-		    return;						\
-		} else {						\
-		    /*               ||                               */\
-		    /*             pathp(b)                           */\
-		    /*            /        \\                         */\
-		    /*          (b)        (b)                        */\
-		    /*          /                                     */\
-		    /*        (b)                                     */\
-		    rbtn_red_set(a_type, a_field, left);		\
-		}							\
-	    }								\
-	}								\
-    }									\
-    /* Set root. */							\
-    rbtree->rbt_root = path->node;					\
-    assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root));		\
-}									\
-a_attr a_type *								\
-a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node,		\
-  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) {		\
-    if (node == NULL) {							\
-	return (NULL);							\
-    } else {								\
-	a_type *ret;							\
-	if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type,	\
-	  a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node,	\
-	  arg)) != NULL) {						\
-	    return (ret);						\
-	}								\
-	return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type,	\
-	  a_field, node), cb, arg));					\
-    }									\
-}									\
-a_attr a_type *								\
-a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node,	\
-  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) {		\
-    int cmp = a_cmp(start, node);					\
-    if (cmp < 0) {							\
-	a_type *ret;							\
-	if ((ret = a_prefix##iter_start(rbtree, start,			\
-	  rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL ||	\
-	  (ret = cb(rbtree, node, arg)) != NULL) {			\
-	    return (ret);						\
-	}								\
-	return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type,	\
-	  a_field, node), cb, arg));					\
-    } else if (cmp > 0) {						\
-	return (a_prefix##iter_start(rbtree, start,			\
-	  rbtn_right_get(a_type, a_field, node), cb, arg));		\
-    } else {								\
-	a_type *ret;							\
-	if ((ret = cb(rbtree, node, arg)) != NULL) {			\
-	    return (ret);						\
-	}								\
-	return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type,	\
-	  a_field, node), cb, arg));					\
-    }									\
-}									\
-a_attr a_type *								\
-a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)(	\
-  a_rbt_type *, a_type *, void *), void *arg) {				\
-    a_type *ret;							\
-    if (start != NULL) {						\
-	ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root,	\
-	  cb, arg);							\
-    } else {								\
-	ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
-    }									\
-    return (ret);							\
-}									\
-a_attr a_type *								\
-a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node,	\
-  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) {		\
-    if (node == NULL) {							\
-	return (NULL);							\
-    } else {								\
-	a_type *ret;							\
-	if ((ret = a_prefix##reverse_iter_recurse(rbtree,		\
-	  rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL ||	\
-	  (ret = cb(rbtree, node, arg)) != NULL) {			\
-	    return (ret);						\
-	}								\
-	return (a_prefix##reverse_iter_recurse(rbtree,			\
-	  rbtn_left_get(a_type, a_field, node), cb, arg));		\
-    }									\
-}									\
-a_attr a_type *								\
-a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start,		\
-  a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *),		\
-  void *arg) {								\
-    int cmp = a_cmp(start, node);					\
-    if (cmp > 0) {							\
-	a_type *ret;							\
-	if ((ret = a_prefix##reverse_iter_start(rbtree, start,		\
-	  rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL ||	\
-	  (ret = cb(rbtree, node, arg)) != NULL) {			\
-	    return (ret);						\
-	}								\
-	return (a_prefix##reverse_iter_recurse(rbtree,			\
-	  rbtn_left_get(a_type, a_field, node), cb, arg));		\
-    } else if (cmp < 0) {						\
-	return (a_prefix##reverse_iter_start(rbtree, start,		\
-	  rbtn_left_get(a_type, a_field, node), cb, arg));		\
-    } else {								\
-	a_type *ret;							\
-	if ((ret = cb(rbtree, node, arg)) != NULL) {			\
-	    return (ret);						\
-	}								\
-	return (a_prefix##reverse_iter_recurse(rbtree,			\
-	  rbtn_left_get(a_type, a_field, node), cb, arg));		\
-    }									\
-}									\
-a_attr a_type *								\
-a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start,		\
-  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) {		\
-    a_type *ret;							\
-    if (start != NULL) {						\
-	ret = a_prefix##reverse_iter_start(rbtree, start,		\
-	  rbtree->rbt_root, cb, arg);					\
-    } else {								\
-	ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root,	\
-	  cb, arg);							\
-    }									\
-    return (ret);							\
-}									\
-a_attr void								\
-a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)(	\
-  a_type *, void *), void *arg) {					\
-    if (node == NULL) {							\
-	return;								\
-    }									\
-    a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field,	\
-      node), cb, arg);							\
-    rbtn_left_set(a_type, a_field, (node), NULL);			\
-    a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field,	\
-      node), cb, arg);							\
-    rbtn_right_set(a_type, a_field, (node), NULL);			\
-    if (cb) {								\
-	cb(node, arg);							\
-    }									\
-}									\
-a_attr void								\
-a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *),	\
-  void *arg) {								\
-    a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg);	\
-    rbtree->rbt_root = NULL;						\
-}
-
-#endif /* RB_H_ */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/rtree_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/rtree_externs.h
deleted file mode 100644
index 811c0ff..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/rtree_externs.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_RTREE_EXTERNS_H
-#define JEMALLOC_INTERNAL_RTREE_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-bool rtree_new(rtree_t *rtree, unsigned bits);
-#ifdef JEMALLOC_JET
-typedef rtree_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
-extern rtree_node_alloc_t *rtree_node_alloc;
-typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_elm_t *);
-extern rtree_node_dalloc_t *rtree_node_dalloc;
-void	rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
-#endif
-rtree_elm_t	*rtree_subtree_read_hard(tsdn_t *tsdn, rtree_t *rtree,
-    unsigned level);
-rtree_elm_t	*rtree_child_read_hard(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_elm_t *elm, unsigned level);
-void	rtree_elm_witness_acquire(tsdn_t *tsdn, const rtree_t *rtree,
-    uintptr_t key, const rtree_elm_t *elm);
-void	rtree_elm_witness_access(tsdn_t *tsdn, const rtree_t *rtree,
-    const rtree_elm_t *elm);
-void	rtree_elm_witness_release(tsdn_t *tsdn, const rtree_t *rtree,
-    const rtree_elm_t *elm);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_RTREE_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/rtree_inlines.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/rtree_inlines.h
deleted file mode 100644
index 7e79a6a..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/rtree_inlines.h
+++ /dev/null
@@ -1,437 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_RTREE_INLINES_H
-#define JEMALLOC_INTERNAL_RTREE_INLINES_H
-
-#ifndef JEMALLOC_ENABLE_INLINE
-unsigned	rtree_start_level(const rtree_t *rtree, uintptr_t key);
-unsigned	rtree_ctx_start_level(const rtree_t *rtree,
-    const rtree_ctx_t *rtree_ctx, uintptr_t key);
-uintptr_t	rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
-
-bool	rtree_node_valid(rtree_elm_t *node);
-rtree_elm_t	*rtree_child_tryread(rtree_elm_t *elm, bool dependent);
-rtree_elm_t	*rtree_child_read(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm,
-    unsigned level, bool dependent);
-extent_t	*rtree_elm_read(rtree_elm_t *elm, bool dependent);
-void	rtree_elm_write(rtree_elm_t *elm, const extent_t *extent);
-rtree_elm_t	*rtree_subtree_tryread(rtree_t *rtree, unsigned level,
-    bool dependent);
-rtree_elm_t	*rtree_subtree_read(tsdn_t *tsdn, rtree_t *rtree,
-    unsigned level, bool dependent);
-rtree_elm_t	*rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
-
-bool	rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
-    uintptr_t key, const extent_t *extent);
-extent_t	*rtree_read(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent);
-rtree_elm_t	*rtree_elm_acquire(tsdn_t *tsdn, rtree_t *rtree,
-    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
-extent_t	*rtree_elm_read_acquired(tsdn_t *tsdn, const rtree_t *rtree,
-    rtree_elm_t *elm);
-void	rtree_elm_write_acquired(tsdn_t *tsdn, const rtree_t *rtree,
-    rtree_elm_t *elm, const extent_t *extent);
-void	rtree_elm_release(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm);
-void	rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
-    uintptr_t key);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
-JEMALLOC_ALWAYS_INLINE unsigned
-rtree_start_level(const rtree_t *rtree, uintptr_t key)
-{
-	unsigned start_level;
-
-	if (unlikely(key == 0))
-		return (rtree->height - 1);
-
-	start_level = rtree->start_level[(lg_floor(key) + 1) >>
-	    LG_RTREE_BITS_PER_LEVEL];
-	assert(start_level < rtree->height);
-	return (start_level);
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-rtree_ctx_start_level(const rtree_t *rtree, const rtree_ctx_t *rtree_ctx,
-    uintptr_t key)
-{
-	unsigned start_level;
-	uintptr_t key_diff;
-
-	/* Compute the difference between old and new lookup keys. */
-	key_diff = key ^ rtree_ctx->key;
-	assert(key_diff != 0); /* Handled in rtree_elm_lookup(). */
-
-	/*
-	 * Compute the last traversal path element at which the keys' paths
-	 * are the same.
-	 */
-	start_level = rtree->start_level[(lg_floor(key_diff) + 1) >>
-	    LG_RTREE_BITS_PER_LEVEL];
-	assert(start_level < rtree->height);
-	return (start_level);
-}
-
-JEMALLOC_ALWAYS_INLINE uintptr_t
-rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
-{
-	return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
-	    rtree->levels[level].cumbits)) & ((ZU(1) <<
-	    rtree->levels[level].bits) - 1));
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-rtree_node_valid(rtree_elm_t *node)
-{
-	return ((uintptr_t)node != (uintptr_t)0);
-}
-
-JEMALLOC_ALWAYS_INLINE rtree_elm_t *
-rtree_child_tryread(rtree_elm_t *elm, bool dependent)
-{
-	rtree_elm_t *child;
-
-	/* Double-checked read (first read may be stale). */
-	child = elm->child;
-	if (!dependent && !rtree_node_valid(child))
-		child = (rtree_elm_t *)atomic_read_p(&elm->pun);
-	assert(!dependent || child != NULL);
-	return (child);
-}
-
-JEMALLOC_ALWAYS_INLINE rtree_elm_t *
-rtree_child_read(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm, unsigned level,
-    bool dependent)
-{
-	rtree_elm_t *child;
-
-	child = rtree_child_tryread(elm, dependent);
-	if (!dependent && unlikely(!rtree_node_valid(child)))
-		child = rtree_child_read_hard(tsdn, rtree, elm, level);
-	assert(!dependent || child != NULL);
-	return (child);
-}
-
-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_elm_read(rtree_elm_t *elm, bool dependent)
-{
-	extent_t *extent;
-
-	if (dependent) {
-		/*
-		 * Reading a value on behalf of a pointer to a valid allocation
-		 * is guaranteed to be a clean read even without
-		 * synchronization, because the rtree update became visible in
-		 * memory before the pointer came into existence.
-		 */
-		extent = elm->extent;
-	} else {
-		/*
-		 * An arbitrary read, e.g. on behalf of ivsalloc(), may not be
-		 * dependent on a previous rtree write, which means a stale read
-		 * could result if synchronization were omitted here.
-		 */
-		extent = (extent_t *)atomic_read_p(&elm->pun);
-	}
-
-	/* Mask the lock bit. */
-	extent = (extent_t *)((uintptr_t)extent & ~((uintptr_t)0x1));
-
-	return (extent);
-}
-
-JEMALLOC_INLINE void
-rtree_elm_write(rtree_elm_t *elm, const extent_t *extent)
-{
-	atomic_write_p(&elm->pun, extent);
-}
-
-JEMALLOC_ALWAYS_INLINE rtree_elm_t *
-rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
-{
-	rtree_elm_t *subtree;
-
-	/* Double-checked read (first read may be stale). */
-	subtree = rtree->levels[level].subtree;
-	if (!dependent && unlikely(!rtree_node_valid(subtree))) {
-		subtree = (rtree_elm_t *)atomic_read_p(
-		    &rtree->levels[level].subtree_pun);
-	}
-	assert(!dependent || subtree != NULL);
-	return (subtree);
-}
-
-JEMALLOC_ALWAYS_INLINE rtree_elm_t *
-rtree_subtree_read(tsdn_t *tsdn, rtree_t *rtree, unsigned level, bool dependent)
-{
-	rtree_elm_t *subtree;
-
-	subtree = rtree_subtree_tryread(rtree, level, dependent);
-	if (!dependent && unlikely(!rtree_node_valid(subtree)))
-		subtree = rtree_subtree_read_hard(tsdn, rtree, level);
-	assert(!dependent || subtree != NULL);
-	return (subtree);
-}
-
-JEMALLOC_ALWAYS_INLINE rtree_elm_t *
-rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
-    uintptr_t key, bool dependent, bool init_missing)
-{
-	uintptr_t subkey;
-	unsigned start_level;
-	rtree_elm_t *node;
-
-	assert(!dependent || !init_missing);
-
-	if (dependent || init_missing) {
-		if (likely(rtree_ctx->valid)) {
-			if (key == rtree_ctx->key)
-				return (rtree_ctx->elms[rtree->height]);
-			else {
-				unsigned no_ctx_start_level =
-				    rtree_start_level(rtree, key);
-				unsigned ctx_start_level;
-
-				if (likely(no_ctx_start_level <=
-				    rtree_ctx->start_level && (ctx_start_level =
-				    rtree_ctx_start_level(rtree, rtree_ctx,
-				    key)) >= rtree_ctx->start_level)) {
-					start_level = ctx_start_level;
-					node = rtree_ctx->elms[ctx_start_level];
-				} else {
-					start_level = no_ctx_start_level;
-					node = init_missing ?
-					    rtree_subtree_read(tsdn, rtree,
-					    no_ctx_start_level, dependent) :
-					    rtree_subtree_tryread(rtree,
-					    no_ctx_start_level, dependent);
-					rtree_ctx->start_level =
-					    no_ctx_start_level;
-					rtree_ctx->elms[no_ctx_start_level] =
-					    node;
-				}
-			}
-		} else {
-			unsigned no_ctx_start_level = rtree_start_level(rtree,
-			    key);
-
-			start_level = no_ctx_start_level;
-			node = init_missing ? rtree_subtree_read(tsdn, rtree,
-			    no_ctx_start_level, dependent) :
-			    rtree_subtree_tryread(rtree, no_ctx_start_level,
-			    dependent);
-			rtree_ctx->valid = true;
-			rtree_ctx->start_level = no_ctx_start_level;
-			rtree_ctx->elms[no_ctx_start_level] = node;
-		}
-		rtree_ctx->key = key;
-	} else {
-		start_level = rtree_start_level(rtree, key);
-		node = init_missing ? rtree_subtree_read(tsdn, rtree,
-		    start_level, dependent) : rtree_subtree_tryread(rtree,
-		    start_level, dependent);
-	}
-
-#define	RTREE_GET_BIAS	(RTREE_HEIGHT_MAX - rtree->height)
-	switch (start_level + RTREE_GET_BIAS) {
-#define	RTREE_GET_SUBTREE(level)					\
-	case level:							\
-		assert(level < (RTREE_HEIGHT_MAX-1));			\
-		if (!dependent && unlikely(!rtree_node_valid(node))) {	\
-			if (init_missing)				\
-				rtree_ctx->valid = false;		\
-			return (NULL);					\
-		}							\
-		subkey = rtree_subkey(rtree, key, level -		\
-		    RTREE_GET_BIAS);					\
-		node = init_missing ? rtree_child_read(tsdn, rtree,	\
-		    &node[subkey], level - RTREE_GET_BIAS, dependent) :	\
-		    rtree_child_tryread(&node[subkey], dependent);	\
-		if (dependent || init_missing) {			\
-			rtree_ctx->elms[level - RTREE_GET_BIAS + 1] =	\
-			    node;					\
-		}							\
-		/* Fall through. */
-#define	RTREE_GET_LEAF(level)						\
-	case level:							\
-		assert(level == (RTREE_HEIGHT_MAX-1));			\
-		if (!dependent && unlikely(!rtree_node_valid(node))) {	\
-			if (init_missing)				\
-				rtree_ctx->valid = false;		\
-			return (NULL);					\
-		}							\
-		subkey = rtree_subkey(rtree, key, level -		\
-		    RTREE_GET_BIAS);					\
-		/*							\
-		 * node is a leaf, so it contains values rather than	\
-		 * child pointers.					\
-		 */							\
-		node = &node[subkey];					\
-		if (dependent || init_missing) {			\
-			rtree_ctx->elms[level - RTREE_GET_BIAS + 1] =	\
-			    node;					\
-		}							\
-		return (node);
-#if RTREE_HEIGHT_MAX > 1
-	RTREE_GET_SUBTREE(0)
-#endif
-#if RTREE_HEIGHT_MAX > 2
-	RTREE_GET_SUBTREE(1)
-#endif
-#if RTREE_HEIGHT_MAX > 3
-	RTREE_GET_SUBTREE(2)
-#endif
-#if RTREE_HEIGHT_MAX > 4
-	RTREE_GET_SUBTREE(3)
-#endif
-#if RTREE_HEIGHT_MAX > 5
-	RTREE_GET_SUBTREE(4)
-#endif
-#if RTREE_HEIGHT_MAX > 6
-	RTREE_GET_SUBTREE(5)
-#endif
-#if RTREE_HEIGHT_MAX > 7
-	RTREE_GET_SUBTREE(6)
-#endif
-#if RTREE_HEIGHT_MAX > 8
-	RTREE_GET_SUBTREE(7)
-#endif
-#if RTREE_HEIGHT_MAX > 9
-	RTREE_GET_SUBTREE(8)
-#endif
-#if RTREE_HEIGHT_MAX > 10
-	RTREE_GET_SUBTREE(9)
-#endif
-#if RTREE_HEIGHT_MAX > 11
-	RTREE_GET_SUBTREE(10)
-#endif
-#if RTREE_HEIGHT_MAX > 12
-	RTREE_GET_SUBTREE(11)
-#endif
-#if RTREE_HEIGHT_MAX > 13
-	RTREE_GET_SUBTREE(12)
-#endif
-#if RTREE_HEIGHT_MAX > 14
-	RTREE_GET_SUBTREE(13)
-#endif
-#if RTREE_HEIGHT_MAX > 15
-	RTREE_GET_SUBTREE(14)
-#endif
-#if RTREE_HEIGHT_MAX > 16
-#  error Unsupported RTREE_HEIGHT_MAX
-#endif
-	RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1)
-#undef RTREE_GET_SUBTREE
-#undef RTREE_GET_LEAF
-	default: not_reached();
-	}
-#undef RTREE_GET_BIAS
-	not_reached();
-}
-
-JEMALLOC_INLINE bool
-rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
-    const extent_t *extent)
-{
-	rtree_elm_t *elm;
-
-	assert(extent != NULL); /* Use rtree_clear() for this case. */
-	assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);
-
-	elm = rtree_elm_lookup(tsdn, rtree, rtree_ctx, key, false, true);
-	if (elm == NULL)
-		return (true);
-	assert(rtree_elm_read(elm, false) == NULL);
-	rtree_elm_write(elm, extent);
-
-	return (false);
-}
-
-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
-    bool dependent)
-{
-	rtree_elm_t *elm;
-
-	elm = rtree_elm_lookup(tsdn, rtree, rtree_ctx, key, dependent, false);
-	if (elm == NULL)
-		return (NULL);
-
-	return (rtree_elm_read(elm, dependent));
-}
-
-JEMALLOC_INLINE rtree_elm_t *
-rtree_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
-    uintptr_t key, bool dependent, bool init_missing)
-{
-	rtree_elm_t *elm;
-
-	elm = rtree_elm_lookup(tsdn, rtree, rtree_ctx, key, dependent,
-	    init_missing);
-	if (!dependent && elm == NULL)
-		return (NULL);
-	{
-		extent_t *extent;
-		void *s;
-
-		do {
-			extent = rtree_elm_read(elm, false);
-			/* The least significant bit serves as a lock. */
-			s = (void *)((uintptr_t)extent | (uintptr_t)0x1);
-		} while (atomic_cas_p(&elm->pun, (void *)extent, s));
-	}
-
-	if (config_debug)
-		rtree_elm_witness_acquire(tsdn, rtree, key, elm);
-
-	return (elm);
-}
-
-JEMALLOC_INLINE extent_t *
-rtree_elm_read_acquired(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm)
-{
-	extent_t *extent;
-
-	assert(((uintptr_t)elm->pun & (uintptr_t)0x1) == (uintptr_t)0x1);
-	extent = (extent_t *)((uintptr_t)elm->pun & ~((uintptr_t)0x1));
-	assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);
-
-	if (config_debug)
-		rtree_elm_witness_access(tsdn, rtree, elm);
-
-	return (extent);
-}
-
-JEMALLOC_INLINE void
-rtree_elm_write_acquired(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm,
-    const extent_t *extent)
-{
-	assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);
-	assert(((uintptr_t)elm->pun & (uintptr_t)0x1) == (uintptr_t)0x1);
-
-	if (config_debug)
-		rtree_elm_witness_access(tsdn, rtree, elm);
-
-	elm->pun = (void *)((uintptr_t)extent | (uintptr_t)0x1);
-	assert(rtree_elm_read_acquired(tsdn, rtree, elm) == extent);
-}
-
-JEMALLOC_INLINE void
-rtree_elm_release(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm)
-{
-	rtree_elm_write(elm, rtree_elm_read_acquired(tsdn, rtree, elm));
-	if (config_debug)
-		rtree_elm_witness_release(tsdn, rtree, elm);
-}
-
-JEMALLOC_INLINE void
-rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key)
-{
-	rtree_elm_t *elm;
-
-	elm = rtree_elm_acquire(tsdn, rtree, rtree_ctx, key, true, false);
-	rtree_elm_write_acquired(tsdn, rtree, elm, NULL);
-	rtree_elm_release(tsdn, rtree, elm);
-}
-#endif
-
-#endif /* JEMALLOC_INTERNAL_RTREE_INLINES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/rtree_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/rtree_structs.h
deleted file mode 100644
index 5a7a23c..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/rtree_structs.h
+++ /dev/null
@@ -1,86 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_RTREE_STRUCTS_H
-#define JEMALLOC_INTERNAL_RTREE_STRUCTS_H
-
-struct rtree_elm_s {
-	union {
-		void		*pun;
-		rtree_elm_t	*child;
-		extent_t	*extent;
-	};
-};
-
-struct rtree_elm_witness_s {
-	const rtree_elm_t	*elm;
-	witness_t		witness;
-};
-
-struct rtree_elm_witness_tsd_s {
-	rtree_elm_witness_t	witnesses[RTREE_ELM_ACQUIRE_MAX];
-};
-
-struct rtree_level_s {
-	/*
-	 * A non-NULL subtree points to a subtree rooted along the hypothetical
-	 * path to the leaf node corresponding to key 0.  Depending on what keys
-	 * have been used to store to the tree, an arbitrary combination of
-	 * subtree pointers may remain NULL.
-	 *
-	 * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4.
-	 * This results in a 3-level tree, and the leftmost leaf can be directly
-	 * accessed via levels[2], the subtree prefixed by 0x0000 (excluding
-	 * 0x00000000) can be accessed via levels[1], and the remainder of the
-	 * tree can be accessed via levels[0].
-	 *
-	 *   levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...]
-	 *
-	 *   levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ... ]
-	 *
-	 *   levels[2] : [extent(0x000000000000) | extent(0x000000000001) | ...]
-	 *
-	 * This has practical implications on x64, which currently uses only the
-	 * lower 47 bits of virtual address space in userland, thus leaving
-	 * levels[0] unused and avoiding a level of tree traversal.
-	 */
-	union {
-		void		*subtree_pun;
-		rtree_elm_t	*subtree;
-	};
-	/* Number of key bits distinguished by this level. */
-	unsigned		bits;
-	/*
-	 * Cumulative number of key bits distinguished by traversing to
-	 * corresponding tree level.
-	 */
-	unsigned		cumbits;
-};
-
-struct rtree_ctx_s {
-	/* If false, key/elms have not yet been initialized by a lookup. */
-	bool		valid;
-	/* Key that corresponds to the tree path recorded in elms. */
-	uintptr_t	key;
-	/* Memoized rtree_start_level(key). */
-	unsigned	start_level;
-	/*
-	 * A path through rtree, driven by key.  Only elements that could
-	 * actually be used for subsequent lookups are initialized, i.e. if
-	 * start_level = rtree_start_level(key) is non-zero, the first
-	 * start_level elements are uninitialized.  The last element contains a
-	 * pointer to the leaf node element that corresponds to key, so that
-	 * exact matches require no tree node offset computation.
-	 */
-	rtree_elm_t	*elms[RTREE_HEIGHT_MAX + 1];
-};
-
-struct rtree_s {
-	unsigned		height;
-	/*
-	 * Precomputed table used to convert from the number of leading 0 key
-	 * bits to which subtree level to start at.
-	 */
-	unsigned		start_level[RTREE_HEIGHT_MAX + 1];
-	rtree_level_t		levels[RTREE_HEIGHT_MAX];
-	malloc_mutex_t		init_lock;
-};
-
-#endif /* JEMALLOC_INTERNAL_RTREE_STRUCTS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/rtree_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/rtree_types.h
deleted file mode 100644
index c02ab7a..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/rtree_types.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_RTREE_TYPES_H
-#define JEMALLOC_INTERNAL_RTREE_TYPES_H
-
-/*
- * This radix tree implementation is tailored to the singular purpose of
- * associating metadata with extents that are currently owned by jemalloc.
- *
- *******************************************************************************
- */
-
-typedef struct rtree_elm_s rtree_elm_t;
-typedef struct rtree_elm_witness_s rtree_elm_witness_t;
-typedef struct rtree_elm_witness_tsd_s rtree_elm_witness_tsd_t;
-typedef struct rtree_level_s rtree_level_t;
-typedef struct rtree_ctx_s rtree_ctx_t;
-typedef struct rtree_s rtree_t;
-
-/*
- * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
- * machine address width.
- */
-#define	LG_RTREE_BITS_PER_LEVEL	4
-#define	RTREE_BITS_PER_LEVEL	(1U << LG_RTREE_BITS_PER_LEVEL)
-/* Maximum rtree height. */
-#define	RTREE_HEIGHT_MAX						\
-    ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
-
-#define	RTREE_CTX_INITIALIZER	{					\
-	false,								\
-	0,								\
-	0,								\
-	{NULL /* C initializes all trailing elements to NULL. */}	\
-}
-
-/*
- * Maximum number of concurrently acquired elements per thread.  This controls
- * how many witness_t structures are embedded in tsd.  Ideally rtree_elm_t would
- * have a witness_t directly embedded, but that would dramatically bloat the
- * tree.  This must contain enough entries to e.g. coalesce two extents.
- */
-#define	RTREE_ELM_ACQUIRE_MAX	4
-
-/* Initializers for rtree_elm_witness_tsd_t. */
-#define	RTREE_ELM_WITNESS_INITIALIZER {					\
-	NULL,								\
-	WITNESS_INITIALIZER("rtree_elm", WITNESS_RANK_RTREE_ELM)	\
-}
-
-#define	RTREE_ELM_WITNESS_TSD_INITIALIZER {				\
-	{								\
-		RTREE_ELM_WITNESS_INITIALIZER,				\
-		RTREE_ELM_WITNESS_INITIALIZER,				\
-		RTREE_ELM_WITNESS_INITIALIZER,				\
-		RTREE_ELM_WITNESS_INITIALIZER				\
-	}								\
-}
-
-#endif /* JEMALLOC_INTERNAL_RTREE_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/size_classes.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/size_classes.h
deleted file mode 100644
index 3eb6d12..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/size_classes.h
+++ /dev/null
@@ -1,1412 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_SIZE_CLASSES_H
-#define JEMALLOC_INTERNAL_SIZE_CLASSES_H
-
-/* This file was automatically generated by size_classes.sh. */
-
-/*
- * This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to
- * be defined prior to inclusion, and it in turn defines:
- *
- *   LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
- *   SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz,
- *                 bin, pgs, lg_delta_lookup) tuples.
- *     index: Size class index.
- *     lg_grp: Lg group base size (no deltas added).
- *     lg_delta: Lg delta to previous size class.
- *     ndelta: Delta multiplier.  size == 1<<lg_grp + ndelta<<lg_delta
- *     psz: 'yes' if a multiple of the page size, 'no' otherwise.
- *     bin: 'yes' if a small bin size class, 'no' otherwise.
- *     pgs: Slab page count if a small bin size class, 0 otherwise.
- *     lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
- *                      otherwise.
- *   NTBINS: Number of tiny bins.
- *   NLBINS: Number of bins supported by the lookup table.
- *   NBINS: Number of small size class bins.
- *   NSIZES: Number of size classes.
- *   NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE).
- *   LG_TINY_MAXCLASS: Lg of maximum tiny size class.
- *   LOOKUP_MAXCLASS: Maximum size class included in lookup table.
- *   SMALL_MAXCLASS: Maximum small size class.
- *   LG_LARGE_MINCLASS: Lg of minimum large size class.
- *   LARGE_MAXCLASS: Maximum (large) size class.
- */
-
-#define	LG_SIZE_CLASS_GROUP	2
-
-#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12)
-#define	SIZE_CLASSES \
-  /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
-    SC(  0,      3,        3,      0,  no, yes,   1,  3) \
-    SC(  1,      3,        3,      1,  no, yes,   1,  3) \
-    SC(  2,      3,        3,      2,  no, yes,   3,  3) \
-    SC(  3,      3,        3,      3,  no, yes,   1,  3) \
-                                                         \
-    SC(  4,      5,        3,      1,  no, yes,   5,  3) \
-    SC(  5,      5,        3,      2,  no, yes,   3,  3) \
-    SC(  6,      5,        3,      3,  no, yes,   7,  3) \
-    SC(  7,      5,        3,      4,  no, yes,   1,  3) \
-                                                         \
-    SC(  8,      6,        4,      1,  no, yes,   5,  4) \
-    SC(  9,      6,        4,      2,  no, yes,   3,  4) \
-    SC( 10,      6,        4,      3,  no, yes,   7,  4) \
-    SC( 11,      6,        4,      4,  no, yes,   1,  4) \
-                                                         \
-    SC( 12,      7,        5,      1,  no, yes,   5,  5) \
-    SC( 13,      7,        5,      2,  no, yes,   3,  5) \
-    SC( 14,      7,        5,      3,  no, yes,   7,  5) \
-    SC( 15,      7,        5,      4,  no, yes,   1,  5) \
-                                                         \
-    SC( 16,      8,        6,      1,  no, yes,   5,  6) \
-    SC( 17,      8,        6,      2,  no, yes,   3,  6) \
-    SC( 18,      8,        6,      3,  no, yes,   7,  6) \
-    SC( 19,      8,        6,      4,  no, yes,   1,  6) \
-                                                         \
-    SC( 20,      9,        7,      1,  no, yes,   5,  7) \
-    SC( 21,      9,        7,      2,  no, yes,   3,  7) \
-    SC( 22,      9,        7,      3,  no, yes,   7,  7) \
-    SC( 23,      9,        7,      4,  no, yes,   1,  7) \
-                                                         \
-    SC( 24,     10,        8,      1,  no, yes,   5,  8) \
-    SC( 25,     10,        8,      2,  no, yes,   3,  8) \
-    SC( 26,     10,        8,      3,  no, yes,   7,  8) \
-    SC( 27,     10,        8,      4,  no, yes,   1,  8) \
-                                                         \
-    SC( 28,     11,        9,      1,  no, yes,   5,  9) \
-    SC( 29,     11,        9,      2,  no, yes,   3,  9) \
-    SC( 30,     11,        9,      3,  no, yes,   7,  9) \
-    SC( 31,     11,        9,      4, yes, yes,   1,  9) \
-                                                         \
-    SC( 32,     12,       10,      1,  no, yes,   5, no) \
-    SC( 33,     12,       10,      2,  no, yes,   3, no) \
-    SC( 34,     12,       10,      3,  no, yes,   7, no) \
-    SC( 35,     12,       10,      4, yes, yes,   2, no) \
-                                                         \
-    SC( 36,     13,       11,      1,  no, yes,   5, no) \
-    SC( 37,     13,       11,      2, yes, yes,   3, no) \
-    SC( 38,     13,       11,      3,  no, yes,   7, no) \
-    SC( 39,     13,       11,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 40,     14,       12,      1, yes,  no,   0, no) \
-    SC( 41,     14,       12,      2, yes,  no,   0, no) \
-    SC( 42,     14,       12,      3, yes,  no,   0, no) \
-    SC( 43,     14,       12,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 44,     15,       13,      1, yes,  no,   0, no) \
-    SC( 45,     15,       13,      2, yes,  no,   0, no) \
-    SC( 46,     15,       13,      3, yes,  no,   0, no) \
-    SC( 47,     15,       13,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 48,     16,       14,      1, yes,  no,   0, no) \
-    SC( 49,     16,       14,      2, yes,  no,   0, no) \
-    SC( 50,     16,       14,      3, yes,  no,   0, no) \
-    SC( 51,     16,       14,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 52,     17,       15,      1, yes,  no,   0, no) \
-    SC( 53,     17,       15,      2, yes,  no,   0, no) \
-    SC( 54,     17,       15,      3, yes,  no,   0, no) \
-    SC( 55,     17,       15,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 56,     18,       16,      1, yes,  no,   0, no) \
-    SC( 57,     18,       16,      2, yes,  no,   0, no) \
-    SC( 58,     18,       16,      3, yes,  no,   0, no) \
-    SC( 59,     18,       16,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 60,     19,       17,      1, yes,  no,   0, no) \
-    SC( 61,     19,       17,      2, yes,  no,   0, no) \
-    SC( 62,     19,       17,      3, yes,  no,   0, no) \
-    SC( 63,     19,       17,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 64,     20,       18,      1, yes,  no,   0, no) \
-    SC( 65,     20,       18,      2, yes,  no,   0, no) \
-    SC( 66,     20,       18,      3, yes,  no,   0, no) \
-    SC( 67,     20,       18,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 68,     21,       19,      1, yes,  no,   0, no) \
-    SC( 69,     21,       19,      2, yes,  no,   0, no) \
-    SC( 70,     21,       19,      3, yes,  no,   0, no) \
-    SC( 71,     21,       19,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 72,     22,       20,      1, yes,  no,   0, no) \
-    SC( 73,     22,       20,      2, yes,  no,   0, no) \
-    SC( 74,     22,       20,      3, yes,  no,   0, no) \
-    SC( 75,     22,       20,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 76,     23,       21,      1, yes,  no,   0, no) \
-    SC( 77,     23,       21,      2, yes,  no,   0, no) \
-    SC( 78,     23,       21,      3, yes,  no,   0, no) \
-    SC( 79,     23,       21,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 80,     24,       22,      1, yes,  no,   0, no) \
-    SC( 81,     24,       22,      2, yes,  no,   0, no) \
-    SC( 82,     24,       22,      3, yes,  no,   0, no) \
-    SC( 83,     24,       22,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 84,     25,       23,      1, yes,  no,   0, no) \
-    SC( 85,     25,       23,      2, yes,  no,   0, no) \
-    SC( 86,     25,       23,      3, yes,  no,   0, no) \
-    SC( 87,     25,       23,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 88,     26,       24,      1, yes,  no,   0, no) \
-    SC( 89,     26,       24,      2, yes,  no,   0, no) \
-    SC( 90,     26,       24,      3, yes,  no,   0, no) \
-    SC( 91,     26,       24,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 92,     27,       25,      1, yes,  no,   0, no) \
-    SC( 93,     27,       25,      2, yes,  no,   0, no) \
-    SC( 94,     27,       25,      3, yes,  no,   0, no) \
-    SC( 95,     27,       25,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 96,     28,       26,      1, yes,  no,   0, no) \
-    SC( 97,     28,       26,      2, yes,  no,   0, no) \
-    SC( 98,     28,       26,      3, yes,  no,   0, no) \
-    SC( 99,     28,       26,      4, yes,  no,   0, no) \
-                                                         \
-    SC(100,     29,       27,      1, yes,  no,   0, no) \
-    SC(101,     29,       27,      2, yes,  no,   0, no) \
-    SC(102,     29,       27,      3, yes,  no,   0, no) \
-    SC(103,     29,       27,      4, yes,  no,   0, no) \
-                                                         \
-    SC(104,     30,       28,      1, yes,  no,   0, no) \
-    SC(105,     30,       28,      2, yes,  no,   0, no) \
-    SC(106,     30,       28,      3, yes,  no,   0, no) \
-
-#define	SIZE_CLASSES_DEFINED
-#define	NTBINS			0
-#define	NLBINS			32
-#define	NBINS			39
-#define	NSIZES			107
-#define	NPSIZES			71
-#define	LG_TINY_MAXCLASS	"NA"
-#define	LOOKUP_MAXCLASS		((((size_t)1) << 11) + (((size_t)4) << 9))
-#define	SMALL_MAXCLASS		((((size_t)1) << 13) + (((size_t)3) << 11))
-#define	LG_LARGE_MINCLASS	14
-#define	LARGE_MAXCLASS		((((size_t)1) << 30) + (((size_t)3) << 28))
-#endif
-
-#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12)
-#define	SIZE_CLASSES \
-  /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
-    SC(  0,      3,        3,      0,  no, yes,   1,  3) \
-                                                         \
-    SC(  1,      3,        3,      1,  no, yes,   1,  3) \
-    SC(  2,      4,        4,      1,  no, yes,   1,  4) \
-    SC(  3,      4,        4,      2,  no, yes,   3,  4) \
-    SC(  4,      4,        4,      3,  no, yes,   1,  4) \
-                                                         \
-    SC(  5,      6,        4,      1,  no, yes,   5,  4) \
-    SC(  6,      6,        4,      2,  no, yes,   3,  4) \
-    SC(  7,      6,        4,      3,  no, yes,   7,  4) \
-    SC(  8,      6,        4,      4,  no, yes,   1,  4) \
-                                                         \
-    SC(  9,      7,        5,      1,  no, yes,   5,  5) \
-    SC( 10,      7,        5,      2,  no, yes,   3,  5) \
-    SC( 11,      7,        5,      3,  no, yes,   7,  5) \
-    SC( 12,      7,        5,      4,  no, yes,   1,  5) \
-                                                         \
-    SC( 13,      8,        6,      1,  no, yes,   5,  6) \
-    SC( 14,      8,        6,      2,  no, yes,   3,  6) \
-    SC( 15,      8,        6,      3,  no, yes,   7,  6) \
-    SC( 16,      8,        6,      4,  no, yes,   1,  6) \
-                                                         \
-    SC( 17,      9,        7,      1,  no, yes,   5,  7) \
-    SC( 18,      9,        7,      2,  no, yes,   3,  7) \
-    SC( 19,      9,        7,      3,  no, yes,   7,  7) \
-    SC( 20,      9,        7,      4,  no, yes,   1,  7) \
-                                                         \
-    SC( 21,     10,        8,      1,  no, yes,   5,  8) \
-    SC( 22,     10,        8,      2,  no, yes,   3,  8) \
-    SC( 23,     10,        8,      3,  no, yes,   7,  8) \
-    SC( 24,     10,        8,      4,  no, yes,   1,  8) \
-                                                         \
-    SC( 25,     11,        9,      1,  no, yes,   5,  9) \
-    SC( 26,     11,        9,      2,  no, yes,   3,  9) \
-    SC( 27,     11,        9,      3,  no, yes,   7,  9) \
-    SC( 28,     11,        9,      4, yes, yes,   1,  9) \
-                                                         \
-    SC( 29,     12,       10,      1,  no, yes,   5, no) \
-    SC( 30,     12,       10,      2,  no, yes,   3, no) \
-    SC( 31,     12,       10,      3,  no, yes,   7, no) \
-    SC( 32,     12,       10,      4, yes, yes,   2, no) \
-                                                         \
-    SC( 33,     13,       11,      1,  no, yes,   5, no) \
-    SC( 34,     13,       11,      2, yes, yes,   3, no) \
-    SC( 35,     13,       11,      3,  no, yes,   7, no) \
-    SC( 36,     13,       11,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 37,     14,       12,      1, yes,  no,   0, no) \
-    SC( 38,     14,       12,      2, yes,  no,   0, no) \
-    SC( 39,     14,       12,      3, yes,  no,   0, no) \
-    SC( 40,     14,       12,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 41,     15,       13,      1, yes,  no,   0, no) \
-    SC( 42,     15,       13,      2, yes,  no,   0, no) \
-    SC( 43,     15,       13,      3, yes,  no,   0, no) \
-    SC( 44,     15,       13,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 45,     16,       14,      1, yes,  no,   0, no) \
-    SC( 46,     16,       14,      2, yes,  no,   0, no) \
-    SC( 47,     16,       14,      3, yes,  no,   0, no) \
-    SC( 48,     16,       14,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 49,     17,       15,      1, yes,  no,   0, no) \
-    SC( 50,     17,       15,      2, yes,  no,   0, no) \
-    SC( 51,     17,       15,      3, yes,  no,   0, no) \
-    SC( 52,     17,       15,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 53,     18,       16,      1, yes,  no,   0, no) \
-    SC( 54,     18,       16,      2, yes,  no,   0, no) \
-    SC( 55,     18,       16,      3, yes,  no,   0, no) \
-    SC( 56,     18,       16,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 57,     19,       17,      1, yes,  no,   0, no) \
-    SC( 58,     19,       17,      2, yes,  no,   0, no) \
-    SC( 59,     19,       17,      3, yes,  no,   0, no) \
-    SC( 60,     19,       17,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 61,     20,       18,      1, yes,  no,   0, no) \
-    SC( 62,     20,       18,      2, yes,  no,   0, no) \
-    SC( 63,     20,       18,      3, yes,  no,   0, no) \
-    SC( 64,     20,       18,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 65,     21,       19,      1, yes,  no,   0, no) \
-    SC( 66,     21,       19,      2, yes,  no,   0, no) \
-    SC( 67,     21,       19,      3, yes,  no,   0, no) \
-    SC( 68,     21,       19,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 69,     22,       20,      1, yes,  no,   0, no) \
-    SC( 70,     22,       20,      2, yes,  no,   0, no) \
-    SC( 71,     22,       20,      3, yes,  no,   0, no) \
-    SC( 72,     22,       20,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 73,     23,       21,      1, yes,  no,   0, no) \
-    SC( 74,     23,       21,      2, yes,  no,   0, no) \
-    SC( 75,     23,       21,      3, yes,  no,   0, no) \
-    SC( 76,     23,       21,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 77,     24,       22,      1, yes,  no,   0, no) \
-    SC( 78,     24,       22,      2, yes,  no,   0, no) \
-    SC( 79,     24,       22,      3, yes,  no,   0, no) \
-    SC( 80,     24,       22,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 81,     25,       23,      1, yes,  no,   0, no) \
-    SC( 82,     25,       23,      2, yes,  no,   0, no) \
-    SC( 83,     25,       23,      3, yes,  no,   0, no) \
-    SC( 84,     25,       23,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 85,     26,       24,      1, yes,  no,   0, no) \
-    SC( 86,     26,       24,      2, yes,  no,   0, no) \
-    SC( 87,     26,       24,      3, yes,  no,   0, no) \
-    SC( 88,     26,       24,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 89,     27,       25,      1, yes,  no,   0, no) \
-    SC( 90,     27,       25,      2, yes,  no,   0, no) \
-    SC( 91,     27,       25,      3, yes,  no,   0, no) \
-    SC( 92,     27,       25,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 93,     28,       26,      1, yes,  no,   0, no) \
-    SC( 94,     28,       26,      2, yes,  no,   0, no) \
-    SC( 95,     28,       26,      3, yes,  no,   0, no) \
-    SC( 96,     28,       26,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 97,     29,       27,      1, yes,  no,   0, no) \
-    SC( 98,     29,       27,      2, yes,  no,   0, no) \
-    SC( 99,     29,       27,      3, yes,  no,   0, no) \
-    SC(100,     29,       27,      4, yes,  no,   0, no) \
-                                                         \
-    SC(101,     30,       28,      1, yes,  no,   0, no) \
-    SC(102,     30,       28,      2, yes,  no,   0, no) \
-    SC(103,     30,       28,      3, yes,  no,   0, no) \
-
-#define	SIZE_CLASSES_DEFINED
-#define	NTBINS			1
-#define	NLBINS			29
-#define	NBINS			36
-#define	NSIZES			104
-#define	NPSIZES			71
-#define	LG_TINY_MAXCLASS	3
-#define	LOOKUP_MAXCLASS		((((size_t)1) << 11) + (((size_t)4) << 9))
-#define	SMALL_MAXCLASS		((((size_t)1) << 13) + (((size_t)3) << 11))
-#define	LG_LARGE_MINCLASS	14
-#define	LARGE_MAXCLASS		((((size_t)1) << 30) + (((size_t)3) << 28))
-#endif
-
-#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12)
-#define	SIZE_CLASSES \
-  /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
-    SC(  0,      4,        4,      0,  no, yes,   1,  4) \
-    SC(  1,      4,        4,      1,  no, yes,   1,  4) \
-    SC(  2,      4,        4,      2,  no, yes,   3,  4) \
-    SC(  3,      4,        4,      3,  no, yes,   1,  4) \
-                                                         \
-    SC(  4,      6,        4,      1,  no, yes,   5,  4) \
-    SC(  5,      6,        4,      2,  no, yes,   3,  4) \
-    SC(  6,      6,        4,      3,  no, yes,   7,  4) \
-    SC(  7,      6,        4,      4,  no, yes,   1,  4) \
-                                                         \
-    SC(  8,      7,        5,      1,  no, yes,   5,  5) \
-    SC(  9,      7,        5,      2,  no, yes,   3,  5) \
-    SC( 10,      7,        5,      3,  no, yes,   7,  5) \
-    SC( 11,      7,        5,      4,  no, yes,   1,  5) \
-                                                         \
-    SC( 12,      8,        6,      1,  no, yes,   5,  6) \
-    SC( 13,      8,        6,      2,  no, yes,   3,  6) \
-    SC( 14,      8,        6,      3,  no, yes,   7,  6) \
-    SC( 15,      8,        6,      4,  no, yes,   1,  6) \
-                                                         \
-    SC( 16,      9,        7,      1,  no, yes,   5,  7) \
-    SC( 17,      9,        7,      2,  no, yes,   3,  7) \
-    SC( 18,      9,        7,      3,  no, yes,   7,  7) \
-    SC( 19,      9,        7,      4,  no, yes,   1,  7) \
-                                                         \
-    SC( 20,     10,        8,      1,  no, yes,   5,  8) \
-    SC( 21,     10,        8,      2,  no, yes,   3,  8) \
-    SC( 22,     10,        8,      3,  no, yes,   7,  8) \
-    SC( 23,     10,        8,      4,  no, yes,   1,  8) \
-                                                         \
-    SC( 24,     11,        9,      1,  no, yes,   5,  9) \
-    SC( 25,     11,        9,      2,  no, yes,   3,  9) \
-    SC( 26,     11,        9,      3,  no, yes,   7,  9) \
-    SC( 27,     11,        9,      4, yes, yes,   1,  9) \
-                                                         \
-    SC( 28,     12,       10,      1,  no, yes,   5, no) \
-    SC( 29,     12,       10,      2,  no, yes,   3, no) \
-    SC( 30,     12,       10,      3,  no, yes,   7, no) \
-    SC( 31,     12,       10,      4, yes, yes,   2, no) \
-                                                         \
-    SC( 32,     13,       11,      1,  no, yes,   5, no) \
-    SC( 33,     13,       11,      2, yes, yes,   3, no) \
-    SC( 34,     13,       11,      3,  no, yes,   7, no) \
-    SC( 35,     13,       11,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 36,     14,       12,      1, yes,  no,   0, no) \
-    SC( 37,     14,       12,      2, yes,  no,   0, no) \
-    SC( 38,     14,       12,      3, yes,  no,   0, no) \
-    SC( 39,     14,       12,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 40,     15,       13,      1, yes,  no,   0, no) \
-    SC( 41,     15,       13,      2, yes,  no,   0, no) \
-    SC( 42,     15,       13,      3, yes,  no,   0, no) \
-    SC( 43,     15,       13,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 44,     16,       14,      1, yes,  no,   0, no) \
-    SC( 45,     16,       14,      2, yes,  no,   0, no) \
-    SC( 46,     16,       14,      3, yes,  no,   0, no) \
-    SC( 47,     16,       14,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 48,     17,       15,      1, yes,  no,   0, no) \
-    SC( 49,     17,       15,      2, yes,  no,   0, no) \
-    SC( 50,     17,       15,      3, yes,  no,   0, no) \
-    SC( 51,     17,       15,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 52,     18,       16,      1, yes,  no,   0, no) \
-    SC( 53,     18,       16,      2, yes,  no,   0, no) \
-    SC( 54,     18,       16,      3, yes,  no,   0, no) \
-    SC( 55,     18,       16,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 56,     19,       17,      1, yes,  no,   0, no) \
-    SC( 57,     19,       17,      2, yes,  no,   0, no) \
-    SC( 58,     19,       17,      3, yes,  no,   0, no) \
-    SC( 59,     19,       17,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 60,     20,       18,      1, yes,  no,   0, no) \
-    SC( 61,     20,       18,      2, yes,  no,   0, no) \
-    SC( 62,     20,       18,      3, yes,  no,   0, no) \
-    SC( 63,     20,       18,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 64,     21,       19,      1, yes,  no,   0, no) \
-    SC( 65,     21,       19,      2, yes,  no,   0, no) \
-    SC( 66,     21,       19,      3, yes,  no,   0, no) \
-    SC( 67,     21,       19,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 68,     22,       20,      1, yes,  no,   0, no) \
-    SC( 69,     22,       20,      2, yes,  no,   0, no) \
-    SC( 70,     22,       20,      3, yes,  no,   0, no) \
-    SC( 71,     22,       20,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 72,     23,       21,      1, yes,  no,   0, no) \
-    SC( 73,     23,       21,      2, yes,  no,   0, no) \
-    SC( 74,     23,       21,      3, yes,  no,   0, no) \
-    SC( 75,     23,       21,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 76,     24,       22,      1, yes,  no,   0, no) \
-    SC( 77,     24,       22,      2, yes,  no,   0, no) \
-    SC( 78,     24,       22,      3, yes,  no,   0, no) \
-    SC( 79,     24,       22,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 80,     25,       23,      1, yes,  no,   0, no) \
-    SC( 81,     25,       23,      2, yes,  no,   0, no) \
-    SC( 82,     25,       23,      3, yes,  no,   0, no) \
-    SC( 83,     25,       23,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 84,     26,       24,      1, yes,  no,   0, no) \
-    SC( 85,     26,       24,      2, yes,  no,   0, no) \
-    SC( 86,     26,       24,      3, yes,  no,   0, no) \
-    SC( 87,     26,       24,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 88,     27,       25,      1, yes,  no,   0, no) \
-    SC( 89,     27,       25,      2, yes,  no,   0, no) \
-    SC( 90,     27,       25,      3, yes,  no,   0, no) \
-    SC( 91,     27,       25,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 92,     28,       26,      1, yes,  no,   0, no) \
-    SC( 93,     28,       26,      2, yes,  no,   0, no) \
-    SC( 94,     28,       26,      3, yes,  no,   0, no) \
-    SC( 95,     28,       26,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 96,     29,       27,      1, yes,  no,   0, no) \
-    SC( 97,     29,       27,      2, yes,  no,   0, no) \
-    SC( 98,     29,       27,      3, yes,  no,   0, no) \
-    SC( 99,     29,       27,      4, yes,  no,   0, no) \
-                                                         \
-    SC(100,     30,       28,      1, yes,  no,   0, no) \
-    SC(101,     30,       28,      2, yes,  no,   0, no) \
-    SC(102,     30,       28,      3, yes,  no,   0, no) \
-
-#define	SIZE_CLASSES_DEFINED
-#define	NTBINS			0
-#define	NLBINS			28
-#define	NBINS			35
-#define	NSIZES			103
-#define	NPSIZES			71
-#define	LG_TINY_MAXCLASS	"NA"
-#define	LOOKUP_MAXCLASS		((((size_t)1) << 11) + (((size_t)4) << 9))
-#define	SMALL_MAXCLASS		((((size_t)1) << 13) + (((size_t)3) << 11))
-#define	LG_LARGE_MINCLASS	14
-#define	LARGE_MAXCLASS		((((size_t)1) << 30) + (((size_t)3) << 28))
-#endif
-
-#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12)
-#define	SIZE_CLASSES \
-  /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
-    SC(  0,      3,        3,      0,  no, yes,   1,  3) \
-    SC(  1,      3,        3,      1,  no, yes,   1,  3) \
-    SC(  2,      3,        3,      2,  no, yes,   3,  3) \
-    SC(  3,      3,        3,      3,  no, yes,   1,  3) \
-                                                         \
-    SC(  4,      5,        3,      1,  no, yes,   5,  3) \
-    SC(  5,      5,        3,      2,  no, yes,   3,  3) \
-    SC(  6,      5,        3,      3,  no, yes,   7,  3) \
-    SC(  7,      5,        3,      4,  no, yes,   1,  3) \
-                                                         \
-    SC(  8,      6,        4,      1,  no, yes,   5,  4) \
-    SC(  9,      6,        4,      2,  no, yes,   3,  4) \
-    SC( 10,      6,        4,      3,  no, yes,   7,  4) \
-    SC( 11,      6,        4,      4,  no, yes,   1,  4) \
-                                                         \
-    SC( 12,      7,        5,      1,  no, yes,   5,  5) \
-    SC( 13,      7,        5,      2,  no, yes,   3,  5) \
-    SC( 14,      7,        5,      3,  no, yes,   7,  5) \
-    SC( 15,      7,        5,      4,  no, yes,   1,  5) \
-                                                         \
-    SC( 16,      8,        6,      1,  no, yes,   5,  6) \
-    SC( 17,      8,        6,      2,  no, yes,   3,  6) \
-    SC( 18,      8,        6,      3,  no, yes,   7,  6) \
-    SC( 19,      8,        6,      4,  no, yes,   1,  6) \
-                                                         \
-    SC( 20,      9,        7,      1,  no, yes,   5,  7) \
-    SC( 21,      9,        7,      2,  no, yes,   3,  7) \
-    SC( 22,      9,        7,      3,  no, yes,   7,  7) \
-    SC( 23,      9,        7,      4,  no, yes,   1,  7) \
-                                                         \
-    SC( 24,     10,        8,      1,  no, yes,   5,  8) \
-    SC( 25,     10,        8,      2,  no, yes,   3,  8) \
-    SC( 26,     10,        8,      3,  no, yes,   7,  8) \
-    SC( 27,     10,        8,      4,  no, yes,   1,  8) \
-                                                         \
-    SC( 28,     11,        9,      1,  no, yes,   5,  9) \
-    SC( 29,     11,        9,      2,  no, yes,   3,  9) \
-    SC( 30,     11,        9,      3,  no, yes,   7,  9) \
-    SC( 31,     11,        9,      4, yes, yes,   1,  9) \
-                                                         \
-    SC( 32,     12,       10,      1,  no, yes,   5, no) \
-    SC( 33,     12,       10,      2,  no, yes,   3, no) \
-    SC( 34,     12,       10,      3,  no, yes,   7, no) \
-    SC( 35,     12,       10,      4, yes, yes,   2, no) \
-                                                         \
-    SC( 36,     13,       11,      1,  no, yes,   5, no) \
-    SC( 37,     13,       11,      2, yes, yes,   3, no) \
-    SC( 38,     13,       11,      3,  no, yes,   7, no) \
-    SC( 39,     13,       11,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 40,     14,       12,      1, yes,  no,   0, no) \
-    SC( 41,     14,       12,      2, yes,  no,   0, no) \
-    SC( 42,     14,       12,      3, yes,  no,   0, no) \
-    SC( 43,     14,       12,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 44,     15,       13,      1, yes,  no,   0, no) \
-    SC( 45,     15,       13,      2, yes,  no,   0, no) \
-    SC( 46,     15,       13,      3, yes,  no,   0, no) \
-    SC( 47,     15,       13,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 48,     16,       14,      1, yes,  no,   0, no) \
-    SC( 49,     16,       14,      2, yes,  no,   0, no) \
-    SC( 50,     16,       14,      3, yes,  no,   0, no) \
-    SC( 51,     16,       14,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 52,     17,       15,      1, yes,  no,   0, no) \
-    SC( 53,     17,       15,      2, yes,  no,   0, no) \
-    SC( 54,     17,       15,      3, yes,  no,   0, no) \
-    SC( 55,     17,       15,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 56,     18,       16,      1, yes,  no,   0, no) \
-    SC( 57,     18,       16,      2, yes,  no,   0, no) \
-    SC( 58,     18,       16,      3, yes,  no,   0, no) \
-    SC( 59,     18,       16,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 60,     19,       17,      1, yes,  no,   0, no) \
-    SC( 61,     19,       17,      2, yes,  no,   0, no) \
-    SC( 62,     19,       17,      3, yes,  no,   0, no) \
-    SC( 63,     19,       17,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 64,     20,       18,      1, yes,  no,   0, no) \
-    SC( 65,     20,       18,      2, yes,  no,   0, no) \
-    SC( 66,     20,       18,      3, yes,  no,   0, no) \
-    SC( 67,     20,       18,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 68,     21,       19,      1, yes,  no,   0, no) \
-    SC( 69,     21,       19,      2, yes,  no,   0, no) \
-    SC( 70,     21,       19,      3, yes,  no,   0, no) \
-    SC( 71,     21,       19,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 72,     22,       20,      1, yes,  no,   0, no) \
-    SC( 73,     22,       20,      2, yes,  no,   0, no) \
-    SC( 74,     22,       20,      3, yes,  no,   0, no) \
-    SC( 75,     22,       20,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 76,     23,       21,      1, yes,  no,   0, no) \
-    SC( 77,     23,       21,      2, yes,  no,   0, no) \
-    SC( 78,     23,       21,      3, yes,  no,   0, no) \
-    SC( 79,     23,       21,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 80,     24,       22,      1, yes,  no,   0, no) \
-    SC( 81,     24,       22,      2, yes,  no,   0, no) \
-    SC( 82,     24,       22,      3, yes,  no,   0, no) \
-    SC( 83,     24,       22,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 84,     25,       23,      1, yes,  no,   0, no) \
-    SC( 85,     25,       23,      2, yes,  no,   0, no) \
-    SC( 86,     25,       23,      3, yes,  no,   0, no) \
-    SC( 87,     25,       23,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 88,     26,       24,      1, yes,  no,   0, no) \
-    SC( 89,     26,       24,      2, yes,  no,   0, no) \
-    SC( 90,     26,       24,      3, yes,  no,   0, no) \
-    SC( 91,     26,       24,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 92,     27,       25,      1, yes,  no,   0, no) \
-    SC( 93,     27,       25,      2, yes,  no,   0, no) \
-    SC( 94,     27,       25,      3, yes,  no,   0, no) \
-    SC( 95,     27,       25,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 96,     28,       26,      1, yes,  no,   0, no) \
-    SC( 97,     28,       26,      2, yes,  no,   0, no) \
-    SC( 98,     28,       26,      3, yes,  no,   0, no) \
-    SC( 99,     28,       26,      4, yes,  no,   0, no) \
-                                                         \
-    SC(100,     29,       27,      1, yes,  no,   0, no) \
-    SC(101,     29,       27,      2, yes,  no,   0, no) \
-    SC(102,     29,       27,      3, yes,  no,   0, no) \
-    SC(103,     29,       27,      4, yes,  no,   0, no) \
-                                                         \
-    SC(104,     30,       28,      1, yes,  no,   0, no) \
-    SC(105,     30,       28,      2, yes,  no,   0, no) \
-    SC(106,     30,       28,      3, yes,  no,   0, no) \
-    SC(107,     30,       28,      4, yes,  no,   0, no) \
-                                                         \
-    SC(108,     31,       29,      1, yes,  no,   0, no) \
-    SC(109,     31,       29,      2, yes,  no,   0, no) \
-    SC(110,     31,       29,      3, yes,  no,   0, no) \
-    SC(111,     31,       29,      4, yes,  no,   0, no) \
-                                                         \
-    SC(112,     32,       30,      1, yes,  no,   0, no) \
-    SC(113,     32,       30,      2, yes,  no,   0, no) \
-    SC(114,     32,       30,      3, yes,  no,   0, no) \
-    SC(115,     32,       30,      4, yes,  no,   0, no) \
-                                                         \
-    SC(116,     33,       31,      1, yes,  no,   0, no) \
-    SC(117,     33,       31,      2, yes,  no,   0, no) \
-    SC(118,     33,       31,      3, yes,  no,   0, no) \
-    SC(119,     33,       31,      4, yes,  no,   0, no) \
-                                                         \
-    SC(120,     34,       32,      1, yes,  no,   0, no) \
-    SC(121,     34,       32,      2, yes,  no,   0, no) \
-    SC(122,     34,       32,      3, yes,  no,   0, no) \
-    SC(123,     34,       32,      4, yes,  no,   0, no) \
-                                                         \
-    SC(124,     35,       33,      1, yes,  no,   0, no) \
-    SC(125,     35,       33,      2, yes,  no,   0, no) \
-    SC(126,     35,       33,      3, yes,  no,   0, no) \
-    SC(127,     35,       33,      4, yes,  no,   0, no) \
-                                                         \
-    SC(128,     36,       34,      1, yes,  no,   0, no) \
-    SC(129,     36,       34,      2, yes,  no,   0, no) \
-    SC(130,     36,       34,      3, yes,  no,   0, no) \
-    SC(131,     36,       34,      4, yes,  no,   0, no) \
-                                                         \
-    SC(132,     37,       35,      1, yes,  no,   0, no) \
-    SC(133,     37,       35,      2, yes,  no,   0, no) \
-    SC(134,     37,       35,      3, yes,  no,   0, no) \
-    SC(135,     37,       35,      4, yes,  no,   0, no) \
-                                                         \
-    SC(136,     38,       36,      1, yes,  no,   0, no) \
-    SC(137,     38,       36,      2, yes,  no,   0, no) \
-    SC(138,     38,       36,      3, yes,  no,   0, no) \
-    SC(139,     38,       36,      4, yes,  no,   0, no) \
-                                                         \
-    SC(140,     39,       37,      1, yes,  no,   0, no) \
-    SC(141,     39,       37,      2, yes,  no,   0, no) \
-    SC(142,     39,       37,      3, yes,  no,   0, no) \
-    SC(143,     39,       37,      4, yes,  no,   0, no) \
-                                                         \
-    SC(144,     40,       38,      1, yes,  no,   0, no) \
-    SC(145,     40,       38,      2, yes,  no,   0, no) \
-    SC(146,     40,       38,      3, yes,  no,   0, no) \
-    SC(147,     40,       38,      4, yes,  no,   0, no) \
-                                                         \
-    SC(148,     41,       39,      1, yes,  no,   0, no) \
-    SC(149,     41,       39,      2, yes,  no,   0, no) \
-    SC(150,     41,       39,      3, yes,  no,   0, no) \
-    SC(151,     41,       39,      4, yes,  no,   0, no) \
-                                                         \
-    SC(152,     42,       40,      1, yes,  no,   0, no) \
-    SC(153,     42,       40,      2, yes,  no,   0, no) \
-    SC(154,     42,       40,      3, yes,  no,   0, no) \
-    SC(155,     42,       40,      4, yes,  no,   0, no) \
-                                                         \
-    SC(156,     43,       41,      1, yes,  no,   0, no) \
-    SC(157,     43,       41,      2, yes,  no,   0, no) \
-    SC(158,     43,       41,      3, yes,  no,   0, no) \
-    SC(159,     43,       41,      4, yes,  no,   0, no) \
-                                                         \
-    SC(160,     44,       42,      1, yes,  no,   0, no) \
-    SC(161,     44,       42,      2, yes,  no,   0, no) \
-    SC(162,     44,       42,      3, yes,  no,   0, no) \
-    SC(163,     44,       42,      4, yes,  no,   0, no) \
-                                                         \
-    SC(164,     45,       43,      1, yes,  no,   0, no) \
-    SC(165,     45,       43,      2, yes,  no,   0, no) \
-    SC(166,     45,       43,      3, yes,  no,   0, no) \
-    SC(167,     45,       43,      4, yes,  no,   0, no) \
-                                                         \
-    SC(168,     46,       44,      1, yes,  no,   0, no) \
-    SC(169,     46,       44,      2, yes,  no,   0, no) \
-    SC(170,     46,       44,      3, yes,  no,   0, no) \
-    SC(171,     46,       44,      4, yes,  no,   0, no) \
-                                                         \
-    SC(172,     47,       45,      1, yes,  no,   0, no) \
-    SC(173,     47,       45,      2, yes,  no,   0, no) \
-    SC(174,     47,       45,      3, yes,  no,   0, no) \
-    SC(175,     47,       45,      4, yes,  no,   0, no) \
-                                                         \
-    SC(176,     48,       46,      1, yes,  no,   0, no) \
-    SC(177,     48,       46,      2, yes,  no,   0, no) \
-    SC(178,     48,       46,      3, yes,  no,   0, no) \
-    SC(179,     48,       46,      4, yes,  no,   0, no) \
-                                                         \
-    SC(180,     49,       47,      1, yes,  no,   0, no) \
-    SC(181,     49,       47,      2, yes,  no,   0, no) \
-    SC(182,     49,       47,      3, yes,  no,   0, no) \
-    SC(183,     49,       47,      4, yes,  no,   0, no) \
-                                                         \
-    SC(184,     50,       48,      1, yes,  no,   0, no) \
-    SC(185,     50,       48,      2, yes,  no,   0, no) \
-    SC(186,     50,       48,      3, yes,  no,   0, no) \
-    SC(187,     50,       48,      4, yes,  no,   0, no) \
-                                                         \
-    SC(188,     51,       49,      1, yes,  no,   0, no) \
-    SC(189,     51,       49,      2, yes,  no,   0, no) \
-    SC(190,     51,       49,      3, yes,  no,   0, no) \
-    SC(191,     51,       49,      4, yes,  no,   0, no) \
-                                                         \
-    SC(192,     52,       50,      1, yes,  no,   0, no) \
-    SC(193,     52,       50,      2, yes,  no,   0, no) \
-    SC(194,     52,       50,      3, yes,  no,   0, no) \
-    SC(195,     52,       50,      4, yes,  no,   0, no) \
-                                                         \
-    SC(196,     53,       51,      1, yes,  no,   0, no) \
-    SC(197,     53,       51,      2, yes,  no,   0, no) \
-    SC(198,     53,       51,      3, yes,  no,   0, no) \
-    SC(199,     53,       51,      4, yes,  no,   0, no) \
-                                                         \
-    SC(200,     54,       52,      1, yes,  no,   0, no) \
-    SC(201,     54,       52,      2, yes,  no,   0, no) \
-    SC(202,     54,       52,      3, yes,  no,   0, no) \
-    SC(203,     54,       52,      4, yes,  no,   0, no) \
-                                                         \
-    SC(204,     55,       53,      1, yes,  no,   0, no) \
-    SC(205,     55,       53,      2, yes,  no,   0, no) \
-    SC(206,     55,       53,      3, yes,  no,   0, no) \
-    SC(207,     55,       53,      4, yes,  no,   0, no) \
-                                                         \
-    SC(208,     56,       54,      1, yes,  no,   0, no) \
-    SC(209,     56,       54,      2, yes,  no,   0, no) \
-    SC(210,     56,       54,      3, yes,  no,   0, no) \
-    SC(211,     56,       54,      4, yes,  no,   0, no) \
-                                                         \
-    SC(212,     57,       55,      1, yes,  no,   0, no) \
-    SC(213,     57,       55,      2, yes,  no,   0, no) \
-    SC(214,     57,       55,      3, yes,  no,   0, no) \
-    SC(215,     57,       55,      4, yes,  no,   0, no) \
-                                                         \
-    SC(216,     58,       56,      1, yes,  no,   0, no) \
-    SC(217,     58,       56,      2, yes,  no,   0, no) \
-    SC(218,     58,       56,      3, yes,  no,   0, no) \
-    SC(219,     58,       56,      4, yes,  no,   0, no) \
-                                                         \
-    SC(220,     59,       57,      1, yes,  no,   0, no) \
-    SC(221,     59,       57,      2, yes,  no,   0, no) \
-    SC(222,     59,       57,      3, yes,  no,   0, no) \
-    SC(223,     59,       57,      4, yes,  no,   0, no) \
-                                                         \
-    SC(224,     60,       58,      1, yes,  no,   0, no) \
-    SC(225,     60,       58,      2, yes,  no,   0, no) \
-    SC(226,     60,       58,      3, yes,  no,   0, no) \
-    SC(227,     60,       58,      4, yes,  no,   0, no) \
-                                                         \
-    SC(228,     61,       59,      1, yes,  no,   0, no) \
-    SC(229,     61,       59,      2, yes,  no,   0, no) \
-    SC(230,     61,       59,      3, yes,  no,   0, no) \
-    SC(231,     61,       59,      4, yes,  no,   0, no) \
-                                                         \
-    SC(232,     62,       60,      1, yes,  no,   0, no) \
-    SC(233,     62,       60,      2, yes,  no,   0, no) \
-    SC(234,     62,       60,      3, yes,  no,   0, no) \
-
-#define	SIZE_CLASSES_DEFINED
-#define	NTBINS			0
-#define	NLBINS			32
-#define	NBINS			39
-#define	NSIZES			235
-#define	NPSIZES			199
-#define	LG_TINY_MAXCLASS	"NA"
-#define	LOOKUP_MAXCLASS		((((size_t)1) << 11) + (((size_t)4) << 9))
-#define	SMALL_MAXCLASS		((((size_t)1) << 13) + (((size_t)3) << 11))
-#define	LG_LARGE_MINCLASS	14
-#define	LARGE_MAXCLASS		((((size_t)1) << 62) + (((size_t)3) << 60))
-#endif
-
-#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12)
-#define	SIZE_CLASSES \
-  /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
-    SC(  0,      3,        3,      0,  no, yes,   1,  3) \
-                                                         \
-    SC(  1,      3,        3,      1,  no, yes,   1,  3) \
-    SC(  2,      4,        4,      1,  no, yes,   1,  4) \
-    SC(  3,      4,        4,      2,  no, yes,   3,  4) \
-    SC(  4,      4,        4,      3,  no, yes,   1,  4) \
-                                                         \
-    SC(  5,      6,        4,      1,  no, yes,   5,  4) \
-    SC(  6,      6,        4,      2,  no, yes,   3,  4) \
-    SC(  7,      6,        4,      3,  no, yes,   7,  4) \
-    SC(  8,      6,        4,      4,  no, yes,   1,  4) \
-                                                         \
-    SC(  9,      7,        5,      1,  no, yes,   5,  5) \
-    SC( 10,      7,        5,      2,  no, yes,   3,  5) \
-    SC( 11,      7,        5,      3,  no, yes,   7,  5) \
-    SC( 12,      7,        5,      4,  no, yes,   1,  5) \
-                                                         \
-    SC( 13,      8,        6,      1,  no, yes,   5,  6) \
-    SC( 14,      8,        6,      2,  no, yes,   3,  6) \
-    SC( 15,      8,        6,      3,  no, yes,   7,  6) \
-    SC( 16,      8,        6,      4,  no, yes,   1,  6) \
-                                                         \
-    SC( 17,      9,        7,      1,  no, yes,   5,  7) \
-    SC( 18,      9,        7,      2,  no, yes,   3,  7) \
-    SC( 19,      9,        7,      3,  no, yes,   7,  7) \
-    SC( 20,      9,        7,      4,  no, yes,   1,  7) \
-                                                         \
-    SC( 21,     10,        8,      1,  no, yes,   5,  8) \
-    SC( 22,     10,        8,      2,  no, yes,   3,  8) \
-    SC( 23,     10,        8,      3,  no, yes,   7,  8) \
-    SC( 24,     10,        8,      4,  no, yes,   1,  8) \
-                                                         \
-    SC( 25,     11,        9,      1,  no, yes,   5,  9) \
-    SC( 26,     11,        9,      2,  no, yes,   3,  9) \
-    SC( 27,     11,        9,      3,  no, yes,   7,  9) \
-    SC( 28,     11,        9,      4, yes, yes,   1,  9) \
-                                                         \
-    SC( 29,     12,       10,      1,  no, yes,   5, no) \
-    SC( 30,     12,       10,      2,  no, yes,   3, no) \
-    SC( 31,     12,       10,      3,  no, yes,   7, no) \
-    SC( 32,     12,       10,      4, yes, yes,   2, no) \
-                                                         \
-    SC( 33,     13,       11,      1,  no, yes,   5, no) \
-    SC( 34,     13,       11,      2, yes, yes,   3, no) \
-    SC( 35,     13,       11,      3,  no, yes,   7, no) \
-    SC( 36,     13,       11,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 37,     14,       12,      1, yes,  no,   0, no) \
-    SC( 38,     14,       12,      2, yes,  no,   0, no) \
-    SC( 39,     14,       12,      3, yes,  no,   0, no) \
-    SC( 40,     14,       12,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 41,     15,       13,      1, yes,  no,   0, no) \
-    SC( 42,     15,       13,      2, yes,  no,   0, no) \
-    SC( 43,     15,       13,      3, yes,  no,   0, no) \
-    SC( 44,     15,       13,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 45,     16,       14,      1, yes,  no,   0, no) \
-    SC( 46,     16,       14,      2, yes,  no,   0, no) \
-    SC( 47,     16,       14,      3, yes,  no,   0, no) \
-    SC( 48,     16,       14,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 49,     17,       15,      1, yes,  no,   0, no) \
-    SC( 50,     17,       15,      2, yes,  no,   0, no) \
-    SC( 51,     17,       15,      3, yes,  no,   0, no) \
-    SC( 52,     17,       15,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 53,     18,       16,      1, yes,  no,   0, no) \
-    SC( 54,     18,       16,      2, yes,  no,   0, no) \
-    SC( 55,     18,       16,      3, yes,  no,   0, no) \
-    SC( 56,     18,       16,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 57,     19,       17,      1, yes,  no,   0, no) \
-    SC( 58,     19,       17,      2, yes,  no,   0, no) \
-    SC( 59,     19,       17,      3, yes,  no,   0, no) \
-    SC( 60,     19,       17,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 61,     20,       18,      1, yes,  no,   0, no) \
-    SC( 62,     20,       18,      2, yes,  no,   0, no) \
-    SC( 63,     20,       18,      3, yes,  no,   0, no) \
-    SC( 64,     20,       18,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 65,     21,       19,      1, yes,  no,   0, no) \
-    SC( 66,     21,       19,      2, yes,  no,   0, no) \
-    SC( 67,     21,       19,      3, yes,  no,   0, no) \
-    SC( 68,     21,       19,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 69,     22,       20,      1, yes,  no,   0, no) \
-    SC( 70,     22,       20,      2, yes,  no,   0, no) \
-    SC( 71,     22,       20,      3, yes,  no,   0, no) \
-    SC( 72,     22,       20,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 73,     23,       21,      1, yes,  no,   0, no) \
-    SC( 74,     23,       21,      2, yes,  no,   0, no) \
-    SC( 75,     23,       21,      3, yes,  no,   0, no) \
-    SC( 76,     23,       21,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 77,     24,       22,      1, yes,  no,   0, no) \
-    SC( 78,     24,       22,      2, yes,  no,   0, no) \
-    SC( 79,     24,       22,      3, yes,  no,   0, no) \
-    SC( 80,     24,       22,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 81,     25,       23,      1, yes,  no,   0, no) \
-    SC( 82,     25,       23,      2, yes,  no,   0, no) \
-    SC( 83,     25,       23,      3, yes,  no,   0, no) \
-    SC( 84,     25,       23,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 85,     26,       24,      1, yes,  no,   0, no) \
-    SC( 86,     26,       24,      2, yes,  no,   0, no) \
-    SC( 87,     26,       24,      3, yes,  no,   0, no) \
-    SC( 88,     26,       24,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 89,     27,       25,      1, yes,  no,   0, no) \
-    SC( 90,     27,       25,      2, yes,  no,   0, no) \
-    SC( 91,     27,       25,      3, yes,  no,   0, no) \
-    SC( 92,     27,       25,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 93,     28,       26,      1, yes,  no,   0, no) \
-    SC( 94,     28,       26,      2, yes,  no,   0, no) \
-    SC( 95,     28,       26,      3, yes,  no,   0, no) \
-    SC( 96,     28,       26,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 97,     29,       27,      1, yes,  no,   0, no) \
-    SC( 98,     29,       27,      2, yes,  no,   0, no) \
-    SC( 99,     29,       27,      3, yes,  no,   0, no) \
-    SC(100,     29,       27,      4, yes,  no,   0, no) \
-                                                         \
-    SC(101,     30,       28,      1, yes,  no,   0, no) \
-    SC(102,     30,       28,      2, yes,  no,   0, no) \
-    SC(103,     30,       28,      3, yes,  no,   0, no) \
-    SC(104,     30,       28,      4, yes,  no,   0, no) \
-                                                         \
-    SC(105,     31,       29,      1, yes,  no,   0, no) \
-    SC(106,     31,       29,      2, yes,  no,   0, no) \
-    SC(107,     31,       29,      3, yes,  no,   0, no) \
-    SC(108,     31,       29,      4, yes,  no,   0, no) \
-                                                         \
-    SC(109,     32,       30,      1, yes,  no,   0, no) \
-    SC(110,     32,       30,      2, yes,  no,   0, no) \
-    SC(111,     32,       30,      3, yes,  no,   0, no) \
-    SC(112,     32,       30,      4, yes,  no,   0, no) \
-                                                         \
-    SC(113,     33,       31,      1, yes,  no,   0, no) \
-    SC(114,     33,       31,      2, yes,  no,   0, no) \
-    SC(115,     33,       31,      3, yes,  no,   0, no) \
-    SC(116,     33,       31,      4, yes,  no,   0, no) \
-                                                         \
-    SC(117,     34,       32,      1, yes,  no,   0, no) \
-    SC(118,     34,       32,      2, yes,  no,   0, no) \
-    SC(119,     34,       32,      3, yes,  no,   0, no) \
-    SC(120,     34,       32,      4, yes,  no,   0, no) \
-                                                         \
-    SC(121,     35,       33,      1, yes,  no,   0, no) \
-    SC(122,     35,       33,      2, yes,  no,   0, no) \
-    SC(123,     35,       33,      3, yes,  no,   0, no) \
-    SC(124,     35,       33,      4, yes,  no,   0, no) \
-                                                         \
-    SC(125,     36,       34,      1, yes,  no,   0, no) \
-    SC(126,     36,       34,      2, yes,  no,   0, no) \
-    SC(127,     36,       34,      3, yes,  no,   0, no) \
-    SC(128,     36,       34,      4, yes,  no,   0, no) \
-                                                         \
-    SC(129,     37,       35,      1, yes,  no,   0, no) \
-    SC(130,     37,       35,      2, yes,  no,   0, no) \
-    SC(131,     37,       35,      3, yes,  no,   0, no) \
-    SC(132,     37,       35,      4, yes,  no,   0, no) \
-                                                         \
-    SC(133,     38,       36,      1, yes,  no,   0, no) \
-    SC(134,     38,       36,      2, yes,  no,   0, no) \
-    SC(135,     38,       36,      3, yes,  no,   0, no) \
-    SC(136,     38,       36,      4, yes,  no,   0, no) \
-                                                         \
-    SC(137,     39,       37,      1, yes,  no,   0, no) \
-    SC(138,     39,       37,      2, yes,  no,   0, no) \
-    SC(139,     39,       37,      3, yes,  no,   0, no) \
-    SC(140,     39,       37,      4, yes,  no,   0, no) \
-                                                         \
-    SC(141,     40,       38,      1, yes,  no,   0, no) \
-    SC(142,     40,       38,      2, yes,  no,   0, no) \
-    SC(143,     40,       38,      3, yes,  no,   0, no) \
-    SC(144,     40,       38,      4, yes,  no,   0, no) \
-                                                         \
-    SC(145,     41,       39,      1, yes,  no,   0, no) \
-    SC(146,     41,       39,      2, yes,  no,   0, no) \
-    SC(147,     41,       39,      3, yes,  no,   0, no) \
-    SC(148,     41,       39,      4, yes,  no,   0, no) \
-                                                         \
-    SC(149,     42,       40,      1, yes,  no,   0, no) \
-    SC(150,     42,       40,      2, yes,  no,   0, no) \
-    SC(151,     42,       40,      3, yes,  no,   0, no) \
-    SC(152,     42,       40,      4, yes,  no,   0, no) \
-                                                         \
-    SC(153,     43,       41,      1, yes,  no,   0, no) \
-    SC(154,     43,       41,      2, yes,  no,   0, no) \
-    SC(155,     43,       41,      3, yes,  no,   0, no) \
-    SC(156,     43,       41,      4, yes,  no,   0, no) \
-                                                         \
-    SC(157,     44,       42,      1, yes,  no,   0, no) \
-    SC(158,     44,       42,      2, yes,  no,   0, no) \
-    SC(159,     44,       42,      3, yes,  no,   0, no) \
-    SC(160,     44,       42,      4, yes,  no,   0, no) \
-                                                         \
-    SC(161,     45,       43,      1, yes,  no,   0, no) \
-    SC(162,     45,       43,      2, yes,  no,   0, no) \
-    SC(163,     45,       43,      3, yes,  no,   0, no) \
-    SC(164,     45,       43,      4, yes,  no,   0, no) \
-                                                         \
-    SC(165,     46,       44,      1, yes,  no,   0, no) \
-    SC(166,     46,       44,      2, yes,  no,   0, no) \
-    SC(167,     46,       44,      3, yes,  no,   0, no) \
-    SC(168,     46,       44,      4, yes,  no,   0, no) \
-                                                         \
-    SC(169,     47,       45,      1, yes,  no,   0, no) \
-    SC(170,     47,       45,      2, yes,  no,   0, no) \
-    SC(171,     47,       45,      3, yes,  no,   0, no) \
-    SC(172,     47,       45,      4, yes,  no,   0, no) \
-                                                         \
-    SC(173,     48,       46,      1, yes,  no,   0, no) \
-    SC(174,     48,       46,      2, yes,  no,   0, no) \
-    SC(175,     48,       46,      3, yes,  no,   0, no) \
-    SC(176,     48,       46,      4, yes,  no,   0, no) \
-                                                         \
-    SC(177,     49,       47,      1, yes,  no,   0, no) \
-    SC(178,     49,       47,      2, yes,  no,   0, no) \
-    SC(179,     49,       47,      3, yes,  no,   0, no) \
-    SC(180,     49,       47,      4, yes,  no,   0, no) \
-                                                         \
-    SC(181,     50,       48,      1, yes,  no,   0, no) \
-    SC(182,     50,       48,      2, yes,  no,   0, no) \
-    SC(183,     50,       48,      3, yes,  no,   0, no) \
-    SC(184,     50,       48,      4, yes,  no,   0, no) \
-                                                         \
-    SC(185,     51,       49,      1, yes,  no,   0, no) \
-    SC(186,     51,       49,      2, yes,  no,   0, no) \
-    SC(187,     51,       49,      3, yes,  no,   0, no) \
-    SC(188,     51,       49,      4, yes,  no,   0, no) \
-                                                         \
-    SC(189,     52,       50,      1, yes,  no,   0, no) \
-    SC(190,     52,       50,      2, yes,  no,   0, no) \
-    SC(191,     52,       50,      3, yes,  no,   0, no) \
-    SC(192,     52,       50,      4, yes,  no,   0, no) \
-                                                         \
-    SC(193,     53,       51,      1, yes,  no,   0, no) \
-    SC(194,     53,       51,      2, yes,  no,   0, no) \
-    SC(195,     53,       51,      3, yes,  no,   0, no) \
-    SC(196,     53,       51,      4, yes,  no,   0, no) \
-                                                         \
-    SC(197,     54,       52,      1, yes,  no,   0, no) \
-    SC(198,     54,       52,      2, yes,  no,   0, no) \
-    SC(199,     54,       52,      3, yes,  no,   0, no) \
-    SC(200,     54,       52,      4, yes,  no,   0, no) \
-                                                         \
-    SC(201,     55,       53,      1, yes,  no,   0, no) \
-    SC(202,     55,       53,      2, yes,  no,   0, no) \
-    SC(203,     55,       53,      3, yes,  no,   0, no) \
-    SC(204,     55,       53,      4, yes,  no,   0, no) \
-                                                         \
-    SC(205,     56,       54,      1, yes,  no,   0, no) \
-    SC(206,     56,       54,      2, yes,  no,   0, no) \
-    SC(207,     56,       54,      3, yes,  no,   0, no) \
-    SC(208,     56,       54,      4, yes,  no,   0, no) \
-                                                         \
-    SC(209,     57,       55,      1, yes,  no,   0, no) \
-    SC(210,     57,       55,      2, yes,  no,   0, no) \
-    SC(211,     57,       55,      3, yes,  no,   0, no) \
-    SC(212,     57,       55,      4, yes,  no,   0, no) \
-                                                         \
-    SC(213,     58,       56,      1, yes,  no,   0, no) \
-    SC(214,     58,       56,      2, yes,  no,   0, no) \
-    SC(215,     58,       56,      3, yes,  no,   0, no) \
-    SC(216,     58,       56,      4, yes,  no,   0, no) \
-                                                         \
-    SC(217,     59,       57,      1, yes,  no,   0, no) \
-    SC(218,     59,       57,      2, yes,  no,   0, no) \
-    SC(219,     59,       57,      3, yes,  no,   0, no) \
-    SC(220,     59,       57,      4, yes,  no,   0, no) \
-                                                         \
-    SC(221,     60,       58,      1, yes,  no,   0, no) \
-    SC(222,     60,       58,      2, yes,  no,   0, no) \
-    SC(223,     60,       58,      3, yes,  no,   0, no) \
-    SC(224,     60,       58,      4, yes,  no,   0, no) \
-                                                         \
-    SC(225,     61,       59,      1, yes,  no,   0, no) \
-    SC(226,     61,       59,      2, yes,  no,   0, no) \
-    SC(227,     61,       59,      3, yes,  no,   0, no) \
-    SC(228,     61,       59,      4, yes,  no,   0, no) \
-                                                         \
-    SC(229,     62,       60,      1, yes,  no,   0, no) \
-    SC(230,     62,       60,      2, yes,  no,   0, no) \
-    SC(231,     62,       60,      3, yes,  no,   0, no) \
-
-#define	SIZE_CLASSES_DEFINED
-#define	NTBINS			1
-#define	NLBINS			29
-#define	NBINS			36
-#define	NSIZES			232
-#define	NPSIZES			199
-#define	LG_TINY_MAXCLASS	3
-#define	LOOKUP_MAXCLASS		((((size_t)1) << 11) + (((size_t)4) << 9))
-#define	SMALL_MAXCLASS		((((size_t)1) << 13) + (((size_t)3) << 11))
-#define	LG_LARGE_MINCLASS	14
-#define	LARGE_MAXCLASS		((((size_t)1) << 62) + (((size_t)3) << 60))
-#endif
-
-#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12)
-#define	SIZE_CLASSES \
-  /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
-    SC(  0,      4,        4,      0,  no, yes,   1,  4) \
-    SC(  1,      4,        4,      1,  no, yes,   1,  4) \
-    SC(  2,      4,        4,      2,  no, yes,   3,  4) \
-    SC(  3,      4,        4,      3,  no, yes,   1,  4) \
-                                                         \
-    SC(  4,      6,        4,      1,  no, yes,   5,  4) \
-    SC(  5,      6,        4,      2,  no, yes,   3,  4) \
-    SC(  6,      6,        4,      3,  no, yes,   7,  4) \
-    SC(  7,      6,        4,      4,  no, yes,   1,  4) \
-                                                         \
-    SC(  8,      7,        5,      1,  no, yes,   5,  5) \
-    SC(  9,      7,        5,      2,  no, yes,   3,  5) \
-    SC( 10,      7,        5,      3,  no, yes,   7,  5) \
-    SC( 11,      7,        5,      4,  no, yes,   1,  5) \
-                                                         \
-    SC( 12,      8,        6,      1,  no, yes,   5,  6) \
-    SC( 13,      8,        6,      2,  no, yes,   3,  6) \
-    SC( 14,      8,        6,      3,  no, yes,   7,  6) \
-    SC( 15,      8,        6,      4,  no, yes,   1,  6) \
-                                                         \
-    SC( 16,      9,        7,      1,  no, yes,   5,  7) \
-    SC( 17,      9,        7,      2,  no, yes,   3,  7) \
-    SC( 18,      9,        7,      3,  no, yes,   7,  7) \
-    SC( 19,      9,        7,      4,  no, yes,   1,  7) \
-                                                         \
-    SC( 20,     10,        8,      1,  no, yes,   5,  8) \
-    SC( 21,     10,        8,      2,  no, yes,   3,  8) \
-    SC( 22,     10,        8,      3,  no, yes,   7,  8) \
-    SC( 23,     10,        8,      4,  no, yes,   1,  8) \
-                                                         \
-    SC( 24,     11,        9,      1,  no, yes,   5,  9) \
-    SC( 25,     11,        9,      2,  no, yes,   3,  9) \
-    SC( 26,     11,        9,      3,  no, yes,   7,  9) \
-    SC( 27,     11,        9,      4, yes, yes,   1,  9) \
-                                                         \
-    SC( 28,     12,       10,      1,  no, yes,   5, no) \
-    SC( 29,     12,       10,      2,  no, yes,   3, no) \
-    SC( 30,     12,       10,      3,  no, yes,   7, no) \
-    SC( 31,     12,       10,      4, yes, yes,   2, no) \
-                                                         \
-    SC( 32,     13,       11,      1,  no, yes,   5, no) \
-    SC( 33,     13,       11,      2, yes, yes,   3, no) \
-    SC( 34,     13,       11,      3,  no, yes,   7, no) \
-    SC( 35,     13,       11,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 36,     14,       12,      1, yes,  no,   0, no) \
-    SC( 37,     14,       12,      2, yes,  no,   0, no) \
-    SC( 38,     14,       12,      3, yes,  no,   0, no) \
-    SC( 39,     14,       12,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 40,     15,       13,      1, yes,  no,   0, no) \
-    SC( 41,     15,       13,      2, yes,  no,   0, no) \
-    SC( 42,     15,       13,      3, yes,  no,   0, no) \
-    SC( 43,     15,       13,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 44,     16,       14,      1, yes,  no,   0, no) \
-    SC( 45,     16,       14,      2, yes,  no,   0, no) \
-    SC( 46,     16,       14,      3, yes,  no,   0, no) \
-    SC( 47,     16,       14,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 48,     17,       15,      1, yes,  no,   0, no) \
-    SC( 49,     17,       15,      2, yes,  no,   0, no) \
-    SC( 50,     17,       15,      3, yes,  no,   0, no) \
-    SC( 51,     17,       15,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 52,     18,       16,      1, yes,  no,   0, no) \
-    SC( 53,     18,       16,      2, yes,  no,   0, no) \
-    SC( 54,     18,       16,      3, yes,  no,   0, no) \
-    SC( 55,     18,       16,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 56,     19,       17,      1, yes,  no,   0, no) \
-    SC( 57,     19,       17,      2, yes,  no,   0, no) \
-    SC( 58,     19,       17,      3, yes,  no,   0, no) \
-    SC( 59,     19,       17,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 60,     20,       18,      1, yes,  no,   0, no) \
-    SC( 61,     20,       18,      2, yes,  no,   0, no) \
-    SC( 62,     20,       18,      3, yes,  no,   0, no) \
-    SC( 63,     20,       18,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 64,     21,       19,      1, yes,  no,   0, no) \
-    SC( 65,     21,       19,      2, yes,  no,   0, no) \
-    SC( 66,     21,       19,      3, yes,  no,   0, no) \
-    SC( 67,     21,       19,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 68,     22,       20,      1, yes,  no,   0, no) \
-    SC( 69,     22,       20,      2, yes,  no,   0, no) \
-    SC( 70,     22,       20,      3, yes,  no,   0, no) \
-    SC( 71,     22,       20,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 72,     23,       21,      1, yes,  no,   0, no) \
-    SC( 73,     23,       21,      2, yes,  no,   0, no) \
-    SC( 74,     23,       21,      3, yes,  no,   0, no) \
-    SC( 75,     23,       21,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 76,     24,       22,      1, yes,  no,   0, no) \
-    SC( 77,     24,       22,      2, yes,  no,   0, no) \
-    SC( 78,     24,       22,      3, yes,  no,   0, no) \
-    SC( 79,     24,       22,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 80,     25,       23,      1, yes,  no,   0, no) \
-    SC( 81,     25,       23,      2, yes,  no,   0, no) \
-    SC( 82,     25,       23,      3, yes,  no,   0, no) \
-    SC( 83,     25,       23,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 84,     26,       24,      1, yes,  no,   0, no) \
-    SC( 85,     26,       24,      2, yes,  no,   0, no) \
-    SC( 86,     26,       24,      3, yes,  no,   0, no) \
-    SC( 87,     26,       24,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 88,     27,       25,      1, yes,  no,   0, no) \
-    SC( 89,     27,       25,      2, yes,  no,   0, no) \
-    SC( 90,     27,       25,      3, yes,  no,   0, no) \
-    SC( 91,     27,       25,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 92,     28,       26,      1, yes,  no,   0, no) \
-    SC( 93,     28,       26,      2, yes,  no,   0, no) \
-    SC( 94,     28,       26,      3, yes,  no,   0, no) \
-    SC( 95,     28,       26,      4, yes,  no,   0, no) \
-                                                         \
-    SC( 96,     29,       27,      1, yes,  no,   0, no) \
-    SC( 97,     29,       27,      2, yes,  no,   0, no) \
-    SC( 98,     29,       27,      3, yes,  no,   0, no) \
-    SC( 99,     29,       27,      4, yes,  no,   0, no) \
-                                                         \
-    SC(100,     30,       28,      1, yes,  no,   0, no) \
-    SC(101,     30,       28,      2, yes,  no,   0, no) \
-    SC(102,     30,       28,      3, yes,  no,   0, no) \
-    SC(103,     30,       28,      4, yes,  no,   0, no) \
-                                                         \
-    SC(104,     31,       29,      1, yes,  no,   0, no) \
-    SC(105,     31,       29,      2, yes,  no,   0, no) \
-    SC(106,     31,       29,      3, yes,  no,   0, no) \
-    SC(107,     31,       29,      4, yes,  no,   0, no) \
-                                                         \
-    SC(108,     32,       30,      1, yes,  no,   0, no) \
-    SC(109,     32,       30,      2, yes,  no,   0, no) \
-    SC(110,     32,       30,      3, yes,  no,   0, no) \
-    SC(111,     32,       30,      4, yes,  no,   0, no) \
-                                                         \
-    SC(112,     33,       31,      1, yes,  no,   0, no) \
-    SC(113,     33,       31,      2, yes,  no,   0, no) \
-    SC(114,     33,       31,      3, yes,  no,   0, no) \
-    SC(115,     33,       31,      4, yes,  no,   0, no) \
-                                                         \
-    SC(116,     34,       32,      1, yes,  no,   0, no) \
-    SC(117,     34,       32,      2, yes,  no,   0, no) \
-    SC(118,     34,       32,      3, yes,  no,   0, no) \
-    SC(119,     34,       32,      4, yes,  no,   0, no) \
-                                                         \
-    SC(120,     35,       33,      1, yes,  no,   0, no) \
-    SC(121,     35,       33,      2, yes,  no,   0, no) \
-    SC(122,     35,       33,      3, yes,  no,   0, no) \
-    SC(123,     35,       33,      4, yes,  no,   0, no) \
-                                                         \
-    SC(124,     36,       34,      1, yes,  no,   0, no) \
-    SC(125,     36,       34,      2, yes,  no,   0, no) \
-    SC(126,     36,       34,      3, yes,  no,   0, no) \
-    SC(127,     36,       34,      4, yes,  no,   0, no) \
-                                                         \
-    SC(128,     37,       35,      1, yes,  no,   0, no) \
-    SC(129,     37,       35,      2, yes,  no,   0, no) \
-    SC(130,     37,       35,      3, yes,  no,   0, no) \
-    SC(131,     37,       35,      4, yes,  no,   0, no) \
-                                                         \
-    SC(132,     38,       36,      1, yes,  no,   0, no) \
-    SC(133,     38,       36,      2, yes,  no,   0, no) \
-    SC(134,     38,       36,      3, yes,  no,   0, no) \
-    SC(135,     38,       36,      4, yes,  no,   0, no) \
-                                                         \
-    SC(136,     39,       37,      1, yes,  no,   0, no) \
-    SC(137,     39,       37,      2, yes,  no,   0, no) \
-    SC(138,     39,       37,      3, yes,  no,   0, no) \
-    SC(139,     39,       37,      4, yes,  no,   0, no) \
-                                                         \
-    SC(140,     40,       38,      1, yes,  no,   0, no) \
-    SC(141,     40,       38,      2, yes,  no,   0, no) \
-    SC(142,     40,       38,      3, yes,  no,   0, no) \
-    SC(143,     40,       38,      4, yes,  no,   0, no) \
-                                                         \
-    SC(144,     41,       39,      1, yes,  no,   0, no) \
-    SC(145,     41,       39,      2, yes,  no,   0, no) \
-    SC(146,     41,       39,      3, yes,  no,   0, no) \
-    SC(147,     41,       39,      4, yes,  no,   0, no) \
-                                                         \
-    SC(148,     42,       40,      1, yes,  no,   0, no) \
-    SC(149,     42,       40,      2, yes,  no,   0, no) \
-    SC(150,     42,       40,      3, yes,  no,   0, no) \
-    SC(151,     42,       40,      4, yes,  no,   0, no) \
-                                                         \
-    SC(152,     43,       41,      1, yes,  no,   0, no) \
-    SC(153,     43,       41,      2, yes,  no,   0, no) \
-    SC(154,     43,       41,      3, yes,  no,   0, no) \
-    SC(155,     43,       41,      4, yes,  no,   0, no) \
-                                                         \
-    SC(156,     44,       42,      1, yes,  no,   0, no) \
-    SC(157,     44,       42,      2, yes,  no,   0, no) \
-    SC(158,     44,       42,      3, yes,  no,   0, no) \
-    SC(159,     44,       42,      4, yes,  no,   0, no) \
-                                                         \
-    SC(160,     45,       43,      1, yes,  no,   0, no) \
-    SC(161,     45,       43,      2, yes,  no,   0, no) \
-    SC(162,     45,       43,      3, yes,  no,   0, no) \
-    SC(163,     45,       43,      4, yes,  no,   0, no) \
-                                                         \
-    SC(164,     46,       44,      1, yes,  no,   0, no) \
-    SC(165,     46,       44,      2, yes,  no,   0, no) \
-    SC(166,     46,       44,      3, yes,  no,   0, no) \
-    SC(167,     46,       44,      4, yes,  no,   0, no) \
-                                                         \
-    SC(168,     47,       45,      1, yes,  no,   0, no) \
-    SC(169,     47,       45,      2, yes,  no,   0, no) \
-    SC(170,     47,       45,      3, yes,  no,   0, no) \
-    SC(171,     47,       45,      4, yes,  no,   0, no) \
-                                                         \
-    SC(172,     48,       46,      1, yes,  no,   0, no) \
-    SC(173,     48,       46,      2, yes,  no,   0, no) \
-    SC(174,     48,       46,      3, yes,  no,   0, no) \
-    SC(175,     48,       46,      4, yes,  no,   0, no) \
-                                                         \
-    SC(176,     49,       47,      1, yes,  no,   0, no) \
-    SC(177,     49,       47,      2, yes,  no,   0, no) \
-    SC(178,     49,       47,      3, yes,  no,   0, no) \
-    SC(179,     49,       47,      4, yes,  no,   0, no) \
-                                                         \
-    SC(180,     50,       48,      1, yes,  no,   0, no) \
-    SC(181,     50,       48,      2, yes,  no,   0, no) \
-    SC(182,     50,       48,      3, yes,  no,   0, no) \
-    SC(183,     50,       48,      4, yes,  no,   0, no) \
-                                                         \
-    SC(184,     51,       49,      1, yes,  no,   0, no) \
-    SC(185,     51,       49,      2, yes,  no,   0, no) \
-    SC(186,     51,       49,      3, yes,  no,   0, no) \
-    SC(187,     51,       49,      4, yes,  no,   0, no) \
-                                                         \
-    SC(188,     52,       50,      1, yes,  no,   0, no) \
-    SC(189,     52,       50,      2, yes,  no,   0, no) \
-    SC(190,     52,       50,      3, yes,  no,   0, no) \
-    SC(191,     52,       50,      4, yes,  no,   0, no) \
-                                                         \
-    SC(192,     53,       51,      1, yes,  no,   0, no) \
-    SC(193,     53,       51,      2, yes,  no,   0, no) \
-    SC(194,     53,       51,      3, yes,  no,   0, no) \
-    SC(195,     53,       51,      4, yes,  no,   0, no) \
-                                                         \
-    SC(196,     54,       52,      1, yes,  no,   0, no) \
-    SC(197,     54,       52,      2, yes,  no,   0, no) \
-    SC(198,     54,       52,      3, yes,  no,   0, no) \
-    SC(199,     54,       52,      4, yes,  no,   0, no) \
-                                                         \
-    SC(200,     55,       53,      1, yes,  no,   0, no) \
-    SC(201,     55,       53,      2, yes,  no,   0, no) \
-    SC(202,     55,       53,      3, yes,  no,   0, no) \
-    SC(203,     55,       53,      4, yes,  no,   0, no) \
-                                                         \
-    SC(204,     56,       54,      1, yes,  no,   0, no) \
-    SC(205,     56,       54,      2, yes,  no,   0, no) \
-    SC(206,     56,       54,      3, yes,  no,   0, no) \
-    SC(207,     56,       54,      4, yes,  no,   0, no) \
-                                                         \
-    SC(208,     57,       55,      1, yes,  no,   0, no) \
-    SC(209,     57,       55,      2, yes,  no,   0, no) \
-    SC(210,     57,       55,      3, yes,  no,   0, no) \
-    SC(211,     57,       55,      4, yes,  no,   0, no) \
-                                                         \
-    SC(212,     58,       56,      1, yes,  no,   0, no) \
-    SC(213,     58,       56,      2, yes,  no,   0, no) \
-    SC(214,     58,       56,      3, yes,  no,   0, no) \
-    SC(215,     58,       56,      4, yes,  no,   0, no) \
-                                                         \
-    SC(216,     59,       57,      1, yes,  no,   0, no) \
-    SC(217,     59,       57,      2, yes,  no,   0, no) \
-    SC(218,     59,       57,      3, yes,  no,   0, no) \
-    SC(219,     59,       57,      4, yes,  no,   0, no) \
-                                                         \
-    SC(220,     60,       58,      1, yes,  no,   0, no) \
-    SC(221,     60,       58,      2, yes,  no,   0, no) \
-    SC(222,     60,       58,      3, yes,  no,   0, no) \
-    SC(223,     60,       58,      4, yes,  no,   0, no) \
-                                                         \
-    SC(224,     61,       59,      1, yes,  no,   0, no) \
-    SC(225,     61,       59,      2, yes,  no,   0, no) \
-    SC(226,     61,       59,      3, yes,  no,   0, no) \
-    SC(227,     61,       59,      4, yes,  no,   0, no) \
-                                                         \
-    SC(228,     62,       60,      1, yes,  no,   0, no) \
-    SC(229,     62,       60,      2, yes,  no,   0, no) \
-    SC(230,     62,       60,      3, yes,  no,   0, no) \
-
-#define	SIZE_CLASSES_DEFINED
-#define	NTBINS			0
-#define	NLBINS			28
-#define	NBINS			35
-#define	NSIZES			231
-#define	NPSIZES			199
-#define	LG_TINY_MAXCLASS	"NA"
-#define	LOOKUP_MAXCLASS		((((size_t)1) << 11) + (((size_t)4) << 9))
-#define	SMALL_MAXCLASS		((((size_t)1) << 13) + (((size_t)3) << 11))
-#define	LG_LARGE_MINCLASS	14
-#define	LARGE_MAXCLASS		((((size_t)1) << 62) + (((size_t)3) << 60))
-#endif
-
-#ifndef SIZE_CLASSES_DEFINED
-#  error "No size class definitions match configuration"
-#endif
-#undef SIZE_CLASSES_DEFINED
-/*
- * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
- * cannot support more than 256 small size classes.
- */
-#if (NBINS > 256)
-#  error "Too many small size classes"
-#endif
-
-#endif /* JEMALLOC_INTERNAL_SIZE_CLASSES_H */
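The generated constants in the block above expand to concrete byte sizes. As a quick cross-check (not part of the deleted sources, just a sketch for the configuration shown, i.e. 4 KiB pages on a 64-bit target), the expressions work out as follows:

/* size_class_check.c -- hedged sketch; expands the generated constants above. */
#include <assert.h>
#include <stddef.h>

int main(void) {
	size_t lookup_max = (((size_t)1) << 11) + (((size_t)4) << 9);  /* 2048 + 2048 = 4096  */
	size_t small_max  = (((size_t)1) << 13) + (((size_t)3) << 11); /* 8192 + 6144 = 14336 */
	assert(lookup_max == 4096);
	assert(small_max == 14336);
	/* LG_LARGE_MINCLASS == 14: the first large class is 1 << 14 == 16384,
	 * one delta (1 << 11 == 2048) above SMALL_MAXCLASS. */
	assert(((size_t)1 << 14) == small_max + ((size_t)1 << 11));
	return 0;
}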
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/size_classes.sh b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/size_classes.sh
deleted file mode 100755
index 3680b65..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/size_classes.sh
+++ /dev/null
@@ -1,342 +0,0 @@
-#!/bin/sh
-#
-# Usage: size_classes.sh <lg_qarr> <lg_tmin> <lg_parr> <lg_g>
-
-# The following limits are chosen such that they cover all supported platforms.
-
-# Pointer sizes.
-lg_zarr="2 3"
-
-# Quanta.
-lg_qarr=$1
-
-# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)].
-lg_tmin=$2
-
-# Maximum lookup size.
-lg_kmax=12
-
-# Page sizes.
-lg_parr=`echo $3 | tr ',' ' '`
-
-# Size class group size (number of size classes for each size doubling).
-lg_g=$4
-
-pow2() {
-  e=$1
-  pow2_result=1
-  while [ ${e} -gt 0 ] ; do
-    pow2_result=$((${pow2_result} + ${pow2_result}))
-    e=$((${e} - 1))
-  done
-}
-
-lg() {
-  x=$1
-  lg_result=0
-  while [ ${x} -gt 1 ] ; do
-    lg_result=$((${lg_result} + 1))
-    x=$((${x} / 2))
-  done
-}
-
-reg_size_compute() {
-  lg_grp=$1
-  lg_delta=$2
-  ndelta=$3
-
-  pow2 ${lg_grp}; grp=${pow2_result}
-  pow2 ${lg_delta}; delta=${pow2_result}
-  reg_size=$((${grp} + ${delta}*${ndelta}))
-}
-
-slab_size() {
-  lg_p=$1
-  lg_grp=$2
-  lg_delta=$3
-  ndelta=$4
-
-  pow2 ${lg_p}; p=${pow2_result}
-  reg_size_compute ${lg_grp} ${lg_delta} ${ndelta}
-
-  # Compute smallest slab size that is an integer multiple of reg_size.
-  try_slab_size=${p}
-  try_nregs=$((${try_slab_size} / ${reg_size}))
-  perfect=0
-  while [ ${perfect} -eq 0 ] ; do
-    perfect_slab_size=${try_slab_size}
-    perfect_nregs=${try_nregs}
-
-    try_slab_size=$((${try_slab_size} + ${p}))
-    try_nregs=$((${try_slab_size} / ${reg_size}))
-    if [ ${perfect_slab_size} -eq $((${perfect_nregs} * ${reg_size})) ] ; then
-      perfect=1
-    fi
-  done
-
-  slab_size_pgs=$((${perfect_slab_size} / ${p}))
-}
-
-size_class() {
-  index=$1
-  lg_grp=$2
-  lg_delta=$3
-  ndelta=$4
-  lg_p=$5
-  lg_kmax=$6
-
-  if [ ${lg_delta} -ge ${lg_p} ] ; then
-    psz="yes"
-  else
-    pow2 ${lg_p}; p=${pow2_result}
-    pow2 ${lg_grp}; grp=${pow2_result}
-    pow2 ${lg_delta}; delta=${pow2_result}
-    sz=$((${grp} + ${delta} * ${ndelta}))
-    npgs=$((${sz} / ${p}))
-    if [ ${sz} -eq $((${npgs} * ${p})) ] ; then
-      psz="yes"
-    else
-      psz="no"
-    fi
-  fi
-
-  lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta}
-  if [ ${pow2_result} -lt ${ndelta} ] ; then
-    rem="yes"
-  else
-    rem="no"
-  fi
-
-  lg_size=${lg_grp}
-  if [ $((${lg_delta} + ${lg_ndelta})) -eq ${lg_grp} ] ; then
-    lg_size=$((${lg_grp} + 1))
-  else
-    lg_size=${lg_grp}
-    rem="yes"
-  fi
-
-  if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then
-    bin="yes"
-    slab_size ${lg_p} ${lg_grp} ${lg_delta} ${ndelta}; pgs=${slab_size_pgs}
-  else
-    bin="no"
-    pgs=0
-  fi
-  if [ ${lg_size} -lt ${lg_kmax} \
-      -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then
-    lg_delta_lookup=${lg_delta}
-  else
-    lg_delta_lookup="no"
-  fi
-  printf '    SC(%3d, %6d, %8d, %6d, %3s, %3s, %3d, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${pgs} ${lg_delta_lookup}
-  # Defined upon return:
-  # - psz ("yes" or "no")
-  # - bin ("yes" or "no")
-  # - pgs
-  # - lg_delta_lookup (${lg_delta} or "no")
-}
-
-sep_line() {
-  echo "                                                         \\"
-}
-
-size_classes() {
-  lg_z=$1
-  lg_q=$2
-  lg_t=$3
-  lg_p=$4
-  lg_g=$5
-
-  pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result}
-  pow2 ${lg_g}; g=${pow2_result}
-
-  echo "#define	SIZE_CLASSES \\"
-  echo "  /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \\"
-
-  ntbins=0
-  nlbins=0
-  lg_tiny_maxclass='"NA"'
-  nbins=0
-  npsizes=0
-
-  # Tiny size classes.
-  ndelta=0
-  index=0
-  lg_grp=${lg_t}
-  lg_delta=${lg_grp}
-  while [ ${lg_grp} -lt ${lg_q} ] ; do
-    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
-    if [ ${lg_delta_lookup} != "no" ] ; then
-      nlbins=$((${index} + 1))
-    fi
-    if [ ${psz} = "yes" ] ; then
-      npsizes=$((${npsizes} + 1))
-    fi
-    if [ ${bin} != "no" ] ; then
-      nbins=$((${index} + 1))
-    fi
-    ntbins=$((${ntbins} + 1))
-    lg_tiny_maxclass=${lg_grp} # Final written value is correct.
-    index=$((${index} + 1))
-    lg_delta=${lg_grp}
-    lg_grp=$((${lg_grp} + 1))
-  done
-
-  # First non-tiny group.
-  if [ ${ntbins} -gt 0 ] ; then
-    sep_line
-    # The first size class has an unusual encoding, because the size has to be
-    # split between grp and delta*ndelta.
-    lg_grp=$((${lg_grp} - 1))
-    ndelta=1
-    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
-    index=$((${index} + 1))
-    lg_grp=$((${lg_grp} + 1))
-    lg_delta=$((${lg_delta} + 1))
-    if [ ${psz} = "yes" ] ; then
-      npsizes=$((${npsizes} + 1))
-    fi
-  fi
-  while [ ${ndelta} -lt ${g} ] ; do
-    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
-    index=$((${index} + 1))
-    ndelta=$((${ndelta} + 1))
-    if [ ${psz} = "yes" ] ; then
-      npsizes=$((${npsizes} + 1))
-    fi
-  done
-
-  # All remaining groups.
-  lg_grp=$((${lg_grp} + ${lg_g}))
-  while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do
-    sep_line
-    ndelta=1
-    if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then
-      ndelta_limit=$((${g} - 1))
-    else
-      ndelta_limit=${g}
-    fi
-    while [ ${ndelta} -le ${ndelta_limit} ] ; do
-      size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
-      if [ ${lg_delta_lookup} != "no" ] ; then
-        nlbins=$((${index} + 1))
-        # Final written value is correct:
-        lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
-      fi
-      if [ ${psz} = "yes" ] ; then
-        npsizes=$((${npsizes} + 1))
-      fi
-      if [ ${bin} != "no" ] ; then
-        nbins=$((${index} + 1))
-        # Final written value is correct:
-        small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
-        if [ ${lg_g} -gt 0 ] ; then
-          lg_large_minclass=$((${lg_grp} + 1))
-        else
-          lg_large_minclass=$((${lg_grp} + 2))
-        fi
-      fi
-      # Final written value is correct:
-      large_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
-      index=$((${index} + 1))
-      ndelta=$((${ndelta} + 1))
-    done
-    lg_grp=$((${lg_grp} + 1))
-    lg_delta=$((${lg_delta} + 1))
-  done
-  echo
-  nsizes=${index}
-
-  # Defined upon completion:
-  # - ntbins
-  # - nlbins
-  # - nbins
-  # - nsizes
-  # - npsizes
-  # - lg_tiny_maxclass
-  # - lookup_maxclass
-  # - small_maxclass
-  # - lg_large_minclass
-  # - large_maxclass
-}
-
-cat <<EOF
-#ifndef JEMALLOC_INTERNAL_SIZE_CLASSES_H
-#define JEMALLOC_INTERNAL_SIZE_CLASSES_H
-
-/* This file was automatically generated by size_classes.sh. */
-
-/*
- * This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to
- * be defined prior to inclusion, and it in turn defines:
- *
- *   LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
- *   SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz,
- *                 bin, pgs, lg_delta_lookup) tuples.
- *     index: Size class index.
- *     lg_grp: Lg group base size (no deltas added).
- *     lg_delta: Lg delta to previous size class.
- *     ndelta: Delta multiplier.  size == (1<<lg_grp) + (ndelta<<lg_delta)
- *     psz: 'yes' if a multiple of the page size, 'no' otherwise.
- *     bin: 'yes' if a small bin size class, 'no' otherwise.
- *     pgs: Slab page count if a small bin size class, 0 otherwise.
- *     lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
- *                      otherwise.
- *   NTBINS: Number of tiny bins.
- *   NLBINS: Number of bins supported by the lookup table.
- *   NBINS: Number of small size class bins.
- *   NSIZES: Number of size classes.
- *   NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE).
- *   LG_TINY_MAXCLASS: Lg of maximum tiny size class.
- *   LOOKUP_MAXCLASS: Maximum size class included in lookup table.
- *   SMALL_MAXCLASS: Maximum small size class.
- *   LG_LARGE_MINCLASS: Lg of minimum large size class.
- *   LARGE_MAXCLASS: Maximum (large) size class.
- */
-
-#define	LG_SIZE_CLASS_GROUP	${lg_g}
-
-EOF
-
-for lg_z in ${lg_zarr} ; do
-  for lg_q in ${lg_qarr} ; do
-    lg_t=${lg_tmin}
-    while [ ${lg_t} -le ${lg_q} ] ; do
-      # Iterate through page sizes and compute how many bins there are.
-      for lg_p in ${lg_parr} ; do
-        echo "#if (LG_SIZEOF_PTR == ${lg_z} && LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})"
-        size_classes ${lg_z} ${lg_q} ${lg_t} ${lg_p} ${lg_g}
-        echo "#define	SIZE_CLASSES_DEFINED"
-        echo "#define	NTBINS			${ntbins}"
-        echo "#define	NLBINS			${nlbins}"
-        echo "#define	NBINS			${nbins}"
-        echo "#define	NSIZES			${nsizes}"
-        echo "#define	NPSIZES			${npsizes}"
-        echo "#define	LG_TINY_MAXCLASS	${lg_tiny_maxclass}"
-        echo "#define	LOOKUP_MAXCLASS		${lookup_maxclass}"
-        echo "#define	SMALL_MAXCLASS		${small_maxclass}"
-        echo "#define	LG_LARGE_MINCLASS	${lg_large_minclass}"
-        echo "#define	LARGE_MAXCLASS		${large_maxclass}"
-        echo "#endif"
-        echo
-      done
-      lg_t=$((${lg_t} + 1))
-    done
-  done
-done
-
-cat <<EOF
-#ifndef SIZE_CLASSES_DEFINED
-#  error "No size class definitions match configuration"
-#endif
-#undef SIZE_CLASSES_DEFINED
-/*
- * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
- * cannot support more than 256 small size classes.
- */
-#if (NBINS > 256)
-#  error "Too many small size classes"
-#endif
-
-#endif /* JEMALLOC_INTERNAL_SIZE_CLASSES_H */
-EOF
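To make the SC(...) columns emitted by this script easier to read, here is the arithmetic for one row of the table earlier in this diff, SC(33, 13, 11, 2, yes, yes, 3, no), assuming LG_PAGE == 12 (4 KiB pages, consistent with the psz column). This is a sketch of the same computation slab_size() performs, not part of the deleted sources:

/* sc_row_check.c -- recompute SC(33, 13, 11, 2, yes, yes, 3, no), LG_PAGE == 12. */
#include <assert.h>
#include <stddef.h>

int main(void) {
	size_t page = (size_t)1 << 12;                                 /* 4096 */
	size_t reg_size = ((size_t)1 << 13) + 2 * ((size_t)1 << 11);   /* 8192 + 4096 = 12288 */

	/* psz == yes: the class size is an exact multiple of the page size. */
	assert(reg_size % page == 0);

	/* pgs: smallest page multiple that is also an exact multiple of reg_size. */
	size_t slab_size = page;
	while (slab_size % reg_size != 0)
		slab_size += page;
	assert(slab_size / page == 3);                                 /* matches pgs == 3 */
	return 0;
}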
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/smoothstep.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/smoothstep.h
deleted file mode 100644
index dab53d9..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/smoothstep.h
+++ /dev/null
@@ -1,232 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
-#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
-
-/*
- * This file was generated by the following command:
- *   sh smoothstep.sh smoother 200 24 3 15
- */
-/******************************************************************************/
-
-/*
- * This header defines a precomputed table based on the smoothstep family of
- * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
- * to 1 in 0 <= x <= 1.  The table is stored as integer fixed point values so
- * that floating point math can be avoided.
- *
- *                      3     2
- *   smoothstep(x) = -2x  + 3x
- *
- *                       5      4      3
- *   smootherstep(x) = 6x  - 15x  + 10x
- *
- *                          7      6      5      4
- *   smootheststep(x) = -20x  + 70x  - 84x  + 35x
- */
-
-#define	SMOOTHSTEP_VARIANT	"smoother"
-#define	SMOOTHSTEP_NSTEPS	200
-#define	SMOOTHSTEP_BFP		24
-#define	SMOOTHSTEP \
- /* STEP(step, h,                            x,     y) */ \
-    STEP(   1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
-    STEP(   2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
-    STEP(   3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
-    STEP(   4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
-    STEP(   5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
-    STEP(   6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
-    STEP(   7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
-    STEP(   8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
-    STEP(   9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
-    STEP(  10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
-    STEP(  11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
-    STEP(  12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
-    STEP(  13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
-    STEP(  14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
-    STEP(  15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
-    STEP(  16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
-    STEP(  17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
-    STEP(  18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
-    STEP(  19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
-    STEP(  20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
-    STEP(  21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
-    STEP(  22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
-    STEP(  23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
-    STEP(  24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
-    STEP(  25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
-    STEP(  26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
-    STEP(  27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
-    STEP(  28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
-    STEP(  29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
-    STEP(  30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
-    STEP(  31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
-    STEP(  32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
-    STEP(  33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
-    STEP(  34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
-    STEP(  35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
-    STEP(  36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
-    STEP(  37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
-    STEP(  38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
-    STEP(  39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
-    STEP(  40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
-    STEP(  41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
-    STEP(  42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
-    STEP(  43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
-    STEP(  44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
-    STEP(  45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
-    STEP(  46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
-    STEP(  47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
-    STEP(  48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
-    STEP(  49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
-    STEP(  50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
-    STEP(  51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
-    STEP(  52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
-    STEP(  53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
-    STEP(  54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
-    STEP(  55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
-    STEP(  56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
-    STEP(  57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
-    STEP(  58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
-    STEP(  59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
-    STEP(  60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
-    STEP(  61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
-    STEP(  62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
-    STEP(  63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
-    STEP(  64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
-    STEP(  65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
-    STEP(  66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
-    STEP(  67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
-    STEP(  68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
-    STEP(  69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
-    STEP(  70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
-    STEP(  71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
-    STEP(  72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
-    STEP(  73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
-    STEP(  74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
-    STEP(  75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
-    STEP(  76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
-    STEP(  77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
-    STEP(  78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
-    STEP(  79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
-    STEP(  80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
-    STEP(  81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
-    STEP(  82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
-    STEP(  83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
-    STEP(  84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
-    STEP(  85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
-    STEP(  86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
-    STEP(  87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
-    STEP(  88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
-    STEP(  89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
-    STEP(  90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
-    STEP(  91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
-    STEP(  92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
-    STEP(  93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
-    STEP(  94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
-    STEP(  95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
-    STEP(  96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
-    STEP(  97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
-    STEP(  98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
-    STEP(  99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
-    STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
-    STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
-    STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
-    STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
-    STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
-    STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
-    STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
-    STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
-    STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
-    STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
-    STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
-    STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
-    STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
-    STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
-    STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
-    STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
-    STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
-    STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
-    STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
-    STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
-    STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
-    STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
-    STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
-    STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
-    STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
-    STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
-    STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
-    STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
-    STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
-    STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
-    STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
-    STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
-    STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
-    STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
-    STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
-    STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
-    STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
-    STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
-    STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
-    STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
-    STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
-    STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
-    STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
-    STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
-    STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
-    STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
-    STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
-    STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
-    STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
-    STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
-    STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
-    STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
-    STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
-    STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
-    STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
-    STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
-    STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
-    STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
-    STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
-    STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
-    STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
-    STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
-    STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
-    STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
-    STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
-    STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
-    STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
-    STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
-    STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
-    STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
-    STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
-    STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
-    STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
-    STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
-    STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
-    STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
-    STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
-    STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
-    STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
-    STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
-    STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
-    STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
-    STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
-    STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
-    STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
-    STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
-    STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
-    STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
-    STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
-    STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
-    STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
-    STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
-    STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
-    STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
-    STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
-    STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
-    STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
-    STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
-    STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
-    STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
-    STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
-
-#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
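The table above can be spot-checked by hand at its midpoint, STEP(100, 0x800000, 0.500, 0.5): smootherstep(0.5) = 6(0.5)^5 - 15(0.5)^4 + 10(0.5)^3 = 0.1875 - 0.9375 + 1.25 = 0.5, and with SMOOTHSTEP_BFP == 24 the fixed-point value is 0.5 * 2^24 = 0x800000. A minimal check of the same arithmetic (a sketch, not part of the deleted sources):

/* smoothstep_check.c -- recompute the STEP(100, ...) entry above. */
#include <assert.h>
#include <stdint.h>

int main(void) {
	double x = 100.0 / 200;                              /* step 100 of 200 */
	double y = 6*x*x*x*x*x - 15*x*x*x*x + 10*x*x*x;      /* smootherstep */
	uint64_t h = (uint64_t)(y * ((uint64_t)1 << 24));    /* SMOOTHSTEP_BFP == 24 */
	assert(y == 0.5);
	assert(h == UINT64_C(0x800000));
	return 0;
}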
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/smoothstep.sh b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/smoothstep.sh
deleted file mode 100755
index 5d72e35..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/smoothstep.sh
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/bin/sh
-#
-# Generate a discrete lookup table for a sigmoid function in the smoothstep
-# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table
-# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps].  Encode
-# the entries using a binary fixed point representation.
-#
-# Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec>
-#
-#        <variant> is in {smooth, smoother, smoothest}.
-#        <nsteps> must be greater than zero.
-#        <bfp> must be in [0..62]; reasonable values are roughly [10..30].
-#        <xprec> is x decimal precision.
-#        <yprec> is y decimal precision.
-
-#set -x
-
-cmd="sh smoothstep.sh $*"
-variant=$1
-nsteps=$2
-bfp=$3
-xprec=$4
-yprec=$5
-
-case "${variant}" in
-  smooth)
-    ;;
-  smoother)
-    ;;
-  smoothest)
-    ;;
-  *)
-    echo "Unsupported variant"
-    exit 1
-    ;;
-esac
-
-smooth() {
-  step=$1
-  y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
-  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
-}
-
-smoother() {
-  step=$1
-  y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
-  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
-}
-
-smoothest() {
-  step=$1
-  y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
-  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
-}
-
-cat <<EOF
-#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
-#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
-
-/*
- * This file was generated by the following command:
- *   $cmd
- */
-/******************************************************************************/
-
-/*
- * This header defines a precomputed table based on the smoothstep family of
- * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
- * to 1 in 0 <= x <= 1.  The table is stored as integer fixed point values so
- * that floating point math can be avoided.
- *
- *                      3     2
- *   smoothstep(x) = -2x  + 3x
- *
- *                       5      4      3
- *   smootherstep(x) = 6x  - 15x  + 10x
- *
- *                          7      6      5      4
- *   smootheststep(x) = -20x  + 70x  - 84x  + 35x
- */
-
-#define	SMOOTHSTEP_VARIANT	"${variant}"
-#define	SMOOTHSTEP_NSTEPS	${nsteps}
-#define	SMOOTHSTEP_BFP		${bfp}
-#define	SMOOTHSTEP \\
- /* STEP(step, h,                            x,     y) */ \\
-EOF
-
-s=1
-while [ $s -le $nsteps ] ; do
-  $variant ${s}
-  x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
-  printf '    STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n' ${s} ${h} ${x} ${y}
-
-  s=$((s+1))
-done
-echo
-
-cat <<EOF
-#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
-EOF
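The dc pipelines above are terse; the following is an equivalent computation sketched in C for the "smoother" variant actually used (sh smoothstep.sh smoother 200 24 3 15). It is an illustration only: dc works in arbitrary precision, so individual entries may differ from its output by one unit in the last place due to double rounding.

/* smoothstep_gen.c -- C equivalent of the 'smoother' branch of the dc pipelines above. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
	const int nsteps = 200, bfp = 24;
	for (int s = 1; s <= nsteps; s++) {
		double x = (double)s / nsteps;
		double y = 6*x*x*x*x*x - 15*x*x*x*x + 10*x*x*x;     /* smootherstep */
		uint64_t h = (uint64_t)(y * ((uint64_t)1 << bfp));  /* truncate, as dc does */
		printf("    STEP(%4d, UINT64_C(0x%016llx), %.3f, %.15f) \\\n",
		    s, (unsigned long long)h, x, y);
	}
	return 0;
}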
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/spin_inlines.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/spin_inlines.h
deleted file mode 100644
index b4e779f..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/spin_inlines.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_SPIN_INLINES_H
-#define JEMALLOC_INTERNAL_SPIN_INLINES_H
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void	spin_init(spin_t *spin);
-void	spin_adaptive(spin_t *spin);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_))
-JEMALLOC_INLINE void
-spin_init(spin_t *spin)
-{
-	spin->iteration = 0;
-}
-
-JEMALLOC_INLINE void
-spin_adaptive(spin_t *spin)
-{
-	volatile uint64_t i;
-
-	for (i = 0; i < (KQU(1) << spin->iteration); i++)
-		CPU_SPINWAIT;
-
-	if (spin->iteration < 63)
-		spin->iteration++;
-}
-
-#endif
-
-#endif /* JEMALLOC_INTERNAL_SPIN_INLINES_H */
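spin_adaptive() above is truncated exponential backoff: each call busy-waits 2^iteration times and then doubles the wait, capped at 2^63. A self-contained sketch of the same pattern and how a caller typically uses it follows; cpu_relax() here is a stand-in for jemalloc's CPU_SPINWAIT macro, and wait_until_ready() is a hypothetical caller, not an API from the deleted sources.

/* spin_wait.c -- sketch of the adaptive-spin pattern above. */
#include <stdatomic.h>
#include <stdint.h>

static inline void cpu_relax(void) {
#if defined(__x86_64__) || defined(__i386__)
	__asm__ __volatile__("pause");   /* stand-in for CPU_SPINWAIT */
#endif
}

typedef struct { unsigned iteration; } spin_t;

static void spin_adaptive(spin_t *spin) {
	/* Busy-wait 2^iteration times, doubling the wait on each call (cap 2^63). */
	for (uint64_t i = 0; i < ((uint64_t)1 << spin->iteration); i++)
		cpu_relax();
	if (spin->iteration < 63)
		spin->iteration++;
}

/* Typical caller: back off while another thread finishes some initialization. */
void wait_until_ready(atomic_bool *ready) {
	spin_t spin = { 0 };
	while (!atomic_load_explicit(ready, memory_order_acquire))
		spin_adaptive(&spin);
}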
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/spin_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/spin_structs.h
deleted file mode 100644
index ef71a76..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/spin_structs.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_SPIN_STRUCTS_H
-#define JEMALLOC_INTERNAL_SPIN_STRUCTS_H
-
-struct spin_s {
-	unsigned iteration;
-};
-
-#endif /* JEMALLOC_INTERNAL_SPIN_STRUCTS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/spin_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/spin_types.h
deleted file mode 100644
index 52ee4cc..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/spin_types.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_SPIN_TYPES_H
-#define JEMALLOC_INTERNAL_SPIN_TYPES_H
-
-typedef struct spin_s spin_t;
-
-#endif /* JEMALLOC_INTERNAL_SPIN_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/stats_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/stats_externs.h
deleted file mode 100644
index da1de48..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/stats_externs.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_STATS_EXTERNS_H
-#define JEMALLOC_INTERNAL_STATS_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-extern bool	opt_stats_print;
-
-void	stats_print(void (*write)(void *, const char *), void *cbopaque,
-    const char *opts);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_STATS_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/stats_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/stats_structs.h
deleted file mode 100644
index 32ef611..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/stats_structs.h
+++ /dev/null
@@ -1,110 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_STATS_STRUCTS_H
-#define JEMALLOC_INTERNAL_STATS_STRUCTS_H
-
-struct tcache_bin_stats_s {
-	/*
-	 * Number of allocation requests that corresponded to the size of this
-	 * bin.
-	 */
-	uint64_t	nrequests;
-};
-
-struct malloc_bin_stats_s {
-	/*
-	 * Total number of allocation/deallocation requests served directly by
-	 * the bin.  Note that tcache may allocate an object, then recycle it
-	 * many times, resulting in many increments to nrequests, but only one
-	 * each to nmalloc and ndalloc.
-	 */
-	uint64_t	nmalloc;
-	uint64_t	ndalloc;
-
-	/*
-	 * Number of allocation requests that correspond to the size of this
-	 * bin.  This includes requests served by tcache, though tcache only
-	 * periodically merges into this counter.
-	 */
-	uint64_t	nrequests;
-
-	/*
-	 * Current number of regions of this size class, including regions
-	 * currently cached by tcache.
-	 */
-	size_t		curregs;
-
-	/* Number of tcache fills from this bin. */
-	uint64_t	nfills;
-
-	/* Number of tcache flushes to this bin. */
-	uint64_t	nflushes;
-
-	/* Total number of slabs created for this bin's size class. */
-	uint64_t	nslabs;
-
-	/*
-	 * Total number of slabs reused by extracting them from the slabs heap
-	 * for this bin's size class.
-	 */
-	uint64_t	reslabs;
-
-	/* Current number of slabs in this bin. */
-	size_t		curslabs;
-};
-
-struct malloc_large_stats_s {
-	/*
-	 * Total number of allocation/deallocation requests served directly by
-	 * the arena.
-	 */
-	uint64_t	nmalloc;
-	uint64_t	ndalloc;
-
-	/*
-	 * Number of allocation requests that correspond to this size class.
-	 * This includes requests served by tcache, though tcache only
-	 * periodically merges into this counter.
-	 */
-	uint64_t	nrequests;
-
-	/* Current number of allocations of this size class. */
-	size_t		curlextents;
-};
-
-struct arena_stats_s {
-	/* Number of bytes currently mapped. */
-	size_t		mapped;
-
-	/*
-	 * Number of bytes currently retained as a side effect of munmap() being
-	 * disabled/bypassed.  Retained bytes are technically mapped (though
-	 * always decommitted or purged), but they are excluded from the mapped
-	 * statistic (above).
-	 */
-	size_t		retained;
-
-	/*
-	 * Total number of purge sweeps, total number of madvise calls made,
-	 * and total pages purged in order to keep dirty unused memory under
-	 * control.
-	 */
-	uint64_t	npurge;
-	uint64_t	nmadvise;
-	uint64_t	purged;
-
-	size_t		base;
-	size_t		internal; /* Protected via atomic_*_zu(). */
-	size_t		resident;
-
-	size_t		allocated_large;
-	uint64_t	nmalloc_large;
-	uint64_t	ndalloc_large;
-	uint64_t	nrequests_large;
-
-	/* Number of bytes cached in tcache associated with this arena. */
-	size_t		tcache_bytes;
-
-	/* One element for each large size class. */
-	malloc_large_stats_t	lstats[NSIZES - NBINS];
-};
-
-#endif /* JEMALLOC_INTERNAL_STATS_STRUCTS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/stats_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/stats_types.h
deleted file mode 100644
index f202b23..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/stats_types.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_STATS_TYPES_H
-#define JEMALLOC_INTERNAL_STATS_TYPES_H
-
-typedef struct tcache_bin_stats_s tcache_bin_stats_t;
-typedef struct malloc_bin_stats_s malloc_bin_stats_t;
-typedef struct malloc_large_stats_s malloc_large_stats_t;
-typedef struct arena_stats_s arena_stats_t;
-
-#endif /* JEMALLOC_INTERNAL_STATS_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tcache_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tcache_externs.h
deleted file mode 100644
index 930eaa2..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tcache_externs.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
-#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-extern bool	opt_tcache;
-extern ssize_t	opt_lg_tcache_max;
-
-extern tcache_bin_info_t	*tcache_bin_info;
-
-/*
- * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
- * large-object bins.
- */
-extern unsigned	nhbins;
-
-/* Maximum cached size class. */
-extern size_t	tcache_maxclass;
-
-/*
- * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
- * usable via the MALLOCX_TCACHE() flag.  The automatic per thread tcaches are
- * completely disjoint from this data structure.  tcaches starts off as a sparse
- * array, so it has no physical memory footprint until individual pages are
- * touched.  This allows the entire array to be allocated the first time an
- * explicit tcache is created without a disproportionate impact on memory usage.
- */
-extern tcaches_t	*tcaches;
-
-size_t	tcache_salloc(tsdn_t *tsdn, const void *ptr);
-void	tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
-void	*tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
-    tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
-void	tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
-    szind_t binind, unsigned rem);
-void	tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
-    unsigned rem, tcache_t *tcache);
-void	tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
-    arena_t *oldarena, arena_t *newarena);
-tcache_t *tcache_get_hard(tsd_t *tsd);
-tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena);
-void	tcache_cleanup(tsd_t *tsd);
-void	tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
-bool	tcaches_create(tsd_t *tsd, unsigned *r_ind);
-void	tcaches_flush(tsd_t *tsd, unsigned ind);
-void	tcaches_destroy(tsd_t *tsd, unsigned ind);
-bool	tcache_boot(tsdn_t *tsdn);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
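The explicit tcaches described above are driven through jemalloc's public interface rather than these internals. A hedged usage sketch, assuming a stock jemalloc build where the unprefixed mallctl/mallocx entry points and the tcache.* mallctl names are available:

/* explicit_tcache.c -- drive the sparse tcaches array via the public API. */
#include <jemalloc/jemalloc.h>
#include <stddef.h>

int main(void) {
	unsigned tc;
	size_t sz = sizeof(tc);

	/* Allocates a slot in the sparse tcaches array. */
	if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
		return 1;

	/* Route allocations through the explicit tcache instead of the automatic one. */
	void *p = mallocx(4096, MALLOCX_TCACHE(tc));
	if (p != NULL)
		dallocx(p, MALLOCX_TCACHE(tc));

	/* Flush cached objects back to the arena, then release the slot. */
	mallctl("tcache.flush", NULL, NULL, &tc, sizeof(tc));
	mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
	return 0;
}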
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tcache_inlines.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tcache_inlines.h
deleted file mode 100644
index 2762b0e..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tcache_inlines.h
+++ /dev/null
@@ -1,305 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
-#define JEMALLOC_INTERNAL_TCACHE_INLINES_H
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void	tcache_event(tsd_t *tsd, tcache_t *tcache);
-void	tcache_flush(void);
-bool	tcache_enabled_get(void);
-tcache_t *tcache_get(tsd_t *tsd, bool create);
-void	tcache_enabled_set(bool enabled);
-void	*tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
-void	*tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
-    size_t size, szind_t ind, bool zero, bool slow_path);
-void	*tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
-    size_t size, szind_t ind, bool zero, bool slow_path);
-void	tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
-    szind_t binind, bool slow_path);
-void	tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
-    size_t size, bool slow_path);
-tcache_t	*tcaches_get(tsd_t *tsd, unsigned ind);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
-JEMALLOC_INLINE void
-tcache_flush(void)
-{
-	tsd_t *tsd;
-
-	cassert(config_tcache);
-
-	tsd = tsd_fetch();
-	tcache_cleanup(tsd);
-}
-
-JEMALLOC_INLINE bool
-tcache_enabled_get(void)
-{
-	tsd_t *tsd;
-	tcache_enabled_t tcache_enabled;
-
-	cassert(config_tcache);
-
-	tsd = tsd_fetch();
-	tcache_enabled = tsd_tcache_enabled_get(tsd);
-	if (tcache_enabled == tcache_enabled_default) {
-		tcache_enabled = (tcache_enabled_t)opt_tcache;
-		tsd_tcache_enabled_set(tsd, tcache_enabled);
-	}
-
-	return ((bool)tcache_enabled);
-}
-
-JEMALLOC_INLINE void
-tcache_enabled_set(bool enabled)
-{
-	tsd_t *tsd;
-	tcache_enabled_t tcache_enabled;
-
-	cassert(config_tcache);
-
-	tsd = tsd_fetch();
-
-	tcache_enabled = (tcache_enabled_t)enabled;
-	tsd_tcache_enabled_set(tsd, tcache_enabled);
-
-	if (!enabled)
-		tcache_cleanup(tsd);
-}
-
-JEMALLOC_ALWAYS_INLINE tcache_t *
-tcache_get(tsd_t *tsd, bool create)
-{
-	tcache_t *tcache;
-
-	if (!config_tcache)
-		return (NULL);
-
-	tcache = tsd_tcache_get(tsd);
-	if (!create)
-		return (tcache);
-	if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
-		tcache = tcache_get_hard(tsd);
-		tsd_tcache_set(tsd, tcache);
-	}
-
-	return (tcache);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tcache_event(tsd_t *tsd, tcache_t *tcache)
-{
-	if (TCACHE_GC_INCR == 0)
-		return;
-
-	if (unlikely(ticker_tick(&tcache->gc_ticker)))
-		tcache_event_hard(tsd, tcache);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
-{
-	void *ret;
-
-	if (unlikely(tbin->ncached == 0)) {
-		tbin->low_water = -1;
-		*tcache_success = false;
-		return (NULL);
-	}
-	/*
-	 * tcache_success (instead of ret) should be checked upon the return of
-	 * this function.  We avoid checking (ret == NULL) because there is
-	 * never a null stored on the avail stack (which is unknown to the
-	 * compiler), and eagerly checking ret would cause pipeline stall
-	 * compiler), and eagerly checking ret would cause a pipeline stall
-	 */
-	*tcache_success = true;
-	ret = *(tbin->avail - tbin->ncached);
-	tbin->ncached--;
-
-	if (unlikely((int)tbin->ncached < tbin->low_water))
-		tbin->low_water = tbin->ncached;
-
-	return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
-    szind_t binind, bool zero, bool slow_path)
-{
-	void *ret;
-	tcache_bin_t *tbin;
-	bool tcache_success;
-	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-
-	assert(binind < NBINS);
-	tbin = &tcache->tbins[binind];
-	ret = tcache_alloc_easy(tbin, &tcache_success);
-	assert(tcache_success == (ret != NULL));
-	if (unlikely(!tcache_success)) {
-		bool tcache_hard_success;
-		arena = arena_choose(tsd, arena);
-		if (unlikely(arena == NULL))
-			return (NULL);
-
-		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
-		    tbin, binind, &tcache_hard_success);
-		if (tcache_hard_success == false)
-			return (NULL);
-	}
-
-	assert(ret);
-	/*
-	 * Only compute usize if required.  The checks in the following if
-	 * statement are all static.
-	 */
-	if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
-		usize = index2size(binind);
-		assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
-	}
-
-	if (likely(!zero)) {
-		if (slow_path && config_fill) {
-			if (unlikely(opt_junk_alloc)) {
-				arena_alloc_junk_small(ret,
-				    &arena_bin_info[binind], false);
-			} else if (unlikely(opt_zero))
-				memset(ret, 0, usize);
-		}
-	} else {
-		if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
-			arena_alloc_junk_small(ret, &arena_bin_info[binind],
-			    true);
-		}
-		memset(ret, 0, usize);
-	}
-
-	if (config_stats)
-		tbin->tstats.nrequests++;
-	if (config_prof)
-		tcache->prof_accumbytes += usize;
-	tcache_event(tsd, tcache);
-	return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
-    szind_t binind, bool zero, bool slow_path)
-{
-	void *ret;
-	tcache_bin_t *tbin;
-	bool tcache_success;
-
-	assert(binind < nhbins);
-	tbin = &tcache->tbins[binind];
-	ret = tcache_alloc_easy(tbin, &tcache_success);
-	assert(tcache_success == (ret != NULL));
-	if (unlikely(!tcache_success)) {
-		/*
-		 * Only allocate one large object at a time, because it's quite
-		 * expensive to create one and not use it.
-		 */
-		arena = arena_choose(tsd, arena);
-		if (unlikely(arena == NULL))
-			return (NULL);
-
-		ret = large_malloc(tsd_tsdn(tsd), arena, s2u(size), zero);
-		if (ret == NULL)
-			return (NULL);
-	} else {
-		size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-
-		/* Only compute usize on demand */
-		if (config_prof || (slow_path && config_fill) ||
-		    unlikely(zero)) {
-			usize = index2size(binind);
-			assert(usize <= tcache_maxclass);
-		}
-
-		if (likely(!zero)) {
-			if (slow_path && config_fill) {
-				if (unlikely(opt_junk_alloc)) {
-					memset(ret, JEMALLOC_ALLOC_JUNK,
-					    usize);
-				} else if (unlikely(opt_zero))
-					memset(ret, 0, usize);
-			}
-		} else
-			memset(ret, 0, usize);
-
-		if (config_stats)
-			tbin->tstats.nrequests++;
-		if (config_prof)
-			tcache->prof_accumbytes += usize;
-	}
-
-	tcache_event(tsd, tcache);
-	return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
-    bool slow_path)
-{
-	tcache_bin_t *tbin;
-	tcache_bin_info_t *tbin_info;
-
-	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
-
-	if (slow_path && config_fill && unlikely(opt_junk_free))
-		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
-
-	tbin = &tcache->tbins[binind];
-	tbin_info = &tcache_bin_info[binind];
-	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
-		tcache_bin_flush_small(tsd, tcache, tbin, binind,
-		    (tbin_info->ncached_max >> 1));
-	}
-	assert(tbin->ncached < tbin_info->ncached_max);
-	tbin->ncached++;
-	*(tbin->avail - tbin->ncached) = ptr;
-
-	tcache_event(tsd, tcache);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
-    bool slow_path)
-{
-	szind_t binind;
-	tcache_bin_t *tbin;
-	tcache_bin_info_t *tbin_info;
-
-	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
-	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
-
-	binind = size2index(size);
-
-	if (slow_path && config_fill && unlikely(opt_junk_free))
-		large_dalloc_junk(ptr, size);
-
-	tbin = &tcache->tbins[binind];
-	tbin_info = &tcache_bin_info[binind];
-	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
-		tcache_bin_flush_large(tsd, tbin, binind,
-		    (tbin_info->ncached_max >> 1), tcache);
-	}
-	assert(tbin->ncached < tbin_info->ncached_max);
-	tbin->ncached++;
-	*(tbin->avail - tbin->ncached) = ptr;
-
-	tcache_event(tsd, tcache);
-}
-
-JEMALLOC_ALWAYS_INLINE tcache_t *
-tcaches_get(tsd_t *tsd, unsigned ind)
-{
-	tcaches_t *elm = &tcaches[ind];
-	if (unlikely(elm->tcache == NULL)) {
-		elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
-		    NULL));
-	}
-	return (elm->tcache);
-}
-#endif
-
-#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */
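
The removed tcache fast path above treats each bin as a downward-indexed stack: allocation reads *(avail - ncached) and decrements ncached, deallocation does the reverse, and low_water records the shallowest fill level seen between GC passes. A minimal standalone sketch of that stack discipline (toy names and types, not the jemalloc sources):

#include <stdbool.h>
#include <stddef.h>

typedef struct {
	void		**avail;	/* Points just above the cached items. */
	int		ncached;
	int		low_water;
} toy_bin_t;

static void *
toy_bin_pop(toy_bin_t *bin, bool *success)
{
	if (bin->ncached == 0) {
		bin->low_water = -1;
		*success = false;
		return NULL;
	}
	*success = true;
	void *ret = *(bin->avail - bin->ncached);
	bin->ncached--;
	if (bin->ncached < bin->low_water)
		bin->low_water = bin->ncached;
	return ret;
}

static void
toy_bin_push(toy_bin_t *bin, void *ptr)
{
	bin->ncached++;
	*(bin->avail - bin->ncached) = ptr;
}
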
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tcache_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tcache_structs.h
deleted file mode 100644
index a2b28af..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tcache_structs.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
-#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
-
-typedef enum {
-	tcache_enabled_false   = 0, /* Enable cast to/from bool. */
-	tcache_enabled_true    = 1,
-	tcache_enabled_default = 2
-} tcache_enabled_t;
-
-/*
- * Read-only information associated with each element of tcache_t's tbins array
- * is stored separately, mainly to reduce memory usage.
- */
-struct tcache_bin_info_s {
-	unsigned	ncached_max;	/* Upper limit on ncached. */
-};
-
-struct tcache_bin_s {
-	tcache_bin_stats_t tstats;
-	int		low_water;	/* Min # cached since last GC. */
-	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
-	unsigned	ncached;	/* # of cached objects. */
-	/*
-	 * To make use of adjacent cacheline prefetch, the items in the avail
-	 * stack go to higher addresses for newer allocations.  avail points
-	 * just above the available space, which means that
-	 * avail[-ncached, ... -1] are available items and the lowest item will
-	 * be allocated first.
-	 */
-	void		**avail;	/* Stack of available objects. */
-};
-
-struct tcache_s {
-	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
-	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum(). */
-	ticker_t	gc_ticker;	/* Drives incremental GC. */
-	szind_t		next_gc_bin;	/* Next bin to GC. */
-	tcache_bin_t	tbins[1];	/* Dynamically sized. */
-	/*
-	 * The pointer stacks associated with tbins follow as a contiguous
-	 * array.  During tcache initialization, the avail pointer in each
-	 * element of tbins is initialized to point to the proper offset within
-	 * this array.
-	 */
-};
-
-/* Linkage for list of available (previously used) explicit tcache IDs. */
-struct tcaches_s {
-	union {
-		tcache_t	*tcache;
-		tcaches_t	*next;
-	};
-};
-
-#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */
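
tcache_s above ends in a nominally one-element tbins array that is really sized at runtime, with the per-bin pointer stacks packed after it in the same allocation. A rough sketch of how such a layout can be carved out of a single allocation (toy types and sizing, not the deleted tcache creation logic):

#include <stdlib.h>

typedef struct {
	void		**avail;
	unsigned	ncached;
} toy_bin_t;

typedef struct {
	unsigned	nbins;
	toy_bin_t	tbins[1];	/* Actually nbins entries. */
} toy_cache_t;

static toy_cache_t *
toy_cache_create(unsigned nbins, unsigned slots_per_bin)
{
	/* Assumes nbins >= 1; header covers the struct plus extra bins. */
	size_t header = sizeof(toy_cache_t) + (nbins - 1) * sizeof(toy_bin_t);
	size_t stacks = (size_t)nbins * slots_per_bin * sizeof(void *);
	toy_cache_t *c = calloc(1, header + stacks);
	if (c == NULL)
		return NULL;
	c->nbins = nbins;
	void **stack = (void **)((char *)c + header);
	for (unsigned i = 0; i < nbins; i++) {
		stack += slots_per_bin;
		/* avail points just above this bin's slice of the stacks. */
		c->tbins[i].avail = stack;
	}
	return c;
}
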
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tcache_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tcache_types.h
deleted file mode 100644
index c6ac767..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tcache_types.h
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H
-#define JEMALLOC_INTERNAL_TCACHE_TYPES_H
-
-typedef struct tcache_bin_info_s tcache_bin_info_t;
-typedef struct tcache_bin_s tcache_bin_t;
-typedef struct tcache_s tcache_t;
-typedef struct tcaches_s tcaches_t;
-
-/*
- * tcache pointers close to NULL are used to encode state information that is
- * used for two purposes: preventing thread caching on a per thread basis and
- * cleaning up during thread shutdown.
- */
-#define	TCACHE_STATE_DISABLED		((tcache_t *)(uintptr_t)1)
-#define	TCACHE_STATE_REINCARNATED	((tcache_t *)(uintptr_t)2)
-#define	TCACHE_STATE_PURGATORY		((tcache_t *)(uintptr_t)3)
-#define	TCACHE_STATE_MAX		TCACHE_STATE_PURGATORY
-
-/*
- * Absolute minimum number of cache slots for each small bin.
- */
-#define	TCACHE_NSLOTS_SMALL_MIN		20
-
-/*
- * Absolute maximum number of cache slots for each small bin in the thread
- * cache.  This is an additional constraint beyond that imposed as: twice the
- * number of regions per slab for this size class.
- *
- * This constant must be an even number.
- */
-#define	TCACHE_NSLOTS_SMALL_MAX		200
-
-/* Number of cache slots for large size classes. */
-#define	TCACHE_NSLOTS_LARGE		20
-
-/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
-#define	LG_TCACHE_MAXCLASS_DEFAULT	15
-
-/*
- * TCACHE_GC_SWEEP is the approximate number of allocation events between
- * full GC sweeps.  Integer rounding may cause the actual number to be
- * slightly higher, since GC is performed incrementally.
- */
-#define	TCACHE_GC_SWEEP			8192
-
-/* Number of tcache allocation/deallocation events between incremental GCs. */
-#define	TCACHE_GC_INCR							\
-    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
-
-#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
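
As a worked example of the TCACHE_GC_INCR formula above: if NBINS were 36 (the actual value is configuration-dependent), TCACHE_GC_INCR would be 8192 / 36 + 1 = 228, so a full sweep of all bins covers roughly 36 * 228 = 8208 events, slightly more than TCACHE_GC_SWEEP, which is exactly the integer-rounding effect the comment describes.
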
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ticker_inlines.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ticker_inlines.h
deleted file mode 100644
index 1a4395f..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ticker_inlines.h
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TICKER_INLINES_H
-#define JEMALLOC_INTERNAL_TICKER_INLINES_H
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void	ticker_init(ticker_t *ticker, int32_t nticks);
-void	ticker_copy(ticker_t *ticker, const ticker_t *other);
-int32_t	ticker_read(const ticker_t *ticker);
-bool	ticker_ticks(ticker_t *ticker, int32_t nticks);
-bool	ticker_tick(ticker_t *ticker);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_))
-JEMALLOC_INLINE void
-ticker_init(ticker_t *ticker, int32_t nticks)
-{
-	ticker->tick = nticks;
-	ticker->nticks = nticks;
-}
-
-JEMALLOC_INLINE void
-ticker_copy(ticker_t *ticker, const ticker_t *other)
-{
-	*ticker = *other;
-}
-
-JEMALLOC_INLINE int32_t
-ticker_read(const ticker_t *ticker)
-{
-	return (ticker->tick);
-}
-
-JEMALLOC_INLINE bool
-ticker_ticks(ticker_t *ticker, int32_t nticks)
-{
-	if (unlikely(ticker->tick < nticks)) {
-		ticker->tick = ticker->nticks;
-		return (true);
-	}
-	ticker->tick -= nticks;
-	return(false);
-}
-
-JEMALLOC_INLINE bool
-ticker_tick(ticker_t *ticker)
-{
-	return (ticker_ticks(ticker, 1));
-}
-#endif
-
-#endif /* JEMALLOC_INTERNAL_TICKER_INLINES_H */
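
The ticker above counts down by nticks per call and reports (and reloads) once the count is exhausted; it is what drives the incremental tcache GC. A self-contained toy with the same semantics (illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	int32_t	tick;
	int32_t	nticks;
} toy_ticker_t;

static bool
toy_ticker_ticks(toy_ticker_t *t, int32_t n)
{
	if (t->tick < n) {
		t->tick = t->nticks;	/* Reload and report a full period. */
		return true;
	}
	t->tick -= n;
	return false;
}

int
main(void)
{
	toy_ticker_t t = { .tick = 3, .nticks = 3 };

	for (int i = 0; i < 10; i++) {
		if (toy_ticker_ticks(&t, 1))
			printf("periodic work at event %d\n", i);
	}
	return 0;
}
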
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ticker_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ticker_structs.h
deleted file mode 100644
index e30c4e2..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ticker_structs.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TICKER_STRUCTS_H
-#define JEMALLOC_INTERNAL_TICKER_STRUCTS_H
-
-struct ticker_s {
-	int32_t	tick;
-	int32_t	nticks;
-};
-
-#endif /* JEMALLOC_INTERNAL_TICKER_STRUCTS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ticker_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ticker_types.h
deleted file mode 100644
index 62d67f3..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/ticker_types.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TICKER_TYPES_H
-#define JEMALLOC_INTERNAL_TICKER_TYPES_H
-
-typedef struct ticker_s ticker_t;
-
-#endif /* JEMALLOC_INTERNAL_TICKER_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tsd_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tsd_externs.h
deleted file mode 100644
index 577bdc5..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tsd_externs.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TSD_EXTERNS_H
-#define JEMALLOC_INTERNAL_TSD_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-void	*malloc_tsd_malloc(size_t size);
-void	malloc_tsd_dalloc(void *wrapper);
-void	malloc_tsd_no_cleanup(void *arg);
-void	malloc_tsd_cleanup_register(bool (*f)(void));
-tsd_t	*malloc_tsd_boot0(void);
-void	malloc_tsd_boot1(void);
-#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
-    !defined(_WIN32))
-void	*tsd_init_check_recursion(tsd_init_head_t *head,
-    tsd_init_block_t *block);
-void	tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
-#endif
-void	tsd_cleanup(void *arg);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_TSD_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tsd_inlines.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tsd_inlines.h
deleted file mode 100644
index 0df21ad..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tsd_inlines.h
+++ /dev/null
@@ -1,130 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TSD_INLINES_H
-#define JEMALLOC_INTERNAL_TSD_INLINES_H
-
-#ifndef JEMALLOC_ENABLE_INLINE
-malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
-
-tsd_t	*tsd_fetch_impl(bool init);
-tsd_t	*tsd_fetch(void);
-tsdn_t	*tsd_tsdn(tsd_t *tsd);
-bool	tsd_nominal(tsd_t *tsd);
-#define	O(n, t, c)							\
-t	*tsd_##n##p_get(tsd_t *tsd);					\
-t	tsd_##n##_get(tsd_t *tsd);					\
-void	tsd_##n##_set(tsd_t *tsd, t n);
-MALLOC_TSD
-#undef O
-tsdn_t	*tsdn_fetch(void);
-bool	tsdn_null(const tsdn_t *tsdn);
-tsd_t	*tsdn_tsd(tsdn_t *tsdn);
-rtree_ctx_t	*tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
-malloc_tsd_externs(, tsd_t)
-malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)
-
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch_impl(bool init)
-{
-	tsd_t *tsd = tsd_get(init);
-
-	if (!init && tsd_get_allocates() && tsd == NULL)
-		return (NULL);
-	assert(tsd != NULL);
-
-	if (unlikely(tsd->state != tsd_state_nominal)) {
-		if (tsd->state == tsd_state_uninitialized) {
-			tsd->state = tsd_state_nominal;
-			/* Trigger cleanup handler registration. */
-			tsd_set(tsd);
-		} else if (tsd->state == tsd_state_purgatory) {
-			tsd->state = tsd_state_reincarnated;
-			tsd_set(tsd);
-		} else
-			assert(tsd->state == tsd_state_reincarnated);
-	}
-
-	return (tsd);
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch(void)
-{
-	return (tsd_fetch_impl(true));
-}
-
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsd_tsdn(tsd_t *tsd)
-{
-	return ((tsdn_t *)tsd);
-}
-
-JEMALLOC_INLINE bool
-tsd_nominal(tsd_t *tsd)
-{
-	return (tsd->state == tsd_state_nominal);
-}
-
-#define	O(n, t, c)							\
-JEMALLOC_ALWAYS_INLINE t *						\
-tsd_##n##p_get(tsd_t *tsd)						\
-{									\
-	return (&tsd->n);						\
-}									\
-									\
-JEMALLOC_ALWAYS_INLINE t						\
-tsd_##n##_get(tsd_t *tsd)						\
-{									\
-	return (*tsd_##n##p_get(tsd));					\
-}									\
-									\
-JEMALLOC_ALWAYS_INLINE void						\
-tsd_##n##_set(tsd_t *tsd, t n)						\
-{									\
-	assert(tsd->state == tsd_state_nominal);			\
-	tsd->n = n;							\
-}
-MALLOC_TSD
-#undef O
-
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsdn_fetch(void)
-{
-	if (!tsd_booted_get())
-		return (NULL);
-
-	return (tsd_tsdn(tsd_fetch_impl(false)));
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsdn_null(const tsdn_t *tsdn)
-{
-	return (tsdn == NULL);
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsdn_tsd(tsdn_t *tsdn)
-{
-	assert(!tsdn_null(tsdn));
-
-	return (&tsdn->tsd);
-}
-
-JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
-tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback)
-{
-	/*
-	 * If tsd cannot be accessed, initialize the fallback rtree_ctx and
-	 * return a pointer to it.
-	 */
-	if (unlikely(tsdn_null(tsdn))) {
-		static const rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
-		memcpy(fallback, &rtree_ctx, sizeof(rtree_ctx_t));
-		return (fallback);
-	}
-	return (tsd_rtree_ctxp_get(tsdn_tsd(tsdn)));
-}
-#endif
-
-#endif /* JEMALLOC_INTERNAL_TSD_INLINES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tsd_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tsd_structs.h
deleted file mode 100644
index 8d94c5b..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tsd_structs.h
+++ /dev/null
@@ -1,73 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TSD_STRUCTS_H
-#define JEMALLOC_INTERNAL_TSD_STRUCTS_H
-
-#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
-    !defined(_WIN32))
-struct tsd_init_block_s {
-	ql_elm(tsd_init_block_t)	link;
-	pthread_t			thread;
-	void				*data;
-};
-struct tsd_init_head_s {
-	ql_head(tsd_init_block_t)	blocks;
-	malloc_mutex_t			lock;
-};
-#endif
-
-#define	MALLOC_TSD							\
-/*  O(name,			type,			cleanup) */	\
-    O(tcache,			tcache_t *,		yes)		\
-    O(thread_allocated,		uint64_t,		no)		\
-    O(thread_deallocated,	uint64_t,		no)		\
-    O(prof_tdata,		prof_tdata_t *,		yes)		\
-    O(iarena,			arena_t *,		yes)		\
-    O(arena,			arena_t *,		yes)		\
-    O(arenas_tdata,		arena_tdata_t *,	yes)		\
-    O(narenas_tdata,		unsigned,		no)		\
-    O(arenas_tdata_bypass,	bool,			no)		\
-    O(tcache_enabled,		tcache_enabled_t,	no)		\
-    O(rtree_ctx,		rtree_ctx_t,		no)		\
-    O(witnesses,		witness_list_t,		yes)		\
-    O(rtree_elm_witnesses,	rtree_elm_witness_tsd_t,no)		\
-    O(witness_fork,		bool,			no)		\
-
-#define	TSD_INITIALIZER {						\
-    tsd_state_uninitialized,						\
-    NULL,								\
-    0,									\
-    0,									\
-    NULL,								\
-    NULL,								\
-    NULL,								\
-    NULL,								\
-    0,									\
-    false,								\
-    tcache_enabled_default,						\
-    RTREE_CTX_INITIALIZER,						\
-    ql_head_initializer(witnesses),					\
-    RTREE_ELM_WITNESS_TSD_INITIALIZER,					\
-    false								\
-}
-
-struct tsd_s {
-	tsd_state_t	state;
-#define	O(n, t, c)							\
-	t		n;
-MALLOC_TSD
-#undef O
-};
-
-/*
- * Wrapper around tsd_t that makes it possible to avoid implicit conversion
- * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
- * explicitly converted to tsd_t, which is non-nullable.
- */
-struct tsdn_s {
-	tsd_t	tsd;
-};
-
-static const tsd_t tsd_initializer = TSD_INITIALIZER;
-
-malloc_tsd_types(, tsd_t)
-
-#endif /* JEMALLOC_INTERNAL_TSD_STRUCTS_H */
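
MALLOC_TSD above is an x-macro: the single field list is expanded once with O() defined to emit struct members and again to emit per-field accessors, which is how tsd_s and the tsd_*_get()/tsd_*_set() functions stay in sync. A tiny self-contained illustration of the pattern (toy field list, not the jemalloc one):

#include <stdint.h>

#define	TOY_FIELDS							\
    O(counter,	uint64_t)						\
    O(flag,	int)

typedef struct {
#define	O(n, t)	t n;
TOY_FIELDS
#undef O
} toy_t;

/* One accessor per field, generated from the same list. */
#define	O(n, t)								\
static t								\
toy_##n##_get(const toy_t *toy)						\
{									\
	return toy->n;							\
}
TOY_FIELDS
#undef O
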
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tsd_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tsd_types.h
deleted file mode 100644
index 17e3da9..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/tsd_types.h
+++ /dev/null
@@ -1,554 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H
-#define JEMALLOC_INTERNAL_TSD_TYPES_H
-
-/* Maximum number of malloc_tsd users with cleanup functions. */
-#define	MALLOC_TSD_CLEANUPS_MAX	2
-
-typedef bool (*malloc_tsd_cleanup_t)(void);
-
-#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
-    !defined(_WIN32))
-typedef struct tsd_init_block_s tsd_init_block_t;
-typedef struct tsd_init_head_s tsd_init_head_t;
-#endif
-
-typedef struct tsd_s tsd_t;
-typedef struct tsdn_s tsdn_t;
-
-#define	TSDN_NULL	((tsdn_t *)0)
-
-typedef enum {
-	tsd_state_uninitialized,
-	tsd_state_nominal,
-	tsd_state_purgatory,
-	tsd_state_reincarnated
-} tsd_state_t;
-
-/*
- * TLS/TSD-agnostic macro-based implementation of thread-specific data.  There
- * are five macros that support (at least) three use cases: file-private,
- * library-private, and library-private inlined.  Following is an example
- * library-private tsd variable:
- *
- * In example.h:
- *   typedef struct {
- *           int x;
- *           int y;
- *   } example_t;
- *   #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
- *   malloc_tsd_types(example_, example_t)
- *   malloc_tsd_protos(, example_, example_t)
- *   malloc_tsd_externs(example_, example_t)
- * In example.c:
- *   malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
- *   malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
- *       example_tsd_cleanup)
- *
- * The result is a set of generated functions, e.g.:
- *
- *   bool example_tsd_boot(void) {...}
- *   bool example_tsd_booted_get(void) {...}
- *   example_t *example_tsd_get(bool init) {...}
- *   void example_tsd_set(example_t *val) {...}
- *
- * Note that all of the functions deal in terms of (a_type *) rather than
- * (a_type) so that it is possible to support non-pointer types (unlike
- * pthreads TSD).  example_tsd_cleanup() is passed an (a_type *) pointer that is
- * cast to (void *).  This means that the cleanup function needs to cast the
- * function argument to (a_type *), then dereference the resulting pointer to
- * access fields, e.g.
- *
- *   void
- *   example_tsd_cleanup(void *arg)
- *   {
- *           example_t *example = (example_t *)arg;
- *
- *           example->x = 42;
- *           [...]
- *           if ([want the cleanup function to be called again])
- *                   example_tsd_set(example);
- *   }
- *
- * If example_tsd_set() is called within example_tsd_cleanup(), it will be
- * called again.  This is similar to how pthreads TSD destruction works, except
- * that pthreads only calls the cleanup function again if the value was set to
- * non-NULL.
- */
-
-/* malloc_tsd_types(). */
-#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
-#define	malloc_tsd_types(a_name, a_type)
-#elif (defined(JEMALLOC_TLS))
-#define	malloc_tsd_types(a_name, a_type)
-#elif (defined(_WIN32))
-#define	malloc_tsd_types(a_name, a_type)				\
-typedef struct {							\
-	bool	initialized;						\
-	a_type	val;							\
-} a_name##tsd_wrapper_t;
-#else
-#define	malloc_tsd_types(a_name, a_type)				\
-typedef struct {							\
-	bool	initialized;						\
-	a_type	val;							\
-} a_name##tsd_wrapper_t;
-#endif
-
-/* malloc_tsd_protos(). */
-#define	malloc_tsd_protos(a_attr, a_name, a_type)			\
-a_attr bool								\
-a_name##tsd_boot0(void);						\
-a_attr void								\
-a_name##tsd_boot1(void);						\
-a_attr bool								\
-a_name##tsd_boot(void);							\
-a_attr bool								\
-a_name##tsd_booted_get(void);						\
-a_attr a_type *								\
-a_name##tsd_get(bool init);						\
-a_attr void								\
-a_name##tsd_set(a_type *val);
-
-/* malloc_tsd_externs(). */
-#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
-#define	malloc_tsd_externs(a_name, a_type)				\
-extern __thread a_type	a_name##tsd_tls;				\
-extern __thread bool	a_name##tsd_initialized;			\
-extern bool		a_name##tsd_booted;
-#elif (defined(JEMALLOC_TLS))
-#define	malloc_tsd_externs(a_name, a_type)				\
-extern __thread a_type	a_name##tsd_tls;				\
-extern pthread_key_t	a_name##tsd_tsd;				\
-extern bool		a_name##tsd_booted;
-#elif (defined(_WIN32))
-#define	malloc_tsd_externs(a_name, a_type)				\
-extern DWORD		a_name##tsd_tsd;				\
-extern a_name##tsd_wrapper_t	a_name##tsd_boot_wrapper;		\
-extern bool		a_name##tsd_booted;
-#else
-#define	malloc_tsd_externs(a_name, a_type)				\
-extern pthread_key_t	a_name##tsd_tsd;				\
-extern tsd_init_head_t	a_name##tsd_init_head;				\
-extern a_name##tsd_wrapper_t	a_name##tsd_boot_wrapper;		\
-extern bool		a_name##tsd_booted;
-#endif
-
-/* malloc_tsd_data(). */
-#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
-#define	malloc_tsd_data(a_attr, a_name, a_type, a_initializer)		\
-a_attr __thread a_type JEMALLOC_TLS_MODEL				\
-    a_name##tsd_tls = a_initializer;					\
-a_attr __thread bool JEMALLOC_TLS_MODEL					\
-    a_name##tsd_initialized = false;					\
-a_attr bool		a_name##tsd_booted = false;
-#elif (defined(JEMALLOC_TLS))
-#define	malloc_tsd_data(a_attr, a_name, a_type, a_initializer)		\
-a_attr __thread a_type JEMALLOC_TLS_MODEL				\
-    a_name##tsd_tls = a_initializer;					\
-a_attr pthread_key_t	a_name##tsd_tsd;				\
-a_attr bool		a_name##tsd_booted = false;
-#elif (defined(_WIN32))
-#define	malloc_tsd_data(a_attr, a_name, a_type, a_initializer)		\
-a_attr DWORD		a_name##tsd_tsd;				\
-a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = {		\
-	false,								\
-	a_initializer							\
-};									\
-a_attr bool		a_name##tsd_booted = false;
-#else
-#define	malloc_tsd_data(a_attr, a_name, a_type, a_initializer)		\
-a_attr pthread_key_t	a_name##tsd_tsd;				\
-a_attr tsd_init_head_t	a_name##tsd_init_head = {			\
-	ql_head_initializer(blocks),					\
-	MALLOC_MUTEX_INITIALIZER					\
-};									\
-a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = {		\
-	false,								\
-	a_initializer							\
-};									\
-a_attr bool		a_name##tsd_booted = false;
-#endif
-
-/* malloc_tsd_funcs(). */
-#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
-#define	malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer,		\
-    a_cleanup)								\
-/* Initialization/cleanup. */						\
-a_attr bool								\
-a_name##tsd_cleanup_wrapper(void)					\
-{									\
-	if (a_name##tsd_initialized) {					\
-		a_name##tsd_initialized = false;			\
-		a_cleanup(&a_name##tsd_tls);				\
-	}								\
-	return (a_name##tsd_initialized);				\
-}									\
-a_attr bool								\
-a_name##tsd_boot0(void)							\
-{									\
-	if (a_cleanup != malloc_tsd_no_cleanup) {			\
-		malloc_tsd_cleanup_register(				\
-		    &a_name##tsd_cleanup_wrapper);			\
-	}								\
-	a_name##tsd_booted = true;					\
-	return (false);							\
-}									\
-a_attr void								\
-a_name##tsd_boot1(void)							\
-{									\
-	/* Do nothing. */						\
-}									\
-a_attr bool								\
-a_name##tsd_boot(void)							\
-{									\
-	return (a_name##tsd_boot0());					\
-}									\
-a_attr bool								\
-a_name##tsd_booted_get(void)						\
-{									\
-	return (a_name##tsd_booted);					\
-}									\
-a_attr bool								\
-a_name##tsd_get_allocates(void)						\
-{									\
-	return (false);							\
-}									\
-/* Get/set. */								\
-a_attr a_type *								\
-a_name##tsd_get(bool init)						\
-{									\
-	assert(a_name##tsd_booted);					\
-	return (&a_name##tsd_tls);					\
-}									\
-a_attr void								\
-a_name##tsd_set(a_type *val)						\
-{									\
-	assert(a_name##tsd_booted);					\
-	if (likely(&a_name##tsd_tls != val))				\
-		a_name##tsd_tls = (*val);				\
-	if (a_cleanup != malloc_tsd_no_cleanup)				\
-		a_name##tsd_initialized = true;				\
-}
-#elif (defined(JEMALLOC_TLS))
-#define	malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer,		\
-    a_cleanup)								\
-/* Initialization/cleanup. */						\
-a_attr bool								\
-a_name##tsd_boot0(void)							\
-{									\
-	if (a_cleanup != malloc_tsd_no_cleanup) {			\
-		if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) !=	\
-		    0)							\
-			return (true);					\
-	}								\
-	a_name##tsd_booted = true;					\
-	return (false);							\
-}									\
-a_attr void								\
-a_name##tsd_boot1(void)							\
-{									\
-	/* Do nothing. */						\
-}									\
-a_attr bool								\
-a_name##tsd_boot(void)							\
-{									\
-	return (a_name##tsd_boot0());					\
-}									\
-a_attr bool								\
-a_name##tsd_booted_get(void)						\
-{									\
-	return (a_name##tsd_booted);					\
-}									\
-a_attr bool								\
-a_name##tsd_get_allocates(void)						\
-{									\
-	return (false);							\
-}									\
-/* Get/set. */								\
-a_attr a_type *								\
-a_name##tsd_get(bool init)						\
-{									\
-	assert(a_name##tsd_booted);					\
-	return (&a_name##tsd_tls);					\
-}									\
-a_attr void								\
-a_name##tsd_set(a_type *val)						\
-{									\
-	assert(a_name##tsd_booted);					\
-	if (likely(&a_name##tsd_tls != val))				\
-		a_name##tsd_tls = (*val);				\
-	if (a_cleanup != malloc_tsd_no_cleanup) {			\
-		if (pthread_setspecific(a_name##tsd_tsd,		\
-		    (void *)(&a_name##tsd_tls))) {			\
-			malloc_write("<jemalloc>: Error"		\
-			    " setting TSD for "#a_name"\n");		\
-			if (opt_abort)					\
-				abort();				\
-		}							\
-	}								\
-}
-#elif (defined(_WIN32))
-#define	malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer,		\
-    a_cleanup)								\
-/* Initialization/cleanup. */						\
-a_attr bool								\
-a_name##tsd_cleanup_wrapper(void)					\
-{									\
-	DWORD error = GetLastError();					\
-	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)	\
-	    TlsGetValue(a_name##tsd_tsd);				\
-	SetLastError(error);						\
-									\
-	if (wrapper == NULL)						\
-		return (false);						\
-	if (a_cleanup != malloc_tsd_no_cleanup &&			\
-	    wrapper->initialized) {					\
-		wrapper->initialized = false;				\
-		a_cleanup(&wrapper->val);				\
-		if (wrapper->initialized) {				\
-			/* Trigger another cleanup round. */		\
-			return (true);					\
-		}							\
-	}								\
-	malloc_tsd_dalloc(wrapper);					\
-	return (false);							\
-}									\
-a_attr void								\
-a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper)			\
-{									\
-	if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) {		\
-		malloc_write("<jemalloc>: Error setting"		\
-		    " TSD for "#a_name"\n");				\
-		abort();						\
-	}								\
-}									\
-a_attr a_name##tsd_wrapper_t *						\
-a_name##tsd_wrapper_get(bool init)					\
-{									\
-	DWORD error = GetLastError();					\
-	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)	\
-	    TlsGetValue(a_name##tsd_tsd);				\
-	SetLastError(error);						\
-									\
-	if (init && unlikely(wrapper == NULL)) {			\
-		wrapper = (a_name##tsd_wrapper_t *)			\
-		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t));	\
-		if (wrapper == NULL) {					\
-			malloc_write("<jemalloc>: Error allocating"	\
-			    " TSD for "#a_name"\n");			\
-			abort();					\
-		} else {						\
-			wrapper->initialized = false;			\
-			wrapper->val = a_initializer;			\
-		}							\
-		a_name##tsd_wrapper_set(wrapper);			\
-	}								\
-	return (wrapper);						\
-}									\
-a_attr bool								\
-a_name##tsd_boot0(void)							\
-{									\
-	a_name##tsd_tsd = TlsAlloc();					\
-	if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES)			\
-		return (true);						\
-	if (a_cleanup != malloc_tsd_no_cleanup) {			\
-		malloc_tsd_cleanup_register(				\
-		    &a_name##tsd_cleanup_wrapper);			\
-	}								\
-	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper);		\
-	a_name##tsd_booted = true;					\
-	return (false);							\
-}									\
-a_attr void								\
-a_name##tsd_boot1(void)							\
-{									\
-	a_name##tsd_wrapper_t *wrapper;					\
-	wrapper = (a_name##tsd_wrapper_t *)				\
-	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t));		\
-	if (wrapper == NULL) {						\
-		malloc_write("<jemalloc>: Error allocating"		\
-		    " TSD for "#a_name"\n");				\
-		abort();						\
-	}								\
-	memcpy(wrapper, &a_name##tsd_boot_wrapper,			\
-	    sizeof(a_name##tsd_wrapper_t));				\
-	a_name##tsd_wrapper_set(wrapper);				\
-}									\
-a_attr bool								\
-a_name##tsd_boot(void)							\
-{									\
-	if (a_name##tsd_boot0())					\
-		return (true);						\
-	a_name##tsd_boot1();						\
-	return (false);							\
-}									\
-a_attr bool								\
-a_name##tsd_booted_get(void)						\
-{									\
-	return (a_name##tsd_booted);					\
-}									\
-a_attr bool								\
-a_name##tsd_get_allocates(void)						\
-{									\
-	return (true);							\
-}									\
-/* Get/set. */								\
-a_attr a_type *								\
-a_name##tsd_get(bool init)						\
-{									\
-	a_name##tsd_wrapper_t *wrapper;					\
-									\
-	assert(a_name##tsd_booted);					\
-	wrapper = a_name##tsd_wrapper_get(init);			\
-	if (a_name##tsd_get_allocates() && !init && wrapper == NULL)	\
-		return (NULL);						\
-	return (&wrapper->val);						\
-}									\
-a_attr void								\
-a_name##tsd_set(a_type *val)						\
-{									\
-	a_name##tsd_wrapper_t *wrapper;					\
-									\
-	assert(a_name##tsd_booted);					\
-	wrapper = a_name##tsd_wrapper_get(true);			\
-	if (likely(&wrapper->val != val))				\
-		wrapper->val = *(val);					\
-	if (a_cleanup != malloc_tsd_no_cleanup)				\
-		wrapper->initialized = true;				\
-}
-#else
-#define	malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer,		\
-    a_cleanup)								\
-/* Initialization/cleanup. */						\
-a_attr void								\
-a_name##tsd_cleanup_wrapper(void *arg)					\
-{									\
-	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg;	\
-									\
-	if (a_cleanup != malloc_tsd_no_cleanup &&			\
-	    wrapper->initialized) {					\
-		wrapper->initialized = false;				\
-		a_cleanup(&wrapper->val);				\
-		if (wrapper->initialized) {				\
-			/* Trigger another cleanup round. */		\
-			if (pthread_setspecific(a_name##tsd_tsd,	\
-			    (void *)wrapper)) {				\
-				malloc_write("<jemalloc>: Error"	\
-				    " setting TSD for "#a_name"\n");	\
-				if (opt_abort)				\
-					abort();			\
-			}						\
-			return;						\
-		}							\
-	}								\
-	malloc_tsd_dalloc(wrapper);					\
-}									\
-a_attr void								\
-a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper)			\
-{									\
-	if (pthread_setspecific(a_name##tsd_tsd,			\
-	    (void *)wrapper)) {						\
-		malloc_write("<jemalloc>: Error setting"		\
-		    " TSD for "#a_name"\n");				\
-		abort();						\
-	}								\
-}									\
-a_attr a_name##tsd_wrapper_t *						\
-a_name##tsd_wrapper_get(bool init)					\
-{									\
-	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)	\
-	    pthread_getspecific(a_name##tsd_tsd);			\
-									\
-	if (init && unlikely(wrapper == NULL)) {			\
-		tsd_init_block_t block;					\
-		wrapper = (a_name##tsd_wrapper_t *)			\
-		    tsd_init_check_recursion(&a_name##tsd_init_head,	\
-		    &block);						\
-		if (wrapper)						\
-		    return (wrapper);					\
-		wrapper = (a_name##tsd_wrapper_t *)			\
-		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t));	\
-		block.data = (void *)wrapper;				\
-		if (wrapper == NULL) {					\
-			malloc_write("<jemalloc>: Error allocating"	\
-			    " TSD for "#a_name"\n");			\
-			abort();					\
-		} else {						\
-			wrapper->initialized = false;			\
-			wrapper->val = a_initializer;			\
-		}							\
-		a_name##tsd_wrapper_set(wrapper);			\
-		tsd_init_finish(&a_name##tsd_init_head, &block);	\
-	}								\
-	return (wrapper);						\
-}									\
-a_attr bool								\
-a_name##tsd_boot0(void)							\
-{									\
-	if (pthread_key_create(&a_name##tsd_tsd,			\
-	    a_name##tsd_cleanup_wrapper) != 0)				\
-		return (true);						\
-	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper);		\
-	a_name##tsd_booted = true;					\
-	return (false);							\
-}									\
-a_attr void								\
-a_name##tsd_boot1(void)							\
-{									\
-	a_name##tsd_wrapper_t *wrapper;					\
-	wrapper = (a_name##tsd_wrapper_t *)				\
-	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t));		\
-	if (wrapper == NULL) {						\
-		malloc_write("<jemalloc>: Error allocating"		\
-		    " TSD for "#a_name"\n");				\
-		abort();						\
-	}								\
-	memcpy(wrapper, &a_name##tsd_boot_wrapper,			\
-	    sizeof(a_name##tsd_wrapper_t));				\
-	a_name##tsd_wrapper_set(wrapper);				\
-}									\
-a_attr bool								\
-a_name##tsd_boot(void)							\
-{									\
-	if (a_name##tsd_boot0())					\
-		return (true);						\
-	a_name##tsd_boot1();						\
-	return (false);							\
-}									\
-a_attr bool								\
-a_name##tsd_booted_get(void)						\
-{									\
-	return (a_name##tsd_booted);					\
-}									\
-a_attr bool								\
-a_name##tsd_get_allocates(void)						\
-{									\
-	return (true);							\
-}									\
-/* Get/set. */								\
-a_attr a_type *								\
-a_name##tsd_get(bool init)						\
-{									\
-	a_name##tsd_wrapper_t *wrapper;					\
-									\
-	assert(a_name##tsd_booted);					\
-	wrapper = a_name##tsd_wrapper_get(init);			\
-	if (a_name##tsd_get_allocates() && !init && wrapper == NULL)	\
-		return (NULL);						\
-	return (&wrapper->val);						\
-}									\
-a_attr void								\
-a_name##tsd_set(a_type *val)						\
-{									\
-	a_name##tsd_wrapper_t *wrapper;					\
-									\
-	assert(a_name##tsd_booted);					\
-	wrapper = a_name##tsd_wrapper_get(true);			\
-	if (likely(&wrapper->val != val))				\
-		wrapper->val = *(val);					\
-	if (a_cleanup != malloc_tsd_no_cleanup)				\
-		wrapper->initialized = true;				\
-}
-#endif
-
-#endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/util_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/util_externs.h
deleted file mode 100644
index 70362cb..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/util_externs.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_UTIL_EXTERNS_H
-#define JEMALLOC_INTERNAL_UTIL_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-int	buferror(int err, char *buf, size_t buflen);
-uintmax_t	malloc_strtoumax(const char *restrict nptr,
-    char **restrict endptr, int base);
-void	malloc_write(const char *s);
-
-/*
- * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
- * point math.
- */
-size_t	malloc_vsnprintf(char *str, size_t size, const char *format,
-    va_list ap);
-size_t	malloc_snprintf(char *str, size_t size, const char *format, ...)
-    JEMALLOC_FORMAT_PRINTF(3, 4);
-void	malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
-    const char *format, va_list ap);
-void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
-    const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
-void	malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_UTIL_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/util_inlines.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/util_inlines.h
deleted file mode 100644
index 689e5eb..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/util_inlines.h
+++ /dev/null
@@ -1,211 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_UTIL_INLINES_H
-#define JEMALLOC_INTERNAL_UTIL_INLINES_H
-
-#ifndef JEMALLOC_ENABLE_INLINE
-unsigned	ffs_llu(unsigned long long bitmap);
-unsigned	ffs_lu(unsigned long bitmap);
-unsigned	ffs_u(unsigned bitmap);
-unsigned	ffs_zu(size_t bitmap);
-unsigned	ffs_u64(uint64_t bitmap);
-unsigned	ffs_u32(uint32_t bitmap);
-uint64_t	pow2_ceil_u64(uint64_t x);
-uint32_t	pow2_ceil_u32(uint32_t x);
-size_t	pow2_ceil_zu(size_t x);
-unsigned	lg_floor(size_t x);
-void	set_errno(int errnum);
-int	get_errno(void);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
-
-/* Sanity check. */
-#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
-    || !defined(JEMALLOC_INTERNAL_FFS)
-#  error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
-#endif
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_llu(unsigned long long bitmap)
-{
-	return (JEMALLOC_INTERNAL_FFSLL(bitmap));
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_lu(unsigned long bitmap)
-{
-	return (JEMALLOC_INTERNAL_FFSL(bitmap));
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u(unsigned bitmap)
-{
-	return (JEMALLOC_INTERNAL_FFS(bitmap));
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_zu(size_t bitmap)
-{
-#if LG_SIZEOF_PTR == LG_SIZEOF_INT
-	return (ffs_u(bitmap));
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
-	return (ffs_lu(bitmap));
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
-	return (ffs_llu(bitmap));
-#else
-#error No implementation for size_t ffs()
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u64(uint64_t bitmap)
-{
-#if LG_SIZEOF_LONG == 3
-	return (ffs_lu(bitmap));
-#elif LG_SIZEOF_LONG_LONG == 3
-	return (ffs_llu(bitmap));
-#else
-#error No implementation for 64-bit ffs()
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u32(uint32_t bitmap)
-{
-#if LG_SIZEOF_INT == 2
-	return (ffs_u(bitmap));
-#else
-#error No implementation for 32-bit ffs()
-#endif
-	return (ffs_u(bitmap));
-}
-
-JEMALLOC_INLINE uint64_t
-pow2_ceil_u64(uint64_t x)
-{
-	x--;
-	x |= x >> 1;
-	x |= x >> 2;
-	x |= x >> 4;
-	x |= x >> 8;
-	x |= x >> 16;
-	x |= x >> 32;
-	x++;
-	return (x);
-}
-
-JEMALLOC_INLINE uint32_t
-pow2_ceil_u32(uint32_t x)
-{
-	x--;
-	x |= x >> 1;
-	x |= x >> 2;
-	x |= x >> 4;
-	x |= x >> 8;
-	x |= x >> 16;
-	x++;
-	return (x);
-}
-
-/* Compute the smallest power of 2 that is >= x. */
-JEMALLOC_INLINE size_t
-pow2_ceil_zu(size_t x)
-{
-#if (LG_SIZEOF_PTR == 3)
-	return (pow2_ceil_u64(x));
-#else
-	return (pow2_ceil_u32(x));
-#endif
-}
-
-#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
-JEMALLOC_INLINE unsigned
-lg_floor(size_t x)
-{
-	size_t ret;
-
-	assert(x != 0);
-
-	__asm__ ("bsr %1, %0"
-		: "=r"(ret) // Outputs.
-		: "r"(x)    // Inputs.
-		);
-	assert(ret < UINT_MAX);
-	return ((unsigned)ret);
-}
-#elif (defined(_MSC_VER))
-JEMALLOC_INLINE unsigned
-lg_floor(size_t x)
-{
-	unsigned long ret;
-
-	assert(x != 0);
-
-#if (LG_SIZEOF_PTR == 3)
-	_BitScanReverse64(&ret, x);
-#elif (LG_SIZEOF_PTR == 2)
-	_BitScanReverse(&ret, x);
-#else
-#  error "Unsupported type size for lg_floor()"
-#endif
-	assert(ret < UINT_MAX);
-	return ((unsigned)ret);
-}
-#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
-JEMALLOC_INLINE unsigned
-lg_floor(size_t x)
-{
-	assert(x != 0);
-
-#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
-	return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x));
-#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
-	return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
-#else
-#  error "Unsupported type size for lg_floor()"
-#endif
-}
-#else
-JEMALLOC_INLINE unsigned
-lg_floor(size_t x)
-{
-	assert(x != 0);
-
-	x |= (x >> 1);
-	x |= (x >> 2);
-	x |= (x >> 4);
-	x |= (x >> 8);
-	x |= (x >> 16);
-#if (LG_SIZEOF_PTR == 3)
-	x |= (x >> 32);
-#endif
-	if (x == SIZE_T_MAX)
-		return ((8 << LG_SIZEOF_PTR) - 1);
-	x++;
-	return (ffs_zu(x) - 2);
-}
-#endif
-
-/* Set error code. */
-JEMALLOC_INLINE void
-set_errno(int errnum)
-{
-#ifdef _WIN32
-	SetLastError(errnum);
-#else
-	errno = errnum;
-#endif
-}
-
-/* Get last error code. */
-JEMALLOC_INLINE int
-get_errno(void)
-{
-#ifdef _WIN32
-	return (GetLastError());
-#else
-	return (errno);
-#endif
-}
-#endif
-
-#endif /* JEMALLOC_INTERNAL_UTIL_INLINES_H */
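
pow2_ceil_u32() above rounds up to the next power of two by smearing the highest set bit into every lower position and then adding one. The same trick in standalone form, with a small self-check (illustrative only; like the original, the zero input wraps):

#include <stdint.h>
#include <stdio.h>

static uint32_t
toy_pow2_ceil_u32(uint32_t x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return x + 1;
}

int
main(void)
{
	/* Expected: 1 4 4 1024. */
	printf("%u %u %u %u\n", toy_pow2_ceil_u32(1), toy_pow2_ceil_u32(3),
	    toy_pow2_ceil_u32(4), toy_pow2_ceil_u32(1000));
	return 0;
}
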
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/util_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/util_types.h
deleted file mode 100644
index 7f72799..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/util_types.h
+++ /dev/null
@@ -1,94 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_UTIL_TYPES_H
-#define JEMALLOC_INTERNAL_UTIL_TYPES_H
-
-#ifdef _WIN32
-#  ifdef _WIN64
-#    define FMT64_PREFIX "ll"
-#    define FMTPTR_PREFIX "ll"
-#  else
-#    define FMT64_PREFIX "ll"
-#    define FMTPTR_PREFIX ""
-#  endif
-#  define FMTd32 "d"
-#  define FMTu32 "u"
-#  define FMTx32 "x"
-#  define FMTd64 FMT64_PREFIX "d"
-#  define FMTu64 FMT64_PREFIX "u"
-#  define FMTx64 FMT64_PREFIX "x"
-#  define FMTdPTR FMTPTR_PREFIX "d"
-#  define FMTuPTR FMTPTR_PREFIX "u"
-#  define FMTxPTR FMTPTR_PREFIX "x"
-#else
-#  include <inttypes.h>
-#  define FMTd32 PRId32
-#  define FMTu32 PRIu32
-#  define FMTx32 PRIx32
-#  define FMTd64 PRId64
-#  define FMTu64 PRIu64
-#  define FMTx64 PRIx64
-#  define FMTdPTR PRIdPTR
-#  define FMTuPTR PRIuPTR
-#  define FMTxPTR PRIxPTR
-#endif
-
-/* Size of stack-allocated buffer passed to buferror(). */
-#define	BUFERROR_BUF		64
-
-/*
- * Size of stack-allocated buffer used by malloc_{,v,vc}printf().  This must be
- * large enough for all possible uses within jemalloc.
- */
-#define	MALLOC_PRINTF_BUFSIZE	4096
-
-/* Junk fill patterns. */
-#ifndef JEMALLOC_ALLOC_JUNK
-#  define JEMALLOC_ALLOC_JUNK	((uint8_t)0xa5)
-#endif
-#ifndef JEMALLOC_FREE_JUNK
-#  define JEMALLOC_FREE_JUNK	((uint8_t)0x5a)
-#endif
-
-/*
- * Wrap a cpp argument that contains commas such that it isn't broken up into
- * multiple arguments.
- */
-#define	JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
-
-/* cpp macro definition stringification. */
-#define	STRINGIFY_HELPER(x) #x
-#define	STRINGIFY(x) STRINGIFY_HELPER(x)
-
-/*
- * Silence compiler warnings due to uninitialized values.  This is used
- * wherever the compiler fails to recognize that the variable is never used
- * uninitialized.
- */
-#ifdef JEMALLOC_CC_SILENCE
-#  define JEMALLOC_CC_SILENCE_INIT(v) = v
-#else
-#  define JEMALLOC_CC_SILENCE_INIT(v)
-#endif
-
-#ifdef __GNUC__
-#  define likely(x)   __builtin_expect(!!(x), 1)
-#  define unlikely(x) __builtin_expect(!!(x), 0)
-#else
-#  define likely(x)   !!(x)
-#  define unlikely(x) !!(x)
-#endif
-
-#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
-#  error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
-#endif
-
-#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
-
-#include "jemalloc/internal/assert.h"
-
-/* Use to assert a particular configuration, e.g., cassert(config_debug). */
-#define	cassert(c) do {							\
-	if (unlikely(!(c)))						\
-		not_reached();						\
-} while (0)
-
-#endif /* JEMALLOC_INTERNAL_UTIL_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/witness_externs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/witness_externs.h
deleted file mode 100644
index c0a76fe..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/witness_externs.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_WITNESS_EXTERNS_H
-#define JEMALLOC_INTERNAL_WITNESS_EXTERNS_H
-
-#pragma GCC visibility push(hidden)
-
-void	witness_init(witness_t *witness, const char *name, witness_rank_t rank,
-    witness_comp_t *comp, void *opaque);
-#ifdef JEMALLOC_JET
-typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
-extern witness_lock_error_t *witness_lock_error;
-#else
-void	witness_lock_error(const witness_list_t *witnesses,
-    const witness_t *witness);
-#endif
-#ifdef JEMALLOC_JET
-typedef void (witness_owner_error_t)(const witness_t *);
-extern witness_owner_error_t *witness_owner_error;
-#else
-void	witness_owner_error(const witness_t *witness);
-#endif
-#ifdef JEMALLOC_JET
-typedef void (witness_not_owner_error_t)(const witness_t *);
-extern witness_not_owner_error_t *witness_not_owner_error;
-#else
-void	witness_not_owner_error(const witness_t *witness);
-#endif
-#ifdef JEMALLOC_JET
-typedef void (witness_lockless_error_t)(const witness_list_t *);
-extern witness_lockless_error_t *witness_lockless_error;
-#else
-void	witness_lockless_error(const witness_list_t *witnesses);
-#endif
-
-void	witnesses_cleanup(tsd_t *tsd);
-void	witness_prefork(tsd_t *tsd);
-void	witness_postfork_parent(tsd_t *tsd);
-void	witness_postfork_child(tsd_t *tsd);
-
-#pragma GCC visibility pop
-
-#endif /* JEMALLOC_INTERNAL_WITNESS_EXTERNS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/witness_inlines.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/witness_inlines.h
deleted file mode 100644
index 259aa2e..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/witness_inlines.h
+++ /dev/null
@@ -1,163 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_WITNESS_INLINES_H
-#define JEMALLOC_INTERNAL_WITNESS_INLINES_H
-
-#ifndef JEMALLOC_ENABLE_INLINE
-bool	witness_owner(tsd_t *tsd, const witness_t *witness);
-void	witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
-void	witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
-void	witness_assert_lockless(tsdn_t *tsdn);
-void	witness_lock(tsdn_t *tsdn, witness_t *witness);
-void	witness_unlock(tsdn_t *tsdn, witness_t *witness);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
-/* Helper, not intended for direct use. */
-JEMALLOC_INLINE bool
-witness_owner(tsd_t *tsd, const witness_t *witness)
-{
-	witness_list_t *witnesses;
-	witness_t *w;
-
-	cassert(config_debug);
-
-	witnesses = tsd_witnessesp_get(tsd);
-	ql_foreach(w, witnesses, link) {
-		if (w == witness)
-			return (true);
-	}
-
-	return (false);
-}
-
-JEMALLOC_INLINE void
-witness_assert_owner(tsdn_t *tsdn, const witness_t *witness)
-{
-	tsd_t *tsd;
-
-	if (!config_debug)
-		return;
-
-	if (tsdn_null(tsdn))
-		return;
-	tsd = tsdn_tsd(tsdn);
-	if (witness->rank == WITNESS_RANK_OMIT)
-		return;
-
-	if (witness_owner(tsd, witness))
-		return;
-	witness_owner_error(witness);
-}
-
-JEMALLOC_INLINE void
-witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
-{
-	tsd_t *tsd;
-	witness_list_t *witnesses;
-	witness_t *w;
-
-	if (!config_debug)
-		return;
-
-	if (tsdn_null(tsdn))
-		return;
-	tsd = tsdn_tsd(tsdn);
-	if (witness->rank == WITNESS_RANK_OMIT)
-		return;
-
-	witnesses = tsd_witnessesp_get(tsd);
-	ql_foreach(w, witnesses, link) {
-		if (w == witness)
-			witness_not_owner_error(witness);
-	}
-}
-
-JEMALLOC_INLINE void
-witness_assert_lockless(tsdn_t *tsdn)
-{
-	tsd_t *tsd;
-	witness_list_t *witnesses;
-	witness_t *w;
-
-	if (!config_debug)
-		return;
-
-	if (tsdn_null(tsdn))
-		return;
-	tsd = tsdn_tsd(tsdn);
-
-	witnesses = tsd_witnessesp_get(tsd);
-	w = ql_last(witnesses, link);
-	if (w != NULL)
-		witness_lockless_error(witnesses);
-}
-
-JEMALLOC_INLINE void
-witness_lock(tsdn_t *tsdn, witness_t *witness)
-{
-	tsd_t *tsd;
-	witness_list_t *witnesses;
-	witness_t *w;
-
-	if (!config_debug)
-		return;
-
-	if (tsdn_null(tsdn))
-		return;
-	tsd = tsdn_tsd(tsdn);
-	if (witness->rank == WITNESS_RANK_OMIT)
-		return;
-
-	witness_assert_not_owner(tsdn, witness);
-
-	witnesses = tsd_witnessesp_get(tsd);
-	w = ql_last(witnesses, link);
-	if (w == NULL) {
-		/* No other locks; do nothing. */
-	} else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) {
-		/* Forking, and relaxed ranking satisfied. */
-	} else if (w->rank > witness->rank) {
-		/* Not forking, rank order reversal. */
-		witness_lock_error(witnesses, witness);
-	} else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
-	    witness->comp || w->comp(w, w->opaque, witness, witness->opaque) >
-	    0)) {
-		/*
-		 * Missing/incompatible comparison function, or comparison
-		 * function indicates rank order reversal.
-		 */
-		witness_lock_error(witnesses, witness);
-	}
-
-	ql_elm_new(witness, link);
-	ql_tail_insert(witnesses, witness, link);
-}
-
-JEMALLOC_INLINE void
-witness_unlock(tsdn_t *tsdn, witness_t *witness)
-{
-	tsd_t *tsd;
-	witness_list_t *witnesses;
-
-	if (!config_debug)
-		return;
-
-	if (tsdn_null(tsdn))
-		return;
-	tsd = tsdn_tsd(tsdn);
-	if (witness->rank == WITNESS_RANK_OMIT)
-		return;
-
-	/*
-	 * Check whether owner before removal, rather than relying on
-	 * witness_assert_owner() to abort, so that unit tests can test this
-	 * function's failure mode without causing undefined behavior.
-	 */
-	if (witness_owner(tsd, witness)) {
-		witnesses = tsd_witnessesp_get(tsd);
-		ql_remove(witnesses, witness, link);
-	} else
-		witness_assert_owner(tsdn, witness);
-}
-#endif
-
-#endif /* JEMALLOC_INTERNAL_WITNESS_INLINES_H */
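
The witness checks above enforce that locks are acquired in non-decreasing rank order so that rank reversals (potential deadlocks) are caught in debug builds. A deliberately simplified sketch of the core rule, ignoring the forking and comparator cases handled above (toy code, not the jemalloc implementation):

#include <assert.h>
#include <stddef.h>

#define	MAX_HELD	16

typedef struct {
	unsigned	ranks[MAX_HELD];
	size_t		depth;
} toy_witness_stack_t;

static void
toy_witness_lock(toy_witness_stack_t *held, unsigned rank)
{
	/* A rank order reversal would be a potential deadlock. */
	assert(held->depth == 0 || held->ranks[held->depth - 1] < rank);
	assert(held->depth < MAX_HELD);
	held->ranks[held->depth++] = rank;
}

static void
toy_witness_unlock(toy_witness_stack_t *held)
{
	assert(held->depth > 0);
	held->depth--;
}
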
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/witness_structs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/witness_structs.h
deleted file mode 100644
index 95d1970..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/witness_structs.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_WITNESS_STRUCTS_H
-#define JEMALLOC_INTERNAL_WITNESS_STRUCTS_H
-
-struct witness_s {
-	/* Name, used for printing lock order reversal messages. */
-	const char		*name;
-
-	/*
-	 * Witness rank, where 0 is lowest and UINT_MAX is highest.  Witnesses
-	 * must be acquired in order of increasing rank.
-	 */
-	witness_rank_t		rank;
-
-	/*
-	 * If two witnesses are of equal rank and they have the same comp
-	 * function pointer, it is called as a last attempt to differentiate
-	 * between witnesses of equal rank.
-	 */
-	witness_comp_t		*comp;
-
-	/* Opaque data, passed to comp(). */
-	void			*opaque;
-
-	/* Linkage for thread's currently owned locks. */
-	ql_elm(witness_t)	link;
-};
-
-#endif /* JEMALLOC_INTERNAL_WITNESS_STRUCTS_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/witness_types.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/witness_types.h
deleted file mode 100644
index ef96282..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/internal/witness_types.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_WITNESS_TYPES_H
-#define JEMALLOC_INTERNAL_WITNESS_TYPES_H
-
-typedef struct witness_s witness_t;
-typedef unsigned witness_rank_t;
-typedef ql_head(witness_t) witness_list_t;
-typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
-    void *);
-
-/*
- * Lock ranks.  Witnesses with rank WITNESS_RANK_OMIT are completely ignored by
- * the witness machinery.
- */
-#define	WITNESS_RANK_OMIT		0U
-
-#define	WITNESS_RANK_INIT		1U
-#define	WITNESS_RANK_CTL		1U
-#define	WITNESS_RANK_ARENAS		2U
-
-#define	WITNESS_RANK_PROF_DUMP		3U
-#define	WITNESS_RANK_PROF_BT2GCTX	4U
-#define	WITNESS_RANK_PROF_TDATAS	5U
-#define	WITNESS_RANK_PROF_TDATA		6U
-#define	WITNESS_RANK_PROF_GCTX		7U
-
-#define	WITNESS_RANK_ARENA		8U
-#define	WITNESS_RANK_ARENA_EXTENTS	9U
-#define	WITNESS_RANK_ARENA_EXTENT_CACHE	10
-
-#define	WITNESS_RANK_RTREE_ELM		11U
-#define	WITNESS_RANK_RTREE		12U
-#define	WITNESS_RANK_BASE		13U
-
-#define	WITNESS_RANK_LEAF		0xffffffffU
-#define	WITNESS_RANK_ARENA_BIN		WITNESS_RANK_LEAF
-#define	WITNESS_RANK_ARENA_LARGE	WITNESS_RANK_LEAF
-#define	WITNESS_RANK_DSS		WITNESS_RANK_LEAF
-#define	WITNESS_RANK_PROF_ACTIVE	WITNESS_RANK_LEAF
-#define	WITNESS_RANK_PROF_DUMP_SEQ	WITNESS_RANK_LEAF
-#define	WITNESS_RANK_PROF_GDUMP		WITNESS_RANK_LEAF
-#define	WITNESS_RANK_PROF_NEXT_THR_UID	WITNESS_RANK_LEAF
-#define	WITNESS_RANK_PROF_THREAD_ACTIVE_INIT	WITNESS_RANK_LEAF
-
-#define	WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}}
-
-#endif /* JEMALLOC_INTERNAL_WITNESS_TYPES_H */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc.h
deleted file mode 100644
index b4f8d5f..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc.h
+++ /dev/null
@@ -1,414 +0,0 @@
-#ifndef JEMALLOC_H_
-#define	JEMALLOC_H_
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Defined if __attribute__((...)) syntax is supported. */
-#define	JEMALLOC_HAVE_ATTR
-
-/* Defined if alloc_size attribute is supported. */
-#define	JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-
-/* Defined if format(gnu_printf, ...) attribute is supported. */
-#if !defined(__clang__)
-#define	JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-#endif
-
-/* Defined if format(printf, ...) attribute is supported. */
-#define	JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
-
-/*
- * Define overrides for non-standard allocator-related functions if they are
- * present on the system.
- */
-#define	JEMALLOC_OVERRIDE_MEMALIGN
-#define	JEMALLOC_OVERRIDE_VALLOC
-
-/*
- * At least Linux omits the "const" in:
- *
- *   size_t malloc_usable_size(const void *ptr);
- *
- * Match the operating system's prototype.
- */
-#define	JEMALLOC_USABLE_SIZE_CONST
-
-/*
- * If defined, specify throw() for the public function prototypes when compiling
- * with C++.  The only justification for this is to match the prototypes that
- * glibc defines.
- */
-#undef	JEMALLOC_USE_CXX_THROW
-
-#ifdef _MSC_VER
-#  ifdef _WIN64
-#    define LG_SIZEOF_PTR_WIN 3
-#  else
-#    define LG_SIZEOF_PTR_WIN 2
-#  endif
-#endif
-
-/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
-#define	LG_SIZEOF_PTR 3
-
-/*
- * Name mangling for public symbols is controlled by --with-mangling and
- * --with-jemalloc-prefix.  With default settings the je_ prefix is stripped by
- * these macro definitions.
- */
-#ifndef JEMALLOC_NO_RENAME
-#  define je_malloc_conf malloc_conf
-#  define je_malloc_message malloc_message
-#  define je_malloc malloc
-#  define je_calloc calloc
-#  define je_posix_memalign posix_memalign
-#  define je_aligned_alloc aligned_alloc
-#  define je_realloc realloc
-#  define je_free free
-#  define je_mallocx mallocx
-#  define je_rallocx rallocx
-#  define je_xallocx xallocx
-#  define je_sallocx sallocx
-#  define je_dallocx dallocx
-#  define je_sdallocx sdallocx
-#  define je_nallocx nallocx
-#  define je_mallctl mallctl
-#  define je_mallctlnametomib mallctlnametomib
-#  define je_mallctlbymib mallctlbymib
-#  define je_malloc_stats_print malloc_stats_print
-#  define je_malloc_usable_size malloc_usable_size
-#  define je_memalign memalign
-#  define je_valloc valloc
-#endif
-
-#include <stdlib.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <limits.h>
-#include <strings.h>
-
-#define	JEMALLOC_VERSION "0.0.0-0-g0000000000000000000000000000000000000000"
-#define	JEMALLOC_VERSION_MAJOR 0
-#define	JEMALLOC_VERSION_MINOR 0
-#define	JEMALLOC_VERSION_BUGFIX 0
-#define	JEMALLOC_VERSION_NREV 0
-#define	JEMALLOC_VERSION_GID "0000000000000000000000000000000000000000"
-
-#define	MALLOCX_LG_ALIGN(la)	((int)(la))
-#if LG_SIZEOF_PTR == 2
-#  define MALLOCX_ALIGN(a)	((int)(ffs((int)(a))-1))
-#else
-#  define MALLOCX_ALIGN(a)						\
-     ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 :	\
-     ffs((int)(((size_t)(a))>>32))+31))
-#endif
-#define	MALLOCX_ZERO	((int)0x40)
-/*
- * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
- * encodes MALLOCX_TCACHE_NONE.
- */
-#define	MALLOCX_TCACHE(tc)	((int)(((tc)+2) << 8))
-#define	MALLOCX_TCACHE_NONE	MALLOCX_TCACHE(-1)
-/*
- * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
- */
-#define	MALLOCX_ARENA(a)	((((int)(a))+1) << 20)
-
-/*
- * Use as arena index in "arena.<i>.{purge,decay,dss}" and
- * "stats.arenas.<i>.*" mallctl interfaces to select all arenas.  This
- * definition is intentionally specified in raw decimal format to support
- * cpp-based string concatenation, e.g.
- *
- *   #define STRINGIFY_HELPER(x) #x
- *   #define STRINGIFY(x) STRINGIFY_HELPER(x)
- *
- *   mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
- *       0);
- */
-#define	MALLCTL_ARENAS_ALL	4096
-/*
- * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
- * destroyed arenas.
- */
-#define	MALLCTL_ARENAS_DESTROYED	4097
-
-#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
-#  define JEMALLOC_CXX_THROW throw()
-#else
-#  define JEMALLOC_CXX_THROW
-#endif
-
-#if defined(_MSC_VER)
-#  define JEMALLOC_ATTR(s)
-#  define JEMALLOC_ALIGNED(s) __declspec(align(s))
-#  define JEMALLOC_ALLOC_SIZE(s)
-#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
-#  ifndef JEMALLOC_EXPORT
-#    ifdef DLLEXPORT
-#      define JEMALLOC_EXPORT __declspec(dllexport)
-#    else
-#      define JEMALLOC_EXPORT __declspec(dllimport)
-#    endif
-#  endif
-#  define JEMALLOC_FORMAT_PRINTF(s, i)
-#  define JEMALLOC_NOINLINE __declspec(noinline)
-#  ifdef __cplusplus
-#    define JEMALLOC_NOTHROW __declspec(nothrow)
-#  else
-#    define JEMALLOC_NOTHROW
-#  endif
-#  define JEMALLOC_SECTION(s) __declspec(allocate(s))
-#  define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
-#  if _MSC_VER >= 1900 && !defined(__EDG__)
-#    define JEMALLOC_ALLOCATOR __declspec(allocator)
-#  else
-#    define JEMALLOC_ALLOCATOR
-#  endif
-#elif defined(JEMALLOC_HAVE_ATTR)
-#  define JEMALLOC_ATTR(s) __attribute__((s))
-#  define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
-#  ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-#    define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
-#    define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
-#  else
-#    define JEMALLOC_ALLOC_SIZE(s)
-#    define JEMALLOC_ALLOC_SIZE2(s1, s2)
-#  endif
-#  ifndef JEMALLOC_EXPORT
-#    define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
-#  endif
-#  ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
-#  elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
-#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
-#  else
-#    define JEMALLOC_FORMAT_PRINTF(s, i)
-#  endif
-#  define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
-#  define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
-#  define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
-#  define JEMALLOC_RESTRICT_RETURN
-#  define JEMALLOC_ALLOCATOR
-#else
-#  define JEMALLOC_ATTR(s)
-#  define JEMALLOC_ALIGNED(s)
-#  define JEMALLOC_ALLOC_SIZE(s)
-#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
-#  define JEMALLOC_EXPORT
-#  define JEMALLOC_FORMAT_PRINTF(s, i)
-#  define JEMALLOC_NOINLINE
-#  define JEMALLOC_NOTHROW
-#  define JEMALLOC_SECTION(s)
-#  define JEMALLOC_RESTRICT_RETURN
-#  define JEMALLOC_ALLOCATOR
-#endif
-
-/*
- * The je_ prefix on the following public symbol declarations is an artifact
- * of namespace management, and should be omitted in application code unless
- * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
- */
-extern JEMALLOC_EXPORT const char	*je_malloc_conf;
-extern JEMALLOC_EXPORT void		(*je_malloc_message)(void *cbopaque,
-    const char *s);
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_malloc(size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_calloc(size_t num, size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	je_posix_memalign(void **memptr,
-    size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_aligned_alloc(size_t alignment,
-    size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
-    JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_realloc(void *ptr, size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	je_free(void *ptr)
-    JEMALLOC_CXX_THROW;
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_mallocx(size_t size, int flags)
-    JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_rallocx(void *ptr, size_t size,
-    int flags) JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	je_xallocx(void *ptr, size_t size,
-    size_t extra, int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	je_sallocx(const void *ptr,
-    int flags) JEMALLOC_ATTR(pure);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	je_dallocx(void *ptr, int flags);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	je_sdallocx(void *ptr, size_t size,
-    int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	je_nallocx(size_t size, int flags)
-    JEMALLOC_ATTR(pure);
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	je_mallctl(const char *name,
-    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	je_mallctlnametomib(const char *name,
-    size_t *mibp, size_t *miblenp);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	je_mallctlbymib(const size_t *mib,
-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	je_malloc_stats_print(
-    void (*write_cb)(void *, const char *), void *je_cbopaque,
-    const char *opts);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	je_malloc_usable_size(
-    JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
-
-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_memalign(size_t alignment, size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
-#endif
-
-#ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_valloc(size_t size) JEMALLOC_CXX_THROW
-    JEMALLOC_ATTR(malloc);
-#endif
-
-typedef struct extent_hooks_s extent_hooks_t;
-
-/*
- * void *
- * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
- *     size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
- */
-typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
-    bool *, unsigned);
-
-/*
- * bool
- * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     bool committed, unsigned arena_ind);
- */
-typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
-    unsigned);
-
-/*
- * bool
- * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
-    unsigned);
-
-/*
- * bool
- * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
-    size_t, unsigned);
-
-/*
- * bool
- * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
-    unsigned);
-
-/*
- * bool
- * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
-    bool, unsigned);
-
-/*
- * bool
- * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
- *     void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
-    bool, unsigned);
-
-struct extent_hooks_s {
-	extent_alloc_t		*alloc;
-	extent_dalloc_t		*dalloc;
-	extent_commit_t		*commit;
-	extent_decommit_t	*decommit;
-	extent_purge_t		*purge_lazy;
-	extent_purge_t		*purge_forced;
-	extent_split_t		*split;
-	extent_merge_t		*merge;
-};
-
-/*
- * By default application code must explicitly refer to mangled symbol names,
- * so that it is possible to use jemalloc in conjunction with another allocator
- * in the same application.  Define JEMALLOC_MANGLE in order to cause automatic
- * name mangling that matches the API prefixing that happened as a result of
- * --with-mangling and/or --with-jemalloc-prefix configuration settings.
- */
-#ifdef JEMALLOC_MANGLE
-#  ifndef JEMALLOC_NO_DEMANGLE
-#    define JEMALLOC_NO_DEMANGLE
-#  endif
-#  define malloc_conf je_malloc_conf
-#  define malloc_message je_malloc_message
-#  define malloc je_malloc
-#  define calloc je_calloc
-#  define posix_memalign je_posix_memalign
-#  define aligned_alloc je_aligned_alloc
-#  define realloc je_realloc
-#  define free je_free
-#  define mallocx je_mallocx
-#  define rallocx je_rallocx
-#  define xallocx je_xallocx
-#  define sallocx je_sallocx
-#  define dallocx je_dallocx
-#  define sdallocx je_sdallocx
-#  define nallocx je_nallocx
-#  define mallctl je_mallctl
-#  define mallctlnametomib je_mallctlnametomib
-#  define mallctlbymib je_mallctlbymib
-#  define malloc_stats_print je_malloc_stats_print
-#  define malloc_usable_size je_malloc_usable_size
-#  define memalign je_memalign
-#  define valloc je_valloc
-#endif
-
-/*
- * The je_* macros can be used as stable alternative names for the
- * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined.  This is primarily
- * meant for use in jemalloc itself, but it can be used by application code to
- * provide isolation from the name mangling specified via --with-mangling
- * and/or --with-jemalloc-prefix.
- */
-#ifndef JEMALLOC_NO_DEMANGLE
-#  undef je_malloc_conf
-#  undef je_malloc_message
-#  undef je_malloc
-#  undef je_calloc
-#  undef je_posix_memalign
-#  undef je_aligned_alloc
-#  undef je_realloc
-#  undef je_free
-#  undef je_mallocx
-#  undef je_rallocx
-#  undef je_xallocx
-#  undef je_sallocx
-#  undef je_dallocx
-#  undef je_sdallocx
-#  undef je_nallocx
-#  undef je_mallctl
-#  undef je_mallctlnametomib
-#  undef je_mallctlbymib
-#  undef je_malloc_stats_print
-#  undef je_malloc_usable_size
-#  undef je_memalign
-#  undef je_valloc
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* JEMALLOC_H_ */
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc.sh b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc.sh
deleted file mode 100755
index c085814..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh
-
-objroot=$1
-
-cat <<EOF
-#ifndef JEMALLOC_H_
-#define	JEMALLOC_H_
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-EOF
-
-for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
-           jemalloc_protos.h jemalloc_typedefs.h jemalloc_mangle.h ; do
-  cat "${objroot}include/jemalloc/${hdr}" \
-      | grep -v 'Generated from .* by configure\.' \
-      | sed -e 's/^#define /#define	/g' \
-      | sed -e 's/ $//g'
-  echo
-done
-
-cat <<EOF
-#ifdef __cplusplus
-}
-#endif
-#endif /* JEMALLOC_H_ */
-EOF
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_defs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_defs.h
deleted file mode 100644
index 1d45302..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_defs.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* include/jemalloc/jemalloc_defs.h.  Generated from jemalloc_defs.h.in by configure.  */
-/* Defined if __attribute__((...)) syntax is supported. */
-#define JEMALLOC_HAVE_ATTR 
-
-/* Defined if alloc_size attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE 
-
-/* Defined if format(gnu_printf, ...) attribute is supported. */
-#if !defined(__clang__)
-#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF 
-#endif
-
-/* Defined if format(printf, ...) attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF 
-
-/*
- * Define overrides for non-standard allocator-related functions if they are
- * present on the system.
- */
-#define JEMALLOC_OVERRIDE_MEMALIGN 
-#define JEMALLOC_OVERRIDE_VALLOC 
-
-/*
- * At least Linux omits the "const" in:
- *
- *   size_t malloc_usable_size(const void *ptr);
- *
- * Match the operating system's prototype.
- */
-#define JEMALLOC_USABLE_SIZE_CONST 
-
-/*
- * If defined, specify throw() for the public function prototypes when compiling
- * with C++.  The only justification for this is to match the prototypes that
- * glibc defines.
- */
-#define JEMALLOC_USE_CXX_THROW 
-
-#ifdef _MSC_VER
-#  ifdef _WIN64
-#    define LG_SIZEOF_PTR_WIN 3
-#  else
-#    define LG_SIZEOF_PTR_WIN 2
-#  endif
-#endif
-
-/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
-#define LG_SIZEOF_PTR 3
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_defs.h.in b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_defs.h.in
deleted file mode 100644
index 6d89435..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_defs.h.in
+++ /dev/null
@@ -1,45 +0,0 @@
-/* Defined if __attribute__((...)) syntax is supported. */
-#undef JEMALLOC_HAVE_ATTR
-
-/* Defined if alloc_size attribute is supported. */
-#undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-
-/* Defined if format(gnu_printf, ...) attribute is supported. */
-#undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-
-/* Defined if format(printf, ...) attribute is supported. */
-#undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
-
-/*
- * Define overrides for non-standard allocator-related functions if they are
- * present on the system.
- */
-#undef JEMALLOC_OVERRIDE_MEMALIGN
-#undef JEMALLOC_OVERRIDE_VALLOC
-
-/*
- * At least Linux omits the "const" in:
- *
- *   size_t malloc_usable_size(const void *ptr);
- *
- * Match the operating system's prototype.
- */
-#undef JEMALLOC_USABLE_SIZE_CONST
-
-/*
- * If defined, specify throw() for the public function prototypes when compiling
- * with C++.  The only justification for this is to match the prototypes that
- * glibc defines.
- */
-#undef JEMALLOC_USE_CXX_THROW
-
-#ifdef _MSC_VER
-#  ifdef _WIN64
-#    define LG_SIZEOF_PTR_WIN 3
-#  else
-#    define LG_SIZEOF_PTR_WIN 2
-#  endif
-#endif
-
-/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
-#undef LG_SIZEOF_PTR
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_macros.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_macros.h
deleted file mode 100644
index 47103f7..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_macros.h
+++ /dev/null
@@ -1,122 +0,0 @@
-#include <stdlib.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <limits.h>
-#include <strings.h>
-
-#define	JEMALLOC_VERSION "0.0.0-0-g0000000000000000000000000000000000000000"
-#define	JEMALLOC_VERSION_MAJOR 0
-#define	JEMALLOC_VERSION_MINOR 0
-#define	JEMALLOC_VERSION_BUGFIX 0
-#define	JEMALLOC_VERSION_NREV 0
-#define	JEMALLOC_VERSION_GID "0000000000000000000000000000000000000000"
-
-#define	MALLOCX_LG_ALIGN(la)	((int)(la))
-#if LG_SIZEOF_PTR == 2
-#  define MALLOCX_ALIGN(a)	((int)(ffs((int)(a))-1))
-#else
-#  define MALLOCX_ALIGN(a)						\
-     ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 :	\
-     ffs((int)(((size_t)(a))>>32))+31))
-#endif
-#define	MALLOCX_ZERO	((int)0x40)
-/*
- * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
- * encodes MALLOCX_TCACHE_NONE.
- */
-#define	MALLOCX_TCACHE(tc)	((int)(((tc)+2) << 8))
-#define	MALLOCX_TCACHE_NONE	MALLOCX_TCACHE(-1)
-/*
- * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
- */
-#define	MALLOCX_ARENA(a)	((((int)(a))+1) << 20)
-
-/*
- * Use as arena index in "arena.<i>.{purge,decay,dss}" and
- * "stats.arenas.<i>.*" mallctl interfaces to select all arenas.  This
- * definition is intentionally specified in raw decimal format to support
- * cpp-based string concatenation, e.g.
- *
- *   #define STRINGIFY_HELPER(x) #x
- *   #define STRINGIFY(x) STRINGIFY_HELPER(x)
- *
- *   mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
- *       0);
- */
-#define	MALLCTL_ARENAS_ALL	4096
-/*
- * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
- * destroyed arenas.
- */
-#define	MALLCTL_ARENAS_DESTROYED	4097
-
-#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
-#  define JEMALLOC_CXX_THROW throw()
-#else
-#  define JEMALLOC_CXX_THROW
-#endif
-
-#if defined(_MSC_VER)
-#  define JEMALLOC_ATTR(s)
-#  define JEMALLOC_ALIGNED(s) __declspec(align(s))
-#  define JEMALLOC_ALLOC_SIZE(s)
-#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
-#  ifndef JEMALLOC_EXPORT
-#    ifdef DLLEXPORT
-#      define JEMALLOC_EXPORT __declspec(dllexport)
-#    else
-#      define JEMALLOC_EXPORT __declspec(dllimport)
-#    endif
-#  endif
-#  define JEMALLOC_FORMAT_PRINTF(s, i)
-#  define JEMALLOC_NOINLINE __declspec(noinline)
-#  ifdef __cplusplus
-#    define JEMALLOC_NOTHROW __declspec(nothrow)
-#  else
-#    define JEMALLOC_NOTHROW
-#  endif
-#  define JEMALLOC_SECTION(s) __declspec(allocate(s))
-#  define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
-#  if _MSC_VER >= 1900 && !defined(__EDG__)
-#    define JEMALLOC_ALLOCATOR __declspec(allocator)
-#  else
-#    define JEMALLOC_ALLOCATOR
-#  endif
-#elif defined(JEMALLOC_HAVE_ATTR)
-#  define JEMALLOC_ATTR(s) __attribute__((s))
-#  define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
-#  ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-#    define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
-#    define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
-#  else
-#    define JEMALLOC_ALLOC_SIZE(s)
-#    define JEMALLOC_ALLOC_SIZE2(s1, s2)
-#  endif
-#  ifndef JEMALLOC_EXPORT
-#    define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
-#  endif
-#  ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
-#  elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
-#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
-#  else
-#    define JEMALLOC_FORMAT_PRINTF(s, i)
-#  endif
-#  define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
-#  define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
-#  define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
-#  define JEMALLOC_RESTRICT_RETURN
-#  define JEMALLOC_ALLOCATOR
-#else
-#  define JEMALLOC_ATTR(s)
-#  define JEMALLOC_ALIGNED(s)
-#  define JEMALLOC_ALLOC_SIZE(s)
-#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
-#  define JEMALLOC_EXPORT
-#  define JEMALLOC_FORMAT_PRINTF(s, i)
-#  define JEMALLOC_NOINLINE
-#  define JEMALLOC_NOTHROW
-#  define JEMALLOC_SECTION(s)
-#  define JEMALLOC_RESTRICT_RETURN
-#  define JEMALLOC_ALLOCATOR
-#endif
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_macros.h.in b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_macros.h.in
deleted file mode 100644
index 05bcdd7..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_macros.h.in
+++ /dev/null
@@ -1,122 +0,0 @@
-#include <stdlib.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <limits.h>
-#include <strings.h>
-
-#define	JEMALLOC_VERSION "@jemalloc_version@"
-#define	JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
-#define	JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
-#define	JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
-#define	JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
-#define	JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
-
-#define	MALLOCX_LG_ALIGN(la)	((int)(la))
-#if LG_SIZEOF_PTR == 2
-#  define MALLOCX_ALIGN(a)	((int)(ffs((int)(a))-1))
-#else
-#  define MALLOCX_ALIGN(a)						\
-     ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 :	\
-     ffs((int)(((size_t)(a))>>32))+31))
-#endif
-#define	MALLOCX_ZERO	((int)0x40)
-/*
- * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
- * encodes MALLOCX_TCACHE_NONE.
- */
-#define	MALLOCX_TCACHE(tc)	((int)(((tc)+2) << 8))
-#define	MALLOCX_TCACHE_NONE	MALLOCX_TCACHE(-1)
-/*
- * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
- */
-#define	MALLOCX_ARENA(a)	((((int)(a))+1) << 20)
-
-/*
- * Use as arena index in "arena.<i>.{purge,decay,dss}" and
- * "stats.arenas.<i>.*" mallctl interfaces to select all arenas.  This
- * definition is intentionally specified in raw decimal format to support
- * cpp-based string concatenation, e.g.
- *
- *   #define STRINGIFY_HELPER(x) #x
- *   #define STRINGIFY(x) STRINGIFY_HELPER(x)
- *
- *   mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
- *       0);
- */
-#define	MALLCTL_ARENAS_ALL	4096
-/*
- * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
- * destroyed arenas.
- */
-#define	MALLCTL_ARENAS_DESTROYED	4097
-
-#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
-#  define JEMALLOC_CXX_THROW throw()
-#else
-#  define JEMALLOC_CXX_THROW
-#endif
-
-#if defined(_MSC_VER)
-#  define JEMALLOC_ATTR(s)
-#  define JEMALLOC_ALIGNED(s) __declspec(align(s))
-#  define JEMALLOC_ALLOC_SIZE(s)
-#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
-#  ifndef JEMALLOC_EXPORT
-#    ifdef DLLEXPORT
-#      define JEMALLOC_EXPORT __declspec(dllexport)
-#    else
-#      define JEMALLOC_EXPORT __declspec(dllimport)
-#    endif
-#  endif
-#  define JEMALLOC_FORMAT_PRINTF(s, i)
-#  define JEMALLOC_NOINLINE __declspec(noinline)
-#  ifdef __cplusplus
-#    define JEMALLOC_NOTHROW __declspec(nothrow)
-#  else
-#    define JEMALLOC_NOTHROW
-#  endif
-#  define JEMALLOC_SECTION(s) __declspec(allocate(s))
-#  define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
-#  if _MSC_VER >= 1900 && !defined(__EDG__)
-#    define JEMALLOC_ALLOCATOR __declspec(allocator)
-#  else
-#    define JEMALLOC_ALLOCATOR
-#  endif
-#elif defined(JEMALLOC_HAVE_ATTR)
-#  define JEMALLOC_ATTR(s) __attribute__((s))
-#  define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
-#  ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-#    define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
-#    define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
-#  else
-#    define JEMALLOC_ALLOC_SIZE(s)
-#    define JEMALLOC_ALLOC_SIZE2(s1, s2)
-#  endif
-#  ifndef JEMALLOC_EXPORT
-#    define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
-#  endif
-#  ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
-#  elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
-#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
-#  else
-#    define JEMALLOC_FORMAT_PRINTF(s, i)
-#  endif
-#  define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
-#  define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
-#  define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
-#  define JEMALLOC_RESTRICT_RETURN
-#  define JEMALLOC_ALLOCATOR
-#else
-#  define JEMALLOC_ATTR(s)
-#  define JEMALLOC_ALIGNED(s)
-#  define JEMALLOC_ALLOC_SIZE(s)
-#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
-#  define JEMALLOC_EXPORT
-#  define JEMALLOC_FORMAT_PRINTF(s, i)
-#  define JEMALLOC_NOINLINE
-#  define JEMALLOC_NOTHROW
-#  define JEMALLOC_SECTION(s)
-#  define JEMALLOC_RESTRICT_RETURN
-#  define JEMALLOC_ALLOCATOR
-#endif
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_mangle.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_mangle.h
deleted file mode 100644
index 34872e8..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_mangle.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * By default application code must explicitly refer to mangled symbol names,
- * so that it is possible to use jemalloc in conjunction with another allocator
- * in the same application.  Define JEMALLOC_MANGLE in order to cause automatic
- * name mangling that matches the API prefixing that happened as a result of
- * --with-mangling and/or --with-jemalloc-prefix configuration settings.
- */
-#ifdef JEMALLOC_MANGLE
-#  ifndef JEMALLOC_NO_DEMANGLE
-#    define JEMALLOC_NO_DEMANGLE
-#  endif
-#  define malloc_conf je_malloc_conf
-#  define malloc_message je_malloc_message
-#  define malloc je_malloc
-#  define calloc je_calloc
-#  define posix_memalign je_posix_memalign
-#  define aligned_alloc je_aligned_alloc
-#  define realloc je_realloc
-#  define free je_free
-#  define mallocx je_mallocx
-#  define rallocx je_rallocx
-#  define xallocx je_xallocx
-#  define sallocx je_sallocx
-#  define dallocx je_dallocx
-#  define sdallocx je_sdallocx
-#  define nallocx je_nallocx
-#  define mallctl je_mallctl
-#  define mallctlnametomib je_mallctlnametomib
-#  define mallctlbymib je_mallctlbymib
-#  define malloc_stats_print je_malloc_stats_print
-#  define malloc_usable_size je_malloc_usable_size
-#  define memalign je_memalign
-#  define valloc je_valloc
-#endif
-
-/*
- * The je_* macros can be used as stable alternative names for the
- * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined.  This is primarily
- * meant for use in jemalloc itself, but it can be used by application code to
- * provide isolation from the name mangling specified via --with-mangling
- * and/or --with-jemalloc-prefix.
- */
-#ifndef JEMALLOC_NO_DEMANGLE
-#  undef je_malloc_conf
-#  undef je_malloc_message
-#  undef je_malloc
-#  undef je_calloc
-#  undef je_posix_memalign
-#  undef je_aligned_alloc
-#  undef je_realloc
-#  undef je_free
-#  undef je_mallocx
-#  undef je_rallocx
-#  undef je_xallocx
-#  undef je_sallocx
-#  undef je_dallocx
-#  undef je_sdallocx
-#  undef je_nallocx
-#  undef je_mallctl
-#  undef je_mallctlnametomib
-#  undef je_mallctlbymib
-#  undef je_malloc_stats_print
-#  undef je_malloc_usable_size
-#  undef je_memalign
-#  undef je_valloc
-#endif
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_mangle.sh b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_mangle.sh
deleted file mode 100755
index df328b7..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_mangle.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/sh
-
-public_symbols_txt=$1
-symbol_prefix=$2
-
-cat <<EOF
-/*
- * By default application code must explicitly refer to mangled symbol names,
- * so that it is possible to use jemalloc in conjunction with another allocator
- * in the same application.  Define JEMALLOC_MANGLE in order to cause automatic
- * name mangling that matches the API prefixing that happened as a result of
- * --with-mangling and/or --with-jemalloc-prefix configuration settings.
- */
-#ifdef JEMALLOC_MANGLE
-#  ifndef JEMALLOC_NO_DEMANGLE
-#    define JEMALLOC_NO_DEMANGLE
-#  endif
-EOF
-
-for nm in `cat ${public_symbols_txt}` ; do
-  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
-  echo "#  define ${n} ${symbol_prefix}${n}"
-done
-
-cat <<EOF
-#endif
-
-/*
- * The ${symbol_prefix}* macros can be used as stable alternative names for the
- * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined.  This is primarily
- * meant for use in jemalloc itself, but it can be used by application code to
- * provide isolation from the name mangling specified via --with-mangling
- * and/or --with-jemalloc-prefix.
- */
-#ifndef JEMALLOC_NO_DEMANGLE
-EOF
-
-for nm in `cat ${public_symbols_txt}` ; do
-  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
-  echo "#  undef ${symbol_prefix}${n}"
-done
-
-cat <<EOF
-#endif
-EOF
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_mangle_jet.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_mangle_jet.h
deleted file mode 100644
index db5b7b0..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_mangle_jet.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * By default application code must explicitly refer to mangled symbol names,
- * so that it is possible to use jemalloc in conjunction with another allocator
- * in the same application.  Define JEMALLOC_MANGLE in order to cause automatic
- * name mangling that matches the API prefixing that happened as a result of
- * --with-mangling and/or --with-jemalloc-prefix configuration settings.
- */
-#ifdef JEMALLOC_MANGLE
-#  ifndef JEMALLOC_NO_DEMANGLE
-#    define JEMALLOC_NO_DEMANGLE
-#  endif
-#  define malloc_conf jet_malloc_conf
-#  define malloc_message jet_malloc_message
-#  define malloc jet_malloc
-#  define calloc jet_calloc
-#  define posix_memalign jet_posix_memalign
-#  define aligned_alloc jet_aligned_alloc
-#  define realloc jet_realloc
-#  define free jet_free
-#  define mallocx jet_mallocx
-#  define rallocx jet_rallocx
-#  define xallocx jet_xallocx
-#  define sallocx jet_sallocx
-#  define dallocx jet_dallocx
-#  define sdallocx jet_sdallocx
-#  define nallocx jet_nallocx
-#  define mallctl jet_mallctl
-#  define mallctlnametomib jet_mallctlnametomib
-#  define mallctlbymib jet_mallctlbymib
-#  define malloc_stats_print jet_malloc_stats_print
-#  define malloc_usable_size jet_malloc_usable_size
-#  define memalign jet_memalign
-#  define valloc jet_valloc
-#endif
-
-/*
- * The jet_* macros can be used as stable alternative names for the
- * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined.  This is primarily
- * meant for use in jemalloc itself, but it can be used by application code to
- * provide isolation from the name mangling specified via --with-mangling
- * and/or --with-jemalloc-prefix.
- */
-#ifndef JEMALLOC_NO_DEMANGLE
-#  undef jet_malloc_conf
-#  undef jet_malloc_message
-#  undef jet_malloc
-#  undef jet_calloc
-#  undef jet_posix_memalign
-#  undef jet_aligned_alloc
-#  undef jet_realloc
-#  undef jet_free
-#  undef jet_mallocx
-#  undef jet_rallocx
-#  undef jet_xallocx
-#  undef jet_sallocx
-#  undef jet_dallocx
-#  undef jet_sdallocx
-#  undef jet_nallocx
-#  undef jet_mallctl
-#  undef jet_mallctlnametomib
-#  undef jet_mallctlbymib
-#  undef jet_malloc_stats_print
-#  undef jet_malloc_usable_size
-#  undef jet_memalign
-#  undef jet_valloc
-#endif
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_protos.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_protos.h
deleted file mode 100644
index ff025e30..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_protos.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * The je_ prefix on the following public symbol declarations is an artifact
- * of namespace management, and should be omitted in application code unless
- * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
- */
-extern JEMALLOC_EXPORT const char	*je_malloc_conf;
-extern JEMALLOC_EXPORT void		(*je_malloc_message)(void *cbopaque,
-    const char *s);
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_malloc(size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_calloc(size_t num, size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	je_posix_memalign(void **memptr,
-    size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_aligned_alloc(size_t alignment,
-    size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
-    JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_realloc(void *ptr, size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	je_free(void *ptr)
-    JEMALLOC_CXX_THROW;
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_mallocx(size_t size, int flags)
-    JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_rallocx(void *ptr, size_t size,
-    int flags) JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	je_xallocx(void *ptr, size_t size,
-    size_t extra, int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	je_sallocx(const void *ptr,
-    int flags) JEMALLOC_ATTR(pure);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	je_dallocx(void *ptr, int flags);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	je_sdallocx(void *ptr, size_t size,
-    int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	je_nallocx(size_t size, int flags)
-    JEMALLOC_ATTR(pure);
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	je_mallctl(const char *name,
-    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	je_mallctlnametomib(const char *name,
-    size_t *mibp, size_t *miblenp);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	je_mallctlbymib(const size_t *mib,
-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	je_malloc_stats_print(
-    void (*write_cb)(void *, const char *), void *je_cbopaque,
-    const char *opts);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	je_malloc_usable_size(
-    JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
-
-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_memalign(size_t alignment, size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
-#endif
-
-#ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*je_valloc(size_t size) JEMALLOC_CXX_THROW
-    JEMALLOC_ATTR(malloc);
-#endif
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_protos.h.in b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_protos.h.in
deleted file mode 100644
index a78414b..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_protos.h.in
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * The @je_@ prefix on the following public symbol declarations is an artifact
- * of namespace management, and should be omitted in application code unless
- * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h).
- */
-extern JEMALLOC_EXPORT const char	*@je_@malloc_conf;
-extern JEMALLOC_EXPORT void		(*@je_@malloc_message)(void *cbopaque,
-    const char *s);
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*@je_@malloc(size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*@je_@calloc(size_t num, size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	@je_@posix_memalign(void **memptr,
-    size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*@je_@aligned_alloc(size_t alignment,
-    size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
-    JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*@je_@realloc(void *ptr, size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	@je_@free(void *ptr)
-    JEMALLOC_CXX_THROW;
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*@je_@mallocx(size_t size, int flags)
-    JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*@je_@rallocx(void *ptr, size_t size,
-    int flags) JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	@je_@xallocx(void *ptr, size_t size,
-    size_t extra, int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	@je_@sallocx(const void *ptr,
-    int flags) JEMALLOC_ATTR(pure);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	@je_@dallocx(void *ptr, int flags);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	@je_@sdallocx(void *ptr, size_t size,
-    int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	@je_@nallocx(size_t size, int flags)
-    JEMALLOC_ATTR(pure);
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	@je_@mallctl(const char *name,
-    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	@je_@mallctlnametomib(const char *name,
-    size_t *mibp, size_t *miblenp);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	@je_@mallctlbymib(const size_t *mib,
-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	@je_@malloc_stats_print(
-    void (*write_cb)(void *, const char *), void *@je_@cbopaque,
-    const char *opts);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	@je_@malloc_usable_size(
-    JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
-
-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*@je_@memalign(size_t alignment, size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
-#endif
-
-#ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*@je_@valloc(size_t size) JEMALLOC_CXX_THROW
-    JEMALLOC_ATTR(malloc);
-#endif
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_protos_jet.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_protos_jet.h
deleted file mode 100644
index f71efef..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_protos_jet.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * The jet_ prefix on the following public symbol declarations is an artifact
- * of namespace management, and should be omitted in application code unless
- * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h).
- */
-extern JEMALLOC_EXPORT const char	*jet_malloc_conf;
-extern JEMALLOC_EXPORT void		(*jet_malloc_message)(void *cbopaque,
-    const char *s);
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*jet_malloc(size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*jet_calloc(size_t num, size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	jet_posix_memalign(void **memptr,
-    size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*jet_aligned_alloc(size_t alignment,
-    size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
-    JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*jet_realloc(void *ptr, size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	jet_free(void *ptr)
-    JEMALLOC_CXX_THROW;
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*jet_mallocx(size_t size, int flags)
-    JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*jet_rallocx(void *ptr, size_t size,
-    int flags) JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	jet_xallocx(void *ptr, size_t size,
-    size_t extra, int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	jet_sallocx(const void *ptr,
-    int flags) JEMALLOC_ATTR(pure);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	jet_dallocx(void *ptr, int flags);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	jet_sdallocx(void *ptr, size_t size,
-    int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	jet_nallocx(size_t size, int flags)
-    JEMALLOC_ATTR(pure);
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	jet_mallctl(const char *name,
-    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	jet_mallctlnametomib(const char *name,
-    size_t *mibp, size_t *miblenp);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	jet_mallctlbymib(const size_t *mib,
-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	jet_malloc_stats_print(
-    void (*write_cb)(void *, const char *), void *jet_cbopaque,
-    const char *opts);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	jet_malloc_usable_size(
-    JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
-
-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*jet_memalign(size_t alignment, size_t size)
-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
-#endif
-
-#ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW	*jet_valloc(size_t size) JEMALLOC_CXX_THROW
-    JEMALLOC_ATTR(malloc);
-#endif
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_rename.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_rename.h
deleted file mode 100644
index 1919e8a..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_rename.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Name mangling for public symbols is controlled by --with-mangling and
- * --with-jemalloc-prefix.  With default settings the je_ prefix is stripped by
- * these macro definitions.
- */
-#ifndef JEMALLOC_NO_RENAME
-#  define je_malloc_conf malloc_conf
-#  define je_malloc_message malloc_message
-#  define je_malloc malloc
-#  define je_calloc calloc
-#  define je_posix_memalign posix_memalign
-#  define je_aligned_alloc aligned_alloc
-#  define je_realloc realloc
-#  define je_free free
-#  define je_mallocx mallocx
-#  define je_rallocx rallocx
-#  define je_xallocx xallocx
-#  define je_sallocx sallocx
-#  define je_dallocx dallocx
-#  define je_sdallocx sdallocx
-#  define je_nallocx nallocx
-#  define je_mallctl mallctl
-#  define je_mallctlnametomib mallctlnametomib
-#  define je_mallctlbymib mallctlbymib
-#  define je_malloc_stats_print malloc_stats_print
-#  define je_malloc_usable_size malloc_usable_size
-#  define je_memalign memalign
-#  define je_valloc valloc
-#endif
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_rename.sh b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_rename.sh
deleted file mode 100755
index f943891..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_rename.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-
-public_symbols_txt=$1
-
-cat <<EOF
-/*
- * Name mangling for public symbols is controlled by --with-mangling and
- * --with-jemalloc-prefix.  With default settings the je_ prefix is stripped by
- * these macro definitions.
- */
-#ifndef JEMALLOC_NO_RENAME
-EOF
-
-for nm in `cat ${public_symbols_txt}` ; do
-  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
-  m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
-  echo "#  define je_${n} ${m}"
-done
-
-cat <<EOF
-#endif
-EOF
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_typedefs.h b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_typedefs.h
deleted file mode 100644
index 91b5a8d..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_typedefs.h
+++ /dev/null
@@ -1,68 +0,0 @@
-typedef struct extent_hooks_s extent_hooks_t;
-
-/*
- * void *
- * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
- *     size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
- */
-typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
-    bool *, unsigned);
-
-/*
- * bool
- * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     bool committed, unsigned arena_ind);
- */
-typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
-    unsigned);
-
-/*
- * bool
- * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
-    unsigned);
-
-/*
- * bool
- * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
-    size_t, unsigned);
-
-/*
- * bool
- * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
-    unsigned);
-
-/*
- * bool
- * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
-    bool, unsigned);
-
-/*
- * bool
- * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
- *     void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
-    bool, unsigned);
-
-struct extent_hooks_s {
-	extent_alloc_t		*alloc;
-	extent_dalloc_t		*dalloc;
-	extent_commit_t		*commit;
-	extent_decommit_t	*decommit;
-	extent_purge_t		*purge_lazy;
-	extent_purge_t		*purge_forced;
-	extent_split_t		*split;
-	extent_merge_t		*merge;
-};
diff --git a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_typedefs.h.in b/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_typedefs.h.in
deleted file mode 100644
index 91b5a8d..0000000
--- a/zircon/third_party/ulib/jemalloc/include/jemalloc/jemalloc_typedefs.h.in
+++ /dev/null
@@ -1,68 +0,0 @@
-typedef struct extent_hooks_s extent_hooks_t;
-
-/*
- * void *
- * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
- *     size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
- */
-typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
-    bool *, unsigned);
-
-/*
- * bool
- * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     bool committed, unsigned arena_ind);
- */
-typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
-    unsigned);
-
-/*
- * bool
- * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
-    unsigned);
-
-/*
- * bool
- * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
-    size_t, unsigned);
-
-/*
- * bool
- * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
-    unsigned);
-
-/*
- * bool
- * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
- *     size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
-    bool, unsigned);
-
-/*
- * bool
- * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
- *     void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
-    bool, unsigned);
-
-struct extent_hooks_s {
-	extent_alloc_t		*alloc;
-	extent_dalloc_t		*dalloc;
-	extent_commit_t		*commit;
-	extent_decommit_t	*decommit;
-	extent_purge_t		*purge_lazy;
-	extent_purge_t		*purge_forced;
-	extent_split_t		*split;
-	extent_merge_t		*merge;
-};
diff --git a/zircon/third_party/ulib/jemalloc/include/msvc_compat/C99/stdbool.h b/zircon/third_party/ulib/jemalloc/include/msvc_compat/C99/stdbool.h
deleted file mode 100644
index d92160e..0000000
--- a/zircon/third_party/ulib/jemalloc/include/msvc_compat/C99/stdbool.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef stdbool_h
-#define stdbool_h
-
-#include <wtypes.h>
-
-/* MSVC doesn't define _Bool or bool in C, but does have BOOL */
-/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */
-/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as
- * a built-in type. */
-#ifndef __clang__
-typedef BOOL _Bool;
-#endif
-
-#define bool _Bool
-#define true 1
-#define false 0
-
-#define __bool_true_false_are_defined 1
-
-#endif /* stdbool_h */
diff --git a/zircon/third_party/ulib/jemalloc/include/msvc_compat/C99/stdint.h b/zircon/third_party/ulib/jemalloc/include/msvc_compat/C99/stdint.h
deleted file mode 100644
index d02608a..0000000
--- a/zircon/third_party/ulib/jemalloc/include/msvc_compat/C99/stdint.h
+++ /dev/null
@@ -1,247 +0,0 @@
-// ISO C9x  compliant stdint.h for Microsoft Visual Studio
-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 
-// 
-//  Copyright (c) 2006-2008 Alexander Chemeris
-// 
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-// 
-//   1. Redistributions of source code must retain the above copyright notice,
-//      this list of conditions and the following disclaimer.
-// 
-//   2. Redistributions in binary form must reproduce the above copyright
-//      notice, this list of conditions and the following disclaimer in the
-//      documentation and/or other materials provided with the distribution.
-// 
-//   3. The name of the author may be used to endorse or promote products
-//      derived from this software without specific prior written permission.
-// 
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
-// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// 
-///////////////////////////////////////////////////////////////////////////////
-
-#ifndef _MSC_VER // [
-#error "Use this header only with Microsoft Visual C++ compilers!"
-#endif // _MSC_VER ]
-
-#ifndef _MSC_STDINT_H_ // [
-#define _MSC_STDINT_H_
-
-#if _MSC_VER > 1000
-#pragma once
-#endif
-
-#include <limits.h>
-
-// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
-// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
-// or compiler give many errors like this:
-//   error C2733: second C linkage of overloaded function 'wmemchr' not allowed
-#ifdef __cplusplus
-extern "C" {
-#endif
-#  include <wchar.h>
-#ifdef __cplusplus
-}
-#endif
-
-// Define _W64 macros to mark types changing their size, like intptr_t.
-#ifndef _W64
-#  if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
-#     define _W64 __w64
-#  else
-#     define _W64
-#  endif
-#endif
-
-
-// 7.18.1 Integer types
-
-// 7.18.1.1 Exact-width integer types
-
-// Visual Studio 6 and Embedded Visual C++ 4 doesn't
-// realize that, e.g. char has the same size as __int8
-// so we give up on __intX for them.
-#if (_MSC_VER < 1300)
-   typedef signed char       int8_t;
-   typedef signed short      int16_t;
-   typedef signed int        int32_t;
-   typedef unsigned char     uint8_t;
-   typedef unsigned short    uint16_t;
-   typedef unsigned int      uint32_t;
-#else
-   typedef signed __int8     int8_t;
-   typedef signed __int16    int16_t;
-   typedef signed __int32    int32_t;
-   typedef unsigned __int8   uint8_t;
-   typedef unsigned __int16  uint16_t;
-   typedef unsigned __int32  uint32_t;
-#endif
-typedef signed __int64       int64_t;
-typedef unsigned __int64     uint64_t;
-
-
-// 7.18.1.2 Minimum-width integer types
-typedef int8_t    int_least8_t;
-typedef int16_t   int_least16_t;
-typedef int32_t   int_least32_t;
-typedef int64_t   int_least64_t;
-typedef uint8_t   uint_least8_t;
-typedef uint16_t  uint_least16_t;
-typedef uint32_t  uint_least32_t;
-typedef uint64_t  uint_least64_t;
-
-// 7.18.1.3 Fastest minimum-width integer types
-typedef int8_t    int_fast8_t;
-typedef int16_t   int_fast16_t;
-typedef int32_t   int_fast32_t;
-typedef int64_t   int_fast64_t;
-typedef uint8_t   uint_fast8_t;
-typedef uint16_t  uint_fast16_t;
-typedef uint32_t  uint_fast32_t;
-typedef uint64_t  uint_fast64_t;
-
-// 7.18.1.4 Integer types capable of holding object pointers
-#ifdef _WIN64 // [
-   typedef signed __int64    intptr_t;
-   typedef unsigned __int64  uintptr_t;
-#else // _WIN64 ][
-   typedef _W64 signed int   intptr_t;
-   typedef _W64 unsigned int uintptr_t;
-#endif // _WIN64 ]
-
-// 7.18.1.5 Greatest-width integer types
-typedef int64_t   intmax_t;
-typedef uint64_t  uintmax_t;
-
-
-// 7.18.2 Limits of specified-width integer types
-
-#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [   See footnote 220 at page 257 and footnote 221 at page 259
-
-// 7.18.2.1 Limits of exact-width integer types
-#define INT8_MIN     ((int8_t)_I8_MIN)
-#define INT8_MAX     _I8_MAX
-#define INT16_MIN    ((int16_t)_I16_MIN)
-#define INT16_MAX    _I16_MAX
-#define INT32_MIN    ((int32_t)_I32_MIN)
-#define INT32_MAX    _I32_MAX
-#define INT64_MIN    ((int64_t)_I64_MIN)
-#define INT64_MAX    _I64_MAX
-#define UINT8_MAX    _UI8_MAX
-#define UINT16_MAX   _UI16_MAX
-#define UINT32_MAX   _UI32_MAX
-#define UINT64_MAX   _UI64_MAX
-
-// 7.18.2.2 Limits of minimum-width integer types
-#define INT_LEAST8_MIN    INT8_MIN
-#define INT_LEAST8_MAX    INT8_MAX
-#define INT_LEAST16_MIN   INT16_MIN
-#define INT_LEAST16_MAX   INT16_MAX
-#define INT_LEAST32_MIN   INT32_MIN
-#define INT_LEAST32_MAX   INT32_MAX
-#define INT_LEAST64_MIN   INT64_MIN
-#define INT_LEAST64_MAX   INT64_MAX
-#define UINT_LEAST8_MAX   UINT8_MAX
-#define UINT_LEAST16_MAX  UINT16_MAX
-#define UINT_LEAST32_MAX  UINT32_MAX
-#define UINT_LEAST64_MAX  UINT64_MAX
-
-// 7.18.2.3 Limits of fastest minimum-width integer types
-#define INT_FAST8_MIN    INT8_MIN
-#define INT_FAST8_MAX    INT8_MAX
-#define INT_FAST16_MIN   INT16_MIN
-#define INT_FAST16_MAX   INT16_MAX
-#define INT_FAST32_MIN   INT32_MIN
-#define INT_FAST32_MAX   INT32_MAX
-#define INT_FAST64_MIN   INT64_MIN
-#define INT_FAST64_MAX   INT64_MAX
-#define UINT_FAST8_MAX   UINT8_MAX
-#define UINT_FAST16_MAX  UINT16_MAX
-#define UINT_FAST32_MAX  UINT32_MAX
-#define UINT_FAST64_MAX  UINT64_MAX
-
-// 7.18.2.4 Limits of integer types capable of holding object pointers
-#ifdef _WIN64 // [
-#  define INTPTR_MIN   INT64_MIN
-#  define INTPTR_MAX   INT64_MAX
-#  define UINTPTR_MAX  UINT64_MAX
-#else // _WIN64 ][
-#  define INTPTR_MIN   INT32_MIN
-#  define INTPTR_MAX   INT32_MAX
-#  define UINTPTR_MAX  UINT32_MAX
-#endif // _WIN64 ]
-
-// 7.18.2.5 Limits of greatest-width integer types
-#define INTMAX_MIN   INT64_MIN
-#define INTMAX_MAX   INT64_MAX
-#define UINTMAX_MAX  UINT64_MAX
-
-// 7.18.3 Limits of other integer types
-
-#ifdef _WIN64 // [
-#  define PTRDIFF_MIN  _I64_MIN
-#  define PTRDIFF_MAX  _I64_MAX
-#else  // _WIN64 ][
-#  define PTRDIFF_MIN  _I32_MIN
-#  define PTRDIFF_MAX  _I32_MAX
-#endif  // _WIN64 ]
-
-#define SIG_ATOMIC_MIN  INT_MIN
-#define SIG_ATOMIC_MAX  INT_MAX
-
-#ifndef SIZE_MAX // [
-#  ifdef _WIN64 // [
-#     define SIZE_MAX  _UI64_MAX
-#  else // _WIN64 ][
-#     define SIZE_MAX  _UI32_MAX
-#  endif // _WIN64 ]
-#endif // SIZE_MAX ]
-
-// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
-#ifndef WCHAR_MIN // [
-#  define WCHAR_MIN  0
-#endif  // WCHAR_MIN ]
-#ifndef WCHAR_MAX // [
-#  define WCHAR_MAX  _UI16_MAX
-#endif  // WCHAR_MAX ]
-
-#define WINT_MIN  0
-#define WINT_MAX  _UI16_MAX
-
-#endif // __STDC_LIMIT_MACROS ]
-
-
-// 7.18.4 Limits of other integer types
-
-#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [   See footnote 224 at page 260
-
-// 7.18.4.1 Macros for minimum-width integer constants
-
-#define INT8_C(val)  val##i8
-#define INT16_C(val) val##i16
-#define INT32_C(val) val##i32
-#define INT64_C(val) val##i64
-
-#define UINT8_C(val)  val##ui8
-#define UINT16_C(val) val##ui16
-#define UINT32_C(val) val##ui32
-#define UINT64_C(val) val##ui64
-
-// 7.18.4.2 Macros for greatest-width integer constants
-#define INTMAX_C   INT64_C
-#define UINTMAX_C  UINT64_C
-
-#endif // __STDC_CONSTANT_MACROS ]
-
-
-#endif // _MSC_STDINT_H_ ]
diff --git a/zircon/third_party/ulib/jemalloc/include/msvc_compat/strings.h b/zircon/third_party/ulib/jemalloc/include/msvc_compat/strings.h
deleted file mode 100644
index 47998be..0000000
--- a/zircon/third_party/ulib/jemalloc/include/msvc_compat/strings.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef strings_h
-#define strings_h
-
-/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided
- * for both */
-#ifdef _MSC_VER
-#  include <intrin.h>
-#  pragma intrinsic(_BitScanForward)
-static __forceinline int ffsl(long x)
-{
-	unsigned long i;
-
-	if (_BitScanForward(&i, x))
-		return (i + 1);
-	return (0);
-}
-
-static __forceinline int ffs(int x)
-{
-	return (ffsl(x));
-}
-
-#  ifdef  _M_X64
-#    pragma intrinsic(_BitScanForward64)
-#  endif
-
-static __forceinline int ffsll(unsigned __int64 x)
-{
-	unsigned long i;
-#ifdef  _M_X64
-	if (_BitScanForward64(&i, x))
-		return (i + 1);
-	return (0);
-#else
-// Fallback for 32-bit build where 64-bit version not available
-// assuming little endian
-	union {
-		unsigned __int64 ll;
-		unsigned   long l[2];
-	} s;
-
-	s.ll = x;
-
-	if (_BitScanForward(&i, s.l[0]))
-		return (i + 1);
-	else if(_BitScanForward(&i, s.l[1]))
-		return (i + 33);
-	return (0);
-#endif
-}
-
-#else
-#  define ffsll(x) __builtin_ffsll(x)
-#  define ffsl(x) __builtin_ffsl(x)
-#  define ffs(x) __builtin_ffs(x)
-#endif
-
-#endif /* strings_h */
diff --git a/zircon/third_party/ulib/jemalloc/include/msvc_compat/windows_extra.h b/zircon/third_party/ulib/jemalloc/include/msvc_compat/windows_extra.h
deleted file mode 100644
index 3008faa..0000000
--- a/zircon/third_party/ulib/jemalloc/include/msvc_compat/windows_extra.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
-#define	MSVC_COMPAT_WINDOWS_EXTRA_H
-
-#include <errno.h>
-
-#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
diff --git a/zircon/third_party/ulib/jemalloc/jemalloc.pc.in b/zircon/third_party/ulib/jemalloc/jemalloc.pc.in
deleted file mode 100644
index a318e8d..0000000
--- a/zircon/third_party/ulib/jemalloc/jemalloc.pc.in
+++ /dev/null
@@ -1,12 +0,0 @@
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
-install_suffix=@install_suffix@
-
-Name: jemalloc
-Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support.
-URL: http://jemalloc.net/
-Version: @jemalloc_version@
-Cflags: -I${includedir}
-Libs: -L${libdir} -ljemalloc${install_suffix}
diff --git a/zircon/third_party/ulib/jemalloc/m4/ax_cxx_compile_stdcxx.m4 b/zircon/third_party/ulib/jemalloc/m4/ax_cxx_compile_stdcxx.m4
deleted file mode 100644
index 2c18e49..0000000
--- a/zircon/third_party/ulib/jemalloc/m4/ax_cxx_compile_stdcxx.m4
+++ /dev/null
@@ -1,562 +0,0 @@
-# ===========================================================================
-#   http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html
-# ===========================================================================
-#
-# SYNOPSIS
-#
-#   AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional])
-#
-# DESCRIPTION
-#
-#   Check for baseline language coverage in the compiler for the specified
-#   version of the C++ standard.  If necessary, add switches to CXX and
-#   CXXCPP to enable support.  VERSION may be '11' (for the C++11 standard)
-#   or '14' (for the C++14 standard).
-#
-#   The second argument, if specified, indicates whether you insist on an
-#   extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g.
-#   -std=c++11).  If neither is specified, you get whatever works, with
-#   preference for an extended mode.
-#
-#   The third argument, if specified 'mandatory' or if left unspecified,
-#   indicates that baseline support for the specified C++ standard is
-#   required and that the macro should error out if no mode with that
-#   support is found.  If specified 'optional', then configuration proceeds
-#   regardless, after defining HAVE_CXX${VERSION} if and only if a
-#   supporting mode is found.
-#
-# LICENSE
-#
-#   Copyright (c) 2008 Benjamin Kosnik <bkoz@redhat.com>
-#   Copyright (c) 2012 Zack Weinberg <zackw@panix.com>
-#   Copyright (c) 2013 Roy Stogner <roystgnr@ices.utexas.edu>
-#   Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov <sokolov@google.com>
-#   Copyright (c) 2015 Paul Norman <penorman@mac.com>
-#   Copyright (c) 2015 Moritz Klammler <moritz@klammler.eu>
-#
-#   Copying and distribution of this file, with or without modification, are
-#   permitted in any medium without royalty provided the copyright notice
-#   and this notice are preserved.  This file is offered as-is, without any
-#   warranty.
-
-#serial 4
-
-dnl  This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro
-dnl  (serial version number 13).
-
-AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
-  m4_if([$1], [11], [],
-        [$1], [14], [],
-        [$1], [17], [m4_fatal([support for C++17 not yet implemented in AX_CXX_COMPILE_STDCXX])],
-        [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl
-  m4_if([$2], [], [],
-        [$2], [ext], [],
-        [$2], [noext], [],
-        [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl
-  m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true],
-        [$3], [mandatory], [ax_cxx_compile_cxx$1_required=true],
-        [$3], [optional], [ax_cxx_compile_cxx$1_required=false],
-        [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])])
-  AC_LANG_PUSH([C++])dnl
-  ac_success=no
-  AC_CACHE_CHECK(whether $CXX supports C++$1 features by default,
-  ax_cv_cxx_compile_cxx$1,
-  [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
-    [ax_cv_cxx_compile_cxx$1=yes],
-    [ax_cv_cxx_compile_cxx$1=no])])
-  if test x$ax_cv_cxx_compile_cxx$1 = xyes; then
-    ac_success=yes
-  fi
-
-  m4_if([$2], [noext], [], [dnl
-  if test x$ac_success = xno; then
-    for switch in -std=gnu++$1 -std=gnu++0x; do
-      cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
-      AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
-                     $cachevar,
-        [ac_save_CXX="$CXX"
-         CXX="$CXX $switch"
-         AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
-          [eval $cachevar=yes],
-          [eval $cachevar=no])
-         CXX="$ac_save_CXX"])
-      if eval test x\$$cachevar = xyes; then
-        CXX="$CXX $switch"
-        if test -n "$CXXCPP" ; then
-          CXXCPP="$CXXCPP $switch"
-        fi
-        ac_success=yes
-        break
-      fi
-    done
-  fi])
-
-  m4_if([$2], [ext], [], [dnl
-  if test x$ac_success = xno; then
-    dnl HP's aCC needs +std=c++11 according to:
-    dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf
-    dnl Cray's crayCC needs "-h std=c++11"
-    for switch in -std=c++$1 -std=c++0x +std=c++$1 "-h std=c++$1"; do
-      cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
-      AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
-                     $cachevar,
-        [ac_save_CXX="$CXX"
-         CXX="$CXX $switch"
-         AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
-          [eval $cachevar=yes],
-          [eval $cachevar=no])
-         CXX="$ac_save_CXX"])
-      if eval test x\$$cachevar = xyes; then
-        CXX="$CXX $switch"
-        if test -n "$CXXCPP" ; then
-          CXXCPP="$CXXCPP $switch"
-        fi
-        ac_success=yes
-        break
-      fi
-    done
-  fi])
-  AC_LANG_POP([C++])
-  if test x$ax_cxx_compile_cxx$1_required = xtrue; then
-    if test x$ac_success = xno; then
-      AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.])
-    fi
-  fi
-  if test x$ac_success = xno; then
-    HAVE_CXX$1=0
-    AC_MSG_NOTICE([No compiler with C++$1 support was found])
-  else
-    HAVE_CXX$1=1
-    AC_DEFINE(HAVE_CXX$1,1,
-              [define if the compiler supports basic C++$1 syntax])
-  fi
-  AC_SUBST(HAVE_CXX$1)
-])
-
-
-dnl  Test body for checking C++11 support
-
-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11],
-  _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
-)
-
-
-dnl  Test body for checking C++14 support
-
-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14],
-  _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
-  _AX_CXX_COMPILE_STDCXX_testbody_new_in_14
-)
-
-
-dnl  Tests for new features in C++11
-
-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[
-
-// If the compiler admits that it is not ready for C++11, why torture it?
-// Hopefully, this will speed up the test.
-
-#ifndef __cplusplus
-
-#error "This is not a C++ compiler"
-
-#elif __cplusplus < 201103L
-
-#error "This is not a C++11 compiler"
-
-#else
-
-namespace cxx11
-{
-
-  namespace test_static_assert
-  {
-
-    template <typename T>
-    struct check
-    {
-      static_assert(sizeof(int) <= sizeof(T), "not big enough");
-    };
-
-  }
-
-  namespace test_final_override
-  {
-
-    struct Base
-    {
-      virtual void f() {}
-    };
-
-    struct Derived : public Base
-    {
-      virtual void f() override {}
-    };
-
-  }
-
-  namespace test_double_right_angle_brackets
-  {
-
-    template < typename T >
-    struct check {};
-
-    typedef check<void> single_type;
-    typedef check<check<void>> double_type;
-    typedef check<check<check<void>>> triple_type;
-    typedef check<check<check<check<void>>>> quadruple_type;
-
-  }
-
-  namespace test_decltype
-  {
-
-    int
-    f()
-    {
-      int a = 1;
-      decltype(a) b = 2;
-      return a + b;
-    }
-
-  }
-
-  namespace test_type_deduction
-  {
-
-    template < typename T1, typename T2 >
-    struct is_same
-    {
-      static const bool value = false;
-    };
-
-    template < typename T >
-    struct is_same<T, T>
-    {
-      static const bool value = true;
-    };
-
-    template < typename T1, typename T2 >
-    auto
-    add(T1 a1, T2 a2) -> decltype(a1 + a2)
-    {
-      return a1 + a2;
-    }
-
-    int
-    test(const int c, volatile int v)
-    {
-      static_assert(is_same<int, decltype(0)>::value == true, "");
-      static_assert(is_same<int, decltype(c)>::value == false, "");
-      static_assert(is_same<int, decltype(v)>::value == false, "");
-      auto ac = c;
-      auto av = v;
-      auto sumi = ac + av + 'x';
-      auto sumf = ac + av + 1.0;
-      static_assert(is_same<int, decltype(ac)>::value == true, "");
-      static_assert(is_same<int, decltype(av)>::value == true, "");
-      static_assert(is_same<int, decltype(sumi)>::value == true, "");
-      static_assert(is_same<int, decltype(sumf)>::value == false, "");
-      static_assert(is_same<int, decltype(add(c, v))>::value == true, "");
-      return (sumf > 0.0) ? sumi : add(c, v);
-    }
-
-  }
-
-  namespace test_noexcept
-  {
-
-    int f() { return 0; }
-    int g() noexcept { return 0; }
-
-    static_assert(noexcept(f()) == false, "");
-    static_assert(noexcept(g()) == true, "");
-
-  }
-
-  namespace test_constexpr
-  {
-
-    template < typename CharT >
-    unsigned long constexpr
-    strlen_c_r(const CharT *const s, const unsigned long acc) noexcept
-    {
-      return *s ? strlen_c_r(s + 1, acc + 1) : acc;
-    }
-
-    template < typename CharT >
-    unsigned long constexpr
-    strlen_c(const CharT *const s) noexcept
-    {
-      return strlen_c_r(s, 0UL);
-    }
-
-    static_assert(strlen_c("") == 0UL, "");
-    static_assert(strlen_c("1") == 1UL, "");
-    static_assert(strlen_c("example") == 7UL, "");
-    static_assert(strlen_c("another\0example") == 7UL, "");
-
-  }
-
-  namespace test_rvalue_references
-  {
-
-    template < int N >
-    struct answer
-    {
-      static constexpr int value = N;
-    };
-
-    answer<1> f(int&)       { return answer<1>(); }
-    answer<2> f(const int&) { return answer<2>(); }
-    answer<3> f(int&&)      { return answer<3>(); }
-
-    void
-    test()
-    {
-      int i = 0;
-      const int c = 0;
-      static_assert(decltype(f(i))::value == 1, "");
-      static_assert(decltype(f(c))::value == 2, "");
-      static_assert(decltype(f(0))::value == 3, "");
-    }
-
-  }
-
-  namespace test_uniform_initialization
-  {
-
-    struct test
-    {
-      static const int zero {};
-      static const int one {1};
-    };
-
-    static_assert(test::zero == 0, "");
-    static_assert(test::one == 1, "");
-
-  }
-
-  namespace test_lambdas
-  {
-
-    void
-    test1()
-    {
-      auto lambda1 = [](){};
-      auto lambda2 = lambda1;
-      lambda1();
-      lambda2();
-    }
-
-    int
-    test2()
-    {
-      auto a = [](int i, int j){ return i + j; }(1, 2);
-      auto b = []() -> int { return '0'; }();
-      auto c = [=](){ return a + b; }();
-      auto d = [&](){ return c; }();
-      auto e = [a, &b](int x) mutable {
-        const auto identity = [](int y){ return y; };
-        for (auto i = 0; i < a; ++i)
-          a += b--;
-        return x + identity(a + b);
-      }(0);
-      return a + b + c + d + e;
-    }
-
-    int
-    test3()
-    {
-      const auto nullary = [](){ return 0; };
-      const auto unary = [](int x){ return x; };
-      using nullary_t = decltype(nullary);
-      using unary_t = decltype(unary);
-      const auto higher1st = [](nullary_t f){ return f(); };
-      const auto higher2nd = [unary](nullary_t f1){
-        return [unary, f1](unary_t f2){ return f2(unary(f1())); };
-      };
-      return higher1st(nullary) + higher2nd(nullary)(unary);
-    }
-
-  }
-
-  namespace test_variadic_templates
-  {
-
-    template <int...>
-    struct sum;
-
-    template <int N0, int... N1toN>
-    struct sum<N0, N1toN...>
-    {
-      static constexpr auto value = N0 + sum<N1toN...>::value;
-    };
-
-    template <>
-    struct sum<>
-    {
-      static constexpr auto value = 0;
-    };
-
-    static_assert(sum<>::value == 0, "");
-    static_assert(sum<1>::value == 1, "");
-    static_assert(sum<23>::value == 23, "");
-    static_assert(sum<1, 2>::value == 3, "");
-    static_assert(sum<5, 5, 11>::value == 21, "");
-    static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, "");
-
-  }
-
-  // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae
-  // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function
-  // because of this.
-  namespace test_template_alias_sfinae
-  {
-
-    struct foo {};
-
-    template<typename T>
-    using member = typename T::member_type;
-
-    template<typename T>
-    void func(...) {}
-
-    template<typename T>
-    void func(member<T>*) {}
-
-    void test();
-
-    void test() { func<foo>(0); }
-
-  }
-
-}  // namespace cxx11
-
-#endif  // __cplusplus >= 201103L
-
-]])
-
-
-dnl  Tests for new features in C++14
-
-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[
-
-// If the compiler admits that it is not ready for C++14, why torture it?
-// Hopefully, this will speed up the test.
-
-#ifndef __cplusplus
-
-#error "This is not a C++ compiler"
-
-#elif __cplusplus < 201402L
-
-#error "This is not a C++14 compiler"
-
-#else
-
-namespace cxx14
-{
-
-  namespace test_polymorphic_lambdas
-  {
-
-    int
-    test()
-    {
-      const auto lambda = [](auto&&... args){
-        const auto istiny = [](auto x){
-          return (sizeof(x) == 1UL) ? 1 : 0;
-        };
-        const int aretiny[] = { istiny(args)... };
-        return aretiny[0];
-      };
-      return lambda(1, 1L, 1.0f, '1');
-    }
-
-  }
-
-  namespace test_binary_literals
-  {
-
-    constexpr auto ivii = 0b0000000000101010;
-    static_assert(ivii == 42, "wrong value");
-
-  }
-
-  namespace test_generalized_constexpr
-  {
-
-    template < typename CharT >
-    constexpr unsigned long
-    strlen_c(const CharT *const s) noexcept
-    {
-      auto length = 0UL;
-      for (auto p = s; *p; ++p)
-        ++length;
-      return length;
-    }
-
-    static_assert(strlen_c("") == 0UL, "");
-    static_assert(strlen_c("x") == 1UL, "");
-    static_assert(strlen_c("test") == 4UL, "");
-    static_assert(strlen_c("another\0test") == 7UL, "");
-
-  }
-
-  namespace test_lambda_init_capture
-  {
-
-    int
-    test()
-    {
-      auto x = 0;
-      const auto lambda1 = [a = x](int b){ return a + b; };
-      const auto lambda2 = [a = lambda1(x)](){ return a; };
-      return lambda2();
-    }
-
-  }
-
-  namespace test_digit_seperators
-  {
-
-    constexpr auto ten_million = 100'000'000;
-    static_assert(ten_million == 100000000, "");
-
-  }
-
-  namespace test_return_type_deduction
-  {
-
-    auto f(int& x) { return x; }
-    decltype(auto) g(int& x) { return x; }
-
-    template < typename T1, typename T2 >
-    struct is_same
-    {
-      static constexpr auto value = false;
-    };
-
-    template < typename T >
-    struct is_same<T, T>
-    {
-      static constexpr auto value = true;
-    };
-
-    int
-    test()
-    {
-      auto x = 0;
-      static_assert(is_same<int, decltype(f(x))>::value, "");
-      static_assert(is_same<int&, decltype(g(x))>::value, "");
-      return x;
-    }
-
-  }
-
-}  // namespace cxx14
-
-#endif  // __cplusplus >= 201402L
-
-]])
diff --git a/zircon/third_party/ulib/jemalloc/msvc/ReadMe.txt b/zircon/third_party/ulib/jemalloc/msvc/ReadMe.txt
deleted file mode 100644
index 77d567d..0000000
--- a/zircon/third_party/ulib/jemalloc/msvc/ReadMe.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-
-How to build jemalloc for Windows
-=================================
-
-1. Install Cygwin with at least the following packages:
-   * autoconf
-   * autogen
-   * gawk
-   * grep
-   * sed
-
-2. Install Visual Studio 2015 with Visual C++
-
-3. Add Cygwin\bin to the PATH environment variable
-
-4. Open "VS2015 x86 Native Tools Command Prompt"
-   (note: x86/x64 doesn't matter at this point)
-
-5. Generate header files:
-   sh -c "CC=cl ./autogen.sh"
-
-6. Now the project can be opened and built in Visual Studio:
-   msvc\jemalloc_vc2015.sln
-
diff --git a/zircon/third_party/ulib/jemalloc/msvc/jemalloc_vc2015.sln b/zircon/third_party/ulib/jemalloc/msvc/jemalloc_vc2015.sln
deleted file mode 100644
index aedd5e5..0000000
--- a/zircon/third_party/ulib/jemalloc/msvc/jemalloc_vc2015.sln
+++ /dev/null
@@ -1,63 +0,0 @@
-
-Microsoft Visual Studio Solution File, Format Version 12.00
-# Visual Studio 14
-VisualStudioVersion = 14.0.24720.0
-MinimumVisualStudioVersion = 10.0.40219.1
-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}"
-	ProjectSection(SolutionItems) = preProject
-		ReadMe.txt = ReadMe.txt
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2015\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2015\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}"
-EndProject
-Global
-	GlobalSection(SolutionConfigurationPlatforms) = preSolution
-		Debug|x64 = Debug|x64
-		Debug|x86 = Debug|x86
-		Debug-static|x64 = Debug-static|x64
-		Debug-static|x86 = Debug-static|x86
-		Release|x64 = Release|x64
-		Release|x86 = Release|x86
-		Release-static|x64 = Release-static|x64
-		Release-static|x86 = Release-static|x86
-	EndGlobalSection
-	GlobalSection(ProjectConfigurationPlatforms) = postSolution
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32
-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32
-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32
-	EndGlobalSection
-	GlobalSection(SolutionProperties) = preSolution
-		HideSolutionNode = FALSE
-	EndGlobalSection
-EndGlobal
diff --git a/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
deleted file mode 100644
index 75ea8fb..0000000
--- a/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
+++ /dev/null
@@ -1,398 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup Label="ProjectConfigurations">
-    <ProjectConfiguration Include="Debug-static|Win32">
-      <Configuration>Debug-static</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug-static|x64">
-      <Configuration>Debug-static</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug|Win32">
-      <Configuration>Debug</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release-static|Win32">
-      <Configuration>Release-static</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release-static|x64">
-      <Configuration>Release-static</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|Win32">
-      <Configuration>Release</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug|x64">
-      <Configuration>Debug</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|x64">
-      <Configuration>Release</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\arena.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\assert.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\atomic.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\base.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ctl.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent_dss.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent_mmap.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\hash.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_decls.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_defs.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\large.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ph.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\prof.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_namespace.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_unnamespace.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ql.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\qr.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\rb.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\smoothstep.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\spin.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\witness.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_mangle.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos_jet.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_rename.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_typedefs.h" />
-    <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdbool.h" />
-    <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdint.h" />
-    <ClInclude Include="..\..\..\..\include\msvc_compat\strings.h" />
-    <ClInclude Include="..\..\..\..\include\msvc_compat\windows_extra.h" />
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\..\..\src\arena.c" />
-    <ClCompile Include="..\..\..\..\src\atomic.c" />
-    <ClCompile Include="..\..\..\..\src\base.c" />
-    <ClCompile Include="..\..\..\..\src\bitmap.c" />
-    <ClCompile Include="..\..\..\..\src\ckh.c" />
-    <ClCompile Include="..\..\..\..\src\ctl.c" />
-    <ClCompile Include="..\..\..\..\src\extent.c" />
-    <ClCompile Include="..\..\..\..\src\extent_dss.c" />
-    <ClCompile Include="..\..\..\..\src\extent_mmap.c" />
-    <ClCompile Include="..\..\..\..\src\hash.c" />
-    <ClCompile Include="..\..\..\..\src\jemalloc.c" />
-    <ClCompile Include="..\..\..\..\src\large.c" />
-    <ClCompile Include="..\..\..\..\src\mb.c" />
-    <ClCompile Include="..\..\..\..\src\mutex.c" />
-    <ClCompile Include="..\..\..\..\src\nstime.c" />
-    <ClCompile Include="..\..\..\..\src\pages.c" />
-    <ClCompile Include="..\..\..\..\src\prng.c" />
-    <ClCompile Include="..\..\..\..\src\prof.c" />
-    <ClCompile Include="..\..\..\..\src\rtree.c" />
-    <ClCompile Include="..\..\..\..\src\spin.c" />
-    <ClCompile Include="..\..\..\..\src\stats.c" />
-    <ClCompile Include="..\..\..\..\src\tcache.c" />
-    <ClCompile Include="..\..\..\..\src\ticker.c" />
-    <ClCompile Include="..\..\..\..\src\tsd.c" />
-    <ClCompile Include="..\..\..\..\src\util.c" />
-    <ClCompile Include="..\..\..\..\src\witness.c" />
-  </ItemGroup>
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>jemalloc</RootNamespace>
-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
-    <ConfigurationType>StaticLibrary</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
-    <ConfigurationType>StaticLibrary</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
-    <ConfigurationType>StaticLibrary</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
-    <ConfigurationType>DynamicLibrary</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
-    <ConfigurationType>StaticLibrary</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
-  <ImportGroup Label="ExtensionSettings">
-  </ImportGroup>
-  <ImportGroup Label="Shared">
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <PropertyGroup Label="UserMacros" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
-    <TargetName>$(ProjectName)d</TargetName>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
-    <TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
-    <TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
-    <TargetName>$(ProjectName)d</TargetName>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
-    <TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
-    <TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
-  </PropertyGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
-      <DebugInformationFormat>OldStyle</DebugInformationFormat>
-      <MinimalRebuild>false</MinimalRebuild>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-      <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
-      <DebugInformationFormat>OldStyle</DebugInformationFormat>
-    </ClCompile>
-    <Link>
-      <SubSystem>Windows</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-    </Link>
-  </ItemDefinitionGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-  </ImportGroup>
-</Project>
diff --git a/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
deleted file mode 100644
index a328a6f9..0000000
--- a/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
+++ /dev/null
@@ -1,260 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Filter Include="Source Files">
-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
-    </Filter>
-    <Filter Include="Header Files">
-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
-      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
-    </Filter>
-    <Filter Include="Header Files\internal">
-      <UniqueIdentifier>{5697dfa3-16cf-4932-b428-6e0ec6e9f98e}</UniqueIdentifier>
-    </Filter>
-    <Filter Include="Header Files\msvc_compat">
-      <UniqueIdentifier>{0cbd2ca6-42a7-4f82-8517-d7e7a14fd986}</UniqueIdentifier>
-    </Filter>
-    <Filter Include="Header Files\msvc_compat\C99">
-      <UniqueIdentifier>{0abe6f30-49b5-46dd-8aca-6e33363fa52c}</UniqueIdentifier>
-    </Filter>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_mangle.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos_jet.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_rename.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_typedefs.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\arena.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\assert.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\atomic.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\base.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ctl.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent_dss.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent_mmap.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\hash.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_decls.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_defs.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\large.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ph.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\prof.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_namespace.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_unnamespace.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ql.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\qr.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\rb.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\smoothstep.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\spin.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\witness.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\msvc_compat\strings.h">
-      <Filter>Header Files\msvc_compat</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\msvc_compat\windows_extra.h">
-      <Filter>Header Files\msvc_compat</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdbool.h">
-      <Filter>Header Files\msvc_compat\C99</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdint.h">
-      <Filter>Header Files\msvc_compat\C99</Filter>
-    </ClInclude>
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="..\..\..\..\src\arena.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\atomic.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\base.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\bitmap.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\ckh.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\ctl.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\extent.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\extent_dss.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\extent_mmap.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\hash.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\jemalloc.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\large.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\mb.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\mutex.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\nstime.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\pages.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\prng.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\prof.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\rtree.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\spin.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\stats.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\tcache.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\ticker.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\tsd.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\util.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\witness.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-  </ItemGroup>
-</Project>
diff --git a/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/test_threads/test_threads.cpp b/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/test_threads/test_threads.cpp
deleted file mode 100644
index a3d1a79..0000000
--- a/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/test_threads/test_threads.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-// jemalloc C++ threaded test
-// Author: Rustam Abdullaev
-// Public Domain
-
-#include <atomic>
-#include <functional>
-#include <future>
-#include <random>
-#include <thread>
-#include <vector>
-#include <stdio.h>
-#include <jemalloc/jemalloc.h>
-
-using std::vector;
-using std::thread;
-using std::uniform_int_distribution;
-using std::minstd_rand;
-
-int test_threads()
-{
-  je_malloc_conf = "narenas:3";
-  int narenas = 0;
-  size_t sz = sizeof(narenas);
-  je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0);
-  if (narenas != 3) {
-    printf("Error: unexpected number of arenas: %d\n", narenas);
-    return 1;
-  }
-  static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199, 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123, 255265, 2333111 };
-  static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0]));
-  vector<thread> workers;
-  static const int numThreads = narenas + 1, numAllocsMax = 25, numIter1 = 50, numIter2 = 50;
-  je_malloc_stats_print(NULL, NULL, NULL);
-  size_t allocated1;
-  size_t sz1 = sizeof(allocated1);
-  je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0);
-  printf("\nPress Enter to start threads...\n");
-  getchar();
-  printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2);
-  for (int i = 0; i < numThreads; i++) {
-    workers.emplace_back([tid=i]() {
-      uniform_int_distribution<int> sizeDist(0, numSizes - 1);
-      minstd_rand rnd(tid * 17);
-      uint8_t* ptrs[numAllocsMax];
-      int ptrsz[numAllocsMax];
-      for (int i = 0; i < numIter1; ++i) {
-        thread t([&]() {
-          for (int i = 0; i < numIter2; ++i) {
-            const int numAllocs = numAllocsMax - sizeDist(rnd);
-            for (int j = 0; j < numAllocs; j += 64) {
-              const int x = sizeDist(rnd);
-              const int sz = sizes[x];
-              ptrsz[j] = sz;
-              ptrs[j] = (uint8_t*)je_malloc(sz);
-              if (!ptrs[j]) {
-                printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. %d\n", sz, tid, i, j, x);
-                exit(1);
-              }
-              for (int k = 0; k < sz; k++)
-                ptrs[j][k] = tid + k;
-            }
-            for (int j = 0; j < numAllocs; j += 64) {
-              for (int k = 0, sz = ptrsz[j]; k < sz; k++)
-                if (ptrs[j][k] != (uint8_t)(tid + k)) {
-                  printf("Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(tid + k));
-                  exit(1);
-                }
-              je_free(ptrs[j]);
-            }
-          }
-        });
-        t.join();
-      }
-    });
-  }
-  for (thread& t : workers) {
-    t.join();
-  }
-  je_malloc_stats_print(NULL, NULL, NULL);
-  size_t allocated2;
-  je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0);
-  size_t leaked = allocated2 - allocated1;
-  printf("\nDone. Leaked: %zd bytes\n", leaked);
-  bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet)
-  printf("\nTest %s!\n", (failed ? "FAILED" : "successful"));
-  printf("\nPress Enter to continue...\n");
-  getchar();
-  return failed ? 1 : 0;
-}
diff --git a/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/test_threads/test_threads.h b/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/test_threads/test_threads.h
deleted file mode 100644
index 64d0cdb3..0000000
--- a/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/test_threads/test_threads.h
+++ /dev/null
@@ -1,3 +0,0 @@
-#pragma once
-
-int test_threads();
diff --git a/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj b/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj
deleted file mode 100644
index f5e9898..0000000
--- a/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj
+++ /dev/null
@@ -1,327 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup Label="ProjectConfigurations">
-    <ProjectConfiguration Include="Debug-static|Win32">
-      <Configuration>Debug-static</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug-static|x64">
-      <Configuration>Debug-static</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug|Win32">
-      <Configuration>Debug</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release-static|Win32">
-      <Configuration>Release-static</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release-static|x64">
-      <Configuration>Release-static</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|Win32">
-      <Configuration>Release</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug|x64">
-      <Configuration>Debug</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|x64">
-      <Configuration>Release</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-  </ItemGroup>
-  <PropertyGroup Label="Globals">
-    <ProjectGuid>{09028CFD-4EB7-491D-869C-0708DB97ED44}</ProjectGuid>
-    <Keyword>Win32Proj</Keyword>
-    <RootNamespace>test_threads</RootNamespace>
-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>true</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <UseDebugLibraries>false</UseDebugLibraries>
-    <PlatformToolset>v140</PlatformToolset>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-    <CharacterSet>MultiByte</CharacterSet>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
-  <ImportGroup Label="ExtensionSettings">
-  </ImportGroup>
-  <ImportGroup Label="Shared">
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <PropertyGroup Label="UserMacros" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
-    <LinkIncremental>true</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <LinkIncremental>true</LinkIncremental>
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
-    <LinkIncremental>true</LinkIncremental>
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
-    <LinkIncremental>false</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
-    <LinkIncremental>false</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
-    <LinkIncremental>false</LinkIncremental>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
-    <LinkIncremental>false</LinkIncremental>
-  </PropertyGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
-    <ClCompile>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <WarningLevel>Level3</WarningLevel>
-      <Optimization>Disabled</Optimization>
-      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
-    <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
-      <PrecompiledHeader>
-      </PrecompiledHeader>
-      <Optimization>MaxSpeed</Optimization>
-      <FunctionLevelLinking>true</FunctionLevelLinking>
-      <IntrinsicFunctions>true</IntrinsicFunctions>
-      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
-      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
-    </ClCompile>
-    <Link>
-      <SubSystem>Console</SubSystem>
-      <GenerateDebugInformation>true</GenerateDebugInformation>
-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
-      <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
-    </Link>
-  </ItemDefinitionGroup>
-  <ItemGroup>
-    <ClCompile Include="test_threads.cpp" />
-    <ClCompile Include="test_threads_main.cpp" />
-  </ItemGroup>
-  <ItemGroup>
-    <ProjectReference Include="..\jemalloc\jemalloc.vcxproj">
-      <Project>{8d6bb292-9e1c-413d-9f98-4864bdc1514a}</Project>
-    </ProjectReference>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="test_threads.h" />
-  </ItemGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
-  <ImportGroup Label="ExtensionTargets">
-  </ImportGroup>
-</Project>
\ No newline at end of file
diff --git a/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters b/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters
deleted file mode 100644
index 4c233407..0000000
--- a/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters
+++ /dev/null
@@ -1,26 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup>
-    <Filter Include="Source Files">
-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
-    </Filter>
-    <Filter Include="Header Files">
-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
-      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
-    </Filter>
-  </ItemGroup>
-  <ItemGroup>
-    <ClCompile Include="test_threads.cpp">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="test_threads_main.cpp">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-  </ItemGroup>
-  <ItemGroup>
-    <ClInclude Include="test_threads.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
-  </ItemGroup>
-</Project>
\ No newline at end of file
diff --git a/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/test_threads/test_threads_main.cpp b/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/test_threads/test_threads_main.cpp
deleted file mode 100644
index ffd96e6..0000000
--- a/zircon/third_party/ulib/jemalloc/msvc/projects/vc2015/test_threads/test_threads_main.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-#include "test_threads.h"
-#include <future>
-#include <functional>
-#include <chrono>
-
-using namespace std::chrono_literals;
-
-int main(int argc, char** argv)
-{
-  int rc = test_threads();
-  return rc;
-}
diff --git a/zircon/third_party/ulib/jemalloc/src/arena.c b/zircon/third_party/ulib/jemalloc/src/arena.c
deleted file mode 100644
index 7362c4e..0000000
--- a/zircon/third_party/ulib/jemalloc/src/arena.c
+++ /dev/null
@@ -1,1848 +0,0 @@
-#define	JEMALLOC_ARENA_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-ssize_t		opt_decay_time = DECAY_TIME_DEFAULT;
-static ssize_t	decay_time_default;
-
-const arena_bin_info_t	arena_bin_info[NBINS] = {
-#define	BIN_INFO_bin_yes(reg_size, slab_size, nregs)			\
-	{reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
-#define	BIN_INFO_bin_no(reg_size, slab_size, nregs)
-#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs,		\
-    lg_delta_lookup)							\
-	BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta),		\
-	    (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) +	\
-	    (ndelta<<lg_delta)))
-	SIZE_CLASSES
-#undef BIN_INFO_bin_yes
-#undef BIN_INFO_bin_no
-#undef SC
-};
-
-/******************************************************************************/
-/*
- * Function prototypes for static functions that are referenced prior to
- * definition.
- */
-
-static void	arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
-    size_t ndirty_limit);
-static void	arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena,
-    extent_t *slab, arena_bin_t *bin);
-static void	arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena,
-    extent_t *slab, arena_bin_t *bin);
-
-/******************************************************************************/
-
-static size_t
-arena_extent_dirty_npages(const extent_t *extent)
-{
-	return (extent_size_get(extent) >> LG_PAGE);
-}
-
-static extent_t *
-arena_extent_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
-    size_t alignment, bool *zero, bool slab)
-{
-	bool commit = true;
-
-	malloc_mutex_assert_owner(tsdn, &arena->lock);
-
-	return (extent_alloc_cache(tsdn, arena, r_extent_hooks, new_addr, usize,
-	    pad, alignment, zero, &commit, slab));
-}
-
-extent_t *
-arena_extent_cache_alloc(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size,
-    size_t alignment, bool *zero)
-{
-	extent_t *extent;
-
-	malloc_mutex_lock(tsdn, &arena->lock);
-	extent = arena_extent_cache_alloc_locked(tsdn, arena, r_extent_hooks,
-	    new_addr, size, 0, alignment, zero, false);
-	malloc_mutex_unlock(tsdn, &arena->lock);
-
-	return (extent);
-}
-
-static void
-arena_extent_cache_dalloc_locked(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent)
-{
-	malloc_mutex_assert_owner(tsdn, &arena->lock);
-
-	extent_dalloc_cache(tsdn, arena, r_extent_hooks, extent);
-	arena_maybe_purge(tsdn, arena);
-}
-
-void
-arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent)
-{
-	malloc_mutex_lock(tsdn, &arena->lock);
-	arena_extent_cache_dalloc_locked(tsdn, arena, r_extent_hooks, extent);
-	malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
-void
-arena_extent_cache_maybe_insert(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
-    bool cache)
-{
-	malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
-
-	if (cache) {
-		extent_ring_insert(&arena->extents_dirty, extent);
-		arena->ndirty += arena_extent_dirty_npages(extent);
-	}
-}
-
-void
-arena_extent_cache_maybe_remove(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
-    bool dirty)
-{
-	malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
-
-	if (dirty) {
-		extent_ring_remove(extent);
-		assert(arena->ndirty >= arena_extent_dirty_npages(extent));
-		arena->ndirty -= arena_extent_dirty_npages(extent);
-	}
-}
-
-JEMALLOC_INLINE_C void *
-arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
-    const arena_bin_info_t *bin_info)
-{
-	void *ret;
-	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
-	size_t regind;
-
-	assert(slab_data->nfree > 0);
-	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
-
-	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
-	ret = (void *)((uintptr_t)extent_addr_get(slab) +
-	    (uintptr_t)(bin_info->reg_size * regind));
-	slab_data->nfree--;
-	return (ret);
-}
-
-#ifndef JEMALLOC_JET
-JEMALLOC_INLINE_C
-#endif
-size_t
-arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr)
-{
-	size_t diff, regind;
-
-	/* Freeing a pointer outside the slab can cause assertion failure. */
-	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
-	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
-	/* Freeing an interior pointer can cause assertion failure. */
-	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
-	    (uintptr_t)arena_bin_info[binind].reg_size == 0);
-
-	/* Avoid doing division with a variable divisor. */
-	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
-	switch (binind) {
-#define	REGIND_bin_yes(index, reg_size)					\
-	case index:							\
-		regind = diff / (reg_size);				\
-		assert(diff == regind * (reg_size));			\
-		break;
-#define	REGIND_bin_no(index, reg_size)
-#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs,		\
-    lg_delta_lookup)							\
-	REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta<<lg_delta))
-	SIZE_CLASSES
-#undef REGIND_bin_yes
-#undef REGIND_bin_no
-#undef SC
-	default: not_reached();
-	}
-
-	assert(regind < arena_bin_info[binind].nregs);
-
-	return (regind);
-}
-
-JEMALLOC_INLINE_C void
-arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab,
-    arena_slab_data_t *slab_data, void *ptr)
-{
-	szind_t binind = slab_data->binind;
-	const arena_bin_info_t *bin_info = &arena_bin_info[binind];
-	size_t regind = arena_slab_regind(slab, binind, ptr);
-
-	assert(slab_data->nfree < bin_info->nregs);
-	/* Freeing an unallocated pointer can cause assertion failure. */
-	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
-
-	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
-	slab_data->nfree++;
-}
-
-static void
-arena_nactive_add(arena_t *arena, size_t add_pages)
-{
-	arena->nactive += add_pages;
-}
-
-static void
-arena_nactive_sub(arena_t *arena, size_t sub_pages)
-{
-	assert(arena->nactive >= sub_pages);
-	arena->nactive -= sub_pages;
-}
-
-static void
-arena_large_malloc_stats_update(arena_t *arena, size_t usize)
-{
-	szind_t index, hindex;
-
-	cassert(config_stats);
-
-	if (usize < LARGE_MINCLASS)
-		usize = LARGE_MINCLASS;
-	index = size2index(usize);
-	hindex = (index >= NBINS) ? index - NBINS : 0;
-
-	arena->stats.nmalloc_large++;
-	arena->stats.allocated_large += usize;
-	arena->stats.lstats[hindex].nmalloc++;
-	arena->stats.lstats[hindex].nrequests++;
-	arena->stats.lstats[hindex].curlextents++;
-}
-
-static void
-arena_large_malloc_stats_update_undo(arena_t *arena, size_t usize)
-{
-	szind_t index, hindex;
-
-	cassert(config_stats);
-
-	if (usize < LARGE_MINCLASS)
-		usize = LARGE_MINCLASS;
-	index = size2index(usize);
-	hindex = (index >= NBINS) ? index - NBINS : 0;
-
-	arena->stats.nmalloc_large--;
-	arena->stats.allocated_large -= usize;
-	arena->stats.lstats[hindex].nmalloc--;
-	arena->stats.lstats[hindex].nrequests--;
-	arena->stats.lstats[hindex].curlextents--;
-}
-
-static void
-arena_large_dalloc_stats_update(arena_t *arena, size_t usize)
-{
-	szind_t index, hindex;
-
-	cassert(config_stats);
-
-	if (usize < LARGE_MINCLASS)
-		usize = LARGE_MINCLASS;
-	index = size2index(usize);
-	hindex = (index >= NBINS) ? index - NBINS : 0;
-
-	arena->stats.ndalloc_large++;
-	arena->stats.allocated_large -= usize;
-	arena->stats.lstats[hindex].ndalloc++;
-	arena->stats.lstats[hindex].curlextents--;
-}
-
-static void
-arena_large_reset_stats_cancel(arena_t *arena, size_t usize)
-{
-	szind_t index = size2index(usize);
-	szind_t hindex = (index >= NBINS) ? index - NBINS : 0;
-
-	cassert(config_stats);
-
-	arena->stats.ndalloc_large--;
-	arena->stats.lstats[hindex].ndalloc--;
-}
-
-static void
-arena_large_ralloc_stats_update(arena_t *arena, size_t oldusize, size_t usize)
-{
-	arena_large_dalloc_stats_update(arena, oldusize);
-	arena_large_malloc_stats_update(arena, usize);
-}
-
-static extent_t *
-arena_extent_alloc_large_hard(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, size_t usize, size_t alignment, bool *zero)
-{
-	extent_t *extent;
-	bool commit = true;
-
-	extent = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL, usize,
-	    large_pad, alignment, zero, &commit, false);
-	if (extent == NULL) {
-		/* Revert optimistic stats updates. */
-		malloc_mutex_lock(tsdn, &arena->lock);
-		if (config_stats) {
-			arena_large_malloc_stats_update_undo(arena, usize);
-			arena->stats.mapped -= usize;
-		}
-		arena_nactive_sub(arena, (usize + large_pad) >> LG_PAGE);
-		malloc_mutex_unlock(tsdn, &arena->lock);
-	}
-
-	return (extent);
-}
-
-extent_t *
-arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool *zero)
-{
-	extent_t *extent;
-	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-
-	malloc_mutex_lock(tsdn, &arena->lock);
-
-	/* Optimistically update stats. */
-	if (config_stats) {
-		arena_large_malloc_stats_update(arena, usize);
-		arena->stats.mapped += usize;
-	}
-	arena_nactive_add(arena, (usize + large_pad) >> LG_PAGE);
-
-	extent = arena_extent_cache_alloc_locked(tsdn, arena, &extent_hooks,
-	    NULL, usize, large_pad, alignment, zero, false);
-	malloc_mutex_unlock(tsdn, &arena->lock);
-	if (extent == NULL) {
-		extent = arena_extent_alloc_large_hard(tsdn, arena,
-		    &extent_hooks, usize, alignment, zero);
-	}
-
-	return (extent);
-}
-
-void
-arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
-    bool locked)
-{
-	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-
-	if (!locked)
-		malloc_mutex_lock(tsdn, &arena->lock);
-	else
-		malloc_mutex_assert_owner(tsdn, &arena->lock);
-	if (config_stats) {
-		arena_large_dalloc_stats_update(arena,
-		    extent_usize_get(extent));
-		arena->stats.mapped -= extent_size_get(extent);
-	}
-	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
-
-	arena_extent_cache_dalloc_locked(tsdn, arena, &extent_hooks, extent);
-	if (!locked)
-		malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
-void
-arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
-    size_t oldusize)
-{
-	size_t usize = extent_usize_get(extent);
-	size_t udiff = oldusize - usize;
-
-	malloc_mutex_lock(tsdn, &arena->lock);
-	if (config_stats) {
-		arena_large_ralloc_stats_update(arena, oldusize, usize);
-		arena->stats.mapped -= udiff;
-	}
-	arena_nactive_sub(arena, udiff >> LG_PAGE);
-	malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
-void
-arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
-    size_t oldusize)
-{
-	size_t usize = extent_usize_get(extent);
-	size_t udiff = usize - oldusize;
-
-	malloc_mutex_lock(tsdn, &arena->lock);
-	if (config_stats) {
-		arena_large_ralloc_stats_update(arena, oldusize, usize);
-		arena->stats.mapped += udiff;
-	}
-	arena_nactive_add(arena, udiff >> LG_PAGE);
-	malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
-static void
-arena_decay_deadline_init(arena_t *arena)
-{
-	/*
-	 * Generate a new deadline that is uniformly random within the next
-	 * epoch after the current one.
-	 */
-	nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
-	nstime_add(&arena->decay.deadline, &arena->decay.interval);
-	if (arena->decay.time > 0) {
-		nstime_t jitter;
-
-		nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
-		    nstime_ns(&arena->decay.interval)));
-		nstime_add(&arena->decay.deadline, &jitter);
-	}
-}
-
-static bool
-arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
-{
-	return (nstime_compare(&arena->decay.deadline, time) <= 0);
-}
-
-static size_t
-arena_decay_backlog_npages_limit(const arena_t *arena)
-{
-	static const uint64_t h_steps[] = {
-#define	STEP(step, h, x, y) \
-		h,
-		SMOOTHSTEP
-#undef STEP
-	};
-	uint64_t sum;
-	size_t npages_limit_backlog;
-	unsigned i;
-
-	/*
-	 * For each element of decay_backlog, multiply by the corresponding
-	 * fixed-point smoothstep decay factor.  Sum the products, then divide
-	 * to round down to the nearest whole number of pages.
-	 */
-	sum = 0;
-	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
-		sum += arena->decay.backlog[i] * h_steps[i];
-	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
-
-	return (npages_limit_backlog);
-}
-
-static void
-arena_decay_backlog_update_last(arena_t *arena)
-{
-	size_t ndirty_delta = (arena->ndirty > arena->decay.nunpurged) ?
-	    arena->ndirty - arena->decay.nunpurged : 0;
-	arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
-}
-
-static void
-arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
-{
-	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
-		memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
-		    sizeof(size_t));
-	} else {
-		size_t nadvance_z = (size_t)nadvance_u64;
-
-		assert((uint64_t)nadvance_z == nadvance_u64);
-
-		memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
-		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
-		if (nadvance_z > 1) {
-			memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
-			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
-		}
-	}
-
-	arena_decay_backlog_update_last(arena);
-}
-
-static void
-arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
-{
-	uint64_t nadvance_u64;
-	nstime_t delta;
-
-	assert(arena_decay_deadline_reached(arena, time));
-
-	nstime_copy(&delta, time);
-	nstime_subtract(&delta, &arena->decay.epoch);
-	nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
-	assert(nadvance_u64 > 0);
-
-	/* Add nadvance_u64 decay intervals to epoch. */
-	nstime_copy(&delta, &arena->decay.interval);
-	nstime_imultiply(&delta, nadvance_u64);
-	nstime_add(&arena->decay.epoch, &delta);
-
-	/* Set a new deadline. */
-	arena_decay_deadline_init(arena);
-
-	/* Update the backlog. */
-	arena_decay_backlog_update(arena, nadvance_u64);
-}
-
-static void
-arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
-{
-	size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
-
-	if (arena->ndirty > ndirty_limit)
-		arena_purge_to_limit(tsdn, arena, ndirty_limit);
-	arena->decay.nunpurged = arena->ndirty;
-}
-
-static void
-arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
-{
-	arena_decay_epoch_advance_helper(arena, time);
-	arena_decay_epoch_advance_purge(tsdn, arena);
-}
-
-static void
-arena_decay_init(arena_t *arena, ssize_t decay_time)
-{
-	arena->decay.time = decay_time;
-	if (decay_time > 0) {
-		nstime_init2(&arena->decay.interval, decay_time, 0);
-		nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
-	}
-
-	nstime_init(&arena->decay.epoch, 0);
-	nstime_update(&arena->decay.epoch);
-	arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
-	arena_decay_deadline_init(arena);
-	arena->decay.nunpurged = arena->ndirty;
-	memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
-}
-
-static bool
-arena_decay_time_valid(ssize_t decay_time)
-{
-	if (decay_time < -1)
-		return (false);
-	if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
-		return (true);
-	return (false);
-}
-
-ssize_t
-arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
-{
-	ssize_t decay_time;
-
-	malloc_mutex_lock(tsdn, &arena->lock);
-	decay_time = arena->decay.time;
-	malloc_mutex_unlock(tsdn, &arena->lock);
-
-	return (decay_time);
-}
-
-bool
-arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
-{
-	if (!arena_decay_time_valid(decay_time))
-		return (true);
-
-	malloc_mutex_lock(tsdn, &arena->lock);
-	/*
-	 * Restart decay backlog from scratch, which may cause many dirty pages
-	 * to be immediately purged.  It would conceptually be possible to map
-	 * the old backlog onto the new backlog, but there is no justification
-	 * for such complexity since decay_time changes are intended to be
-	 * infrequent, either between the {-1, 0, >0} states, or a one-time
-	 * arbitrary change during initial arena configuration.
-	 */
-	arena_decay_init(arena, decay_time);
-	arena_maybe_purge(tsdn, arena);
-	malloc_mutex_unlock(tsdn, &arena->lock);
-
-	return (false);
-}
-
-static void
-arena_maybe_purge_helper(tsdn_t *tsdn, arena_t *arena)
-{
-	nstime_t time;
-
-	/* Purge all or nothing if the option is disabled. */
-	if (arena->decay.time <= 0) {
-		if (arena->decay.time == 0)
-			arena_purge_to_limit(tsdn, arena, 0);
-		return;
-	}
-
-	nstime_init(&time, 0);
-	nstime_update(&time);
-	if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
-	    &time) > 0)) {
-		/*
-		 * Time went backwards.  Move the epoch back in time and
-		 * generate a new deadline, with the expectation that time
-		 * typically flows forward for long enough periods of time that
-		 * epochs complete.  Unfortunately, this strategy is susceptible
-		 * to clock jitter triggering premature epoch advances, but
-		 * clock jitter estimation and compensation isn't feasible here
-		 * because calls into this code are event-driven.
-		 */
-		nstime_copy(&arena->decay.epoch, &time);
-		arena_decay_deadline_init(arena);
-	} else {
-		/* Verify that time does not go backwards. */
-		assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
-	}
-
-	/*
-	 * If the deadline has been reached, advance to the current epoch and
-	 * purge to the new limit if necessary.  Note that dirty pages created
-	 * during the current epoch are not subject to purge until a future
-	 * epoch, so as a result purging only happens during epoch advances.
-	 */
-	if (arena_decay_deadline_reached(arena, &time))
-		arena_decay_epoch_advance(tsdn, arena, &time);
-}
-
-void
-arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
-{
-	malloc_mutex_assert_owner(tsdn, &arena->lock);
-
-	/* Don't recursively purge. */
-	if (arena->purging)
-		return;
-
-	arena_maybe_purge_helper(tsdn, arena);
-}
-
-static size_t
-arena_dirty_count(tsdn_t *tsdn, arena_t *arena)
-{
-	extent_t *extent;
-	size_t ndirty = 0;
-
-	malloc_mutex_lock(tsdn, &arena->extents_mtx);
-
-	for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
-	    &arena->extents_dirty; extent = qr_next(extent, qr_link))
-		ndirty += extent_size_get(extent) >> LG_PAGE;
-
-	malloc_mutex_unlock(tsdn, &arena->extents_mtx);
-
-	return (ndirty);
-}
-
-static size_t
-arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
-    size_t ndirty_limit, extent_t *purge_extents_sentinel)
-{
-	extent_t *extent, *next;
-	size_t nstashed = 0;
-
-	malloc_mutex_lock(tsdn, &arena->extents_mtx);
-
-	/* Stash extents according to ndirty_limit. */
-	for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
-	    &arena->extents_dirty; extent = next) {
-		size_t npages;
-		bool zero, commit;
-		UNUSED extent_t *textent;
-
-		npages = extent_size_get(extent) >> LG_PAGE;
-		if (arena->ndirty - (nstashed + npages) < ndirty_limit)
-			break;
-
-		next = qr_next(extent, qr_link);
-		/* Allocate. */
-		zero = false;
-		commit = false;
-		textent = extent_alloc_cache_locked(tsdn, arena, r_extent_hooks,
-		    extent_base_get(extent), extent_size_get(extent), 0, PAGE,
-		    &zero, &commit, false);
-		assert(textent == extent);
-		assert(zero == extent_zeroed_get(extent));
-		extent_ring_remove(extent);
-		extent_ring_insert(purge_extents_sentinel, extent);
-
-		nstashed += npages;
-	}
-
-	malloc_mutex_unlock(tsdn, &arena->extents_mtx);
-	return (nstashed);
-}
-
-static size_t
-arena_purge_stashed(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *purge_extents_sentinel)
-{
-	UNUSED size_t nmadvise;
-	size_t npurged;
-	extent_t *extent, *next;
-
-	if (config_stats)
-		nmadvise = 0;
-	npurged = 0;
-
-	for (extent = qr_next(purge_extents_sentinel, qr_link); extent !=
-	    purge_extents_sentinel; extent = next) {
-		if (config_stats)
-			nmadvise++;
-		npurged += extent_size_get(extent) >> LG_PAGE;
-
-		next = qr_next(extent, qr_link);
-		extent_ring_remove(extent);
-		extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, extent);
-	}
-
-	if (config_stats) {
-		arena->stats.nmadvise += nmadvise;
-		arena->stats.purged += npurged;
-	}
-
-	return (npurged);
-}
-
-/*
- *   ndirty_limit: Purge as many dirty extents as possible without violating the
- *   invariant: (arena->ndirty >= ndirty_limit)
- */
-static void
-arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
-{
-	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
-	size_t npurge, npurged;
-	extent_t purge_extents_sentinel;
-
-	arena->purging = true;
-
-	/*
-	 * Calls to arena_dirty_count() are disabled even for debug builds
-	 * because overhead grows nonlinearly as memory usage increases.
-	 */
-	if (false && config_debug) {
-		size_t ndirty = arena_dirty_count(tsdn, arena);
-		assert(ndirty == arena->ndirty);
-	}
-	extent_init(&purge_extents_sentinel, arena, NULL, 0, 0, 0, false, false,
-	    false, false);
-
-	npurge = arena_stash_dirty(tsdn, arena, &extent_hooks, ndirty_limit,
-	    &purge_extents_sentinel);
-	if (npurge == 0)
-		goto label_return;
-	npurged = arena_purge_stashed(tsdn, arena, &extent_hooks,
-	    &purge_extents_sentinel);
-	assert(npurged == npurge);
-
-	if (config_stats)
-		arena->stats.npurge++;
-
-label_return:
-	arena->purging = false;
-}
-
-void
-arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
-{
-	malloc_mutex_lock(tsdn, &arena->lock);
-	if (all)
-		arena_purge_to_limit(tsdn, arena, 0);
-	else
-		arena_maybe_purge(tsdn, arena);
-	malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
-static void
-arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab)
-{
-	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-
-	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
-	arena_extent_cache_dalloc_locked(tsdn, arena, &extent_hooks, slab);
-}
-
-static void
-arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab)
-{
-	assert(extent_slab_data_get(slab)->nfree > 0);
-	extent_heap_insert(&bin->slabs_nonfull, slab);
-}
-
-static void
-arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab)
-{
-	extent_heap_remove(&bin->slabs_nonfull, slab);
-}
-
-static extent_t *
-arena_bin_slabs_nonfull_tryget(arena_bin_t *bin)
-{
-	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
-	if (slab == NULL)
-		return (NULL);
-	if (config_stats)
-		bin->stats.reslabs++;
-	return (slab);
-}
-
-static void
-arena_bin_slabs_full_insert(arena_bin_t *bin, extent_t *slab)
-{
-	assert(extent_slab_data_get(slab)->nfree == 0);
-	extent_ring_insert(&bin->slabs_full, slab);
-}
-
-static void
-arena_bin_slabs_full_remove(extent_t *slab)
-{
-	extent_ring_remove(slab);
-}
-
-void
-arena_reset(tsd_t *tsd, arena_t *arena)
-{
-	unsigned i;
-	extent_t *extent;
-
-	/*
-	 * Locking in this function is unintuitive.  The caller guarantees that
-	 * no concurrent operations are happening in this arena, but there are
-	 * still reasons that some locking is necessary:
-	 *
-	 * - Some of the functions in the transitive closure of calls assume
-	 *   appropriate locks are held, and in some cases these locks are
-	 *   temporarily dropped to avoid lock order reversal or deadlock due to
-	 *   reentry.
-	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
-	 *   strictly speaking this is a "concurrent operation", disallowing
-	 *   stats refreshes would impose an inconvenient burden.
-	 */
-
-	/* Large allocations. */
-	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
-	for (extent = ql_last(&arena->large, ql_link); extent != NULL; extent =
-	    ql_last(&arena->large, ql_link)) {
-		void *ptr = extent_base_get(extent);
-		size_t usize;
-
-		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
-		if (config_stats || (config_prof && opt_prof))
-			usize = isalloc(tsd_tsdn(tsd), extent, ptr);
-		/* Remove large allocation from prof sample set. */
-		if (config_prof && opt_prof)
-			prof_free(tsd, extent, ptr, usize);
-		large_dalloc(tsd_tsdn(tsd), extent);
-		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
-		/* Cancel out unwanted effects on stats. */
-		if (config_stats)
-			arena_large_reset_stats_cancel(arena, usize);
-	}
-	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
-
-	malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
-
-	/* Bins. */
-	for (i = 0; i < NBINS; i++) {
-		extent_t *slab;
-		arena_bin_t *bin = &arena->bins[i];
-		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-		if (bin->slabcur != NULL) {
-			slab = bin->slabcur;
-			bin->slabcur = NULL;
-			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
-			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-		}
-		while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
-		    NULL) {
-			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
-			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-		}
-		for (slab = qr_next(&bin->slabs_full, qr_link); slab !=
-		    &bin->slabs_full; slab = qr_next(&bin->slabs_full,
-		    qr_link)) {
-			arena_bin_slabs_full_remove(slab);
-			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
-			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-		}
-		if (config_stats) {
-			bin->stats.curregs = 0;
-			bin->stats.curslabs = 0;
-		}
-		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-	}
-
-	assert(!arena->purging);
-	arena->nactive = 0;
-
-	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
-}
-
-static void
-arena_destroy_retained(tsdn_t *tsdn, arena_t *arena)
-{
-	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
-	size_t i;
-
-	/*
-	 * Iterate over the retained extents and blindly attempt to deallocate
-	 * them.  This gives the extent allocator underlying the extent hooks an
-	 * opportunity to unmap all retained memory without having to keep its
-	 * own metadata structures, but if deallocation fails, that is the
-	 * application's decision/problem.  In practice, retained extents are
-	 * leaked here if !config_munmap unless the application provided custom
-	 * extent hooks, so best practice is to either enable munmap (and avoid
-	 * dss for arenas to be destroyed), or provide custom extent hooks that
-	 * either unmap retained extents or track them for later use.
-	 */
-	for (i = 0; i < sizeof(arena->extents_retained)/sizeof(extent_heap_t);
-	    i++) {
-		extent_heap_t *extents = &arena->extents_retained[i];
-		extent_t *extent;
-
-		while ((extent = extent_heap_remove_first(extents)) != NULL) {
-			extent_dalloc_wrapper_try(tsdn, arena, &extent_hooks,
-			    extent);
-		}
-	}
-}
-
-void
-arena_destroy(tsd_t *tsd, arena_t *arena)
-{
-	assert(base_ind_get(arena->base) >= narenas_auto);
-	assert(arena_nthreads_get(arena, false) == 0);
-	assert(arena_nthreads_get(arena, true) == 0);
-
-	/*
-	 * No allocations have occurred since arena_reset() was called.
-	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
-	 * extents, so only retained extents may remain.
-	 */
-	assert(arena->ndirty == 0);
-
-	/* Attempt to deallocate retained memory. */
-	arena_destroy_retained(tsd_tsdn(tsd), arena);
-
-	/*
-	 * Remove the arena pointer from the arenas array.  We rely on the fact
-	 * that there is no way for the application to get a dirty read from the
-	 * arenas array unless there is an inherent race in the application
-	 * involving access of an arena being concurrently destroyed.  The
-	 * application must synchronize knowledge of the arena's validity, so as
-	 * long as we use an atomic write to update the arenas array, the
-	 * application will get a clean read any time after it synchronizes
-	 * knowledge that the arena is no longer valid.
-	 */
-	arena_set(base_ind_get(arena->base), NULL);
-
-	/*
-	 * Destroy the base allocator, which manages all metadata ever mapped by
-	 * this arena.
-	 */
-	base_delete(arena->base);
-}
-
-static extent_t *
-arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info)
-{
-	extent_t *slab;
-	bool zero, commit;
-
-	zero = false;
-	commit = true;
-	malloc_mutex_unlock(tsdn, &arena->lock);
-	slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
-	    bin_info->slab_size, 0, PAGE, &zero, &commit, true);
-	malloc_mutex_lock(tsdn, &arena->lock);
-
-	return (slab);
-}
-
-static extent_t *
-arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
-    const arena_bin_info_t *bin_info)
-{
-	extent_t *slab;
-	arena_slab_data_t *slab_data;
-	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-	bool zero = false;
-
-	slab = arena_extent_cache_alloc_locked(tsdn, arena, &extent_hooks, NULL,
-	    bin_info->slab_size, 0, PAGE, &zero, true);
-	if (slab == NULL) {
-		slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
-		    bin_info);
-		if (slab == NULL)
-			return (NULL);
-	}
-	assert(extent_slab_get(slab));
-
-	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
-
-	/* Initialize slab internals. */
-	slab_data = extent_slab_data_get(slab);
-	slab_data->binind = binind;
-	slab_data->nfree = bin_info->nregs;
-	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info);
-
-	if (config_stats)
-		arena->stats.mapped += extent_size_get(slab);
-
-	return (slab);
-}
-
-static extent_t *
-arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
-    szind_t binind)
-{
-	extent_t *slab;
-	const arena_bin_info_t *bin_info;
-
-	/* Look for a usable slab. */
-	slab = arena_bin_slabs_nonfull_tryget(bin);
-	if (slab != NULL)
-		return (slab);
-	/* No existing slabs have any space available. */
-
-	bin_info = &arena_bin_info[binind];
-
-	/* Allocate a new slab. */
-	malloc_mutex_unlock(tsdn, &bin->lock);
-	/******************************/
-	malloc_mutex_lock(tsdn, &arena->lock);
-	slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
-	malloc_mutex_unlock(tsdn, &arena->lock);
-	/********************************/
-	malloc_mutex_lock(tsdn, &bin->lock);
-	if (slab != NULL) {
-		if (config_stats) {
-			bin->stats.nslabs++;
-			bin->stats.curslabs++;
-		}
-		return (slab);
-	}
-
-	/*
-	 * arena_slab_alloc() failed, but another thread may have made
-	 * sufficient memory available while this one dropped bin->lock above,
-	 * so search one more time.
-	 */
-	slab = arena_bin_slabs_nonfull_tryget(bin);
-	if (slab != NULL)
-		return (slab);
-
-	return (NULL);
-}
-
-/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
-static void *
-arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
-    szind_t binind)
-{
-	const arena_bin_info_t *bin_info;
-	extent_t *slab;
-
-	bin_info = &arena_bin_info[binind];
-	if (bin->slabcur != NULL) {
-		arena_bin_slabs_full_insert(bin, bin->slabcur);
-		bin->slabcur = NULL;
-	}
-	slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
-	if (bin->slabcur != NULL) {
-		/*
-		 * Another thread updated slabcur while this one ran without the
-		 * bin lock in arena_bin_nonfull_slab_get().
-		 */
-		if (extent_slab_data_get(bin->slabcur)->nfree > 0) {
-			void *ret = arena_slab_reg_alloc(tsdn, bin->slabcur,
-			    bin_info);
-			if (slab != NULL) {
-				/*
-				 * arena_slab_alloc() may have allocated slab,
-				 * or it may have been pulled from
-				 * slabs_nonfull.  Therefore it is unsafe to
-				 * make any assumptions about how slab has
-				 * previously been used, and
-				 * arena_bin_lower_slab() must be called, as if
-				 * a region were just deallocated from the slab.
-				 */
-				if (extent_slab_data_get(slab)->nfree ==
-				    bin_info->nregs) {
-					arena_dalloc_bin_slab(tsdn, arena, slab,
-					    bin);
-				} else {
-					arena_bin_lower_slab(tsdn, arena, slab,
-					    bin);
-				}
-			}
-			return (ret);
-		}
-
-		arena_bin_slabs_full_insert(bin, bin->slabcur);
-		bin->slabcur = NULL;
-	}
-
-	if (slab == NULL)
-		return (NULL);
-	bin->slabcur = slab;
-
-	assert(extent_slab_data_get(bin->slabcur)->nfree > 0);
-
-	return (arena_slab_reg_alloc(tsdn, slab, bin_info));
-}
-
-void
-arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
-    szind_t binind, uint64_t prof_accumbytes)
-{
-	unsigned i, nfill;
-	arena_bin_t *bin;
-
-	assert(tbin->ncached == 0);
-
-	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
-		prof_idump(tsdn);
-	bin = &arena->bins[binind];
-	malloc_mutex_lock(tsdn, &bin->lock);
-	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
-	    tbin->lg_fill_div); i < nfill; i++) {
-		extent_t *slab;
-		void *ptr;
-		if ((slab = bin->slabcur) != NULL &&
-		    extent_slab_data_get(slab)->nfree > 0) {
-			ptr = arena_slab_reg_alloc(tsdn, slab,
-			    &arena_bin_info[binind]);
-		} else
-			ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
-		if (ptr == NULL) {
-			/*
-			 * OOM.  tbin->avail isn't yet filled down to its first
-			 * element, so the successful allocations (if any) must
-			 * be moved just before tbin->avail before bailing out.
-			 */
-			if (i > 0) {
-				memmove(tbin->avail - i, tbin->avail - nfill,
-				    i * sizeof(void *));
-			}
-			break;
-		}
-		if (config_fill && unlikely(opt_junk_alloc)) {
-			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
-			    true);
-		}
-		/* Insert such that low regions get used first. */
-		*(tbin->avail - nfill + i) = ptr;
-	}
-	if (config_stats) {
-		bin->stats.nmalloc += i;
-		bin->stats.nrequests += tbin->tstats.nrequests;
-		bin->stats.curregs += i;
-		bin->stats.nfills++;
-		tbin->tstats.nrequests = 0;
-	}
-	malloc_mutex_unlock(tsdn, &bin->lock);
-	tbin->ncached = i;
-	arena_decay_tick(tsdn, arena);
-}
-
-void
-arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero)
-{
-	if (!zero)
-		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
-}
-
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_small
-#define	arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
-#endif
-void
-arena_dalloc_junk_small(void *ptr, const arena_bin_info_t *bin_info)
-{
-	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
-}
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_small
-#define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
-arena_dalloc_junk_small_t *arena_dalloc_junk_small =
-    JEMALLOC_N(n_arena_dalloc_junk_small);
-#endif
-
-static void *
-arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
-{
-	void *ret;
-	arena_bin_t *bin;
-	size_t usize;
-	extent_t *slab;
-
-	assert(binind < NBINS);
-	bin = &arena->bins[binind];
-	usize = index2size(binind);
-
-	malloc_mutex_lock(tsdn, &bin->lock);
-	if ((slab = bin->slabcur) != NULL && extent_slab_data_get(slab)->nfree >
-	    0)
-		ret = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]);
-	else
-		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
-
-	if (ret == NULL) {
-		malloc_mutex_unlock(tsdn, &bin->lock);
-		return (NULL);
-	}
-
-	if (config_stats) {
-		bin->stats.nmalloc++;
-		bin->stats.nrequests++;
-		bin->stats.curregs++;
-	}
-	malloc_mutex_unlock(tsdn, &bin->lock);
-	if (config_prof && arena_prof_accum(tsdn, arena, usize))
-		prof_idump(tsdn);
-
-	if (!zero) {
-		if (config_fill) {
-			if (unlikely(opt_junk_alloc)) {
-				arena_alloc_junk_small(ret,
-				    &arena_bin_info[binind], false);
-			} else if (unlikely(opt_zero))
-				memset(ret, 0, usize);
-		}
-	} else {
-		if (config_fill && unlikely(opt_junk_alloc)) {
-			arena_alloc_junk_small(ret, &arena_bin_info[binind],
-			    true);
-		}
-		memset(ret, 0, usize);
-	}
-
-	arena_decay_tick(tsdn, arena);
-	return (ret);
-}
-
-void *
-arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
-    bool zero)
-{
-	assert(!tsdn_null(tsdn) || arena != NULL);
-
-	if (likely(!tsdn_null(tsdn)))
-		arena = arena_choose(tsdn_tsd(tsdn), arena);
-	if (unlikely(arena == NULL))
-		return (NULL);
-
-	if (likely(size <= SMALL_MAXCLASS))
-		return (arena_malloc_small(tsdn, arena, ind, zero));
-	return (large_malloc(tsdn, arena, index2size(ind), zero));
-}
-
-void *
-arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
-    bool zero, tcache_t *tcache)
-{
-	void *ret;
-
-	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
-	    && (usize & PAGE_MASK) == 0))) {
-		/* Small; alignment doesn't require special slab placement. */
-		ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
-		    tcache, true);
-	} else {
-		if (likely(alignment <= CACHELINE))
-			ret = large_malloc(tsdn, arena, usize, zero);
-		else
-			ret = large_palloc(tsdn, arena, usize, alignment, zero);
-	}
-	return (ret);
-}
-
-void
-arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-    size_t usize)
-{
-	arena_t *arena = extent_arena_get(extent);
-
-	cassert(config_prof);
-	assert(ptr != NULL);
-	assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS);
-	assert(usize <= SMALL_MAXCLASS);
-
-	extent_usize_set(extent, usize);
-
-	/*
-	 * Cancel out as much of the excessive prof_accumbytes increase as
-	 * possible without underflowing.  Interval-triggered dumps occur
-	 * slightly more often than intended as a result of incomplete
-	 * canceling.
-	 */
-	malloc_mutex_lock(tsdn, &arena->lock);
-	if (arena->prof_accumbytes >= LARGE_MINCLASS - usize)
-		arena->prof_accumbytes -= LARGE_MINCLASS - usize;
-	else
-		arena->prof_accumbytes = 0;
-	malloc_mutex_unlock(tsdn, &arena->lock);
-
-	assert(isalloc(tsdn, extent, ptr) == usize);
-}
-
-static size_t
-arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr)
-{
-	cassert(config_prof);
-	assert(ptr != NULL);
-
-	extent_usize_set(extent, LARGE_MINCLASS);
-
-	assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS);
-
-	return (LARGE_MINCLASS);
-}
-
-void
-arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr,
-    tcache_t *tcache, bool slow_path)
-{
-	size_t usize;
-
-	cassert(config_prof);
-	assert(opt_prof);
-
-	usize = arena_prof_demote(tsdn, extent, ptr);
-	if (usize <= tcache_maxclass) {
-		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, usize,
-		    slow_path);
-	} else
-		large_dalloc(tsdn, extent);
-}
-
-static void
-arena_dissociate_bin_slab(extent_t *slab, arena_bin_t *bin)
-{
-	/* Dissociate slab from bin. */
-	if (slab == bin->slabcur)
-		bin->slabcur = NULL;
-	else {
-		szind_t binind = extent_slab_data_get(slab)->binind;
-		const arena_bin_info_t *bin_info = &arena_bin_info[binind];
-
-		/*
-		 * The following block's conditional is necessary because if the
-		 * slab only contains one region, then it never gets inserted
-		 * into the non-full slabs heap.
-		 */
-		if (bin_info->nregs == 1)
-			arena_bin_slabs_full_remove(slab);
-		else
-			arena_bin_slabs_nonfull_remove(bin, slab);
-	}
-}
-
-static void
-arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
-    arena_bin_t *bin)
-{
-	assert(slab != bin->slabcur);
-
-	malloc_mutex_unlock(tsdn, &bin->lock);
-	/******************************/
-	malloc_mutex_lock(tsdn, &arena->lock);
-	arena_slab_dalloc(tsdn, arena, slab);
-	malloc_mutex_unlock(tsdn, &arena->lock);
-	/****************************/
-	malloc_mutex_lock(tsdn, &bin->lock);
-	if (config_stats)
-		bin->stats.curslabs--;
-}
-
-static void
-arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
-    arena_bin_t *bin)
-{
-	assert(extent_slab_data_get(slab)->nfree > 0);
-
-	/*
-	 * Make sure that if bin->slabcur is non-NULL, it refers to the
-	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
-	 * than proactively keeping it pointing at the oldest/lowest non-full
-	 * slab.
-	 */
-	if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
-		/* Switch slabcur. */
-		if (extent_slab_data_get(bin->slabcur)->nfree > 0)
-			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
-		else
-			arena_bin_slabs_full_insert(bin, bin->slabcur);
-		bin->slabcur = slab;
-		if (config_stats)
-			bin->stats.reslabs++;
-	} else
-		arena_bin_slabs_nonfull_insert(bin, slab);
-}
-
-static void
-arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
-    void *ptr, bool junked)
-{
-	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
-	szind_t binind = slab_data->binind;
-	arena_bin_t *bin = &arena->bins[binind];
-	const arena_bin_info_t *bin_info = &arena_bin_info[binind];
-
-	if (!junked && config_fill && unlikely(opt_junk_free))
-		arena_dalloc_junk_small(ptr, bin_info);
-
-	arena_slab_reg_dalloc(tsdn, slab, slab_data, ptr);
-	if (slab_data->nfree == bin_info->nregs) {
-		arena_dissociate_bin_slab(slab, bin);
-		arena_dalloc_bin_slab(tsdn, arena, slab, bin);
-	} else if (slab_data->nfree == 1 && slab != bin->slabcur) {
-		arena_bin_slabs_full_remove(slab);
-		arena_bin_lower_slab(tsdn, arena, slab, bin);
-	}
-
-	if (config_stats) {
-		bin->stats.ndalloc++;
-		bin->stats.curregs--;
-	}
-}
-
-void
-arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
-    void *ptr)
-{
-	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
-}
-
-static void
-arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr)
-{
-	arena_bin_t *bin = &arena->bins[extent_slab_data_get(extent)->binind];
-
-	malloc_mutex_lock(tsdn, &bin->lock);
-	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
-	malloc_mutex_unlock(tsdn, &bin->lock);
-}
-
-void
-arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr)
-{
-	arena_dalloc_bin(tsdn, arena, extent, ptr);
-	arena_decay_tick(tsdn, arena);
-}
-
-bool
-arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
-    size_t size, size_t extra, bool zero)
-{
-	size_t usize_min, usize_max;
-
-	/* Calls with non-zero extra had to clamp extra. */
-	assert(extra == 0 || size + extra <= LARGE_MAXCLASS);
-
-	if (unlikely(size > LARGE_MAXCLASS))
-		return (true);
-
-	usize_min = s2u(size);
-	usize_max = s2u(size + extra);
-	if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
-		/*
-		 * Avoid moving the allocation if the size class can be left the
-		 * same.
-		 */
-		assert(arena_bin_info[size2index(oldsize)].reg_size ==
-		    oldsize);
-		if ((usize_max > SMALL_MAXCLASS || size2index(usize_max) !=
-		    size2index(oldsize)) && (size > oldsize || usize_max <
-		    oldsize))
-			return (true);
-
-		arena_decay_tick(tsdn, extent_arena_get(extent));
-		return (false);
-	} else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
-		return (large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
-		    zero));
-	}
-
-	return (true);
-}
-
-static void *
-arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool zero, tcache_t *tcache)
-{
-	if (alignment == 0)
-		return (arena_malloc(tsdn, arena, usize, size2index(usize),
-		    zero, tcache, true));
-	usize = sa2u(usize, alignment);
-	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
-		return (NULL);
-	return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
-}
-
-void *
-arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
-    size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache)
-{
-	void *ret;
-	size_t usize, copysize;
-
-	usize = s2u(size);
-	if (unlikely(usize == 0 || size > LARGE_MAXCLASS))
-		return (NULL);
-
-	if (likely(usize <= SMALL_MAXCLASS)) {
-		/* Try to avoid moving the allocation. */
-		if (!arena_ralloc_no_move(tsdn, extent, ptr, oldsize, usize, 0,
-		    zero))
-			return (ptr);
-	}
-
-	if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
-		return (large_ralloc(tsdn, arena, extent, usize, alignment,
-		    zero, tcache));
-	}
-
-	/*
-	 * size and oldsize are different enough that we need to move the
-	 * object.  In that case, fall back to allocating new space and copying.
-	 */
-	ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, zero,
-	    tcache);
-	if (ret == NULL)
-		return (NULL);
-
-	/*
-	 * Junk/zero-filling were already done by
-	 * ipalloc()/arena_malloc().
-	 */
-
-	copysize = (usize < oldsize) ? usize : oldsize;
-	memcpy(ret, ptr, copysize);
-	isdalloct(tsdn, extent, ptr, oldsize, tcache, true);
-	return (ret);
-}
-
-dss_prec_t
-arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
-{
-	dss_prec_t ret;
-
-	malloc_mutex_lock(tsdn, &arena->lock);
-	ret = arena->dss_prec;
-	malloc_mutex_unlock(tsdn, &arena->lock);
-	return (ret);
-}
-
-bool
-arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
-{
-	if (!have_dss)
-		return (dss_prec != dss_prec_disabled);
-	malloc_mutex_lock(tsdn, &arena->lock);
-	arena->dss_prec = dss_prec;
-	malloc_mutex_unlock(tsdn, &arena->lock);
-	return (false);
-}
-
-ssize_t
-arena_decay_time_default_get(void)
-{
-	return ((ssize_t)atomic_read_zu((size_t *)&decay_time_default));
-}
-
-bool
-arena_decay_time_default_set(ssize_t decay_time)
-{
-	if (!arena_decay_time_valid(decay_time))
-		return (true);
-	atomic_write_zu((size_t *)&decay_time_default, (size_t)decay_time);
-	return (false);
-}
-
-static void
-arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
-    const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty)
-{
-	*nthreads += arena_nthreads_get(arena, false);
-	*dss = dss_prec_names[arena->dss_prec];
-	*decay_time = arena->decay.time;
-	*nactive += arena->nactive;
-	*ndirty += arena->ndirty;
-}
-
-void
-arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
-    const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty)
-{
-	malloc_mutex_lock(tsdn, &arena->lock);
-	arena_basic_stats_merge_locked(arena, nthreads, dss, decay_time,
-	    nactive, ndirty);
-	malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
-void
-arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
-    const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty,
-    arena_stats_t *astats, malloc_bin_stats_t *bstats,
-    malloc_large_stats_t *lstats)
-{
-	size_t base_allocated, base_resident, base_mapped;
-	unsigned i;
-
-	cassert(config_stats);
-
-	malloc_mutex_lock(tsdn, &arena->lock);
-	arena_basic_stats_merge_locked(arena, nthreads, dss, decay_time,
-	    nactive, ndirty);
-
-	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
-	    &base_mapped);
-
-	astats->mapped += base_mapped + arena->stats.mapped;
-	astats->retained += arena->stats.retained;
-	astats->npurge += arena->stats.npurge;
-	astats->nmadvise += arena->stats.nmadvise;
-	astats->purged += arena->stats.purged;
-	astats->base += base_allocated;
-	astats->internal += arena_internal_get(arena);
-	astats->resident += base_resident + (((arena->nactive + arena->ndirty)
-	    << LG_PAGE));
-	astats->allocated_large += arena->stats.allocated_large;
-	astats->nmalloc_large += arena->stats.nmalloc_large;
-	astats->ndalloc_large += arena->stats.ndalloc_large;
-	astats->nrequests_large += arena->stats.nrequests_large;
-
-	for (i = 0; i < NSIZES - NBINS; i++) {
-		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
-		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
-		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
-		lstats[i].curlextents += arena->stats.lstats[i].curlextents;
-	}
-
-	if (config_tcache) {
-		tcache_bin_t *tbin;
-		tcache_t *tcache;
-
-		/* tcache_bytes counts currently cached bytes. */
-		astats->tcache_bytes = 0;
-		ql_foreach(tcache, &arena->tcache_ql, link) {
-			for (i = 0; i < nhbins; i++) {
-				tbin = &tcache->tbins[i];
-				astats->tcache_bytes += tbin->ncached *
-				    index2size(i);
-			}
-		}
-	}
-	malloc_mutex_unlock(tsdn, &arena->lock);
-
-	for (i = 0; i < NBINS; i++) {
-		arena_bin_t *bin = &arena->bins[i];
-
-		malloc_mutex_lock(tsdn, &bin->lock);
-		bstats[i].nmalloc += bin->stats.nmalloc;
-		bstats[i].ndalloc += bin->stats.ndalloc;
-		bstats[i].nrequests += bin->stats.nrequests;
-		bstats[i].curregs += bin->stats.curregs;
-		if (config_tcache) {
-			bstats[i].nfills += bin->stats.nfills;
-			bstats[i].nflushes += bin->stats.nflushes;
-		}
-		bstats[i].nslabs += bin->stats.nslabs;
-		bstats[i].reslabs += bin->stats.reslabs;
-		bstats[i].curslabs += bin->stats.curslabs;
-		malloc_mutex_unlock(tsdn, &bin->lock);
-	}
-}
-
-unsigned
-arena_nthreads_get(arena_t *arena, bool internal)
-{
-	return (atomic_read_u(&arena->nthreads[internal]));
-}
-
-void
-arena_nthreads_inc(arena_t *arena, bool internal)
-{
-	atomic_add_u(&arena->nthreads[internal], 1);
-}
-
-void
-arena_nthreads_dec(arena_t *arena, bool internal)
-{
-	atomic_sub_u(&arena->nthreads[internal], 1);
-}
-
-size_t
-arena_extent_sn_next(arena_t *arena)
-{
-	return (atomic_add_zu(&arena->extent_sn_next, 1) - 1);
-}
-
-arena_t *
-arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
-{
-	arena_t *arena;
-	base_t *base;
-	unsigned i;
-
-	if (ind == 0)
-		base = b0get();
-	else {
-		base = base_new(tsdn, ind, extent_hooks);
-		if (base == NULL)
-			return (NULL);
-	}
-
-	arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
-	if (arena == NULL)
-		goto label_error;
-
-	arena->nthreads[0] = arena->nthreads[1] = 0;
-	if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
-		goto label_error;
-
-	if (config_stats && config_tcache)
-		ql_new(&arena->tcache_ql);
-
-	if (config_prof)
-		arena->prof_accumbytes = 0;
-
-	if (config_cache_oblivious) {
-		/*
-		 * A nondeterministic seed based on the address of arena reduces
-		 * the likelihood of lockstep non-uniform cache index
-		 * utilization among identical concurrent processes, but at the
-		 * cost of test repeatability.  For debug builds, instead use a
-		 * deterministic seed.
-		 */
-		arena->offset_state = config_debug ? ind :
-		    (size_t)(uintptr_t)arena;
-	}
-
-	arena->extent_sn_next = 0;
-
-	arena->dss_prec = extent_dss_prec_get();
-
-	arena->purging = false;
-	arena->nactive = 0;
-	arena->ndirty = 0;
-
-	arena_decay_init(arena, arena_decay_time_default_get());
-
-	ql_new(&arena->large);
-	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
-	    WITNESS_RANK_ARENA_LARGE))
-		goto label_error;
-
-	for (i = 0; i < NPSIZES+1; i++) {
-		extent_heap_new(&arena->extents_cached[i]);
-		extent_heap_new(&arena->extents_retained[i]);
-	}
-
-	extent_init(&arena->extents_dirty, arena, NULL, 0, 0, 0, false, false,
-	    false, false);
-
-	if (malloc_mutex_init(&arena->extents_mtx, "arena_extents",
-	    WITNESS_RANK_ARENA_EXTENTS))
-		goto label_error;
-
-	if (!config_munmap)
-		arena->extent_grow_next = psz2ind(HUGEPAGE);
-
-	ql_new(&arena->extent_cache);
-	if (malloc_mutex_init(&arena->extent_cache_mtx, "arena_extent_cache",
-	    WITNESS_RANK_ARENA_EXTENT_CACHE))
-		goto label_error;
-
-	/* Initialize bins. */
-	for (i = 0; i < NBINS; i++) {
-		arena_bin_t *bin = &arena->bins[i];
-		if (malloc_mutex_init(&bin->lock, "arena_bin",
-		    WITNESS_RANK_ARENA_BIN))
-			goto label_error;
-		bin->slabcur = NULL;
-		extent_heap_new(&bin->slabs_nonfull);
-		extent_init(&bin->slabs_full, arena, NULL, 0, 0, 0, false,
-		    false, false, false);
-		if (config_stats)
-			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-	}
-
-	arena->base = base;
-
-	return (arena);
-label_error:
-	if (ind != 0)
-		base_delete(base);
-	return (NULL);
-}
-
-void
-arena_boot(void)
-{
-	arena_decay_time_default_set(opt_decay_time);
-}
-
-void
-arena_prefork0(tsdn_t *tsdn, arena_t *arena)
-{
-	malloc_mutex_prefork(tsdn, &arena->lock);
-}
-
-void
-arena_prefork1(tsdn_t *tsdn, arena_t *arena)
-{
-	malloc_mutex_prefork(tsdn, &arena->extents_mtx);
-}
-
-void
-arena_prefork2(tsdn_t *tsdn, arena_t *arena)
-{
-	malloc_mutex_prefork(tsdn, &arena->extent_cache_mtx);
-}
-
-void
-arena_prefork3(tsdn_t *tsdn, arena_t *arena)
-{
-	unsigned i;
-
-	base_prefork(tsdn, arena->base);
-	for (i = 0; i < NBINS; i++)
-		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
-	malloc_mutex_prefork(tsdn, &arena->large_mtx);
-}
-
-void
-arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
-{
-	unsigned i;
-
-	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
-	for (i = 0; i < NBINS; i++)
-		malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
-	base_postfork_parent(tsdn, arena->base);
-	malloc_mutex_postfork_parent(tsdn, &arena->extent_cache_mtx);
-	malloc_mutex_postfork_parent(tsdn, &arena->extents_mtx);
-	malloc_mutex_postfork_parent(tsdn, &arena->lock);
-}
-
-void
-arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
-{
-	unsigned i;
-
-	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
-	for (i = 0; i < NBINS; i++)
-		malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
-	base_postfork_child(tsdn, arena->base);
-	malloc_mutex_postfork_child(tsdn, &arena->extent_cache_mtx);
-	malloc_mutex_postfork_child(tsdn, &arena->extents_mtx);
-	malloc_mutex_postfork_child(tsdn, &arena->lock);
-}
diff --git a/zircon/third_party/ulib/jemalloc/src/atomic.c b/zircon/third_party/ulib/jemalloc/src/atomic.c
deleted file mode 100644
index 77ee313..0000000
--- a/zircon/third_party/ulib/jemalloc/src/atomic.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define	JEMALLOC_ATOMIC_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/zircon/third_party/ulib/jemalloc/src/base.c b/zircon/third_party/ulib/jemalloc/src/base.c
deleted file mode 100644
index 7c0ef2c..0000000
--- a/zircon/third_party/ulib/jemalloc/src/base.c
+++ /dev/null
@@ -1,363 +0,0 @@
-#define	JEMALLOC_BASE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-static base_t	*b0;
-
-/******************************************************************************/
-
-static void *
-base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size)
-{
-	void *addr;
-	bool zero = true;
-	bool commit = true;
-
-	assert(size == HUGEPAGE_CEILING(size));
-
-	if (extent_hooks == &extent_hooks_default)
-		addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit);
-	else {
-		addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE,
-		    &zero, &commit, ind);
-	}
-
-	return (addr);
-}
-
-static void
-base_unmap(extent_hooks_t *extent_hooks, unsigned ind, void *addr, size_t size)
-{
-	/*
-	 * Cascade through dalloc, decommit, purge_lazy, and purge_forced,
-	 * stopping at first success.  This cascade is performed for consistency
-	 * with the cascade in extent_dalloc_wrapper() because an application's
-	 * custom hooks may not support e.g. dalloc.  This function is only ever
-	 * called as a side effect of arena destruction, so although it might
-	 * seem pointless to do anything besides dalloc here, the application
-	 * may in fact want the end state of all associated virtual memory to be
-	 * in some consistent-but-allocated state.
-	 */
-	if (extent_hooks == &extent_hooks_default) {
-		if (!extent_dalloc_mmap(addr, size))
-			return;
-		if (!pages_decommit(addr, size))
-			return;
-		if (!pages_purge_lazy(addr, size))
-			return;
-		if (!pages_purge_forced(addr, size))
-			return;
-		/* Nothing worked.  This should never happen. */
-		not_reached();
-	} else {
-		if (extent_hooks->dalloc != NULL &&
-		    !extent_hooks->dalloc(extent_hooks, addr, size, true, ind))
-			return;
-		if (extent_hooks->decommit != NULL &&
-		    !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
-		    ind))
-			return;
-		if (extent_hooks->purge_lazy != NULL &&
-		    !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
-		    ind))
-			return;
-		if (extent_hooks->purge_forced != NULL &&
-		    !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
-		    size, ind))
-			return;
-		/* Nothing worked.  That's the application's problem. */
-	}
-}
-
-static void
-base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
-    size_t size)
-{
-	size_t sn;
-
-	sn = *extent_sn_next;
-	(*extent_sn_next)++;
-
-	extent_init(extent, NULL, addr, size, 0, sn, true, true, true, false);
-}
-
-static void *
-base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
-    size_t alignment)
-{
-	void *ret;
-
-	assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
-	assert(size == ALIGNMENT_CEILING(size, alignment));
-
-	*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
-	    alignment) - (uintptr_t)extent_addr_get(extent);
-	ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
-	assert(extent_size_get(extent) >= *gap_size + size);
-	extent_init(extent, NULL, (void *)((uintptr_t)extent_addr_get(extent) +
-	    *gap_size + size), extent_size_get(extent) - *gap_size - size, 0,
-	    extent_sn_get(extent), true, true, true, false);
-	return (ret);
-}
-
-static void
-base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
-    size_t gap_size, void *addr, size_t size)
-{
-	if (extent_size_get(extent) > 0) {
-		/*
-		 * Compute the index for the largest size class that does not
-		 * exceed extent's size.
-		 */
-		szind_t index_floor = size2index(extent_size_get(extent) + 1) -
-		    1;
-		extent_heap_insert(&base->avail[index_floor], extent);
-	}
-
-	if (config_stats) {
-		base->allocated += size;
-		/*
-		 * Add one PAGE to base_resident for every page boundary that is
-		 * crossed by the new allocation.
-		 */
-		base->resident += PAGE_CEILING((uintptr_t)addr + size) -
-		    PAGE_CEILING((uintptr_t)addr - gap_size);
-		assert(base->allocated <= base->resident);
-		assert(base->resident <= base->mapped);
-	}
-}
-
-static void *
-base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
-    size_t size, size_t alignment)
-{
-	void *ret;
-	size_t gap_size;
-
-	ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
-	base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size);
-	return (ret);
-}
-
-/*
- * Allocate a block of virtual memory that is large enough to start with a
- * base_block_t header, followed by an object of specified size and alignment.
- * On success a pointer to the initialized base_block_t header is returned.
- */
-static base_block_t *
-base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind,
-    size_t *extent_sn_next, size_t size, size_t alignment)
-{
-	base_block_t *block;
-	size_t usize, header_size, gap_size, block_size;
-
-	alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
-	usize = ALIGNMENT_CEILING(size, alignment);
-	header_size = sizeof(base_block_t);
-	gap_size = ALIGNMENT_CEILING(header_size, alignment) - header_size;
-	block_size = HUGEPAGE_CEILING(header_size + gap_size + usize);
-	block = (base_block_t *)base_map(extent_hooks, ind, block_size);
-	if (block == NULL)
-		return (NULL);
-	block->size = block_size;
-	block->next = NULL;
-	assert(block_size >= header_size);
-	base_extent_init(extent_sn_next, &block->extent,
-	    (void *)((uintptr_t)block + header_size), block_size - header_size);
-	return (block);
-}
-
-/*
- * Allocate an extent that is at least as large as specified size, with
- * specified alignment.
- */
-static extent_t *
-base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment)
-{
-	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
-	base_block_t *block;
-
-	malloc_mutex_assert_owner(tsdn, &base->mtx);
-
-	block = base_block_alloc(extent_hooks, base_ind_get(base),
-	    &base->extent_sn_next, size, alignment);
-	if (block == NULL)
-		return (NULL);
-	block->next = base->blocks;
-	base->blocks = block;
-	if (config_stats) {
-		base->allocated += sizeof(base_block_t);
-		base->resident += PAGE_CEILING(sizeof(base_block_t));
-		base->mapped += block->size;
-		assert(base->allocated <= base->resident);
-		assert(base->resident <= base->mapped);
-	}
-	return (&block->extent);
-}
-
-base_t *
-b0get(void)
-{
-	return (b0);
-}
-
-base_t *
-base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
-{
-	base_t *base;
-	size_t extent_sn_next, base_alignment, base_size, gap_size;
-	base_block_t *block;
-	szind_t i;
-
-	extent_sn_next = 0;
-	block = base_block_alloc(extent_hooks, ind, &extent_sn_next,
-	    sizeof(base_t), QUANTUM);
-	if (block == NULL)
-		return (NULL);
-
-	base_alignment = CACHELINE;
-	base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
-	base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
-	    &gap_size, base_size, base_alignment);
-	base->ind = ind;
-	base->extent_hooks = extent_hooks;
-	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE)) {
-		base_unmap(extent_hooks, ind, block, block->size);
-		return (NULL);
-	}
-	base->extent_sn_next = extent_sn_next;
-	base->blocks = block;
-	for (i = 0; i < NSIZES; i++)
-		extent_heap_new(&base->avail[i]);
-	if (config_stats) {
-		base->allocated = sizeof(base_block_t);
-		base->resident = PAGE_CEILING(sizeof(base_block_t));
-		base->mapped = block->size;
-		assert(base->allocated <= base->resident);
-		assert(base->resident <= base->mapped);
-	}
-	base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base,
-	    base_size);
-
-	return (base);
-}
-
-void
-base_delete(base_t *base)
-{
-	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
-	base_block_t *next = base->blocks;
-	do {
-		base_block_t *block = next;
-		next = block->next;
-		base_unmap(extent_hooks, base_ind_get(base), block,
-		    block->size);
-	} while (next != NULL);
-}
-
-extent_hooks_t *
-base_extent_hooks_get(base_t *base)
-{
-	return ((extent_hooks_t *)atomic_read_p(&base->extent_hooks_pun));
-}
-
-extent_hooks_t *
-base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks)
-{
-	extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
-	union {
-		extent_hooks_t	**h;
-		void		**v;
-	} u;
-
-	u.h = &base->extent_hooks;
-	atomic_write_p(u.v, extent_hooks);
-
-	return (old_extent_hooks);
-}
-
-/*
- * base_alloc() returns zeroed memory, which is always demand-zeroed for the
- * auto arenas, in order to make multi-page sparse data structures such as radix
- * tree nodes efficient with respect to physical memory usage.  Upon success a
- * pointer to at least size bytes with specified alignment is returned.  Note
- * that size is rounded up to the nearest multiple of alignment to avoid false
- * sharing.
- */
-void *
-base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment)
-{
-	void *ret;
-	size_t usize, asize;
-	szind_t i;
-	extent_t *extent;
-
-	alignment = QUANTUM_CEILING(alignment);
-	usize = ALIGNMENT_CEILING(size, alignment);
-	asize = usize + alignment - QUANTUM;
-
-	extent = NULL;
-	malloc_mutex_lock(tsdn, &base->mtx);
-	for (i = size2index(asize); i < NSIZES; i++) {
-		extent = extent_heap_remove_first(&base->avail[i]);
-		if (extent != NULL) {
-			/* Use existing space. */
-			break;
-		}
-	}
-	if (extent == NULL) {
-		/* Try to allocate more space. */
-		extent = base_extent_alloc(tsdn, base, usize, alignment);
-	}
-	if (extent == NULL) {
-		ret = NULL;
-		goto label_return;
-	}
-
-	ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment);
-label_return:
-	malloc_mutex_unlock(tsdn, &base->mtx);
-	return (ret);
-}
-
-void
-base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
-    size_t *mapped)
-{
-	cassert(config_stats);
-
-	malloc_mutex_lock(tsdn, &base->mtx);
-	assert(base->allocated <= base->resident);
-	assert(base->resident <= base->mapped);
-	*allocated = base->allocated;
-	*resident = base->resident;
-	*mapped = base->mapped;
-	malloc_mutex_unlock(tsdn, &base->mtx);
-}
-
-void
-base_prefork(tsdn_t *tsdn, base_t *base)
-{
-	malloc_mutex_prefork(tsdn, &base->mtx);
-}
-
-void
-base_postfork_parent(tsdn_t *tsdn, base_t *base)
-{
-	malloc_mutex_postfork_parent(tsdn, &base->mtx);
-}
-
-void
-base_postfork_child(tsdn_t *tsdn, base_t *base)
-{
-	malloc_mutex_postfork_child(tsdn, &base->mtx);
-}
-
-bool
-base_boot(tsdn_t *tsdn)
-{
-	b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
-	return (b0 == NULL);
-}
diff --git a/zircon/third_party/ulib/jemalloc/src/bitmap.c b/zircon/third_party/ulib/jemalloc/src/bitmap.c
deleted file mode 100644
index 3d27f05..0000000
--- a/zircon/third_party/ulib/jemalloc/src/bitmap.c
+++ /dev/null
@@ -1,107 +0,0 @@
-#define	JEMALLOC_BITMAP_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-
-#ifdef BITMAP_USE_TREE
-
-void
-bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
-{
-	unsigned i;
-	size_t group_count;
-
-	assert(nbits > 0);
-	assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
-
-	/*
-	 * Compute the number of groups necessary to store nbits bits, and
-	 * progressively work upward through the levels until reaching a level
-	 * that requires only one group.
-	 */
-	binfo->levels[0].group_offset = 0;
-	group_count = BITMAP_BITS2GROUPS(nbits);
-	for (i = 1; group_count > 1; i++) {
-		assert(i < BITMAP_MAX_LEVELS);
-		binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
-		    + group_count;
-		group_count = BITMAP_BITS2GROUPS(group_count);
-	}
-	binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
-	    + group_count;
-	assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
-	binfo->nlevels = i;
-	binfo->nbits = nbits;
-}
-
-static size_t
-bitmap_info_ngroups(const bitmap_info_t *binfo)
-{
-	return (binfo->levels[binfo->nlevels].group_offset);
-}
-
-void
-bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
-	size_t extra;
-	unsigned i;
-
-	/*
-	 * Bits are actually inverted with regard to the external bitmap
-	 * interface, so the bitmap starts out with all 1 bits, except for
-	 * trailing unused bits (if any).  Note that each group uses bit 0 to
-	 * correspond to the first logical bit in the group, so extra bits
-	 * are the most significant bits of the last group.
-	 */
-	memset(bitmap, 0xffU, bitmap_size(binfo));
-	extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
-	    & BITMAP_GROUP_NBITS_MASK;
-	if (extra != 0)
-		bitmap[binfo->levels[1].group_offset - 1] >>= extra;
-	for (i = 1; i < binfo->nlevels; i++) {
-		size_t group_count = binfo->levels[i].group_offset -
-		    binfo->levels[i-1].group_offset;
-		extra = (BITMAP_GROUP_NBITS - (group_count &
-		    BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
-		if (extra != 0)
-			bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
-	}
-}
-
-#else /* BITMAP_USE_TREE */
-
-void
-bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
-{
-	assert(nbits > 0);
-	assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
-
-	binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
-	binfo->nbits = nbits;
-}
-
-static size_t
-bitmap_info_ngroups(const bitmap_info_t *binfo)
-{
-	return (binfo->ngroups);
-}
-
-void
-bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
-	size_t extra;
-
-	memset(bitmap, 0xffU, bitmap_size(binfo));
-	extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
-	    & BITMAP_GROUP_NBITS_MASK;
-	if (extra != 0)
-		bitmap[binfo->ngroups - 1] >>= extra;
-}
-
-#endif /* BITMAP_USE_TREE */
-
-size_t
-bitmap_size(const bitmap_info_t *binfo)
-{
-	return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
-}
diff --git a/zircon/third_party/ulib/jemalloc/src/ckh.c b/zircon/third_party/ulib/jemalloc/src/ckh.c
deleted file mode 100644
index fe79862..0000000
--- a/zircon/third_party/ulib/jemalloc/src/ckh.c
+++ /dev/null
@@ -1,569 +0,0 @@
-/*
- *******************************************************************************
- * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
- * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
- * functions are employed.  The original cuckoo hashing algorithm was described
- * in:
- *
- *   Pagh, R., F.F. Rodler (2004) Cuckoo Hashing.  Journal of Algorithms
- *     51(2):122-144.
- *
- * Generalization of cuckoo hashing was discussed in:
- *
- *   Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
- *     alternative to traditional hash tables.  In Proceedings of the 7th
- *     Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
- *     January 2006.
- *
- * This implementation uses precisely two hash functions because that is the
- * fewest that can work, and supporting multiple hashes is an implementation
- * burden.  Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
- * that shows approximate expected maximum load factors for various
- * configurations:
- *
- *           |         #cells/bucket         |
- *   #hashes |   1   |   2   |   4   |   8   |
- *   --------+-------+-------+-------+-------+
- *         1 | 0.006 | 0.006 | 0.03  | 0.12  |
- *         2 | 0.49  | 0.86  |>0.93< |>0.96< |
- *         3 | 0.91  | 0.97  | 0.98  | 0.999 |
- *         4 | 0.97  | 0.99  | 0.999 |       |
- *
- * The number of cells per bucket is chosen such that a bucket fits in one cache
- * line.  So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
- * respectively.
- *
- ******************************************************************************/
-#define	JEMALLOC_CKH_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static bool	ckh_grow(tsd_t *tsd, ckh_t *ckh);
-static void	ckh_shrink(tsd_t *tsd, ckh_t *ckh);
-
-/******************************************************************************/
-
-/*
- * Search bucket for key and return the cell number if found; SIZE_T_MAX
- * otherwise.
- */
-JEMALLOC_INLINE_C size_t
-ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
-{
-	ckhc_t *cell;
-	unsigned i;
-
-	for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
-		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
-		if (cell->key != NULL && ckh->keycomp(key, cell->key))
-			return ((bucket << LG_CKH_BUCKET_CELLS) + i);
-	}
-
-	return (SIZE_T_MAX);
-}
-
-/*
- * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
- */
-JEMALLOC_INLINE_C size_t
-ckh_isearch(ckh_t *ckh, const void *key)
-{
-	size_t hashes[2], bucket, cell;
-
-	assert(ckh != NULL);
-
-	ckh->hash(key, hashes);
-
-	/* Search primary bucket. */
-	bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
-	cell = ckh_bucket_search(ckh, bucket, key);
-	if (cell != SIZE_T_MAX)
-		return (cell);
-
-	/* Search secondary bucket. */
-	bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
-	cell = ckh_bucket_search(ckh, bucket, key);
-	return (cell);
-}
-
-JEMALLOC_INLINE_C bool
-ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
-    const void *data)
-{
-	ckhc_t *cell;
-	unsigned offset, i;
-
-	/*
-	 * Cycle through the cells in the bucket, starting at a random position.
-	 * The randomness avoids worst-case search overhead as buckets fill up.
-	 */
-	offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
-	    LG_CKH_BUCKET_CELLS);
-	for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
-		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
-		    ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
-		if (cell->key == NULL) {
-			cell->key = key;
-			cell->data = data;
-			ckh->count++;
-			return (false);
-		}
-	}
-
-	return (true);
-}
-
-/*
- * No space is available in bucket.  Randomly evict an item, then try to find an
- * alternate location for that item.  Iteratively repeat this
- * eviction/relocation procedure until either success or detection of an
- * eviction/relocation bucket cycle.
- */
-JEMALLOC_INLINE_C bool
-ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
-    void const **argdata)
-{
-	const void *key, *data, *tkey, *tdata;
-	ckhc_t *cell;
-	size_t hashes[2], bucket, tbucket;
-	unsigned i;
-
-	bucket = argbucket;
-	key = *argkey;
-	data = *argdata;
-	while (true) {
-		/*
-		 * Choose a random item within the bucket to evict.  This is
-		 * critical to correct function, because without (eventually)
-		 * evicting all items within a bucket during iteration, it
-		 * would be possible to get stuck in an infinite loop if there
-		 * were an item for which both hashes indicated the same
-		 * bucket.
-		 */
-		i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
-		    LG_CKH_BUCKET_CELLS);
-		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
-		assert(cell->key != NULL);
-
-		/* Swap cell->{key,data} and {key,data} (evict). */
-		tkey = cell->key; tdata = cell->data;
-		cell->key = key; cell->data = data;
-		key = tkey; data = tdata;
-
-#ifdef CKH_COUNT
-		ckh->nrelocs++;
-#endif
-
-		/* Find the alternate bucket for the evicted item. */
-		ckh->hash(key, hashes);
-		tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
-		if (tbucket == bucket) {
-			tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
-			    - 1);
-			/*
-			 * It may be that (tbucket == bucket) still, if the
-			 * item's hashes both indicate this bucket.  However,
-			 * we are guaranteed to eventually escape this bucket
-			 * during iteration, assuming pseudo-random item
-			 * selection (true randomness would make infinite
-			 * looping a remote possibility).  The reason we can
-			 * never get trapped forever is that there are two
-			 * cases:
-			 *
-			 * 1) This bucket == argbucket, so we will quickly
-			 *    detect an eviction cycle and terminate.
-			 * 2) An item was evicted to this bucket from another,
-			 *    which means that at least one item in this bucket
-			 *    has hashes that indicate distinct buckets.
-			 */
-		}
-		/* Check for a cycle. */
-		if (tbucket == argbucket) {
-			*argkey = key;
-			*argdata = data;
-			return (true);
-		}
-
-		bucket = tbucket;
-		if (!ckh_try_bucket_insert(ckh, bucket, key, data))
-			return (false);
-	}
-}
-
-JEMALLOC_INLINE_C bool
-ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
-{
-	size_t hashes[2], bucket;
-	const void *key = *argkey;
-	const void *data = *argdata;
-
-	ckh->hash(key, hashes);
-
-	/* Try to insert in primary bucket. */
-	bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
-	if (!ckh_try_bucket_insert(ckh, bucket, key, data))
-		return (false);
-
-	/* Try to insert in secondary bucket. */
-	bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
-	if (!ckh_try_bucket_insert(ckh, bucket, key, data))
-		return (false);
-
-	/*
-	 * Try to find a place for this item via iterative eviction/relocation.
-	 */
-	return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
-}
-
-/*
- * Try to rebuild the hash table from scratch by inserting all items from the
- * old table into the new.
- */
-JEMALLOC_INLINE_C bool
-ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
-{
-	size_t count, i, nins;
-	const void *key, *data;
-
-	count = ckh->count;
-	ckh->count = 0;
-	for (i = nins = 0; nins < count; i++) {
-		if (aTab[i].key != NULL) {
-			key = aTab[i].key;
-			data = aTab[i].data;
-			if (ckh_try_insert(ckh, &key, &data)) {
-				ckh->count = count;
-				return (true);
-			}
-			nins++;
-		}
-	}
-
-	return (false);
-}
-
-static bool
-ckh_grow(tsd_t *tsd, ckh_t *ckh)
-{
-	bool ret;
-	ckhc_t *tab, *ttab;
-	unsigned lg_prevbuckets, lg_curcells;
-
-#ifdef CKH_COUNT
-	ckh->ngrows++;
-#endif
-
-	/*
-	 * It is possible (though unlikely, given well behaved hashes) that the
-	 * table will have to be doubled more than once in order to create a
-	 * usable table.
-	 */
-	lg_prevbuckets = ckh->lg_curbuckets;
-	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
-	while (true) {
-		size_t usize;
-
-		lg_curcells++;
-		usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
-			ret = true;
-			goto label_return;
-		}
-		tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
-		    true, NULL, true, arena_ichoose(tsd, NULL));
-		if (tab == NULL) {
-			ret = true;
-			goto label_return;
-		}
-		/* Swap in new table. */
-		ttab = ckh->tab;
-		ckh->tab = tab;
-		tab = ttab;
-		ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
-
-		if (!ckh_rebuild(ckh, tab)) {
-			idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tab),
-			    tab, NULL, true, true);
-			break;
-		}
-
-		/* Rebuilding failed, so back out partially rebuilt table. */
-		idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), ckh->tab),
-		    ckh->tab, NULL, true, true);
-		ckh->tab = tab;
-		ckh->lg_curbuckets = lg_prevbuckets;
-	}
-
-	ret = false;
-label_return:
-	return (ret);
-}
-
-static void
-ckh_shrink(tsd_t *tsd, ckh_t *ckh)
-{
-	ckhc_t *tab, *ttab;
-	size_t usize;
-	unsigned lg_prevbuckets, lg_curcells;
-
-	/*
-	 * It is possible (though unlikely, given well behaved hashes) that the
-	 * table rebuild will fail.
-	 */
-	lg_prevbuckets = ckh->lg_curbuckets;
-	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
-	usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
-		return;
-	tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
-	    true, arena_ichoose(tsd, NULL));
-	if (tab == NULL) {
-		/*
-		 * An OOM error isn't worth propagating, since it doesn't
-		 * prevent this or future operations from proceeding.
-		 */
-		return;
-	}
-	/* Swap in new table. */
-	ttab = ckh->tab;
-	ckh->tab = tab;
-	tab = ttab;
-	ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
-
-	if (!ckh_rebuild(ckh, tab)) {
-		idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tab), tab, NULL,
-		    true, true);
-#ifdef CKH_COUNT
-		ckh->nshrinks++;
-#endif
-		return;
-	}
-
-	/* Rebuilding failed, so back out partially rebuilt table. */
-	idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), ckh->tab), ckh->tab,
-	    NULL, true, true);
-	ckh->tab = tab;
-	ckh->lg_curbuckets = lg_prevbuckets;
-#ifdef CKH_COUNT
-	ckh->nshrinkfails++;
-#endif
-}
-
-bool
-ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
-    ckh_keycomp_t *keycomp)
-{
-	bool ret;
-	size_t mincells, usize;
-	unsigned lg_mincells;
-
-	assert(minitems > 0);
-	assert(hash != NULL);
-	assert(keycomp != NULL);
-
-#ifdef CKH_COUNT
-	ckh->ngrows = 0;
-	ckh->nshrinks = 0;
-	ckh->nshrinkfails = 0;
-	ckh->ninserts = 0;
-	ckh->nrelocs = 0;
-#endif
-	ckh->prng_state = 42; /* Value doesn't really matter. */
-	ckh->count = 0;
-
-	/*
-	 * Find the minimum power of 2 that is large enough to fit minitems
-	 * entries.  We are using (2+,2) cuckoo hashing, which has an expected
-	 * maximum load factor of at least ~0.86, so 0.75 is a conservative load
-	 * factor that will typically allow mincells items to fit without ever
-	 * growing the table.
-	 */
-	assert(LG_CKH_BUCKET_CELLS > 0);
-	mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
-	for (lg_mincells = LG_CKH_BUCKET_CELLS;
-	    (ZU(1) << lg_mincells) < mincells;
-	    lg_mincells++)
-		; /* Do nothing. */
-	ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
-	ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
-	ckh->hash = hash;
-	ckh->keycomp = keycomp;
-
-	usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
-	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
-		ret = true;
-		goto label_return;
-	}
-	ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
-	    NULL, true, arena_ichoose(tsd, NULL));
-	if (ckh->tab == NULL) {
-		ret = true;
-		goto label_return;
-	}
-
-	ret = false;
-label_return:
-	return (ret);
-}
-
-void
-ckh_delete(tsd_t *tsd, ckh_t *ckh)
-{
-	assert(ckh != NULL);
-
-#ifdef CKH_VERBOSE
-	malloc_printf(
-	    "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
-	    " nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
-	    " nrelocs: %"FMTu64"\n", __func__, ckh,
-	    (unsigned long long)ckh->ngrows,
-	    (unsigned long long)ckh->nshrinks,
-	    (unsigned long long)ckh->nshrinkfails,
-	    (unsigned long long)ckh->ninserts,
-	    (unsigned long long)ckh->nrelocs);
-#endif
-
-	idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), ckh->tab), ckh->tab,
-	    NULL, true, true);
-	if (config_debug)
-		memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
-}
-
-size_t
-ckh_count(ckh_t *ckh)
-{
-	assert(ckh != NULL);
-
-	return (ckh->count);
-}
-
-bool
-ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
-{
-	size_t i, ncells;
-
-	for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
-	    LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
-		if (ckh->tab[i].key != NULL) {
-			if (key != NULL)
-				*key = (void *)ckh->tab[i].key;
-			if (data != NULL)
-				*data = (void *)ckh->tab[i].data;
-			*tabind = i + 1;
-			return (false);
-		}
-	}
-
-	return (true);
-}
-
-bool
-ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
-{
-	bool ret;
-
-	assert(ckh != NULL);
-	assert(ckh_search(ckh, key, NULL, NULL));
-
-#ifdef CKH_COUNT
-	ckh->ninserts++;
-#endif
-
-	while (ckh_try_insert(ckh, &key, &data)) {
-		if (ckh_grow(tsd, ckh)) {
-			ret = true;
-			goto label_return;
-		}
-	}
-
-	ret = false;
-label_return:
-	return (ret);
-}
-
-bool
-ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
-    void **data)
-{
-	size_t cell;
-
-	assert(ckh != NULL);
-
-	cell = ckh_isearch(ckh, searchkey);
-	if (cell != SIZE_T_MAX) {
-		if (key != NULL)
-			*key = (void *)ckh->tab[cell].key;
-		if (data != NULL)
-			*data = (void *)ckh->tab[cell].data;
-		ckh->tab[cell].key = NULL;
-		ckh->tab[cell].data = NULL; /* Not necessary. */
-
-		ckh->count--;
-		/* Try to halve the table if it is less than 1/4 full. */
-		if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
-		    + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
-		    > ckh->lg_minbuckets) {
-			/* Ignore error due to OOM. */
-			ckh_shrink(tsd, ckh);
-		}
-
-		return (false);
-	}
-
-	return (true);
-}
-
-bool
-ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
-{
-	size_t cell;
-
-	assert(ckh != NULL);
-
-	cell = ckh_isearch(ckh, searchkey);
-	if (cell != SIZE_T_MAX) {
-		if (key != NULL)
-			*key = (void *)ckh->tab[cell].key;
-		if (data != NULL)
-			*data = (void *)ckh->tab[cell].data;
-		return (false);
-	}
-
-	return (true);
-}
-
-void
-ckh_string_hash(const void *key, size_t r_hash[2])
-{
-	hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
-}
-
-bool
-ckh_string_keycomp(const void *k1, const void *k2)
-{
-	assert(k1 != NULL);
-	assert(k2 != NULL);
-
-	return (strcmp((char *)k1, (char *)k2) ? false : true);
-}
-
-void
-ckh_pointer_hash(const void *key, size_t r_hash[2])
-{
-	union {
-		const void	*v;
-		size_t		i;
-	} u;
-
-	assert(sizeof(u.v) == sizeof(u.i));
-	u.v = key;
-	hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
-}
-
-bool
-ckh_pointer_keycomp(const void *k1, const void *k2)
-{
-	return ((k1 == k2) ? true : false);
-}
diff --git a/zircon/third_party/ulib/jemalloc/src/ctl.c b/zircon/third_party/ulib/jemalloc/src/ctl.c
deleted file mode 100644
index b19c9d3..0000000
--- a/zircon/third_party/ulib/jemalloc/src/ctl.c
+++ /dev/null
@@ -1,2234 +0,0 @@
-#define	JEMALLOC_CTL_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-/*
- * ctl_mtx protects the following:
- * - ctl_stats->*
- */
-static malloc_mutex_t	ctl_mtx;
-static bool		ctl_initialized;
-static ctl_stats_t	*ctl_stats;
-static ctl_arenas_t	*ctl_arenas;
-
-/******************************************************************************/
-/* Helpers for named and indexed nodes. */
-
-JEMALLOC_INLINE_C const ctl_named_node_t *
-ctl_named_node(const ctl_node_t *node)
-{
-	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
-}
-
-JEMALLOC_INLINE_C const ctl_named_node_t *
-ctl_named_children(const ctl_named_node_t *node, size_t index)
-{
-	const ctl_named_node_t *children = ctl_named_node(node->children);
-
-	return (children ? &children[index] : NULL);
-}
-
-JEMALLOC_INLINE_C const ctl_indexed_node_t *
-ctl_indexed_node(const ctl_node_t *node)
-{
-	return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
-}
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-#define	CTL_PROTO(n)							\
-static int	n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,	\
-    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-
-#define	INDEX_PROTO(n)							\
-static const ctl_named_node_t	*n##_index(tsdn_t *tsdn,		\
-    const size_t *mib, size_t miblen, size_t i);
-
-CTL_PROTO(version)
-CTL_PROTO(epoch)
-CTL_PROTO(thread_tcache_enabled)
-CTL_PROTO(thread_tcache_flush)
-CTL_PROTO(thread_prof_name)
-CTL_PROTO(thread_prof_active)
-CTL_PROTO(thread_arena)
-CTL_PROTO(thread_allocated)
-CTL_PROTO(thread_allocatedp)
-CTL_PROTO(thread_deallocated)
-CTL_PROTO(thread_deallocatedp)
-CTL_PROTO(config_cache_oblivious)
-CTL_PROTO(config_debug)
-CTL_PROTO(config_fill)
-CTL_PROTO(config_lazy_lock)
-CTL_PROTO(config_malloc_conf)
-CTL_PROTO(config_munmap)
-CTL_PROTO(config_prof)
-CTL_PROTO(config_prof_libgcc)
-CTL_PROTO(config_prof_libunwind)
-CTL_PROTO(config_stats)
-CTL_PROTO(config_tcache)
-CTL_PROTO(config_tls)
-CTL_PROTO(config_utrace)
-CTL_PROTO(config_xmalloc)
-CTL_PROTO(opt_abort)
-CTL_PROTO(opt_dss)
-CTL_PROTO(opt_narenas)
-CTL_PROTO(opt_decay_time)
-CTL_PROTO(opt_stats_print)
-CTL_PROTO(opt_junk)
-CTL_PROTO(opt_zero)
-CTL_PROTO(opt_utrace)
-CTL_PROTO(opt_xmalloc)
-CTL_PROTO(opt_tcache)
-CTL_PROTO(opt_lg_tcache_max)
-CTL_PROTO(opt_prof)
-CTL_PROTO(opt_prof_prefix)
-CTL_PROTO(opt_prof_active)
-CTL_PROTO(opt_prof_thread_active_init)
-CTL_PROTO(opt_lg_prof_sample)
-CTL_PROTO(opt_lg_prof_interval)
-CTL_PROTO(opt_prof_gdump)
-CTL_PROTO(opt_prof_final)
-CTL_PROTO(opt_prof_leak)
-CTL_PROTO(opt_prof_accum)
-CTL_PROTO(tcache_create)
-CTL_PROTO(tcache_flush)
-CTL_PROTO(tcache_destroy)
-CTL_PROTO(arena_i_initialized)
-CTL_PROTO(arena_i_purge)
-CTL_PROTO(arena_i_decay)
-CTL_PROTO(arena_i_reset)
-CTL_PROTO(arena_i_destroy)
-CTL_PROTO(arena_i_dss)
-CTL_PROTO(arena_i_decay_time)
-CTL_PROTO(arena_i_extent_hooks)
-INDEX_PROTO(arena_i)
-CTL_PROTO(arenas_bin_i_size)
-CTL_PROTO(arenas_bin_i_nregs)
-CTL_PROTO(arenas_bin_i_slab_size)
-INDEX_PROTO(arenas_bin_i)
-CTL_PROTO(arenas_lextent_i_size)
-INDEX_PROTO(arenas_lextent_i)
-CTL_PROTO(arenas_narenas)
-CTL_PROTO(arenas_decay_time)
-CTL_PROTO(arenas_quantum)
-CTL_PROTO(arenas_page)
-CTL_PROTO(arenas_tcache_max)
-CTL_PROTO(arenas_nbins)
-CTL_PROTO(arenas_nhbins)
-CTL_PROTO(arenas_nlextents)
-CTL_PROTO(arenas_create)
-CTL_PROTO(prof_thread_active_init)
-CTL_PROTO(prof_active)
-CTL_PROTO(prof_dump)
-CTL_PROTO(prof_gdump)
-CTL_PROTO(prof_reset)
-CTL_PROTO(prof_interval)
-CTL_PROTO(lg_prof_sample)
-CTL_PROTO(stats_arenas_i_small_allocated)
-CTL_PROTO(stats_arenas_i_small_nmalloc)
-CTL_PROTO(stats_arenas_i_small_ndalloc)
-CTL_PROTO(stats_arenas_i_small_nrequests)
-CTL_PROTO(stats_arenas_i_large_allocated)
-CTL_PROTO(stats_arenas_i_large_nmalloc)
-CTL_PROTO(stats_arenas_i_large_ndalloc)
-CTL_PROTO(stats_arenas_i_large_nrequests)
-CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
-CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
-CTL_PROTO(stats_arenas_i_bins_j_nrequests)
-CTL_PROTO(stats_arenas_i_bins_j_curregs)
-CTL_PROTO(stats_arenas_i_bins_j_nfills)
-CTL_PROTO(stats_arenas_i_bins_j_nflushes)
-CTL_PROTO(stats_arenas_i_bins_j_nslabs)
-CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
-CTL_PROTO(stats_arenas_i_bins_j_curslabs)
-INDEX_PROTO(stats_arenas_i_bins_j)
-CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
-CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
-CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
-CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
-INDEX_PROTO(stats_arenas_i_lextents_j)
-CTL_PROTO(stats_arenas_i_nthreads)
-CTL_PROTO(stats_arenas_i_dss)
-CTL_PROTO(stats_arenas_i_decay_time)
-CTL_PROTO(stats_arenas_i_pactive)
-CTL_PROTO(stats_arenas_i_pdirty)
-CTL_PROTO(stats_arenas_i_mapped)
-CTL_PROTO(stats_arenas_i_retained)
-CTL_PROTO(stats_arenas_i_npurge)
-CTL_PROTO(stats_arenas_i_nmadvise)
-CTL_PROTO(stats_arenas_i_purged)
-CTL_PROTO(stats_arenas_i_base)
-CTL_PROTO(stats_arenas_i_internal)
-CTL_PROTO(stats_arenas_i_tcache_bytes)
-CTL_PROTO(stats_arenas_i_resident)
-INDEX_PROTO(stats_arenas_i)
-CTL_PROTO(stats_allocated)
-CTL_PROTO(stats_active)
-CTL_PROTO(stats_metadata)
-CTL_PROTO(stats_resident)
-CTL_PROTO(stats_mapped)
-CTL_PROTO(stats_retained)
-
-/******************************************************************************/
-/* mallctl tree. */
-
-/* Maximum tree depth. */
-#define	CTL_MAX_DEPTH	6
-
-#define	NAME(n)	{true},	n
-#define	CHILD(t, c)							\
-	sizeof(c##_node) / sizeof(ctl_##t##_node_t),			\
-	(ctl_node_t *)c##_node,						\
-	NULL
-#define	CTL(c)	0, NULL, c##_ctl
-
-/*
- * Only handles internal indexed nodes, since there are currently no external
- * ones.
- */
-#define	INDEX(i)	{false},	i##_index
-
-static const ctl_named_node_t	thread_tcache_node[] = {
-	{NAME("enabled"),	CTL(thread_tcache_enabled)},
-	{NAME("flush"),		CTL(thread_tcache_flush)}
-};
-
-static const ctl_named_node_t	thread_prof_node[] = {
-	{NAME("name"),		CTL(thread_prof_name)},
-	{NAME("active"),	CTL(thread_prof_active)}
-};
-
-static const ctl_named_node_t	thread_node[] = {
-	{NAME("arena"),		CTL(thread_arena)},
-	{NAME("allocated"),	CTL(thread_allocated)},
-	{NAME("allocatedp"),	CTL(thread_allocatedp)},
-	{NAME("deallocated"),	CTL(thread_deallocated)},
-	{NAME("deallocatedp"),	CTL(thread_deallocatedp)},
-	{NAME("tcache"),	CHILD(named, thread_tcache)},
-	{NAME("prof"),		CHILD(named, thread_prof)}
-};
-
-static const ctl_named_node_t	config_node[] = {
-	{NAME("cache_oblivious"), CTL(config_cache_oblivious)},
-	{NAME("debug"),		CTL(config_debug)},
-	{NAME("fill"),		CTL(config_fill)},
-	{NAME("lazy_lock"),	CTL(config_lazy_lock)},
-	{NAME("malloc_conf"),	CTL(config_malloc_conf)},
-	{NAME("munmap"),	CTL(config_munmap)},
-	{NAME("prof"),		CTL(config_prof)},
-	{NAME("prof_libgcc"),	CTL(config_prof_libgcc)},
-	{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
-	{NAME("stats"),		CTL(config_stats)},
-	{NAME("tcache"),	CTL(config_tcache)},
-	{NAME("tls"),		CTL(config_tls)},
-	{NAME("utrace"),	CTL(config_utrace)},
-	{NAME("xmalloc"),	CTL(config_xmalloc)}
-};
-
-static const ctl_named_node_t opt_node[] = {
-	{NAME("abort"),		CTL(opt_abort)},
-	{NAME("dss"),		CTL(opt_dss)},
-	{NAME("narenas"),	CTL(opt_narenas)},
-	{NAME("decay_time"),	CTL(opt_decay_time)},
-	{NAME("stats_print"),	CTL(opt_stats_print)},
-	{NAME("junk"),		CTL(opt_junk)},
-	{NAME("zero"),		CTL(opt_zero)},
-	{NAME("utrace"),	CTL(opt_utrace)},
-	{NAME("xmalloc"),	CTL(opt_xmalloc)},
-	{NAME("tcache"),	CTL(opt_tcache)},
-	{NAME("lg_tcache_max"),	CTL(opt_lg_tcache_max)},
-	{NAME("prof"),		CTL(opt_prof)},
-	{NAME("prof_prefix"),	CTL(opt_prof_prefix)},
-	{NAME("prof_active"),	CTL(opt_prof_active)},
-	{NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
-	{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
-	{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
-	{NAME("prof_gdump"),	CTL(opt_prof_gdump)},
-	{NAME("prof_final"),	CTL(opt_prof_final)},
-	{NAME("prof_leak"),	CTL(opt_prof_leak)},
-	{NAME("prof_accum"),	CTL(opt_prof_accum)}
-};
-
-static const ctl_named_node_t	tcache_node[] = {
-	{NAME("create"),	CTL(tcache_create)},
-	{NAME("flush"),		CTL(tcache_flush)},
-	{NAME("destroy"),	CTL(tcache_destroy)}
-};
-
-static const ctl_named_node_t arena_i_node[] = {
-	{NAME("initialized"),	CTL(arena_i_initialized)},
-	{NAME("purge"),		CTL(arena_i_purge)},
-	{NAME("decay"),		CTL(arena_i_decay)},
-	{NAME("reset"),		CTL(arena_i_reset)},
-	{NAME("destroy"),	CTL(arena_i_destroy)},
-	{NAME("dss"),		CTL(arena_i_dss)},
-	{NAME("decay_time"),	CTL(arena_i_decay_time)},
-	{NAME("extent_hooks"),	CTL(arena_i_extent_hooks)}
-};
-static const ctl_named_node_t super_arena_i_node[] = {
-	{NAME(""),		CHILD(named, arena_i)}
-};
-
-static const ctl_indexed_node_t arena_node[] = {
-	{INDEX(arena_i)}
-};
-
-static const ctl_named_node_t arenas_bin_i_node[] = {
-	{NAME("size"),		CTL(arenas_bin_i_size)},
-	{NAME("nregs"),		CTL(arenas_bin_i_nregs)},
-	{NAME("slab_size"),	CTL(arenas_bin_i_slab_size)}
-};
-static const ctl_named_node_t super_arenas_bin_i_node[] = {
-	{NAME(""),		CHILD(named, arenas_bin_i)}
-};
-
-static const ctl_indexed_node_t arenas_bin_node[] = {
-	{INDEX(arenas_bin_i)}
-};
-
-static const ctl_named_node_t arenas_lextent_i_node[] = {
-	{NAME("size"),		CTL(arenas_lextent_i_size)}
-};
-static const ctl_named_node_t super_arenas_lextent_i_node[] = {
-	{NAME(""),		CHILD(named, arenas_lextent_i)}
-};
-
-static const ctl_indexed_node_t arenas_lextent_node[] = {
-	{INDEX(arenas_lextent_i)}
-};
-
-static const ctl_named_node_t arenas_node[] = {
-	{NAME("narenas"),	CTL(arenas_narenas)},
-	{NAME("decay_time"),	CTL(arenas_decay_time)},
-	{NAME("quantum"),	CTL(arenas_quantum)},
-	{NAME("page"),		CTL(arenas_page)},
-	{NAME("tcache_max"),	CTL(arenas_tcache_max)},
-	{NAME("nbins"),		CTL(arenas_nbins)},
-	{NAME("nhbins"),	CTL(arenas_nhbins)},
-	{NAME("bin"),		CHILD(indexed, arenas_bin)},
-	{NAME("nlextents"),	CTL(arenas_nlextents)},
-	{NAME("lextent"),	CHILD(indexed, arenas_lextent)},
-	{NAME("create"),	CTL(arenas_create)}
-};
-
-static const ctl_named_node_t	prof_node[] = {
-	{NAME("thread_active_init"), CTL(prof_thread_active_init)},
-	{NAME("active"),	CTL(prof_active)},
-	{NAME("dump"),		CTL(prof_dump)},
-	{NAME("gdump"),		CTL(prof_gdump)},
-	{NAME("reset"),		CTL(prof_reset)},
-	{NAME("interval"),	CTL(prof_interval)},
-	{NAME("lg_sample"),	CTL(lg_prof_sample)}
-};
-
-static const ctl_named_node_t stats_arenas_i_small_node[] = {
-	{NAME("allocated"),	CTL(stats_arenas_i_small_allocated)},
-	{NAME("nmalloc"),	CTL(stats_arenas_i_small_nmalloc)},
-	{NAME("ndalloc"),	CTL(stats_arenas_i_small_ndalloc)},
-	{NAME("nrequests"),	CTL(stats_arenas_i_small_nrequests)}
-};
-
-static const ctl_named_node_t stats_arenas_i_large_node[] = {
-	{NAME("allocated"),	CTL(stats_arenas_i_large_allocated)},
-	{NAME("nmalloc"),	CTL(stats_arenas_i_large_nmalloc)},
-	{NAME("ndalloc"),	CTL(stats_arenas_i_large_ndalloc)},
-	{NAME("nrequests"),	CTL(stats_arenas_i_large_nrequests)}
-};
-
-static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
-	{NAME("nmalloc"),	CTL(stats_arenas_i_bins_j_nmalloc)},
-	{NAME("ndalloc"),	CTL(stats_arenas_i_bins_j_ndalloc)},
-	{NAME("nrequests"),	CTL(stats_arenas_i_bins_j_nrequests)},
-	{NAME("curregs"),	CTL(stats_arenas_i_bins_j_curregs)},
-	{NAME("nfills"),	CTL(stats_arenas_i_bins_j_nfills)},
-	{NAME("nflushes"),	CTL(stats_arenas_i_bins_j_nflushes)},
-	{NAME("nslabs"),	CTL(stats_arenas_i_bins_j_nslabs)},
-	{NAME("nreslabs"),	CTL(stats_arenas_i_bins_j_nreslabs)},
-	{NAME("curslabs"),	CTL(stats_arenas_i_bins_j_curslabs)}
-};
-static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
-	{NAME(""),		CHILD(named, stats_arenas_i_bins_j)}
-};
-
-static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
-	{INDEX(stats_arenas_i_bins_j)}
-};
-
-static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
-	{NAME("nmalloc"),	CTL(stats_arenas_i_lextents_j_nmalloc)},
-	{NAME("ndalloc"),	CTL(stats_arenas_i_lextents_j_ndalloc)},
-	{NAME("nrequests"),	CTL(stats_arenas_i_lextents_j_nrequests)},
-	{NAME("curlextents"),	CTL(stats_arenas_i_lextents_j_curlextents)}
-};
-static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
-	{NAME(""),		CHILD(named, stats_arenas_i_lextents_j)}
-};
-
-static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
-	{INDEX(stats_arenas_i_lextents_j)}
-};
-
-static const ctl_named_node_t stats_arenas_i_node[] = {
-	{NAME("nthreads"),	CTL(stats_arenas_i_nthreads)},
-	{NAME("dss"),		CTL(stats_arenas_i_dss)},
-	{NAME("decay_time"),	CTL(stats_arenas_i_decay_time)},
-	{NAME("pactive"),	CTL(stats_arenas_i_pactive)},
-	{NAME("pdirty"),	CTL(stats_arenas_i_pdirty)},
-	{NAME("mapped"),	CTL(stats_arenas_i_mapped)},
-	{NAME("retained"),	CTL(stats_arenas_i_retained)},
-	{NAME("npurge"),	CTL(stats_arenas_i_npurge)},
-	{NAME("nmadvise"),	CTL(stats_arenas_i_nmadvise)},
-	{NAME("purged"),	CTL(stats_arenas_i_purged)},
-	{NAME("base"),		CTL(stats_arenas_i_base)},
-	{NAME("internal"),	CTL(stats_arenas_i_internal)},
-	{NAME("tcache_bytes"),	CTL(stats_arenas_i_tcache_bytes)},
-	{NAME("resident"),	CTL(stats_arenas_i_resident)},
-	{NAME("small"),		CHILD(named, stats_arenas_i_small)},
-	{NAME("large"),		CHILD(named, stats_arenas_i_large)},
-	{NAME("bins"),		CHILD(indexed, stats_arenas_i_bins)},
-	{NAME("lextents"),	CHILD(indexed, stats_arenas_i_lextents)}
-};
-static const ctl_named_node_t super_stats_arenas_i_node[] = {
-	{NAME(""),		CHILD(named, stats_arenas_i)}
-};
-
-static const ctl_indexed_node_t stats_arenas_node[] = {
-	{INDEX(stats_arenas_i)}
-};
-
-static const ctl_named_node_t stats_node[] = {
-	{NAME("allocated"),	CTL(stats_allocated)},
-	{NAME("active"),	CTL(stats_active)},
-	{NAME("metadata"),	CTL(stats_metadata)},
-	{NAME("resident"),	CTL(stats_resident)},
-	{NAME("mapped"),	CTL(stats_mapped)},
-	{NAME("retained"),	CTL(stats_retained)},
-	{NAME("arenas"),	CHILD(indexed, stats_arenas)}
-};
-
-static const ctl_named_node_t	root_node[] = {
-	{NAME("version"),	CTL(version)},
-	{NAME("epoch"),		CTL(epoch)},
-	{NAME("thread"),	CHILD(named, thread)},
-	{NAME("config"),	CHILD(named, config)},
-	{NAME("opt"),		CHILD(named, opt)},
-	{NAME("tcache"),	CHILD(named, tcache)},
-	{NAME("arena"),		CHILD(indexed, arena)},
-	{NAME("arenas"),	CHILD(named, arenas)},
-	{NAME("prof"),		CHILD(named, prof)},
-	{NAME("stats"),		CHILD(named, stats)}
-};
-static const ctl_named_node_t super_root_node[] = {
-	{NAME(""),		CHILD(named, root)}
-};
-
-#undef NAME
-#undef CHILD
-#undef CTL
-#undef INDEX
-
-/******************************************************************************/
-
-static unsigned
-arenas_i2a_impl(size_t i, bool compat, bool validate)
-{
-	unsigned a;
-
-	switch (i) {
-	case MALLCTL_ARENAS_ALL:
-		a = 0;
-		break;
-	case MALLCTL_ARENAS_DESTROYED:
-		a = 1;
-		break;
-	default:
-		if (compat && i == ctl_arenas->narenas) {
-			/*
-			 * Provide deprecated backward compatibility for
-			 * accessing the merged stats at index narenas rather
-			 * than via MALLCTL_ARENAS_ALL.  This is scheduled for
-			 * removal in 6.0.0.
-			 */
-			a = 0;
-		} else if (validate && i >= ctl_arenas->narenas)
-			a = UINT_MAX;
-		else {
-			/*
-			 * This function should never be called for an index
-			 * more than one past the range of indices that have
-			 * initialized ctl data.
-			 */
-			assert(i < ctl_arenas->narenas || (!validate && i ==
-			    ctl_arenas->narenas));
-			a = (unsigned)i + 2;
-		}
-		break;
-	}
-
-	return (a);
-}
-
-static unsigned
-arenas_i2a(size_t i)
-{
-	return (arenas_i2a_impl(i, true, false));
-}
-
-static ctl_arena_t *
-arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init)
-{
-	ctl_arena_t *ret;
-
-	assert(!compat || !init);
-
-	ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
-	if (init && ret == NULL) {
-		if (config_stats) {
-			struct container_s {
-				ctl_arena_t		ctl_arena;
-				ctl_arena_stats_t	astats;
-			};
-			struct container_s *cont =
-			    (struct container_s *)base_alloc(tsdn, b0get(),
-			    sizeof(struct container_s), QUANTUM);
-			if (cont == NULL) {
-				return NULL;
-			}
-			ret = &cont->ctl_arena;
-			ret->astats = &cont->astats;
-		} else {
-			ret = (ctl_arena_t *)base_alloc(tsdn, b0get(),
-			    sizeof(ctl_arena_t), QUANTUM);
-			if (ret == NULL) {
-				return NULL;
-			}
-		}
-		ret->arena_ind = (unsigned)i;
-		ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
-	}
-
-	assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
-	return (ret);
-}
-
-static ctl_arena_t *
-arenas_i(size_t i)
-{
-	ctl_arena_t *ret = arenas_i_impl(TSDN_NULL, i, true, false);
-	assert(ret != NULL);
-	return (ret);
-}
-
-static void
-ctl_arena_clear(ctl_arena_t *ctl_arena)
-{
-	ctl_arena->nthreads = 0;
-	ctl_arena->dss = dss_prec_names[dss_prec_limit];
-	ctl_arena->decay_time = -1;
-	ctl_arena->pactive = 0;
-	ctl_arena->pdirty = 0;
-	if (config_stats) {
-		memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
-		ctl_arena->astats->allocated_small = 0;
-		ctl_arena->astats->nmalloc_small = 0;
-		ctl_arena->astats->ndalloc_small = 0;
-		ctl_arena->astats->nrequests_small = 0;
-		memset(ctl_arena->astats->bstats, 0, NBINS *
-		    sizeof(malloc_bin_stats_t));
-		memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) *
-		    sizeof(malloc_large_stats_t));
-	}
-}
-
-static void
-ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena)
-{
-	unsigned i;
-
-	if (config_stats) {
-		arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
-		    &ctl_arena->dss, &ctl_arena->decay_time,
-		    &ctl_arena->pactive, &ctl_arena->pdirty,
-		    &ctl_arena->astats->astats, ctl_arena->astats->bstats,
-		    ctl_arena->astats->lstats);
-
-		for (i = 0; i < NBINS; i++) {
-			ctl_arena->astats->allocated_small +=
-			    ctl_arena->astats->bstats[i].curregs *
-			    index2size(i);
-			ctl_arena->astats->nmalloc_small +=
-			    ctl_arena->astats->bstats[i].nmalloc;
-			ctl_arena->astats->ndalloc_small +=
-			    ctl_arena->astats->bstats[i].ndalloc;
-			ctl_arena->astats->nrequests_small +=
-			    ctl_arena->astats->bstats[i].nrequests;
-		}
-	} else {
-		arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
-		    &ctl_arena->dss, &ctl_arena->decay_time,
-		    &ctl_arena->pactive, &ctl_arena->pdirty);
-	}
-}
-
-static void
-ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
-    bool destroyed)
-{
-	unsigned i;
-
-	if (!destroyed) {
-		ctl_sdarena->nthreads += ctl_arena->nthreads;
-		ctl_sdarena->pactive += ctl_arena->pactive;
-		ctl_sdarena->pdirty += ctl_arena->pdirty;
-	} else {
-		assert(ctl_arena->nthreads == 0);
-		assert(ctl_arena->pactive == 0);
-		assert(ctl_arena->pdirty == 0);
-	}
-
-	if (config_stats) {
-		ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
-		ctl_arena_stats_t *astats = ctl_arena->astats;
-
-		if (!destroyed) {
-			sdstats->astats.mapped += astats->astats.mapped;
-			sdstats->astats.retained += astats->astats.retained;
-		}
-		sdstats->astats.npurge += astats->astats.npurge;
-		sdstats->astats.nmadvise += astats->astats.nmadvise;
-		sdstats->astats.purged += astats->astats.purged;
-
-		if (!destroyed) {
-			sdstats->astats.base += astats->astats.base;
-			sdstats->astats.internal += astats->astats.internal;
-			sdstats->astats.resident += astats->astats.resident;
-		} else
-			assert(astats->astats.internal == 0);
-
-		if (!destroyed)
-			sdstats->allocated_small += astats->allocated_small;
-		else
-			assert(astats->allocated_small == 0);
-		sdstats->nmalloc_small += astats->nmalloc_small;
-		sdstats->ndalloc_small += astats->ndalloc_small;
-		sdstats->nrequests_small += astats->nrequests_small;
-
-		if (!destroyed) {
-			sdstats->astats.allocated_large +=
-			    astats->astats.allocated_large;
-		} else
-			assert(astats->astats.allocated_large == 0);
-		sdstats->astats.nmalloc_large += astats->astats.nmalloc_large;
-		sdstats->astats.ndalloc_large += astats->astats.ndalloc_large;
-		sdstats->astats.nrequests_large +=
-		    astats->astats.nrequests_large;
-
-		if (config_tcache) {
-			sdstats->astats.tcache_bytes +=
-			    astats->astats.tcache_bytes;
-		}
-
-		for (i = 0; i < NBINS; i++) {
-			sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
-			sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
-			sdstats->bstats[i].nrequests +=
-			    astats->bstats[i].nrequests;
-			if (!destroyed) {
-				sdstats->bstats[i].curregs +=
-				    astats->bstats[i].curregs;
-			} else
-				assert(astats->bstats[i].curregs == 0);
-			if (config_tcache) {
-				sdstats->bstats[i].nfills +=
-				    astats->bstats[i].nfills;
-				sdstats->bstats[i].nflushes +=
-				    astats->bstats[i].nflushes;
-			}
-			sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
-			sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
-			if (!destroyed) {
-				sdstats->bstats[i].curslabs +=
-				    astats->bstats[i].curslabs;
-			} else
-				assert(astats->bstats[i].curslabs == 0);
-		}
-
-		for (i = 0; i < NSIZES - NBINS; i++) {
-			sdstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
-			sdstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
-			sdstats->lstats[i].nrequests +=
-			    astats->lstats[i].nrequests;
-			if (!destroyed) {
-				sdstats->lstats[i].curlextents +=
-				    astats->lstats[i].curlextents;
-			} else
-				assert(astats->lstats[i].curlextents == 0);
-		}
-	}
-}
-
-static void
-ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
-    unsigned i, bool destroyed)
-{
-	ctl_arena_t *ctl_arena = arenas_i(i);
-
-	ctl_arena_clear(ctl_arena);
-	ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
-	/* Merge into sum stats as well. */
-	ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
-}
-
-static unsigned
-ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_hooks)
-{
-	unsigned arena_ind;
-	ctl_arena_t *ctl_arena;
-
-	if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
-	    NULL) {
-		ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
-		arena_ind = ctl_arena->arena_ind;
-	} else
-		arena_ind = ctl_arenas->narenas;
-
-	/* Trigger stats allocation. */
-	if (arenas_i_impl(tsdn, arena_ind, false, true) == NULL)
-		return (UINT_MAX);
-
-	/* Initialize new arena. */
-	if (arena_init(tsdn, arena_ind, extent_hooks) == NULL)
-		return (UINT_MAX);
-
-	if (arena_ind == ctl_arenas->narenas)
-		ctl_arenas->narenas++;
-
-	return (arena_ind);
-}
-
-static void
-ctl_refresh(tsdn_t *tsdn)
-{
-	unsigned i;
-	ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
-	VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);
-
-	/*
-	 * Clear sum stats, since they will be merged into by
-	 * ctl_arena_refresh().
-	 */
-	ctl_arena_clear(ctl_sarena);
-
-	for (i = 0; i < ctl_arenas->narenas; i++) {
-		tarenas[i] = arena_get(tsdn, i, false);
-	}
-
-	for (i = 0; i < ctl_arenas->narenas; i++) {
-		ctl_arena_t *ctl_arena = arenas_i(i);
-		bool initialized = (tarenas[i] != NULL);
-
-		ctl_arena->initialized = initialized;
-		if (initialized) {
-			ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
-			    false);
-		}
-	}
-
-	if (config_stats) {
-		ctl_stats->allocated = ctl_sarena->astats->allocated_small +
-		    ctl_sarena->astats->astats.allocated_large;
-		ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
-		ctl_stats->metadata = ctl_sarena->astats->astats.base +
-		    ctl_sarena->astats->astats.internal;
-		ctl_stats->resident = ctl_sarena->astats->astats.resident;
-		ctl_stats->mapped = ctl_sarena->astats->astats.mapped;
-		ctl_stats->retained = ctl_sarena->astats->astats.retained;
-	}
-	ctl_arenas->epoch++;
-}
-
-static bool
-ctl_init(tsdn_t *tsdn)
-{
-	bool ret;
-
-	malloc_mutex_lock(tsdn, &ctl_mtx);
-	if (!ctl_initialized) {
-		ctl_arena_t *ctl_sarena, *ctl_darena;
-		unsigned i;
-
-		/*
-		 * Allocate demand-zeroed space for pointers to the full
-		 * range of supported arena indices.
-		 */
-		if (ctl_arenas == NULL) {
-			ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
-			    b0get(), sizeof(ctl_arenas_t), QUANTUM);
-			if (ctl_arenas == NULL) {
-				ret = true;
-				goto label_return;
-			}
-		}
-
-		if (config_stats && ctl_stats == NULL) {
-			ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
-			    sizeof(ctl_stats_t), QUANTUM);
-			if (ctl_stats == NULL) {
-				ret = true;
-				goto label_return;
-			}
-		}
-
-		/*
-		 * Allocate space for the current full range of arenas
-		 * here rather than doing it lazily elsewhere, in order
-		 * to limit when OOM-caused errors can occur.
-		 */
-		if ((ctl_sarena = arenas_i_impl(tsdn, MALLCTL_ARENAS_ALL, false,
-		    true)) == NULL) {
-			ret = true;
-			goto label_return;
-		}
-		ctl_sarena->initialized = true;
-
-		if ((ctl_darena = arenas_i_impl(tsdn, MALLCTL_ARENAS_DESTROYED,
-		    false, true)) == NULL) {
-			ret = true;
-			goto label_return;
-		}
-		ctl_arena_clear(ctl_darena);
-		/*
-		 * Don't toggle ctl_darena to initialized until an arena is
-		 * actually destroyed, so that arena.<i>.initialized can be used
-		 * to query whether the stats are relevant.
-		 */
-
-		ctl_arenas->narenas = narenas_total_get();
-		for (i = 0; i < ctl_arenas->narenas; i++) {
-			if (arenas_i_impl(tsdn, i, false, true) == NULL) {
-				ret = true;
-				goto label_return;
-			}
-		}
-
-		ql_new(&ctl_arenas->destroyed);
-		ctl_refresh(tsdn);
-
-		ctl_initialized = true;
-	}
-
-	ret = false;
-label_return:
-	malloc_mutex_unlock(tsdn, &ctl_mtx);
-	return (ret);
-}
-
-static int
-ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
-    size_t *mibp, size_t *depthp)
-{
-	int ret;
-	const char *elm, *tdot, *dot;
-	size_t elen, i, j;
-	const ctl_named_node_t *node;
-
-	elm = name;
-	/* Equivalent to strchrnul(). */
-	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
-	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
-	if (elen == 0) {
-		ret = ENOENT;
-		goto label_return;
-	}
-	node = super_root_node;
-	for (i = 0; i < *depthp; i++) {
-		assert(node);
-		assert(node->nchildren > 0);
-		if (ctl_named_node(node->children) != NULL) {
-			const ctl_named_node_t *pnode = node;
-
-			/* Children are named. */
-			for (j = 0; j < node->nchildren; j++) {
-				const ctl_named_node_t *child =
-				    ctl_named_children(node, j);
-				if (strlen(child->name) == elen &&
-				    strncmp(elm, child->name, elen) == 0) {
-					node = child;
-					if (nodesp != NULL)
-						nodesp[i] =
-						    (const ctl_node_t *)node;
-					mibp[i] = j;
-					break;
-				}
-			}
-			if (node == pnode) {
-				ret = ENOENT;
-				goto label_return;
-			}
-		} else {
-			uintmax_t index;
-			const ctl_indexed_node_t *inode;
-
-			/* Children are indexed. */
-			index = malloc_strtoumax(elm, NULL, 10);
-			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
-				ret = ENOENT;
-				goto label_return;
-			}
-
-			inode = ctl_indexed_node(node->children);
-			node = inode->index(tsdn, mibp, *depthp, (size_t)index);
-			if (node == NULL) {
-				ret = ENOENT;
-				goto label_return;
-			}
-
-			if (nodesp != NULL)
-				nodesp[i] = (const ctl_node_t *)node;
-			mibp[i] = (size_t)index;
-		}
-
-		if (node->ctl != NULL) {
-			/* Terminal node. */
-			if (*dot != '\0') {
-				/*
-				 * The name contains more elements than are
-				 * in this path through the tree.
-				 */
-				ret = ENOENT;
-				goto label_return;
-			}
-			/* Complete lookup successful. */
-			*depthp = i + 1;
-			break;
-		}
-
-		/* Update elm. */
-		if (*dot == '\0') {
-			/* No more elements. */
-			ret = ENOENT;
-			goto label_return;
-		}
-		elm = &dot[1];
-		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
-		    strchr(elm, '\0');
-		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
-	}
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-int
-ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
-    void *newp, size_t newlen)
-{
-	int ret;
-	size_t depth;
-	ctl_node_t const *nodes[CTL_MAX_DEPTH];
-	size_t mib[CTL_MAX_DEPTH];
-	const ctl_named_node_t *node;
-
-	if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
-		ret = EAGAIN;
-		goto label_return;
-	}
-
-	depth = CTL_MAX_DEPTH;
-	ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
-	if (ret != 0)
-		goto label_return;
-
-	node = ctl_named_node(nodes[depth-1]);
-	if (node != NULL && node->ctl)
-		ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
-	else {
-		/* The name refers to a partial path through the ctl tree. */
-		ret = ENOENT;
-	}
-
-label_return:
-	return(ret);
-}
-
-int
-ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp)
-{
-	int ret;
-
-	if (!ctl_initialized && ctl_init(tsdn)) {
-		ret = EAGAIN;
-		goto label_return;
-	}
-
-	ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp);
-label_return:
-	return(ret);
-}
-
-int
-ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	const ctl_named_node_t *node;
-	size_t i;
-
-	if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
-		ret = EAGAIN;
-		goto label_return;
-	}
-
-	/* Iterate down the tree. */
-	node = super_root_node;
-	for (i = 0; i < miblen; i++) {
-		assert(node);
-		assert(node->nchildren > 0);
-		if (ctl_named_node(node->children) != NULL) {
-			/* Children are named. */
-			if (node->nchildren <= mib[i]) {
-				ret = ENOENT;
-				goto label_return;
-			}
-			node = ctl_named_children(node, mib[i]);
-		} else {
-			const ctl_indexed_node_t *inode;
-
-			/* Indexed element. */
-			inode = ctl_indexed_node(node->children);
-			node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
-			if (node == NULL) {
-				ret = ENOENT;
-				goto label_return;
-			}
-		}
-	}
-
-	/* Call the ctl function. */
-	if (node && node->ctl)
-		ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
-	else {
-		/* Partial MIB. */
-		ret = ENOENT;
-	}
-
-label_return:
-	return(ret);
-}
-
-bool
-ctl_boot(void)
-{
-	if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL))
-		return (true);
-
-	ctl_initialized = false;
-
-	return (false);
-}
-
-void
-ctl_prefork(tsdn_t *tsdn)
-{
-	malloc_mutex_prefork(tsdn, &ctl_mtx);
-}
-
-void
-ctl_postfork_parent(tsdn_t *tsdn)
-{
-	malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
-}
-
-void
-ctl_postfork_child(tsdn_t *tsdn)
-{
-	malloc_mutex_postfork_child(tsdn, &ctl_mtx);
-}
-
-/******************************************************************************/
-/* *_ctl() functions. */
-
-#define	READONLY()	do {						\
-	if (newp != NULL || newlen != 0) {				\
-		ret = EPERM;						\
-		goto label_return;					\
-	}								\
-} while (0)
-
-#define	WRITEONLY()	do {						\
-	if (oldp != NULL || oldlenp != NULL) {				\
-		ret = EPERM;						\
-		goto label_return;					\
-	}								\
-} while (0)
-
-#define	READ_XOR_WRITE()	do {					\
-	if ((oldp != NULL && oldlenp != NULL) && (newp != NULL ||	\
-	    newlen != 0)) {						\
-		ret = EPERM;						\
-		goto label_return;					\
-	}								\
-} while (0)
-
-#define	READ(v, t)	do {						\
-	if (oldp != NULL && oldlenp != NULL) {				\
-		if (*oldlenp != sizeof(t)) {				\
-			size_t	copylen = (sizeof(t) <= *oldlenp)	\
-			    ? sizeof(t) : *oldlenp;			\
-			memcpy(oldp, (void *)&(v), copylen);		\
-			ret = EINVAL;					\
-			goto label_return;				\
-		}							\
-		*(t *)oldp = (v);					\
-	}								\
-} while (0)
-
-#define	WRITE(v, t)	do {						\
-	if (newp != NULL) {						\
-		if (newlen != sizeof(t)) {				\
-			ret = EINVAL;					\
-			goto label_return;				\
-		}							\
-		(v) = *(t *)newp;					\
-	}								\
-} while (0)
-
-#define	MIB_UNSIGNED(v, i) do {						\
-	if (mib[i] > UINT_MAX) {					\
-		ret = EFAULT;						\
-		goto label_return;					\
-	}								\
-	v = (unsigned)mib[i];						\
-} while (0)
-
-/*
- * There's a lot of code duplication in the following macros due to limitations
- * in how nested cpp macros are expanded.
- */
-#define	CTL_RO_CLGEN(c, l, n, v, t)					\
-static int								\
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
-    size_t *oldlenp, void *newp, size_t newlen)				\
-{									\
-	int ret;							\
-	t oldval;							\
-									\
-	if (!(c))							\
-		return (ENOENT);					\
-	if (l)								\
-		malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);		\
-	READONLY();							\
-	oldval = (v);							\
-	READ(oldval, t);						\
-									\
-	ret = 0;							\
-label_return:								\
-	if (l)								\
-		malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);		\
-	return (ret);							\
-}
-
-#define	CTL_RO_CGEN(c, n, v, t)						\
-static int								\
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
-    size_t *oldlenp, void *newp, size_t newlen)				\
-{									\
-	int ret;							\
-	t oldval;							\
-									\
-	if (!(c))							\
-		return (ENOENT);					\
-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
-	READONLY();							\
-	oldval = (v);							\
-	READ(oldval, t);						\
-									\
-	ret = 0;							\
-label_return:								\
-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
-	return (ret);							\
-}
-
-#define	CTL_RO_GEN(n, v, t)						\
-static int								\
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
-    size_t *oldlenp, void *newp, size_t newlen)				\
-{									\
-	int ret;							\
-	t oldval;							\
-									\
-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
-	READONLY();							\
-	oldval = (v);							\
-	READ(oldval, t);						\
-									\
-	ret = 0;							\
-label_return:								\
-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
-	return (ret);							\
-}
-
-/*
- * ctl_mtx is not acquired, under the assumption that no pertinent data will
- * mutate during the call.
- */
-#define	CTL_RO_NL_CGEN(c, n, v, t)					\
-static int								\
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
-    size_t *oldlenp, void *newp, size_t newlen)				\
-{									\
-	int ret;							\
-	t oldval;							\
-									\
-	if (!(c))							\
-		return (ENOENT);					\
-	READONLY();							\
-	oldval = (v);							\
-	READ(oldval, t);						\
-									\
-	ret = 0;							\
-label_return:								\
-	return (ret);							\
-}
-
-#define	CTL_RO_NL_GEN(n, v, t)						\
-static int								\
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
-    size_t *oldlenp, void *newp, size_t newlen)				\
-{									\
-	int ret;							\
-	t oldval;							\
-									\
-	READONLY();							\
-	oldval = (v);							\
-	READ(oldval, t);						\
-									\
-	ret = 0;							\
-label_return:								\
-	return (ret);							\
-}
-
-#define	CTL_TSD_RO_NL_CGEN(c, n, m, t)					\
-static int								\
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
-    size_t *oldlenp, void *newp, size_t newlen)				\
-{									\
-	int ret;							\
-	t oldval;							\
-									\
-	if (!(c))							\
-		return (ENOENT);					\
-	READONLY();							\
-	oldval = (m(tsd));						\
-	READ(oldval, t);						\
-									\
-	ret = 0;							\
-label_return:								\
-	return (ret);							\
-}
-
-#define	CTL_RO_CONFIG_GEN(n, t)						\
-static int								\
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
-    size_t *oldlenp, void *newp, size_t newlen)				\
-{									\
-	int ret;							\
-	t oldval;							\
-									\
-	READONLY();							\
-	oldval = n;							\
-	READ(oldval, t);						\
-									\
-	ret = 0;							\
-label_return:								\
-	return (ret);							\
-}
-
-/******************************************************************************/
-
-CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
-
-static int
-epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	UNUSED uint64_t newval;
-
-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
-	WRITE(newval, uint64_t);
-	if (newp != NULL)
-		ctl_refresh(tsd_tsdn(tsd));
-	READ(ctl_arenas->epoch, uint64_t);
-
-	ret = 0;
-label_return:
-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
-	return (ret);
-}
-
-/******************************************************************************/
-
-CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
-CTL_RO_CONFIG_GEN(config_debug, bool)
-CTL_RO_CONFIG_GEN(config_fill, bool)
-CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
-CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
-CTL_RO_CONFIG_GEN(config_munmap, bool)
-CTL_RO_CONFIG_GEN(config_prof, bool)
-CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
-CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
-CTL_RO_CONFIG_GEN(config_stats, bool)
-CTL_RO_CONFIG_GEN(config_tcache, bool)
-CTL_RO_CONFIG_GEN(config_tls, bool)
-CTL_RO_CONFIG_GEN(config_utrace, bool)
-CTL_RO_CONFIG_GEN(config_xmalloc, bool)
-
-/******************************************************************************/
-
-CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
-CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
-CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
-CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
-CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
-CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
-CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
-CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
-CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
-CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
-CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
-    opt_prof_thread_active_init, bool)
-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
-CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
-CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
-
-/******************************************************************************/
-
-static int
-thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	arena_t *oldarena;
-	unsigned newind, oldind;
-
-	oldarena = arena_choose(tsd, NULL);
-	if (oldarena == NULL)
-		return (EAGAIN);
-
-	newind = oldind = arena_ind_get(oldarena);
-	WRITE(newind, unsigned);
-	READ(oldind, unsigned);
-	if (newind != oldind) {
-		arena_t *newarena;
-
-		if (newind >= narenas_total_get()) {
-			/* New arena index is out of range. */
-			ret = EFAULT;
-			goto label_return;
-		}
-
-		/* Initialize arena if necessary. */
-		newarena = arena_get(tsd_tsdn(tsd), newind, true);
-		if (newarena == NULL) {
-			ret = EAGAIN;
-			goto label_return;
-		}
-		/* Set new arena/tcache associations. */
-		arena_migrate(tsd, oldind, newind);
-		if (config_tcache) {
-			tcache_t *tcache = tsd_tcache_get(tsd);
-			if (tcache != NULL) {
-				tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
-				    oldarena, newarena);
-			}
-		}
-	}
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
-    uint64_t)
-CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
-    uint64_t *)
-CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
-    uint64_t)
-CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
-    tsd_thread_deallocatedp_get, uint64_t *)
-
-static int
-thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
-    void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	bool oldval;
-
-	if (!config_tcache)
-		return (ENOENT);
-
-	oldval = tcache_enabled_get();
-	if (newp != NULL) {
-		if (newlen != sizeof(bool)) {
-			ret = EINVAL;
-			goto label_return;
-		}
-		tcache_enabled_set(*(bool *)newp);
-	}
-	READ(oldval, bool);
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-static int
-thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
-    void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-
-	if (!config_tcache)
-		return (ENOENT);
-
-	READONLY();
-	WRITEONLY();
-
-	tcache_flush();
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-static int
-thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-
-	if (!config_prof)
-		return (ENOENT);
-
-	READ_XOR_WRITE();
-
-	if (newp != NULL) {
-		if (newlen != sizeof(const char *)) {
-			ret = EINVAL;
-			goto label_return;
-		}
-
-		if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
-		    0)
-			goto label_return;
-	} else {
-		const char *oldname = prof_thread_name_get(tsd);
-		READ(oldname, const char *);
-	}
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-static int
-thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	bool oldval;
-
-	if (!config_prof)
-		return (ENOENT);
-
-	oldval = prof_thread_active_get(tsd);
-	if (newp != NULL) {
-		if (newlen != sizeof(bool)) {
-			ret = EINVAL;
-			goto label_return;
-		}
-		if (prof_thread_active_set(tsd, *(bool *)newp)) {
-			ret = EAGAIN;
-			goto label_return;
-		}
-	}
-	READ(oldval, bool);
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-/******************************************************************************/
-
-static int
-tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	unsigned tcache_ind;
-
-	if (!config_tcache)
-		return (ENOENT);
-
-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
-	READONLY();
-	if (tcaches_create(tsd, &tcache_ind)) {
-		ret = EFAULT;
-		goto label_return;
-	}
-	READ(tcache_ind, unsigned);
-
-	ret = 0;
-label_return:
-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
-	return (ret);
-}
-
-static int
-tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	unsigned tcache_ind;
-
-	if (!config_tcache)
-		return (ENOENT);
-
-	WRITEONLY();
-	tcache_ind = UINT_MAX;
-	WRITE(tcache_ind, unsigned);
-	if (tcache_ind == UINT_MAX) {
-		ret = EFAULT;
-		goto label_return;
-	}
-	tcaches_flush(tsd, tcache_ind);
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-static int
-tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	unsigned tcache_ind;
-
-	if (!config_tcache)
-		return (ENOENT);
-
-	WRITEONLY();
-	tcache_ind = UINT_MAX;
-	WRITE(tcache_ind, unsigned);
-	if (tcache_ind == UINT_MAX) {
-		ret = EFAULT;
-		goto label_return;
-	}
-	tcaches_destroy(tsd, tcache_ind);
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-/******************************************************************************/
-
-static int
-arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
-    void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	tsdn_t *tsdn = tsd_tsdn(tsd);
-	unsigned arena_ind;
-	bool initialized;
-
-	READONLY();
-	MIB_UNSIGNED(arena_ind, 1);
-
-	malloc_mutex_lock(tsdn, &ctl_mtx);
-	initialized = arenas_i(arena_ind)->initialized;
-	malloc_mutex_unlock(tsdn, &ctl_mtx);
-
-	READ(initialized, bool);
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-static void
-arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
-{
-	malloc_mutex_lock(tsdn, &ctl_mtx);
-	{
-		unsigned narenas = ctl_arenas->narenas;
-
-		/*
-		 * Access via index narenas is deprecated, and scheduled for
-		 * removal in 6.0.0.
-		 */
-		if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
-			unsigned i;
-			VARIABLE_ARRAY(arena_t *, tarenas, narenas);
-
-			for (i = 0; i < narenas; i++)
-				tarenas[i] = arena_get(tsdn, i, false);
-
-			/*
-			 * No further need to hold ctl_mtx, since narenas and
-			 * tarenas contain everything needed below.
-			 */
-			malloc_mutex_unlock(tsdn, &ctl_mtx);
-
-			for (i = 0; i < narenas; i++) {
-				if (tarenas[i] != NULL)
-					arena_purge(tsdn, tarenas[i], all);
-			}
-		} else {
-			arena_t *tarena;
-
-			assert(arena_ind < narenas);
-
-			tarena = arena_get(tsdn, arena_ind, false);
-
-			/* No further need to hold ctl_mtx. */
-			malloc_mutex_unlock(tsdn, &ctl_mtx);
-
-			if (tarena != NULL)
-				arena_purge(tsdn, tarena, all);
-		}
-	}
-}
-
-static int
-arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	unsigned arena_ind;
-
-	READONLY();
-	WRITEONLY();
-	MIB_UNSIGNED(arena_ind, 1);
-	arena_i_purge(tsd_tsdn(tsd), arena_ind, true);
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-static int
-arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	unsigned arena_ind;
-
-	READONLY();
-	WRITEONLY();
-	MIB_UNSIGNED(arena_ind, 1);
-	arena_i_purge(tsd_tsdn(tsd), arena_ind, false);
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-static int
-arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
-    void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
-    arena_t **arena)
-{
-	int ret;
-
-	READONLY();
-	WRITEONLY();
-	MIB_UNSIGNED(*arena_ind, 1);
-
-	if (*arena_ind < narenas_auto) {
-		ret = EFAULT;
-		goto label_return;
-	}
-
-	*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
-	if (*arena == NULL) {
-		ret = EFAULT;
-		goto label_return;
-	}
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-static int
-arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	unsigned arena_ind;
-	arena_t *arena;
-
-	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
-	    newp, newlen, &arena_ind, &arena);
-	if (ret != 0)
-		return (ret);
-
-	arena_reset(tsd, arena);
-
-	return (ret);
-}
-
-static int
-arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	unsigned arena_ind;
-	arena_t *arena;
-	ctl_arena_t *ctl_darena, *ctl_arena;
-
-	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
-	    newp, newlen, &arena_ind, &arena);
-	if (ret != 0)
-		goto label_return;
-
-	if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
-	    true) != 0) {
-		ret = EFAULT;
-		goto label_return;
-	}
-
-	/* Merge stats after resetting and purging arena. */
-	arena_reset(tsd, arena);
-	arena_purge(tsd_tsdn(tsd), arena, true);
-	ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
-	ctl_darena->initialized = true;
-	ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
-	/* Destroy arena. */
-	arena_destroy(tsd, arena);
-	ctl_arena = arenas_i(arena_ind);
-	ctl_arena->initialized = false;
-	/* Record arena index for later recycling via arenas.create. */
-	ql_elm_new(ctl_arena, destroyed_link);
-	ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
-
-	assert(ret == 0);
-label_return:
-	return (ret);
-}
-
-static int
-arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	const char *dss = NULL;
-	unsigned arena_ind;
-	dss_prec_t dss_prec_old = dss_prec_limit;
-	dss_prec_t dss_prec = dss_prec_limit;
-
-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
-	WRITE(dss, const char *);
-	MIB_UNSIGNED(arena_ind, 1);
-	if (dss != NULL) {
-		int i;
-		bool match = false;
-
-		for (i = 0; i < dss_prec_limit; i++) {
-			if (strcmp(dss_prec_names[i], dss) == 0) {
-				dss_prec = i;
-				match = true;
-				break;
-			}
-		}
-
-		if (!match) {
-			ret = EINVAL;
-			goto label_return;
-		}
-	}
-
-	/*
-	 * Access via index narenas is deprecated, and scheduled for removal in
-	 * 6.0.0.
-	 */
-	if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
-	    ctl_arenas->narenas) {
-		if (dss_prec != dss_prec_limit &&
-		    extent_dss_prec_set(dss_prec)) {
-			ret = EFAULT;
-			goto label_return;
-		}
-		dss_prec_old = extent_dss_prec_get();
-	} else {
-		arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
-		if (arena == NULL || (dss_prec != dss_prec_limit &&
-		    arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) {
-			ret = EFAULT;
-			goto label_return;
-		}
-		dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena);
-	}
-
-	dss = dss_prec_names[dss_prec_old];
-	READ(dss, const char *);
-
-	ret = 0;
-label_return:
-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
-	return (ret);
-}
-
-static int
-arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	unsigned arena_ind;
-	arena_t *arena;
-
-	MIB_UNSIGNED(arena_ind, 1);
-	arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
-	if (arena == NULL) {
-		ret = EFAULT;
-		goto label_return;
-	}
-
-	if (oldp != NULL && oldlenp != NULL) {
-		size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena);
-		READ(oldval, ssize_t);
-	}
-	if (newp != NULL) {
-		if (newlen != sizeof(ssize_t)) {
-			ret = EINVAL;
-			goto label_return;
-		}
-		if (arena_decay_time_set(tsd_tsdn(tsd), arena,
-		    *(ssize_t *)newp)) {
-			ret = EFAULT;
-			goto label_return;
-		}
-	}
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-static int
-arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
-    void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	unsigned arena_ind;
-	arena_t *arena;
-
-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
-	MIB_UNSIGNED(arena_ind, 1);
-	if (arena_ind < narenas_total_get() && (arena =
-	    arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
-		if (newp != NULL) {
-			extent_hooks_t *old_extent_hooks;
-			extent_hooks_t *new_extent_hooks
-			    JEMALLOC_CC_SILENCE_INIT(NULL);
-			WRITE(new_extent_hooks, extent_hooks_t *);
-			old_extent_hooks = extent_hooks_set(arena,
-			    new_extent_hooks);
-			READ(old_extent_hooks, extent_hooks_t *);
-		} else {
-			extent_hooks_t *old_extent_hooks =
-			    extent_hooks_get(arena);
-			READ(old_extent_hooks, extent_hooks_t *);
-		}
-	} else {
-		ret = EFAULT;
-		goto label_return;
-	}
-	ret = 0;
-label_return:
-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
-	return (ret);
-}
-
-static const ctl_named_node_t *
-arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
-{
-	const ctl_named_node_t *ret;
-
-	malloc_mutex_lock(tsdn, &ctl_mtx);
-	switch (i) {
-	case MALLCTL_ARENAS_ALL:
-	case MALLCTL_ARENAS_DESTROYED:
-		break;
-	default:
-		if (i > ctl_arenas->narenas) {
-			ret = NULL;
-			goto label_return;
-		}
-		break;
-	}
-
-	ret = super_arena_i_node;
-label_return:
-	malloc_mutex_unlock(tsdn, &ctl_mtx);
-	return (ret);
-}
-
-/******************************************************************************/
-
-static int
-arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	unsigned narenas;
-
-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
-	READONLY();
-	if (*oldlenp != sizeof(unsigned)) {
-		ret = EINVAL;
-		goto label_return;
-	}
-	narenas = ctl_arenas->narenas;
-	READ(narenas, unsigned);
-
-	ret = 0;
-label_return:
-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
-	return (ret);
-}
-
-static int
-arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-
-	if (oldp != NULL && oldlenp != NULL) {
-		size_t oldval = arena_decay_time_default_get();
-		READ(oldval, ssize_t);
-	}
-	if (newp != NULL) {
-		if (newlen != sizeof(ssize_t)) {
-			ret = EINVAL;
-			goto label_return;
-		}
-		if (arena_decay_time_default_set(*(ssize_t *)newp)) {
-			ret = EFAULT;
-			goto label_return;
-		}
-	}
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
-CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
-CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
-CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
-CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
-CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
-CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
-CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t)
-static const ctl_named_node_t *
-arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
-{
-	if (i > NBINS)
-		return (NULL);
-	return (super_arenas_bin_i_node);
-}
-
-CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned)
-CTL_RO_NL_GEN(arenas_lextent_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
-static const ctl_named_node_t *
-arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
-{
-	if (i > NSIZES - NBINS)
-		return (NULL);
-	return (super_arenas_lextent_i_node);
-}
-
-static int
-arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	extent_hooks_t *extent_hooks;
-	unsigned arena_ind;
-
-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
-
-	extent_hooks = (extent_hooks_t *)&extent_hooks_default;
-	WRITE(extent_hooks, extent_hooks_t *);
-	if ((arena_ind = ctl_arena_init(tsd_tsdn(tsd), extent_hooks)) ==
-	    UINT_MAX) {
-		ret = EAGAIN;
-		goto label_return;
-	}
-	READ(arena_ind, unsigned);
-
-	ret = 0;
-label_return:
-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
-	return (ret);
-}
-
-/******************************************************************************/
-
-static int
-prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
-    void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	bool oldval;
-
-	if (!config_prof)
-		return (ENOENT);
-
-	if (newp != NULL) {
-		if (newlen != sizeof(bool)) {
-			ret = EINVAL;
-			goto label_return;
-		}
-		oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
-		    *(bool *)newp);
-	} else
-		oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
-	READ(oldval, bool);
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-static int
-prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	bool oldval;
-
-	if (!config_prof)
-		return (ENOENT);
-
-	if (newp != NULL) {
-		if (newlen != sizeof(bool)) {
-			ret = EINVAL;
-			goto label_return;
-		}
-		oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
-	} else
-		oldval = prof_active_get(tsd_tsdn(tsd));
-	READ(oldval, bool);
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-static int
-prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	const char *filename = NULL;
-
-	if (!config_prof)
-		return (ENOENT);
-
-	WRITEONLY();
-	WRITE(filename, const char *);
-
-	if (prof_mdump(tsd, filename)) {
-		ret = EFAULT;
-		goto label_return;
-	}
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-static int
-prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	bool oldval;
-
-	if (!config_prof)
-		return (ENOENT);
-
-	if (newp != NULL) {
-		if (newlen != sizeof(bool)) {
-			ret = EINVAL;
-			goto label_return;
-		}
-		oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
-	} else
-		oldval = prof_gdump_get(tsd_tsdn(tsd));
-	READ(oldval, bool);
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-static int
-prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	size_t lg_sample = lg_prof_sample;
-
-	if (!config_prof)
-		return (ENOENT);
-
-	WRITEONLY();
-	WRITE(lg_sample, size_t);
-	if (lg_sample >= (sizeof(uint64_t) << 3))
-		lg_sample = (sizeof(uint64_t) << 3) - 1;
-
-	prof_reset(tsd, lg_sample);
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
-CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
-
-/******************************************************************************/
-
-CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
-CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
-CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
-CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)
-
-CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
-CTL_RO_GEN(stats_arenas_i_decay_time, arenas_i(mib[2])->decay_time,
-    ssize_t)
-CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
-CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
-CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
-    arenas_i(mib[2])->astats->astats.mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
-    arenas_i(mib[2])->astats->astats.retained, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
-    arenas_i(mib[2])->astats->astats.npurge, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
-    arenas_i(mib[2])->astats->astats.nmadvise, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
-    arenas_i(mib[2])->astats->astats.purged, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_base,
-    arenas_i(mib[2])->astats->astats.base, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
-    arenas_i(mib[2])->astats->astats.internal, size_t)
-CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_tcache_bytes,
-    arenas_i(mib[2])->astats->astats.tcache_bytes, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
-    arenas_i(mib[2])->astats->astats.resident, size_t)
-
-CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
-    arenas_i(mib[2])->astats->allocated_small, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
-    arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
-    arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
-    arenas_i(mib[2])->astats->nrequests_small, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
-    arenas_i(mib[2])->astats->astats.allocated_large, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
-    arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
-    arenas_i(mib[2])->astats->astats.ndalloc_large, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
-    arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t) /* Intentional. */
-
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
-    arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
-    arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
-    arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
-    arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
-CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
-    arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
-CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
-    arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
-    arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
-    arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
-    arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
-
-static const ctl_named_node_t *
-stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
-    size_t j)
-{
-	if (j > NBINS)
-		return (NULL);
-	return (super_stats_arenas_i_bins_j_node);
-}
-
-CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
-    arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
-    arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
-    arenas_i(mib[2])->astats->lstats[mib[4]].nrequests, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
-    arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
-
-static const ctl_named_node_t *
-stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
-    size_t j)
-{
-	if (j > NSIZES - NBINS)
-		return (NULL);
-	return (super_stats_arenas_i_lextents_j_node);
-}
-
-static const ctl_named_node_t *
-stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
-{
-	const ctl_named_node_t *ret;
-	size_t a;
-
-	malloc_mutex_lock(tsdn, &ctl_mtx);
-	a = arenas_i2a_impl(i, true, true);
-	if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
-		ret = NULL;
-		goto label_return;
-	}
-
-	ret = super_stats_arenas_i_node;
-label_return:
-	malloc_mutex_unlock(tsdn, &ctl_mtx);
-	return (ret);
-}
diff --git a/zircon/third_party/ulib/jemalloc/src/extent.c b/zircon/third_party/ulib/jemalloc/src/extent.c
deleted file mode 100644
index be40aaa..0000000
--- a/zircon/third_party/ulib/jemalloc/src/extent.c
+++ /dev/null
@@ -1,1381 +0,0 @@
-#define	JEMALLOC_EXTENT_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-rtree_t		extents_rtree;
-
-static void	*extent_alloc_default(extent_hooks_t *extent_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
-    unsigned arena_ind);
-static bool	extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
-    size_t size, bool committed, unsigned arena_ind);
-static bool	extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
-    size_t size, size_t offset, size_t length, unsigned arena_ind);
-static bool	extent_decommit_default(extent_hooks_t *extent_hooks,
-    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
-#ifdef PAGES_CAN_PURGE_LAZY
-static bool	extent_purge_lazy_default(extent_hooks_t *extent_hooks,
-    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
-#endif
-#ifdef PAGES_CAN_PURGE_FORCED
-static bool	extent_purge_forced_default(extent_hooks_t *extent_hooks,
-    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
-#endif
-#ifdef JEMALLOC_MAPS_COALESCE
-static bool	extent_split_default(extent_hooks_t *extent_hooks, void *addr,
-    size_t size, size_t size_a, size_t size_b, bool committed,
-    unsigned arena_ind);
-static bool	extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
-    size_t size_a, void *addr_b, size_t size_b, bool committed,
-    unsigned arena_ind);
-#endif
-
-const extent_hooks_t	extent_hooks_default = {
-	extent_alloc_default,
-	extent_dalloc_default,
-	extent_commit_default,
-	extent_decommit_default
-#ifdef PAGES_CAN_PURGE_LAZY
-	,
-	extent_purge_lazy_default
-#else
-	,
-	NULL
-#endif
-#ifdef PAGES_CAN_PURGE_FORCED
-	,
-	extent_purge_forced_default
-#else
-	,
-	NULL
-#endif
-#ifdef JEMALLOC_MAPS_COALESCE
-	,
-	extent_split_default,
-	extent_merge_default
-#endif
-};
-
-/* Used exclusively for gdump triggering. */
-static size_t	curpages;
-static size_t	highpages;
-
-/******************************************************************************/
-/*
- * Function prototypes for static functions that are referenced prior to
- * definition.
- */
-
-static void	extent_record(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_heap_t extent_heaps[NPSIZES+1],
-    bool cache, extent_t *extent);
-
-/******************************************************************************/
-
-extent_t *
-extent_alloc(tsdn_t *tsdn, arena_t *arena)
-{
-	extent_t *extent;
-
-	malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
-	extent = ql_last(&arena->extent_cache, ql_link);
-	if (extent == NULL) {
-		malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
-		return (base_alloc(tsdn, arena->base, sizeof(extent_t),
-		    QUANTUM));
-	}
-	ql_tail_remove(&arena->extent_cache, extent_t, ql_link);
-	malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
-	return (extent);
-}
-
-void
-extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
-{
-	malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
-	ql_elm_new(extent, ql_link);
-	ql_tail_insert(&arena->extent_cache, extent, ql_link);
-	malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
-}
-
-extent_hooks_t *
-extent_hooks_get(arena_t *arena)
-{
-	return (base_extent_hooks_get(arena->base));
-}
-
-extent_hooks_t *
-extent_hooks_set(arena_t *arena, extent_hooks_t *extent_hooks)
-{
-	return (base_extent_hooks_set(arena->base, extent_hooks));
-}
-
-static void
-extent_hooks_assure_initialized(arena_t *arena, extent_hooks_t **r_extent_hooks)
-{
-	if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER)
-		*r_extent_hooks = extent_hooks_get(arena);
-}
-
-#ifdef JEMALLOC_JET
-#undef extent_size_quantize_floor
-#define	extent_size_quantize_floor JEMALLOC_N(n_extent_size_quantize_floor)
-#endif
-size_t
-extent_size_quantize_floor(size_t size)
-{
-	size_t ret;
-	pszind_t pind;
-
-	assert(size > 0);
-	assert((size & PAGE_MASK) == 0);
-
-	pind = psz2ind(size - large_pad + 1);
-	if (pind == 0) {
-		/*
-		 * Avoid underflow.  This short-circuit would also do the right
-		 * thing for all sizes in the range for which there are
-		 * PAGE-spaced size classes, but it's simplest to just handle
-		 * the one case that would cause erroneous results.
-		 */
-		return (size);
-	}
-	ret = pind2sz(pind - 1) + large_pad;
-	assert(ret <= size);
-	return (ret);
-}
-#ifdef JEMALLOC_JET
-#undef extent_size_quantize_floor
-#define	extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor)
-extent_size_quantize_t *extent_size_quantize_floor =
-    JEMALLOC_N(n_extent_size_quantize_floor);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef extent_size_quantize_ceil
-#define	extent_size_quantize_ceil JEMALLOC_N(n_extent_size_quantize_ceil)
-#endif
-size_t
-extent_size_quantize_ceil(size_t size)
-{
-	size_t ret;
-
-	assert(size > 0);
-	assert(size - large_pad <= LARGE_MAXCLASS);
-	assert((size & PAGE_MASK) == 0);
-
-	ret = extent_size_quantize_floor(size);
-	if (ret < size) {
-		/*
-		 * Skip a quantization that may have an adequately large extent,
-		 * because under-sized extents may be mixed in.  This only
-		 * happens when an unusual size is requested, i.e. for aligned
-		 * allocation, and is just one of several places where linear
-		 * search would potentially find sufficiently aligned available
-		 * memory somewhere lower.
-		 */
-		ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
-	}
-	return (ret);
-}
-#ifdef JEMALLOC_JET
-#undef extent_size_quantize_ceil
-#define	extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil)
-extent_size_quantize_t *extent_size_quantize_ceil =
-    JEMALLOC_N(n_extent_size_quantize_ceil);
-#endif
-
-/* Generate pairing heap functions. */
-ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
-
-static void
-extent_heaps_insert(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES+1],
-    extent_t *extent)
-{
-	size_t psz = extent_size_quantize_floor(extent_size_get(extent));
-	pszind_t pind = psz2ind(psz);
-
-	malloc_mutex_assert_owner(tsdn, &extent_arena_get(extent)->extents_mtx);
-
-	extent_heap_insert(&extent_heaps[pind], extent);
-}
-
-static void
-extent_heaps_remove(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES+1],
-    extent_t *extent)
-{
-	size_t psz = extent_size_quantize_floor(extent_size_get(extent));
-	pszind_t pind = psz2ind(psz);
-
-	malloc_mutex_assert_owner(tsdn, &extent_arena_get(extent)->extents_mtx);
-
-	extent_heap_remove(&extent_heaps[pind], extent);
-}
-
-static bool
-extent_rtree_acquire(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
-    const extent_t *extent, bool dependent, bool init_missing,
-    rtree_elm_t **r_elm_a, rtree_elm_t **r_elm_b)
-{
-	*r_elm_a = rtree_elm_acquire(tsdn, &extents_rtree, rtree_ctx,
-	    (uintptr_t)extent_base_get(extent), dependent, init_missing);
-	if (!dependent && *r_elm_a == NULL)
-		return (true);
-	assert(*r_elm_a != NULL);
-
-	if (extent_size_get(extent) > PAGE) {
-		*r_elm_b = rtree_elm_acquire(tsdn, &extents_rtree, rtree_ctx,
-		    (uintptr_t)extent_last_get(extent), dependent,
-		    init_missing);
-		if (!dependent && *r_elm_b == NULL) {
-			rtree_elm_release(tsdn, &extents_rtree, *r_elm_a);
-			return (true);
-		}
-		assert(*r_elm_b != NULL);
-	} else
-		*r_elm_b = NULL;
-
-	return (false);
-}
-
-static void
-extent_rtree_write_acquired(tsdn_t *tsdn, rtree_elm_t *elm_a,
-    rtree_elm_t *elm_b, const extent_t *extent)
-{
-	rtree_elm_write_acquired(tsdn, &extents_rtree, elm_a, extent);
-	if (elm_b != NULL)
-		rtree_elm_write_acquired(tsdn, &extents_rtree, elm_b, extent);
-}
-
-static void
-extent_rtree_release(tsdn_t *tsdn, rtree_elm_t *elm_a, rtree_elm_t *elm_b)
-{
-	rtree_elm_release(tsdn, &extents_rtree, elm_a);
-	if (elm_b != NULL)
-		rtree_elm_release(tsdn, &extents_rtree, elm_b);
-}
-
-static void
-extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
-    const extent_t *extent)
-{
-	size_t i;
-
-	assert(extent_slab_get(extent));
-
-	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
-		rtree_write(tsdn, &extents_rtree, rtree_ctx,
-		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
-		    LG_PAGE), extent);
-	}
-}
-
-static void
-extent_gprof_add(tsdn_t *tsdn, const extent_t *extent)
-{
-	cassert(config_prof);
-
-	if (opt_prof && extent_active_get(extent)) {
-		size_t nadd = extent_size_get(extent) >> LG_PAGE;
-		size_t cur = atomic_add_zu(&curpages, nadd);
-		size_t high = atomic_read_zu(&highpages);
-		while (cur > high && atomic_cas_zu(&highpages, high, cur)) {
-			/*
-			 * Don't refresh cur, because it may have decreased
-			 * since this thread lost the highpages update race.
-			 */
-			high = atomic_read_zu(&highpages);
-		}
-		if (cur > high && prof_gdump_get_unlocked())
-			prof_gdump(tsdn);
-	}
-}
-
-static void
-extent_gprof_sub(tsdn_t *tsdn, const extent_t *extent)
-{
-	cassert(config_prof);
-
-	if (opt_prof && extent_active_get(extent)) {
-		size_t nsub = extent_size_get(extent) >> LG_PAGE;
-		assert(atomic_read_zu(&curpages) >= nsub);
-		atomic_sub_zu(&curpages, nsub);
-	}
-}
-
-static bool
-extent_register(tsdn_t *tsdn, const extent_t *extent)
-{
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-	rtree_elm_t *elm_a, *elm_b;
-
-	if (extent_rtree_acquire(tsdn, rtree_ctx, extent, false, true, &elm_a,
-	    &elm_b))
-		return (true);
-	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent);
-	if (extent_slab_get(extent))
-		extent_interior_register(tsdn, rtree_ctx, extent);
-	extent_rtree_release(tsdn, elm_a, elm_b);
-
-	if (config_prof)
-		extent_gprof_add(tsdn, extent);
-
-	return (false);
-}
-
-static void
-extent_reregister(tsdn_t *tsdn, const extent_t *extent)
-{
-	bool err = extent_register(tsdn, extent);
-	assert(!err);
-}
-
-static void
-extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
-    const extent_t *extent)
-{
-	size_t i;
-
-	assert(extent_slab_get(extent));
-
-	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
-		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
-		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
-		    LG_PAGE));
-	}
-}
-
-static void
-extent_deregister(tsdn_t *tsdn, extent_t *extent)
-{
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-	rtree_elm_t *elm_a, *elm_b;
-
-	extent_rtree_acquire(tsdn, rtree_ctx, extent, true, false, &elm_a,
-	    &elm_b);
-	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL);
-	if (extent_slab_get(extent)) {
-		extent_interior_deregister(tsdn, rtree_ctx, extent);
-		extent_slab_set(extent, false);
-	}
-	extent_rtree_release(tsdn, elm_a, elm_b);
-
-	if (config_prof)
-		extent_gprof_sub(tsdn, extent);
-}
-
-/*
- * Do first-best-fit extent selection, i.e. select the oldest/lowest extent that
- * best fits.
- */
-static extent_t *
-extent_first_best_fit(tsdn_t *tsdn, arena_t *arena,
-    extent_heap_t extent_heaps[NPSIZES+1], size_t size)
-{
-	pszind_t pind, i;
-
-	malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
-
-	pind = psz2ind(extent_size_quantize_ceil(size));
-	for (i = pind; i < NPSIZES+1; i++) {
-		extent_t *extent = extent_heap_first(&extent_heaps[i]);
-		if (extent != NULL)
-			return (extent);
-	}
-
-	return (NULL);
-}
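As a side note on the first-best-fit policy implemented above: a minimal, stand-alone sketch of the same idea, using hypothetical names (free_bin_t, bin_for_size) instead of jemalloc's pairing heaps and psz2ind()/pind2sz() machinery, might look like this:

#include <stddef.h>

#define NBINS 8	/* Hypothetical number of size-class bins. */

typedef struct free_node {
	struct free_node *next;
	size_t size;
} free_node_t;

typedef struct {
	free_node_t *head;	/* Oldest (lowest) free extent first. */
} free_bin_t;

/* Smallest bin whose class is >= size; classes double from one 4 KiB page. */
static size_t
bin_for_size(size_t size)
{
	size_t i = 0, cls = 4096;

	while (i < NBINS - 1 && cls < size) {
		cls <<= 1;
		i++;
	}
	return i;
}

/* Scan upward from the best-fitting class; the first non-empty bin wins. */
static free_node_t *
first_best_fit(free_bin_t bins[NBINS], size_t size)
{
	for (size_t i = bin_for_size(size); i < NBINS; i++) {
		if (bins[i].head != NULL)
			return bins[i].head;
	}
	return NULL;
}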
-
-static void
-extent_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
-    bool cache, extent_t *extent)
-{
-	/*
-	 * Leak extent after making sure its pages have already been purged, so
-	 * that this is only a virtual memory leak.
-	 */
-	if (cache) {
-		if (extent_purge_lazy_wrapper(tsdn, arena, r_extent_hooks,
-		    extent, 0, extent_size_get(extent))) {
-			extent_purge_forced_wrapper(tsdn, arena, r_extent_hooks,
-			    extent, 0, extent_size_get(extent));
-		}
-	}
-	extent_dalloc(tsdn, arena, extent);
-}
-
-static extent_t *
-extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
-    extent_heap_t extent_heaps[NPSIZES+1], bool locked, bool cache,
-    void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero,
-    bool *commit, bool slab)
-{
-	extent_t *extent;
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-	size_t size, alloc_size, leadsize, trailsize;
-
-	if (locked)
-		malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
-	assert(new_addr == NULL || !slab);
-	assert(pad == 0 || !slab);
-	assert(alignment > 0);
-	if (config_debug && new_addr != NULL) {
-		extent_t *prev;
-
-		/*
-		 * Non-NULL new_addr has two use cases:
-		 *
-		 *   1) Recycle a known-extant extent, e.g. during purging.
-		 *   2) Perform in-place expanding reallocation.
-		 *
-		 * Regardless of use case, new_addr must either refer to a
-		 * non-existing extent, or to the base of an extant extent,
-		 * since only active slabs support interior lookups (which of
-		 * course cannot be recycled).
-		 */
-		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
-		assert(pad == 0);
-		assert(alignment <= PAGE);
-		prev = extent_lookup(tsdn, (void *)((uintptr_t)new_addr - PAGE),
-		    false);
-		assert(prev == NULL || extent_past_get(prev) == new_addr);
-	}
-
-	size = usize + pad;
-	alloc_size = size + PAGE_CEILING(alignment) - PAGE;
-	/* Beware size_t wrap-around. */
-	if (alloc_size < usize)
-		return (NULL);
-	if (!locked)
-		malloc_mutex_lock(tsdn, &arena->extents_mtx);
-	extent_hooks_assure_initialized(arena, r_extent_hooks);
-	if (new_addr != NULL) {
-		rtree_elm_t *elm;
-
-		elm = rtree_elm_acquire(tsdn, &extents_rtree, rtree_ctx,
-		    (uintptr_t)new_addr, false, false);
-		if (elm != NULL) {
-			extent = rtree_elm_read_acquired(tsdn, &extents_rtree,
-			    elm);
-			if (extent != NULL) {
-				assert(extent_base_get(extent) == new_addr);
-				if (extent_arena_get(extent) != arena ||
-				    extent_size_get(extent) < size ||
-				    extent_active_get(extent) ||
-				    extent_retained_get(extent) == cache)
-					extent = NULL;
-			}
-			rtree_elm_release(tsdn, &extents_rtree, elm);
-		} else
-			extent = NULL;
-	} else {
-		extent = extent_first_best_fit(tsdn, arena, extent_heaps,
-		    alloc_size);
-	}
-	if (extent == NULL) {
-		if (!locked)
-			malloc_mutex_unlock(tsdn, &arena->extents_mtx);
-		return (NULL);
-	}
-	extent_heaps_remove(tsdn, extent_heaps, extent);
-	arena_extent_cache_maybe_remove(tsdn, arena, extent, cache);
-
-	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent),
-	    PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent);
-	assert(new_addr == NULL || leadsize == 0);
-	assert(extent_size_get(extent) >= leadsize + size);
-	trailsize = extent_size_get(extent) - leadsize - size;
-	if (extent_zeroed_get(extent))
-		*zero = true;
-	if (extent_committed_get(extent))
-		*commit = true;
-
-	/* Split the lead. */
-	if (leadsize != 0) {
-		extent_t *lead = extent;
-		extent = extent_split_wrapper(tsdn, arena, r_extent_hooks,
-		    lead, leadsize, leadsize, size + trailsize, usize +
-		    trailsize);
-		if (extent == NULL) {
-			extent_deregister(tsdn, lead);
-			extent_leak(tsdn, arena, r_extent_hooks, cache, lead);
-			if (!locked)
-				malloc_mutex_unlock(tsdn, &arena->extents_mtx);
-			return (NULL);
-		}
-		extent_heaps_insert(tsdn, extent_heaps, lead);
-		arena_extent_cache_maybe_insert(tsdn, arena, lead, cache);
-	}
-
-	/* Split the trail. */
-	if (trailsize != 0) {
-		extent_t *trail = extent_split_wrapper(tsdn, arena,
-		    r_extent_hooks, extent, size, usize, trailsize, trailsize);
-		if (trail == NULL) {
-			extent_deregister(tsdn, extent);
-			extent_leak(tsdn, arena, r_extent_hooks, cache,
-			    extent);
-			if (!locked)
-				malloc_mutex_unlock(tsdn, &arena->extents_mtx);
-			return (NULL);
-		}
-		extent_heaps_insert(tsdn, extent_heaps, trail);
-		arena_extent_cache_maybe_insert(tsdn, arena, trail, cache);
-	} else if (leadsize == 0) {
-		/*
-		 * Splitting causes usize to be set as a side effect, but no
-		 * splitting occurred.
-		 */
-		extent_usize_set(extent, usize);
-	}
-
-	if (*commit && !extent_committed_get(extent)) {
-		if (extent_commit_wrapper(tsdn, arena, r_extent_hooks, extent,
-		    0, extent_size_get(extent))) {
-			if (!locked)
-				malloc_mutex_unlock(tsdn, &arena->extents_mtx);
-			extent_record(tsdn, arena, r_extent_hooks, extent_heaps,
-			    cache, extent);
-			return (NULL);
-		}
-		extent_zeroed_set(extent, true);
-	}
-
-	if (pad != 0)
-		extent_addr_randomize(tsdn, extent, alignment);
-	extent_active_set(extent, true);
-	if (slab) {
-		extent_slab_set(extent, slab);
-		extent_interior_register(tsdn, rtree_ctx, extent);
-	}
-
-	if (!locked)
-		malloc_mutex_unlock(tsdn, &arena->extents_mtx);
-
-	if (*zero) {
-		if (!extent_zeroed_get(extent)) {
-			memset(extent_addr_get(extent), 0,
-			    extent_usize_get(extent));
-		} else if (config_debug) {
-			size_t i;
-			size_t *p = (size_t *)(uintptr_t)
-			    extent_addr_get(extent);
-
-			for (i = 0; i < usize / sizeof(size_t); i++)
-				assert(p[i] == 0);
-		}
-	}
-	return (extent);
-}
-
-/*
- * If the caller specifies (!*zero), it is still possible to receive zeroed
- * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
- * advantage of this to avoid demanding zeroed extents, while still taking
- * advantage of them if they are returned.
- */
-static void *
-extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
-    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
-{
-	void *ret;
-
-	assert(size != 0);
-	assert(alignment != 0);
-
-	/* "primary" dss. */
-	if (have_dss && dss_prec == dss_prec_primary && (ret =
-	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
-	    commit)) != NULL)
-		return (ret);
-	/* mmap. */
-	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
-	    != NULL)
-		return (ret);
-	/* "secondary" dss. */
-	if (have_dss && dss_prec == dss_prec_secondary && (ret =
-	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
-	    commit)) != NULL)
-		return (ret);
-
-	/* All strategies for allocation failed. */
-	return (NULL);
-}
-
-static extent_t *
-extent_alloc_cache_impl(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, bool locked, void *new_addr, size_t usize,
-    size_t pad, size_t alignment, bool *zero, bool *commit, bool slab)
-{
-	extent_t *extent;
-
-	assert(usize + pad != 0);
-	assert(alignment != 0);
-
-	extent = extent_recycle(tsdn, arena, r_extent_hooks,
-	    arena->extents_cached, locked, true, new_addr, usize, pad,
-	    alignment, zero, commit, slab);
-	return (extent);
-}
-
-extent_t *
-extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
-    size_t alignment, bool *zero, bool *commit, bool slab)
-{
-	malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
-
-	return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, true,
-	    new_addr, usize, pad, alignment, zero, commit, slab));
-}
-
-extent_t *
-extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
-    size_t alignment, bool *zero, bool *commit, bool slab)
-{
-	return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, false,
-	    new_addr, usize, pad, alignment, zero, commit, slab));
-}
-
-static void *
-extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
-    size_t size, size_t alignment, bool *zero, bool *commit)
-{
-	void *ret;
-
-	ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
-	    commit, arena->dss_prec);
-	return (ret);
-}
-
-static void *
-extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
-    size_t alignment, bool *zero, bool *commit, unsigned arena_ind)
-{
-	tsdn_t *tsdn;
-	arena_t *arena;
-
-	assert(extent_hooks == &extent_hooks_default);
-
-	tsdn = tsdn_fetch();
-	arena = arena_get(tsdn, arena_ind, false);
-	/*
-	 * The arena we're allocating on behalf of must have been initialized
-	 * already.
-	 */
-	assert(arena != NULL);
-
-	return (extent_alloc_default_impl(tsdn, arena, new_addr, size,
-	    alignment, zero, commit));
-}
-
-static void
-extent_retain(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
-    extent_t *extent)
-{
-	if (config_stats)
-		arena->stats.retained += extent_size_get(extent);
-	extent_record(tsdn, arena, r_extent_hooks, arena->extents_retained,
-	    false, extent);
-}
-
-/*
- * If virtual memory is retained, create increasingly larger extents from which
- * to split requested extents in order to limit the total number of disjoint
- * virtual memory ranges retained by each arena.
- */
-static extent_t *
-extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
-    size_t alignment, bool *zero, bool *commit, bool slab)
-{
-	extent_t *extent;
-	void *ptr;
-	size_t size, alloc_size, alloc_size_min, leadsize, trailsize;
-	bool zeroed, committed;
-
-	/*
-	 * Check whether the next extent size in the series would be large
-	 * enough to satisfy this request.  If not, just bail, so that e.g. a
-	 * series of unsatisfiable allocation requests doesn't cause unused
-	 * extent creation as a side effect.
-	 */
-	size = usize + pad;
-	alloc_size = pind2sz(arena->extent_grow_next);
-	alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
-	/* Beware size_t wrap-around. */
-	if (alloc_size_min < usize)
-		return (NULL);
-	if (alloc_size < alloc_size_min)
-		return (NULL);
-	extent = extent_alloc(tsdn, arena);
-	if (extent == NULL)
-		return (NULL);
-	zeroed = false;
-	committed = false;
-	ptr = extent_alloc_core(tsdn, arena, new_addr, alloc_size, PAGE,
-	    &zeroed, &committed, arena->dss_prec);
-	extent_init(extent, arena, ptr, alloc_size, alloc_size,
-	    arena_extent_sn_next(arena), false, zeroed, committed, false);
-	if (ptr == NULL || extent_register(tsdn, extent)) {
-		extent_dalloc(tsdn, arena, extent);
-		return (NULL);
-	}
-	/*
-	 * Set the extent as active *after* registration so that no gprof-related
-	 * accounting occurs during registration.
-	 */
-	extent_active_set(extent, true);
-
-	leadsize = ALIGNMENT_CEILING((uintptr_t)ptr, PAGE_CEILING(alignment)) -
-	    (uintptr_t)ptr;
-	assert(new_addr == NULL || leadsize == 0);
-	assert(alloc_size >= leadsize + size);
-	trailsize = alloc_size - leadsize - size;
-	if (extent_zeroed_get(extent))
-		*zero = true;
-	if (extent_committed_get(extent))
-		*commit = true;
-
-	/* Split the lead. */
-	if (leadsize != 0) {
-		extent_t *lead = extent;
-		extent = extent_split_wrapper(tsdn, arena, r_extent_hooks, lead,
-		    leadsize, leadsize, size + trailsize, usize + trailsize);
-		if (extent == NULL) {
-			extent_deregister(tsdn, lead);
-			extent_leak(tsdn, arena, r_extent_hooks, false, lead);
-			return (NULL);
-		}
-		extent_retain(tsdn, arena, r_extent_hooks, lead);
-	}
-
-	/* Split the trail. */
-	if (trailsize != 0) {
-		extent_t *trail = extent_split_wrapper(tsdn, arena,
-		    r_extent_hooks, extent, size, usize, trailsize, trailsize);
-		if (trail == NULL) {
-			extent_deregister(tsdn, extent);
-			extent_leak(tsdn, arena, r_extent_hooks, false, extent);
-			return (NULL);
-		}
-		extent_retain(tsdn, arena, r_extent_hooks, trail);
-	} else if (leadsize == 0) {
-		/*
-		 * Splitting causes usize to be set as a side effect, but no
-		 * splitting occurred.
-		 */
-		extent_usize_set(extent, usize);
-	}
-
-	if (*commit && !extent_committed_get(extent)) {
-		if (extent_commit_wrapper(tsdn, arena, r_extent_hooks, extent,
-		    0, extent_size_get(extent))) {
-			extent_retain(tsdn, arena, r_extent_hooks, extent);
-			return (NULL);
-		}
-		extent_zeroed_set(extent, true);
-	}
-
-	if (config_prof) {
-		/* Adjust gprof stats now that extent is final size. */
-		extent_gprof_add(tsdn, extent);
-	}
-	if (pad != 0)
-		extent_addr_randomize(tsdn, extent, alignment);
-	if (slab) {
-		rtree_ctx_t rtree_ctx_fallback;
-		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
-		    &rtree_ctx_fallback);
-
-		extent_slab_set(extent, true);
-		extent_interior_register(tsdn, rtree_ctx, extent);
-	}
-	if (*zero && !extent_zeroed_get(extent))
-		memset(extent_addr_get(extent), 0, extent_usize_get(extent));
-	if (arena->extent_grow_next + 1 < NPSIZES)
-		arena->extent_grow_next++;
-	return (extent);
-}
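To make the rationale above concrete: under a simplified doubling model (the real series steps through the page size classes via pind2sz(), so it grows more gently), the number of disjoint retained mappings needed to cover a given amount of memory grows only logarithmically. A toy calculation with no jemalloc dependencies:

#include <stdio.h>

int
main(void)
{
	size_t grow = 4096;	/* First retained mapping: one 4 KiB page. */
	size_t total = 0, nmaps = 0;

	while (total < ((size_t)1 << 30)) {	/* Cover 1 GiB. */
		total += grow;
		nmaps++;
		grow <<= 1;	/* Each new mapping is larger than the last. */
	}
	printf("covered %zu bytes with %zu mappings\n", total, nmaps);
	return 0;
}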
-
-static extent_t *
-extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
-    size_t alignment, bool *zero, bool *commit, bool slab)
-{
-	extent_t *extent;
-
-	assert(usize != 0);
-	assert(alignment != 0);
-
-	extent = extent_recycle(tsdn, arena, r_extent_hooks,
-	    arena->extents_retained, false, false, new_addr, usize, pad,
-	    alignment, zero, commit, slab);
-	if (extent != NULL) {
-		if (config_stats) {
-			size_t size = usize + pad;
-			arena->stats.retained -= size;
-		}
-		if (config_prof)
-			extent_gprof_add(tsdn, extent);
-	}
-	if (!config_munmap && extent == NULL) {
-		extent = extent_grow_retained(tsdn, arena, r_extent_hooks,
-		    new_addr, usize, pad, alignment, zero, commit, slab);
-	}
-
-	return (extent);
-}
-
-static extent_t *
-extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
-    size_t alignment, bool *zero, bool *commit, bool slab)
-{
-	extent_t *extent;
-	size_t size;
-	void *addr;
-
-	size = usize + pad;
-	extent = extent_alloc(tsdn, arena);
-	if (extent == NULL)
-		return (NULL);
-	if (*r_extent_hooks == &extent_hooks_default) {
-		/* Call directly to propagate tsdn. */
-		addr = extent_alloc_default_impl(tsdn, arena, new_addr, size,
-		    alignment, zero, commit);
-	} else {
-		addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, size,
-		    alignment, zero, commit, arena_ind_get(arena));
-	}
-	if (addr == NULL) {
-		extent_dalloc(tsdn, arena, extent);
-		return (NULL);
-	}
-	extent_init(extent, arena, addr, size, usize,
-	    arena_extent_sn_next(arena), true, zero, commit, slab);
-	if (pad != 0)
-		extent_addr_randomize(tsdn, extent, alignment);
-	if (extent_register(tsdn, extent)) {
-		extent_leak(tsdn, arena, r_extent_hooks, false, extent);
-		return (NULL);
-	}
-
-	return (extent);
-}
-
-extent_t *
-extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
-    size_t alignment, bool *zero, bool *commit, bool slab)
-{
-	extent_t *extent;
-
-	extent_hooks_assure_initialized(arena, r_extent_hooks);
-
-	extent = extent_alloc_retained(tsdn, arena, r_extent_hooks, new_addr,
-	    usize, pad, alignment, zero, commit, slab);
-	if (extent == NULL) {
-		extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
-		    new_addr, usize, pad, alignment, zero, commit, slab);
-	}
-
-	return (extent);
-}
-
-static bool
-extent_can_coalesce(const extent_t *a, const extent_t *b)
-{
-	if (extent_arena_get(a) != extent_arena_get(b))
-		return (false);
-	if (extent_active_get(a) != extent_active_get(b))
-		return (false);
-	if (extent_committed_get(a) != extent_committed_get(b))
-		return (false);
-	if (extent_retained_get(a) != extent_retained_get(b))
-		return (false);
-
-	return (true);
-}
-
-static void
-extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
-    extent_heap_t extent_heaps[NPSIZES+1], bool cache)
-{
-	if (!extent_can_coalesce(a, b))
-		return;
-
-	extent_heaps_remove(tsdn, extent_heaps, a);
-	extent_heaps_remove(tsdn, extent_heaps, b);
-
-	arena_extent_cache_maybe_remove(tsdn, extent_arena_get(a), a, cache);
-	arena_extent_cache_maybe_remove(tsdn, extent_arena_get(b), b, cache);
-
-	if (extent_merge_wrapper(tsdn, arena, r_extent_hooks, a, b)) {
-		extent_heaps_insert(tsdn, extent_heaps, a);
-		extent_heaps_insert(tsdn, extent_heaps, b);
-		arena_extent_cache_maybe_insert(tsdn, extent_arena_get(a), a,
-		    cache);
-		arena_extent_cache_maybe_insert(tsdn, extent_arena_get(b), b,
-		    cache);
-		return;
-	}
-
-	extent_heaps_insert(tsdn, extent_heaps, a);
-	arena_extent_cache_maybe_insert(tsdn, extent_arena_get(a), a, cache);
-}
-
-static void
-extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
-    extent_heap_t extent_heaps[NPSIZES+1], bool cache, extent_t *extent)
-{
-	extent_t *prev, *next;
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
-	assert(!cache || !extent_zeroed_get(extent));
-
-	malloc_mutex_lock(tsdn, &arena->extents_mtx);
-	extent_hooks_assure_initialized(arena, r_extent_hooks);
-
-	extent_usize_set(extent, 0);
-	extent_active_set(extent, false);
-	extent_zeroed_set(extent, !cache && extent_zeroed_get(extent));
-	if (extent_slab_get(extent)) {
-		extent_interior_deregister(tsdn, rtree_ctx, extent);
-		extent_slab_set(extent, false);
-	}
-
-	assert(extent_lookup(tsdn, extent_base_get(extent), true) == extent);
-	extent_heaps_insert(tsdn, extent_heaps, extent);
-	arena_extent_cache_maybe_insert(tsdn, arena, extent, cache);
-
-	/* Try to coalesce forward. */
-	next = rtree_read(tsdn, &extents_rtree, rtree_ctx,
-	    (uintptr_t)extent_past_get(extent), false);
-	if (next != NULL) {
-		extent_try_coalesce(tsdn, arena, r_extent_hooks, extent, next,
-		    extent_heaps, cache);
-	}
-
-	/* Try to coalesce backward. */
-	prev = rtree_read(tsdn, &extents_rtree, rtree_ctx,
-	    (uintptr_t)extent_before_get(extent), false);
-	if (prev != NULL) {
-		extent_try_coalesce(tsdn, arena, r_extent_hooks, prev, extent,
-		    extent_heaps, cache);
-	}
-
-	malloc_mutex_unlock(tsdn, &arena->extents_mtx);
-}
-
-void
-extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
-{
-	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-
-	if (extent_register(tsdn, extent)) {
-		extent_leak(tsdn, arena, &extent_hooks, false, extent);
-		return;
-	}
-	extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
-}
-
-void
-extent_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent)
-{
-	assert(extent_base_get(extent) != NULL);
-	assert(extent_size_get(extent) != 0);
-
-	extent_addr_set(extent, extent_base_get(extent));
-	extent_zeroed_set(extent, false);
-
-	extent_record(tsdn, arena, r_extent_hooks, arena->extents_cached, true,
-	    extent);
-}
-
-static bool
-extent_dalloc_default_impl(void *addr, size_t size)
-{
-	if (!have_dss || !extent_in_dss(addr))
-		return (extent_dalloc_mmap(addr, size));
-	return (true);
-}
-
-static bool
-extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
-    bool committed, unsigned arena_ind)
-{
-	assert(extent_hooks == &extent_hooks_default);
-
-	return (extent_dalloc_default_impl(addr, size));
-}
-
-bool
-extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent)
-{
-	bool err;
-
-	assert(extent_base_get(extent) != NULL);
-	assert(extent_size_get(extent) != 0);
-
-	extent_addr_set(extent, extent_base_get(extent));
-
-	extent_hooks_assure_initialized(arena, r_extent_hooks);
-	/*
-	 * Try to deallocate.  Deregister first to avoid a race with other
-	 * allocating threads, and reregister if deallocation fails.
-	 */
-	extent_deregister(tsdn, extent);
-	if (*r_extent_hooks == &extent_hooks_default) {
-		/* Call directly to propagate tsdn. */
-		err = extent_dalloc_default_impl(extent_base_get(extent),
-		    extent_size_get(extent));
-	} else {
-		err = ((*r_extent_hooks)->dalloc == NULL ||
-		    (*r_extent_hooks)->dalloc(*r_extent_hooks,
-		    extent_base_get(extent), extent_size_get(extent),
-		    extent_committed_get(extent), arena_ind_get(arena)));
-	}
-
-	if (!err)
-		extent_dalloc(tsdn, arena, extent);
-
-	return (err);
-}
-
-void
-extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent)
-{
-	bool zeroed;
-
-	if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent))
-		return;
-
-	extent_reregister(tsdn, extent);
-	/* Try to decommit; purge if that fails. */
-	if (!extent_committed_get(extent))
-		zeroed = true;
-	else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
-	    0, extent_size_get(extent)))
-		zeroed = true;
-	else if ((*r_extent_hooks)->purge_lazy != NULL &&
-	    !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
-	    extent_base_get(extent), extent_size_get(extent), 0,
-	    extent_size_get(extent), arena_ind_get(arena)))
-		zeroed = false;
-	else if ((*r_extent_hooks)->purge_forced != NULL &&
-	    !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
-	    extent_base_get(extent), extent_size_get(extent), 0,
-	    extent_size_get(extent), arena_ind_get(arena)))
-		zeroed = true;
-	else
-		zeroed = false;
-	extent_zeroed_set(extent, zeroed);
-
-	if (config_stats)
-		arena->stats.retained += extent_size_get(extent);
-	if (config_prof)
-		extent_gprof_sub(tsdn, extent);
-
-	extent_record(tsdn, arena, r_extent_hooks, arena->extents_retained,
-	    false, extent);
-}
-
-static bool
-extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
-    size_t offset, size_t length, unsigned arena_ind)
-{
-	assert(extent_hooks == &extent_hooks_default);
-
-	return (pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
-	    length));
-}
-
-bool
-extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
-    size_t length)
-{
-	bool err;
-
-	extent_hooks_assure_initialized(arena, r_extent_hooks);
-	err = ((*r_extent_hooks)->commit == NULL ||
-	    (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
-	    extent_size_get(extent), offset, length, arena_ind_get(arena)));
-	extent_committed_set(extent, extent_committed_get(extent) || !err);
-	return (err);
-}
-
-static bool
-extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
-    size_t offset, size_t length, unsigned arena_ind)
-{
-	assert(extent_hooks == &extent_hooks_default);
-
-	return (pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
-	    length));
-}
-
-bool
-extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
-    size_t length)
-{
-	bool err;
-
-	extent_hooks_assure_initialized(arena, r_extent_hooks);
-
-	err = ((*r_extent_hooks)->decommit == NULL ||
-	    (*r_extent_hooks)->decommit(*r_extent_hooks,
-	    extent_base_get(extent), extent_size_get(extent), offset, length,
-	    arena_ind_get(arena)));
-	extent_committed_set(extent, extent_committed_get(extent) && err);
-	return (err);
-}
-
-#ifdef PAGES_CAN_PURGE_LAZY
-static bool
-extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
-    size_t offset, size_t length, unsigned arena_ind)
-{
-	assert(extent_hooks == &extent_hooks_default);
-	assert(addr != NULL);
-	assert((offset & PAGE_MASK) == 0);
-	assert(length != 0);
-	assert((length & PAGE_MASK) == 0);
-
-	return (pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
-	    length));
-}
-#endif
-
-bool
-extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
-    size_t length)
-{
-	extent_hooks_assure_initialized(arena, r_extent_hooks);
-	return ((*r_extent_hooks)->purge_lazy == NULL ||
-	    (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
-	    extent_base_get(extent), extent_size_get(extent), offset, length,
-	    arena_ind_get(arena)));
-}
-
-#ifdef PAGES_CAN_PURGE_FORCED
-static bool
-extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
-    size_t size, size_t offset, size_t length, unsigned arena_ind)
-{
-	assert(extent_hooks == &extent_hooks_default);
-	assert(addr != NULL);
-	assert((offset & PAGE_MASK) == 0);
-	assert(length != 0);
-	assert((length & PAGE_MASK) == 0);
-
-	return (pages_purge_forced((void *)((uintptr_t)addr +
-	    (uintptr_t)offset), length));
-}
-#endif
-
-bool
-extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
-    size_t length)
-{
-	extent_hooks_assure_initialized(arena, r_extent_hooks);
-	return ((*r_extent_hooks)->purge_forced == NULL ||
-	    (*r_extent_hooks)->purge_forced(*r_extent_hooks,
-	    extent_base_get(extent), extent_size_get(extent), offset, length,
-	    arena_ind_get(arena)));
-}
-
-#ifdef JEMALLOC_MAPS_COALESCE
-static bool
-extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
-    size_t size_a, size_t size_b, bool committed, unsigned arena_ind)
-{
-	assert(extent_hooks == &extent_hooks_default);
-
-	if (!maps_coalesce)
-		return (true);
-	return (false);
-}
-#endif
-
-extent_t *
-extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
-    size_t usize_a, size_t size_b, size_t usize_b)
-{
-	extent_t *trail;
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-	rtree_elm_t *lead_elm_a, *lead_elm_b, *trail_elm_a, *trail_elm_b;
-
-	assert(extent_size_get(extent) == size_a + size_b);
-
-	extent_hooks_assure_initialized(arena, r_extent_hooks);
-
-	if ((*r_extent_hooks)->split == NULL)
-		return (NULL);
-
-	trail = extent_alloc(tsdn, arena);
-	if (trail == NULL)
-		goto label_error_a;
-
-	{
-		extent_t lead;
-
-		extent_init(&lead, arena, extent_addr_get(extent), size_a,
-		    usize_a, extent_sn_get(extent), extent_active_get(extent),
-		    extent_zeroed_get(extent), extent_committed_get(extent),
-		    extent_slab_get(extent));
-
-		if (extent_rtree_acquire(tsdn, rtree_ctx, &lead, false, true,
-		    &lead_elm_a, &lead_elm_b))
-			goto label_error_b;
-	}
-
-	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
-	    size_a), size_b, usize_b, extent_sn_get(extent),
-	    extent_active_get(extent), extent_zeroed_get(extent),
-	    extent_committed_get(extent), extent_slab_get(extent));
-	if (extent_rtree_acquire(tsdn, rtree_ctx, trail, false, true,
-	    &trail_elm_a, &trail_elm_b))
-		goto label_error_c;
-
-	if ((*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
-	    size_a + size_b, size_a, size_b, extent_committed_get(extent),
-	    arena_ind_get(arena)))
-		goto label_error_d;
-
-	extent_size_set(extent, size_a);
-	extent_usize_set(extent, usize_a);
-
-	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent);
-	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail);
-
-	extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
-	extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);
-
-	return (trail);
-label_error_d:
-	extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);
-label_error_c:
-	extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
-label_error_b:
-	extent_dalloc(tsdn, arena, trail);
-label_error_a:
-	return (NULL);
-}
-
-static bool
-extent_merge_default_impl(void *addr_a, void *addr_b)
-{
-	if (!maps_coalesce)
-		return (true);
-	if (have_dss && !extent_dss_mergeable(addr_a, addr_b))
-		return (true);
-
-	return (false);
-}
-
-#ifdef JEMALLOC_MAPS_COALESCE
-static bool
-extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
-    void *addr_b, size_t size_b, bool committed, unsigned arena_ind)
-{
-	assert(extent_hooks == &extent_hooks_default);
-
-	return (extent_merge_default_impl(addr_a, addr_b));
-}
-#endif
-
-bool
-extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b)
-{
-	bool err;
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-	rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
-
-	extent_hooks_assure_initialized(arena, r_extent_hooks);
-
-	if ((*r_extent_hooks)->merge == NULL)
-		return (true);
-
-	if (*r_extent_hooks == &extent_hooks_default) {
-		/* Call directly to propagate tsdn. */
-		err = extent_merge_default_impl(extent_base_get(a),
-		    extent_base_get(b));
-	} else {
-		err = (*r_extent_hooks)->merge(*r_extent_hooks,
-		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
-		    extent_size_get(b), extent_committed_get(a),
-		    arena_ind_get(arena));
-	}
-
-	if (err)
-		return (true);
-
-	/*
-	 * The rtree writes must happen while all the relevant elements are
-	 * owned, so the following code uses decomposed helper functions rather
-	 * than extent_{,de}register() to do things in the right order.
-	 */
-	extent_rtree_acquire(tsdn, rtree_ctx, a, true, false, &a_elm_a,
-	    &a_elm_b);
-	extent_rtree_acquire(tsdn, rtree_ctx, b, true, false, &b_elm_a,
-	    &b_elm_b);
-
-	if (a_elm_b != NULL) {
-		rtree_elm_write_acquired(tsdn, &extents_rtree, a_elm_b, NULL);
-		rtree_elm_release(tsdn, &extents_rtree, a_elm_b);
-	}
-	if (b_elm_b != NULL) {
-		rtree_elm_write_acquired(tsdn, &extents_rtree, b_elm_a, NULL);
-		rtree_elm_release(tsdn, &extents_rtree, b_elm_a);
-	} else
-		b_elm_b = b_elm_a;
-
-	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
-	extent_usize_set(a, extent_usize_get(a) + extent_usize_get(b));
-	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
-	    extent_sn_get(a) : extent_sn_get(b));
-	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
-
-	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
-	extent_rtree_release(tsdn, a_elm_a, b_elm_b);
-
-	extent_dalloc(tsdn, extent_arena_get(b), b);
-
-	return (false);
-}
-
-bool
-extent_boot(void)
-{
-	if (rtree_new(&extents_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
-	    LG_PAGE)))
-		return (true);
-
-	if (have_dss)
-		extent_dss_boot();
-
-	return (false);
-}
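For reference, the alloc and dalloc prototypes near the top of this file are the signatures a user-supplied extent_hooks_t implementation has to match. Below is a minimal mmap-backed sketch of such hooks; the forward declaration stands in for the real extent_hooks_t, the my_* names are hypothetical, and how the hooks get installed (extent_hooks_set() in this tree, a mallctl in later jemalloc releases) is outside the sketch:

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

typedef struct extent_hooks_s extent_hooks_t;	/* Opaque stand-in. */

static void *
my_extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind)
{
	/* Alignment beyond page size is ignored here for brevity. */
	void *addr = mmap(new_addr, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (addr == MAP_FAILED)
		return NULL;
	*zero = true;	/* Fresh anonymous pages are zero-filled. */
	*commit = true;	/* Mapped read/write up front. */
	return addr;
}

static bool
my_extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind)
{
	/* Returning true opts out; false reports a successful unmap. */
	return munmap(addr, size) != 0;
}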
diff --git a/zircon/third_party/ulib/jemalloc/src/extent_dss.c b/zircon/third_party/ulib/jemalloc/src/extent_dss.c
deleted file mode 100644
index 5aa95b1..0000000
--- a/zircon/third_party/ulib/jemalloc/src/extent_dss.c
+++ /dev/null
@@ -1,247 +0,0 @@
-#define	JEMALLOC_EXTENT_DSS_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-/******************************************************************************/
-/* Data. */
-
-const char	*opt_dss = DSS_DEFAULT;
-
-const char	*dss_prec_names[] = {
-	"disabled",
-	"primary",
-	"secondary",
-	"N/A"
-};
-
-/*
- * Current dss precedence default, used when creating new arenas.  NB: This is
- * stored as unsigned rather than dss_prec_t because in principle there's no
- * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
- * atomic operations to synchronize the setting.
- */
-static unsigned		dss_prec_default = (unsigned)DSS_PREC_DEFAULT;
-
-/* Base address of the DSS. */
-static void		*dss_base;
-/* Atomic boolean indicating whether the DSS is exhausted. */
-static unsigned		dss_exhausted;
-/* Atomic current upper limit on DSS addresses. */
-static void		*dss_max;
-
-/******************************************************************************/
-
-static void *
-extent_dss_sbrk(intptr_t increment)
-{
-#ifdef JEMALLOC_DSS
-	return (sbrk(increment));
-#else
-	not_implemented();
-	return (NULL);
-#endif
-}
-
-dss_prec_t
-extent_dss_prec_get(void)
-{
-	dss_prec_t ret;
-
-	if (!have_dss)
-		return (dss_prec_disabled);
-	ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
-	return (ret);
-}
-
-bool
-extent_dss_prec_set(dss_prec_t dss_prec)
-{
-	if (!have_dss)
-		return (dss_prec != dss_prec_disabled);
-	atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
-	return (false);
-}
-
-static void *
-extent_dss_max_update(void *new_addr)
-{
-	void *max_cur;
-	spin_t spinner;
-
-	/*
-	 * Get the current end of the DSS as max_cur and assure that dss_max is
-	 * up to date.
-	 */
-	spin_init(&spinner);
-	while (true) {
-		void *max_prev = atomic_read_p(&dss_max);
-
-		max_cur = extent_dss_sbrk(0);
-		if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
-			/*
-			 * Another thread optimistically updated dss_max.  Wait
-			 * for it to finish.
-			 */
-			spin_adaptive(&spinner);
-			continue;
-		}
-		if (!atomic_cas_p(&dss_max, max_prev, max_cur))
-			break;
-	}
-	/* Fixed new_addr can only be supported if it is at the edge of DSS. */
-	if (new_addr != NULL && max_cur != new_addr)
-		return (NULL);
-
-	return (max_cur);
-}
-
-void *
-extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
-    size_t alignment, bool *zero, bool *commit)
-{
-	extent_t *gap;
-
-	cassert(have_dss);
-	assert(size > 0);
-	assert(alignment > 0);
-
-	/*
-	 * sbrk() uses a signed increment argument, so take care not to
-	 * interpret a large allocation request as a negative increment.
-	 */
-	if ((intptr_t)size < 0)
-		return (NULL);
-
-	gap = extent_alloc(tsdn, arena);
-	if (gap == NULL)
-		return (NULL);
-
-	if (!atomic_read_u(&dss_exhausted)) {
-		/*
-		 * The loop is necessary to recover from races with other
-		 * threads that are using the DSS for something other than
-		 * malloc.
-		 */
-		while (true) {
-			void *ret, *max_cur, *gap_addr, *dss_next, *dss_prev;
-			size_t gap_size;
-			intptr_t incr;
-
-			max_cur = extent_dss_max_update(new_addr);
-			if (max_cur == NULL)
-				goto label_oom;
-
-			/*
-			 * Compute how much gap space (if any) is necessary to
-			 * satisfy alignment.  This space can be recycled for
-			 * later use.
-			 */
-			gap_addr = (void *)(PAGE_CEILING((uintptr_t)max_cur));
-			ret = (void *)ALIGNMENT_CEILING((uintptr_t)gap_addr,
-			    PAGE_CEILING(alignment));
-			gap_size = (uintptr_t)ret - (uintptr_t)gap_addr;
-			if (gap_size != 0) {
-				extent_init(gap, arena, gap_addr, gap_size,
-				    gap_size, arena_extent_sn_next(arena),
-				    false, false, true, false);
-			}
-			dss_next = (void *)((uintptr_t)ret + size);
-			if ((uintptr_t)ret < (uintptr_t)max_cur ||
-			    (uintptr_t)dss_next < (uintptr_t)max_cur)
-				goto label_oom; /* Wrap-around. */
-			incr = gap_size + size;
-
-			/*
-			 * Optimistically update dss_max, and roll back below if
-			 * sbrk() fails.  No other thread will try to extend the
-			 * DSS while dss_max is greater than the current DSS
-			 * max reported by sbrk(0).
-			 */
-			if (atomic_cas_p(&dss_max, max_cur, dss_next))
-				continue;
-
-			/* Try to allocate. */
-			dss_prev = extent_dss_sbrk(incr);
-			if (dss_prev == max_cur) {
-				/* Success. */
-				if (gap_size != 0)
-					extent_dalloc_gap(tsdn, arena, gap);
-				else
-					extent_dalloc(tsdn, arena, gap);
-				if (!*commit)
-					*commit = pages_decommit(ret, size);
-				if (*zero && *commit) {
-					extent_hooks_t *extent_hooks =
-					    EXTENT_HOOKS_INITIALIZER;
-					extent_t extent;
-
-					extent_init(&extent, arena, ret, size,
-					    size, 0, true, false, true, false);
-					if (extent_purge_forced_wrapper(tsdn,
-					    arena, &extent_hooks, &extent, 0,
-					    size))
-						memset(ret, 0, size);
-				}
-				return (ret);
-			}
-			/*
-			 * Failure, whether due to OOM or a race with a raw
-			 * sbrk() call from outside the allocator.  Try to roll
-			 * back optimistic dss_max update; if rollback fails,
-			 * it's due to another caller of this function having
-			 * succeeded since this invocation started, in which
-			 * case rollback is not necessary.
-			 */
-			atomic_cas_p(&dss_max, dss_next, max_cur);
-			if (dss_prev == (void *)-1) {
-				/* OOM. */
-				atomic_write_u(&dss_exhausted, (unsigned)true);
-				goto label_oom;
-			}
-		}
-	}
-label_oom:
-	extent_dalloc(tsdn, arena, gap);
-	return (NULL);
-}
-
-static bool
-extent_in_dss_helper(void *addr, void *max)
-{
-	return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
-	    (uintptr_t)max);
-}
-
-bool
-extent_in_dss(void *addr)
-{
-	cassert(have_dss);
-
-	return (extent_in_dss_helper(addr, atomic_read_p(&dss_max)));
-}
-
-bool
-extent_dss_mergeable(void *addr_a, void *addr_b)
-{
-	void *max;
-
-	cassert(have_dss);
-
-	if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
-	    (uintptr_t)dss_base)
-		return (true);
-
-	max = atomic_read_p(&dss_max);
-	return (extent_in_dss_helper(addr_a, max) ==
-	    extent_in_dss_helper(addr_b, max));
-}
-
-void
-extent_dss_boot(void)
-{
-	cassert(have_dss);
-
-	dss_base = extent_dss_sbrk(0);
-	dss_exhausted = (unsigned)(dss_base == (void *)-1);
-	dss_max = dss_base;
-}
-
-/******************************************************************************/
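A worked example of the gap arithmetic used by extent_alloc_dss() above, with simplified stand-ins for PAGE_CEILING()/ALIGNMENT_CEILING() and a made-up break address:

#include <stdint.h>
#include <stdio.h>

#define PAGE		((uintptr_t)4096)
#define CEILING(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))	/* a: power of two. */

int
main(void)
{
	uintptr_t max_cur = 0x100123;	/* Hypothetical current end of the DSS. */
	uintptr_t size = 8 * PAGE;	/* Request. */
	uintptr_t alignment = 16 * PAGE;

	uintptr_t gap_addr = CEILING(max_cur, PAGE);	/* Page-align the break. */
	uintptr_t ret = CEILING(gap_addr, alignment);	/* Aligned allocation base. */
	uintptr_t gap_size = ret - gap_addr;		/* Recyclable gap extent. */
	uintptr_t incr = gap_size + size;		/* sbrk() increment. */

	printf("gap %#lx+%lu, alloc at %#lx, sbrk(+%lu)\n",
	    (unsigned long)gap_addr, (unsigned long)gap_size,
	    (unsigned long)ret, (unsigned long)incr);
	return 0;
}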
diff --git a/zircon/third_party/ulib/jemalloc/src/extent_mmap.c b/zircon/third_party/ulib/jemalloc/src/extent_mmap.c
deleted file mode 100644
index e685a45..0000000
--- a/zircon/third_party/ulib/jemalloc/src/extent_mmap.c
+++ /dev/null
@@ -1,75 +0,0 @@
-#define	JEMALLOC_EXTENT_MMAP_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-
-static void *
-extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
-{
-	void *ret;
-	size_t alloc_size;
-
-	alloc_size = size + alignment - PAGE;
-	/* Beware size_t wrap-around. */
-	if (alloc_size < size)
-		return (NULL);
-	do {
-		void *pages;
-		size_t leadsize;
-		pages = pages_map(NULL, alloc_size, commit);
-		if (pages == NULL)
-			return (NULL);
-		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
-		    (uintptr_t)pages;
-		ret = pages_trim(pages, alloc_size, leadsize, size, commit);
-	} while (ret == NULL);
-
-	assert(ret != NULL);
-	*zero = true;
-	return (ret);
-}
-
-void *
-extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
-    bool *commit)
-{
-	void *ret;
-	size_t offset;
-
-	/*
-	 * Ideally, there would be a way to specify alignment to mmap() (like
-	 * NetBSD has), but in the absence of such a feature, we have to work
-	 * hard to efficiently create aligned mappings.  The reliable, but
-	 * slow method is to create a mapping that is over-sized, then trim the
-	 * excess.  However, that always results in one or two calls to
-	 * pages_unmap().
-	 *
-	 * Optimistically try mapping precisely the right amount before falling
-	 * back to the slow method, with the expectation that the optimistic
-	 * approach works most of the time.
-	 */
-
-	assert(alignment != 0);
-
-	ret = pages_map(new_addr, size, commit);
-	if (ret == NULL || ret == new_addr)
-		return (ret);
-	assert(new_addr == NULL);
-	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
-	if (offset != 0) {
-		pages_unmap(ret, size);
-		return (extent_alloc_mmap_slow(size, alignment, zero, commit));
-	}
-
-	assert(ret != NULL);
-	*zero = true;
-	return (ret);
-}
-
-bool
-extent_dalloc_mmap(void *addr, size_t size)
-{
-	if (config_munmap)
-		pages_unmap(addr, size);
-	return (!config_munmap);
-}
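The "map oversized, then trim" slow path described above can be sketched stand-alone with raw POSIX mmap/munmap in place of the pages_* layer. This assumes a power-of-two alignment that is a multiple of the page size, and it omits the retry loop that extent_alloc_mmap_slow() performs when pages_trim() fails:

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

#define PAGE ((size_t)4096)

static void *
alloc_aligned_slow(size_t size, size_t alignment)
{
	size_t alloc_size = size + alignment - PAGE;
	char *pages, *ret;
	size_t leadsize, trailsize;

	if (alloc_size < size)	/* Beware size_t wrap-around. */
		return NULL;
	pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (pages == MAP_FAILED)
		return NULL;

	/* Unmap the unaligned lead and the unused trail; keep the middle. */
	leadsize = (size_t)((-(uintptr_t)pages) & (alignment - 1));
	trailsize = alloc_size - leadsize - size;
	ret = pages + leadsize;
	if (leadsize != 0)
		munmap(pages, leadsize);
	if (trailsize != 0)
		munmap(ret + size, trailsize);
	return ret;
}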
diff --git a/zircon/third_party/ulib/jemalloc/src/hash.c b/zircon/third_party/ulib/jemalloc/src/hash.c
deleted file mode 100644
index cfa4da0..0000000
--- a/zircon/third_party/ulib/jemalloc/src/hash.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define	JEMALLOC_HASH_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/zircon/third_party/ulib/jemalloc/src/jemalloc.c b/zircon/third_party/ulib/jemalloc/src/jemalloc.c
deleted file mode 100644
index 425f1e9..0000000
--- a/zircon/third_party/ulib/jemalloc/src/jemalloc.c
+++ /dev/null
@@ -1,2843 +0,0 @@
-#define	JEMALLOC_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-/* Runtime configuration options. */
-const char	*je_malloc_conf
-#ifndef _WIN32
-    JEMALLOC_ATTR(weak)
-#endif
-    ;
-bool	opt_abort =
-#ifdef JEMALLOC_DEBUG
-    true
-#else
-    false
-#endif
-    ;
-const char	*opt_junk =
-#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
-    "true"
-#else
-    "false"
-#endif
-    ;
-bool	opt_junk_alloc =
-#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
-    true
-#else
-    false
-#endif
-    ;
-bool	opt_junk_free =
-#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
-    true
-#else
-    false
-#endif
-    ;
-
-bool	opt_utrace = false;
-bool	opt_xmalloc = false;
-bool	opt_zero = false;
-unsigned	opt_narenas = 0;
-
-unsigned	ncpus;
-
-/* Protects arenas initialization. */
-static malloc_mutex_t	arenas_lock;
-/*
- * Arenas that are used to service external requests.  Not all elements of the
- * arenas array are necessarily used; arenas are created lazily as needed.
- *
- * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
- * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
- * takes some action to create them and allocate from them.
- */
-arena_t			**arenas;
-static unsigned		narenas_total; /* Use narenas_total_*(). */
-static arena_t		*a0; /* arenas[0]; read-only after initialization. */
-unsigned		narenas_auto; /* Read-only after initialization. */
-
-typedef enum {
-	malloc_init_uninitialized	= 3,
-	malloc_init_a0_initialized	= 2,
-	malloc_init_recursible		= 1,
-	malloc_init_initialized		= 0 /* Common case --> jnz. */
-} malloc_init_t;
-static malloc_init_t	malloc_init_state = malloc_init_uninitialized;
-
-/* False should be the common case.  Set to true to trigger initialization. */
-static bool	malloc_slow = true;
-
-/* When malloc_slow is true, set the corresponding bits for sanity check. */
-enum {
-	flag_opt_junk_alloc	= (1U),
-	flag_opt_junk_free	= (1U << 1),
-	flag_opt_zero		= (1U << 2),
-	flag_opt_utrace		= (1U << 3),
-	flag_opt_xmalloc	= (1U << 4)
-};
-static uint8_t	malloc_slow_flags;
-
-JEMALLOC_ALIGNED(CACHELINE)
-const size_t	pind2sz_tab[NPSIZES+1] = {
-#define	PSZ_yes(lg_grp, ndelta, lg_delta)				\
-	(((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
-#define	PSZ_no(lg_grp, ndelta, lg_delta)
-#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
-	PSZ_##psz(lg_grp, ndelta, lg_delta)
-	SIZE_CLASSES
-#undef PSZ_yes
-#undef PSZ_no
-#undef SC
-	(LARGE_MAXCLASS + PAGE)
-};
-
-JEMALLOC_ALIGNED(CACHELINE)
-const size_t	index2size_tab[NSIZES] = {
-#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
-	((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
-	SIZE_CLASSES
-#undef SC
-};
-
-JEMALLOC_ALIGNED(CACHELINE)
-const uint8_t	size2index_tab[] = {
-#if LG_TINY_MIN == 0
-#warning "Dangerous LG_TINY_MIN"
-#define	S2B_0(i)	i,
-#elif LG_TINY_MIN == 1
-#warning "Dangerous LG_TINY_MIN"
-#define	S2B_1(i)	i,
-#elif LG_TINY_MIN == 2
-#warning "Dangerous LG_TINY_MIN"
-#define	S2B_2(i)	i,
-#elif LG_TINY_MIN == 3
-#define	S2B_3(i)	i,
-#elif LG_TINY_MIN == 4
-#define	S2B_4(i)	i,
-#elif LG_TINY_MIN == 5
-#define	S2B_5(i)	i,
-#elif LG_TINY_MIN == 6
-#define	S2B_6(i)	i,
-#elif LG_TINY_MIN == 7
-#define	S2B_7(i)	i,
-#elif LG_TINY_MIN == 8
-#define	S2B_8(i)	i,
-#elif LG_TINY_MIN == 9
-#define	S2B_9(i)	i,
-#elif LG_TINY_MIN == 10
-#define	S2B_10(i)	i,
-#elif LG_TINY_MIN == 11
-#define	S2B_11(i)	i,
-#else
-#error "Unsupported LG_TINY_MIN"
-#endif
-#if LG_TINY_MIN < 1
-#define	S2B_1(i)	S2B_0(i) S2B_0(i)
-#endif
-#if LG_TINY_MIN < 2
-#define	S2B_2(i)	S2B_1(i) S2B_1(i)
-#endif
-#if LG_TINY_MIN < 3
-#define	S2B_3(i)	S2B_2(i) S2B_2(i)
-#endif
-#if LG_TINY_MIN < 4
-#define	S2B_4(i)	S2B_3(i) S2B_3(i)
-#endif
-#if LG_TINY_MIN < 5
-#define	S2B_5(i)	S2B_4(i) S2B_4(i)
-#endif
-#if LG_TINY_MIN < 6
-#define	S2B_6(i)	S2B_5(i) S2B_5(i)
-#endif
-#if LG_TINY_MIN < 7
-#define	S2B_7(i)	S2B_6(i) S2B_6(i)
-#endif
-#if LG_TINY_MIN < 8
-#define	S2B_8(i)	S2B_7(i) S2B_7(i)
-#endif
-#if LG_TINY_MIN < 9
-#define	S2B_9(i)	S2B_8(i) S2B_8(i)
-#endif
-#if LG_TINY_MIN < 10
-#define	S2B_10(i)	S2B_9(i) S2B_9(i)
-#endif
-#if LG_TINY_MIN < 11
-#define	S2B_11(i)	S2B_10(i) S2B_10(i)
-#endif
-#define	S2B_no(i)
-#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
-	S2B_##lg_delta_lookup(index)
-	SIZE_CLASSES
-#undef S2B_3
-#undef S2B_4
-#undef S2B_5
-#undef S2B_6
-#undef S2B_7
-#undef S2B_8
-#undef S2B_9
-#undef S2B_10
-#undef S2B_11
-#undef S2B_no
-#undef SC
-};
-
-#ifdef JEMALLOC_THREADED_INIT
-/* Used to let the initializing thread recursively allocate. */
-#  define NO_INITIALIZER	NULL
-#  define INITIALIZER		pthread_self()
-#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
-static pthread_t		malloc_initializer = NO_INITIALIZER;
-#else
-#  define NO_INITIALIZER	false
-#  define INITIALIZER		true
-#  define IS_INITIALIZER	malloc_initializer
-static bool			malloc_initializer = NO_INITIALIZER;
-#endif
-
-/* Used to avoid initialization races. */
-#ifdef _WIN32
-#if _WIN32_WINNT >= 0x0600
-static malloc_mutex_t	init_lock = SRWLOCK_INIT;
-#else
-static malloc_mutex_t	init_lock;
-static bool init_lock_initialized = false;
-
-JEMALLOC_ATTR(constructor)
-static void WINAPI
-_init_init_lock(void)
-{
-	/*
-	 * If another constructor in the same binary is using mallctl to e.g.
-	 * set up extent hooks, it may end up running before this one, and
-	 * malloc_init_hard will crash trying to lock the uninitialized lock. So
-	 * we force an initialization of the lock in malloc_init_hard as well.
-	 * We don't try to care about atomicity of the accesses to the
-	 * init_lock_initialized boolean, since it really only matters early in
-	 * the process creation, before any separate thread normally starts
-	 * doing anything.
-	 */
-	if (!init_lock_initialized)
-		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
-	init_lock_initialized = true;
-}
-
-#ifdef _MSC_VER
-#  pragma section(".CRT$XCU", read)
-JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
-static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
-#endif
-#endif
-#else
-static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
-#endif
-
-typedef struct {
-	void	*p;	/* Input pointer (as in realloc(p, s)). */
-	size_t	s;	/* Request size. */
-	void	*r;	/* Result pointer. */
-} malloc_utrace_t;
-
-#ifdef JEMALLOC_UTRACE
-#  define UTRACE(a, b, c) do {						\
-	if (unlikely(opt_utrace)) {					\
-		int utrace_serrno = errno;				\
-		malloc_utrace_t ut;					\
-		ut.p = (a);						\
-		ut.s = (b);						\
-		ut.r = (c);						\
-		utrace(&ut, sizeof(ut));				\
-		errno = utrace_serrno;					\
-	}								\
-} while (0)
-#else
-#  define UTRACE(a, b, c)
-#endif
-
-/******************************************************************************/
-/*
- * Function prototypes for static functions that are referenced prior to
- * definition.
- */
-
-static bool	malloc_init_hard_a0(void);
-static bool	malloc_init_hard(void);
-
-/******************************************************************************/
-/*
- * Begin miscellaneous support functions.
- */
-
-JEMALLOC_ALWAYS_INLINE_C bool
-malloc_initialized(void)
-{
-	return (malloc_init_state == malloc_init_initialized);
-}
-
-JEMALLOC_ALWAYS_INLINE_C bool
-malloc_init_a0(void)
-{
-	if (unlikely(malloc_init_state == malloc_init_uninitialized))
-		return (malloc_init_hard_a0());
-	return (false);
-}
-
-JEMALLOC_ALWAYS_INLINE_C bool
-malloc_init(void)
-{
-	if (unlikely(!malloc_initialized()) && malloc_init_hard())
-		return (true);
-	return (false);
-}
-
-/*
- * The a0*() functions are used instead of i{d,}alloc() in situations that
- * cannot tolerate TLS variable access.
- */
-
-static void *
-a0ialloc(size_t size, bool zero, bool is_internal)
-{
-	if (unlikely(malloc_init_a0()))
-		return (NULL);
-
-	return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
-	    is_internal, arena_get(TSDN_NULL, 0, true), true));
-}
-
-static void
-a0idalloc(extent_t *extent, void *ptr, bool is_internal)
-{
-	idalloctm(TSDN_NULL, extent, ptr, false, is_internal, true);
-}
-
-void *
-a0malloc(size_t size)
-{
-	return (a0ialloc(size, false, true));
-}
-
-void
-a0dalloc(void *ptr)
-{
-	a0idalloc(iealloc(NULL, ptr), ptr, true);
-}
-
-/*
- * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
- * situations that cannot tolerate TLS variable access (TLS allocation and very
- * early internal data structure initialization).
- */
-
-void *
-bootstrap_malloc(size_t size)
-{
-	if (unlikely(size == 0))
-		size = 1;
-
-	return (a0ialloc(size, false, false));
-}
-
-void *
-bootstrap_calloc(size_t num, size_t size)
-{
-	size_t num_size;
-
-	num_size = num * size;
-	if (unlikely(num_size == 0)) {
-		assert(num == 0 || size == 0);
-		num_size = 1;
-	}
-
-	return (a0ialloc(num_size, true, false));
-}
-
-void
-bootstrap_free(void *ptr)
-{
-	if (unlikely(ptr == NULL))
-		return;
-
-	a0idalloc(iealloc(NULL, ptr), ptr, false);
-}
-
-void
-arena_set(unsigned ind, arena_t *arena)
-{
-	atomic_write_p((void **)&arenas[ind], arena);
-}
-
-static void
-narenas_total_set(unsigned narenas)
-{
-	atomic_write_u(&narenas_total, narenas);
-}
-
-static void
-narenas_total_inc(void)
-{
-	atomic_add_u(&narenas_total, 1);
-}
-
-unsigned
-narenas_total_get(void)
-{
-	return (atomic_read_u(&narenas_total));
-}
-
-/* Create a new arena and insert it into the arenas array at index ind. */
-static arena_t *
-arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
-{
-	arena_t *arena;
-
-	assert(ind <= narenas_total_get());
-	if (ind > MALLOCX_ARENA_MAX)
-		return (NULL);
-	if (ind == narenas_total_get())
-		narenas_total_inc();
-
-	/*
-	 * Another thread may have already initialized arenas[ind] if it's an
-	 * auto arena.
-	 */
-	arena = arena_get(tsdn, ind, false);
-	if (arena != NULL) {
-		assert(ind < narenas_auto);
-		return (arena);
-	}
-
-	/* Actually initialize the arena. */
-	arena = arena_new(tsdn, ind, extent_hooks);
-	arena_set(ind, arena);
-	return (arena);
-}
-
-arena_t *
-arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
-{
-	arena_t *arena;
-
-	malloc_mutex_lock(tsdn, &arenas_lock);
-	arena = arena_init_locked(tsdn, ind, extent_hooks);
-	malloc_mutex_unlock(tsdn, &arenas_lock);
-	return (arena);
-}
-
-static void
-arena_bind(tsd_t *tsd, unsigned ind, bool internal)
-{
-	arena_t *arena;
-
-	if (!tsd_nominal(tsd))
-		return;
-
-	arena = arena_get(tsd_tsdn(tsd), ind, false);
-	arena_nthreads_inc(arena, internal);
-
-	if (internal)
-		tsd_iarena_set(tsd, arena);
-	else
-		tsd_arena_set(tsd, arena);
-}
-
-void
-arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
-{
-	arena_t *oldarena, *newarena;
-
-	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
-	newarena = arena_get(tsd_tsdn(tsd), newind, false);
-	arena_nthreads_dec(oldarena, false);
-	arena_nthreads_inc(newarena, false);
-	tsd_arena_set(tsd, newarena);
-}
-
-static void
-arena_unbind(tsd_t *tsd, unsigned ind, bool internal)
-{
-	arena_t *arena;
-
-	arena = arena_get(tsd_tsdn(tsd), ind, false);
-	arena_nthreads_dec(arena, internal);
-	if (internal)
-		tsd_iarena_set(tsd, NULL);
-	else
-		tsd_arena_set(tsd, NULL);
-}
-
-arena_tdata_t *
-arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
-{
-	arena_tdata_t *tdata, *arenas_tdata_old;
-	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
-	unsigned narenas_tdata_old, i;
-	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
-	unsigned narenas_actual = narenas_total_get();
-
-	/*
-	 * Dissociate old tdata array (and set up for deallocation upon return)
-	 * if it's too small.
-	 */
-	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
-		arenas_tdata_old = arenas_tdata;
-		narenas_tdata_old = narenas_tdata;
-		arenas_tdata = NULL;
-		narenas_tdata = 0;
-		tsd_arenas_tdata_set(tsd, arenas_tdata);
-		tsd_narenas_tdata_set(tsd, narenas_tdata);
-	} else {
-		arenas_tdata_old = NULL;
-		narenas_tdata_old = 0;
-	}
-
-	/* Allocate tdata array if it's missing. */
-	if (arenas_tdata == NULL) {
-		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
-		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
-
-		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
-			*arenas_tdata_bypassp = true;
-			arenas_tdata = (arena_tdata_t *)a0malloc(
-			    sizeof(arena_tdata_t) * narenas_tdata);
-			*arenas_tdata_bypassp = false;
-		}
-		if (arenas_tdata == NULL) {
-			tdata = NULL;
-			goto label_return;
-		}
-		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
-		tsd_arenas_tdata_set(tsd, arenas_tdata);
-		tsd_narenas_tdata_set(tsd, narenas_tdata);
-	}
-
-	/*
-	 * Copy to tdata array.  It's possible that the actual number of arenas
-	 * has increased since narenas_total_get() was called above, but that
-	 * causes no correctness issues unless two threads concurrently execute
-	 * the arenas.create mallctl, which we trust mallctl synchronization to
-	 * prevent.
-	 */
-
-	/* Copy/initialize tickers. */
-	for (i = 0; i < narenas_actual; i++) {
-		if (i < narenas_tdata_old) {
-			ticker_copy(&arenas_tdata[i].decay_ticker,
-			    &arenas_tdata_old[i].decay_ticker);
-		} else {
-			ticker_init(&arenas_tdata[i].decay_ticker,
-			    DECAY_NTICKS_PER_UPDATE);
-		}
-	}
-	if (narenas_tdata > narenas_actual) {
-		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
-		    * (narenas_tdata - narenas_actual));
-	}
-
-	/* Read the refreshed tdata array. */
-	tdata = &arenas_tdata[ind];
-label_return:
-	if (arenas_tdata_old != NULL)
-		a0dalloc(arenas_tdata_old);
-	return (tdata);
-}
-
-/* Slow path, called only by arena_choose(). */
-arena_t *
-arena_choose_hard(tsd_t *tsd, bool internal)
-{
-	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
-
-	if (narenas_auto > 1) {
-		unsigned i, j, choose[2], first_null;
-
-		/*
-		 * Determine binding for both non-internal and internal
-		 * allocation.
-		 *
-		 *   choose[0]: For application allocation.
-		 *   choose[1]: For internal metadata allocation.
-		 */
-
-		for (j = 0; j < 2; j++)
-			choose[j] = 0;
-
-		first_null = narenas_auto;
-		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
-		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
-		for (i = 1; i < narenas_auto; i++) {
-			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
-				/*
-				 * Choose the first arena that has the lowest
-				 * number of threads assigned to it.
-				 */
-				for (j = 0; j < 2; j++) {
-					if (arena_nthreads_get(arena_get(
-					    tsd_tsdn(tsd), i, false), !!j) <
-					    arena_nthreads_get(arena_get(
-					    tsd_tsdn(tsd), choose[j], false),
-					    !!j))
-						choose[j] = i;
-				}
-			} else if (first_null == narenas_auto) {
-				/*
-				 * Record the index of the first uninitialized
-				 * arena, in case all extant arenas are in use.
-				 *
-				 * NB: It is possible for there to be
-				 * discontinuities in terms of initialized
-				 * versus uninitialized arenas, due to the
-				 * "thread.arena" mallctl.
-				 */
-				first_null = i;
-			}
-		}
-
-		for (j = 0; j < 2; j++) {
-			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
-			    choose[j], false), !!j) == 0 || first_null ==
-			    narenas_auto) {
-				/*
-				 * Use an unloaded arena, or the least loaded
-				 * arena if all arenas are already initialized.
-				 */
-				if (!!j == internal) {
-					ret = arena_get(tsd_tsdn(tsd),
-					    choose[j], false);
-				}
-			} else {
-				arena_t *arena;
-
-				/* Initialize a new arena. */
-				choose[j] = first_null;
-				arena = arena_init_locked(tsd_tsdn(tsd),
-				    choose[j],
-				    (extent_hooks_t *)&extent_hooks_default);
-				if (arena == NULL) {
-					malloc_mutex_unlock(tsd_tsdn(tsd),
-					    &arenas_lock);
-					return (NULL);
-				}
-				if (!!j == internal)
-					ret = arena;
-			}
-			arena_bind(tsd, choose[j], !!j);
-		}
-		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
-	} else {
-		ret = arena_get(tsd_tsdn(tsd), 0, false);
-		arena_bind(tsd, 0, false);
-		arena_bind(tsd, 0, true);
-	}
-
-	return (ret);
-}
-
-void
-iarena_cleanup(tsd_t *tsd)
-{
-	arena_t *iarena;
-
-	iarena = tsd_iarena_get(tsd);
-	if (iarena != NULL)
-		arena_unbind(tsd, arena_ind_get(iarena), true);
-}
-
-void
-arena_cleanup(tsd_t *tsd)
-{
-	arena_t *arena;
-
-	arena = tsd_arena_get(tsd);
-	if (arena != NULL)
-		arena_unbind(tsd, arena_ind_get(arena), false);
-}
-
-void
-arenas_tdata_cleanup(tsd_t *tsd)
-{
-	arena_tdata_t *arenas_tdata;
-
-	/* Prevent tsd->arenas_tdata from being (re)created. */
-	*tsd_arenas_tdata_bypassp_get(tsd) = true;
-
-	arenas_tdata = tsd_arenas_tdata_get(tsd);
-	if (arenas_tdata != NULL) {
-		tsd_arenas_tdata_set(tsd, NULL);
-		a0dalloc(arenas_tdata);
-	}
-}
-
-static void
-stats_print_atexit(void)
-{
-	if (config_tcache && config_stats) {
-		tsdn_t *tsdn;
-		unsigned narenas, i;
-
-		tsdn = tsdn_fetch();
-
-		/*
-		 * Merge stats from extant threads.  This is racy, since
-		 * individual threads do not lock when recording tcache stats
-		 * events.  As a consequence, the final stats may be slightly
-		 * out of date by the time they are reported, if other threads
-		 * continue to allocate.
-		 */
-		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
-			arena_t *arena = arena_get(tsdn, i, false);
-			if (arena != NULL) {
-				tcache_t *tcache;
-
-				/*
-				 * tcache_stats_merge() locks bins, so if any
-				 * code is introduced that acquires both arena
-				 * and bin locks in the opposite order,
-				 * deadlocks may result.
-				 */
-				malloc_mutex_lock(tsdn, &arena->lock);
-				ql_foreach(tcache, &arena->tcache_ql, link) {
-					tcache_stats_merge(tsdn, tcache, arena);
-				}
-				malloc_mutex_unlock(tsdn, &arena->lock);
-			}
-		}
-	}
-	je_malloc_stats_print(NULL, NULL, NULL);
-}
-
-/*
- * End miscellaneous support functions.
- */
-/******************************************************************************/
-/*
- * Begin initialization functions.
- */
-
-#ifndef JEMALLOC_HAVE_SECURE_GETENV
-static char *
-secure_getenv(const char *name)
-{
-#  ifdef JEMALLOC_HAVE_ISSETUGID
-	if (issetugid() != 0)
-		return (NULL);
-#  endif
-	return (getenv(name));
-}
-#endif
-
-static unsigned
-malloc_ncpus(void)
-{
-	long result;
-
-#ifdef _WIN32
-	SYSTEM_INFO si;
-	GetSystemInfo(&si);
-	result = si.dwNumberOfProcessors;
-#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
-	/*
-	 * glibc >= 2.6 has the CPU_COUNT macro.
-	 *
-	 * glibc's sysconf() uses isspace().  glibc allocates for the first time
-	 * *before* setting up the isspace tables.  Therefore we need a
-	 * different method to get the number of CPUs.
-	 */
-	{
-		cpu_set_t set;
-
-		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
-		result = CPU_COUNT(&set);
-	}
-#else
-	result = sysconf(_SC_NPROCESSORS_ONLN);
-#endif
-	return ((result == -1) ? 1 : (unsigned)result);
-}
-
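-/*
- * Parse the next "key:value" pair from *opts_p.  Keys consist of
- * [A-Za-z0-9_] characters; ':' terminates the key, and ',' or '\0'
- * terminates the value.  Returns false and fills in *k_p/*klen_p/*v_p/*vlen_p
- * when a pair was extracted, or true when there is nothing left to parse
- * (end of input or a malformed string).
- */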
-static bool
-malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
-    char const **v_p, size_t *vlen_p)
-{
-	bool accept;
-	const char *opts = *opts_p;
-
-	*k_p = opts;
-
-	for (accept = false; !accept;) {
-		switch (*opts) {
-		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
-		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
-		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
-		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
-		case 'Y': case 'Z':
-		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
-		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
-		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
-		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
-		case 'y': case 'z':
-		case '0': case '1': case '2': case '3': case '4': case '5':
-		case '6': case '7': case '8': case '9':
-		case '_':
-			opts++;
-			break;
-		case ':':
-			opts++;
-			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
-			*v_p = opts;
-			accept = true;
-			break;
-		case '\0':
-			if (opts != *opts_p) {
-				malloc_write("<jemalloc>: Conf string ends "
-				    "with key\n");
-			}
-			return (true);
-		default:
-			malloc_write("<jemalloc>: Malformed conf string\n");
-			return (true);
-		}
-	}
-
-	for (accept = false; !accept;) {
-		switch (*opts) {
-		case ',':
-			opts++;
-			/*
-			 * Look ahead one character here, because the next time
-			 * this function is called, it will assume that end of
-			 * input has been cleanly reached if no input remains,
-			 * but we have optimistically already consumed the
-			 * comma if one exists.
-			 */
-			if (*opts == '\0') {
-				malloc_write("<jemalloc>: Conf string ends "
-				    "with comma\n");
-			}
-			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
-			accept = true;
-			break;
-		case '\0':
-			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
-			accept = true;
-			break;
-		default:
-			opts++;
-			break;
-		}
-	}
-
-	*opts_p = opts;
-	return (false);
-}
-
-static void
-malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
-    size_t vlen)
-{
-	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
-	    (int)vlen, v);
-}
-
-static void
-malloc_slow_flag_init(void)
-{
-	/*
-	 * Combine the runtime options into malloc_slow for fast path.  Called
-	 * after processing all the options.
-	 */
-	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
-	    | (opt_junk_free ? flag_opt_junk_free : 0)
-	    | (opt_zero ? flag_opt_zero : 0)
-	    | (opt_utrace ? flag_opt_utrace : 0)
-	    | (opt_xmalloc ? flag_opt_xmalloc : 0);
-
-	malloc_slow = (malloc_slow_flags != 0);
-}
-
-static void
-malloc_conf_init(void)
-{
-	unsigned i;
-	char buf[PATH_MAX + 1];
-	const char *opts, *k, *v;
-	size_t klen, vlen;
-
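-	/*
-	 * Options are gathered from four sources, processed in order so that
-	 * later sources override earlier ones: the compiled-in
-	 * config_malloc_conf string, the je_malloc_conf global, the name of
-	 * the /etc/malloc.conf symbolic link (where supported), and the
-	 * MALLOC_CONF environment variable.
-	 */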
-	for (i = 0; i < 4; i++) {
-		/* Get runtime configuration. */
-		switch (i) {
-		case 0:
-			opts = config_malloc_conf;
-			break;
-		case 1:
-			if (je_malloc_conf != NULL) {
-				/*
-				 * Use options that were compiled into the
-				 * program.
-				 */
-				opts = je_malloc_conf;
-			} else {
-				/* No configuration specified. */
-				buf[0] = '\0';
-				opts = buf;
-			}
-			break;
-		case 2: {
-			ssize_t linklen = 0;
-#if !defined(_WIN32) && !defined(__Fuchsia__)
-			int saved_errno = errno;
-			const char *linkname =
-#  ifdef JEMALLOC_PREFIX
-			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
-#  else
-			    "/etc/malloc.conf"
-#  endif
-			    ;
-
-			/*
-			 * Try to use the contents of the "/etc/malloc.conf"
-			 * symbolic link's name.
-			 */
-			linklen = readlink(linkname, buf, sizeof(buf) - 1);
-			if (linklen == -1) {
-				/* No configuration specified. */
-				linklen = 0;
-				/* Restore errno. */
-				set_errno(saved_errno);
-			}
-#endif
-			buf[linklen] = '\0';
-			opts = buf;
-			break;
-		} case 3: {
-			const char *envname =
-#ifdef JEMALLOC_PREFIX
-			    JEMALLOC_CPREFIX"MALLOC_CONF"
-#else
-			    "MALLOC_CONF"
-#endif
-			    ;
-
-			if ((opts = secure_getenv(envname)) != NULL) {
-				/*
-				 * Do nothing; opts is already initialized to
-				 * the value of the MALLOC_CONF environment
-				 * variable.
-				 */
-			} else {
-				/* No configuration specified. */
-				buf[0] = '\0';
-				opts = buf;
-			}
-			break;
-		} default:
-			not_reached();
-			buf[0] = '\0';
-			opts = buf;
-		}
-
-		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
-		    &vlen)) {
-#define	CONF_MATCH(n)							\
-	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
-#define	CONF_MATCH_VALUE(n)						\
-	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
-#define	CONF_HANDLE_BOOL(o, n, cont)					\
-			if (CONF_MATCH(n)) {				\
-				if (CONF_MATCH_VALUE("true"))		\
-					o = true;			\
-				else if (CONF_MATCH_VALUE("false"))	\
-					o = false;			\
-				else {					\
-					malloc_conf_error(		\
-					    "Invalid conf value",	\
-					    k, klen, v, vlen);		\
-				}					\
-				if (cont)				\
-					continue;			\
-			}
-#define	CONF_MIN_no(um, min)	false
-#define	CONF_MIN_yes(um, min)	((um) < (min))
-#define	CONF_MAX_no(um, max)	false
-#define	CONF_MAX_yes(um, max)	((um) > (max))
-#define	CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip)	\
-			if (CONF_MATCH(n)) {				\
-				uintmax_t um;				\
-				char *end;				\
-									\
-				set_errno(0);				\
-				um = malloc_strtoumax(v, &end, 0);	\
-				if (get_errno() != 0 || (uintptr_t)end -\
-				    (uintptr_t)v != vlen) {		\
-					malloc_conf_error(		\
-					    "Invalid conf value",	\
-					    k, klen, v, vlen);		\
-				} else if (clip) {			\
-					if (CONF_MIN_##check_min(um,	\
-					    (min)))			\
-						o = (t)(min);		\
-					else if (CONF_MAX_##check_max(	\
-					    um, (max)))			\
-						o = (t)(max);		\
-					else				\
-						o = (t)um;		\
-				} else {				\
-					if (CONF_MIN_##check_min(um,	\
-					    (min)) ||			\
-					    CONF_MAX_##check_max(um,	\
-					    (max))) {			\
-						malloc_conf_error(	\
-						    "Out-of-range "	\
-						    "conf value",	\
-						    k, klen, v, vlen);	\
-					} else				\
-						o = (t)um;		\
-				}					\
-				continue;				\
-			}
-#define	CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max,	\
-    clip)								\
-			CONF_HANDLE_T_U(unsigned, o, n, min, max,	\
-			    check_min, check_max, clip)
-#define	CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip)	\
-			CONF_HANDLE_T_U(size_t, o, n, min, max,		\
-			    check_min, check_max, clip)
-#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
-			if (CONF_MATCH(n)) {				\
-				long l;					\
-				char *end;				\
-									\
-				set_errno(0);				\
-				l = strtol(v, &end, 0);			\
-				if (get_errno() != 0 || (uintptr_t)end -\
-				    (uintptr_t)v != vlen) {		\
-					malloc_conf_error(		\
-					    "Invalid conf value",	\
-					    k, klen, v, vlen);		\
-				} else if (l < (ssize_t)(min) || l >	\
-				    (ssize_t)(max)) {			\
-					malloc_conf_error(		\
-					    "Out-of-range conf value",	\
-					    k, klen, v, vlen);		\
-				} else					\
-					o = l;				\
-				continue;				\
-			}
-#define	CONF_HANDLE_CHAR_P(o, n, d)					\
-			if (CONF_MATCH(n)) {				\
-				size_t cpylen = (vlen <=		\
-				    sizeof(o)-1) ? vlen :		\
-				    sizeof(o)-1;			\
-				strncpy(o, v, cpylen);			\
-				o[cpylen] = '\0';			\
-				continue;				\
-			}
-
-			CONF_HANDLE_BOOL(opt_abort, "abort", true)
-			if (strncmp("dss", k, klen) == 0) {
-				int i;
-				bool match = false;
-				for (i = 0; i < dss_prec_limit; i++) {
-					if (strncmp(dss_prec_names[i], v, vlen)
-					    == 0) {
-						if (extent_dss_prec_set(i)) {
-							malloc_conf_error(
-							    "Error setting dss",
-							    k, klen, v, vlen);
-						} else {
-							opt_dss =
-							    dss_prec_names[i];
-							match = true;
-							break;
-						}
-					}
-				}
-				if (!match) {
-					malloc_conf_error("Invalid conf value",
-					    k, klen, v, vlen);
-				}
-				continue;
-			}
-			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
-			    UINT_MAX, yes, no, false)
-			CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
-			    NSTIME_SEC_MAX);
-			CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
-			if (config_fill) {
-				if (CONF_MATCH("junk")) {
-					if (CONF_MATCH_VALUE("true")) {
-						opt_junk = "true";
-						opt_junk_alloc = opt_junk_free =
-						    true;
-					} else if (CONF_MATCH_VALUE("false")) {
-						opt_junk = "false";
-						opt_junk_alloc = opt_junk_free =
-						    false;
-					} else if (CONF_MATCH_VALUE("alloc")) {
-						opt_junk = "alloc";
-						opt_junk_alloc = true;
-						opt_junk_free = false;
-					} else if (CONF_MATCH_VALUE("free")) {
-						opt_junk = "free";
-						opt_junk_alloc = false;
-						opt_junk_free = true;
-					} else {
-						malloc_conf_error(
-						    "Invalid conf value", k,
-						    klen, v, vlen);
-					}
-					continue;
-				}
-				CONF_HANDLE_BOOL(opt_zero, "zero", true)
-			}
-			if (config_utrace) {
-				CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
-			}
-			if (config_xmalloc) {
-				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
-			}
-			if (config_tcache) {
-				CONF_HANDLE_BOOL(opt_tcache, "tcache", true)
-				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
-				    "lg_tcache_max", -1,
-				    (sizeof(size_t) << 3) - 1)
-			}
-			if (config_prof) {
-				CONF_HANDLE_BOOL(opt_prof, "prof", true)
-				CONF_HANDLE_CHAR_P(opt_prof_prefix,
-				    "prof_prefix", "jeprof")
-				CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
-				    true)
-				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
-				    "prof_thread_active_init", true)
-				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
-				    "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
-				    - 1, no, yes, true)
-				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
-				    true)
-				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
-				    "lg_prof_interval", -1,
-				    (sizeof(uint64_t) << 3) - 1)
-				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
-				    true)
-				CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
-				    true)
-				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
-				    true)
-			}
-			malloc_conf_error("Invalid conf pair", k, klen, v,
-			    vlen);
-#undef CONF_MATCH
-#undef CONF_MATCH_VALUE
-#undef CONF_HANDLE_BOOL
-#undef CONF_MIN_no
-#undef CONF_MIN_yes
-#undef CONF_MAX_no
-#undef CONF_MAX_yes
-#undef CONF_HANDLE_T_U
-#undef CONF_HANDLE_UNSIGNED
-#undef CONF_HANDLE_SIZE_T
-#undef CONF_HANDLE_SSIZE_T
-#undef CONF_HANDLE_CHAR_P
-		}
-	}
-}
-
-static bool
-malloc_init_hard_needed(void)
-{
-	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
-	    malloc_init_recursible)) {
-		/*
-		 * Another thread initialized the allocator before this one
-		 * acquired init_lock, or this thread is the initializing
-		 * thread, and it is recursively allocating.
-		 */
-		return (false);
-	}
-#ifdef JEMALLOC_THREADED_INIT
-	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
-		spin_t spinner;
-
-		/* Busy-wait until the initializing thread completes. */
-		spin_init(&spinner);
-		do {
-			malloc_mutex_unlock(TSDN_NULL, &init_lock);
-			spin_adaptive(&spinner);
-			malloc_mutex_lock(TSDN_NULL, &init_lock);
-		} while (!malloc_initialized());
-		return (false);
-	}
-#endif
-	return (true);
-}
-
-static bool
-malloc_init_hard_a0_locked(void)
-{
-	malloc_initializer = INITIALIZER;
-
-	if (config_prof)
-		prof_boot0();
-	malloc_conf_init();
-	if (opt_stats_print) {
-		/* Print statistics at exit. */
-		if (atexit(stats_print_atexit) != 0) {
-			malloc_write("<jemalloc>: Error in atexit()\n");
-			if (opt_abort)
-				abort();
-		}
-	}
-	pages_boot();
-	if (base_boot(TSDN_NULL))
-		return (true);
-	if (extent_boot())
-		return (true);
-	if (ctl_boot())
-		return (true);
-	if (config_prof)
-		prof_boot1();
-	arena_boot();
-	if (config_tcache && tcache_boot(TSDN_NULL))
-		return (true);
-	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
-		return (true);
-	/*
-	 * Create enough scaffolding to allow recursive allocation in
-	 * malloc_ncpus().
-	 */
-	narenas_auto = 1;
-	narenas_total_set(narenas_auto);
-	arenas = &a0;
-	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
-	/*
-	 * Initialize one arena here.  The rest are lazily created in
-	 * arena_choose_hard().
-	 */
-	if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) ==
-	    NULL)
-		return (true);
-
-	malloc_init_state = malloc_init_a0_initialized;
-
-	return (false);
-}
-
-static bool
-malloc_init_hard_a0(void)
-{
-	bool ret;
-
-	malloc_mutex_lock(TSDN_NULL, &init_lock);
-	ret = malloc_init_hard_a0_locked();
-	malloc_mutex_unlock(TSDN_NULL, &init_lock);
-	return (ret);
-}
-
-/* Initialize data structures which may trigger recursive allocation. */
-static bool
-malloc_init_hard_recursible(void)
-{
-	malloc_init_state = malloc_init_recursible;
-
-	ncpus = malloc_ncpus();
-
-#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
-    && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
-    !defined(__native_client__))
-	/* LinuxThreads' pthread_atfork() allocates. */
-	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
-	    jemalloc_postfork_child) != 0) {
-		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
-		if (opt_abort)
-			abort();
-		return (true);
-	}
-#endif
-
-	return (false);
-}
-
-static bool
-malloc_init_hard_finish(tsdn_t *tsdn)
-{
-	if (malloc_mutex_boot())
-		return (true);
-
-	if (opt_narenas == 0) {
-		/*
-		 * For SMP systems, create more than one arena per CPU by
-		 * default.
-		 */
-		if (ncpus > 1)
-			opt_narenas = ncpus << 2;
-		else
-			opt_narenas = 1;
-	}
-	narenas_auto = opt_narenas;
-	/*
-	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
-	 */
-	if (narenas_auto > MALLOCX_ARENA_MAX) {
-		narenas_auto = MALLOCX_ARENA_MAX;
-		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
-		    narenas_auto);
-	}
-	narenas_total_set(narenas_auto);
-
-	/* Allocate and initialize arenas. */
-	arenas = (arena_t **)base_alloc(tsdn, a0->base, sizeof(arena_t *) *
-	    (MALLOCX_ARENA_MAX+1), CACHELINE);
-	if (arenas == NULL)
-		return (true);
-	/* Copy the pointer to the one arena that was already initialized. */
-	arena_set(0, a0);
-
-	malloc_init_state = malloc_init_initialized;
-	malloc_slow_flag_init();
-
-	return (false);
-}
-
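-/*
- * Full initialization proceeds in stages: bootstrap arena 0 under init_lock
- * (malloc_init_hard_a0_locked()), boot tsd, run the steps that may allocate
- * recursively (malloc_init_hard_recursible()), and finally boot profiling and
- * allocate the full arenas array under init_lock again.
- */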
-static bool
-malloc_init_hard(void)
-{
-	tsd_t *tsd;
-
-#if defined(_WIN32) && _WIN32_WINNT < 0x0600
-	_init_init_lock();
-#endif
-	malloc_mutex_lock(TSDN_NULL, &init_lock);
-	if (!malloc_init_hard_needed()) {
-		malloc_mutex_unlock(TSDN_NULL, &init_lock);
-		return (false);
-	}
-
-	if (malloc_init_state != malloc_init_a0_initialized &&
-	    malloc_init_hard_a0_locked()) {
-		malloc_mutex_unlock(TSDN_NULL, &init_lock);
-		return (true);
-	}
-
-	malloc_mutex_unlock(TSDN_NULL, &init_lock);
-	/* Recursive allocation relies on functional tsd. */
-	tsd = malloc_tsd_boot0();
-	if (tsd == NULL)
-		return (true);
-	if (malloc_init_hard_recursible())
-		return (true);
-	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
-
-	if (config_prof && prof_boot2(tsd)) {
-		malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
-		return (true);
-	}
-
-	if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
-		malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
-		return (true);
-	}
-
-	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
-	malloc_tsd_boot1();
-	return (false);
-}
-
-/*
- * End initialization functions.
- */
-/******************************************************************************/
-/*
- * Begin allocation-path internal functions and data structures.
- */
-
-/*
- * Settings determined by the documented behavior of the allocation functions.
- */
-typedef struct static_opts_s static_opts_t;
-struct static_opts_s {
-	/* Whether or not allocations of size 0 should be treated as size 1. */
-	bool bump_empty_alloc;
-	/*
-	 * Whether to assert that allocations are not of size 0 (after any
-	 * bumping).
-	 */
-	bool assert_nonempty_alloc;
-
-	/*
-	 * Whether or not to modify the 'result' argument to malloc in case of
-	 * error.
-	 */
-	bool null_out_result_on_error;
-	/* Whether to set errno when we encounter an error condition. */
-	bool set_errno_on_error;
-
-	/*
-	 * The minimum valid alignment for functions requesting aligned storage.
-	 */
-	size_t min_alignment;
-
-	/* The error string to use if we oom. */
-	const char *oom_string;
-	/* The error string to use if the passed-in alignment is invalid. */
-	const char *invalid_alignment_string;
-
-	/*
-	 * False if we're configured to skip some time-consuming operations.
-	 *
-	 * This isn't really a malloc "behavior", but it acts as a useful
-	 * summary of several other static (or at least, static after program
-	 * initialization) options.
-	 */
-	bool slow;
-};
-
-JEMALLOC_ALWAYS_INLINE_C void
-static_opts_init(static_opts_t *static_opts) {
-	static_opts->bump_empty_alloc = false;
-	static_opts->assert_nonempty_alloc = false;
-	static_opts->null_out_result_on_error = false;
-	static_opts->set_errno_on_error = false;
-	static_opts->min_alignment = 0;
-	static_opts->oom_string = "";
-	static_opts->invalid_alignment_string = "";
-	static_opts->slow = false;
-}
-
-/*
- * These correspond to the macros in jemalloc/jemalloc_macros.h.  Broadly, we
- * should have one constant here per magic value there.  Note however that the
- * representations need not be related.
- */
-#define TCACHE_IND_NONE ((unsigned)-1)
-#define TCACHE_IND_AUTOMATIC ((unsigned)-2)
-#define ARENA_IND_AUTOMATIC ((unsigned)-1)
-
-typedef struct dynamic_opts_s dynamic_opts_t;
-struct dynamic_opts_s {
-	void **result;
-	size_t num_items;
-	size_t item_size;
-	size_t alignment;
-	bool zero;
-	unsigned tcache_ind;
-	unsigned arena_ind;
-};
-
-JEMALLOC_ALWAYS_INLINE_C void
-dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
-	dynamic_opts->result = NULL;
-	dynamic_opts->num_items = 0;
-	dynamic_opts->item_size = 0;
-	dynamic_opts->alignment = 0;
-	dynamic_opts->zero = false;
-	dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
-	dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
-}
-
-/* ind is ignored if dopts->alignment > 0. */
-JEMALLOC_ALWAYS_INLINE_C void *
-imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
-    size_t size, size_t usize, szind_t ind) {
-	tcache_t *tcache;
-	arena_t *arena;
-
-	/* Fill in the tcache. */
-	if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
-		tcache = tcache_get(tsd, true);
-	} else if (dopts->tcache_ind == TCACHE_IND_NONE) {
-		tcache = NULL;
-	} else {
-		tcache = tcaches_get(tsd, dopts->tcache_ind);
-	}
-
-	/* Fill in the arena. */
-	if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
-		/*
-		 * In case of automatic arena management, we defer arena
-		 * computation until as late as we can, hoping to fill the
-		 * allocation out of the tcache.
-		 */
-		arena = NULL;
-	} else {
-		arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
-	}
-
-	if (unlikely(dopts->alignment != 0)) {
-		return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
-		    dopts->zero, tcache, arena);
-	}
-
-	return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
-	    arena, sopts->slow);
-}
-
-JEMALLOC_ALWAYS_INLINE_C void *
-imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
-    size_t usize, szind_t ind) {
-	void *ret;
-
-	/*
-	 * For small allocations, sampling bumps the usize.  If so, we allocate
-	 * from the ind_large bucket.
-	 */
-	szind_t ind_large;
-	size_t bumped_usize = usize;
-
-	if (usize <= SMALL_MAXCLASS) {
-		assert(((dopts->alignment == 0) ? s2u(LARGE_MINCLASS) :
-		    sa2u(LARGE_MINCLASS, dopts->alignment)) == LARGE_MINCLASS);
-		ind_large = size2index(LARGE_MINCLASS);
-		bumped_usize = s2u(LARGE_MINCLASS);
-		ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
-		    bumped_usize, ind_large);
-		if (unlikely(ret == NULL)) {
-			return NULL;
-		}
-		arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), ret),
-		    ret, usize);
-	} else {
-		ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
-	}
-
-	return ret;
-}
-
-/*
- * Returns true if the allocation will overflow, and false otherwise.  Sets
- * *size to the product either way.
- */
-JEMALLOC_ALWAYS_INLINE_C bool
-compute_size_with_overflow(dynamic_opts_t *dopts, size_t *size) {
-	/*
-	 * This function is just num_items * item_size, except that we have to
-	 * check for overflow.
-	 */
-
-	/* A size_t with its high-half bits all set to 1. */
-	static const size_t high_bits = SIZE_T_MAX >> (sizeof(size_t) * 8 / 2);
-
-	*size = dopts->item_size * dopts->num_items;
-
-	if (unlikely(*size == 0)) {
-		return (dopts->num_items != 0 && dopts->item_size != 0);
-	}
-
-	/*
-	 * We got a non-zero size, but we don't know if we overflowed to get
-	 * there.  To avoid having to do a divide, we'll be clever and note that
-	 * if both A and B can be represented in N/2 bits, then their product
-	 * can be represented in N bits (without the possibility of overflow).
-	 */
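-	/*
-	 * For example, on a system with 64-bit size_t, any num_items and
-	 * item_size that each fit in 32 bits cannot overflow the product, so
-	 * the division check below is only needed when at least one operand
-	 * uses the high half.
-	 */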
-	if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
-		return false;
-	}
-	if (likely(*size / dopts->item_size == dopts->num_items)) {
-		return false;
-	}
-	return true;
-}
-
-JEMALLOC_ALWAYS_INLINE_C int
-imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
-	/* Where the actual allocated memory will live. */
-	void *allocation = NULL;
-	/* Filled in by compute_size_with_overflow below. */
-	size_t size = 0;
-	/* We compute a value for this right before allocating. */
-	tsd_t *tsd = NULL;
-	/*
-	 * For unaligned allocations, we need only ind.  For aligned
-	 * allocations, or when stats or profiling are enabled, we need usize.
-	 *
-	 * These are actually dead stores, in that their values are reset before
-	 * any branch on their value is taken.  Sometimes though, it's
-	 * convenient to pass them as arguments before this point.  To avoid
-	 * undefined behavior then, we initialize them with dummy stores.
-	 */
-	szind_t ind = 0;
-	size_t usize = 0;
-
-	/* Initialize (if we can't prove we don't have to). */
-	if (sopts->slow) {
-		if (unlikely(malloc_init())) {
-			goto label_oom;
-		}
-	}
-
-	/* Compute the amount of memory the user wants. */
-	bool overflow = compute_size_with_overflow(dopts, &size);
-	if (unlikely(overflow)) {
-		goto label_oom;
-	}
-
-	/* Validate the user input. */
-	if (sopts->bump_empty_alloc) {
-		if (unlikely(size == 0)) {
-			size = 1;
-		}
-	}
-
-	if (sopts->assert_nonempty_alloc) {
-		assert (size != 0);
-	}
-
-	if (unlikely(dopts->alignment < sopts->min_alignment
-	    || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
-		goto label_invalid_alignment;
-	}
-
-	/* This is the beginning of the "core" algorithm. */
-
-	if (dopts->alignment == 0) {
-		ind = size2index(size);
-		if (unlikely(ind >= NSIZES)) {
-			goto label_oom;
-		}
-		if (config_stats || (config_prof && opt_prof)) {
-			usize = index2size(ind);
-			assert(usize > 0 && usize <= LARGE_MAXCLASS);
-		}
-	} else {
-		usize = sa2u(size, dopts->alignment);
-		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
-			goto label_oom;
-		}
-	}
-
-	/*
-	 * We always need the tsd, even if we aren't going to use the tcache for
-	 * some reason.  Let's grab it right away.
-	 */
-	tsd = tsd_fetch();
-	witness_assert_lockless(tsd_tsdn(tsd));
-
-	/* If profiling is on, get our profiling context. */
-	if (config_prof && opt_prof) {
-		/*
-		 * Note that if we're going down this path, usize must have been
-		 * initialized in the previous if statement.
-		 */
-		prof_tctx_t *tctx = prof_alloc_prep(
-		    tsd, usize, prof_active_get_unlocked(), true);
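-		/*
-		 * prof_alloc_prep() returns (uintptr_t)1U when the allocation
-		 * should not be sampled (the common case) and a real
-		 * prof_tctx_t pointer when it should; any other value is
-		 * treated as an allocation failure below.
-		 */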
-		if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
-			allocation = imalloc_no_sample(
-			    sopts, dopts, tsd, usize, usize, ind);
-		} else if ((uintptr_t)tctx > (uintptr_t)1U) {
-			/*
-			 * Note that ind might still be 0 here.  This is fine;
-			 * imalloc_sample ignores ind if dopts->alignment > 0.
-			 */
-			allocation = imalloc_sample(
-			    sopts, dopts, tsd, usize, ind);
-		} else {
-			allocation = NULL;
-		}
-
-		if (unlikely(allocation == NULL)) {
-			prof_alloc_rollback(tsd, tctx, true);
-			goto label_oom;
-		}
-
-		prof_malloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), allocation),
-		    allocation, usize, tctx);
-
-	} else {
-		/*
-		 * If dopts->alignment > 0, then ind is still 0, but usize was
-		 * computed in the previous if statement.  Down the positive
-		 * alignment path, imalloc_no_sample ignores ind and size
-		 * (relying only on usize).
-		 */
-		allocation = imalloc_no_sample(sopts, dopts, tsd, usize, usize,
-		    ind);
-		if (unlikely(allocation == NULL)) {
-			goto label_oom;
-		}
-	}
-
-	/*
-	 * Allocation has been done at this point.  We still have some
-	 * post-allocation work to do though.
-	 */
-	assert(dopts->alignment == 0
-	    || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
-
-	if (config_stats) {
-		assert(usize == isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
-		    allocation), allocation));
-		*tsd_thread_allocatedp_get(tsd) += usize;
-	}
-
-	if (sopts->slow) {
-		UTRACE(0, size, allocation);
-	}
-
-	witness_assert_lockless(tsd_tsdn(tsd));
-
-	/* Success! */
-	*dopts->result = allocation;
-	return 0;
-
-label_oom:
-	if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
-		malloc_write(sopts->oom_string);
-		abort();
-	}
-
-	if (sopts->slow) {
-		UTRACE(NULL, size, NULL);
-	}
-
-	witness_assert_lockless(tsd_tsdn(tsd));
-
-	if (sopts->set_errno_on_error) {
-		set_errno(ENOMEM);
-	}
-
-	if (sopts->null_out_result_on_error) {
-		*dopts->result = NULL;
-	}
-
-	return ENOMEM;
-
-	/*
-	 * This label is only jumped to by one goto; we move it out of line
-	 * anyway to avoid obscuring the non-error paths, and for symmetry with
-	 * the oom case.
-	 */
-label_invalid_alignment:
-	if (config_xmalloc && unlikely(opt_xmalloc)) {
-		malloc_write(sopts->invalid_alignment_string);
-		abort();
-	}
-
-	if (sopts->set_errno_on_error) {
-		set_errno(EINVAL);
-	}
-
-	if (sopts->slow) {
-		UTRACE(NULL, size, NULL);
-	}
-
-	witness_assert_lockless(tsd_tsdn(tsd));
-
-	if (sopts->null_out_result_on_error) {
-		*dopts->result = NULL;
-	}
-
-	return EINVAL;
-}
-
-/* Returns the errno-style error code of the allocation. */
-JEMALLOC_ALWAYS_INLINE_C int
-imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
-	if (unlikely(malloc_slow)) {
-		sopts->slow = true;
-		return imalloc_body(sopts, dopts);
-	} else {
-		sopts->slow = false;
-		return imalloc_body(sopts, dopts);
-	}
-}
-/******************************************************************************/
-/*
- * Begin malloc(3)-compatible functions.
- */
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
-je_malloc(size_t size)
-{
-	void *ret;
-	static_opts_t sopts;
-	dynamic_opts_t dopts;
-
-	static_opts_init(&sopts);
-	dynamic_opts_init(&dopts);
-
-	sopts.bump_empty_alloc = true;
-	sopts.null_out_result_on_error = true;
-	sopts.set_errno_on_error = true;
-	sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
-
-	dopts.result = &ret;
-	dopts.num_items = 1;
-	dopts.item_size = size;
-
-	imalloc(&sopts, &dopts);
-
-	return ret;
-}
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW
-JEMALLOC_ATTR(nonnull(1))
-je_posix_memalign(void **memptr, size_t alignment, size_t size)
-{
-	int ret;
-	static_opts_t sopts;
-	dynamic_opts_t dopts;
-
-	static_opts_init(&sopts);
-	dynamic_opts_init(&dopts);
-
-	sopts.bump_empty_alloc = true;
-	sopts.min_alignment = sizeof(void *);
-	sopts.oom_string =
-	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
-	sopts.invalid_alignment_string =
-	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
-
-	dopts.result = memptr;
-	dopts.num_items = 1;
-	dopts.item_size = size;
-	dopts.alignment = alignment;
-
-	ret = imalloc(&sopts, &dopts);
-	return ret;
-}
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
-je_aligned_alloc(size_t alignment, size_t size)
-{
-	void *ret;
-
-	static_opts_t sopts;
-	dynamic_opts_t dopts;
-
-	static_opts_init(&sopts);
-	dynamic_opts_init(&dopts);
-
-	sopts.bump_empty_alloc = true;
-	sopts.null_out_result_on_error = true;
-	sopts.set_errno_on_error = true;
-	sopts.min_alignment = 1;
-	sopts.oom_string =
-	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
-	sopts.invalid_alignment_string =
-	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
-
-	dopts.result = &ret;
-	dopts.num_items = 1;
-	dopts.item_size = size;
-	dopts.alignment = alignment;
-
-	imalloc(&sopts, &dopts);
-	return (ret);
-}
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
-je_calloc(size_t num, size_t size)
-{
-	void *ret;
-	static_opts_t sopts;
-	dynamic_opts_t dopts;
-
-	static_opts_init(&sopts);
-	dynamic_opts_init(&dopts);
-
-	sopts.bump_empty_alloc = true;
-	sopts.null_out_result_on_error = true;
-	sopts.set_errno_on_error = true;
-	sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
-
-	dopts.result = &ret;
-	dopts.num_items = num;
-	dopts.item_size = size;
-	dopts.zero = true;
-
-	imalloc(&sopts, &dopts);
-
-	return ret;
-}
-
-static void *
-irealloc_prof_sample(tsd_t *tsd, extent_t *extent, void *old_ptr,
-    size_t old_usize, size_t usize, prof_tctx_t *tctx)
-{
-	void *p;
-
-	if (tctx == NULL)
-		return (NULL);
-	if (usize <= SMALL_MAXCLASS) {
-		p = iralloc(tsd, extent, old_ptr, old_usize, LARGE_MINCLASS, 0,
-		    false);
-		if (p == NULL)
-			return (NULL);
-		arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
-		    usize);
-	} else
-		p = iralloc(tsd, extent, old_ptr, old_usize, usize, 0, false);
-
-	return (p);
-}
-
-JEMALLOC_ALWAYS_INLINE_C void *
-irealloc_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
-    size_t usize)
-{
-	void *p;
-	extent_t *extent;
-	bool prof_active;
-	prof_tctx_t *old_tctx, *tctx;
-
-	prof_active = prof_active_get_unlocked();
-	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_extent, old_ptr);
-	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
-	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
-		p = irealloc_prof_sample(tsd, old_extent, old_ptr, old_usize,
-		    usize, tctx);
-	} else {
-		p = iralloc(tsd, old_extent, old_ptr, old_usize, usize, 0,
-		    false);
-	}
-	if (unlikely(p == NULL)) {
-		prof_alloc_rollback(tsd, tctx, true);
-		return (NULL);
-	}
-	extent = (p == old_ptr) ? old_extent : iealloc(tsd_tsdn(tsd), p);
-	prof_realloc(tsd, extent, p, usize, tctx, prof_active, true, old_extent,
-	    old_ptr, old_usize, old_tctx);
-
-	return (p);
-}
-
-JEMALLOC_INLINE_C void
-ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
-{
-	extent_t *extent;
-	size_t usize;
-
-	witness_assert_lockless(tsd_tsdn(tsd));
-
-	assert(ptr != NULL);
-	assert(malloc_initialized() || IS_INITIALIZER);
-
-	extent = iealloc(tsd_tsdn(tsd), ptr);
-	if (config_prof && opt_prof) {
-		usize = isalloc(tsd_tsdn(tsd), extent, ptr);
-		prof_free(tsd, extent, ptr, usize);
-	} else if (config_stats)
-		usize = isalloc(tsd_tsdn(tsd), extent, ptr);
-	if (config_stats)
-		*tsd_thread_deallocatedp_get(tsd) += usize;
-
-	if (likely(!slow_path))
-		idalloctm(tsd_tsdn(tsd), extent, ptr, tcache, false, false);
-	else
-		idalloctm(tsd_tsdn(tsd), extent, ptr, tcache, false, true);
-}
-
-JEMALLOC_INLINE_C void
-isfree(tsd_t *tsd, extent_t *extent, void *ptr, size_t usize, tcache_t *tcache,
-    bool slow_path)
-{
-	witness_assert_lockless(tsd_tsdn(tsd));
-
-	assert(ptr != NULL);
-	assert(malloc_initialized() || IS_INITIALIZER);
-
-	if (config_prof && opt_prof)
-		prof_free(tsd, extent, ptr, usize);
-	if (config_stats)
-		*tsd_thread_deallocatedp_get(tsd) += usize;
-
-	if (likely(!slow_path))
-		isdalloct(tsd_tsdn(tsd), extent, ptr, usize, tcache, false);
-	else
-		isdalloct(tsd_tsdn(tsd), extent, ptr, usize, tcache, true);
-}
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ALLOC_SIZE(2)
-je_realloc(void *ptr, size_t size)
-{
-	void *ret;
-	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
-	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-	size_t old_usize = 0;
-
-	if (unlikely(size == 0)) {
-		if (ptr != NULL) {
-			tsd_t *tsd;
-
-			/* realloc(ptr, 0) is equivalent to free(ptr). */
-			UTRACE(ptr, 0, 0);
-			tsd = tsd_fetch();
-			ifree(tsd, ptr, tcache_get(tsd, false), true);
-			return (NULL);
-		}
-		size = 1;
-	}
-
-	if (likely(ptr != NULL)) {
-		tsd_t *tsd;
-		extent_t *extent;
-
-		assert(malloc_initialized() || IS_INITIALIZER);
-		tsd = tsd_fetch();
-
-		witness_assert_lockless(tsd_tsdn(tsd));
-
-		extent = iealloc(tsd_tsdn(tsd), ptr);
-		old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
-		if (config_prof && opt_prof) {
-			usize = s2u(size);
-			ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
-			    NULL : irealloc_prof(tsd, extent, ptr, old_usize,
-			    usize);
-		} else {
-			if (config_stats)
-				usize = s2u(size);
-			ret = iralloc(tsd, extent, ptr, old_usize, size, 0,
-			    false);
-		}
-		tsdn = tsd_tsdn(tsd);
-	} else {
-		/* realloc(NULL, size) is equivalent to malloc(size). */
-		return je_malloc(size);
-	}
-
-	if (unlikely(ret == NULL)) {
-		if (config_xmalloc && unlikely(opt_xmalloc)) {
-			malloc_write("<jemalloc>: Error in realloc(): "
-			    "out of memory\n");
-			abort();
-		}
-		set_errno(ENOMEM);
-	}
-	if (config_stats && likely(ret != NULL)) {
-		tsd_t *tsd;
-
-		assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret));
-		tsd = tsdn_tsd(tsdn);
-		*tsd_thread_allocatedp_get(tsd) += usize;
-		*tsd_thread_deallocatedp_get(tsd) += old_usize;
-	}
-	UTRACE(ptr, size, ret);
-	witness_assert_lockless(tsdn);
-	return (ret);
-}
-
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW
-je_free(void *ptr)
-{
-	UTRACE(ptr, 0, 0);
-	if (likely(ptr != NULL)) {
-		tsd_t *tsd = tsd_fetch();
-		witness_assert_lockless(tsd_tsdn(tsd));
-		if (likely(!malloc_slow))
-			ifree(tsd, ptr, tcache_get(tsd, false), false);
-		else
-			ifree(tsd, ptr, tcache_get(tsd, false), true);
-		witness_assert_lockless(tsd_tsdn(tsd));
-	}
-}
-
-/*
- * End malloc(3)-compatible functions.
- */
-/******************************************************************************/
-/*
- * Begin non-standard override functions.
- */
-
-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ATTR(malloc)
-je_memalign(size_t alignment, size_t size)
-{
-	void *ret;
-	static_opts_t sopts;
-	dynamic_opts_t dopts;
-
-	static_opts_init(&sopts);
-	dynamic_opts_init(&dopts);
-
-	sopts.bump_empty_alloc = true;
-	sopts.min_alignment = 1;
-	sopts.oom_string =
-	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
-	sopts.invalid_alignment_string =
-	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
-	sopts.null_out_result_on_error = true;
-
-	dopts.result = &ret;
-	dopts.num_items = 1;
-	dopts.item_size = size;
-	dopts.alignment = alignment;
-
-	imalloc(&sopts, &dopts);
-	return ret;
-}
-#endif
-
-#ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ATTR(malloc)
-je_valloc(size_t size)
-{
-	void *ret;
-
-	static_opts_t sopts;
-	dynamic_opts_t dopts;
-
-	static_opts_init(&sopts);
-	dynamic_opts_init(&dopts);
-
-	sopts.bump_empty_alloc = true;
-	sopts.null_out_result_on_error = true;
-	sopts.min_alignment = PAGE;
-	sopts.oom_string =
-	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
-	sopts.invalid_alignment_string =
-	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
-
-	dopts.result = &ret;
-	dopts.num_items = 1;
-	dopts.item_size = size;
-	dopts.alignment = PAGE;
-
-	imalloc(&sopts, &dopts);
-
-	return (ret);
-}
-#endif
-
-/*
- * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
- * #define je_malloc malloc
- */
-#define	malloc_is_malloc 1
-#define	is_malloc_(a) malloc_is_ ## a
-#define	is_malloc(a) is_malloc_(a)
-
-#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
-/*
- * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
- * to inconsistently reference libc's malloc(3)-compatible functions
- * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
- *
- * These definitions interpose hooks in glibc.  The functions are actually
- * passed an extra argument for the caller return address, which will be
- * ignored.
- */
-JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
-JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
-JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
-# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
-JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
-    je_memalign;
-# endif
-
-#ifdef CPU_COUNT
-/*
- * To enable static linking with glibc, the libc-specific malloc interface must
- * also be implemented, so that none of glibc's malloc.o functions are added to
- * the link.
- */
-#define	ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
-/* To force macro expansion of je_ prefix before stringification. */
-#define	PREALIAS(je_fn)  ALIAS(je_fn)
-void	*__libc_malloc(size_t size) PREALIAS(je_malloc);
-void	__libc_free(void* ptr) PREALIAS(je_free);
-void	*__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
-void	*__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
-void	*__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
-void	*__libc_valloc(size_t size) PREALIAS(je_valloc);
-int	__posix_memalign(void** r, size_t a, size_t s)
-    PREALIAS(je_posix_memalign);
-#undef PREALIAS
-#undef ALIAS
-
-#endif
-
-#endif
-
-/*
- * End non-standard override functions.
- */
-/******************************************************************************/
-/*
- * Begin non-standard functions.
- */
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
-je_mallocx(size_t size, int flags)
-{
-	void *ret;
-	static_opts_t sopts;
-	dynamic_opts_t dopts;
-
-	static_opts_init(&sopts);
-	dynamic_opts_init(&dopts);
-
-	sopts.assert_nonempty_alloc = true;
-	sopts.null_out_result_on_error = true;
-	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
-
-	dopts.result = &ret;
-	dopts.num_items = 1;
-	dopts.item_size = size;
-	if (unlikely(flags != 0)) {
-		if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
-			dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
-		}
-
-		dopts.zero = MALLOCX_ZERO_GET(flags);
-
-		if ((flags & MALLOCX_TCACHE_MASK) != 0) {
-			if ((flags & MALLOCX_TCACHE_MASK)
-			    == MALLOCX_TCACHE_NONE) {
-				dopts.tcache_ind = TCACHE_IND_NONE;
-			} else {
-				dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
-			}
-		} else {
-			dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
-		}
-
-		if ((flags & MALLOCX_ARENA_MASK) != 0)
-			dopts.arena_ind = MALLOCX_ARENA_GET(flags);
-	}
-
-	imalloc(&sopts, &dopts);
-	return ret;
-}
-
-static void *
-irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr,
-    size_t old_usize, size_t usize, size_t alignment, bool zero,
-    tcache_t *tcache, arena_t *arena, prof_tctx_t *tctx)
-{
-	void *p;
-
-	if (tctx == NULL)
-		return (NULL);
-	if (usize <= SMALL_MAXCLASS) {
-		p = iralloct(tsdn, extent, old_ptr, old_usize, LARGE_MINCLASS,
-		    alignment, zero, tcache, arena);
-		if (p == NULL)
-			return (NULL);
-		arena_prof_promote(tsdn, iealloc(tsdn, p), p, usize);
-	} else {
-		p = iralloct(tsdn, extent, old_ptr, old_usize, usize, alignment,
-		    zero, tcache, arena);
-	}
-
-	return (p);
-}
-
-JEMALLOC_ALWAYS_INLINE_C void *
-irallocx_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
-    size_t size, size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
-    arena_t *arena)
-{
-	void *p;
-	extent_t *extent;
-	bool prof_active;
-	prof_tctx_t *old_tctx, *tctx;
-
-	prof_active = prof_active_get_unlocked();
-	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_extent, old_ptr);
-	tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
-	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
-		p = irallocx_prof_sample(tsd_tsdn(tsd), old_extent, old_ptr,
-		    old_usize, *usize, alignment, zero, tcache, arena, tctx);
-	} else {
-		p = iralloct(tsd_tsdn(tsd), old_extent, old_ptr, old_usize,
-		    size, alignment, zero, tcache, arena);
-	}
-	if (unlikely(p == NULL)) {
-		prof_alloc_rollback(tsd, tctx, false);
-		return (NULL);
-	}
-
-	if (p == old_ptr && alignment != 0) {
-		/*
-		 * The allocation did not move, so it is possible that the size
-		 * class is smaller than would guarantee the requested
-		 * alignment, and that the alignment constraint was
-		 * serendipitously satisfied.  Additionally, old_usize may not
-		 * be the same as the current usize because of in-place large
-		 * reallocation.  Therefore, query the actual value of usize.
-		 */
-		extent = old_extent;
-		*usize = isalloc(tsd_tsdn(tsd), extent, p);
-	} else
-		extent = iealloc(tsd_tsdn(tsd), p);
-	prof_realloc(tsd, extent, p, *usize, tctx, prof_active, false,
-	    old_extent, old_ptr, old_usize, old_tctx);
-
-	return (p);
-}
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ALLOC_SIZE(2)
-je_rallocx(void *ptr, size_t size, int flags)
-{
-	void *p;
-	tsd_t *tsd;
-	extent_t *extent;
-	size_t usize;
-	size_t old_usize;
-	size_t alignment = MALLOCX_ALIGN_GET(flags);
-	bool zero = flags & MALLOCX_ZERO;
-	arena_t *arena;
-	tcache_t *tcache;
-
-	assert(ptr != NULL);
-	assert(size != 0);
-	assert(malloc_initialized() || IS_INITIALIZER);
-	tsd = tsd_fetch();
-	witness_assert_lockless(tsd_tsdn(tsd));
-	extent = iealloc(tsd_tsdn(tsd), ptr);
-
-	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
-		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
-		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
-		if (unlikely(arena == NULL))
-			goto label_oom;
-	} else
-		arena = NULL;
-
-	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
-		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
-			tcache = NULL;
-		else
-			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
-	} else
-		tcache = tcache_get(tsd, true);
-
-	old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
-
-	if (config_prof && opt_prof) {
-		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
-		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
-			goto label_oom;
-		p = irallocx_prof(tsd, extent, ptr, old_usize, size, alignment,
-		    &usize, zero, tcache, arena);
-		if (unlikely(p == NULL))
-			goto label_oom;
-	} else {
-		p = iralloct(tsd_tsdn(tsd), extent, ptr, old_usize, size,
-		    alignment, zero, tcache, arena);
-		if (unlikely(p == NULL))
-			goto label_oom;
-		if (config_stats) {
-			usize = isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
-			    p), p);
-		}
-	}
-	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
-
-	if (config_stats) {
-		*tsd_thread_allocatedp_get(tsd) += usize;
-		*tsd_thread_deallocatedp_get(tsd) += old_usize;
-	}
-	UTRACE(ptr, size, p);
-	witness_assert_lockless(tsd_tsdn(tsd));
-	return (p);
-label_oom:
-	if (config_xmalloc && unlikely(opt_xmalloc)) {
-		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
-		abort();
-	}
-	UTRACE(ptr, size, 0);
-	witness_assert_lockless(tsd_tsdn(tsd));
-	return (NULL);
-}
-
-JEMALLOC_ALWAYS_INLINE_C size_t
-ixallocx_helper(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t old_usize,
-    size_t size, size_t extra, size_t alignment, bool zero)
-{
-	size_t usize;
-
-	if (ixalloc(tsdn, extent, ptr, old_usize, size, extra, alignment, zero))
-		return (old_usize);
-	usize = isalloc(tsdn, extent, ptr);
-
-	return (usize);
-}
-
-static size_t
-ixallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *ptr,
-    size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero,
-    prof_tctx_t *tctx)
-{
-	size_t usize;
-
-	if (tctx == NULL)
-		return (old_usize);
-	usize = ixallocx_helper(tsdn, extent, ptr, old_usize, size, extra,
-	    alignment, zero);
-
-	return (usize);
-}
-
-JEMALLOC_ALWAYS_INLINE_C size_t
-ixallocx_prof(tsd_t *tsd, extent_t *extent, void *ptr, size_t old_usize,
-    size_t size, size_t extra, size_t alignment, bool zero)
-{
-	size_t usize_max, usize;
-	bool prof_active;
-	prof_tctx_t *old_tctx, *tctx;
-
-	prof_active = prof_active_get_unlocked();
-	old_tctx = prof_tctx_get(tsd_tsdn(tsd), extent, ptr);
-	/*
-	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
-	 * Therefore, compute its maximum possible value and use that in
-	 * prof_alloc_prep() to decide whether to capture a backtrace.
-	 * prof_realloc() will use the actual usize to decide whether to sample.
-	 */
-	if (alignment == 0) {
-		usize_max = s2u(size+extra);
-		assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
-	} else {
-		usize_max = sa2u(size+extra, alignment);
-		if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
-			/*
-			 * usize_max is out of range, and chances are that
-			 * allocation will fail, but use the maximum possible
-			 * value and carry on with prof_alloc_prep(), just in
-			 * case allocation succeeds.
-			 */
-			usize_max = LARGE_MAXCLASS;
-		}
-	}
-	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
-
-	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
-		usize = ixallocx_prof_sample(tsd_tsdn(tsd), extent, ptr,
-		    old_usize, size, extra, alignment, zero, tctx);
-	} else {
-		usize = ixallocx_helper(tsd_tsdn(tsd), extent, ptr, old_usize,
-		    size, extra, alignment, zero);
-	}
-	if (usize == old_usize) {
-		prof_alloc_rollback(tsd, tctx, false);
-		return (usize);
-	}
-	prof_realloc(tsd, extent, ptr, usize, tctx, prof_active, false, extent,
-	    ptr, old_usize, old_tctx);
-
-	return (usize);
-}
-
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
-je_xallocx(void *ptr, size_t size, size_t extra, int flags)
-{
-	tsd_t *tsd;
-	extent_t *extent;
-	size_t usize, old_usize;
-	size_t alignment = MALLOCX_ALIGN_GET(flags);
-	bool zero = flags & MALLOCX_ZERO;
-
-	assert(ptr != NULL);
-	assert(size != 0);
-	assert(SIZE_T_MAX - size >= extra);
-	assert(malloc_initialized() || IS_INITIALIZER);
-	tsd = tsd_fetch();
-	witness_assert_lockless(tsd_tsdn(tsd));
-	extent = iealloc(tsd_tsdn(tsd), ptr);
-
-	old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
-
-	/*
-	 * The API explicitly absolves itself of protecting against (size +
-	 * extra) numerical overflow, but we may need to clamp extra to avoid
-	 * exceeding LARGE_MAXCLASS.
-	 *
-	 * Ordinarily, size limit checking is handled deeper down, but here we
-	 * have to check as part of (size + extra) clamping, since we need the
-	 * clamped value in the above helper functions.
-	 */
-	if (unlikely(size > LARGE_MAXCLASS)) {
-		usize = old_usize;
-		goto label_not_resized;
-	}
-	if (unlikely(LARGE_MAXCLASS - size < extra))
-		extra = LARGE_MAXCLASS - size;
-
-	if (config_prof && opt_prof) {
-		usize = ixallocx_prof(tsd, extent, ptr, old_usize, size, extra,
-		    alignment, zero);
-	} else {
-		usize = ixallocx_helper(tsd_tsdn(tsd), extent, ptr, old_usize,
-		    size, extra, alignment, zero);
-	}
-	if (unlikely(usize == old_usize))
-		goto label_not_resized;
-
-	if (config_stats) {
-		*tsd_thread_allocatedp_get(tsd) += usize;
-		*tsd_thread_deallocatedp_get(tsd) += old_usize;
-	}
-label_not_resized:
-	UTRACE(ptr, size, ptr);
-	witness_assert_lockless(tsd_tsdn(tsd));
-	return (usize);
-}
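/*
 * A minimal usage sketch (not from the removed file) of the caller-side
 * contract implied above: xallocx() resizes in place, never moves ptr, and
 * returns the resulting usable size, so callers detect failure by comparing
 * that return value against the requested size.  Names use the je_ prefix
 * as they appear in this file.
 */
	void *p = je_mallocx(4096, 0);
	size_t grown = je_xallocx(p, 8192, 0, 0);
	if (grown < 8192) {
		/* Could not grow in place; fall back to rallocx() + copy. */
	}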
-
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
-JEMALLOC_ATTR(pure)
-je_sallocx(const void *ptr, int flags)
-{
-	size_t usize;
-	tsdn_t *tsdn;
-
-	assert(malloc_initialized() || IS_INITIALIZER);
-
-	tsdn = tsdn_fetch();
-	witness_assert_lockless(tsdn);
-
-	if (config_ivsalloc)
-		usize = ivsalloc(tsdn, ptr);
-	else
-		usize = isalloc(tsdn, iealloc(tsdn, ptr), ptr);
-
-	witness_assert_lockless(tsdn);
-	return (usize);
-}
-
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW
-je_dallocx(void *ptr, int flags)
-{
-	tsd_t *tsd;
-	tcache_t *tcache;
-
-	assert(ptr != NULL);
-	assert(malloc_initialized() || IS_INITIALIZER);
-
-	tsd = tsd_fetch();
-	witness_assert_lockless(tsd_tsdn(tsd));
-	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
-		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
-			tcache = NULL;
-		else
-			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
-	} else
-		tcache = tcache_get(tsd, false);
-
-	UTRACE(ptr, 0, 0);
-	if (likely(!malloc_slow))
-		ifree(tsd, ptr, tcache, false);
-	else
-		ifree(tsd, ptr, tcache, true);
-	witness_assert_lockless(tsd_tsdn(tsd));
-}
-
-JEMALLOC_ALWAYS_INLINE_C size_t
-inallocx(tsdn_t *tsdn, size_t size, int flags)
-{
-	size_t usize;
-
-	witness_assert_lockless(tsdn);
-
-	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
-		usize = s2u(size);
-	else
-		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
-	witness_assert_lockless(tsdn);
-	return (usize);
-}
-
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW
-je_sdallocx(void *ptr, size_t size, int flags)
-{
-	tsd_t *tsd;
-	extent_t *extent;
-	size_t usize;
-	tcache_t *tcache;
-
-	assert(ptr != NULL);
-	assert(malloc_initialized() || IS_INITIALIZER);
-	tsd = tsd_fetch();
-	extent = iealloc(tsd_tsdn(tsd), ptr);
-	usize = inallocx(tsd_tsdn(tsd), size, flags);
-	assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
-
-	witness_assert_lockless(tsd_tsdn(tsd));
-	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
-		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
-			tcache = NULL;
-		else
-			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
-	} else
-		tcache = tcache_get(tsd, false);
-
-	UTRACE(ptr, 0, 0);
-	if (likely(!malloc_slow))
-		isfree(tsd, extent, ptr, usize, tcache, false);
-	else
-		isfree(tsd, extent, ptr, usize, tcache, true);
-	witness_assert_lockless(tsd_tsdn(tsd));
-}
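/*
 * A minimal usage sketch of the sized-deallocation contract asserted above:
 * the size passed to sdallocx() must map to the same size class as the live
 * allocation, so passing the original request size (with the same flags) is
 * always valid and lets the allocator skip the size lookup.
 */
	void *p = je_mallocx(100, 0);
	/* ... use p ... */
	je_sdallocx(p, 100, 0);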
-
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
-JEMALLOC_ATTR(pure)
-je_nallocx(size_t size, int flags)
-{
-	size_t usize;
-	tsdn_t *tsdn;
-
-	assert(size != 0);
-
-	if (unlikely(malloc_init()))
-		return (0);
-
-	tsdn = tsdn_fetch();
-	witness_assert_lockless(tsdn);
-
-	usize = inallocx(tsdn, size, flags);
-	if (unlikely(usize > LARGE_MAXCLASS))
-		return (0);
-
-	witness_assert_lockless(tsdn);
-	return (usize);
-}
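/*
 * A minimal usage sketch: nallocx() answers "what usable size would this
 * request receive?" without allocating, returning 0 for an invalid request.
 */
	size_t usable = je_nallocx(100, 0);	/* Rounded up to a size class. */
	assert(usable >= 100);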
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW
-je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
-    size_t newlen)
-{
-	int ret;
-	tsd_t *tsd;
-
-	if (unlikely(malloc_init()))
-		return (EAGAIN);
-
-	tsd = tsd_fetch();
-	witness_assert_lockless(tsd_tsdn(tsd));
-	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
-	witness_assert_lockless(tsd_tsdn(tsd));
-	return (ret);
-}
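/*
 * A minimal usage sketch of the mallctl namespace: refresh the cached
 * statistics by writing to "epoch", then read "stats.allocated".
 */
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	je_mallctl("epoch", &epoch, &esz, &epoch, sizeof(epoch));

	size_t allocated, sz = sizeof(allocated);
	if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
		/* allocated == total bytes in live allocations. */
	}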
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW
-je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
-{
-	int ret;
-	tsdn_t *tsdn;
-
-	if (unlikely(malloc_init()))
-		return (EAGAIN);
-
-	tsdn = tsdn_fetch();
-	witness_assert_lockless(tsdn);
-	ret = ctl_nametomib(tsdn, name, mibp, miblenp);
-	witness_assert_lockless(tsdn);
-	return (ret);
-}
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW
-je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
-  void *newp, size_t newlen)
-{
-	int ret;
-	tsd_t *tsd;
-
-	if (unlikely(malloc_init()))
-		return (EAGAIN);
-
-	tsd = tsd_fetch();
-	witness_assert_lockless(tsd_tsdn(tsd));
-	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
-	witness_assert_lockless(tsd_tsdn(tsd));
-	return (ret);
-}
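/*
 * A minimal usage sketch of the MIB path: translate a name once with
 * mallctlnametomib(), then reuse the MIB for repeated lookups, overriding
 * the index component instead of re-parsing the string each time.  nbins
 * here is a hypothetical count (normally read via "arenas.nbins").
 */
	size_t mib[4], miblen = 4;
	if (je_mallctlnametomib("arenas.bin.0.size", mib, &miblen) == 0) {
		for (unsigned i = 0; i < nbins; i++) {
			size_t bin_size, bsz = sizeof(bin_size);
			mib[2] = i;
			je_mallctlbymib(mib, miblen, &bin_size, &bsz, NULL, 0);
		}
	}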
-
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW
-je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
-    const char *opts)
-{
-	tsdn_t *tsdn;
-
-	tsdn = tsdn_fetch();
-	witness_assert_lockless(tsdn);
-	stats_print(write_cb, cbopaque, opts);
-	witness_assert_lockless(tsdn);
-}
-
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
-je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
-{
-	size_t ret;
-	tsdn_t *tsdn;
-
-	assert(malloc_initialized() || IS_INITIALIZER);
-
-	tsdn = tsdn_fetch();
-	witness_assert_lockless(tsdn);
-
-	if (config_ivsalloc)
-		ret = ivsalloc(tsdn, ptr);
-	else {
-		ret = (ptr == NULL) ? 0 : isalloc(tsdn, iealloc(tsdn, ptr),
-		    ptr);
-	}
-
-	witness_assert_lockless(tsdn);
-	return (ret);
-}
-
-/*
- * End non-standard functions.
- */
-/******************************************************************************/
-/*
- * The following functions are used by threading libraries for protection of
- * malloc during fork().
- */
-
-/*
- * If an application creates a thread before doing any allocation in the main
- * thread, then calls fork(2) in the main thread followed by memory allocation
- * in the child process, a race can occur that results in deadlock within the
- * child: the main thread may have forked while the created thread had
- * partially initialized the allocator.  Ordinarily jemalloc prevents
- * fork/malloc races via the following functions it registers during
- * initialization using pthread_atfork(), but of course that does no good if
- * the allocator isn't fully initialized at fork time.  The following library
- * constructor is a partial solution to this problem.  It may still be possible
- * to trigger the deadlock described above, but doing so would involve forking
- * via a library constructor that runs before jemalloc's runs.
- */
-#ifndef JEMALLOC_JET
-JEMALLOC_ATTR(constructor)
-static void
-jemalloc_constructor(void)
-{
-	malloc_init();
-}
-#endif
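/*
 * A minimal sketch of the registration the comment above refers to: during
 * initialization jemalloc hands its prefork/postfork handlers (defined
 * below) to pthread_atfork(), so fork() cannot interleave with a
 * half-completed allocator operation.  The helper name here is illustrative
 * only; the real call sits in the malloc_init path.
 */
	static void
	register_fork_handlers(void)
	{
		pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
		    jemalloc_postfork_child);
	}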
-
-#ifndef JEMALLOC_MUTEX_INIT_CB
-void
-jemalloc_prefork(void)
-#else
-JEMALLOC_EXPORT void
-_malloc_prefork(void)
-#endif
-{
-	tsd_t *tsd;
-	unsigned i, j, narenas;
-	arena_t *arena;
-
-#ifdef JEMALLOC_MUTEX_INIT_CB
-	if (!malloc_initialized())
-		return;
-#endif
-	assert(malloc_initialized());
-
-	tsd = tsd_fetch();
-
-	narenas = narenas_total_get();
-
-	witness_prefork(tsd);
-	/* Acquire all mutexes in a safe order. */
-	ctl_prefork(tsd_tsdn(tsd));
-	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
-	prof_prefork0(tsd_tsdn(tsd));
-	for (i = 0; i < 3; i++) {
-		for (j = 0; j < narenas; j++) {
-			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
-			    NULL) {
-				switch (i) {
-				case 0:
-					arena_prefork0(tsd_tsdn(tsd), arena);
-					break;
-				case 1:
-					arena_prefork1(tsd_tsdn(tsd), arena);
-					break;
-				case 2:
-					arena_prefork2(tsd_tsdn(tsd), arena);
-					break;
-				default: not_reached();
-				}
-			}
-		}
-	}
-	for (i = 0; i < narenas; i++) {
-		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
-			arena_prefork3(tsd_tsdn(tsd), arena);
-	}
-	prof_prefork1(tsd_tsdn(tsd));
-}
-
-#ifndef JEMALLOC_MUTEX_INIT_CB
-void
-jemalloc_postfork_parent(void)
-#else
-JEMALLOC_EXPORT void
-_malloc_postfork(void)
-#endif
-{
-	tsd_t *tsd;
-	unsigned i, narenas;
-
-#ifdef JEMALLOC_MUTEX_INIT_CB
-	if (!malloc_initialized())
-		return;
-#endif
-	assert(malloc_initialized());
-
-	tsd = tsd_fetch();
-
-	witness_postfork_parent(tsd);
-	/* Release all mutexes, now that fork() has completed. */
-	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
-		arena_t *arena;
-
-		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
-			arena_postfork_parent(tsd_tsdn(tsd), arena);
-	}
-	prof_postfork_parent(tsd_tsdn(tsd));
-	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
-	ctl_postfork_parent(tsd_tsdn(tsd));
-}
-
-void
-jemalloc_postfork_child(void)
-{
-	tsd_t *tsd;
-	unsigned i, narenas;
-
-	assert(malloc_initialized());
-
-	tsd = tsd_fetch();
-
-	witness_postfork_child(tsd);
-	/* Release all mutexes, now that fork() has completed. */
-	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
-		arena_t *arena;
-
-		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
-			arena_postfork_child(tsd_tsdn(tsd), arena);
-	}
-	prof_postfork_child(tsd_tsdn(tsd));
-	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
-	ctl_postfork_child(tsd_tsdn(tsd));
-}
-
-/******************************************************************************/
diff --git a/zircon/third_party/ulib/jemalloc/src/jemalloc_cpp.cpp b/zircon/third_party/ulib/jemalloc/src/jemalloc_cpp.cpp
deleted file mode 100644
index 984c944..0000000
--- a/zircon/third_party/ulib/jemalloc/src/jemalloc_cpp.cpp
+++ /dev/null
@@ -1,135 +0,0 @@
-#include <mutex>
-#include <new>
-
-#define	JEMALLOC_CPP_CPP_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-// All operators in this file are exported.
-
-// Possibly alias hidden versions of malloc and sdallocx to avoid an extra plt
-// thunk?
-//
-// extern __typeof (sdallocx) sdallocx_int
-//  __attribute ((alias ("sdallocx"),
-//		visibility ("hidden")));
-//
-// ... but it needs to work with jemalloc namespaces.
-
-void	*operator new(std::size_t size);
-void	*operator new[](std::size_t size);
-void	*operator new(std::size_t size, const std::nothrow_t &) noexcept;
-void	*operator new[](std::size_t size, const std::nothrow_t &) noexcept;
-void	operator delete(void *ptr) noexcept;
-void	operator delete[](void *ptr) noexcept;
-void	operator delete(void *ptr, const std::nothrow_t &) noexcept;
-void	operator delete[](void *ptr, const std::nothrow_t &) noexcept;
-
-#if __cpp_sized_deallocation >= 201309
-/* C++14's sized-delete operators. */
-void	operator delete(void *ptr, std::size_t size) noexcept;
-void	operator delete[](void *ptr, std::size_t size) noexcept;
-#endif
-
-template <bool IsNoExcept>
-JEMALLOC_INLINE
-void *
-newImpl(std::size_t size) noexcept(IsNoExcept)
-{
-	void *ptr = je_malloc(size);
-	if (likely(ptr != nullptr))
-		return (ptr);
-
-	while (ptr == nullptr) {
-		std::new_handler handler;
-		// GCC-4.8 and clang 4.0 do not have std::get_new_handler.
-		{
-			static std::mutex mtx;
-			std::lock_guard<std::mutex> lock(mtx);
-
-			handler = std::set_new_handler(nullptr);
-			std::set_new_handler(handler);
-		}
-		if (handler == nullptr)
-			break;
-
-		try {
-			handler();
-		} catch (const std::bad_alloc &) {
-			break;
-		}
-
-		ptr = je_malloc(size);
-	}
-
-	if (ptr == nullptr && !IsNoExcept)
-		std::__throw_bad_alloc();
-	return (ptr);
-}
-
-void *
-operator new(std::size_t size)
-{
-	return (newImpl<false>(size));
-}
-
-void *
-operator new[](std::size_t size)
-{
-	return (newImpl<false>(size));
-}
-
-void *
-operator new(std::size_t size, const std::nothrow_t &) noexcept
-{
-	return (newImpl<true>(size));
-}
-
-void *
-operator new[](std::size_t size, const std::nothrow_t &) noexcept
-{
-	return (newImpl<true>(size));
-}
-
-void
-operator delete(void *ptr) noexcept
-{
-	je_free(ptr);
-}
-
-void
-operator delete[](void *ptr) noexcept
-{
-	je_free(ptr);
-}
-
-void
-operator delete(void *ptr, const std::nothrow_t &) noexcept
-{
-	je_free(ptr);
-}
-
-void operator delete[](void *ptr, const std::nothrow_t &) noexcept
-{
-	je_free(ptr);
-}
-
-#if __cpp_sized_deallocation >= 201309
-
-void
-operator delete(void *ptr, std::size_t size) noexcept
-{
-	if (unlikely(ptr == nullptr)) {
-		return;
-	}
-	je_sdallocx(ptr, size, /*flags=*/0);
-}
-
-void operator delete[](void *ptr, std::size_t size) noexcept
-{
-	if (unlikely(ptr == nullptr)) {
-		return;
-	}
-	je_sdallocx(ptr, size, /*flags=*/0);
-}
-
-#endif  // __cpp_sized_deallocation
diff --git a/zircon/third_party/ulib/jemalloc/src/large.c b/zircon/third_party/ulib/jemalloc/src/large.c
deleted file mode 100644
index 9936b23..0000000
--- a/zircon/third_party/ulib/jemalloc/src/large.c
+++ /dev/null
@@ -1,343 +0,0 @@
-#define	JEMALLOC_LARGE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-
-void *
-large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
-{
-	assert(usize == s2u(usize));
-
-	return (large_palloc(tsdn, arena, usize, CACHELINE, zero));
-}
-
-void *
-large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
-    bool zero)
-{
-	size_t ausize;
-	extent_t *extent;
-	bool is_zeroed;
-	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
-
-	assert(!tsdn_null(tsdn) || arena != NULL);
-
-	ausize = sa2u(usize, alignment);
-	if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS))
-		return (NULL);
-
-	/*
-	 * Copy zero into is_zeroed and pass the copy to extent_alloc(), so that
-	 * it is possible to make correct junk/zero fill decisions below.
-	 */
-	is_zeroed = zero;
-	if (likely(!tsdn_null(tsdn)))
-		arena = arena_choose(tsdn_tsd(tsdn), arena);
-	if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
-	    arena, usize, alignment, &is_zeroed)) == NULL)
-		return (NULL);
-
-	/* Insert extent into large. */
-	malloc_mutex_lock(tsdn, &arena->large_mtx);
-	ql_elm_new(extent, ql_link);
-	ql_tail_insert(&arena->large, extent, ql_link);
-	malloc_mutex_unlock(tsdn, &arena->large_mtx);
-	if (config_prof && arena_prof_accum(tsdn, arena, usize))
-		prof_idump(tsdn);
-
-	if (zero || (config_fill && unlikely(opt_zero))) {
-		if (!is_zeroed) {
-			memset(extent_addr_get(extent), 0,
-			    extent_usize_get(extent));
-		}
-	} else if (config_fill && unlikely(opt_junk_alloc)) {
-		memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
-		    extent_usize_get(extent));
-	}
-
-	arena_decay_tick(tsdn, arena);
-	return (extent_addr_get(extent));
-}
-
-#ifdef JEMALLOC_JET
-#undef large_dalloc_junk
-#define	large_dalloc_junk JEMALLOC_N(n_large_dalloc_junk)
-#endif
-void
-large_dalloc_junk(void *ptr, size_t usize)
-{
-	memset(ptr, JEMALLOC_FREE_JUNK, usize);
-}
-#ifdef JEMALLOC_JET
-#undef large_dalloc_junk
-#define	large_dalloc_junk JEMALLOC_N(large_dalloc_junk)
-large_dalloc_junk_t *large_dalloc_junk = JEMALLOC_N(n_large_dalloc_junk);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef large_dalloc_maybe_junk
-#define	large_dalloc_maybe_junk JEMALLOC_N(n_large_dalloc_maybe_junk)
-#endif
-void
-large_dalloc_maybe_junk(void *ptr, size_t usize)
-{
-	if (config_fill && have_dss && unlikely(opt_junk_free)) {
-		/*
-		 * Only bother junk filling if the extent isn't about to be
-		 * unmapped.
-		 */
-		if (!config_munmap || (have_dss && extent_in_dss(ptr)))
-			large_dalloc_junk(ptr, usize);
-	}
-}
-#ifdef JEMALLOC_JET
-#undef large_dalloc_maybe_junk
-#define	large_dalloc_maybe_junk JEMALLOC_N(large_dalloc_maybe_junk)
-large_dalloc_maybe_junk_t *large_dalloc_maybe_junk =
-    JEMALLOC_N(n_large_dalloc_maybe_junk);
-#endif
-
-static bool
-large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
-{
-	arena_t *arena = extent_arena_get(extent);
-	size_t oldusize = extent_usize_get(extent);
-	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
-	size_t diff = extent_size_get(extent) - (usize + large_pad);
-
-	assert(oldusize > usize);
-
-	if (extent_hooks->split == NULL)
-		return (true);
-
-	/* Split excess pages. */
-	if (diff != 0) {
-		extent_t *trail = extent_split_wrapper(tsdn, arena,
-		    &extent_hooks, extent, usize + large_pad, usize, diff,
-		    diff);
-		if (trail == NULL)
-			return (true);
-
-		if (config_fill && unlikely(opt_junk_free)) {
-			large_dalloc_maybe_junk(extent_addr_get(trail),
-			    extent_usize_get(trail));
-		}
-
-		arena_extent_cache_dalloc(tsdn, arena, &extent_hooks, trail);
-	}
-
-	arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);
-
-	return (false);
-}
-
-static bool
-large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
-    bool zero)
-{
-	arena_t *arena = extent_arena_get(extent);
-	size_t oldusize = extent_usize_get(extent);
-	bool is_zeroed_trail = false;
-	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
-	size_t trailsize = usize - extent_usize_get(extent);
-	extent_t *trail;
-
-	if (extent_hooks->merge == NULL)
-		return (true);
-
-	if ((trail = arena_extent_cache_alloc(tsdn, arena, &extent_hooks,
-	    extent_past_get(extent), trailsize, CACHELINE, &is_zeroed_trail)) ==
-	    NULL) {
-		bool commit = true;
-		if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
-		    extent_past_get(extent), trailsize, 0, CACHELINE,
-		    &is_zeroed_trail, &commit, false)) == NULL)
-			return (true);
-	}
-
-	if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
-		extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
-		return (true);
-	}
-
-	if (zero || (config_fill && unlikely(opt_zero))) {
-		if (config_cache_oblivious) {
-			/*
-			 * Zero the trailing bytes of the original allocation's
-			 * last page, since they are in an indeterminate state.
-			 * There will always be trailing bytes, because ptr's
-			 * offset from the beginning of the extent is a multiple
-			 * of CACHELINE in [0 .. PAGE).
-			 */
-			void *zbase = (void *)
-			    ((uintptr_t)extent_addr_get(extent) + oldusize);
-			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
-			    PAGE));
-			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
-			assert(nzero > 0);
-			memset(zbase, 0, nzero);
-		}
-		if (!is_zeroed_trail) {
-			memset((void *)((uintptr_t)extent_addr_get(extent) +
-			    oldusize), 0, usize - oldusize);
-		}
-	} else if (config_fill && unlikely(opt_junk_alloc)) {
-		memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
-		    JEMALLOC_ALLOC_JUNK, usize - oldusize);
-	}
-
-	arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);
-
-	return (false);
-}
-
-bool
-large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
-    size_t usize_max, bool zero)
-{
-	assert(s2u(extent_usize_get(extent)) == extent_usize_get(extent));
-	/* The following should have been caught by callers. */
-	assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS);
-	/* Both allocation sizes must be large to avoid a move. */
-	assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize_max >=
-	    LARGE_MINCLASS);
-
-	if (usize_max > extent_usize_get(extent)) {
-		/* Attempt to expand the allocation in-place. */
-		if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
-		    zero)) {
-			arena_decay_tick(tsdn, extent_arena_get(extent));
-			return (false);
-		}
-		/* Try again, this time with usize_min. */
-		if (usize_min < usize_max && usize_min >
-		    extent_usize_get(extent) &&
-		    large_ralloc_no_move_expand(tsdn, extent, usize_min,
-		    zero)) {
-			arena_decay_tick(tsdn, extent_arena_get(extent));
-			return (false);
-		}
-	}
-
-	/*
-	 * Avoid moving the allocation if the existing extent size accommodates
-	 * the new size.
-	 */
-	if (extent_usize_get(extent) >= usize_min && extent_usize_get(extent) <=
-	    usize_max) {
-		arena_decay_tick(tsdn, extent_arena_get(extent));
-		return (false);
-	}
-
-	/* Attempt to shrink the allocation in-place. */
-	if (extent_usize_get(extent) > usize_max) {
-		if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
-			arena_decay_tick(tsdn, extent_arena_get(extent));
-			return (false);
-		}
-	}
-	return (true);
-}
-
-static void *
-large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool zero)
-{
-	if (alignment <= CACHELINE)
-		return (large_malloc(tsdn, arena, usize, zero));
-	return (large_palloc(tsdn, arena, usize, alignment, zero));
-}
-
-void *
-large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
-    size_t alignment, bool zero, tcache_t *tcache)
-{
-	void *ret;
-	size_t copysize;
-
-	/* The following should have been caught by callers. */
-	assert(usize > 0 && usize <= LARGE_MAXCLASS);
-	/* Both allocation sizes must be large to avoid a move. */
-	assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize >=
-	    LARGE_MINCLASS);
-
-	/* Try to avoid moving the allocation. */
-	if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero))
-		return (extent_addr_get(extent));
-
-	/*
-	 * usize and old size are different enough that we need to use a
-	 * different size class.  In that case, fall back to allocating new
-	 * space and copying.
-	 */
-	ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, zero);
-	if (ret == NULL)
-		return (NULL);
-
-	copysize = (usize < extent_usize_get(extent)) ? usize :
-	    extent_usize_get(extent);
-	memcpy(ret, extent_addr_get(extent), copysize);
-	isdalloct(tsdn, extent, extent_addr_get(extent),
-	    extent_usize_get(extent), tcache, true);
-	return (ret);
-}
-
-/*
- * junked_locked indicates whether the extent's data have been junk-filled, and
- * whether the arena's lock is currently held.  The arena's large_mtx is
- * independent of these considerations.
- */
-static void
-large_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked)
-{
-	arena_t *arena;
-
-	arena = extent_arena_get(extent);
-	malloc_mutex_lock(tsdn, &arena->large_mtx);
-	ql_remove(&arena->large, extent, ql_link);
-	malloc_mutex_unlock(tsdn, &arena->large_mtx);
-	if (!junked_locked) {
-		large_dalloc_maybe_junk(extent_addr_get(extent),
-		    extent_usize_get(extent));
-	}
-	arena_extent_dalloc_large(tsdn, arena, extent, junked_locked);
-
-	if (!junked_locked)
-		arena_decay_tick(tsdn, arena);
-}
-
-void
-large_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent)
-{
-	large_dalloc_impl(tsdn, extent, true);
-}
-
-void
-large_dalloc(tsdn_t *tsdn, extent_t *extent)
-{
-	large_dalloc_impl(tsdn, extent, false);
-}
-
-size_t
-large_salloc(tsdn_t *tsdn, const extent_t *extent)
-{
-	return (extent_usize_get(extent));
-}
-
-prof_tctx_t *
-large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent)
-{
-	return (extent_prof_tctx_get(extent));
-}
-
-void
-large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx)
-{
-	extent_prof_tctx_set(extent, tctx);
-}
-
-void
-large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent)
-{
-	large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
-}
diff --git a/zircon/third_party/ulib/jemalloc/src/mb.c b/zircon/third_party/ulib/jemalloc/src/mb.c
deleted file mode 100644
index dc2c0a2..0000000
--- a/zircon/third_party/ulib/jemalloc/src/mb.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define	JEMALLOC_MB_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/zircon/third_party/ulib/jemalloc/src/mutex.c b/zircon/third_party/ulib/jemalloc/src/mutex.c
deleted file mode 100644
index bde536d..0000000
--- a/zircon/third_party/ulib/jemalloc/src/mutex.c
+++ /dev/null
@@ -1,152 +0,0 @@
-#define	JEMALLOC_MUTEX_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-#include <dlfcn.h>
-#endif
-
-#ifndef _CRT_SPINCOUNT
-#define	_CRT_SPINCOUNT 4000
-#endif
-
-/******************************************************************************/
-/* Data. */
-
-#ifdef JEMALLOC_LAZY_LOCK
-bool isthreaded = false;
-#endif
-#ifdef JEMALLOC_MUTEX_INIT_CB
-static bool		postpone_init = true;
-static malloc_mutex_t	*postponed_mutexes = NULL;
-#endif
-
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-static void	pthread_create_once(void);
-#endif
-
-/******************************************************************************/
-/*
- * We intercept pthread_create() calls in order to toggle isthreaded if the
- * process goes multi-threaded.
- */
-
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
-    void *(*)(void *), void *__restrict);
-
-static void
-pthread_create_once(void)
-{
-	pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
-	if (pthread_create_fptr == NULL) {
-		malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
-		    "\"pthread_create\")\n");
-		abort();
-	}
-
-	isthreaded = true;
-}
-
-JEMALLOC_EXPORT int
-pthread_create(pthread_t *__restrict thread,
-    const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
-    void *__restrict arg)
-{
-	static pthread_once_t once_control = PTHREAD_ONCE_INIT;
-
-	pthread_once(&once_control, pthread_create_once);
-
-	return (pthread_create_fptr(thread, attr, start_routine, arg));
-}
-#endif
-
-/******************************************************************************/
-
-#ifdef JEMALLOC_MUTEX_INIT_CB
-JEMALLOC_EXPORT int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
-    void *(calloc_cb)(size_t, size_t));
-#endif
-
-bool
-malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
-{
-#ifdef _WIN32
-#  if _WIN32_WINNT >= 0x0600
-	InitializeSRWLock(&mutex->lock);
-#  else
-	if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
-	    _CRT_SPINCOUNT))
-		return (true);
-#  endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-	mutex->lock = OS_UNFAIR_LOCK_INIT;
-#elif (defined(JEMALLOC_OSSPIN))
-	mutex->lock = 0;
-#elif (defined(JEMALLOC_MUTEX_INIT_CB))
-	if (postpone_init) {
-		mutex->postponed_next = postponed_mutexes;
-		postponed_mutexes = mutex;
-	} else {
-		if (_pthread_mutex_init_calloc_cb(&mutex->lock,
-		    bootstrap_calloc) != 0)
-			return (true);
-	}
-#else
-	pthread_mutexattr_t attr;
-
-	if (pthread_mutexattr_init(&attr) != 0)
-		return (true);
-	pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
-	if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
-		pthread_mutexattr_destroy(&attr);
-		return (true);
-	}
-	pthread_mutexattr_destroy(&attr);
-#endif
-	if (config_debug)
-		witness_init(&mutex->witness, name, rank, NULL, NULL);
-	return (false);
-}
-
-void
-malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-	malloc_mutex_lock(tsdn, mutex);
-}
-
-void
-malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-	malloc_mutex_unlock(tsdn, mutex);
-}
-
-void
-malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-#ifdef JEMALLOC_MUTEX_INIT_CB
-	malloc_mutex_unlock(tsdn, mutex);
-#else
-	if (malloc_mutex_init(mutex, mutex->witness.name,
-	    mutex->witness.rank)) {
-		malloc_printf("<jemalloc>: Error re-initializing mutex in "
-		    "child\n");
-		if (opt_abort)
-			abort();
-	}
-#endif
-}
-
-bool
-malloc_mutex_boot(void)
-{
-#ifdef JEMALLOC_MUTEX_INIT_CB
-	postpone_init = false;
-	while (postponed_mutexes != NULL) {
-		if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
-		    bootstrap_calloc) != 0)
-			return (true);
-		postponed_mutexes = postponed_mutexes->postponed_next;
-	}
-#endif
-	return (false);
-}
diff --git a/zircon/third_party/ulib/jemalloc/src/nstime.c b/zircon/third_party/ulib/jemalloc/src/nstime.c
deleted file mode 100644
index 57ebf2e..0000000
--- a/zircon/third_party/ulib/jemalloc/src/nstime.c
+++ /dev/null
@@ -1,180 +0,0 @@
-#include "jemalloc/internal/jemalloc_internal.h"
-
-#define	BILLION	UINT64_C(1000000000)
-
-void
-nstime_init(nstime_t *time, uint64_t ns)
-{
-	time->ns = ns;
-}
-
-void
-nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec)
-{
-	time->ns = sec * BILLION + nsec;
-}
-
-uint64_t
-nstime_ns(const nstime_t *time)
-{
-	return (time->ns);
-}
-
-uint64_t
-nstime_sec(const nstime_t *time)
-{
-	return (time->ns / BILLION);
-}
-
-uint64_t
-nstime_nsec(const nstime_t *time)
-{
-	return (time->ns % BILLION);
-}
-
-void
-nstime_copy(nstime_t *time, const nstime_t *source)
-{
-	*time = *source;
-}
-
-int
-nstime_compare(const nstime_t *a, const nstime_t *b)
-{
-	return ((a->ns > b->ns) - (a->ns < b->ns));
-}
-
-void
-nstime_add(nstime_t *time, const nstime_t *addend)
-{
-	assert(UINT64_MAX - time->ns >= addend->ns);
-
-	time->ns += addend->ns;
-}
-
-void
-nstime_subtract(nstime_t *time, const nstime_t *subtrahend)
-{
-	assert(nstime_compare(time, subtrahend) >= 0);
-
-	time->ns -= subtrahend->ns;
-}
-
-void
-nstime_imultiply(nstime_t *time, uint64_t multiplier)
-{
-	assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
-	    2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
-
-	time->ns *= multiplier;
-}
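/*
 * A sketch of the overflow check asserted above: the product is accepted
 * when either (a) both operands fit in 32 bits, so a 64-bit multiply cannot
 * wrap (UINT64_MAX << 32 masks the high halves), or (b) dividing the
 * wrapped product by the multiplier recovers the original value.
 */
	static bool
	mul_overflows(uint64_t a, uint64_t b)
	{
		if (((a | b) & (UINT64_MAX << 32)) == 0)
			return (false);			/* Both fit in 32 bits. */
		return (b != 0 && (a * b) / b != a);	/* Round-trip check. */
	}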
-
-void
-nstime_idivide(nstime_t *time, uint64_t divisor)
-{
-	assert(divisor != 0);
-
-	time->ns /= divisor;
-}
-
-uint64_t
-nstime_divide(const nstime_t *time, const nstime_t *divisor)
-{
-	assert(divisor->ns != 0);
-
-	return (time->ns / divisor->ns);
-}
-
-#ifdef _WIN32
-#  define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time)
-{
-	FILETIME ft;
-	uint64_t ticks_100ns;
-
-	GetSystemTimeAsFileTime(&ft);
-	ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
-
-	nstime_init(time, ticks_100ns * 100);
-}
-#elif JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
-#  define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time)
-{
-	struct timespec ts;
-
-	clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
-	nstime_init2(time, ts.tv_sec, ts.tv_nsec);
-}
-#elif JEMALLOC_HAVE_CLOCK_MONOTONIC
-#  define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time)
-{
-	struct timespec ts;
-
-	clock_gettime(CLOCK_MONOTONIC, &ts);
-	nstime_init2(time, ts.tv_sec, ts.tv_nsec);
-}
-#elif JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
-#  define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time)
-{
-	nstime_init(time, mach_absolute_time());
-}
-#else
-#  define NSTIME_MONOTONIC false
-static void
-nstime_get(nstime_t *time)
-{
-	struct timeval tv;
-
-	gettimeofday(&tv, NULL);
-	nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000);
-}
-#endif
-
-#ifdef JEMALLOC_JET
-#undef nstime_monotonic
-#define	nstime_monotonic JEMALLOC_N(n_nstime_monotonic)
-#endif
-bool
-nstime_monotonic(void)
-{
-	return (NSTIME_MONOTONIC);
-#undef NSTIME_MONOTONIC
-}
-#ifdef JEMALLOC_JET
-#undef nstime_monotonic
-#define	nstime_monotonic JEMALLOC_N(nstime_monotonic)
-nstime_monotonic_t *nstime_monotonic = JEMALLOC_N(n_nstime_monotonic);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef nstime_update
-#define	nstime_update JEMALLOC_N(n_nstime_update)
-#endif
-bool
-nstime_update(nstime_t *time)
-{
-	nstime_t old_time;
-
-	nstime_copy(&old_time, time);
-	nstime_get(time);
-
-	/* Handle non-monotonic clocks. */
-	if (unlikely(nstime_compare(&old_time, time) > 0)) {
-		nstime_copy(time, &old_time);
-		return (true);
-	}
-
-	return (false);
-}
-#ifdef JEMALLOC_JET
-#undef nstime_update
-#define	nstime_update JEMALLOC_N(nstime_update)
-nstime_update_t *nstime_update = JEMALLOC_N(n_nstime_update);
-#endif
diff --git a/zircon/third_party/ulib/jemalloc/src/pages.c b/zircon/third_party/ulib/jemalloc/src/pages.c
deleted file mode 100644
index dc45b30..0000000
--- a/zircon/third_party/ulib/jemalloc/src/pages.c
+++ /dev/null
@@ -1,433 +0,0 @@
-#define	JEMALLOC_PAGES_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
-#include <sys/sysctl.h>
-#endif
-
-/******************************************************************************/
-/* Data. */
-
-#if !defined(_WIN32) && !defined(__Fuchsia__)
-#  define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
-#  define PAGES_PROT_DECOMMIT (PROT_NONE)
-static int	mmap_flags;
-#endif
-static bool	os_overcommits;
-
-/******************************************************************************/
-
-#ifdef __Fuchsia__
-
-#include <threads.h>
-
-#include <zircon/process.h>
-#include <zircon/status.h>
-#include <zircon/syscalls.h>
-
-// Reserve a terabyte of address space for heap allocations.
-#define VMAR_SIZE (1ull << 40)
-
-#define MMAP_VMO_NAME "jemalloc-heap"
-
-// malloc wants to manage both address space and memory mapped within
-// chunks of address space. To maintain claims to address space we
-// must use our own vmar.
-static uintptr_t pages_base;
-static zx_handle_t pages_vmar;
-static zx_handle_t pages_vmo;
-
-// Protect reservations to the pages_vmar.
-static mtx_t vmar_lock;
-
-static void* fuchsia_pages_map(void* start, size_t len) {
-	if (len >= PTRDIFF_MAX) {
-		return NULL;
-	}
-
-	// round up to page size
-	len = (len + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
-
-	mtx_lock(&vmar_lock);
-
-	// If we are given a base address, then jemalloc's internal
-	// bookkeeping expects to be able to extend an allocation at
-	// that bit of the address space, and so we just directly
-	// compute an offset. If we are not, ask for a new random
-	// region from the pages_vmar.
-
-	// TODO(kulakowski) Extending a region might fail. Investigate
-	// whether it is worthwhile teaching jemalloc about vmars and
-	// vmos at the extent.c or arena.c layer.
-	size_t offset;
-	if (start != NULL) {
-		uintptr_t addr = (uintptr_t)start;
-		if (addr < pages_base)
-			abort();
-		offset = addr - pages_base;
-	} else {
-		// TODO(kulakowski) Use ZX-942 instead of having to
-		// allocate and destroy under a lock.
-		zx_handle_t subvmar;
-		uintptr_t subvmar_base;
-		zx_status_t status = _zx_vmar_allocate(pages_vmar,
-		    ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
-			  0u, len, &subvmar, &subvmar_base);
-		if (status != ZX_OK)
-			abort();
-		_zx_vmar_destroy(subvmar);
-		_zx_handle_close(subvmar);
-		offset = subvmar_base - pages_base;
-	}
-
-	uintptr_t ptr = 0;
-	zx_vm_option_t zx_options = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
-	    ZX_VM_SPECIFIC;
-	zx_status_t status = _zx_vmar_map(pages_vmar, zx_options, offset, pages_vmo,
-	    offset, len, &ptr);
-	if (status != ZX_OK) {
-		ptr = 0u;
-	}
-
-	mtx_unlock(&vmar_lock);
-	return (void*)ptr;
-}
-
-static zx_status_t fuchsia_pages_free(void* addr, size_t size) {
-	uintptr_t ptr = (uintptr_t)addr;
-	uintptr_t offset = ptr - pages_base;
-
-	zx_status_t status = _zx_vmar_unmap(pages_vmar, ptr, size);
-	if (status != ZX_OK)
-		return status;
-	/*
-	 * Decommit after unmapping to avoid modifying the page mapping twice.
-	 */
-	return _zx_vmo_op_range(pages_vmo, ZX_VMO_OP_DECOMMIT, offset, size, NULL, 0);
-}
-
-static void* fuchsia_pages_trim(void* ret, void* addr, size_t size,
-    size_t alloc_size, size_t leadsize) {
-	size_t trailsize = alloc_size - leadsize - size;
-
-	if (leadsize != 0)
-		pages_unmap(addr, leadsize);
-	if (trailsize != 0)
-		pages_unmap((void *)((uintptr_t)ret + size), trailsize);
-	return (ret);
-}
-
-#endif
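/*
 * A compressed sketch of the reserve-then-map pattern described in the
 * comments above, using the same zircon calls as pages_boot() and
 * fuchsia_pages_map(): reserve one large VMAR up front, back it with a
 * single VMO, then map ranges at explicit offsets inside the reservation.
 */
	zx_handle_t vmar, vmo;
	uintptr_t base, addr;
	if (_zx_vmar_allocate(_zx_vmar_root_self(),
	    ZX_VM_CAN_MAP_SPECIFIC | ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE,
	    0, VMAR_SIZE, &vmar, &base) != ZX_OK)
		abort();
	if (_zx_vmo_create(VMAR_SIZE, 0, &vmo) != ZX_OK)
		abort();
	/* Map one page of the VMO at offset 0 within the reservation. */
	if (_zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
	    ZX_VM_SPECIFIC, 0, vmo, 0, PAGE_SIZE, &addr) != ZX_OK)
		abort();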
-
-void *
-pages_map(void *addr, size_t size, bool *commit)
-{
-	void *ret;
-
-	assert(size != 0);
-
-	if (os_overcommits)
-		*commit = true;
-
-#ifdef _WIN32
-	/*
-	 * If VirtualAlloc can't allocate at the given address when one is
-	 * given, it fails and returns NULL.
-	 */
-	ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
-	    PAGE_READWRITE);
-#elif __Fuchsia__
-	ret = fuchsia_pages_map(addr, size);
-#else
-	/*
-	 * We don't use MAP_FIXED here, because it can cause the *replacement*
-	 * of existing mappings, and we only want to create new mappings.
-	 */
-	{
-		int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
-
-		ret = mmap(addr, size, prot, mmap_flags, -1, 0);
-	}
-	assert(ret != NULL);
-
-	if (ret == MAP_FAILED)
-		ret = NULL;
-	else if (addr != NULL && ret != addr) {
-		/*
-		 * We succeeded in mapping memory, but not in the right place.
-		 */
-		pages_unmap(ret, size);
-		ret = NULL;
-	}
-#endif
-	assert(ret == NULL || (addr == NULL && ret != addr)
-	    || (addr != NULL && ret == addr));
-	return (ret);
-}
-
-void
-pages_unmap(void *addr, size_t size)
-{
-#ifdef _WIN32
-	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
-#elif __Fuchsia__
-	zx_status_t status = fuchsia_pages_free(addr, size);
-	if (status != ZX_OK)
-#else
-	if (munmap(addr, size) == -1)
-#endif
-	{
-#if __Fuchsia__
-		const char* buf = _zx_status_get_string(status);
-#else
-		char buf[BUFERROR_BUF];
-		buferror(get_errno(), buf, sizeof(buf));
-#endif
-
-		malloc_printf("<jemalloc>: Error in "
-#ifdef _WIN32
-		              "VirtualFree"
-#elif __Fuchsia__
-		              "unmapping jemalloc heap pages"
-#else
-		              "munmap"
-#endif
-		              "(): %s\n", buf);
-		if (opt_abort)
-			abort();
-	}
-}
-
-void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
-    bool *commit)
-{
-	void *ret = (void *)((uintptr_t)addr + leadsize);
-
-	assert(alloc_size >= leadsize + size);
-#ifdef _WIN32
-	{
-		void *new_addr;
-
-		pages_unmap(addr, alloc_size);
-		new_addr = pages_map(ret, size, commit);
-		if (new_addr == ret)
-			return (ret);
-		if (new_addr)
-			pages_unmap(new_addr, size);
-		return (NULL);
-	}
-#elif __Fuchsia__
-	return fuchsia_pages_trim(ret, addr, size, alloc_size, leadsize);
-#else
-	{
-		size_t trailsize = alloc_size - leadsize - size;
-
-		if (leadsize != 0)
-			pages_unmap(addr, leadsize);
-		if (trailsize != 0)
-			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
-		return (ret);
-	}
-#endif
-}
-
-static bool
-pages_commit_impl(void *addr, size_t size, bool commit)
-{
-	if (os_overcommits)
-		return (true);
-
-#ifdef _WIN32
-	return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
-	    PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
-#elif __Fuchsia__
-	not_reached();
-#else
-	{
-		int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
-		void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
-		    -1, 0);
-		if (result == MAP_FAILED)
-			return (true);
-		if (result != addr) {
-			/*
-			 * We succeeded in mapping memory, but not in the right
-			 * place.
-			 */
-			pages_unmap(result, size);
-			return (true);
-		}
-		return (false);
-	}
-#endif
-}
-
-bool
-pages_commit(void *addr, size_t size)
-{
-	return (pages_commit_impl(addr, size, true));
-}
-
-bool
-pages_decommit(void *addr, size_t size)
-{
-	return (pages_commit_impl(addr, size, false));
-}
-
-bool
-pages_purge_lazy(void *addr, size_t size)
-{
-	if (!pages_can_purge_lazy)
-		return (true);
-
-#ifdef _WIN32
-	VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
-#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
-	madvise(addr, size, MADV_FREE);
-#else
-	not_reached();
-#endif
-	return (false);
-}
-
-bool
-pages_purge_forced(void *addr, size_t size)
-{
-	if (!pages_can_purge_forced)
-		return (true);
-
-#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
-	return (madvise(addr, size, MADV_DONTNEED) != 0);
-#elif __Fuchsia__
-	uintptr_t offset = (uintptr_t)addr - pages_base;
-
-	return _zx_vmo_op_range(pages_vmo, ZX_VMO_OP_DECOMMIT, offset, size, NULL, 0) != ZX_OK;
-#else
-	not_reached();
-#endif
-}
-
-bool
-pages_huge(void *addr, size_t size)
-{
-	assert(HUGEPAGE_ADDR2BASE(addr) == addr);
-	assert(HUGEPAGE_CEILING(size) == size);
-
-#ifdef JEMALLOC_THP
-	return (madvise(addr, size, MADV_HUGEPAGE) != 0);
-#else
-	return (true);
-#endif
-}
-
-bool
-pages_nohuge(void *addr, size_t size)
-{
-	assert(HUGEPAGE_ADDR2BASE(addr) == addr);
-	assert(HUGEPAGE_CEILING(size) == size);
-
-#ifdef JEMALLOC_THP
-	return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
-#else
-	return (false);
-#endif
-}
-
-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
-static bool
-os_overcommits_sysctl(void)
-{
-	int vm_overcommit;
-	size_t sz;
-
-	sz = sizeof(vm_overcommit);
-	if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0)
-		return (false); /* Error. */
-
-	return ((vm_overcommit & 0x3) == 0);
-}
-#endif
-
-#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
-/*
- * Use syscall(2) rather than {open,read,close}(2) when possible to avoid
- * reentry during bootstrapping if another library has interposed system call
- * wrappers.
- */
-static bool
-os_overcommits_proc(void)
-{
-	int fd;
-	char buf[1];
-	ssize_t nread;
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
-	fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
-#else
-	fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
-#endif
-	if (fd == -1)
-		return (false); /* Error. */
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
-	nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
-#else
-	nread = read(fd, &buf, sizeof(buf));
-#endif
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
-	syscall(SYS_close, fd);
-#else
-	close(fd);
-#endif
-
-	if (nread < 1)
-		return (false); /* Error. */
-	/*
-	 * /proc/sys/vm/overcommit_memory meanings:
-	 * 0: Heuristic overcommit.
-	 * 1: Always overcommit.
-	 * 2: Never overcommit.
-	 */
-	return (buf[0] == '0' || buf[0] == '1');
-}
-#endif
-
-void
-pages_boot(void)
-{
-#if !defined(_WIN32) && !defined(__Fuchsia__)
-	mmap_flags = MAP_PRIVATE | MAP_ANON;
-#endif
-
-#if defined(__Fuchsia__)
-	zx_vm_option_t vmar_flags = ZX_VM_CAN_MAP_SPECIFIC | ZX_VM_CAN_MAP_READ |
-	    ZX_VM_CAN_MAP_WRITE;
-	zx_status_t status = _zx_vmar_allocate(_zx_vmar_root_self(), vmar_flags, 0,
-	                        VMAR_SIZE, &pages_vmar, &pages_base);
-	if (status != ZX_OK)
-		abort();
-	status = _zx_vmo_create(VMAR_SIZE, 0, &pages_vmo);
-	if (status != ZX_OK)
-		abort();
-	status = _zx_object_set_property(pages_vmo, ZX_PROP_NAME, MMAP_VMO_NAME,
-	    strlen(MMAP_VMO_NAME));
-	if (status != ZX_OK)
-		abort();
-#endif
-
-#if defined(__Fuchsia__)
-	os_overcommits = true;
-#elif defined(JEMALLOC_SYSCTL_VM_OVERCOMMIT)
-	os_overcommits = os_overcommits_sysctl();
-#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
-	os_overcommits = os_overcommits_proc();
-#  ifdef MAP_NORESERVE
-	if (os_overcommits)
-		mmap_flags |= MAP_NORESERVE;
-#  endif
-#else
-	os_overcommits = false;
-#endif
-}
diff --git a/zircon/third_party/ulib/jemalloc/src/prng.c b/zircon/third_party/ulib/jemalloc/src/prng.c
deleted file mode 100644
index 76646a2..0000000
--- a/zircon/third_party/ulib/jemalloc/src/prng.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define	JEMALLOC_PRNG_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/zircon/third_party/ulib/jemalloc/src/prof.c b/zircon/third_party/ulib/jemalloc/src/prof.c
deleted file mode 100644
index 286e8db..0000000
--- a/zircon/third_party/ulib/jemalloc/src/prof.c
+++ /dev/null
@@ -1,2421 +0,0 @@
-#define	JEMALLOC_PROF_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-/******************************************************************************/
-
-#ifdef JEMALLOC_PROF_LIBUNWIND
-#define	UNW_LOCAL_ONLY
-#include <libunwind.h>
-#endif
-
-#ifdef JEMALLOC_PROF_LIBGCC
-#include <unwind.h>
-#endif
-
-/******************************************************************************/
-/* Data. */
-
-bool		opt_prof = false;
-bool		opt_prof_active = true;
-bool		opt_prof_thread_active_init = true;
-size_t		opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
-ssize_t		opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
-bool		opt_prof_gdump = false;
-bool		opt_prof_final = false;
-bool		opt_prof_leak = false;
-bool		opt_prof_accum = false;
-char		opt_prof_prefix[
-    /* Minimize memory bloat for non-prof builds. */
-#ifdef JEMALLOC_PROF
-    PATH_MAX +
-#endif
-    1];
-
-/*
- * Initialized as opt_prof_active, and accessed via
- * prof_active_[gs]et{_unlocked,}().
- */
-bool			prof_active;
-static malloc_mutex_t	prof_active_mtx;
-
-/*
- * Initialized as opt_prof_thread_active_init, and accessed via
- * prof_thread_active_init_[gs]et().
- */
-static bool		prof_thread_active_init;
-static malloc_mutex_t	prof_thread_active_init_mtx;
-
-/*
- * Initialized as opt_prof_gdump, and accessed via
- * prof_gdump_[gs]et{_unlocked,}().
- */
-bool			prof_gdump_val;
-static malloc_mutex_t	prof_gdump_mtx;
-
-uint64_t	prof_interval = 0;
-
-size_t		lg_prof_sample;
-
-/*
- * Table of mutexes that are shared among gctx's.  These are leaf locks, so
- * there is no problem with using them for more than one gctx at the same time.
- * The primary motivation for this sharing though is that gctx's are ephemeral,
- * and destroying mutexes causes complications for systems that allocate when
- * creating/destroying mutexes.
- */
-static malloc_mutex_t	*gctx_locks;
-static unsigned		cum_gctxs; /* Atomic counter. */
-
-/*
- * Table of mutexes that are shared among tdata's.  No operations require
- * holding multiple tdata locks, so there is no problem with using them for more
- * than one tdata at the same time, even though a gctx lock may be acquired
- * while holding a tdata lock.
- */
-static malloc_mutex_t	*tdata_locks;
-
-/*
- * Global hash of (prof_bt_t *)-->(prof_gctx_t *).  This is the master data
- * structure that knows about all backtraces currently captured.
- */
-static ckh_t		bt2gctx;
-static malloc_mutex_t	bt2gctx_mtx;
-
-/*
- * Tree of all extant prof_tdata_t structures, regardless of state,
- * {attached,detached,expired}.
- */
-static prof_tdata_tree_t	tdatas;
-static malloc_mutex_t	tdatas_mtx;
-
-static uint64_t		next_thr_uid;
-static malloc_mutex_t	next_thr_uid_mtx;
-
-static malloc_mutex_t	prof_dump_seq_mtx;
-static uint64_t		prof_dump_seq;
-static uint64_t		prof_dump_iseq;
-static uint64_t		prof_dump_mseq;
-static uint64_t		prof_dump_useq;
-
-/*
- * This buffer is rather large for stack allocation, so use a single buffer for
- * all profile dumps.
- */
-static malloc_mutex_t	prof_dump_mtx;
-static char		prof_dump_buf[
-    /* Minimize memory bloat for non-prof builds. */
-#ifdef JEMALLOC_PROF
-    PROF_DUMP_BUFSIZE
-#else
-    1
-#endif
-];
-static size_t		prof_dump_buf_end;
-static int		prof_dump_fd;
-
-/* Do not dump any profiles until bootstrapping is complete. */
-static bool		prof_booted = false;
-
-/******************************************************************************/
-/*
- * Function prototypes for static functions that are referenced prior to
- * definition.
- */
-
-static bool	prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
-static void	prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
-static bool	prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
-    bool even_if_attached);
-static void	prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
-    bool even_if_attached);
-static char	*prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
-
-/******************************************************************************/
-/* Red-black trees. */
-
-JEMALLOC_INLINE_C int
-prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
-{
-	uint64_t a_thr_uid = a->thr_uid;
-	uint64_t b_thr_uid = b->thr_uid;
-	int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
-	if (ret == 0) {
-		uint64_t a_thr_discrim = a->thr_discrim;
-		uint64_t b_thr_discrim = b->thr_discrim;
-		ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
-		    b_thr_discrim);
-		if (ret == 0) {
-			uint64_t a_tctx_uid = a->tctx_uid;
-			uint64_t b_tctx_uid = b->tctx_uid;
-			ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
-			    b_tctx_uid);
-		}
-	}
-	return (ret);
-}
-
-rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
-    tctx_link, prof_tctx_comp)
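/*
 * A sketch of the comparison idiom used throughout these comparators:
 * (a > b) - (a < b) yields -1, 0, or 1 directly, avoiding the overflow a
 * subtraction-based three-way compare would risk on 64-bit keys.
 */
	static int
	cmp_u64(uint64_t a, uint64_t b)
	{
		return ((a > b) - (a < b));
	}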
-
-JEMALLOC_INLINE_C int
-prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b)
-{
-	unsigned a_len = a->bt.len;
-	unsigned b_len = b->bt.len;
-	unsigned comp_len = (a_len < b_len) ? a_len : b_len;
-	int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
-	if (ret == 0)
-		ret = (a_len > b_len) - (a_len < b_len);
-	return (ret);
-}
-
-rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
-    prof_gctx_comp)
-
-JEMALLOC_INLINE_C int
-prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
-{
-	int ret;
-	uint64_t a_uid = a->thr_uid;
-	uint64_t b_uid = b->thr_uid;
-
-	ret = ((a_uid > b_uid) - (a_uid < b_uid));
-	if (ret == 0) {
-		uint64_t a_discrim = a->thr_discrim;
-		uint64_t b_discrim = b->thr_discrim;
-
-		ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
-	}
-	return (ret);
-}
-
-rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
-    prof_tdata_comp)
-
-/******************************************************************************/
-
-void
-prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
-{
-	prof_tdata_t *tdata;
-
-	cassert(config_prof);
-
-	if (updated) {
-		/*
-		 * Compute a new sample threshold.  This isn't very important in
-		 * practice, because this function is rarely executed, so the
-		 * potential for sample bias is minimal except in contrived
-		 * programs.
-		 */
-		tdata = prof_tdata_get(tsd, true);
-		if (tdata != NULL)
-			prof_sample_threshold_update(tdata);
-	}
-
-	if ((uintptr_t)tctx > (uintptr_t)1U) {
-		malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
-		tctx->prepared = false;
-		if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
-			prof_tctx_destroy(tsd, tctx);
-		else
-			malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
-	}
-}
-
-void
-prof_malloc_sample_object(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-    size_t usize, prof_tctx_t *tctx)
-{
-	prof_tctx_set(tsdn, extent, ptr, usize, tctx);
-
-	malloc_mutex_lock(tsdn, tctx->tdata->lock);
-	tctx->cnts.curobjs++;
-	tctx->cnts.curbytes += usize;
-	if (opt_prof_accum) {
-		tctx->cnts.accumobjs++;
-		tctx->cnts.accumbytes += usize;
-	}
-	tctx->prepared = false;
-	malloc_mutex_unlock(tsdn, tctx->tdata->lock);
-}
-
-void
-prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
-{
-	malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
-	assert(tctx->cnts.curobjs > 0);
-	assert(tctx->cnts.curbytes >= usize);
-	tctx->cnts.curobjs--;
-	tctx->cnts.curbytes -= usize;
-
-	if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
-		prof_tctx_destroy(tsd, tctx);
-	else
-		malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
-}
-
-void
-bt_init(prof_bt_t *bt, void **vec)
-{
-	cassert(config_prof);
-
-	bt->vec = vec;
-	bt->len = 0;
-}
-
-JEMALLOC_INLINE_C void
-prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
-{
-	cassert(config_prof);
-	assert(tdata == prof_tdata_get(tsd, false));
-
-	if (tdata != NULL) {
-		assert(!tdata->enq);
-		tdata->enq = true;
-	}
-
-	malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
-}
-
-JEMALLOC_INLINE_C void
-prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
-{
-	cassert(config_prof);
-	assert(tdata == prof_tdata_get(tsd, false));
-
-	malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
-
-	if (tdata != NULL) {
-		bool idump, gdump;
-
-		assert(tdata->enq);
-		tdata->enq = false;
-		idump = tdata->enq_idump;
-		tdata->enq_idump = false;
-		gdump = tdata->enq_gdump;
-		tdata->enq_gdump = false;
-
-		if (idump)
-			prof_idump(tsd_tsdn(tsd));
-		if (gdump)
-			prof_gdump(tsd_tsdn(tsd));
-	}
-}
-
-#ifdef JEMALLOC_PROF_LIBUNWIND
-void
-prof_backtrace(prof_bt_t *bt)
-{
-	int nframes;
-
-	cassert(config_prof);
-	assert(bt->len == 0);
-	assert(bt->vec != NULL);
-
-	nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
-	if (nframes <= 0)
-		return;
-	bt->len = nframes;
-}
-#elif (defined(JEMALLOC_PROF_LIBGCC))
-static _Unwind_Reason_Code
-prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
-{
-	cassert(config_prof);
-
-	return (_URC_NO_REASON);
-}
-
-static _Unwind_Reason_Code
-prof_unwind_callback(struct _Unwind_Context *context, void *arg)
-{
-	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
-	void *ip;
-
-	cassert(config_prof);
-
-	ip = (void *)_Unwind_GetIP(context);
-	if (ip == NULL)
-		return (_URC_END_OF_STACK);
-	data->bt->vec[data->bt->len] = ip;
-	data->bt->len++;
-	if (data->bt->len == data->max)
-		return (_URC_END_OF_STACK);
-
-	return (_URC_NO_REASON);
-}
-
-void
-prof_backtrace(prof_bt_t *bt)
-{
-	prof_unwind_data_t data = {bt, PROF_BT_MAX};
-
-	cassert(config_prof);
-
-	_Unwind_Backtrace(prof_unwind_callback, &data);
-}
-#elif (defined(JEMALLOC_PROF_GCC))
-void
-prof_backtrace(prof_bt_t *bt)
-{
-#define	BT_FRAME(i)							\
-	if ((i) < PROF_BT_MAX) {					\
-		void *p;						\
-		if (__builtin_frame_address(i) == 0)			\
-			return;						\
-		p = __builtin_return_address(i);			\
-		if (p == NULL)						\
-			return;						\
-		bt->vec[(i)] = p;					\
-		bt->len = (i) + 1;					\
-	} else								\
-		return;
-
-	cassert(config_prof);
-
-	BT_FRAME(0)
-	BT_FRAME(1)
-	BT_FRAME(2)
-	BT_FRAME(3)
-	BT_FRAME(4)
-	BT_FRAME(5)
-	BT_FRAME(6)
-	BT_FRAME(7)
-	BT_FRAME(8)
-	BT_FRAME(9)
-
-	BT_FRAME(10)
-	BT_FRAME(11)
-	BT_FRAME(12)
-	BT_FRAME(13)
-	BT_FRAME(14)
-	BT_FRAME(15)
-	BT_FRAME(16)
-	BT_FRAME(17)
-	BT_FRAME(18)
-	BT_FRAME(19)
-
-	BT_FRAME(20)
-	BT_FRAME(21)
-	BT_FRAME(22)
-	BT_FRAME(23)
-	BT_FRAME(24)
-	BT_FRAME(25)
-	BT_FRAME(26)
-	BT_FRAME(27)
-	BT_FRAME(28)
-	BT_FRAME(29)
-
-	BT_FRAME(30)
-	BT_FRAME(31)
-	BT_FRAME(32)
-	BT_FRAME(33)
-	BT_FRAME(34)
-	BT_FRAME(35)
-	BT_FRAME(36)
-	BT_FRAME(37)
-	BT_FRAME(38)
-	BT_FRAME(39)
-
-	BT_FRAME(40)
-	BT_FRAME(41)
-	BT_FRAME(42)
-	BT_FRAME(43)
-	BT_FRAME(44)
-	BT_FRAME(45)
-	BT_FRAME(46)
-	BT_FRAME(47)
-	BT_FRAME(48)
-	BT_FRAME(49)
-
-	BT_FRAME(50)
-	BT_FRAME(51)
-	BT_FRAME(52)
-	BT_FRAME(53)
-	BT_FRAME(54)
-	BT_FRAME(55)
-	BT_FRAME(56)
-	BT_FRAME(57)
-	BT_FRAME(58)
-	BT_FRAME(59)
-
-	BT_FRAME(60)
-	BT_FRAME(61)
-	BT_FRAME(62)
-	BT_FRAME(63)
-	BT_FRAME(64)
-	BT_FRAME(65)
-	BT_FRAME(66)
-	BT_FRAME(67)
-	BT_FRAME(68)
-	BT_FRAME(69)
-
-	BT_FRAME(70)
-	BT_FRAME(71)
-	BT_FRAME(72)
-	BT_FRAME(73)
-	BT_FRAME(74)
-	BT_FRAME(75)
-	BT_FRAME(76)
-	BT_FRAME(77)
-	BT_FRAME(78)
-	BT_FRAME(79)
-
-	BT_FRAME(80)
-	BT_FRAME(81)
-	BT_FRAME(82)
-	BT_FRAME(83)
-	BT_FRAME(84)
-	BT_FRAME(85)
-	BT_FRAME(86)
-	BT_FRAME(87)
-	BT_FRAME(88)
-	BT_FRAME(89)
-
-	BT_FRAME(90)
-	BT_FRAME(91)
-	BT_FRAME(92)
-	BT_FRAME(93)
-	BT_FRAME(94)
-	BT_FRAME(95)
-	BT_FRAME(96)
-	BT_FRAME(97)
-	BT_FRAME(98)
-	BT_FRAME(99)
-
-	BT_FRAME(100)
-	BT_FRAME(101)
-	BT_FRAME(102)
-	BT_FRAME(103)
-	BT_FRAME(104)
-	BT_FRAME(105)
-	BT_FRAME(106)
-	BT_FRAME(107)
-	BT_FRAME(108)
-	BT_FRAME(109)
-
-	BT_FRAME(110)
-	BT_FRAME(111)
-	BT_FRAME(112)
-	BT_FRAME(113)
-	BT_FRAME(114)
-	BT_FRAME(115)
-	BT_FRAME(116)
-	BT_FRAME(117)
-	BT_FRAME(118)
-	BT_FRAME(119)
-
-	BT_FRAME(120)
-	BT_FRAME(121)
-	BT_FRAME(122)
-	BT_FRAME(123)
-	BT_FRAME(124)
-	BT_FRAME(125)
-	BT_FRAME(126)
-	BT_FRAME(127)
-#undef BT_FRAME
-}
-#else
-void
-prof_backtrace(prof_bt_t *bt)
-{
-	cassert(config_prof);
-	not_reached();
-}
-#endif
-
-static malloc_mutex_t *
-prof_gctx_mutex_choose(void)
-{
-	unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);
-
-	return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
-}
-
-static malloc_mutex_t *
-prof_tdata_mutex_choose(uint64_t thr_uid)
-{
-	return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
-}
-
-static prof_gctx_t *
-prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
-{
-	/*
-	 * Create a single allocation that has space for vec of length bt->len.
-	 */
-	size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
-	prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
-	    size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
-	    true);
-	if (gctx == NULL)
-		return (NULL);
-	gctx->lock = prof_gctx_mutex_choose();
-	/*
-	 * Set nlimbo to 1, in order to avoid a race condition with
-	 * prof_tctx_destroy()/prof_gctx_try_destroy().
-	 */
-	gctx->nlimbo = 1;
-	tctx_tree_new(&gctx->tctxs);
-	/* Duplicate bt. */
-	memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
-	gctx->bt.vec = gctx->vec;
-	gctx->bt.len = bt->len;
-	return (gctx);
-}
-
-static void
-prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
-    prof_tdata_t *tdata)
-{
-	cassert(config_prof);
-
-	/*
-	 * Check that gctx is still unused by any thread cache before destroying
-	 * it.  prof_lookup() increments gctx->nlimbo in order to avoid a race
-	 * condition with this function, as does prof_tctx_destroy() in order to
-	 * avoid a race between the main body of prof_tctx_destroy() and entry
-	 * into this function.
-	 */
-	prof_enter(tsd, tdata_self);
-	malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
-	assert(gctx->nlimbo != 0);
-	if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
-		/* Remove gctx from bt2gctx. */
-		if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
-			not_reached();
-		prof_leave(tsd, tdata_self);
-		/* Destroy gctx. */
-		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
-		idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), gctx), gctx,
-		    NULL, true, true);
-	} else {
-		/*
-		 * Compensate for increment in prof_tctx_destroy() or
-		 * prof_lookup().
-		 */
-		gctx->nlimbo--;
-		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
-		prof_leave(tsd, tdata_self);
-	}
-}
-
-static bool
-prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx)
-{
-	malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
-
-	if (opt_prof_accum)
-		return (false);
-	if (tctx->cnts.curobjs != 0)
-		return (false);
-	if (tctx->prepared)
-		return (false);
-	return (true);
-}
-
-static bool
-prof_gctx_should_destroy(prof_gctx_t *gctx)
-{
-	if (opt_prof_accum)
-		return (false);
-	if (!tctx_tree_empty(&gctx->tctxs))
-		return (false);
-	if (gctx->nlimbo != 0)
-		return (false);
-	return (true);
-}
-
-static void
-prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
-{
-	prof_tdata_t *tdata = tctx->tdata;
-	prof_gctx_t *gctx = tctx->gctx;
-	bool destroy_tdata, destroy_tctx, destroy_gctx;
-
-	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
-
-	assert(tctx->cnts.curobjs == 0);
-	assert(tctx->cnts.curbytes == 0);
-	assert(!opt_prof_accum);
-	assert(tctx->cnts.accumobjs == 0);
-	assert(tctx->cnts.accumbytes == 0);
-
-	ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
-	destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
-	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
-
-	malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
-	switch (tctx->state) {
-	case prof_tctx_state_nominal:
-		tctx_tree_remove(&gctx->tctxs, tctx);
-		destroy_tctx = true;
-		if (prof_gctx_should_destroy(gctx)) {
-			/*
-			 * Increment gctx->nlimbo in order to keep another
-			 * thread from winning the race to destroy gctx while
-			 * this one has gctx->lock dropped.  Without this, it
-			 * would be possible for another thread to:
-			 *
-			 * 1) Sample an allocation associated with gctx.
-			 * 2) Deallocate the sampled object.
-			 * 3) Successfully prof_gctx_try_destroy(gctx).
-			 *
-			 * The result would be that gctx no longer exists by the
-			 * time this thread accesses it in
-			 * prof_gctx_try_destroy().
-			 */
-			gctx->nlimbo++;
-			destroy_gctx = true;
-		} else
-			destroy_gctx = false;
-		break;
-	case prof_tctx_state_dumping:
-		/*
-		 * A dumping thread needs tctx to remain valid until dumping
-		 * has finished.  Change state such that the dumping thread will
-		 * complete destruction during a late dump iteration phase.
-		 */
-		tctx->state = prof_tctx_state_purgatory;
-		destroy_tctx = false;
-		destroy_gctx = false;
-		break;
-	default:
-		not_reached();
-		destroy_tctx = false;
-		destroy_gctx = false;
-	}
-	malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
-	if (destroy_gctx) {
-		prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
-		    tdata);
-	}
-
-	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
-
-	if (destroy_tdata)
-		prof_tdata_destroy(tsd, tdata, false);
-
-	if (destroy_tctx)
-		idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tctx), tctx,
-		    NULL, true, true);
-}
-
-static bool
-prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
-    void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
-{
-	union {
-		prof_gctx_t	*p;
-		void		*v;
-	} gctx;
-	union {
-		prof_bt_t	*p;
-		void		*v;
-	} btkey;
-	bool new_gctx;
-
-	prof_enter(tsd, tdata);
-	if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
-		/* bt has never been seen before.  Insert it. */
-		gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
-		if (gctx.v == NULL) {
-			prof_leave(tsd, tdata);
-			return (true);
-		}
-		btkey.p = &gctx.p->bt;
-		if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
-			/* OOM. */
-			prof_leave(tsd, tdata);
-			idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), gctx.v),
-			    gctx.v, NULL, true, true);
-			return (true);
-		}
-		new_gctx = true;
-	} else {
-		/*
-		 * Increment nlimbo, in order to avoid a race condition with
-		 * prof_tctx_destroy()/prof_gctx_try_destroy().
-		 */
-		malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
-		gctx.p->nlimbo++;
-		malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
-		new_gctx = false;
-	}
-	prof_leave(tsd, tdata);
-
-	*p_btkey = btkey.v;
-	*p_gctx = gctx.p;
-	*p_new_gctx = new_gctx;
-	return (false);
-}
-
-prof_tctx_t *
-prof_lookup(tsd_t *tsd, prof_bt_t *bt)
-{
-	union {
-		prof_tctx_t	*p;
-		void		*v;
-	} ret;
-	prof_tdata_t *tdata;
-	bool not_found;
-
-	cassert(config_prof);
-
-	tdata = prof_tdata_get(tsd, false);
-	if (tdata == NULL)
-		return (NULL);
-
-	malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
-	not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
-	if (!not_found) /* Note double negative! */
-		ret.p->prepared = true;
-	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
-	if (not_found) {
-		void *btkey;
-		prof_gctx_t *gctx;
-		bool new_gctx, error;
-
-		/*
-		 * This thread's cache lacks bt.  Look for it in the global
-		 * cache.
-		 */
-		if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
-		    &new_gctx))
-			return (NULL);
-
-		/* Link a prof_tctx_t into gctx for this thread. */
-		ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
-		    size2index(sizeof(prof_tctx_t)), false, NULL, true,
-		    arena_ichoose(tsd, NULL), true);
-		if (ret.p == NULL) {
-			if (new_gctx)
-				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
-			return (NULL);
-		}
-		ret.p->tdata = tdata;
-		ret.p->thr_uid = tdata->thr_uid;
-		ret.p->thr_discrim = tdata->thr_discrim;
-		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
-		ret.p->gctx = gctx;
-		ret.p->tctx_uid = tdata->tctx_uid_next++;
-		ret.p->prepared = true;
-		ret.p->state = prof_tctx_state_initializing;
-		malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
-		error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
-		malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
-		if (error) {
-			if (new_gctx)
-				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
-			idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), ret.v),
-			    ret.v, NULL, true, true);
-			return (NULL);
-		}
-		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
-		ret.p->state = prof_tctx_state_nominal;
-		tctx_tree_insert(&gctx->tctxs, ret.p);
-		gctx->nlimbo--;
-		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
-	}
-
-	return (ret.p);
-}
-
-/*
- * The bodies of this function and prof_leakcheck() are compiled out unless heap
- * profiling is enabled, so that it is possible to compile jemalloc with
- * floating point support completely disabled.  Avoiding floating point code is
- * important on memory-constrained systems, but it also enables a workaround for
- * versions of glibc that don't properly save/restore floating point registers
- * during dynamic lazy symbol loading (which internally calls into whatever
- * malloc implementation happens to be integrated into the application).  Note
- * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
- * memory moves, so jemalloc must be compiled with such optimizations disabled
- * (e.g. -mno-sse) in order for the workaround to be complete.
- */
-void
-prof_sample_threshold_update(prof_tdata_t *tdata)
-{
-#ifdef JEMALLOC_PROF
-	uint64_t r;
-	double u;
-
-	if (!config_prof)
-		return;
-
-	if (lg_prof_sample == 0) {
-		tdata->bytes_until_sample = 0;
-		return;
-	}
-
-	/*
-	 * Compute sample interval as a geometrically distributed random
-	 * variable with mean (2^lg_prof_sample).
-	 *
-	 *                             __        __
-	 *                             |  log(u)  |                     1
-	 * tdata->bytes_until_sample = | -------- |, where p = ---------------
-	 *                             | log(1-p) |             lg_prof_sample
-	 *                                                     2
-	 *
-	 * For more information on the math, see:
-	 *
-	 *   Non-Uniform Random Variate Generation
-	 *   Luc Devroye
-	 *   Springer-Verlag, New York, 1986
-	 *   pp 500
-	 *   (http://luc.devroye.org/rnbookindex.html)
-	 */
-	r = prng_lg_range_u64(&tdata->prng_state, 53);
-	u = (double)r * (1.0/9007199254740992.0L);
-	tdata->bytes_until_sample = (uint64_t)(log(u) /
-	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
-	    + (uint64_t)1U;
-#endif
-}
-
-#ifdef JEMALLOC_JET
-static prof_tdata_t *
-prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
-{
-	size_t *tdata_count = (size_t *)arg;
-
-	(*tdata_count)++;
-
-	return (NULL);
-}
-
-size_t
-prof_tdata_count(void)
-{
-	size_t tdata_count = 0;
-	tsdn_t *tsdn;
-
-	tsdn = tsdn_fetch();
-	malloc_mutex_lock(tsdn, &tdatas_mtx);
-	tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
-	    (void *)&tdata_count);
-	malloc_mutex_unlock(tsdn, &tdatas_mtx);
-
-	return (tdata_count);
-}
-#endif
-
-#ifdef JEMALLOC_JET
-size_t
-prof_bt_count(void)
-{
-	size_t bt_count;
-	tsd_t *tsd;
-	prof_tdata_t *tdata;
-
-	tsd = tsd_fetch();
-	tdata = prof_tdata_get(tsd, false);
-	if (tdata == NULL)
-		return (0);
-
-	malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
-	bt_count = ckh_count(&bt2gctx);
-	malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
-
-	return (bt_count);
-}
-#endif
-
-#ifdef JEMALLOC_JET
-#undef prof_dump_open
-#define	prof_dump_open JEMALLOC_N(prof_dump_open_impl)
-#endif
-static int
-prof_dump_open(bool propagate_err, const char *filename)
-{
-	int fd;
-
-	fd = creat(filename, 0644);
-	if (fd == -1 && !propagate_err) {
-		malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
-		    filename);
-		if (opt_abort)
-			abort();
-	}
-
-	return (fd);
-}
-#ifdef JEMALLOC_JET
-#undef prof_dump_open
-#define	prof_dump_open JEMALLOC_N(prof_dump_open)
-prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
-#endif
-
-static bool
-prof_dump_flush(bool propagate_err)
-{
-	bool ret = false;
-	ssize_t err;
-
-	cassert(config_prof);
-
-	err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
-	if (err == -1) {
-		if (!propagate_err) {
-			malloc_write("<jemalloc>: write() failed during heap "
-			    "profile flush\n");
-			if (opt_abort)
-				abort();
-		}
-		ret = true;
-	}
-	prof_dump_buf_end = 0;
-
-	return (ret);
-}
-
-static bool
-prof_dump_close(bool propagate_err)
-{
-	bool ret;
-
-	assert(prof_dump_fd != -1);
-	ret = prof_dump_flush(propagate_err);
-	close(prof_dump_fd);
-	prof_dump_fd = -1;
-
-	return (ret);
-}
-
-static bool
-prof_dump_write(bool propagate_err, const char *s)
-{
-	size_t i, slen, n;
-
-	cassert(config_prof);
-
-	i = 0;
-	slen = strlen(s);
-	while (i < slen) {
-		/* Flush the buffer if it is full. */
-		if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
-			if (prof_dump_flush(propagate_err) && propagate_err)
-				return (true);
-
-		if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
-			/* Finish writing. */
-			n = slen - i;
-		} else {
-			/* Write as much of s as will fit. */
-			n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
-		}
-		memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
-		prof_dump_buf_end += n;
-		i += n;
-	}
-
-	return (false);
-}
-
-JEMALLOC_FORMAT_PRINTF(2, 3)
-static bool
-prof_dump_printf(bool propagate_err, const char *format, ...)
-{
-	bool ret;
-	va_list ap;
-	char buf[PROF_PRINTF_BUFSIZE];
-
-	va_start(ap, format);
-	malloc_vsnprintf(buf, sizeof(buf), format, ap);
-	va_end(ap);
-	ret = prof_dump_write(propagate_err, buf);
-
-	return (ret);
-}
-
-static void
-prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
-{
-	malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
-
-	malloc_mutex_lock(tsdn, tctx->gctx->lock);
-
-	switch (tctx->state) {
-	case prof_tctx_state_initializing:
-		malloc_mutex_unlock(tsdn, tctx->gctx->lock);
-		return;
-	case prof_tctx_state_nominal:
-		tctx->state = prof_tctx_state_dumping;
-		malloc_mutex_unlock(tsdn, tctx->gctx->lock);
-
-		memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
-
-		tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
-		tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
-		if (opt_prof_accum) {
-			tdata->cnt_summed.accumobjs +=
-			    tctx->dump_cnts.accumobjs;
-			tdata->cnt_summed.accumbytes +=
-			    tctx->dump_cnts.accumbytes;
-		}
-		break;
-	case prof_tctx_state_dumping:
-	case prof_tctx_state_purgatory:
-		not_reached();
-	}
-}
-
-static void
-prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
-{
-	malloc_mutex_assert_owner(tsdn, gctx->lock);
-
-	gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
-	gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
-	if (opt_prof_accum) {
-		gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
-		gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
-	}
-}
-
-static prof_tctx_t *
-prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
-{
-	tsdn_t *tsdn = (tsdn_t *)arg;
-
-	malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
-
-	switch (tctx->state) {
-	case prof_tctx_state_nominal:
-		/* New since dumping started; ignore. */
-		break;
-	case prof_tctx_state_dumping:
-	case prof_tctx_state_purgatory:
-		prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
-		break;
-	default:
-		not_reached();
-	}
-
-	return (NULL);
-}
-
-struct prof_tctx_dump_iter_arg_s {
-	tsdn_t	*tsdn;
-	bool	propagate_err;
-};
-
-static prof_tctx_t *
-prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
-{
-	struct prof_tctx_dump_iter_arg_s *arg =
-	    (struct prof_tctx_dump_iter_arg_s *)opaque;
-
-	malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
-
-	switch (tctx->state) {
-	case prof_tctx_state_initializing:
-	case prof_tctx_state_nominal:
-		/* Not captured by this dump. */
-		break;
-	case prof_tctx_state_dumping:
-	case prof_tctx_state_purgatory:
-		if (prof_dump_printf(arg->propagate_err,
-		    "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
-		    "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
-		    tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
-		    tctx->dump_cnts.accumbytes))
-			return (tctx);
-		break;
-	default:
-		not_reached();
-	}
-	return (NULL);
-}
-
-static prof_tctx_t *
-prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
-{
-	tsdn_t *tsdn = (tsdn_t *)arg;
-	prof_tctx_t *ret;
-
-	malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
-
-	switch (tctx->state) {
-	case prof_tctx_state_nominal:
-		/* New since dumping started; ignore. */
-		break;
-	case prof_tctx_state_dumping:
-		tctx->state = prof_tctx_state_nominal;
-		break;
-	case prof_tctx_state_purgatory:
-		ret = tctx;
-		goto label_return;
-	default:
-		not_reached();
-	}
-
-	ret = NULL;
-label_return:
-	return (ret);
-}
-
-static void
-prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
-{
-	cassert(config_prof);
-
-	malloc_mutex_lock(tsdn, gctx->lock);
-
-	/*
-	 * Increment nlimbo so that gctx won't go away before dump.
-	 * Additionally, link gctx into the dump list so that it is included in
-	 * prof_dump()'s second pass.
-	 */
-	gctx->nlimbo++;
-	gctx_tree_insert(gctxs, gctx);
-
-	memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
-
-	malloc_mutex_unlock(tsdn, gctx->lock);
-}
-
-struct prof_gctx_merge_iter_arg_s {
-	tsdn_t	*tsdn;
-	size_t	leak_ngctx;
-};
-
-static prof_gctx_t *
-prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
-{
-	struct prof_gctx_merge_iter_arg_s *arg =
-	    (struct prof_gctx_merge_iter_arg_s *)opaque;
-
-	malloc_mutex_lock(arg->tsdn, gctx->lock);
-	tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
-	    (void *)arg->tsdn);
-	if (gctx->cnt_summed.curobjs != 0)
-		arg->leak_ngctx++;
-	malloc_mutex_unlock(arg->tsdn, gctx->lock);
-
-	return (NULL);
-}
-
-static void
-prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
-{
-	prof_tdata_t *tdata = prof_tdata_get(tsd, false);
-	prof_gctx_t *gctx;
-
-	/*
-	 * Standard tree iteration won't work here, because as soon as we
-	 * decrement gctx->nlimbo and unlock gctx, another thread can
-	 * concurrently destroy it, which will corrupt the tree.  Therefore,
-	 * tear down the tree one node at a time during iteration.
-	 */
-	while ((gctx = gctx_tree_first(gctxs)) != NULL) {
-		gctx_tree_remove(gctxs, gctx);
-		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
-		{
-			prof_tctx_t *next;
-
-			next = NULL;
-			do {
-				prof_tctx_t *to_destroy =
-				    tctx_tree_iter(&gctx->tctxs, next,
-				    prof_tctx_finish_iter,
-				    (void *)tsd_tsdn(tsd));
-				if (to_destroy != NULL) {
-					next = tctx_tree_next(&gctx->tctxs,
-					    to_destroy);
-					tctx_tree_remove(&gctx->tctxs,
-					    to_destroy);
-					idalloctm(tsd_tsdn(tsd),
-					    iealloc(tsd_tsdn(tsd), to_destroy),
-					    to_destroy, NULL, true, true);
-				} else
-					next = NULL;
-			} while (next != NULL);
-		}
-		gctx->nlimbo--;
-		if (prof_gctx_should_destroy(gctx)) {
-			gctx->nlimbo++;
-			malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
-			prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
-		} else
-			malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
-	}
-}
-
-struct prof_tdata_merge_iter_arg_s {
-	tsdn_t		*tsdn;
-	prof_cnt_t	cnt_all;
-};
-
-static prof_tdata_t *
-prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
-    void *opaque)
-{
-	struct prof_tdata_merge_iter_arg_s *arg =
-	    (struct prof_tdata_merge_iter_arg_s *)opaque;
-
-	malloc_mutex_lock(arg->tsdn, tdata->lock);
-	if (!tdata->expired) {
-		size_t tabind;
-		union {
-			prof_tctx_t	*p;
-			void		*v;
-		} tctx;
-
-		tdata->dumping = true;
-		memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
-		for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
-		    &tctx.v);)
-			prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
-
-		arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
-		arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
-		if (opt_prof_accum) {
-			arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
-			arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
-		}
-	} else
-		tdata->dumping = false;
-	malloc_mutex_unlock(arg->tsdn, tdata->lock);
-
-	return (NULL);
-}
-
-static prof_tdata_t *
-prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
-{
-	bool propagate_err = *(bool *)arg;
-
-	if (!tdata->dumping)
-		return (NULL);
-
-	if (prof_dump_printf(propagate_err,
-	    "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
-	    tdata->thr_uid, tdata->cnt_summed.curobjs,
-	    tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
-	    tdata->cnt_summed.accumbytes,
-	    (tdata->thread_name != NULL) ? " " : "",
-	    (tdata->thread_name != NULL) ? tdata->thread_name : ""))
-		return (tdata);
-	return (NULL);
-}
-
-#ifdef JEMALLOC_JET
-#undef prof_dump_header
-#define	prof_dump_header JEMALLOC_N(prof_dump_header_impl)
-#endif
-static bool
-prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
-{
-	bool ret;
-
-	if (prof_dump_printf(propagate_err,
-	    "heap_v2/%"FMTu64"\n"
-	    "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
-	    ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
-	    cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
-		return (true);
-
-	malloc_mutex_lock(tsdn, &tdatas_mtx);
-	ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
-	    (void *)&propagate_err) != NULL);
-	malloc_mutex_unlock(tsdn, &tdatas_mtx);
-	return (ret);
-}
-#ifdef JEMALLOC_JET
-#undef prof_dump_header
-#define	prof_dump_header JEMALLOC_N(prof_dump_header)
-prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
-#endif
-
-static bool
-prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
-    const prof_bt_t *bt, prof_gctx_tree_t *gctxs)
-{
-	bool ret;
-	unsigned i;
-	struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
-
-	cassert(config_prof);
-	malloc_mutex_assert_owner(tsdn, gctx->lock);
-
-	/* Avoid dumping such gctx's that have no useful data. */
-	if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
-	    (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
-		assert(gctx->cnt_summed.curobjs == 0);
-		assert(gctx->cnt_summed.curbytes == 0);
-		assert(gctx->cnt_summed.accumobjs == 0);
-		assert(gctx->cnt_summed.accumbytes == 0);
-		ret = false;
-		goto label_return;
-	}
-
-	if (prof_dump_printf(propagate_err, "@")) {
-		ret = true;
-		goto label_return;
-	}
-	for (i = 0; i < bt->len; i++) {
-		if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
-		    (uintptr_t)bt->vec[i])) {
-			ret = true;
-			goto label_return;
-		}
-	}
-
-	if (prof_dump_printf(propagate_err,
-	    "\n"
-	    "  t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
-	    gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
-	    gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
-		ret = true;
-		goto label_return;
-	}
-
-	prof_tctx_dump_iter_arg.tsdn = tsdn;
-	prof_tctx_dump_iter_arg.propagate_err = propagate_err;
-	if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
-	    (void *)&prof_tctx_dump_iter_arg) != NULL) {
-		ret = true;
-		goto label_return;
-	}
-
-	ret = false;
-label_return:
-	return (ret);
-}
-
-#ifndef _WIN32
-JEMALLOC_FORMAT_PRINTF(1, 2)
-static int
-prof_open_maps(const char *format, ...)
-{
-	int mfd;
-	va_list ap;
-	char filename[PATH_MAX + 1];
-
-	va_start(ap, format);
-	malloc_vsnprintf(filename, sizeof(filename), format, ap);
-	va_end(ap);
-	mfd = open(filename, O_RDONLY);
-
-	return (mfd);
-}
-#endif
-
-static int
-prof_getpid(void)
-{
-#ifdef _WIN32
-	return (GetCurrentProcessId());
-#else
-	return (getpid());
-#endif
-}
-
-static bool
-prof_dump_maps(bool propagate_err)
-{
-	bool ret;
-	int mfd;
-
-	cassert(config_prof);
-#ifdef __FreeBSD__
-	mfd = prof_open_maps("/proc/curproc/map");
-#elif defined(_WIN32)
-	mfd = -1; // Not implemented
-#else
-	{
-		int pid = prof_getpid();
-
-		mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
-		if (mfd == -1)
-			mfd = prof_open_maps("/proc/%d/maps", pid);
-	}
-#endif
-	if (mfd != -1) {
-		ssize_t nread;
-
-		if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
-		    propagate_err) {
-			ret = true;
-			goto label_return;
-		}
-		nread = 0;
-		do {
-			prof_dump_buf_end += nread;
-			if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
-				/* Make space in prof_dump_buf before read(). */
-				if (prof_dump_flush(propagate_err) &&
-				    propagate_err) {
-					ret = true;
-					goto label_return;
-				}
-			}
-			nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
-			    PROF_DUMP_BUFSIZE - prof_dump_buf_end);
-		} while (nread > 0);
-	} else {
-		ret = true;
-		goto label_return;
-	}
-
-	ret = false;
-label_return:
-	if (mfd != -1)
-		close(mfd);
-	return (ret);
-}
-
-/*
- * See prof_sample_threshold_update() comment for why the body of this function
- * is conditionally compiled.
- */
-static void
-prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
-    const char *filename)
-{
-#ifdef JEMALLOC_PROF
-	/*
-	 * Scaling is equivalent AdjustSamples() in jeprof, but the result may
-	 * differ slightly from what jeprof reports, because here we scale the
-	 * summary values, whereas jeprof scales each context individually and
-	 * reports the sums of the scaled values.
-	 */
-	if (cnt_all->curbytes != 0) {
-		double sample_period = (double)((uint64_t)1 << lg_prof_sample);
-		double ratio = (((double)cnt_all->curbytes) /
-		    (double)cnt_all->curobjs) / sample_period;
-		double scale_factor = 1.0 / (1.0 - exp(-ratio));
-		uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
-		    * scale_factor);
-		uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
-		    scale_factor);
-
-		malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
-		    " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
-		    curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
-		    1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
-		malloc_printf(
-		    "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
-		    filename);
-	}
-#endif
-}
-
-struct prof_gctx_dump_iter_arg_s {
-	tsdn_t	*tsdn;
-	bool	propagate_err;
-};
-
-static prof_gctx_t *
-prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
-{
-	prof_gctx_t *ret;
-	struct prof_gctx_dump_iter_arg_s *arg =
-	    (struct prof_gctx_dump_iter_arg_s *)opaque;
-
-	malloc_mutex_lock(arg->tsdn, gctx->lock);
-
-	if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
-	    gctxs)) {
-		ret = gctx;
-		goto label_return;
-	}
-
-	ret = NULL;
-label_return:
-	malloc_mutex_unlock(arg->tsdn, gctx->lock);
-	return (ret);
-}
-
-static void
-prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata,
-    struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
-    struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
-    prof_gctx_tree_t *gctxs)
-{
-	size_t tabind;
-	union {
-		prof_gctx_t	*p;
-		void		*v;
-	} gctx;
-
-	prof_enter(tsd, tdata);
-
-	/*
-	 * Put gctx's in limbo and clear their counters in preparation for
-	 * summing.
-	 */
-	gctx_tree_new(gctxs);
-	for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
-		prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
-	}
-
-	/*
-	 * Iterate over tdatas, and for the non-expired ones snapshot their tctx
-	 * stats and merge them into the associated gctx's.
-	 */
-	prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd);
-	memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t));
-	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
-	tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
-	    (void *)prof_tdata_merge_iter_arg);
-	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
-
-	/* Merge tctx stats into gctx's. */
-	prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd);
-	prof_gctx_merge_iter_arg->leak_ngctx = 0;
-	gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
-	    (void *)prof_gctx_merge_iter_arg);
-
-	prof_leave(tsd, tdata);
-}
-
-static bool
-prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename,
-    bool leakcheck, prof_tdata_t *tdata,
-    struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
-    struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
-    struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg,
-    prof_gctx_tree_t *gctxs)
-{
-	/* Create dump file. */
-	if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) {
-		return true;
-	}
-
-	/* Dump profile header. */
-	if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
-	    &prof_tdata_merge_iter_arg->cnt_all)) {
-		goto label_write_error;
-	}
-
-	/* Dump per gctx profile stats. */
-	prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd);
-	prof_gctx_dump_iter_arg->propagate_err = propagate_err;
-	if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter,
-	    (void *)prof_gctx_dump_iter_arg) != NULL) {
-		goto label_write_error;
-	}
-
-	/* Dump /proc/<pid>/maps if possible. */
-	if (prof_dump_maps(propagate_err)) {
-		goto label_write_error;
-	}
-
-	if (prof_dump_close(propagate_err)) {
-		return true;
-	}
-
-	return false;
-label_write_error:
-	prof_dump_close(propagate_err);
-	return true;
-}
-
-static bool
-prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
-{
-	prof_tdata_t *tdata;
-	struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
-	struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
-	struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
-	prof_gctx_tree_t gctxs;
-	bool err;
-
-	cassert(config_prof);
-
-	tdata = prof_tdata_get(tsd, true);
-	if (tdata == NULL) {
-		return true;
-	}
-
-	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
-
-	prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
-	    &prof_gctx_merge_iter_arg, &gctxs);
-	err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata,
-	    &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg,
-	    &prof_gctx_dump_iter_arg, &gctxs);
-	prof_gctx_finish(tsd, &gctxs);
-
-	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
-
-	if (err) {
-		return true;
-	}
-
-	if (leakcheck) {
-		prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
-		    prof_gctx_merge_iter_arg.leak_ngctx, filename);
-	}
-	return false;
-}
-
-#ifdef JEMALLOC_JET
-void
-prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
-    uint64_t *accumbytes)
-{
-	tsd_t *tsd;
-	prof_tdata_t *tdata;
-	struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
-	struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
-	prof_gctx_tree_t gctxs;
-
-	tsd = tsd_fetch();
-	tdata = prof_tdata_get(tsd, false);
-	if (tdata == NULL) {
-		if (curobjs != NULL) {
-			*curobjs = 0;
-		}
-		if (curbytes != NULL) {
-			*curbytes = 0;
-		}
-		if (accumobjs != NULL) {
-			*accumobjs = 0;
-		}
-		if (accumbytes != NULL) {
-			*accumbytes = 0;
-		}
-		return;
-	}
-
-	prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
-	    &prof_gctx_merge_iter_arg, &gctxs);
-	prof_gctx_finish(tsd, &gctxs);
-
-	if (curobjs != NULL) {
-		*curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs;
-	}
-	if (curbytes != NULL) {
-		*curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes;
-	}
-	if (accumobjs != NULL) {
-		*accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs;
-	}
-	if (accumbytes != NULL) {
-		*accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes;
-	}
-}
-#endif
-
-#define	DUMP_FILENAME_BUFSIZE	(PATH_MAX + 1)
-#define	VSEQ_INVALID		UINT64_C(0xffffffffffffffff)
-static void
-prof_dump_filename(char *filename, char v, uint64_t vseq)
-{
-	cassert(config_prof);
-
-	if (vseq != VSEQ_INVALID) {
-	        /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
-		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
-		    "%s.%d.%"FMTu64".%c%"FMTu64".heap",
-		    opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
-	} else {
-	        /* "<prefix>.<pid>.<seq>.<v>.heap" */
-		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
-		    "%s.%d.%"FMTu64".%c.heap",
-		    opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
-	}
-	prof_dump_seq++;
-}
-
-static void
-prof_fdump(void)
-{
-	tsd_t *tsd;
-	char filename[DUMP_FILENAME_BUFSIZE];
-
-	cassert(config_prof);
-	assert(opt_prof_final);
-	assert(opt_prof_prefix[0] != '\0');
-
-	if (!prof_booted)
-		return;
-	tsd = tsd_fetch();
-
-	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
-	prof_dump_filename(filename, 'f', VSEQ_INVALID);
-	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
-	prof_dump(tsd, false, filename, opt_prof_leak);
-}
-
-void
-prof_idump(tsdn_t *tsdn)
-{
-	tsd_t *tsd;
-	prof_tdata_t *tdata;
-
-	cassert(config_prof);
-
-	if (!prof_booted || tsdn_null(tsdn))
-		return;
-	tsd = tsdn_tsd(tsdn);
-	tdata = prof_tdata_get(tsd, false);
-	if (tdata == NULL)
-		return;
-	if (tdata->enq) {
-		tdata->enq_idump = true;
-		return;
-	}
-
-	if (opt_prof_prefix[0] != '\0') {
-		char filename[PATH_MAX + 1];
-		malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
-		prof_dump_filename(filename, 'i', prof_dump_iseq);
-		prof_dump_iseq++;
-		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
-		prof_dump(tsd, false, filename, false);
-	}
-}
-
-bool
-prof_mdump(tsd_t *tsd, const char *filename)
-{
-	char filename_buf[DUMP_FILENAME_BUFSIZE];
-
-	cassert(config_prof);
-
-	if (!opt_prof || !prof_booted)
-		return (true);
-
-	if (filename == NULL) {
-		/* No filename specified, so automatically generate one. */
-		if (opt_prof_prefix[0] == '\0')
-			return (true);
-		malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
-		prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
-		prof_dump_mseq++;
-		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
-		filename = filename_buf;
-	}
-	return (prof_dump(tsd, true, filename, false));
-}
-
-void
-prof_gdump(tsdn_t *tsdn)
-{
-	tsd_t *tsd;
-	prof_tdata_t *tdata;
-
-	cassert(config_prof);
-
-	if (!prof_booted || tsdn_null(tsdn))
-		return;
-	tsd = tsdn_tsd(tsdn);
-	tdata = prof_tdata_get(tsd, false);
-	if (tdata == NULL)
-		return;
-	if (tdata->enq) {
-		tdata->enq_gdump = true;
-		return;
-	}
-
-	if (opt_prof_prefix[0] != '\0') {
-		char filename[DUMP_FILENAME_BUFSIZE];
-		malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
-		prof_dump_filename(filename, 'u', prof_dump_useq);
-		prof_dump_useq++;
-		malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
-		prof_dump(tsd, false, filename, false);
-	}
-}
-
-static void
-prof_bt_hash(const void *key, size_t r_hash[2])
-{
-	prof_bt_t *bt = (prof_bt_t *)key;
-
-	cassert(config_prof);
-
-	hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
-}
-
-static bool
-prof_bt_keycomp(const void *k1, const void *k2)
-{
-	const prof_bt_t *bt1 = (prof_bt_t *)k1;
-	const prof_bt_t *bt2 = (prof_bt_t *)k2;
-
-	cassert(config_prof);
-
-	if (bt1->len != bt2->len)
-		return (false);
-	return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
-}
-
-JEMALLOC_INLINE_C uint64_t
-prof_thr_uid_alloc(tsdn_t *tsdn)
-{
-	uint64_t thr_uid;
-
-	malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
-	thr_uid = next_thr_uid;
-	next_thr_uid++;
-	malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
-
-	return (thr_uid);
-}
-
-static prof_tdata_t *
-prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
-    char *thread_name, bool active)
-{
-	prof_tdata_t *tdata;
-
-	cassert(config_prof);
-
-	/* Initialize an empty cache for this thread. */
-	tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
-	    size2index(sizeof(prof_tdata_t)), false, NULL, true,
-	    arena_get(TSDN_NULL, 0, true), true);
-	if (tdata == NULL)
-		return (NULL);
-
-	tdata->lock = prof_tdata_mutex_choose(thr_uid);
-	tdata->thr_uid = thr_uid;
-	tdata->thr_discrim = thr_discrim;
-	tdata->thread_name = thread_name;
-	tdata->attached = true;
-	tdata->expired = false;
-	tdata->tctx_uid_next = 0;
-
-	if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
-	    prof_bt_keycomp)) {
-		idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tdata), tdata,
-		    NULL, true, true);
-		return (NULL);
-	}
-
-	tdata->prng_state = (uint64_t)(uintptr_t)tdata;
-	prof_sample_threshold_update(tdata);
-
-	tdata->enq = false;
-	tdata->enq_idump = false;
-	tdata->enq_gdump = false;
-
-	tdata->dumping = false;
-	tdata->active = active;
-
-	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
-	tdata_tree_insert(&tdatas, tdata);
-	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
-
-	return (tdata);
-}
-
-prof_tdata_t *
-prof_tdata_init(tsd_t *tsd)
-{
-	return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
-	    NULL, prof_thread_active_init_get(tsd_tsdn(tsd))));
-}
-
-static bool
-prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
-{
-	if (tdata->attached && !even_if_attached)
-		return (false);
-	if (ckh_count(&tdata->bt2tctx) != 0)
-		return (false);
-	return (true);
-}
-
-static bool
-prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
-    bool even_if_attached)
-{
-	malloc_mutex_assert_owner(tsdn, tdata->lock);
-
-	return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
-}
-
-static void
-prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
-    bool even_if_attached)
-{
-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
-
-	tdata_tree_remove(&tdatas, tdata);
-
-	assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
-
-	if (tdata->thread_name != NULL) {
-		idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
-		    tdata->thread_name), tdata->thread_name, NULL, true, true);
-	}
-	ckh_delete(tsd, &tdata->bt2tctx);
-	idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tdata), tdata, NULL,
-	    true, true);
-}
-
-static void
-prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
-{
-	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
-	prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
-	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
-}
-
-static void
-prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
-{
-	bool destroy_tdata;
-
-	malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
-	if (tdata->attached) {
-		destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
-		    true);
-		/*
-		 * Only detach if !destroy_tdata, because detaching would allow
-		 * another thread to win the race to destroy tdata.
-		 */
-		if (!destroy_tdata)
-			tdata->attached = false;
-		tsd_prof_tdata_set(tsd, NULL);
-	} else
-		destroy_tdata = false;
-	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
-	if (destroy_tdata)
-		prof_tdata_destroy(tsd, tdata, true);
-}
-
-prof_tdata_t *
-prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
-{
-	uint64_t thr_uid = tdata->thr_uid;
-	uint64_t thr_discrim = tdata->thr_discrim + 1;
-	char *thread_name = (tdata->thread_name != NULL) ?
-	    prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
-	bool active = tdata->active;
-
-	prof_tdata_detach(tsd, tdata);
-	return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
-	    active));
-}
-
-static bool
-prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
-{
-	bool destroy_tdata;
-
-	malloc_mutex_lock(tsdn, tdata->lock);
-	if (!tdata->expired) {
-		tdata->expired = true;
-		destroy_tdata = tdata->attached ? false :
-		    prof_tdata_should_destroy(tsdn, tdata, false);
-	} else
-		destroy_tdata = false;
-	malloc_mutex_unlock(tsdn, tdata->lock);
-
-	return (destroy_tdata);
-}
-
-static prof_tdata_t *
-prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
-{
-	tsdn_t *tsdn = (tsdn_t *)arg;
-
-	return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
-}
-
-void
-prof_reset(tsd_t *tsd, size_t lg_sample)
-{
-	prof_tdata_t *next;
-
-	assert(lg_sample < (sizeof(uint64_t) << 3));
-
-	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
-	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
-
-	lg_prof_sample = lg_sample;
-
-	next = NULL;
-	do {
-		prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
-		    prof_tdata_reset_iter, (void *)tsd);
-		if (to_destroy != NULL) {
-			next = tdata_tree_next(&tdatas, to_destroy);
-			prof_tdata_destroy_locked(tsd, to_destroy, false);
-		} else
-			next = NULL;
-	} while (next != NULL);
-
-	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
-	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
-}
-
-void
-prof_tdata_cleanup(tsd_t *tsd)
-{
-	prof_tdata_t *tdata;
-
-	if (!config_prof)
-		return;
-
-	tdata = tsd_prof_tdata_get(tsd);
-	if (tdata != NULL)
-		prof_tdata_detach(tsd, tdata);
-}
-
-bool
-prof_active_get(tsdn_t *tsdn)
-{
-	bool prof_active_current;
-
-	malloc_mutex_lock(tsdn, &prof_active_mtx);
-	prof_active_current = prof_active;
-	malloc_mutex_unlock(tsdn, &prof_active_mtx);
-	return (prof_active_current);
-}
-
-bool
-prof_active_set(tsdn_t *tsdn, bool active)
-{
-	bool prof_active_old;
-
-	malloc_mutex_lock(tsdn, &prof_active_mtx);
-	prof_active_old = prof_active;
-	prof_active = active;
-	malloc_mutex_unlock(tsdn, &prof_active_mtx);
-	return (prof_active_old);
-}
-
-const char *
-prof_thread_name_get(tsd_t *tsd)
-{
-	prof_tdata_t *tdata;
-
-	tdata = prof_tdata_get(tsd, true);
-	if (tdata == NULL)
-		return ("");
-	return (tdata->thread_name != NULL ? tdata->thread_name : "");
-}
-
-static char *
-prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
-{
-	char *ret;
-	size_t size;
-
-	if (thread_name == NULL)
-		return (NULL);
-
-	size = strlen(thread_name) + 1;
-	if (size == 1)
-		return (char*)("");
-
-	ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
-	    arena_get(TSDN_NULL, 0, true), true);
-	if (ret == NULL)
-		return (NULL);
-	memcpy(ret, thread_name, size);
-	return (ret);
-}
-
-int
-prof_thread_name_set(tsd_t *tsd, const char *thread_name)
-{
-	prof_tdata_t *tdata;
-	unsigned i;
-	char *s;
-
-	tdata = prof_tdata_get(tsd, true);
-	if (tdata == NULL)
-		return (EAGAIN);
-
-	/* Validate input. */
-	if (thread_name == NULL)
-		return (EFAULT);
-	for (i = 0; thread_name[i] != '\0'; i++) {
-		char c = thread_name[i];
-		if (!isgraph(c) && !isblank(c))
-			return (EFAULT);
-	}
-
-	s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
-	if (s == NULL)
-		return (EAGAIN);
-
-	if (tdata->thread_name != NULL) {
-		idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
-		    tdata->thread_name), tdata->thread_name, NULL, true, true);
-		tdata->thread_name = NULL;
-	}
-	if (strlen(s) > 0)
-		tdata->thread_name = s;
-	return (0);
-}
-
-bool
-prof_thread_active_get(tsd_t *tsd)
-{
-	prof_tdata_t *tdata;
-
-	tdata = prof_tdata_get(tsd, true);
-	if (tdata == NULL)
-		return (false);
-	return (tdata->active);
-}
-
-bool
-prof_thread_active_set(tsd_t *tsd, bool active)
-{
-	prof_tdata_t *tdata;
-
-	tdata = prof_tdata_get(tsd, true);
-	if (tdata == NULL)
-		return (true);
-	tdata->active = active;
-	return (false);
-}
-
-bool
-prof_thread_active_init_get(tsdn_t *tsdn)
-{
-	bool active_init;
-
-	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
-	active_init = prof_thread_active_init;
-	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
-	return (active_init);
-}
-
-bool
-prof_thread_active_init_set(tsdn_t *tsdn, bool active_init)
-{
-	bool active_init_old;
-
-	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
-	active_init_old = prof_thread_active_init;
-	prof_thread_active_init = active_init;
-	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
-	return (active_init_old);
-}
-
-bool
-prof_gdump_get(tsdn_t *tsdn)
-{
-	bool prof_gdump_current;
-
-	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
-	prof_gdump_current = prof_gdump_val;
-	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
-	return (prof_gdump_current);
-}
-
-bool
-prof_gdump_set(tsdn_t *tsdn, bool gdump)
-{
-	bool prof_gdump_old;
-
-	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
-	prof_gdump_old = prof_gdump_val;
-	prof_gdump_val = gdump;
-	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
-	return (prof_gdump_old);
-}
-
-void
-prof_boot0(void)
-{
-	cassert(config_prof);
-
-	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
-	    sizeof(PROF_PREFIX_DEFAULT));
-}
-
-void
-prof_boot1(void)
-{
-	cassert(config_prof);
-
-	/*
-	 * opt_prof must be in its final state before any arenas are
-	 * initialized, so this function must be executed early.
-	 */
-
-	if (opt_prof_leak && !opt_prof) {
-		/*
-		 * Enable opt_prof, but in such a way that profiles are never
-		 * automatically dumped.
-		 */
-		opt_prof = true;
-		opt_prof_gdump = false;
-	} else if (opt_prof) {
-		if (opt_lg_prof_interval >= 0) {
-			prof_interval = (((uint64_t)1U) <<
-			    opt_lg_prof_interval);
-		}
-	}
-}
-
-bool
-prof_boot2(tsd_t *tsd)
-{
-	cassert(config_prof);
-
-	if (opt_prof) {
-		unsigned i;
-
-		lg_prof_sample = opt_lg_prof_sample;
-
-		prof_active = opt_prof_active;
-		if (malloc_mutex_init(&prof_active_mtx, "prof_active",
-		    WITNESS_RANK_PROF_ACTIVE))
-			return (true);
-
-		prof_gdump_val = opt_prof_gdump;
-		if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
-		    WITNESS_RANK_PROF_GDUMP))
-			return (true);
-
-		prof_thread_active_init = opt_prof_thread_active_init;
-		if (malloc_mutex_init(&prof_thread_active_init_mtx,
-		    "prof_thread_active_init",
-		    WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
-			return (true);
-
-		if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
-		    prof_bt_keycomp))
-			return (true);
-		if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
-		    WITNESS_RANK_PROF_BT2GCTX))
-			return (true);
-
-		tdata_tree_new(&tdatas);
-		if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
-		    WITNESS_RANK_PROF_TDATAS))
-			return (true);
-
-		next_thr_uid = 0;
-		if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
-		    WITNESS_RANK_PROF_NEXT_THR_UID))
-			return (true);
-
-		if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
-		    WITNESS_RANK_PROF_DUMP_SEQ))
-			return (true);
-		if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
-		    WITNESS_RANK_PROF_DUMP))
-			return (true);
-
-		if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
-		    atexit(prof_fdump) != 0) {
-			malloc_write("<jemalloc>: Error in atexit()\n");
-			if (opt_abort)
-				abort();
-		}
-
-		gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
-		    b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
-		    CACHELINE);
-		if (gctx_locks == NULL)
-			return (true);
-		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
-			if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
-			    WITNESS_RANK_PROF_GCTX))
-				return (true);
-		}
-
-		tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
-		    b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
-		    CACHELINE);
-		if (tdata_locks == NULL)
-			return (true);
-		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
-			if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
-			    WITNESS_RANK_PROF_TDATA))
-				return (true);
-		}
-	}
-
-#ifdef JEMALLOC_PROF_LIBGCC
-	/*
-	 * Cause the backtracing machinery to allocate its internal state
-	 * before enabling profiling.
-	 */
-	_Unwind_Backtrace(prof_unwind_init_callback, NULL);
-#endif
-
-	prof_booted = true;
-
-	return (false);
-}
-
-void
-prof_prefork0(tsdn_t *tsdn)
-{
-	if (opt_prof) {
-		unsigned i;
-
-		malloc_mutex_prefork(tsdn, &prof_dump_mtx);
-		malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
-		malloc_mutex_prefork(tsdn, &tdatas_mtx);
-		for (i = 0; i < PROF_NTDATA_LOCKS; i++)
-			malloc_mutex_prefork(tsdn, &tdata_locks[i]);
-		for (i = 0; i < PROF_NCTX_LOCKS; i++)
-			malloc_mutex_prefork(tsdn, &gctx_locks[i]);
-	}
-}
-
-void
-prof_prefork1(tsdn_t *tsdn)
-{
-	if (opt_prof) {
-		malloc_mutex_prefork(tsdn, &prof_active_mtx);
-		malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
-		malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
-		malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
-		malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
-	}
-}
-
-void
-prof_postfork_parent(tsdn_t *tsdn)
-{
-	if (opt_prof) {
-		unsigned i;
-
-		malloc_mutex_postfork_parent(tsdn,
-		    &prof_thread_active_init_mtx);
-		malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
-		malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
-		malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
-		malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
-		for (i = 0; i < PROF_NCTX_LOCKS; i++)
-			malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
-		for (i = 0; i < PROF_NTDATA_LOCKS; i++)
-			malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
-		malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
-		malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
-		malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
-	}
-}
-
-void
-prof_postfork_child(tsdn_t *tsdn)
-{
-	if (opt_prof) {
-		unsigned i;
-
-		malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
-		malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
-		malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
-		malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
-		malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
-		for (i = 0; i < PROF_NCTX_LOCKS; i++)
-			malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
-		for (i = 0; i < PROF_NTDATA_LOCKS; i++)
-			malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
-		malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
-		malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
-		malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
-	}
-}
-
-/******************************************************************************/
diff --git a/zircon/third_party/ulib/jemalloc/src/rtree.c b/zircon/third_party/ulib/jemalloc/src/rtree.c
deleted file mode 100644
index 43f21652..0000000
--- a/zircon/third_party/ulib/jemalloc/src/rtree.c
+++ /dev/null
@@ -1,281 +0,0 @@
-#define	JEMALLOC_RTREE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-static unsigned
-hmin(unsigned ha, unsigned hb)
-{
-	return (ha < hb ? ha : hb);
-}
-
-/*
- * Only the most significant bits of keys passed to rtree_{read,write}() are
- * used.
- */
-bool
-rtree_new(rtree_t *rtree, unsigned bits)
-{
-	unsigned bits_in_leaf, height, i;
-
-	assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) /
-	    RTREE_BITS_PER_LEVEL));
-	assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
-
-	bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
-	    : (bits % RTREE_BITS_PER_LEVEL);
-	if (bits > bits_in_leaf) {
-		height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL;
-		if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits)
-			height++;
-	} else
-		height = 1;
-	assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits);
-
-	rtree->height = height;
-
-	/* Root level. */
-	rtree->levels[0].subtree = NULL;
-	rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL :
-	    bits_in_leaf;
-	rtree->levels[0].cumbits = rtree->levels[0].bits;
-	/* Interior levels. */
-	for (i = 1; i < height-1; i++) {
-		rtree->levels[i].subtree = NULL;
-		rtree->levels[i].bits = RTREE_BITS_PER_LEVEL;
-		rtree->levels[i].cumbits = rtree->levels[i-1].cumbits +
-		    RTREE_BITS_PER_LEVEL;
-	}
-	/* Leaf level. */
-	if (height > 1) {
-		rtree->levels[height-1].subtree = NULL;
-		rtree->levels[height-1].bits = bits_in_leaf;
-		rtree->levels[height-1].cumbits = bits;
-	}
-
-	/* Compute lookup table to be used by rtree_[ctx_]start_level(). */
-	for (i = 0; i < RTREE_HEIGHT_MAX; i++) {
-		rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height -
-		    1);
-	}
-	rtree->start_level[RTREE_HEIGHT_MAX] = 0;
-
-	malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE);
-
-	return (false);
-}
-
-#ifdef JEMALLOC_JET
-#undef rtree_node_alloc
-#define	rtree_node_alloc JEMALLOC_N(rtree_node_alloc_impl)
-#endif
-static rtree_elm_t *
-rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms)
-{
-	return ((rtree_elm_t *)base_alloc(tsdn, b0get(), nelms *
-	    sizeof(rtree_elm_t), CACHELINE));
-}
-#ifdef JEMALLOC_JET
-#undef rtree_node_alloc
-#define	rtree_node_alloc JEMALLOC_N(rtree_node_alloc)
-rtree_node_alloc_t *rtree_node_alloc = JEMALLOC_N(rtree_node_alloc_impl);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef rtree_node_dalloc
-#define	rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc_impl)
-#endif
-UNUSED static void
-rtree_node_dalloc(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node)
-{
-	/* Nodes are never deleted during normal operation. */
-	not_reached();
-}
-#ifdef JEMALLOC_JET
-#undef rtree_node_dalloc
-#define	rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc)
-rtree_node_dalloc_t *rtree_node_dalloc = JEMALLOC_N(rtree_node_dalloc_impl);
-#endif
-
-#ifdef JEMALLOC_JET
-static void
-rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node,
-    unsigned level)
-{
-	if (level + 1 < rtree->height) {
-		size_t nchildren, i;
-
-		nchildren = ZU(1) << rtree->levels[level].bits;
-		for (i = 0; i < nchildren; i++) {
-			rtree_elm_t *child = node[i].child;
-			if (child != NULL) {
-				rtree_delete_subtree(tsdn, rtree, child, level +
-				    1);
-			}
-		}
-	}
-	rtree_node_dalloc(tsdn, rtree, node);
-}
-
-void
-rtree_delete(tsdn_t *tsdn, rtree_t *rtree)
-{
-	unsigned i;
-
-	for (i = 0; i < rtree->height; i++) {
-		rtree_elm_t *subtree = rtree->levels[i].subtree;
-		if (subtree != NULL)
-			rtree_delete_subtree(tsdn, rtree, subtree, i);
-	}
-}
-#endif
-
-static rtree_elm_t *
-rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
-    rtree_elm_t **elmp)
-{
-	rtree_elm_t *node;
-
-	malloc_mutex_lock(tsdn, &rtree->init_lock);
-	node = atomic_read_p((void**)elmp);
-	if (node == NULL) {
-		node = rtree_node_alloc(tsdn, rtree, ZU(1) <<
-		    rtree->levels[level].bits);
-		if (node == NULL) {
-			malloc_mutex_unlock(tsdn, &rtree->init_lock);
-			return (NULL);
-		}
-		atomic_write_p((void **)elmp, node);
-	}
-	malloc_mutex_unlock(tsdn, &rtree->init_lock);
-
-	return (node);
-}
-
-rtree_elm_t *
-rtree_subtree_read_hard(tsdn_t *tsdn, rtree_t *rtree, unsigned level)
-{
-	return (rtree_node_init(tsdn, rtree, level,
-	    &rtree->levels[level].subtree));
-}
-
-rtree_elm_t *
-rtree_child_read_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm,
-    unsigned level)
-{
-	return (rtree_node_init(tsdn, rtree, level+1, &elm->child));
-}
-
-static int
-rtree_elm_witness_comp(const witness_t *a, void *oa, const witness_t *b,
-    void *ob)
-{
-	uintptr_t ka = (uintptr_t)oa;
-	uintptr_t kb = (uintptr_t)ob;
-
-	assert(ka != 0);
-	assert(kb != 0);
-
-	return ((ka > kb) - (ka < kb));
-}
-
-static witness_t *
-rtree_elm_witness_alloc(tsd_t *tsd, uintptr_t key, const rtree_elm_t *elm)
-{
-	witness_t *witness;
-	size_t i;
-	rtree_elm_witness_tsd_t *witnesses = tsd_rtree_elm_witnessesp_get(tsd);
-
-	/* Iterate over entire array to detect double allocation attempts. */
-	witness = NULL;
-	for (i = 0; i < sizeof(rtree_elm_witness_tsd_t) / sizeof(witness_t);
-	    i++) {
-		rtree_elm_witness_t *rew = &witnesses->witnesses[i];
-
-		assert(rew->elm != elm);
-		if (rew->elm == NULL && witness == NULL) {
-			rew->elm = elm;
-			witness = &rew->witness;
-			witness_init(witness, "rtree_elm",
-			    WITNESS_RANK_RTREE_ELM, rtree_elm_witness_comp,
-			    (void *)key);
-		}
-	}
-	assert(witness != NULL);
-	return (witness);
-}
-
-static witness_t *
-rtree_elm_witness_find(tsd_t *tsd, const rtree_elm_t *elm)
-{
-	size_t i;
-	rtree_elm_witness_tsd_t *witnesses = tsd_rtree_elm_witnessesp_get(tsd);
-
-	for (i = 0; i < sizeof(rtree_elm_witness_tsd_t) / sizeof(witness_t);
-	    i++) {
-		rtree_elm_witness_t *rew = &witnesses->witnesses[i];
-
-		if (rew->elm == elm)
-			return (&rew->witness);
-	}
-	not_reached();
-}
-
-static void
-rtree_elm_witness_dalloc(tsd_t *tsd, witness_t *witness, const rtree_elm_t *elm)
-{
-	size_t i;
-	rtree_elm_witness_tsd_t *witnesses = tsd_rtree_elm_witnessesp_get(tsd);
-
-	for (i = 0; i < sizeof(rtree_elm_witness_tsd_t) / sizeof(witness_t);
-	    i++) {
-		rtree_elm_witness_t *rew = &witnesses->witnesses[i];
-
-		if (rew->elm == elm) {
-			rew->elm = NULL;
-			witness_init(&rew->witness, "rtree_elm",
-			    WITNESS_RANK_RTREE_ELM, rtree_elm_witness_comp,
-			    NULL);
-			    return;
-		}
-	}
-	not_reached();
-}
-
-void
-rtree_elm_witness_acquire(tsdn_t *tsdn, const rtree_t *rtree, uintptr_t key,
-    const rtree_elm_t *elm)
-{
-	witness_t *witness;
-
-	if (tsdn_null(tsdn))
-		return;
-
-	witness = rtree_elm_witness_alloc(tsdn_tsd(tsdn), key, elm);
-	witness_lock(tsdn, witness);
-}
-
-void
-rtree_elm_witness_access(tsdn_t *tsdn, const rtree_t *rtree,
-    const rtree_elm_t *elm)
-{
-	witness_t *witness;
-
-	if (tsdn_null(tsdn))
-		return;
-
-	witness = rtree_elm_witness_find(tsdn_tsd(tsdn), elm);
-	witness_assert_owner(tsdn, witness);
-}
-
-void
-rtree_elm_witness_release(tsdn_t *tsdn, const rtree_t *rtree,
-    const rtree_elm_t *elm)
-{
-	witness_t *witness;
-
-	if (tsdn_null(tsdn))
-		return;
-
-	witness = rtree_elm_witness_find(tsdn_tsd(tsdn), elm);
-	witness_unlock(tsdn, witness);
-	rtree_elm_witness_dalloc(tsdn_tsd(tsdn), witness, elm);
-}
diff --git a/zircon/third_party/ulib/jemalloc/src/spin.c b/zircon/third_party/ulib/jemalloc/src/spin.c
deleted file mode 100644
index 5242d95..0000000
--- a/zircon/third_party/ulib/jemalloc/src/spin.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define	JEMALLOC_SPIN_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/zircon/third_party/ulib/jemalloc/src/stats.c b/zircon/third_party/ulib/jemalloc/src/stats.c
deleted file mode 100644
index 020d56b..0000000
--- a/zircon/third_party/ulib/jemalloc/src/stats.c
+++ /dev/null
@@ -1,1029 +0,0 @@
-#define	JEMALLOC_STATS_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-#define	CTL_GET(n, v, t) do {						\
-	size_t sz = sizeof(t);						\
-	xmallctl(n, (void *)v, &sz, NULL, 0);				\
-} while (0)
-
-#define	CTL_M2_GET(n, i, v, t) do {					\
-	size_t mib[6];							\
-	size_t miblen = sizeof(mib) / sizeof(size_t);			\
-	size_t sz = sizeof(t);						\
-	xmallctlnametomib(n, mib, &miblen);				\
-	mib[2] = (i);							\
-	xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0);		\
-} while (0)
-
-#define	CTL_M2_M4_GET(n, i, j, v, t) do {				\
-	size_t mib[6];							\
-	size_t miblen = sizeof(mib) / sizeof(size_t);			\
-	size_t sz = sizeof(t);						\
-	xmallctlnametomib(n, mib, &miblen);				\
-	mib[2] = (i);							\
-	mib[4] = (j);							\
-	xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0);		\
-} while (0)
-
-/******************************************************************************/
-/* Data. */
-
-bool	opt_stats_print = false;
-
-/******************************************************************************/
-
-static void
-stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
-    bool json, bool large, unsigned i)
-{
-	size_t page;
-	bool in_gap, in_gap_prev;
-	unsigned nbins, j;
-
-	CTL_GET("arenas.page", &page, size_t);
-
-	CTL_GET("arenas.nbins", &nbins, unsigned);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"bins\": [\n");
-	} else {
-		if (config_tcache) {
-			malloc_cprintf(write_cb, cbopaque,
-			    "bins:           size ind    allocated      nmalloc"
-			    "      ndalloc    nrequests      curregs"
-			    "     curslabs regs pgs  util       nfills"
-			    "     nflushes     newslabs      reslabs\n");
-		} else {
-			malloc_cprintf(write_cb, cbopaque,
-			    "bins:           size ind    allocated      nmalloc"
-			    "      ndalloc    nrequests      curregs"
-			    "     curslabs regs pgs  util     newslabs"
-			    "      reslabs\n");
-		}
-	}
-	for (j = 0, in_gap = false; j < nbins; j++) {
-		uint64_t nslabs;
-		size_t reg_size, slab_size, curregs;
-		size_t curslabs;
-		uint32_t nregs;
-		uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
-		uint64_t nreslabs;
-
-		CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs,
-		    uint64_t);
-		in_gap_prev = in_gap;
-		in_gap = (nslabs == 0);
-
-		if (!json && in_gap_prev && !in_gap) {
-			malloc_cprintf(write_cb, cbopaque,
-			    "                     ---\n");
-		}
-
-		CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
-		CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
-		CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t);
-
-		CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
-		    uint64_t);
-		CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc,
-		    uint64_t);
-		CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs,
-		    size_t);
-		CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
-		    &nrequests, uint64_t);
-		if (config_tcache) {
-			CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j,
-			    &nfills, uint64_t);
-			CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j,
-			    &nflushes, uint64_t);
-		}
-		CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs,
-		    uint64_t);
-		CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs,
-		    size_t);
-
-		if (json) {
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t\t\t\t{\n"
-			    "\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n"
-			    "\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n"
-			    "\t\t\t\t\t\t\"curregs\": %zu,\n"
-			    "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n",
-			    nmalloc,
-			    ndalloc,
-			    curregs,
-			    nrequests);
-			if (config_tcache) {
-				malloc_cprintf(write_cb, cbopaque,
-				    "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n"
-				    "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n",
-				    nfills,
-				    nflushes);
-			}
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t\t\t\t\t\"nreslabs\": %"FMTu64",\n"
-			    "\t\t\t\t\t\t\"curslabs\": %zu\n"
-			    "\t\t\t\t\t}%s\n",
-			    nreslabs,
-			    curslabs,
-			    (j + 1 < nbins) ? "," : "");
-		} else if (!in_gap) {
-			size_t availregs, milli;
-			char util[6]; /* "x.yyy". */
-
-			availregs = nregs * curslabs;
-			milli = (availregs != 0) ? (1000 * curregs) / availregs
-			    : 1000;
-			assert(milli <= 1000);
-			if (milli < 10) {
-				malloc_snprintf(util, sizeof(util),
-				    "0.00%zu", milli);
-			} else if (milli < 100) {
-				malloc_snprintf(util, sizeof(util), "0.0%zu",
-				    milli);
-			} else if (milli < 1000) {
-				malloc_snprintf(util, sizeof(util), "0.%zu",
-				    milli);
-			} else
-				malloc_snprintf(util, sizeof(util), "1");
-
-			if (config_tcache) {
-				malloc_cprintf(write_cb, cbopaque,
-				    "%20zu %3u %12zu %12"FMTu64
-				    " %12"FMTu64" %12"FMTu64" %12zu"
-				    " %12zu %4u %3zu %-5s %12"FMTu64
-				    " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n",
-				    reg_size, j, curregs * reg_size, nmalloc,
-				    ndalloc, nrequests, curregs, curslabs,
-				    nregs, slab_size / page, util, nfills,
-				    nflushes, nslabs, nreslabs);
-			} else {
-				malloc_cprintf(write_cb, cbopaque,
-				    "%20zu %3u %12zu %12"FMTu64
-				    " %12"FMTu64" %12"FMTu64" %12zu"
-				    " %12zu %4u %3zu %-5s %12"FMTu64
-				    " %12"FMTu64"\n",
-				    reg_size, j, curregs * reg_size, nmalloc,
-				    ndalloc, nrequests, curregs, curslabs,
-				    nregs, slab_size / page, util, nslabs,
-				    nreslabs);
-			}
-		}
-	}
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t]%s\n", large ? "," : "");
-	} else {
-		if (in_gap) {
-			malloc_cprintf(write_cb, cbopaque,
-			    "                     ---\n");
-		}
-	}
-}
-
-static void
-stats_arena_lextents_print(void (*write_cb)(void *, const char *),
-    void *cbopaque, bool json, unsigned i)
-{
-	unsigned nbins, nlextents, j;
-	bool in_gap, in_gap_prev;
-
-	CTL_GET("arenas.nbins", &nbins, unsigned);
-	CTL_GET("arenas.nlextents", &nlextents, unsigned);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"lextents\": [\n");
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "large:          size ind    allocated      nmalloc"
-		    "      ndalloc    nrequests  curlextents\n");
-	}
-	for (j = 0, in_gap = false; j < nlextents; j++) {
-		uint64_t nmalloc, ndalloc, nrequests;
-		size_t lextent_size, curlextents;
-
-		CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j,
-		    &nmalloc, uint64_t);
-		CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j,
-		    &ndalloc, uint64_t);
-		CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j,
-		    &nrequests, uint64_t);
-		in_gap_prev = in_gap;
-		in_gap = (nrequests == 0);
-
-		if (!json && in_gap_prev && !in_gap) {
-			malloc_cprintf(write_cb, cbopaque,
-			    "                     ---\n");
-		}
-
-		CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t);
-		CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j,
-		    &curlextents, size_t);
-		if (json) {
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t\t\t\t{\n"
-			    "\t\t\t\t\t\t\"curlextents\": %zu\n"
-			    "\t\t\t\t\t}%s\n",
-			    curlextents,
-			    (j + 1 < nlextents) ? "," : "");
-		} else if (!in_gap) {
-			malloc_cprintf(write_cb, cbopaque,
-			    "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
-			    " %12"FMTu64" %12zu\n",
-			    lextent_size, nbins + j,
-			    curlextents * lextent_size, nmalloc, ndalloc,
-			    nrequests, curlextents);
-		}
-	}
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t]\n");
-	} else {
-		if (in_gap) {
-			malloc_cprintf(write_cb, cbopaque,
-			    "                     ---\n");
-		}
-	}
-}
-
-static void
-stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
-    bool json, unsigned i, bool bins, bool large)
-{
-	unsigned nthreads;
-	const char *dss;
-	ssize_t decay_time;
-	size_t page, pactive, pdirty, mapped, retained;
-	size_t base, internal, resident;
-	uint64_t npurge, nmadvise, purged;
-	size_t small_allocated;
-	uint64_t small_nmalloc, small_ndalloc, small_nrequests;
-	size_t large_allocated;
-	uint64_t large_nmalloc, large_ndalloc, large_nrequests;
-	size_t tcache_bytes;
-
-	CTL_GET("arenas.page", &page, size_t);
-
-	CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"nthreads\": %u,\n", nthreads);
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "assigned threads: %u\n", nthreads);
-	}
-
-	CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"dss\": \"%s\",\n", dss);
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "dss allocation precedence: %s\n", dss);
-	}
-
-	CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"decay_time\": %zd,\n", decay_time);
-	} else {
-		if (decay_time >= 0) {
-			malloc_cprintf(write_cb, cbopaque, "decay time: %zd\n",
-			    decay_time);
-		} else
-			malloc_cprintf(write_cb, cbopaque, "decay time: N/A\n");
-	}
-
-	CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
-	CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
-	CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t);
-	CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
-	CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"pactive\": %zu,\n", pactive);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"pdirty\": %zu,\n", pdirty);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"npurge\": %"FMTu64",\n", npurge);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"nmadvise\": %"FMTu64",\n", nmadvise);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"purged\": %"FMTu64",\n", purged);
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64
-		    ", purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged);
-	}
-
-	CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
-	    size_t);
-	CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
-	CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t);
-	CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests,
-	    uint64_t);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"small\": {\n");
-
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\t\"allocated\": %zu,\n", small_allocated);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests);
-
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t},\n");
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "                            allocated      nmalloc"
-		    "      ndalloc    nrequests\n");
-		malloc_cprintf(write_cb, cbopaque,
-		    "small:                   %12zu %12"FMTu64" %12"FMTu64
-		    " %12"FMTu64"\n",
-		    small_allocated, small_nmalloc, small_ndalloc,
-		    small_nrequests);
-	}
-
-	CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
-	    size_t);
-	CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
-	CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
-	CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
-	    uint64_t);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"large\": {\n");
-
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\t\"allocated\": %zu,\n", large_allocated);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests);
-
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t},\n");
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "large:                   %12zu %12"FMTu64" %12"FMTu64
-		    " %12"FMTu64"\n",
-		    large_allocated, large_nmalloc, large_ndalloc,
-		    large_nrequests);
-		malloc_cprintf(write_cb, cbopaque,
-		    "total:                   %12zu %12"FMTu64" %12"FMTu64
-		    " %12"FMTu64"\n",
-		    small_allocated + large_allocated, small_nmalloc +
-		    large_nmalloc, small_ndalloc + large_ndalloc,
-		    small_nrequests + large_nrequests);
-	}
-	if (!json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "active:                  %12zu\n", pactive * page);
-	}
-
-	CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"mapped\": %zu,\n", mapped);
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "mapped:                  %12zu\n", mapped);
-	}
-
-	CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"retained\": %zu,\n", retained);
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "retained:                %12zu\n", retained);
-	}
-
-	CTL_M2_GET("stats.arenas.0.base", i, &base, size_t);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"base\": %zu,\n", base);
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "base:                    %12zu\n", base);
-	}
-
-	CTL_M2_GET("stats.arenas.0.internal", i, &internal, size_t);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"internal\": %zu,\n", internal);
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "internal:                %12zu\n", internal);
-	}
-
-	if (config_tcache) {
-		CTL_M2_GET("stats.arenas.0.tcache_bytes", i, &tcache_bytes,
-		    size_t);
-		if (json) {
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t\t\t\"tcache\": %zu,\n", tcache_bytes);
-		} else {
-			malloc_cprintf(write_cb, cbopaque,
-			    "tcache:                  %12zu\n", tcache_bytes);
-		}
-	}
-
-	CTL_M2_GET("stats.arenas.0.resident", i, &resident, size_t);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\t\"resident\": %zu%s\n", resident, (bins || large) ?
-		    "," : "");
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "resident:                %12zu\n", resident);
-	}
-
-	if (bins)
-		stats_arena_bins_print(write_cb, cbopaque, json, large, i);
-	if (large)
-		stats_arena_lextents_print(write_cb, cbopaque, json, i);
-}
-
-static void
-stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
-    bool json, bool more)
-{
-	const char *cpv;
-	bool bv;
-	unsigned uv;
-	uint32_t u32v;
-	uint64_t u64v;
-	ssize_t ssv;
-	size_t sv, bsz, usz, ssz, sssz, cpsz;
-
-	bsz = sizeof(bool);
-	usz = sizeof(unsigned);
-	ssz = sizeof(size_t);
-	sssz = sizeof(ssize_t);
-	cpsz = sizeof(const char *);
-
-	CTL_GET("version", &cpv, const char *);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		"\t\t\"version\": \"%s\",\n", cpv);
-	} else
-		malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
-
-	/* config. */
-#define	CONFIG_WRITE_BOOL_JSON(n, c)					\
-	if (json) {							\
-		CTL_GET("config."#n, &bv, bool);			\
-		malloc_cprintf(write_cb, cbopaque,			\
-		    "\t\t\t\""#n"\": %s%s\n", bv ? "true" : "false",	\
-		    (c));						\
-	}
-
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\"config\": {\n");
-	}
-
-	CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",")
-
-	CTL_GET("config.debug", &bv, bool);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"debug\": %s,\n", bv ? "true" : "false");
-	} else {
-		malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
-		    bv ? "enabled" : "disabled");
-	}
-
-	CONFIG_WRITE_BOOL_JSON(fill, ",")
-	CONFIG_WRITE_BOOL_JSON(lazy_lock, ",")
-
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"malloc_conf\": \"%s\",\n",
-		    config_malloc_conf);
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "config.malloc_conf: \"%s\"\n", config_malloc_conf);
-	}
-
-	CONFIG_WRITE_BOOL_JSON(munmap, ",")
-	CONFIG_WRITE_BOOL_JSON(prof, ",")
-	CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",")
-	CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",")
-	CONFIG_WRITE_BOOL_JSON(stats, ",")
-	CONFIG_WRITE_BOOL_JSON(tcache, ",")
-	CONFIG_WRITE_BOOL_JSON(tls, ",")
-	CONFIG_WRITE_BOOL_JSON(utrace, ",")
-	CONFIG_WRITE_BOOL_JSON(xmalloc, "")
-
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t},\n");
-	}
-#undef CONFIG_WRITE_BOOL_JSON
-
-	/* opt. */
-#define	OPT_WRITE_BOOL(n, c)						\
-	if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) {	\
-		if (json) {						\
-			malloc_cprintf(write_cb, cbopaque,		\
-			    "\t\t\t\""#n"\": %s%s\n", bv ? "true" :	\
-			    "false", (c));				\
-		} else {						\
-			malloc_cprintf(write_cb, cbopaque,		\
-			    "  opt."#n": %s\n", bv ? "true" : "false");	\
-		}							\
-	}
-#define	OPT_WRITE_BOOL_MUTABLE(n, m, c) {				\
-	bool bv2;							\
-	if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 &&	\
-	    je_mallctl(#m, (void *)&bv2, &bsz, NULL, 0) == 0) {		\
-		if (json) {						\
-			malloc_cprintf(write_cb, cbopaque,		\
-			    "\t\t\t\""#n"\": %s%s\n", bv ? "true" :	\
-			    "false", (c));				\
-		} else {						\
-			malloc_cprintf(write_cb, cbopaque,		\
-			    "  opt."#n": %s ("#m": %s)\n", bv ? "true"	\
-			    : "false", bv2 ? "true" : "false");		\
-		}							\
-	}								\
-}
-#define	OPT_WRITE_UNSIGNED(n, c)					\
-	if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) {	\
-		if (json) {						\
-			malloc_cprintf(write_cb, cbopaque,		\
-			    "\t\t\t\""#n"\": %u%s\n", uv, (c));		\
-		} else {						\
-			malloc_cprintf(write_cb, cbopaque,		\
-			"  opt."#n": %u\n", uv);			\
-		}							\
-	}
-#define	OPT_WRITE_SSIZE_T(n, c)						\
-	if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) {	\
-		if (json) {						\
-			malloc_cprintf(write_cb, cbopaque,		\
-			    "\t\t\t\""#n"\": %zd%s\n", ssv, (c));	\
-		} else {						\
-			malloc_cprintf(write_cb, cbopaque,		\
-			    "  opt."#n": %zd\n", ssv);			\
-		}							\
-	}
-#define	OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) {				\
-	ssize_t ssv2;							\
-	if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 &&	\
-	    je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) {	\
-		if (json) {						\
-			malloc_cprintf(write_cb, cbopaque,		\
-			    "\t\t\t\""#n"\": %zd%s\n", ssv, (c));	\
-		} else {						\
-			malloc_cprintf(write_cb, cbopaque,		\
-			    "  opt."#n": %zd ("#m": %zd)\n",		\
-			    ssv, ssv2);					\
-		}							\
-	}								\
-}
-#define	OPT_WRITE_CHAR_P(n, c)						\
-	if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) {	\
-		if (json) {						\
-			malloc_cprintf(write_cb, cbopaque,		\
-			    "\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c));	\
-		} else {						\
-			malloc_cprintf(write_cb, cbopaque,		\
-			    "  opt."#n": \"%s\"\n", cpv);		\
-		}							\
-	}
-
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\"opt\": {\n");
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "Run-time option settings:\n");
-	}
-	OPT_WRITE_BOOL(abort, ",")
-	OPT_WRITE_CHAR_P(dss, ",")
-	OPT_WRITE_UNSIGNED(narenas, ",")
-	OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time, ",")
-	OPT_WRITE_CHAR_P(junk, ",")
-	OPT_WRITE_BOOL(zero, ",")
-	OPT_WRITE_BOOL(utrace, ",")
-	OPT_WRITE_BOOL(xmalloc, ",")
-	OPT_WRITE_BOOL(tcache, ",")
-	OPT_WRITE_SSIZE_T(lg_tcache_max, ",")
-	OPT_WRITE_BOOL(prof, ",")
-	OPT_WRITE_CHAR_P(prof_prefix, ",")
-	OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",")
-	OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init,
-	    ",")
-	OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",")
-	OPT_WRITE_BOOL(prof_accum, ",")
-	OPT_WRITE_SSIZE_T(lg_prof_interval, ",")
-	OPT_WRITE_BOOL(prof_gdump, ",")
-	OPT_WRITE_BOOL(prof_final, ",")
-	OPT_WRITE_BOOL(prof_leak, ",")
-	/*
-	 * stats_print is always emitted, so as long as stats_print comes last
-	 * it's safe to unconditionally omit the comma here (rather than having
-	 * to conditionally omit it elsewhere depending on configuration).
-	 */
-	OPT_WRITE_BOOL(stats_print, "")
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t},\n");
-	}
-
-#undef OPT_WRITE_BOOL
-#undef OPT_WRITE_BOOL_MUTABLE
-#undef OPT_WRITE_SSIZE_T
-#undef OPT_WRITE_CHAR_P
-
-	/* arenas. */
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\"arenas\": {\n");
-	}
-
-	CTL_GET("arenas.narenas", &uv, unsigned);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"narenas\": %u,\n", uv);
-	} else
-		malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
-
-	CTL_GET("arenas.decay_time", &ssv, ssize_t);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"decay_time\": %zd,\n", ssv);
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "Unused dirty page decay time: %zd%s\n", ssv, (ssv < 0) ?
-		    " (no decay)" : "");
-	}
-
-	CTL_GET("arenas.quantum", &sv, size_t);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"quantum\": %zu,\n", sv);
-	} else
-		malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
-
-	CTL_GET("arenas.page", &sv, size_t);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"page\": %zu,\n", sv);
-	} else
-		malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
-
-	if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
-		if (json) {
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t\t\"tcache_max\": %zu,\n", sv);
-		} else {
-			malloc_cprintf(write_cb, cbopaque,
-			    "Maximum thread-cached size class: %zu\n", sv);
-		}
-	}
-
-	if (json) {
-		unsigned nbins, nlextents, i;
-
-		CTL_GET("arenas.nbins", &nbins, unsigned);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"nbins\": %u,\n", nbins);
-
-		if (config_tcache) {
-			CTL_GET("arenas.nhbins", &uv, unsigned);
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t\t\"nhbins\": %u,\n", uv);
-		}
-
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"bin\": [\n");
-		for (i = 0; i < nbins; i++) {
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t\t\t{\n");
-
-			CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t\t\t\t\"size\": %zu,\n", sv);
-
-			CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v);
-
-			CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t);
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t\t\t\t\"slab_size\": %zu\n", sv);
-
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : "");
-		}
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t],\n");
-
-		CTL_GET("arenas.nlextents", &nlextents, unsigned);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"nlextents\": %u,\n", nlextents);
-
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"lextent\": [\n");
-		for (i = 0; i < nlextents; i++) {
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t\t\t{\n");
-
-			CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t);
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t\t\t\t\"size\": %zu\n", sv);
-
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t\t\t}%s\n", (i + 1 < nlextents) ? "," : "");
-		}
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t]\n");
-
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t}%s\n", (config_prof || more) ? "," : "");
-	}
-
-	/* prof. */
-	if (config_prof && json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\"prof\": {\n");
-
-		CTL_GET("prof.thread_active_init", &bv, bool);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"thread_active_init\": %s,\n", bv ? "true" :
-		    "false");
-
-		CTL_GET("prof.active", &bv, bool);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"active\": %s,\n", bv ? "true" : "false");
-
-		CTL_GET("prof.gdump", &bv, bool);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"gdump\": %s,\n", bv ? "true" : "false");
-
-		CTL_GET("prof.interval", &u64v, uint64_t);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"interval\": %"FMTu64",\n", u64v);
-
-		CTL_GET("prof.lg_sample", &ssv, ssize_t);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"lg_sample\": %zd\n", ssv);
-
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t}%s\n", more ? "," : "");
-	}
-}
-
-static void
-stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
-    bool json, bool merged, bool destroyed, bool unmerged, bool bins,
-    bool large)
-{
-	size_t allocated, active, metadata, resident, mapped, retained;
-
-	CTL_GET("stats.allocated", &allocated, size_t);
-	CTL_GET("stats.active", &active, size_t);
-	CTL_GET("stats.metadata", &metadata, size_t);
-	CTL_GET("stats.resident", &resident, size_t);
-	CTL_GET("stats.mapped", &mapped, size_t);
-	CTL_GET("stats.retained", &retained, size_t);
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\"stats\": {\n");
-
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"allocated\": %zu,\n", allocated);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"active\": %zu,\n", active);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"metadata\": %zu,\n", metadata);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"resident\": %zu,\n", resident);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"mapped\": %zu,\n", mapped);
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t\t\"retained\": %zu\n", retained);
-
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t\t}%s\n", (merged || unmerged) ? "," : "");
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "Allocated: %zu, active: %zu, metadata: %zu,"
-		    " resident: %zu, mapped: %zu, retained: %zu\n",
-		    allocated, active, metadata, resident, mapped, retained);
-	}
-
-	if (merged || destroyed || unmerged) {
-		unsigned narenas;
-
-		if (json) {
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t\"stats.arenas\": {\n");
-		}
-
-		CTL_GET("arenas.narenas", &narenas, unsigned);
-		{
-			size_t mib[3];
-			size_t miblen = sizeof(mib) / sizeof(size_t);
-			size_t sz;
-			VARIABLE_ARRAY(bool, initialized, narenas);
-			bool destroyed_initialized;
-			unsigned i, j, ninitialized;
-
-			xmallctlnametomib("arena.0.initialized", mib, &miblen);
-			for (i = ninitialized = 0; i < narenas; i++) {
-				mib[1] = i;
-				sz = sizeof(bool);
-				xmallctlbymib(mib, miblen, &initialized[i], &sz,
-				    NULL, 0);
-				if (initialized[i])
-					ninitialized++;
-			}
-			mib[1] = MALLCTL_ARENAS_DESTROYED;
-			sz = sizeof(bool);
-			xmallctlbymib(mib, miblen, &destroyed_initialized, &sz,
-			    NULL, 0);
-
-			/* Merged stats. */
-			if (merged && (ninitialized > 1 || !unmerged)) {
-				/* Print merged arena stats. */
-				if (json) {
-					malloc_cprintf(write_cb, cbopaque,
-					    "\t\t\t\"merged\": {\n");
-				} else {
-					malloc_cprintf(write_cb, cbopaque,
-					    "\nMerged arenas stats:\n");
-				}
-				stats_arena_print(write_cb, cbopaque, json,
-				    MALLCTL_ARENAS_ALL, bins, large);
-				if (json) {
-					malloc_cprintf(write_cb, cbopaque,
-					    "\t\t\t}%s\n",
-					    ((destroyed_initialized &&
-					    destroyed) || unmerged) ?  "," :
-					    "");
-				}
-			}
-
-			/* Destroyed stats. */
-			if (destroyed_initialized && destroyed) {
-				/* Print destroyed arena stats. */
-				if (json) {
-					malloc_cprintf(write_cb, cbopaque,
-					    "\t\t\t\"destroyed\": {\n");
-				} else {
-					malloc_cprintf(write_cb, cbopaque,
-					    "\nDestroyed arenas stats:\n");
-				}
-				stats_arena_print(write_cb, cbopaque, json,
-				    MALLCTL_ARENAS_DESTROYED, bins, large);
-				if (json) {
-					malloc_cprintf(write_cb, cbopaque,
-					    "\t\t\t}%s\n", unmerged ?  "," :
-					    "");
-				}
-			}
-
-			/* Unmerged stats. */
-			if (unmerged) {
-				for (i = j = 0; i < narenas; i++) {
-					if (initialized[i]) {
-						if (json) {
-							j++;
-							malloc_cprintf(write_cb,
-							    cbopaque,
-							    "\t\t\t\"%u\": {\n",
-							    i);
-						} else {
-							malloc_cprintf(write_cb,
-							    cbopaque,
-							    "\narenas[%u]:\n",
-							    i);
-						}
-						stats_arena_print(write_cb,
-						    cbopaque, json, i, bins,
-						    large);
-						if (json) {
-							malloc_cprintf(write_cb,
-							    cbopaque,
-							    "\t\t\t}%s\n", (j <
-							    ninitialized) ? ","
-							    : "");
-						}
-					}
-				}
-			}
-		}
-
-		if (json) {
-			malloc_cprintf(write_cb, cbopaque,
-			    "\t\t}\n");
-		}
-	}
-}
-
-void
-stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
-    const char *opts)
-{
-	int err;
-	uint64_t epoch;
-	size_t u64sz;
-	bool json = false;
-	bool general = true;
-	bool merged = config_stats;
-	bool destroyed = config_stats;
-	bool unmerged = config_stats;
-	bool bins = true;
-	bool large = true;
-
-	/*
-	 * Refresh stats, in case mallctl() was called by the application.
-	 *
-	 * Check for OOM here, since refreshing the ctl cache can trigger
-	 * allocation.  In practice, none of the subsequent mallctl()-related
-	 * calls in this function will cause OOM if this one succeeds.
-	 * */
-	epoch = 1;
-	u64sz = sizeof(uint64_t);
-	err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
-	    sizeof(uint64_t));
-	if (err != 0) {
-		if (err == EAGAIN) {
-			malloc_write("<jemalloc>: Memory allocation failure in "
-			    "mallctl(\"epoch\", ...)\n");
-			return;
-		}
-		malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
-		    "...)\n");
-		abort();
-	}
-
-	if (opts != NULL) {
-		unsigned i;
-
-		for (i = 0; opts[i] != '\0'; i++) {
-			switch (opts[i]) {
-			case 'J':
-				json = true;
-				break;
-			case 'g':
-				general = false;
-				break;
-			case 'm':
-				merged = false;
-				break;
-			case 'd':
-				destroyed = false;
-				break;
-			case 'a':
-				unmerged = false;
-				break;
-			case 'b':
-				bins = false;
-				break;
-			case 'l':
-				large = false;
-				break;
-			default:;
-			}
-		}
-	}
-
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "{\n"
-		    "\t\"jemalloc\": {\n");
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "___ Begin jemalloc statistics ___\n");
-	}
-
-	if (general) {
-		bool more = (merged || unmerged);
-		stats_general_print(write_cb, cbopaque, json, more);
-	}
-	if (config_stats) {
-		stats_print_helper(write_cb, cbopaque, json, merged, destroyed,
-		    unmerged, bins, large);
-	}
-
-	if (json) {
-		malloc_cprintf(write_cb, cbopaque,
-		    "\t}\n"
-		    "}\n");
-	} else {
-		malloc_cprintf(write_cb, cbopaque,
-		    "--- End jemalloc statistics ---\n");
-	}
-}
diff --git a/zircon/third_party/ulib/jemalloc/src/tcache.c b/zircon/third_party/ulib/jemalloc/src/tcache.c
deleted file mode 100644
index d132341..0000000
--- a/zircon/third_party/ulib/jemalloc/src/tcache.c
+++ /dev/null
@@ -1,525 +0,0 @@
-#define	JEMALLOC_TCACHE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-bool	opt_tcache = true;
-ssize_t	opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
-
-tcache_bin_info_t	*tcache_bin_info;
-static unsigned		stack_nelms; /* Total stack elms per tcache. */
-
-unsigned		nhbins;
-size_t			tcache_maxclass;
-
-tcaches_t		*tcaches;
-
-/* Index of first element within tcaches that has never been used. */
-static unsigned		tcaches_past;
-
-/* Head of singly linked list tracking available tcaches elements. */
-static tcaches_t	*tcaches_avail;
-
-/******************************************************************************/
-
-size_t
-tcache_salloc(tsdn_t *tsdn, const void *ptr)
-{
-	return (arena_salloc(tsdn, iealloc(tsdn, ptr), ptr));
-}
-
-void
-tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
-{
-	szind_t binind = tcache->next_gc_bin;
-	tcache_bin_t *tbin = &tcache->tbins[binind];
-	tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
-
-	if (tbin->low_water > 0) {
-		/*
-		 * Flush (ceiling) 3/4 of the objects below the low water mark.
-		 */
-		if (binind < NBINS) {
-			tcache_bin_flush_small(tsd, tcache, tbin, binind,
-			    tbin->ncached - tbin->low_water + (tbin->low_water
-			    >> 2));
-		} else {
-			tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
-			    - tbin->low_water + (tbin->low_water >> 2), tcache);
-		}
-		/*
-		 * Reduce fill count by 2X.  Limit lg_fill_div such that the
-		 * fill count is always at least 1.
-		 */
-		if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
-			tbin->lg_fill_div++;
-	} else if (tbin->low_water < 0) {
-		/*
-		 * Increase fill count by 2X.  Make sure lg_fill_div stays
-		 * greater than 0.
-		 */
-		if (tbin->lg_fill_div > 1)
-			tbin->lg_fill_div--;
-	}
-	tbin->low_water = tbin->ncached;
-
-	tcache->next_gc_bin++;
-	if (tcache->next_gc_bin == nhbins)
-		tcache->next_gc_bin = 0;
-}
-
-void *
-tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
-    tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
-{
-	void *ret;
-
-	arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
-	    tcache->prof_accumbytes : 0);
-	if (config_prof)
-		tcache->prof_accumbytes = 0;
-	ret = tcache_alloc_easy(tbin, tcache_success);
-
-	return (ret);
-}
-
-void
-tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
-    szind_t binind, unsigned rem)
-{
-	arena_t *arena;
-	void *ptr;
-	unsigned i, nflush, ndeferred;
-	bool merged_stats = false;
-
-	assert(binind < NBINS);
-	assert(rem <= tbin->ncached);
-
-	arena = arena_choose(tsd, NULL);
-	assert(arena != NULL);
-	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
-		/* Lock the arena bin associated with the first object. */
-		extent_t *extent = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1));
-		arena_t *bin_arena = extent_arena_get(extent);
-		arena_bin_t *bin = &bin_arena->bins[binind];
-
-		if (config_prof && bin_arena == arena) {
-			if (arena_prof_accum(tsd_tsdn(tsd), arena,
-			    tcache->prof_accumbytes))
-				prof_idump(tsd_tsdn(tsd));
-			tcache->prof_accumbytes = 0;
-		}
-
-		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-		if (config_stats && bin_arena == arena) {
-			assert(!merged_stats);
-			merged_stats = true;
-			bin->stats.nflushes++;
-			bin->stats.nrequests += tbin->tstats.nrequests;
-			tbin->tstats.nrequests = 0;
-		}
-		ndeferred = 0;
-		for (i = 0; i < nflush; i++) {
-			ptr = *(tbin->avail - 1 - i);
-			assert(ptr != NULL);
-
-			extent = iealloc(tsd_tsdn(tsd), ptr);
-			if (extent_arena_get(extent) == bin_arena) {
-				arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
-				    bin_arena, extent, ptr);
-			} else {
-				/*
-				 * This object was allocated via a different
-				 * arena bin than the one that is currently
-				 * locked.  Stash the object, so that it can be
-				 * handled in a future pass.
-				 */
-				*(tbin->avail - 1 - ndeferred) = ptr;
-				ndeferred++;
-			}
-		}
-		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-		arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
-	}
-	if (config_stats && !merged_stats) {
-		/*
-		 * The flush loop didn't happen to flush to this thread's
-		 * arena, so the stats didn't get merged.  Manually do so now.
-		 */
-		arena_bin_t *bin = &arena->bins[binind];
-		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-		bin->stats.nflushes++;
-		bin->stats.nrequests += tbin->tstats.nrequests;
-		tbin->tstats.nrequests = 0;
-		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-	}
-
-	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
-	    sizeof(void *));
-	tbin->ncached = rem;
-	if ((int)tbin->ncached < tbin->low_water)
-		tbin->low_water = tbin->ncached;
-}
-
-void
-tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
-    unsigned rem, tcache_t *tcache)
-{
-	arena_t *arena;
-	void *ptr;
-	unsigned i, nflush, ndeferred;
-	bool merged_stats = false;
-
-	assert(binind < nhbins);
-	assert(rem <= tbin->ncached);
-
-	arena = arena_choose(tsd, NULL);
-	assert(arena != NULL);
-	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
-		/* Lock the arena associated with the first object. */
-		extent_t *extent = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1));
-		arena_t *locked_arena = extent_arena_get(extent);
-		UNUSED bool idump;
-
-		if (config_prof)
-			idump = false;
-		malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
-		if ((config_prof || config_stats) && locked_arena == arena) {
-			if (config_prof) {
-				idump = arena_prof_accum_locked(arena,
-				    tcache->prof_accumbytes);
-				tcache->prof_accumbytes = 0;
-			}
-			if (config_stats) {
-				merged_stats = true;
-				arena->stats.nrequests_large +=
-				    tbin->tstats.nrequests;
-				arena->stats.lstats[binind - NBINS].nrequests +=
-				    tbin->tstats.nrequests;
-				tbin->tstats.nrequests = 0;
-			}
-		}
-		ndeferred = 0;
-		for (i = 0; i < nflush; i++) {
-			ptr = *(tbin->avail - 1 - i);
-			assert(ptr != NULL);
-			extent = iealloc(tsd_tsdn(tsd), ptr);
-			if (extent_arena_get(extent) == locked_arena) {
-				large_dalloc_junked_locked(tsd_tsdn(tsd),
-				    extent);
-			} else {
-				/*
-				 * This object was allocated via a different
-				 * arena than the one that is currently locked.
-				 * Stash the object, so that it can be handled
-				 * in a future pass.
-				 */
-				*(tbin->avail - 1 - ndeferred) = ptr;
-				ndeferred++;
-			}
-		}
-		malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
-		if (config_prof && idump)
-			prof_idump(tsd_tsdn(tsd));
-		arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
-		    ndeferred);
-	}
-	if (config_stats && !merged_stats) {
-		/*
-		 * The flush loop didn't happen to flush to this thread's
-		 * arena, so the stats didn't get merged.  Manually do so now.
-		 */
-		malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
-		arena->stats.nrequests_large += tbin->tstats.nrequests;
-		arena->stats.lstats[binind - NBINS].nrequests +=
-		    tbin->tstats.nrequests;
-		tbin->tstats.nrequests = 0;
-		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
-	}
-
-	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
-	    sizeof(void *));
-	tbin->ncached = rem;
-	if ((int)tbin->ncached < tbin->low_water)
-		tbin->low_water = tbin->ncached;
-}
-
-static void
-tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
-{
-	if (config_stats) {
-		/* Link into list of extant tcaches. */
-		malloc_mutex_lock(tsdn, &arena->lock);
-		ql_elm_new(tcache, link);
-		ql_tail_insert(&arena->tcache_ql, tcache, link);
-		malloc_mutex_unlock(tsdn, &arena->lock);
-	}
-}
-
-static void
-tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
-{
-	if (config_stats) {
-		/* Unlink from list of extant tcaches. */
-		malloc_mutex_lock(tsdn, &arena->lock);
-		if (config_debug) {
-			bool in_ql = false;
-			tcache_t *iter;
-			ql_foreach(iter, &arena->tcache_ql, link) {
-				if (iter == tcache) {
-					in_ql = true;
-					break;
-				}
-			}
-			assert(in_ql);
-		}
-		ql_remove(&arena->tcache_ql, tcache, link);
-		tcache_stats_merge(tsdn, tcache, arena);
-		malloc_mutex_unlock(tsdn, &arena->lock);
-	}
-}
-
-void
-tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
-    arena_t *newarena)
-{
-	tcache_arena_dissociate(tsdn, tcache, oldarena);
-	tcache_arena_associate(tsdn, tcache, newarena);
-}
-
-tcache_t *
-tcache_get_hard(tsd_t *tsd)
-{
-	arena_t *arena;
-
-	if (!tcache_enabled_get()) {
-		if (tsd_nominal(tsd))
-			tcache_enabled_set(false); /* Memoize. */
-		return (NULL);
-	}
-	arena = arena_choose(tsd, NULL);
-	if (unlikely(arena == NULL))
-		return (NULL);
-	return (tcache_create(tsd_tsdn(tsd), arena));
-}
-
-tcache_t *
-tcache_create(tsdn_t *tsdn, arena_t *arena)
-{
-	tcache_t *tcache;
-	size_t size, stack_offset;
-	unsigned i;
-
-	size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
-	/* Naturally align the pointer stacks. */
-	size = PTR_CEILING(size);
-	stack_offset = size;
-	size += stack_nelms * sizeof(void *);
-	/* Avoid false cacheline sharing. */
-	size = sa2u(size, CACHELINE);
-
-	tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
-	    arena_get(TSDN_NULL, 0, true));
-	if (tcache == NULL)
-		return (NULL);
-
-	tcache_arena_associate(tsdn, tcache, arena);
-
-	ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
-
-	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
-	for (i = 0; i < nhbins; i++) {
-		tcache->tbins[i].lg_fill_div = 1;
-		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
-		/*
-		 * avail points past the available space.  Allocations will
-		 * access the slots toward higher addresses (for the benefit of
-		 * prefetch).
-		 */
-		tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
-		    (uintptr_t)stack_offset);
-	}
-
-	return (tcache);
-}
-
-static void
-tcache_destroy(tsd_t *tsd, tcache_t *tcache)
-{
-	arena_t *arena;
-	unsigned i;
-
-	arena = arena_choose(tsd, NULL);
-	tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena);
-
-	for (i = 0; i < NBINS; i++) {
-		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
-
-		if (config_stats) {
-			assert(tbin->tstats.nrequests == 0);
-		}
-	}
-
-	for (; i < nhbins; i++) {
-		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
-
-		if (config_stats) {
-			assert(tbin->tstats.nrequests == 0);
-		}
-	}
-
-	if (config_prof && tcache->prof_accumbytes > 0 &&
-	    arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
-		prof_idump(tsd_tsdn(tsd));
-
-	idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tcache), tcache, NULL,
-	    true, true);
-}
-
-void
-tcache_cleanup(tsd_t *tsd)
-{
-	tcache_t *tcache;
-
-	if (!config_tcache)
-		return;
-
-	if ((tcache = tsd_tcache_get(tsd)) != NULL) {
-		tcache_destroy(tsd, tcache);
-		tsd_tcache_set(tsd, NULL);
-	}
-}
-
-void
-tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
-{
-	unsigned i;
-
-	cassert(config_stats);
-
-	malloc_mutex_assert_owner(tsdn, &arena->lock);
-
-	/* Merge and reset tcache stats. */
-	for (i = 0; i < NBINS; i++) {
-		arena_bin_t *bin = &arena->bins[i];
-		tcache_bin_t *tbin = &tcache->tbins[i];
-		malloc_mutex_lock(tsdn, &bin->lock);
-		bin->stats.nrequests += tbin->tstats.nrequests;
-		malloc_mutex_unlock(tsdn, &bin->lock);
-		tbin->tstats.nrequests = 0;
-	}
-
-	for (; i < nhbins; i++) {
-		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
-		tcache_bin_t *tbin = &tcache->tbins[i];
-		arena->stats.nrequests_large += tbin->tstats.nrequests;
-		lstats->nrequests += tbin->tstats.nrequests;
-		tbin->tstats.nrequests = 0;
-	}
-}
-
-bool
-tcaches_create(tsd_t *tsd, unsigned *r_ind)
-{
-	arena_t *arena;
-	tcache_t *tcache;
-	tcaches_t *elm;
-
-	if (tcaches == NULL) {
-		tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
-		    * (MALLOCX_TCACHE_MAX+1), CACHELINE);
-		if (tcaches == NULL)
-			return (true);
-	}
-
-	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
-		return (true);
-	arena = arena_ichoose(tsd, NULL);
-	if (unlikely(arena == NULL))
-		return (true);
-	tcache = tcache_create(tsd_tsdn(tsd), arena);
-	if (tcache == NULL)
-		return (true);
-
-	if (tcaches_avail != NULL) {
-		elm = tcaches_avail;
-		tcaches_avail = tcaches_avail->next;
-		elm->tcache = tcache;
-		*r_ind = (unsigned)(elm - tcaches);
-	} else {
-		elm = &tcaches[tcaches_past];
-		elm->tcache = tcache;
-		*r_ind = tcaches_past;
-		tcaches_past++;
-	}
-
-	return (false);
-}
-
-static void
-tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
-{
-	if (elm->tcache == NULL)
-		return;
-	tcache_destroy(tsd, elm->tcache);
-	elm->tcache = NULL;
-}
-
-void
-tcaches_flush(tsd_t *tsd, unsigned ind)
-{
-	tcaches_elm_flush(tsd, &tcaches[ind]);
-}
-
-void
-tcaches_destroy(tsd_t *tsd, unsigned ind)
-{
-	tcaches_t *elm = &tcaches[ind];
-	tcaches_elm_flush(tsd, elm);
-	elm->next = tcaches_avail;
-	tcaches_avail = elm;
-}
-
-bool
-tcache_boot(tsdn_t *tsdn)
-{
-	unsigned i;
-
-	/* If necessary, clamp opt_lg_tcache_max. */
-	if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS)
-		tcache_maxclass = SMALL_MAXCLASS;
-	else
-		tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
-
-	nhbins = size2index(tcache_maxclass) + 1;
-
-	/* Initialize tcache_bin_info. */
-	tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
-	    * sizeof(tcache_bin_info_t), CACHELINE);
-	if (tcache_bin_info == NULL)
-		return (true);
-	stack_nelms = 0;
-	for (i = 0; i < NBINS; i++) {
-		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
-			tcache_bin_info[i].ncached_max =
-			    TCACHE_NSLOTS_SMALL_MIN;
-		} else if ((arena_bin_info[i].nregs << 1) <=
-		    TCACHE_NSLOTS_SMALL_MAX) {
-			tcache_bin_info[i].ncached_max =
-			    (arena_bin_info[i].nregs << 1);
-		} else {
-			tcache_bin_info[i].ncached_max =
-			    TCACHE_NSLOTS_SMALL_MAX;
-		}
-		stack_nelms += tcache_bin_info[i].ncached_max;
-	}
-	for (; i < nhbins; i++) {
-		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
-		stack_nelms += tcache_bin_info[i].ncached_max;
-	}
-
-	return (false);
-}
diff --git a/zircon/third_party/ulib/jemalloc/src/ticker.c b/zircon/third_party/ulib/jemalloc/src/ticker.c
deleted file mode 100644
index db09024..0000000
--- a/zircon/third_party/ulib/jemalloc/src/ticker.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define	JEMALLOC_TICKER_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/zircon/third_party/ulib/jemalloc/src/tsd.c b/zircon/third_party/ulib/jemalloc/src/tsd.c
deleted file mode 100644
index b4d7e79..0000000
--- a/zircon/third_party/ulib/jemalloc/src/tsd.c
+++ /dev/null
@@ -1,195 +0,0 @@
-#define	JEMALLOC_TSD_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-static unsigned ncleanups;
-static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
-
-malloc_tsd_data(, , tsd_t, TSD_INITIALIZER)
-
-/******************************************************************************/
-
-void *
-malloc_tsd_malloc(size_t size)
-{
-	return (a0malloc(CACHELINE_CEILING(size)));
-}
-
-void
-malloc_tsd_dalloc(void *wrapper)
-{
-	a0dalloc(wrapper);
-}
-
-void
-malloc_tsd_no_cleanup(void *arg)
-{
-	not_reached();
-}
-
-#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
-#ifndef _WIN32
-JEMALLOC_EXPORT
-#endif
-void
-_malloc_thread_cleanup(void)
-{
-	bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
-	unsigned i;
-
-	for (i = 0; i < ncleanups; i++)
-		pending[i] = true;
-
-	do {
-		again = false;
-		for (i = 0; i < ncleanups; i++) {
-			if (pending[i]) {
-				pending[i] = cleanups[i]();
-				if (pending[i])
-					again = true;
-			}
-		}
-	} while (again);
-}
-#endif
-
-void
-malloc_tsd_cleanup_register(bool (*f)(void))
-{
-	assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
-	cleanups[ncleanups] = f;
-	ncleanups++;
-}
-
-void
-tsd_cleanup(void *arg)
-{
-	tsd_t *tsd = (tsd_t *)arg;
-
-	switch (tsd->state) {
-	case tsd_state_uninitialized:
-		/* Do nothing. */
-		break;
-	case tsd_state_nominal:
-#define	MALLOC_TSD_cleanup_yes(n, t)					\
-		n##_cleanup(tsd);
-#define	MALLOC_TSD_cleanup_no(n, t)
-#define	O(n, t, c)							\
-		MALLOC_TSD_cleanup_##c(n, t)
-MALLOC_TSD
-#undef MALLOC_TSD_cleanup_yes
-#undef MALLOC_TSD_cleanup_no
-#undef O
-		tsd->state = tsd_state_purgatory;
-		tsd_set(tsd);
-		break;
-	case tsd_state_purgatory:
-		/*
-		 * The previous time this destructor was called, we set the
-		 * state to tsd_state_purgatory so that other destructors
-		 * wouldn't cause re-creation of the tsd.  This time, do
-		 * nothing, and do not request another callback.
-		 */
-		break;
-	case tsd_state_reincarnated:
-		/*
-		 * Another destructor deallocated memory after this destructor
-		 * was called.  Reset state to tsd_state_purgatory and request
-		 * another callback.
-		 */
-		tsd->state = tsd_state_purgatory;
-		tsd_set(tsd);
-		break;
-	default:
-		not_reached();
-	}
-}
-
-tsd_t *
-malloc_tsd_boot0(void)
-{
-	tsd_t *tsd;
-
-	ncleanups = 0;
-	if (tsd_boot0())
-		return (NULL);
-	tsd = tsd_fetch();
-	*tsd_arenas_tdata_bypassp_get(tsd) = true;
-	return (tsd);
-}
-
-void
-malloc_tsd_boot1(void)
-{
-	tsd_boot1();
-	*tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false;
-}
-
-#ifdef _WIN32
-static BOOL WINAPI
-_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
-{
-	switch (fdwReason) {
-#ifdef JEMALLOC_LAZY_LOCK
-	case DLL_THREAD_ATTACH:
-		isthreaded = true;
-		break;
-#endif
-	case DLL_THREAD_DETACH:
-		_malloc_thread_cleanup();
-		break;
-	default:
-		break;
-	}
-	return (true);
-}
-
-#ifdef _MSC_VER
-#  ifdef _M_IX86
-#    pragma comment(linker, "/INCLUDE:__tls_used")
-#    pragma comment(linker, "/INCLUDE:_tls_callback")
-#  else
-#    pragma comment(linker, "/INCLUDE:_tls_used")
-#    pragma comment(linker, "/INCLUDE:tls_callback")
-#  endif
-#  pragma section(".CRT$XLY",long,read)
-#endif
-JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
-BOOL	(WINAPI *const tls_callback)(HINSTANCE hinstDLL,
-    DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
-#endif
-
-#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
-    !defined(_WIN32))
-void *
-tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
-{
-	pthread_t self = pthread_self();
-	tsd_init_block_t *iter;
-
-	/* Check whether this thread has already inserted into the list. */
-	malloc_mutex_lock(TSDN_NULL, &head->lock);
-	ql_foreach(iter, &head->blocks, link) {
-		if (iter->thread == self) {
-			malloc_mutex_unlock(TSDN_NULL, &head->lock);
-			return (iter->data);
-		}
-	}
-	/* Insert block into list. */
-	ql_elm_new(block, link);
-	block->thread = self;
-	ql_tail_insert(&head->blocks, block, link);
-	malloc_mutex_unlock(TSDN_NULL, &head->lock);
-	return (NULL);
-}
-
-void
-tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
-{
-	malloc_mutex_lock(TSDN_NULL, &head->lock);
-	ql_remove(&head->blocks, block, link);
-	malloc_mutex_unlock(TSDN_NULL, &head->lock);
-}
-#endif
diff --git a/zircon/third_party/ulib/jemalloc/src/util.c b/zircon/third_party/ulib/jemalloc/src/util.c
deleted file mode 100644
index c6ac4e1..0000000
--- a/zircon/third_party/ulib/jemalloc/src/util.c
+++ /dev/null
@@ -1,661 +0,0 @@
-/*
- * Define simple versions of assertion macros that won't recurse in case
- * of assertion failures in malloc_*printf().
- */
-#define	assert(e) do {							\
-	if (config_debug && !(e)) {					\
-		malloc_write("<jemalloc>: Failed assertion\n");		\
-		abort();						\
-	}								\
-} while (0)
-
-#define	not_reached() do {						\
-	if (config_debug) {						\
-		malloc_write("<jemalloc>: Unreachable code reached\n");	\
-		abort();						\
-	}								\
-	unreachable();							\
-} while (0)
-
-#define	not_implemented() do {						\
-	if (config_debug) {						\
-		malloc_write("<jemalloc>: Not implemented\n");		\
-		abort();						\
-	}								\
-} while (0)
-
-#define	JEMALLOC_UTIL_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void	wrtmessage(void *cbopaque, const char *s);
-#define	U2S_BUFSIZE	((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
-static char	*u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
-    size_t *slen_p);
-#define	D2S_BUFSIZE	(1 + U2S_BUFSIZE)
-static char	*d2s(intmax_t x, char sign, char *s, size_t *slen_p);
-#define	O2S_BUFSIZE	(1 + U2S_BUFSIZE)
-static char	*o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
-#define	X2S_BUFSIZE	(2 + U2S_BUFSIZE)
-static char	*x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
-    size_t *slen_p);
-
-/******************************************************************************/
-
-/* malloc_message() setup. */
-static void
-wrtmessage(void *cbopaque, const char *s)
-{
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
-	/*
-	 * Use syscall(2) rather than write(2) when possible in order to avoid
-	 * the possibility of memory allocation within libc.  This is necessary
-	 * on FreeBSD; most operating systems do not have this problem though.
-	 *
-	 * syscall() returns long or int, depending on platform, so capture the
-	 * unused result in the widest plausible type to avoid compiler
-	 * warnings.
-	 */
-	UNUSED long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
-#else
-	UNUSED ssize_t result = write(STDERR_FILENO, s, strlen(s));
-#endif
-}
-
-JEMALLOC_EXPORT void	(*je_malloc_message)(void *, const char *s);
-
-/*
- * Wrapper around malloc_message() that avoids the need for
- * je_malloc_message(...) throughout the code.
- */
-void
-malloc_write(const char *s)
-{
-	if (je_malloc_message != NULL)
-		je_malloc_message(NULL, s);
-	else
-		wrtmessage(NULL, s);
-}
-
-/*
- * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
- * provide a wrapper.
- */
-int
-buferror(int err, char *buf, size_t buflen)
-{
-#ifdef _WIN32
-	FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
-	    (LPSTR)buf, (DWORD)buflen, NULL);
-	return (0);
-#elif defined(__GLIBC__) && defined(_GNU_SOURCE)
-	char *b = strerror_r(err, buf, buflen);
-	if (b != buf) {
-		strncpy(buf, b, buflen);
-		buf[buflen-1] = '\0';
-	}
-	return (0);
-#else
-	return (strerror_r(err, buf, buflen));
-#endif
-}
-
-uintmax_t
-malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
-{
-	uintmax_t ret, digit;
-	unsigned b;
-	bool neg;
-	const char *p, *ns;
-
-	p = nptr;
-	if (base < 0 || base == 1 || base > 36) {
-		ns = p;
-		set_errno(EINVAL);
-		ret = UINTMAX_MAX;
-		goto label_return;
-	}
-	b = base;
-
-	/* Swallow leading whitespace and get sign, if any. */
-	neg = false;
-	while (true) {
-		switch (*p) {
-		case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
-			p++;
-			break;
-		case '-':
-			neg = true;
-			/* Fall through. */
-		case '+':
-			p++;
-			/* Fall through. */
-		default:
-			goto label_prefix;
-		}
-	}
-
-	/* Get prefix, if any. */
-	label_prefix:
-	/*
-	 * Note where the first non-whitespace/sign character is so that it is
-	 * possible to tell whether any digits are consumed (e.g., "  0" vs.
-	 * "  -x").
-	 */
-	ns = p;
-	if (*p == '0') {
-		switch (p[1]) {
-		case '0': case '1': case '2': case '3': case '4': case '5':
-		case '6': case '7':
-			if (b == 0)
-				b = 8;
-			if (b == 8)
-				p++;
-			break;
-		case 'X': case 'x':
-			switch (p[2]) {
-			case '0': case '1': case '2': case '3': case '4':
-			case '5': case '6': case '7': case '8': case '9':
-			case 'A': case 'B': case 'C': case 'D': case 'E':
-			case 'F':
-			case 'a': case 'b': case 'c': case 'd': case 'e':
-			case 'f':
-				if (b == 0)
-					b = 16;
-				if (b == 16)
-					p += 2;
-				break;
-			default:
-				break;
-			}
-			break;
-		default:
-			p++;
-			ret = 0;
-			goto label_return;
-		}
-	}
-	if (b == 0)
-		b = 10;
-
-	/* Convert. */
-	ret = 0;
-	while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
-	    || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
-	    || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
-		uintmax_t pret = ret;
-		ret *= b;
-		ret += digit;
-		if (ret < pret) {
-			/* Overflow. */
-			set_errno(ERANGE);
-			ret = UINTMAX_MAX;
-			goto label_return;
-		}
-		p++;
-	}
-	if (neg)
-		ret = (uintmax_t)(-((intmax_t)ret));
-
-	if (p == ns) {
-		/* No conversion performed. */
-		set_errno(EINVAL);
-		ret = UINTMAX_MAX;
-		goto label_return;
-	}
-
-label_return:
-	if (endptr != NULL) {
-		if (p == ns) {
-			/* No characters were converted. */
-			*endptr = (char *)nptr;
-		} else
-			*endptr = (char *)p;
-	}
-	return (ret);
-}
-
-static char *
-u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
-{
-	unsigned i;
-
-	i = U2S_BUFSIZE - 1;
-	s[i] = '\0';
-	switch (base) {
-	case 10:
-		do {
-			i--;
-			s[i] = "0123456789"[x % (uint64_t)10];
-			x /= (uint64_t)10;
-		} while (x > 0);
-		break;
-	case 16: {
-		const char *digits = (uppercase)
-		    ? "0123456789ABCDEF"
-		    : "0123456789abcdef";
-
-		do {
-			i--;
-			s[i] = digits[x & 0xf];
-			x >>= 4;
-		} while (x > 0);
-		break;
-	} default: {
-		const char *digits = (uppercase)
-		    ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-		    : "0123456789abcdefghijklmnopqrstuvwxyz";
-
-		assert(base >= 2 && base <= 36);
-		do {
-			i--;
-			s[i] = digits[x % (uint64_t)base];
-			x /= (uint64_t)base;
-		} while (x > 0);
-	}}
-
-	*slen_p = U2S_BUFSIZE - 1 - i;
-	return (&s[i]);
-}
-
-static char *
-d2s(intmax_t x, char sign, char *s, size_t *slen_p)
-{
-	bool neg;
-
-	if ((neg = (x < 0)))
-		x = -x;
-	s = u2s(x, 10, false, s, slen_p);
-	if (neg)
-		sign = '-';
-	switch (sign) {
-	case '-':
-		if (!neg)
-			break;
-		/* Fall through. */
-	case ' ':
-	case '+':
-		s--;
-		(*slen_p)++;
-		*s = sign;
-		break;
-	default: not_reached();
-	}
-	return (s);
-}
-
-static char *
-o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
-{
-	s = u2s(x, 8, false, s, slen_p);
-	if (alt_form && *s != '0') {
-		s--;
-		(*slen_p)++;
-		*s = '0';
-	}
-	return (s);
-}
-
-static char *
-x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
-{
-	s = u2s(x, 16, uppercase, s, slen_p);
-	if (alt_form) {
-		s -= 2;
-		(*slen_p) += 2;
-		memcpy(s, uppercase ? "0X" : "0x", 2);
-	}
-	return (s);
-}
-
-size_t
-malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
-{
-	size_t i;
-	const char *f;
-
-#define	APPEND_C(c) do {						\
-	if (i < size)							\
-		str[i] = (c);						\
-	i++;								\
-} while (0)
-#define	APPEND_S(s, slen) do {						\
-	if (i < size) {							\
-		size_t cpylen = (slen <= size - i) ? slen : size - i;	\
-		memcpy(&str[i], s, cpylen);				\
-	}								\
-	i += slen;							\
-} while (0)
-#define	APPEND_PADDED_S(s, slen, width, left_justify) do {		\
-	/* Left padding. */						\
-	size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ?	\
-	    (size_t)width - slen : 0);					\
-	if (!left_justify && pad_len != 0) {				\
-		size_t j;						\
-		for (j = 0; j < pad_len; j++)				\
-			APPEND_C(' ');					\
-	}								\
-	/* Value. */							\
-	APPEND_S(s, slen);						\
-	/* Right padding. */						\
-	if (left_justify && pad_len != 0) {				\
-		size_t j;						\
-		for (j = 0; j < pad_len; j++)				\
-			APPEND_C(' ');					\
-	}								\
-} while (0)
-#define	GET_ARG_NUMERIC(val, len) do {					\
-	switch (len) {							\
-	case '?':							\
-		val = va_arg(ap, int);					\
-		break;							\
-	case '?' | 0x80:						\
-		val = va_arg(ap, unsigned int);				\
-		break;							\
-	case 'l':							\
-		val = va_arg(ap, long);					\
-		break;							\
-	case 'l' | 0x80:						\
-		val = va_arg(ap, unsigned long);			\
-		break;							\
-	case 'q':							\
-		val = va_arg(ap, long long);				\
-		break;							\
-	case 'q' | 0x80:						\
-		val = va_arg(ap, unsigned long long);			\
-		break;							\
-	case 'j':							\
-		val = va_arg(ap, intmax_t);				\
-		break;							\
-	case 'j' | 0x80:						\
-		val = va_arg(ap, uintmax_t);				\
-		break;							\
-	case 't':							\
-		val = va_arg(ap, ptrdiff_t);				\
-		break;							\
-	case 'z':							\
-		val = va_arg(ap, ssize_t);				\
-		break;							\
-	case 'z' | 0x80:						\
-		val = va_arg(ap, size_t);				\
-		break;							\
-	case 'p': /* Synthetic; used for %p. */				\
-		val = va_arg(ap, uintptr_t);				\
-		break;							\
-	default:							\
-		not_reached();						\
-		val = 0;						\
-	}								\
-} while (0)
-
-	i = 0;
-	f = format;
-	while (true) {
-		switch (*f) {
-		case '\0': goto label_out;
-		case '%': {
-			bool alt_form = false;
-			bool left_justify = false;
-			bool plus_space = false;
-			bool plus_plus = false;
-			int prec = -1;
-			int width = -1;
-			unsigned char len = '?';
-			char *s;
-			size_t slen;
-
-			f++;
-			/* Flags. */
-			while (true) {
-				switch (*f) {
-				case '#':
-					assert(!alt_form);
-					alt_form = true;
-					break;
-				case '-':
-					assert(!left_justify);
-					left_justify = true;
-					break;
-				case ' ':
-					assert(!plus_space);
-					plus_space = true;
-					break;
-				case '+':
-					assert(!plus_plus);
-					plus_plus = true;
-					break;
-				default: goto label_width;
-				}
-				f++;
-			}
-			/* Width. */
-			label_width:
-			switch (*f) {
-			case '*':
-				width = va_arg(ap, int);
-				f++;
-				if (width < 0) {
-					left_justify = true;
-					width = -width;
-				}
-				break;
-			case '0': case '1': case '2': case '3': case '4':
-			case '5': case '6': case '7': case '8': case '9': {
-				uintmax_t uwidth;
-				set_errno(0);
-				uwidth = malloc_strtoumax(f, (char **)&f, 10);
-				assert(uwidth != UINTMAX_MAX || get_errno() !=
-				    ERANGE);
-				width = (int)uwidth;
-				break;
-			} default:
-				break;
-			}
-			/* Width/precision separator. */
-			if (*f == '.')
-				f++;
-			else
-				goto label_length;
-			/* Precision. */
-			switch (*f) {
-			case '*':
-				prec = va_arg(ap, int);
-				f++;
-				break;
-			case '0': case '1': case '2': case '3': case '4':
-			case '5': case '6': case '7': case '8': case '9': {
-				uintmax_t uprec;
-				set_errno(0);
-				uprec = malloc_strtoumax(f, (char **)&f, 10);
-				assert(uprec != UINTMAX_MAX || get_errno() !=
-				    ERANGE);
-				prec = (int)uprec;
-				break;
-			}
-			default: break;
-			}
-			/* Length. */
-			label_length:
-			switch (*f) {
-			case 'l':
-				f++;
-				if (*f == 'l') {
-					len = 'q';
-					f++;
-				} else
-					len = 'l';
-				break;
-			case 'q': case 'j': case 't': case 'z':
-				len = *f;
-				f++;
-				break;
-			default: break;
-			}
-			/* Conversion specifier. */
-			switch (*f) {
-			case '%':
-				/* %% */
-				APPEND_C(*f);
-				f++;
-				break;
-			case 'd': case 'i': {
-				intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
-				char buf[D2S_BUFSIZE];
-
-				GET_ARG_NUMERIC(val, len);
-				s = d2s(val, (plus_plus ? '+' : (plus_space ?
-				    ' ' : '-')), buf, &slen);
-				APPEND_PADDED_S(s, slen, width, left_justify);
-				f++;
-				break;
-			} case 'o': {
-				uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
-				char buf[O2S_BUFSIZE];
-
-				GET_ARG_NUMERIC(val, len | 0x80);
-				s = o2s(val, alt_form, buf, &slen);
-				APPEND_PADDED_S(s, slen, width, left_justify);
-				f++;
-				break;
-			} case 'u': {
-				uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
-				char buf[U2S_BUFSIZE];
-
-				GET_ARG_NUMERIC(val, len | 0x80);
-				s = u2s(val, 10, false, buf, &slen);
-				APPEND_PADDED_S(s, slen, width, left_justify);
-				f++;
-				break;
-			} case 'x': case 'X': {
-				uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
-				char buf[X2S_BUFSIZE];
-
-				GET_ARG_NUMERIC(val, len | 0x80);
-				s = x2s(val, alt_form, *f == 'X', buf, &slen);
-				APPEND_PADDED_S(s, slen, width, left_justify);
-				f++;
-				break;
-			} case 'c': {
-				unsigned char val;
-				char buf[2];
-
-				assert(len == '?' || len == 'l');
-				assert_not_implemented(len != 'l');
-				val = va_arg(ap, int);
-				buf[0] = val;
-				buf[1] = '\0';
-				APPEND_PADDED_S(buf, 1, width, left_justify);
-				f++;
-				break;
-			} case 's':
-				assert(len == '?' || len == 'l');
-				assert_not_implemented(len != 'l');
-				s = va_arg(ap, char *);
-				slen = (prec < 0) ? strlen(s) : (size_t)prec;
-				APPEND_PADDED_S(s, slen, width, left_justify);
-				f++;
-				break;
-			case 'p': {
-				uintmax_t val;
-				char buf[X2S_BUFSIZE];
-
-				GET_ARG_NUMERIC(val, 'p');
-				s = x2s(val, true, false, buf, &slen);
-				APPEND_PADDED_S(s, slen, width, left_justify);
-				f++;
-				break;
-			} default: not_reached();
-			}
-			break;
-		} default: {
-			APPEND_C(*f);
-			f++;
-			break;
-		}}
-	}
-	label_out:
-	if (i < size)
-		str[i] = '\0';
-	else
-		str[size - 1] = '\0';
-
-#undef APPEND_C
-#undef APPEND_S
-#undef APPEND_PADDED_S
-#undef GET_ARG_NUMERIC
-	return (i);
-}
-
-JEMALLOC_FORMAT_PRINTF(3, 4)
-size_t
-malloc_snprintf(char *str, size_t size, const char *format, ...)
-{
-	size_t ret;
-	va_list ap;
-
-	va_start(ap, format);
-	ret = malloc_vsnprintf(str, size, format, ap);
-	va_end(ap);
-
-	return (ret);
-}
-
-void
-malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
-    const char *format, va_list ap)
-{
-	char buf[MALLOC_PRINTF_BUFSIZE];
-
-	if (write_cb == NULL) {
-		/*
-		 * The caller did not provide an alternate write_cb callback
-		 * function, so use the default one.  malloc_write() is an
-		 * inline function, so use malloc_message() directly here.
-		 */
-		write_cb = (je_malloc_message != NULL) ? je_malloc_message :
-		    wrtmessage;
-		cbopaque = NULL;
-	}
-
-	malloc_vsnprintf(buf, sizeof(buf), format, ap);
-	write_cb(cbopaque, buf);
-}
-
-/*
- * Print to a callback function in such a way as to (hopefully) avoid memory
- * allocation.
- */
-JEMALLOC_FORMAT_PRINTF(3, 4)
-void
-malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
-    const char *format, ...)
-{
-	va_list ap;
-
-	va_start(ap, format);
-	malloc_vcprintf(write_cb, cbopaque, format, ap);
-	va_end(ap);
-}
-
-/* Print to stderr in such a way as to avoid memory allocation. */
-JEMALLOC_FORMAT_PRINTF(1, 2)
-void
-malloc_printf(const char *format, ...)
-{
-	va_list ap;
-
-	va_start(ap, format);
-	malloc_vcprintf(NULL, NULL, format, ap);
-	va_end(ap);
-}
-
-/*
- * Restore normal assertion macros, in order to make it possible to compile all
- * C files as a single concatenation.
- */
-#undef assert
-#undef not_reached
-#undef not_implemented
-#include "jemalloc/internal/assert.h"
diff --git a/zircon/third_party/ulib/jemalloc/src/witness.c b/zircon/third_party/ulib/jemalloc/src/witness.c
deleted file mode 100644
index ffc7e24..0000000
--- a/zircon/third_party/ulib/jemalloc/src/witness.c
+++ /dev/null
@@ -1,124 +0,0 @@
-#define	JEMALLOC_WITNESS_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-void
-witness_init(witness_t *witness, const char *name, witness_rank_t rank,
-    witness_comp_t *comp, void *opaque)
-{
-	witness->name = name;
-	witness->rank = rank;
-	witness->comp = comp;
-	witness->opaque = opaque;
-}
-
-#ifdef JEMALLOC_JET
-#undef witness_lock_error
-#define	witness_lock_error JEMALLOC_N(n_witness_lock_error)
-#endif
-void
-witness_lock_error(const witness_list_t *witnesses, const witness_t *witness)
-{
-	witness_t *w;
-
-	malloc_printf("<jemalloc>: Lock rank order reversal:");
-	ql_foreach(w, witnesses, link) {
-		malloc_printf(" %s(%u)", w->name, w->rank);
-	}
-	malloc_printf(" %s(%u)\n", witness->name, witness->rank);
-	abort();
-}
-#ifdef JEMALLOC_JET
-#undef witness_lock_error
-#define	witness_lock_error JEMALLOC_N(witness_lock_error)
-witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef witness_owner_error
-#define	witness_owner_error JEMALLOC_N(n_witness_owner_error)
-#endif
-void
-witness_owner_error(const witness_t *witness)
-{
-	malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
-	    witness->rank);
-	abort();
-}
-#ifdef JEMALLOC_JET
-#undef witness_owner_error
-#define	witness_owner_error JEMALLOC_N(witness_owner_error)
-witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef witness_not_owner_error
-#define	witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error)
-#endif
-void
-witness_not_owner_error(const witness_t *witness)
-{
-	malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
-	    witness->rank);
-	abort();
-}
-#ifdef JEMALLOC_JET
-#undef witness_not_owner_error
-#define	witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
-witness_not_owner_error_t *witness_not_owner_error =
-    JEMALLOC_N(n_witness_not_owner_error);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef witness_lockless_error
-#define	witness_lockless_error JEMALLOC_N(n_witness_lockless_error)
-#endif
-void
-witness_lockless_error(const witness_list_t *witnesses)
-{
-	witness_t *w;
-
-	malloc_printf("<jemalloc>: Should not own any locks:");
-	ql_foreach(w, witnesses, link) {
-		malloc_printf(" %s(%u)", w->name, w->rank);
-	}
-	malloc_printf("\n");
-	abort();
-}
-#ifdef JEMALLOC_JET
-#undef witness_lockless_error
-#define	witness_lockless_error JEMALLOC_N(witness_lockless_error)
-witness_lockless_error_t *witness_lockless_error =
-    JEMALLOC_N(n_witness_lockless_error);
-#endif
-
-void
-witnesses_cleanup(tsd_t *tsd)
-{
-	witness_assert_lockless(tsd_tsdn(tsd));
-
-	/* Do nothing. */
-}
-
-void
-witness_prefork(tsd_t *tsd)
-{
-	tsd_witness_fork_set(tsd, true);
-}
-
-void
-witness_postfork_parent(tsd_t *tsd)
-{
-	tsd_witness_fork_set(tsd, false);
-}
-
-void
-witness_postfork_child(tsd_t *tsd)
-{
-#ifndef JEMALLOC_MUTEX_INIT_CB
-	witness_list_t *witnesses;
-
-	witnesses = tsd_witnessesp_get(tsd);
-	ql_new(witnesses);
-#endif
-	tsd_witness_fork_set(tsd, false);
-}
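
witness.c is only the reporting and fork-bookkeeping half of jemalloc's lock-order checker; the ordering rule itself (a lock may be acquired only if its rank is not lower than the rank of the most recently acquired lock, with equal ranks deferred to the per-witness comparator) lives in the witness assertions elsewhere in the tree. A toy sketch of that invariant, making no assumptions about the real witness_list_t layout:

#include <assert.h>
#include <stddef.h>

/* Toy rank-order check: ranks are recorded in acquisition order, and a new
 * acquisition is legal only if its rank is >= the last one held.  A
 * violation here corresponds to witness_lock_error() above. */
#define MAX_HELD 16

typedef struct {
	unsigned ranks[MAX_HELD];
	size_t depth;
} held_locks_t;

static void
record_acquire(held_locks_t *h, unsigned rank)
{
	assert(h->depth < MAX_HELD);
	if (h->depth > 0)
		assert(rank >= h->ranks[h->depth - 1]);
	h->ranks[h->depth++] = rank;
}
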
diff --git a/zircon/third_party/ulib/jemalloc/src/zone.c b/zircon/third_party/ulib/jemalloc/src/zone.c
deleted file mode 100644
index c54f4a4..0000000
--- a/zircon/third_party/ulib/jemalloc/src/zone.c
+++ /dev/null
@@ -1,467 +0,0 @@
-#include "jemalloc/internal/jemalloc_internal.h"
-#ifndef JEMALLOC_ZONE
-#  error "This source file is for zones on Darwin (OS X)."
-#endif
-
-/* Definitions of the following structs in malloc/malloc.h might be too old
- * for the built binary to run on newer versions of OSX. So use the newest
- * possible version of those structs.
- */
-typedef struct _malloc_zone_t {
-	void *reserved1;
-	void *reserved2;
-	size_t (*size)(struct _malloc_zone_t *, const void *);
-	void *(*malloc)(struct _malloc_zone_t *, size_t);
-	void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
-	void *(*valloc)(struct _malloc_zone_t *, size_t);
-	void (*free)(struct _malloc_zone_t *, void *);
-	void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
-	void (*destroy)(struct _malloc_zone_t *);
-	const char *zone_name;
-	unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
-	void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
-	struct malloc_introspection_t *introspect;
-	unsigned version;
-	void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
-	void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
-	size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
-} malloc_zone_t;
-
-typedef struct {
-	vm_address_t address;
-	vm_size_t size;
-} vm_range_t;
-
-typedef struct malloc_statistics_t {
-	unsigned blocks_in_use;
-	size_t size_in_use;
-	size_t max_size_in_use;
-	size_t size_allocated;
-} malloc_statistics_t;
-
-typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);
-
-typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
-
-typedef struct malloc_introspection_t {
-	kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
-	size_t (*good_size)(malloc_zone_t *, size_t);
-	boolean_t (*check)(malloc_zone_t *);
-	void (*print)(malloc_zone_t *, boolean_t);
-	void (*log)(malloc_zone_t *, void *);
-	void (*force_lock)(malloc_zone_t *);
-	void (*force_unlock)(malloc_zone_t *);
-	void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
-	boolean_t (*zone_locked)(malloc_zone_t *);
-	boolean_t (*enable_discharge_checking)(malloc_zone_t *);
-	boolean_t (*disable_discharge_checking)(malloc_zone_t *);
-	void (*discharge)(malloc_zone_t *, void *);
-#ifdef __BLOCKS__
-	void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
-#else
-	void *enumerate_unavailable_without_blocks;
-#endif
-	void (*reinit_lock)(malloc_zone_t *);
-} malloc_introspection_t;
-
-extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);
-
-extern malloc_zone_t *malloc_default_zone(void);
-
-extern void malloc_zone_register(malloc_zone_t *zone);
-
-extern void malloc_zone_unregister(malloc_zone_t *zone);
-
-/*
- * The malloc_default_purgeable_zone() function is only available on >= 10.6.
- * We need to check whether it is present at runtime, thus the weak_import.
- */
-extern malloc_zone_t *malloc_default_purgeable_zone(void)
-JEMALLOC_ATTR(weak_import);
-
-/******************************************************************************/
-/* Data. */
-
-static malloc_zone_t *default_zone, *purgeable_zone;
-static malloc_zone_t jemalloc_zone;
-static struct malloc_introspection_t jemalloc_zone_introspect;
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static size_t	zone_size(malloc_zone_t *zone, const void *ptr);
-static void	*zone_malloc(malloc_zone_t *zone, size_t size);
-static void	*zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
-static void	*zone_valloc(malloc_zone_t *zone, size_t size);
-static void	zone_free(malloc_zone_t *zone, void *ptr);
-static void	*zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
-static void	*zone_memalign(malloc_zone_t *zone, size_t alignment,
-    size_t size);
-static void	zone_free_definite_size(malloc_zone_t *zone, void *ptr,
-    size_t size);
-static void	zone_destroy(malloc_zone_t *zone);
-static unsigned	zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
-    void **results, unsigned num_requested);
-static void	zone_batch_free(struct _malloc_zone_t *zone,
-    void **to_be_freed, unsigned num_to_be_freed);
-static size_t	zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
-static size_t	zone_good_size(malloc_zone_t *zone, size_t size);
-static kern_return_t	zone_enumerator(task_t task, void *data, unsigned type_mask,
-    vm_address_t zone_address, memory_reader_t reader,
-    vm_range_recorder_t recorder);
-static boolean_t	zone_check(malloc_zone_t *zone);
-static void	zone_print(malloc_zone_t *zone, boolean_t verbose);
-static void	zone_log(malloc_zone_t *zone, void *address);
-static void	zone_force_lock(malloc_zone_t *zone);
-static void	zone_force_unlock(malloc_zone_t *zone);
-static void	zone_statistics(malloc_zone_t *zone,
-    malloc_statistics_t *stats);
-static boolean_t	zone_locked(malloc_zone_t *zone);
-static void	zone_reinit_lock(malloc_zone_t *zone);
-
-/******************************************************************************/
-/*
- * Functions.
- */
-
-static size_t
-zone_size(malloc_zone_t *zone, const void *ptr)
-{
-	/*
-	 * There appear to be places within Darwin (such as setenv(3)) that
-	 * cause calls to this function with pointers that *no* zone owns.  If
-	 * we knew that all pointers were owned by *some* zone, we could split
-	 * our zone into two parts, and use one as the default allocator and
-	 * the other as the default deallocator/reallocator.  Since that will
-	 * not work in practice, we must check all pointers to assure that they
-	 * reside within a mapped extent before determining size.
-	 */
-	return (ivsalloc(tsdn_fetch(), ptr));
-}
-
-static void *
-zone_malloc(malloc_zone_t *zone, size_t size)
-{
-	return (je_malloc(size));
-}
-
-static void *
-zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
-{
-	return (je_calloc(num, size));
-}
-
-static void *
-zone_valloc(malloc_zone_t *zone, size_t size)
-{
-	void *ret = NULL; /* Assignment avoids useless compiler warning. */
-
-	je_posix_memalign(&ret, PAGE, size);
-
-	return (ret);
-}
-
-static void
-zone_free(malloc_zone_t *zone, void *ptr)
-{
-	if (ivsalloc(tsdn_fetch(), ptr) != 0) {
-		je_free(ptr);
-		return;
-	}
-
-	free(ptr);
-}
-
-static void *
-zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
-{
-	if (ivsalloc(tsdn_fetch(), ptr) != 0)
-		return (je_realloc(ptr, size));
-
-	return (realloc(ptr, size));
-}
-
-static void *
-zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
-{
-	void *ret = NULL; /* Assignment avoids useless compiler warning. */
-
-	je_posix_memalign(&ret, alignment, size);
-
-	return (ret);
-}
-
-static void
-zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
-{
-	size_t alloc_size;
-
-	alloc_size = ivsalloc(tsdn_fetch(), ptr);
-	if (alloc_size != 0) {
-		assert(alloc_size == size);
-		je_free(ptr);
-		return;
-	}
-
-	free(ptr);
-}
-
-static void
-zone_destroy(malloc_zone_t *zone)
-{
-	/* This function should never be called. */
-	not_reached();
-}
-
-static unsigned
-zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
-    unsigned num_requested)
-{
-	unsigned i;
-
-	for (i = 0; i < num_requested; i++) {
-		results[i] = je_malloc(size);
-		if (!results[i])
-			break;
-	}
-
-	return i;
-}
-
-static void
-zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
-    unsigned num_to_be_freed)
-{
-	unsigned i;
-
-	for (i = 0; i < num_to_be_freed; i++) {
-		zone_free(zone, to_be_freed[i]);
-		to_be_freed[i] = NULL;
-	}
-}
-
-static size_t
-zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal)
-{
-	return 0;
-}
-
-static size_t
-zone_good_size(malloc_zone_t *zone, size_t size)
-{
-	if (size == 0)
-		size = 1;
-	return (s2u(size));
-}
-
-static kern_return_t
-zone_enumerator(task_t task, void *data, unsigned type_mask,
-    vm_address_t zone_address, memory_reader_t reader,
-    vm_range_recorder_t recorder)
-{
-	return KERN_SUCCESS;
-}
-
-static boolean_t
-zone_check(malloc_zone_t *zone)
-{
-	return true;
-}
-
-static void
-zone_print(malloc_zone_t *zone, boolean_t verbose)
-{
-}
-
-static void
-zone_log(malloc_zone_t *zone, void *address)
-{
-}
-
-static void
-zone_force_lock(malloc_zone_t *zone)
-{
-	if (isthreaded)
-		jemalloc_prefork();
-}
-
-static void
-zone_force_unlock(malloc_zone_t *zone)
-{
-	/*
-	 * Call jemalloc_postfork_child() rather than
-	 * jemalloc_postfork_parent(), because this function is executed by both
-	 * parent and child.  The parent can tolerate having state
-	 * reinitialized, but the child cannot unlock mutexes that were locked
-	 * by the parent.
-	 */
-	if (isthreaded)
-		jemalloc_postfork_child();
-}
-
-static void
-zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats)
-{
-	/* We make no effort to actually fill the values */
-	stats->blocks_in_use = 0;
-	stats->size_in_use = 0;
-	stats->max_size_in_use = 0;
-	stats->size_allocated = 0;
-}
-
-static boolean_t
-zone_locked(malloc_zone_t *zone)
-{
-	/* Pretend no lock is being held */
-	return false;
-}
-
-static void
-zone_reinit_lock(malloc_zone_t *zone)
-{
-	/* As of OSX 10.12, this function is only used when force_unlock would
-	 * be used if the zone version were < 9. So just use force_unlock. */
-	zone_force_unlock(zone);
-}
-
-static void
-zone_init(void)
-{
-	jemalloc_zone.size = zone_size;
-	jemalloc_zone.malloc = zone_malloc;
-	jemalloc_zone.calloc = zone_calloc;
-	jemalloc_zone.valloc = zone_valloc;
-	jemalloc_zone.free = zone_free;
-	jemalloc_zone.realloc = zone_realloc;
-	jemalloc_zone.destroy = zone_destroy;
-	jemalloc_zone.zone_name = "jemalloc_zone";
-	jemalloc_zone.batch_malloc = zone_batch_malloc;
-	jemalloc_zone.batch_free = zone_batch_free;
-	jemalloc_zone.introspect = &jemalloc_zone_introspect;
-	jemalloc_zone.version = 9;
-	jemalloc_zone.memalign = zone_memalign;
-	jemalloc_zone.free_definite_size = zone_free_definite_size;
-	jemalloc_zone.pressure_relief = zone_pressure_relief;
-
-	jemalloc_zone_introspect.enumerator = zone_enumerator;
-	jemalloc_zone_introspect.good_size = zone_good_size;
-	jemalloc_zone_introspect.check = zone_check;
-	jemalloc_zone_introspect.print = zone_print;
-	jemalloc_zone_introspect.log = zone_log;
-	jemalloc_zone_introspect.force_lock = zone_force_lock;
-	jemalloc_zone_introspect.force_unlock = zone_force_unlock;
-	jemalloc_zone_introspect.statistics = zone_statistics;
-	jemalloc_zone_introspect.zone_locked = zone_locked;
-	jemalloc_zone_introspect.enable_discharge_checking = NULL;
-	jemalloc_zone_introspect.disable_discharge_checking = NULL;
-	jemalloc_zone_introspect.discharge = NULL;
-#ifdef __BLOCKS__
-	jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
-#else
-	jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
-#endif
-	jemalloc_zone_introspect.reinit_lock = zone_reinit_lock;
-}
-
-static malloc_zone_t *
-zone_default_get(void)
-{
-	malloc_zone_t **zones = NULL;
-	unsigned int num_zones = 0;
-
-	/*
-	 * On OSX 10.12, malloc_default_zone returns a special zone that is not
-	 * present in the list of registered zones. That zone uses a "lite zone"
-	 * if one is present (apparently enabled when malloc stack logging is
-	 * enabled), or the first registered zone otherwise. In practice this
-	 * means unless malloc stack logging is enabled, the first registered
-	 * zone is the default.  So get the list of zones to get the first one,
-	 * instead of relying on malloc_default_zone.
-	 */
-	if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
-	    (vm_address_t**)&zones, &num_zones)) {
-		/*
-		 * Reset the value in case the failure happened after it was
-		 * set.
-		 */
-		num_zones = 0;
-	}
-
-	if (num_zones)
-		return (zones[0]);
-
-	return (malloc_default_zone());
-}
-
-/* As written, this function can only promote jemalloc_zone. */
-static void
-zone_promote(void)
-{
-	malloc_zone_t *zone;
-
-	do {
-		/*
-		 * Unregister and reregister the default zone.  On OSX >= 10.6,
-		 * unregistering takes the last registered zone and places it
-		 * at the location of the specified zone.  Unregistering the
-		 * default zone thus makes the last registered one the default.
-		 * On OSX < 10.6, unregistering shifts all registered zones.
-		 * The first registered zone then becomes the default.
-		 */
-		malloc_zone_unregister(default_zone);
-		malloc_zone_register(default_zone);
-
-		/*
-		 * On OSX 10.6, having the default purgeable zone appear before
-		 * the default zone makes some things crash because it thinks it
-		 * owns the default zone allocated pointers.  We thus
-		 * unregister/re-register it in order to ensure it's always
-		 * after the default zone.  On OSX < 10.6, there is no purgeable
-		 * zone, so this does nothing.  On OSX >= 10.6, unregistering
-		 * replaces the purgeable zone with the last registered zone
-		 * above, i.e. the default zone.  Registering it again then puts
-		 * it at the end, obviously after the default zone.
-		 */
-		if (purgeable_zone != NULL) {
-			malloc_zone_unregister(purgeable_zone);
-			malloc_zone_register(purgeable_zone);
-		}
-
-		zone = zone_default_get();
-	} while (zone != &jemalloc_zone);
-}
-
-JEMALLOC_ATTR(constructor)
-void
-zone_register(void)
-{
-	/*
-	 * If something else replaced the system default zone allocator, don't
-	 * register jemalloc's.
-	 */
-	default_zone = zone_default_get();
-	if (!default_zone->zone_name || strcmp(default_zone->zone_name,
-	    "DefaultMallocZone") != 0)
-		return;
-
-	/*
-	 * The default purgeable zone is created lazily by OSX's libc.  It uses
-	 * the default zone when it is created for "small" allocations
-	 * (< 15 KiB), but assumes the default zone is a scalable_zone.  This
-	 * obviously fails when the default zone is the jemalloc zone, so
-	 * malloc_default_purgeable_zone() is called beforehand so that the
-	 * default purgeable zone is created when the default zone is still
-	 * a scalable_zone.  As purgeable zones only exist on >= 10.6, we need
-	 * to check for the existence of malloc_default_purgeable_zone() at
-	 * run time.
-	 */
-	purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
-	    malloc_default_purgeable_zone();
-
-	/* Register the custom zone.  At this point it won't be the default. */
-	zone_init();
-	malloc_zone_register(&jemalloc_zone);
-
-	/* Promote the custom zone to be default. */
-	zone_promote();
-}
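
The Darwin hooks zone.c depended on (malloc_get_all_zones, malloc_default_zone, malloc_zone_register/unregister) are public malloc/malloc.h interfaces, so the zone list it manipulated can be inspected directly. A Darwin-only sketch that enumerates the registered zones the same way zone_default_get() above did:

#include <malloc/malloc.h>
#include <mach/mach.h>
#include <stdio.h>

/* List every registered malloc zone by name.  Passing task 0 and a NULL
 * reader inspects the current process, as in zone_default_get(). */
static void
list_malloc_zones(void)
{
	vm_address_t *zones = NULL;
	unsigned i, nzones = 0;

	if (malloc_get_all_zones(0, NULL, &zones, &nzones) != KERN_SUCCESS)
		return;
	for (i = 0; i < nzones; i++) {
		malloc_zone_t *z = (malloc_zone_t *)zones[i];
		printf("zone %u: %s\n", i, z->zone_name != NULL ?
		    z->zone_name : "(unnamed)");
	}
}
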
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-alti.h b/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-alti.h
deleted file mode 100644
index 0005df6..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-alti.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-/** 
- * @file SFMT-alti.h 
- *
- * @brief SIMD oriented Fast Mersenne Twister(SFMT)
- * pseudorandom number generator
- *
- * @author Mutsuo Saito (Hiroshima University)
- * @author Makoto Matsumoto (Hiroshima University)
- *
- * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- * University. All rights reserved.
- *
- * The new BSD License is applied to this software.
- * see LICENSE.txt
- */
-
-#ifndef SFMT_ALTI_H
-#define SFMT_ALTI_H
-
-/**
- * This function represents the recursion formula in AltiVec and BIG ENDIAN.
- * @param a a 128-bit part of the internal state array
- * @param b a 128-bit part of the internal state array
- * @param c a 128-bit part of the internal state array
- * @param d a 128-bit part of the internal state array

- * @return output
- */
-JEMALLOC_ALWAYS_INLINE
-vector unsigned int vec_recursion(vector unsigned int a,
-						vector unsigned int b,
-						vector unsigned int c,
-						vector unsigned int d) {
-
-    const vector unsigned int sl1 = ALTI_SL1;
-    const vector unsigned int sr1 = ALTI_SR1;
-#ifdef ONLY64
-    const vector unsigned int mask = ALTI_MSK64;
-    const vector unsigned char perm_sl = ALTI_SL2_PERM64;
-    const vector unsigned char perm_sr = ALTI_SR2_PERM64;
-#else
-    const vector unsigned int mask = ALTI_MSK;
-    const vector unsigned char perm_sl = ALTI_SL2_PERM;
-    const vector unsigned char perm_sr = ALTI_SR2_PERM;
-#endif
-    vector unsigned int v, w, x, y, z;
-    x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl);
-    v = a;
-    y = vec_sr(b, sr1);
-    z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr);
-    w = vec_sl(d, sl1);
-    z = vec_xor(z, w);
-    y = vec_and(y, mask);
-    v = vec_xor(v, x);
-    z = vec_xor(z, y);
-    z = vec_xor(z, v);
-    return z;
-}
-
-/**
- * This function fills the internal state array with pseudorandom
- * integers.
- */
-JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
-    int i;
-    vector unsigned int r, r1, r2;
-
-    r1 = ctx->sfmt[N - 2].s;
-    r2 = ctx->sfmt[N - 1].s;
-    for (i = 0; i < N - POS1; i++) {
-	r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
-	ctx->sfmt[i].s = r;
-	r1 = r2;
-	r2 = r;
-    }
-    for (; i < N; i++) {
-	r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2);
-	ctx->sfmt[i].s = r;
-	r1 = r2;
-	r2 = r;
-    }
-}
-
-/**
- * This function fills the user-specified array with pseudorandom
- * integers.
- *
- * @param array a 128-bit array to be filled with pseudorandom numbers.
- * @param size number of 128-bit pseudorandom numbers to be generated.
- */
-JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
-    int i, j;
-    vector unsigned int r, r1, r2;
-
-    r1 = ctx->sfmt[N - 2].s;
-    r2 = ctx->sfmt[N - 1].s;
-    for (i = 0; i < N - POS1; i++) {
-	r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
-	array[i].s = r;
-	r1 = r2;
-	r2 = r;
-    }
-    for (; i < N; i++) {
-	r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2);
-	array[i].s = r;
-	r1 = r2;
-	r2 = r;
-    }
-    /* main loop */
-    for (; i < size - N; i++) {
-	r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
-	array[i].s = r;
-	r1 = r2;
-	r2 = r;
-    }
-    for (j = 0; j < 2 * N - size; j++) {
-	ctx->sfmt[j].s = array[j + size - N].s;
-    }
-    for (; i < size; i++) {
-	r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
-	array[i].s = r;
-	ctx->sfmt[j++].s = r;
-	r1 = r2;
-	r2 = r;
-    }
-}
-
-#ifndef ONLY64
-#if defined(__APPLE__)
-#define ALTI_SWAP (vector unsigned char) \
-	(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11)
-#else
-#define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}
-#endif
-/**
- * This function swaps the high and low 32-bit halves of the 64-bit
- * integers in a user-specified array.
- *
- * @param array a 128-bit array to be swapped.
- * @param size size of the 128-bit array.
- */
-JEMALLOC_INLINE void swap(w128_t *array, int size) {
-    int i;
-    const vector unsigned char perm = ALTI_SWAP;
-
-    for (i = 0; i < size; i++) {
-	array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm);
-    }
-}
-#endif
-
-#endif
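
This header only supplies the AltiVec block-generation kernels; test code reaches them through the generic SFMT interface. A usage sketch, with the caveat that the entry-point names (init_gen_rand, gen_rand32, fini_gen_rand) are assumed from the companion test/include/test/SFMT.h header, which is not part of this hunk:

#include <stdint.h>
#include "test/SFMT.h"	/* assumed companion header; not shown in this diff */

/* Seed the SFMT test PRNG and fold a few 32-bit draws together. */
static uint32_t
draw_some(uint32_t seed)
{
	sfmt_t *ctx = init_gen_rand(seed);
	uint32_t r = 0;
	int i;

	for (i = 0; i < 4; i++)
		r ^= gen_rand32(ctx);
	fini_gen_rand(ctx);
	return (r);
}
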
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params.h b/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params.h
deleted file mode 100644
index ade6622..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef SFMT_PARAMS_H
-#define SFMT_PARAMS_H
-
-#if !defined(MEXP)
-#ifdef __GNUC__
-  #warning "MEXP is not defined. I assume MEXP is 19937."
-#endif
-  #define MEXP 19937
-#endif
-/*-----------------
-  BASIC DEFINITIONS
-  -----------------*/
-/** Mersenne Exponent. The period of the sequence 
- *  is a multiple of 2^MEXP-1.
- * #define MEXP 19937 */
-/** SFMT generator has an internal state array of 128-bit integers,
- * and N is its size. */
-#define N (MEXP / 128 + 1)
-/** N32 is the size of internal state array when regarded as an array
- * of 32-bit integers.*/
-#define N32 (N * 4)
-/** N64 is the size of internal state array when regarded as an array
- * of 64-bit integers.*/
-#define N64 (N * 2)
-
-/*----------------------
-  the parameters of SFMT
-  following definitions are in paramsXXXX.h file.
-  ----------------------*/
-/** the pick up position of the array.
-#define POS1 122 
-*/
-
-/** the parameter of shift left as four 32-bit registers.
-#define SL1 18
- */
-
-/** the parameter of shift left as one 128-bit register. 
- * The 128-bit integer is shifted by (SL2 * 8) bits. 
-#define SL2 1 
-*/
-
-/** the parameter of shift right as four 32-bit registers.
-#define SR1 11
-*/
-
-/** the parameter of shift right as one 128-bit register. 
- * The 128-bit integer is shifted by (SR2 * 8) bits.
-#define SR2 1 
-*/
-
-/** A bitmask, used in the recursion.  These parameters are introduced
- * to break symmetry of SIMD.
-#define MSK1 0xdfffffefU
-#define MSK2 0xddfecb7fU
-#define MSK3 0xbffaffffU
-#define MSK4 0xbffffff6U 
-*/
-
-/** These definitions are part of a 128-bit period certification vector.
-#define PARITY1	0x00000001U
-#define PARITY2	0x00000000U
-#define PARITY3	0x00000000U
-#define PARITY4	0xc98e126aU
-*/
-
-#if MEXP == 607
-  #include "test/SFMT-params607.h"
-#elif MEXP == 1279
-  #include "test/SFMT-params1279.h"
-#elif MEXP == 2281
-  #include "test/SFMT-params2281.h"
-#elif MEXP == 4253
-  #include "test/SFMT-params4253.h"
-#elif MEXP == 11213
-  #include "test/SFMT-params11213.h"
-#elif MEXP == 19937
-  #include "test/SFMT-params19937.h"
-#elif MEXP == 44497
-  #include "test/SFMT-params44497.h"
-#elif MEXP == 86243
-  #include "test/SFMT-params86243.h"
-#elif MEXP == 132049
-  #include "test/SFMT-params132049.h"
-#elif MEXP == 216091
-  #include "test/SFMT-params216091.h"
-#else
-#ifdef __GNUC__
-  #error "MEXP is not valid."
-  #undef MEXP
-#else
-  #undef MEXP
-#endif
-
-#endif
-
-#endif /* SFMT_PARAMS_H */
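
The state-size arithmetic above is easy to sanity-check: with the default MEXP of 19937, integer division gives N = 19937/128 + 1 = 156 128-bit words of state, hence N32 = 624 and N64 = 312. As a standalone illustration (C11 _Static_assert, not part of the original header):

/* Compile-time check of the SFMT state-size arithmetic for MEXP 19937. */
#define MEXP 19937
#define N    (MEXP / 128 + 1)
#define N32  (N * 4)
#define N64  (N * 2)

_Static_assert(N == 156, "19937 / 128 is 155, plus one 128-bit word");
_Static_assert(N32 == 624, "624 32-bit words of state");
_Static_assert(N64 == 312, "312 64-bit words of state");
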
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params11213.h b/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params11213.h
deleted file mode 100644
index 2994bd21..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params11213.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef SFMT_PARAMS11213_H
-#define SFMT_PARAMS11213_H
-
-#define POS1	68
-#define SL1	14
-#define SL2	3
-#define SR1	7
-#define SR2	3
-#define MSK1	0xeffff7fbU
-#define MSK2	0xffffffefU
-#define MSK3	0xdfdfbfffU
-#define MSK4	0x7fffdbfdU
-#define PARITY1	0x00000001U
-#define PARITY2	0x00000000U
-#define PARITY3	0xe8148000U
-#define PARITY4	0xd0c7afa3U
-
-
-/* PARAMETERS FOR ALTIVEC */
-#if defined(__APPLE__)	/* For OSX */
-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
-    #define ALTI_MSK64 \
-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
-    #define ALTI_SL2_PERM \
-	(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
-    #define ALTI_SL2_PERM64 \
-	(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
-    #define ALTI_SR2_PERM \
-	(vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
-    #define ALTI_SR2_PERM64 \
-	(vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
-#else	/* For OTHER OSs(Linux?) */
-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
-    #define ALTI_SL2_PERM	{3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
-    #define ALTI_SL2_PERM64	{3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
-    #define ALTI_SR2_PERM	{5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
-    #define ALTI_SR2_PERM64	{13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
-#endif	/* For OSX */
-#define IDSTR	"SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd"
-
-#endif /* SFMT_PARAMS11213_H */
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params1279.h b/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params1279.h
deleted file mode 100644
index d7959f9..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params1279.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef SFMT_PARAMS1279_H
-#define SFMT_PARAMS1279_H
-
-#define POS1	7
-#define SL1	14
-#define SL2	3
-#define SR1	5
-#define SR2	1
-#define MSK1	0xf7fefffdU
-#define MSK2	0x7fefcfffU
-#define MSK3	0xaff3ef3fU
-#define MSK4	0xb5ffff7fU
-#define PARITY1	0x00000001U
-#define PARITY2	0x00000000U
-#define PARITY3	0x00000000U
-#define PARITY4	0x20000000U
-
-
-/* PARAMETERS FOR ALTIVEC */
-#if defined(__APPLE__)	/* For OSX */
-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
-    #define ALTI_MSK64 \
-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
-    #define ALTI_SL2_PERM \
-	(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
-    #define ALTI_SL2_PERM64 \
-	(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
-    #define ALTI_SR2_PERM \
-	(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
-    #define ALTI_SR2_PERM64 \
-	(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
-#else	/* For OTHER OSs(Linux?) */
-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
-    #define ALTI_SL2_PERM	{3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
-    #define ALTI_SL2_PERM64	{3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
-    #define ALTI_SR2_PERM	{7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
-    #define ALTI_SR2_PERM64	{15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
-#endif	/* For OSX */
-#define IDSTR	"SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f"
-
-#endif /* SFMT_PARAMS1279_H */
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params132049.h b/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params132049.h
deleted file mode 100644
index a1dcec3..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params132049.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef SFMT_PARAMS132049_H
-#define SFMT_PARAMS132049_H
-
-#define POS1	110
-#define SL1	19
-#define SL2	1
-#define SR1	21
-#define SR2	1
-#define MSK1	0xffffbb5fU
-#define MSK2	0xfb6ebf95U
-#define MSK3	0xfffefffaU
-#define MSK4	0xcff77fffU
-#define PARITY1	0x00000001U
-#define PARITY2	0x00000000U
-#define PARITY3	0xcb520000U
-#define PARITY4	0xc7e91c7dU
-
-
-/* PARAMETERS FOR ALTIVEC */
-#if defined(__APPLE__)	/* For OSX */
-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
-    #define ALTI_MSK64 \
-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
-    #define ALTI_SL2_PERM \
-	(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
-    #define ALTI_SL2_PERM64 \
-	(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
-    #define ALTI_SR2_PERM \
-	(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
-    #define ALTI_SR2_PERM64 \
-	(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
-#else	/* For OTHER OSs(Linux?) */
-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
-    #define ALTI_SL2_PERM	{1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
-    #define ALTI_SL2_PERM64	{1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
-    #define ALTI_SR2_PERM	{7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
-    #define ALTI_SR2_PERM64	{15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
-#endif	/* For OSX */
-#define IDSTR	"SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff"
-
-#endif /* SFMT_PARAMS132049_H */
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params19937.h b/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params19937.h
deleted file mode 100644
index fb92b4c..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params19937.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef SFMT_PARAMS19937_H
-#define SFMT_PARAMS19937_H
-
-#define POS1	122
-#define SL1	18
-#define SL2	1
-#define SR1	11
-#define SR2	1
-#define MSK1	0xdfffffefU
-#define MSK2	0xddfecb7fU
-#define MSK3	0xbffaffffU
-#define MSK4	0xbffffff6U
-#define PARITY1	0x00000001U
-#define PARITY2	0x00000000U
-#define PARITY3	0x00000000U
-#define PARITY4	0x13c9e684U
-
-
-/* PARAMETERS FOR ALTIVEC */
-#if defined(__APPLE__)	/* For OSX */
-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
-    #define ALTI_MSK64 \
-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
-    #define ALTI_SL2_PERM \
-	(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
-    #define ALTI_SL2_PERM64 \
-	(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
-    #define ALTI_SR2_PERM \
-	(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
-    #define ALTI_SR2_PERM64 \
-	(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
-#else	/* For OTHER OSs(Linux?) */
-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
-    #define ALTI_SL2_PERM	{1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
-    #define ALTI_SL2_PERM64	{1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
-    #define ALTI_SR2_PERM	{7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
-    #define ALTI_SR2_PERM64	{15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
-#endif	/* For OSX */
-#define IDSTR	"SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6"
-
-#endif /* SFMT_PARAMS19937_H */
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params216091.h b/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params216091.h
deleted file mode 100644
index 125ce282..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params216091.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef SFMT_PARAMS216091_H
-#define SFMT_PARAMS216091_H
-
-#define POS1	627
-#define SL1	11
-#define SL2	3
-#define SR1	10
-#define SR2	1
-#define MSK1	0xbff7bff7U
-#define MSK2	0xbfffffffU
-#define MSK3	0xbffffa7fU
-#define MSK4	0xffddfbfbU
-#define PARITY1	0xf8000001U
-#define PARITY2	0x89e80709U
-#define PARITY3	0x3bd2b64bU
-#define PARITY4	0x0c64b1e4U
-
-
-/* PARAMETERS FOR ALTIVEC */
-#if defined(__APPLE__)	/* For OSX */
-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
-    #define ALTI_MSK64 \
-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
-    #define ALTI_SL2_PERM \
-	(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
-    #define ALTI_SL2_PERM64 \
-	(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
-    #define ALTI_SR2_PERM \
-	(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
-    #define ALTI_SR2_PERM64 \
-	(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
-#else	/* For OTHER OSs(Linux?) */
-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
-    #define ALTI_SL2_PERM	{3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
-    #define ALTI_SL2_PERM64	{3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
-    #define ALTI_SR2_PERM	{7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
-    #define ALTI_SR2_PERM64	{15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
-#endif	/* For OSX */
-#define IDSTR	"SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb"
-
-#endif /* SFMT_PARAMS216091_H */
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params2281.h b/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params2281.h
deleted file mode 100644
index 0ef85c4..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params2281.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef SFMT_PARAMS2281_H
-#define SFMT_PARAMS2281_H
-
-#define POS1	12
-#define SL1	19
-#define SL2	1
-#define SR1	5
-#define SR2	1
-#define MSK1	0xbff7ffbfU
-#define MSK2	0xfdfffffeU
-#define MSK3	0xf7ffef7fU
-#define MSK4	0xf2f7cbbfU
-#define PARITY1	0x00000001U
-#define PARITY2	0x00000000U
-#define PARITY3	0x00000000U
-#define PARITY4	0x41dfa600U
-
-
-/* PARAMETERS FOR ALTIVEC */
-#if defined(__APPLE__)	/* For OSX */
-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
-    #define ALTI_MSK64 \
-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
-    #define ALTI_SL2_PERM \
-	(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
-    #define ALTI_SL2_PERM64 \
-	(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
-    #define ALTI_SR2_PERM \
-	(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
-    #define ALTI_SR2_PERM64 \
-	(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
-#else	/* For OTHER OSs(Linux?) */
-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
-    #define ALTI_SL2_PERM	{1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
-    #define ALTI_SL2_PERM64	{1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
-    #define ALTI_SR2_PERM	{7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
-    #define ALTI_SR2_PERM64	{15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
-#endif	/* For OSX */
-#define IDSTR	"SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf"
-
-#endif /* SFMT_PARAMS2281_H */
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params4253.h b/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params4253.h
deleted file mode 100644
index 9f07bc6..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params4253.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef SFMT_PARAMS4253_H
-#define SFMT_PARAMS4253_H
-
-#define POS1	17
-#define SL1	20
-#define SL2	1
-#define SR1	7
-#define SR2	1
-#define MSK1	0x9f7bffffU
-#define MSK2	0x9fffff5fU
-#define MSK3	0x3efffffbU
-#define MSK4	0xfffff7bbU
-#define PARITY1	0xa8000001U
-#define PARITY2	0xaf5390a3U
-#define PARITY3	0xb740b3f8U
-#define PARITY4	0x6c11486dU
-
-
-/* PARAMETERS FOR ALTIVEC */
-#if defined(__APPLE__)	/* For OSX */
-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
-    #define ALTI_MSK64 \
-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
-    #define ALTI_SL2_PERM \
-	(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
-    #define ALTI_SL2_PERM64 \
-	(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
-    #define ALTI_SR2_PERM \
-	(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
-    #define ALTI_SR2_PERM64 \
-	(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
-#else	/* For OTHER OSs(Linux?) */
-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
-    #define ALTI_SL2_PERM	{1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
-    #define ALTI_SL2_PERM64	{1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
-    #define ALTI_SR2_PERM	{7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
-    #define ALTI_SR2_PERM64	{15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
-#endif	/* For OSX */
-#define IDSTR	"SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb"
-
-#endif /* SFMT_PARAMS4253_H */
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params44497.h b/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params44497.h
deleted file mode 100644
index 85598fe..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params44497.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef SFMT_PARAMS44497_H
-#define SFMT_PARAMS44497_H
-
-#define POS1	330
-#define SL1	5
-#define SL2	3
-#define SR1	9
-#define SR2	3
-#define MSK1	0xeffffffbU
-#define MSK2	0xdfbebfffU
-#define MSK3	0xbfbf7befU
-#define MSK4	0x9ffd7bffU
-#define PARITY1	0x00000001U
-#define PARITY2	0x00000000U
-#define PARITY3	0xa3ac4000U
-#define PARITY4	0xecc1327aU
-
-
-/* PARAMETERS FOR ALTIVEC */
-#if defined(__APPLE__)	/* For OSX */
-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
-    #define ALTI_MSK64 \
-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
-    #define ALTI_SL2_PERM \
-	(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
-    #define ALTI_SL2_PERM64 \
-	(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
-    #define ALTI_SR2_PERM \
-	(vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
-    #define ALTI_SR2_PERM64 \
-	(vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
-#else	/* For OTHER OSs(Linux?) */
-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
-    #define ALTI_SL2_PERM	{3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
-    #define ALTI_SL2_PERM64	{3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
-    #define ALTI_SR2_PERM	{5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
-    #define ALTI_SR2_PERM64	{13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
-#endif	/* For OSX */
-#define IDSTR	"SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff"
-
-#endif /* SFMT_PARAMS44497_H */
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params607.h b/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params607.h
deleted file mode 100644
index bc76485..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params607.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef SFMT_PARAMS607_H
-#define SFMT_PARAMS607_H
-
-#define POS1	2
-#define SL1	15
-#define SL2	3
-#define SR1	13
-#define SR2	3
-#define MSK1	0xfdff37ffU
-#define MSK2	0xef7f3f7dU
-#define MSK3	0xff777b7dU
-#define MSK4	0x7ff7fb2fU
-#define PARITY1	0x00000001U
-#define PARITY2	0x00000000U
-#define PARITY3	0x00000000U
-#define PARITY4	0x5986f054U
-
-
-/* PARAMETERS FOR ALTIVEC */
-#if defined(__APPLE__)	/* For OSX */
-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
-    #define ALTI_MSK64 \
-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
-    #define ALTI_SL2_PERM \
-	(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
-    #define ALTI_SL2_PERM64 \
-	(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
-    #define ALTI_SR2_PERM \
-	(vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
-    #define ALTI_SR2_PERM64 \
-	(vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
-#else	/* For OTHER OSs(Linux?) */
-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
-    #define ALTI_SL2_PERM	{3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
-    #define ALTI_SL2_PERM64	{3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
-    #define ALTI_SR2_PERM	{5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
-    #define ALTI_SR2_PERM64	{13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
-#endif	/* For OSX */
-#define IDSTR	"SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f"
-
-#endif /* SFMT_PARAMS607_H */
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params86243.h b/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params86243.h
deleted file mode 100644
index 5e4d783..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-params86243.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef SFMT_PARAMS86243_H
-#define SFMT_PARAMS86243_H
-
-#define POS1	366
-#define SL1	6
-#define SL2	7
-#define SR1	19
-#define SR2	1
-#define MSK1	0xfdbffbffU
-#define MSK2	0xbff7ff3fU
-#define MSK3	0xfd77efffU
-#define MSK4	0xbf9ff3ffU
-#define PARITY1	0x00000001U
-#define PARITY2	0x00000000U
-#define PARITY3	0x00000000U
-#define PARITY4	0xe9528d85U
-
-
-/* PARAMETERS FOR ALTIVEC */
-#if defined(__APPLE__)	/* For OSX */
-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
-    #define ALTI_MSK64 \
-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
-    #define ALTI_SL2_PERM \
-	(vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6)
-    #define ALTI_SL2_PERM64 \
-	(vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6)
-    #define ALTI_SR2_PERM \
-	(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
-    #define ALTI_SR2_PERM64 \
-	(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
-#else	/* For OTHER OSs(Linux?) */
-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
-    #define ALTI_SL2_PERM	{25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6}
-    #define ALTI_SL2_PERM64	{7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6}
-    #define ALTI_SR2_PERM	{7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
-    #define ALTI_SR2_PERM64	{15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
-#endif	/* For OSX */
-#define IDSTR	"SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff"
-
-#endif /* SFMT_PARAMS86243_H */
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-sse2.h b/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-sse2.h
deleted file mode 100644
index 0314a16..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT-sse2.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-/** 
- * @file  SFMT-sse2.h
- * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2
- *
- * @author Mutsuo Saito (Hiroshima University)
- * @author Makoto Matsumoto (Hiroshima University)
- *
- * @note We assume LITTLE ENDIAN in this file
- *
- * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- * University. All rights reserved.
- *
- * The new BSD License is applied to this software, see LICENSE.txt
- */
-
-#ifndef SFMT_SSE2_H
-#define SFMT_SSE2_H
-
-/**
- * This function represents the recursion formula.
- * @param a a 128-bit part of the internal state array
- * @param b a 128-bit part of the internal state array
- * @param c a 128-bit part of the internal state array
- * @param d a 128-bit part of the internal state array
- * @param mask 128-bit mask
- * @return output
- */
-JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, 
-				   __m128i c, __m128i d, __m128i mask) {
-    __m128i v, x, y, z;
-    
-    x = _mm_load_si128(a);
-    y = _mm_srli_epi32(*b, SR1);
-    z = _mm_srli_si128(c, SR2);
-    v = _mm_slli_epi32(d, SL1);
-    z = _mm_xor_si128(z, x);
-    z = _mm_xor_si128(z, v);
-    x = _mm_slli_si128(x, SL2);
-    y = _mm_and_si128(y, mask);
-    z = _mm_xor_si128(z, x);
-    z = _mm_xor_si128(z, y);
-    return z;
-}
-
-/**
- * This function fills the internal state array with pseudorandom
- * integers.
- */
-JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
-    int i;
-    __m128i r, r1, r2, mask;
-    mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
-
-    r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
-    r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
-    for (i = 0; i < N - POS1; i++) {
-	r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
-	  mask);
-	_mm_store_si128(&ctx->sfmt[i].si, r);
-	r1 = r2;
-	r2 = r;
-    }
-    for (; i < N; i++) {
-	r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2,
-	  mask);
-	_mm_store_si128(&ctx->sfmt[i].si, r);
-	r1 = r2;
-	r2 = r;
-    }
-}
-
-/**
- * This function fills the user-specified array with pseudorandom
- * integers.
- *
- * @param array a 128-bit array to be filled by pseudorandom numbers.
- * @param size number of 128-bit pseudorandom numbers to be generated.
- */
-JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
-    int i, j;
-    __m128i r, r1, r2, mask;
-    mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
-
-    r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
-    r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
-    for (i = 0; i < N - POS1; i++) {
-	r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
-	  mask);
-	_mm_store_si128(&array[i].si, r);
-	r1 = r2;
-	r2 = r;
-    }
-    for (; i < N; i++) {
-	r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2,
-	  mask);
-	_mm_store_si128(&array[i].si, r);
-	r1 = r2;
-	r2 = r;
-    }
-    /* main loop */
-    for (; i < size - N; i++) {
-	r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
-			 mask);
-	_mm_store_si128(&array[i].si, r);
-	r1 = r2;
-	r2 = r;
-    }
-    for (j = 0; j < 2 * N - size; j++) {
-	r = _mm_load_si128(&array[j + size - N].si);
-	_mm_store_si128(&ctx->sfmt[j].si, r);
-    }
-    for (; i < size; i++) {
-	r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
-			 mask);
-	_mm_store_si128(&array[i].si, r);
-	_mm_store_si128(&ctx->sfmt[j++].si, r);
-	r1 = r2;
-	r2 = r;
-    }
-}
-
-#endif
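
For reference, mm_recursion() above is the SSE2 form of the SFMT recursion r = a ^ (a <<128 8*SL2) ^ ((b >>32 SR1) & MSK) ^ (c >>128 8*SR2) ^ (d <<32 SL1), where <<128/>>128 shift the whole 128-bit word by bytes and <<32/>>32 act on each 32-bit lane. A portable scalar sketch of the same formula follows; the struct and helper names are illustrative (not from the deleted sources), and it assumes little-endian lane order with 1 <= SL2, SR2 < 8.

#include <stdint.h>

/* One 128-bit state word as four 32-bit lanes; lane 0 is least significant. */
typedef struct { uint32_t u[4]; } w128_t;

/* Whole-128-bit left shift by `shift` bytes (1 <= shift < 8). */
static void lshift128(w128_t *out, const w128_t *in, int shift) {
    uint64_t tl = ((uint64_t)in->u[1] << 32) | in->u[0];
    uint64_t th = ((uint64_t)in->u[3] << 32) | in->u[2];
    uint64_t ol = tl << (shift * 8);
    uint64_t oh = (th << (shift * 8)) | (tl >> (64 - shift * 8));
    out->u[0] = (uint32_t)ol; out->u[1] = (uint32_t)(ol >> 32);
    out->u[2] = (uint32_t)oh; out->u[3] = (uint32_t)(oh >> 32);
}

/* Whole-128-bit right shift by `shift` bytes (1 <= shift < 8). */
static void rshift128(w128_t *out, const w128_t *in, int shift) {
    uint64_t tl = ((uint64_t)in->u[1] << 32) | in->u[0];
    uint64_t th = ((uint64_t)in->u[3] << 32) | in->u[2];
    uint64_t oh = th >> (shift * 8);
    uint64_t ol = (tl >> (shift * 8)) | (th << (64 - shift * 8));
    out->u[0] = (uint32_t)ol; out->u[1] = (uint32_t)(ol >> 32);
    out->u[2] = (uint32_t)oh; out->u[3] = (uint32_t)(oh >> 32);
}

/* r = a ^ (a <<128 8*sl2) ^ ((b >> sr1) & msk) ^ (c >>128 8*sr2) ^ (d << sl1) */
static void do_recursion(w128_t *r, const w128_t *a, const w128_t *b,
                         const w128_t *c, const w128_t *d,
                         const uint32_t msk[4], int sl1, int sl2,
                         int sr1, int sr2) {
    w128_t x, y;
    int i;

    lshift128(&x, a, sl2);
    rshift128(&y, c, sr2);
    for (i = 0; i < 4; i++) {
        r->u[i] = a->u[i] ^ x.u[i] ^ ((b->u[i] >> sr1) & msk[i]) ^ y.u[i] ^
            (d->u[i] << sl1);
    }
}

gen_rand_all() and gen_rand_array() above apply this same recursion across the state and output arrays; only the 128-bit arithmetic differs between the SSE2 and scalar paths.
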
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT.h b/zircon/third_party/ulib/jemalloc/test/include/test/SFMT.h
deleted file mode 100644
index 09c1607..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/SFMT.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-/** 
- * @file SFMT.h 
- *
- * @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom
- * number generator
- *
- * @author Mutsuo Saito (Hiroshima University)
- * @author Makoto Matsumoto (Hiroshima University)
- *
- * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- * University. All rights reserved.
- *
- * The new BSD License is applied to this software.
- * see LICENSE.txt
- *
- * @note We assume that your system has inttypes.h.  If your system
- * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t,
- * and you have to define PRIu64 and PRIx64 in this file as follows:
- * @verbatim
- typedef unsigned int uint32_t
- typedef unsigned long long uint64_t  
- #define PRIu64 "llu"
- #define PRIx64 "llx"
-@endverbatim
- * uint32_t must be exactly 32-bit unsigned integer type (no more, no
- * less), and uint64_t must be exactly 64-bit unsigned integer type.
- * PRIu64 and PRIx64 are used for printf function to print 64-bit
- * unsigned int and 64-bit unsigned int in hexadecimal format.
- */
-
-#ifndef SFMT_H
-#define SFMT_H
-
-typedef struct sfmt_s sfmt_t;
-
-uint32_t gen_rand32(sfmt_t *ctx);
-uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit);
-uint64_t gen_rand64(sfmt_t *ctx);
-uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit);
-void fill_array32(sfmt_t *ctx, uint32_t *array, int size);
-void fill_array64(sfmt_t *ctx, uint64_t *array, int size);
-sfmt_t *init_gen_rand(uint32_t seed);
-sfmt_t *init_by_array(uint32_t *init_key, int key_length);
-void fini_gen_rand(sfmt_t *ctx);
-const char *get_idstring(void);
-int get_min_array_size32(void);
-int get_min_array_size64(void);
-
-#ifndef JEMALLOC_ENABLE_INLINE
-double to_real1(uint32_t v);
-double genrand_real1(sfmt_t *ctx);
-double to_real2(uint32_t v);
-double genrand_real2(sfmt_t *ctx);
-double to_real3(uint32_t v);
-double genrand_real3(sfmt_t *ctx);
-double to_res53(uint64_t v);
-double to_res53_mix(uint32_t x, uint32_t y);
-double genrand_res53(sfmt_t *ctx);
-double genrand_res53_mix(sfmt_t *ctx);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_))
-/* These real versions are due to Isaku Wada */
-/** generates a random number on [0,1]-real-interval */
-JEMALLOC_INLINE double to_real1(uint32_t v)
-{
-    return v * (1.0/4294967295.0); 
-    /* divided by 2^32-1 */ 
-}
-
-/** generates a random number on [0,1]-real-interval */
-JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx)
-{
-    return to_real1(gen_rand32(ctx));
-}
-
-/** generates a random number on [0,1)-real-interval */
-JEMALLOC_INLINE double to_real2(uint32_t v)
-{
-    return v * (1.0/4294967296.0); 
-    /* divided by 2^32 */
-}
-
-/** generates a random number on [0,1)-real-interval */
-JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx)
-{
-    return to_real2(gen_rand32(ctx));
-}
-
-/** generates a random number on (0,1)-real-interval */
-JEMALLOC_INLINE double to_real3(uint32_t v)
-{
-    return (((double)v) + 0.5)*(1.0/4294967296.0); 
-    /* divided by 2^32 */
-}
-
-/** generates a random number on (0,1)-real-interval */
-JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx)
-{
-    return to_real3(gen_rand32(ctx));
-}
-/** These real versions are due to Isaku Wada */
-
-/** generates a random number on [0,1) with 53-bit resolution*/
-JEMALLOC_INLINE double to_res53(uint64_t v) 
-{ 
-    return v * (1.0/18446744073709551616.0L);
-}
-
-/** generates a random number on [0,1) with 53-bit resolution from two
- * 32 bit integers */
-JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y) 
-{ 
-    return to_res53(x | ((uint64_t)y << 32));
-}
-
-/** generates a random number on [0,1) with 53-bit resolution
- */
-JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx) 
-{ 
-    return to_res53(gen_rand64(ctx));
-} 
-
-/** generates a random number on [0,1) with 53-bit resolution
-    using 32bit integer.
- */
-JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx) 
-{ 
-    uint32_t x, y;
-
-    x = gen_rand32(ctx);
-    y = gen_rand32(ctx);
-    return to_res53_mix(x, y);
-} 
-#endif
-#endif
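
The inline helpers above (credited to Isaku Wada) turn raw 32- and 64-bit generator outputs into doubles on the unit interval by simple scaling: divide by 2^32-1 for [0,1], by 2^32 for [0,1), add 0.5 first for (0,1), and divide a 64-bit value by 2^64 for 53-bit resolution. A standalone sketch of the same conversions, with a fixed value standing in for gen_rand32() output (illustrative only, not from the deleted header):

#include <stdint.h>
#include <stdio.h>

/* [0,1]: divide by 2^32 - 1; [0,1): divide by 2^32; (0,1): offset by 0.5 first. */
static double to_real1(uint32_t v) { return v * (1.0 / 4294967295.0); }
static double to_real2(uint32_t v) { return v * (1.0 / 4294967296.0); }
static double to_real3(uint32_t v) { return (((double)v) + 0.5) * (1.0 / 4294967296.0); }
/* 53-bit resolution in [0,1) from one 64-bit draw, or from two 32-bit draws. */
static double to_res53(uint64_t v) { return v * (1.0 / 18446744073709551616.0); }
static double to_res53_mix(uint32_t x, uint32_t y) { return to_res53(x | ((uint64_t)y << 32)); }

int main(void) {
    uint32_t v = 0xdeadbeefU;    /* stand-in for a gen_rand32() output */
    printf("[0,1]  %.17g\n", to_real1(v));
    printf("[0,1)  %.17g\n", to_real2(v));
    printf("(0,1)  %.17g\n", to_real3(v));
    printf("53-bit %.17g\n", to_res53_mix(v, ~v));
    return 0;
}
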
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/btalloc.h b/zircon/third_party/ulib/jemalloc/test/include/test/btalloc.h
deleted file mode 100644
index c3f9d4d..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/btalloc.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* btalloc() provides a mechanism for allocating via permuted backtraces. */
-void	*btalloc(size_t size, unsigned bits);
-
-#define	btalloc_n_proto(n)						\
-void	*btalloc_##n(size_t size, unsigned bits);
-btalloc_n_proto(0)
-btalloc_n_proto(1)
-
-#define	btalloc_n_gen(n)						\
-void *									\
-btalloc_##n(size_t size, unsigned bits)					\
-{									\
-	void *p;							\
-									\
-	if (bits == 0)							\
-		p = mallocx(size, 0);					\
-	else {								\
-		switch (bits & 0x1U) {					\
-		case 0:							\
-			p = (btalloc_0(size, bits >> 1));		\
-			break;						\
-		case 1:							\
-			p = (btalloc_1(size, bits >> 1));		\
-			break;						\
-		default: not_reached();					\
-		}							\
-	}								\
-	/* Intentionally sabotage tail call optimization. */		\
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");		\
-	return (p);							\
-}
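
btalloc_n_gen() above stamps out two mutually recursive allocators; each bit of `bits` picks which of the two makes the next call, so every distinct `bits` value yields a distinct call stack above the final allocation, which is what profiling tests keyed on backtraces need. A standalone sketch of the same idea using plain malloc() (names are illustrative, not from the deleted header):

#include <stdlib.h>

/* Two mutually recursive allocators; each consumed bit of `bits` selects
 * which one makes the next call, so distinct `bits` values give distinct
 * call stacks above the final malloc(). */
void *bt_alloc_0(size_t size, unsigned bits);
void *bt_alloc_1(size_t size, unsigned bits);

#define BT_ALLOC_GEN(n)                                        \
void *bt_alloc_##n(size_t size, unsigned bits) {               \
    void *p;                                                   \
    if (bits == 0)                                             \
        p = malloc(size);                                      \
    else if ((bits & 0x1U) == 0)                               \
        p = bt_alloc_0(size, bits >> 1);                       \
    else                                                       \
        p = bt_alloc_1(size, bits >> 1);                       \
    /* Using p after the call keeps it from being a tail call. */ \
    if (p == NULL)                                             \
        abort();                                               \
    return p;                                                  \
}

BT_ALLOC_GEN(0)
BT_ALLOC_GEN(1)

int main(void) {
    void *p = bt_alloc_0(32, 0x5U); /* call chain 0 -> 1 -> 0 -> 1, then malloc() */
    free(p);
    return 0;
}
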
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/extent_hooks.h b/zircon/third_party/ulib/jemalloc/test/include/test/extent_hooks.h
deleted file mode 100644
index f50747d..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/extent_hooks.h
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Boilerplate code used for testing extent hooks via interception and
- * passthrough.
- */
-
-static void	*extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr,
-    size_t size, size_t alignment, bool *zero, bool *commit,
-    unsigned arena_ind);
-static bool	extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr,
-    size_t size, bool committed, unsigned arena_ind);
-static bool	extent_commit_hook(extent_hooks_t *extent_hooks, void *addr,
-    size_t size, size_t offset, size_t length, unsigned arena_ind);
-static bool	extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr,
-    size_t size, size_t offset, size_t length, unsigned arena_ind);
-static bool	extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr,
-    size_t size, size_t offset, size_t length, unsigned arena_ind);
-static bool	extent_purge_forced_hook(extent_hooks_t *extent_hooks,
-    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
-static bool	extent_split_hook(extent_hooks_t *extent_hooks, void *addr,
-    size_t size, size_t size_a, size_t size_b, bool committed,
-    unsigned arena_ind);
-static bool	extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a,
-    size_t size_a, void *addr_b, size_t size_b, bool committed,
-    unsigned arena_ind);
-
-static extent_hooks_t *default_hooks;
-static extent_hooks_t hooks = {
-	extent_alloc_hook,
-	extent_dalloc_hook,
-	extent_commit_hook,
-	extent_decommit_hook,
-	extent_purge_lazy_hook,
-	extent_purge_forced_hook,
-	extent_split_hook,
-	extent_merge_hook
-};
-
-/* Control whether hook functions pass calls through to default hooks. */
-static bool try_alloc = true;
-static bool try_dalloc = true;
-static bool try_commit = true;
-static bool try_decommit = true;
-static bool try_purge_lazy = true;
-static bool try_purge_forced = true;
-static bool try_split = true;
-static bool try_merge = true;
-
-/* Set to false prior to operations, then introspect after operations. */
-static bool called_alloc;
-static bool called_dalloc;
-static bool called_commit;
-static bool called_decommit;
-static bool called_purge_lazy;
-static bool called_purge_forced;
-static bool called_split;
-static bool called_merge;
-
-/* Set to false prior to operations, then introspect after operations. */
-static bool did_alloc;
-static bool did_dalloc;
-static bool did_commit;
-static bool did_decommit;
-static bool did_purge_lazy;
-static bool did_purge_forced;
-static bool did_split;
-static bool did_merge;
-
-#if 0
-#  define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__)
-#else
-#  define TRACE_HOOK(fmt, ...)
-#endif
-
-static void *
-extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
-    size_t alignment, bool *zero, bool *commit, unsigned arena_ind)
-{
-	void *ret;
-
-	TRACE_HOOK("%s(extent_hooks=%p, new_addr=%p, size=%zu, alignment=%zu, "
-	    "*zero=%s, *commit=%s, arena_ind=%u)\n", __func__, extent_hooks,
-	    new_addr, size, alignment, *zero ?  "true" : "false", *commit ?
-	    "true" : "false", arena_ind);
-	assert_ptr_eq(extent_hooks, &hooks,
-	    "extent_hooks should be same as pointer used to set hooks");
-	assert_ptr_eq(extent_hooks->alloc, extent_alloc_hook,
-	    "Wrong hook function");
-	called_alloc = true;
-	if (!try_alloc)
-		return (NULL);
-	ret = default_hooks->alloc(default_hooks, new_addr, size, alignment,
-	    zero, commit, 0);
-	did_alloc = (ret != NULL);
-	return (ret);
-}
-
-static bool
-extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
-    bool committed, unsigned arena_ind)
-{
-	bool err;
-
-	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
-	    "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
-	    "true" : "false", arena_ind);
-	assert_ptr_eq(extent_hooks, &hooks,
-	    "extent_hooks should be same as pointer used to set hooks");
-	assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_hook,
-	    "Wrong hook function");
-	called_dalloc = true;
-	if (!try_dalloc)
-		return (true);
-	err = default_hooks->dalloc(default_hooks, addr, size, committed, 0);
-	did_dalloc = !err;
-	return (err);
-}
-
-static bool
-extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
-    size_t offset, size_t length, unsigned arena_ind)
-{
-	bool err;
-
-	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
-	    "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size,
-	    offset, length, arena_ind);
-	assert_ptr_eq(extent_hooks, &hooks,
-	    "extent_hooks should be same as pointer used to set hooks");
-	assert_ptr_eq(extent_hooks->commit, extent_commit_hook,
-	    "Wrong hook function");
-	called_commit = true;
-	if (!try_commit)
-		return (true);
-	err = default_hooks->commit(default_hooks, addr, size, offset, length,
-	    0);
-	did_commit = !err;
-	return (err);
-}
-
-static bool
-extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
-    size_t offset, size_t length, unsigned arena_ind)
-{
-	bool err;
-
-	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
-	    "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size,
-	    offset, length, arena_ind);
-	assert_ptr_eq(extent_hooks, &hooks,
-	    "extent_hooks should be same as pointer used to set hooks");
-	assert_ptr_eq(extent_hooks->decommit, extent_decommit_hook,
-	    "Wrong hook function");
-	called_decommit = true;
-	if (!try_decommit)
-		return (true);
-	err = default_hooks->decommit(default_hooks, addr, size, offset, length,
-	    0);
-	did_decommit = !err;
-	return (err);
-}
-
-static bool
-extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
-    size_t offset, size_t length, unsigned arena_ind)
-{
-	bool err;
-
-	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
-	    "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size,
-	    offset, length, arena_ind);
-	assert_ptr_eq(extent_hooks, &hooks,
-	    "extent_hooks should be same as pointer used to set hooks");
-	assert_ptr_eq(extent_hooks->purge_lazy, extent_purge_lazy_hook,
-	    "Wrong hook function");
-	called_purge_lazy = true;
-	if (!try_purge_lazy)
-		return (true);
-	err = default_hooks->purge_lazy == NULL ||
-	    default_hooks->purge_lazy(default_hooks, addr, size, offset, length,
-	    0);
-	did_purge_lazy = !err;
-	return (err);
-}
-
-static bool
-extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
-    size_t offset, size_t length, unsigned arena_ind)
-{
-	bool err;
-
-	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
-	    "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size,
-	    offset, length, arena_ind);
-	assert_ptr_eq(extent_hooks, &hooks,
-	    "extent_hooks should be same as pointer used to set hooks");
-	assert_ptr_eq(extent_hooks->purge_forced, extent_purge_forced_hook,
-	    "Wrong hook function");
-	called_purge_forced = true;
-	if (!try_purge_forced)
-		return (true);
-	err = default_hooks->purge_forced == NULL ||
-	    default_hooks->purge_forced(default_hooks, addr, size, offset,
-	    length, 0);
-	did_purge_forced = !err;
-	return (err);
-}
-
-static bool
-extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
-    size_t size_a, size_t size_b, bool committed, unsigned arena_ind)
-{
-	bool err;
-
-	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, size_a=%zu, "
-	    "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks,
-	    addr, size, size_a, size_b, committed ? "true" : "false",
-	    arena_ind);
-	assert_ptr_eq(extent_hooks, &hooks,
-	    "extent_hooks should be same as pointer used to set hooks");
-	assert_ptr_eq(extent_hooks->split, extent_split_hook,
-	    "Wrong hook function");
-	called_split = true;
-	if (!try_split)
-		return (true);
-	err = (default_hooks->split == NULL ||
-	    default_hooks->split(default_hooks, addr, size, size_a, size_b,
-	    committed, 0));
-	did_split = !err;
-	return (err);
-}
-
-static bool
-extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
-    void *addr_b, size_t size_b, bool committed, unsigned arena_ind)
-{
-	bool err;
-
-	TRACE_HOOK("%s(extent_hooks=%p, addr_a=%p, size_a=%zu, addr_b=%p "
-	    "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks,
-	    addr_a, size_a, addr_b, size_b, committed ? "true" : "false",
-	    arena_ind);
-	assert_ptr_eq(extent_hooks, &hooks,
-	    "extent_hooks should be same as pointer used to set hooks");
-	assert_ptr_eq(extent_hooks->merge, extent_merge_hook,
-	    "Wrong hook function");
-	called_merge = true;
-	if (!try_merge)
-		return (true);
-	err = (default_hooks->merge == NULL ||
-	    default_hooks->merge(default_hooks, addr_a, size_a, addr_b, size_b,
-	    committed, 0));
-	did_merge = !err;
-	return (err);
-}
-
-static void
-extent_hooks_prep(void)
-{
-	size_t sz;
-
-	sz = sizeof(default_hooks);
-	assert_d_eq(mallctl("arena.0.extent_hooks", (void *)&default_hooks, &sz,
-	    NULL, 0), 0, "Unexpected mallctl() error");
-}
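
The header above only declares the interposing hooks and an extent_hooks_prep() helper that captures the default hooks; installing the replacement hooks is left to the individual tests. A hedged sketch of how a test could swap `hooks` onto arena 0 with one mallctl() call (this is not taken from the deleted tests; it assumes jemalloc's public mallctl() interface and the `hooks`/`default_hooks` objects defined above):

#include <stdlib.h>

/* Swap the interposing `hooks` onto arena 0, capturing the previous hooks in
 * `default_hooks` so the hook functions above can pass calls through. */
static void
install_test_hooks(void)
{
    extent_hooks_t *new_hooks = &hooks;
    size_t sz = sizeof(default_hooks);

    if (mallctl("arena.0.extent_hooks", (void *)&default_hooks, &sz,
        (void *)&new_hooks, sizeof(new_hooks)) != 0)
        abort();
}
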
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/jemalloc_test.h.in b/zircon/third_party/ulib/jemalloc/test/include/test/jemalloc_test.h.in
deleted file mode 100644
index 2dd0cde..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/jemalloc_test.h.in
+++ /dev/null
@@ -1,168 +0,0 @@
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <limits.h>
-#ifndef SIZE_T_MAX
-#  define SIZE_T_MAX	SIZE_MAX
-#endif
-#include <stdlib.h>
-#include <stdarg.h>
-#include <stdbool.h>
-#include <errno.h>
-#include <math.h>
-#include <string.h>
-#ifdef _WIN32
-#  include "msvc_compat/strings.h"
-#endif
-
-#ifdef _WIN32
-#  include <windows.h>
-#  include "msvc_compat/windows_extra.h"
-#else
-#  include <pthread.h>
-#endif
-
-#include "test/jemalloc_test_defs.h"
-
-#ifdef JEMALLOC_OSSPIN
-#  include <libkern/OSAtomic.h>
-#endif
-
-#if defined(HAVE_ALTIVEC) && !defined(__APPLE__)
-#  include <altivec.h>
-#endif
-#ifdef HAVE_SSE2
-#  include <emmintrin.h>
-#endif
-
-/******************************************************************************/
-/*
- * For unit tests, expose all public and private interfaces.
- */
-#ifdef JEMALLOC_UNIT_TEST
-#  define JEMALLOC_JET
-#  define JEMALLOC_MANGLE
-#  include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/*
- * For integration tests, expose the public jemalloc interfaces, but only
- * expose the minimum necessary internal utility code (to avoid re-implementing
- * essentially identical code within the test infrastructure).
- */
-#elif defined(JEMALLOC_INTEGRATION_TEST) || \
-    defined(JEMALLOC_INTEGRATION_CPP_TEST)
-#  define JEMALLOC_MANGLE
-#  include "jemalloc/jemalloc@install_suffix@.h"
-#  include "jemalloc/internal/jemalloc_internal_defs.h"
-#  include "jemalloc/internal/jemalloc_internal_macros.h"
-
-static const bool config_debug =
-#ifdef JEMALLOC_DEBUG
-    true
-#else
-    false
-#endif
-    ;
-
-#  define JEMALLOC_N(n) @private_namespace@##n
-#  include "jemalloc/internal/private_namespace.h"
-
-#  include "jemalloc/internal/nstime_types.h"
-#  include "jemalloc/internal/nstime_structs.h"
-#  include "jemalloc/internal/nstime_externs.h"
-#  include "jemalloc/internal/util_types.h"
-#  include "jemalloc/internal/util_externs.h"
-#  include "jemalloc/internal/util_inlines.h"
-#  include "jemalloc/internal/qr.h"
-#  include "jemalloc/internal/ql.h"
-
-/******************************************************************************/
-/*
- * For stress tests, expose the public jemalloc interfaces with name mangling
- * so that they can be tested as e.g. malloc() and free().  Also expose the
- * public jemalloc interfaces with jet_ prefixes, so that stress tests can use
- * a separate allocator for their internal data structures.
- */
-#elif defined(JEMALLOC_STRESS_TEST)
-#  include "jemalloc/jemalloc@install_suffix@.h"
-
-#  include "jemalloc/jemalloc_protos_jet.h"
-
-#  define JEMALLOC_JET
-#  include "jemalloc/internal/jemalloc_internal.h"
-#  include "jemalloc/internal/public_unnamespace.h"
-#  undef JEMALLOC_JET
-
-#  include "jemalloc/jemalloc_rename.h"
-#  define JEMALLOC_MANGLE
-#  ifdef JEMALLOC_STRESS_TESTLIB
-#    include "jemalloc/jemalloc_mangle_jet.h"
-#  else
-#    include "jemalloc/jemalloc_mangle.h"
-#  endif
-
-/******************************************************************************/
-/*
- * This header does dangerous things, the effects of which only test code
- * should be subject to.
- */
-#else
-#  error "This header cannot be included outside a testing context"
-#endif
-
-/******************************************************************************/
-/*
- * Common test utilities.
- */
-#include "test/btalloc.h"
-#include "test/math.h"
-#include "test/mtx.h"
-#include "test/mq.h"
-#include "test/test.h"
-#include "test/timer.h"
-#include "test/thd.h"
-#define	MEXP 19937
-#include "test/SFMT.h"
-
-/******************************************************************************/
-/*
- * Define always-enabled assertion macros, so that test assertions execute even
- * if assertions are disabled in the library code.
- */
-#undef assert
-#undef not_reached
-#undef not_implemented
-#undef assert_not_implemented
-
-#define	assert(e) do {							\
-	if (!(e)) {							\
-		malloc_printf(						\
-		    "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n",	\
-		    __FILE__, __LINE__, #e);				\
-		abort();						\
-	}								\
-} while (0)
-
-#define	not_reached() do {						\
-	malloc_printf(							\
-	    "<jemalloc>: %s:%d: Unreachable code reached\n",		\
-	    __FILE__, __LINE__);					\
-	abort();							\
-} while (0)
-
-#define	not_implemented() do {						\
-	malloc_printf("<jemalloc>: %s:%d: Not implemented\n",		\
-	    __FILE__, __LINE__);					\
-	abort();							\
-} while (0)
-
-#define	assert_not_implemented(e) do {					\
-	if (!(e))							\
-		not_implemented();					\
-} while (0)
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/jemalloc_test_defs.h.in b/zircon/third_party/ulib/jemalloc/test/include/test/jemalloc_test_defs.h.in
deleted file mode 100644
index 5cc8532..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/jemalloc_test_defs.h.in
+++ /dev/null
@@ -1,9 +0,0 @@
-#include "jemalloc/internal/jemalloc_internal_defs.h"
-#include "jemalloc/internal/jemalloc_internal_decls.h"
-
-/*
- * For use by SFMT.  configure.ac doesn't actually define HAVE_SSE2 because its
- * dependencies are notoriously unportable in practice.
- */
-#undef HAVE_SSE2
-#undef HAVE_ALTIVEC
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/math.h b/zircon/third_party/ulib/jemalloc/test/include/test/math.h
deleted file mode 100644
index 1728d60..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/math.h
+++ /dev/null
@@ -1,310 +0,0 @@
-#ifndef JEMALLOC_ENABLE_INLINE
-double	ln_gamma(double x);
-double	i_gamma(double x, double p, double ln_gamma_p);
-double	pt_norm(double p);
-double	pt_chi2(double p, double df, double ln_gamma_df_2);
-double	pt_gamma(double p, double shape, double scale, double ln_gamma_shape);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(MATH_C_))
-/*
- * Compute the natural log of Gamma(x), accurate to 10 decimal places.
- *
- * This implementation is based on:
- *
- *   Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function
- *   [S14].  Communications of the ACM 9(9):684.
- */
-JEMALLOC_INLINE double
-ln_gamma(double x)
-{
-	double f, z;
-
-	assert(x > 0.0);
-
-	if (x < 7.0) {
-		f = 1.0;
-		z = x;
-		while (z < 7.0) {
-			f *= z;
-			z += 1.0;
-		}
-		x = z;
-		f = -log(f);
-	} else
-		f = 0.0;
-
-	z = 1.0 / (x * x);
-
-	return (f + (x-0.5) * log(x) - x + 0.918938533204673 +
-	    (((-0.000595238095238 * z + 0.000793650793651) * z -
-	    0.002777777777778) * z + 0.083333333333333) / x);
-}
-
-/*
- * Compute the incomplete Gamma ratio for [0..x], where p is the shape
- * parameter, and ln_gamma_p is ln_gamma(p).
- *
- * This implementation is based on:
- *
- *   Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral.
- *   Applied Statistics 19:285-287.
- */
-JEMALLOC_INLINE double
-i_gamma(double x, double p, double ln_gamma_p)
-{
-	double acu, factor, oflo, gin, term, rn, a, b, an, dif;
-	double pn[6];
-	unsigned i;
-
-	assert(p > 0.0);
-	assert(x >= 0.0);
-
-	if (x == 0.0)
-		return (0.0);
-
-	acu = 1.0e-10;
-	oflo = 1.0e30;
-	gin = 0.0;
-	factor = exp(p * log(x) - x - ln_gamma_p);
-
-	if (x <= 1.0 || x < p) {
-		/* Calculation by series expansion. */
-		gin = 1.0;
-		term = 1.0;
-		rn = p;
-
-		while (true) {
-			rn += 1.0;
-			term *= x / rn;
-			gin += term;
-			if (term <= acu) {
-				gin *= factor / p;
-				return (gin);
-			}
-		}
-	} else {
-		/* Calculation by continued fraction. */
-		a = 1.0 - p;
-		b = a + x + 1.0;
-		term = 0.0;
-		pn[0] = 1.0;
-		pn[1] = x;
-		pn[2] = x + 1.0;
-		pn[3] = x * b;
-		gin = pn[2] / pn[3];
-
-		while (true) {
-			a += 1.0;
-			b += 2.0;
-			term += 1.0;
-			an = a * term;
-			for (i = 0; i < 2; i++)
-				pn[i+4] = b * pn[i+2] - an * pn[i];
-			if (pn[5] != 0.0) {
-				rn = pn[4] / pn[5];
-				dif = fabs(gin - rn);
-				if (dif <= acu && dif <= acu * rn) {
-					gin = 1.0 - factor * gin;
-					return (gin);
-				}
-				gin = rn;
-			}
-			for (i = 0; i < 4; i++)
-				pn[i] = pn[i+2];
-
-			if (fabs(pn[4]) >= oflo) {
-				for (i = 0; i < 4; i++)
-					pn[i] /= oflo;
-			}
-		}
-	}
-}
-
-/*
- * Given a value p in [0..1] of the lower tail area of the normal distribution,
- * compute the limit on the definite integral from [-inf..z] that satisfies p,
- * accurate to 16 decimal places.
- *
- * This implementation is based on:
- *
- *   Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal
- *   distribution.  Applied Statistics 37(3):477-484.
- */
-JEMALLOC_INLINE double
-pt_norm(double p)
-{
-	double q, r, ret;
-
-	assert(p > 0.0 && p < 1.0);
-
-	q = p - 0.5;
-	if (fabs(q) <= 0.425) {
-		/* p close to 1/2. */
-		r = 0.180625 - q * q;
-		return (q * (((((((2.5090809287301226727e3 * r +
-		    3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r
-		    + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) *
-		    r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2)
-		    * r + 3.3871328727963666080e0) /
-		    (((((((5.2264952788528545610e3 * r +
-		    2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r
-		    + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) *
-		    r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1)
-		    * r + 1.0));
-	} else {
-		if (q < 0.0)
-			r = p;
-		else
-			r = 1.0 - p;
-		assert(r > 0.0);
-
-		r = sqrt(-log(r));
-		if (r <= 5.0) {
-			/* p neither close to 1/2 nor 0 or 1. */
-			r -= 1.6;
-			ret = ((((((((7.74545014278341407640e-4 * r +
-			    2.27238449892691845833e-2) * r +
-			    2.41780725177450611770e-1) * r +
-			    1.27045825245236838258e0) * r +
-			    3.64784832476320460504e0) * r +
-			    5.76949722146069140550e0) * r +
-			    4.63033784615654529590e0) * r +
-			    1.42343711074968357734e0) /
-			    (((((((1.05075007164441684324e-9 * r +
-			    5.47593808499534494600e-4) * r +
-			    1.51986665636164571966e-2)
-			    * r + 1.48103976427480074590e-1) * r +
-			    6.89767334985100004550e-1) * r +
-			    1.67638483018380384940e0) * r +
-			    2.05319162663775882187e0) * r + 1.0));
-		} else {
-			/* p near 0 or 1. */
-			r -= 5.0;
-			ret = ((((((((2.01033439929228813265e-7 * r +
-			    2.71155556874348757815e-5) * r +
-			    1.24266094738807843860e-3) * r +
-			    2.65321895265761230930e-2) * r +
-			    2.96560571828504891230e-1) * r +
-			    1.78482653991729133580e0) * r +
-			    5.46378491116411436990e0) * r +
-			    6.65790464350110377720e0) /
-			    (((((((2.04426310338993978564e-15 * r +
-			    1.42151175831644588870e-7) * r +
-			    1.84631831751005468180e-5) * r +
-			    7.86869131145613259100e-4) * r +
-			    1.48753612908506148525e-2) * r +
-			    1.36929880922735805310e-1) * r +
-			    5.99832206555887937690e-1)
-			    * r + 1.0));
-		}
-		if (q < 0.0)
-			ret = -ret;
-		return (ret);
-	}
-}
-
-/*
- * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution
- * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute
- * the upper limit on the definite integral from [0..z] that satisfies p,
- * accurate to 12 decimal places.
- *
- * This implementation is based on:
- *
- *   Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of
- *   the Chi^2 distribution.  Applied Statistics 24(3):385-388.
- *
- *   Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage
- *   points of the Chi^2 distribution.  Applied Statistics 40(1):233-235.
- */
-JEMALLOC_INLINE double
-pt_chi2(double p, double df, double ln_gamma_df_2)
-{
-	double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6;
-	unsigned i;
-
-	assert(p >= 0.0 && p < 1.0);
-	assert(df > 0.0);
-
-	e = 5.0e-7;
-	aa = 0.6931471805;
-
-	xx = 0.5 * df;
-	c = xx - 1.0;
-
-	if (df < -1.24 * log(p)) {
-		/* Starting approximation for small Chi^2. */
-		ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx);
-		if (ch - e < 0.0)
-			return (ch);
-	} else {
-		if (df > 0.32) {
-			x = pt_norm(p);
-			/*
-			 * Starting approximation using Wilson and Hilferty
-			 * estimate.
-			 */
-			p1 = 0.222222 / df;
-			ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0);
-			/* Starting approximation for p tending to 1. */
-			if (ch > 2.2 * df + 6.0) {
-				ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) +
-				    ln_gamma_df_2);
-			}
-		} else {
-			ch = 0.4;
-			a = log(1.0 - p);
-			while (true) {
-				q = ch;
-				p1 = 1.0 + ch * (4.67 + ch);
-				p2 = ch * (6.73 + ch * (6.66 + ch));
-				t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch
-				    * (13.32 + 3.0 * ch)) / p2;
-				ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch +
-				    c * aa) * p2 / p1) / t;
-				if (fabs(q / ch - 1.0) - 0.01 <= 0.0)
-					break;
-			}
-		}
-	}
-
-	for (i = 0; i < 20; i++) {
-		/* Calculation of seven-term Taylor series. */
-		q = ch;
-		p1 = 0.5 * ch;
-		if (p1 < 0.0)
-			return (-1.0);
-		p2 = p - i_gamma(p1, xx, ln_gamma_df_2);
-		t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch));
-		b = t / ch;
-		a = 0.5 * t - b * c;
-		s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 +
-		    60.0 * a))))) / 420.0;
-		s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 *
-		    a)))) / 2520.0;
-		s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0;
-		s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a *
-		    (889.0 + 1740.0 * a))) / 5040.0;
-		s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0;
-		s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0;
-		ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3
-		    - b * (s4 - b * (s5 - b * s6))))));
-		if (fabs(q / ch - 1.0) <= e)
-			break;
-	}
-
-	return (ch);
-}
-
-/*
- * Given a value p in [0..1] and Gamma distribution shape and scale parameters,
- * compute the upper limit on the definite integral from [0..z] that satisfies
- * p.
- */
-JEMALLOC_INLINE double
-pt_gamma(double p, double shape, double scale, double ln_gamma_shape)
-{
-	return (pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale);
-}
-#endif
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/mq.h b/zircon/third_party/ulib/jemalloc/test/include/test/mq.h
deleted file mode 100644
index a974eb9..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/mq.h
+++ /dev/null
@@ -1,108 +0,0 @@
-void	mq_nanosleep(unsigned ns);
-
-/*
- * Simple templated message queue implementation that relies on only mutexes for
- * synchronization (which reduces portability issues).  Given the following
- * setup:
- *
- *   typedef struct mq_msg_s mq_msg_t;
- *   struct mq_msg_s {
- *           mq_msg(mq_msg_t) link;
- *           [message data]
- *   };
- *   mq_gen(, mq_, mq_t, mq_msg_t, link)
- *
- * The API is as follows:
- *
- *   bool mq_init(mq_t *mq);
- *   void mq_fini(mq_t *mq);
- *   unsigned mq_count(mq_t *mq);
- *   mq_msg_t *mq_tryget(mq_t *mq);
- *   mq_msg_t *mq_get(mq_t *mq);
- *   void mq_put(mq_t *mq, mq_msg_t *msg);
- *
- * The message queue linkage embedded in each message is to be treated as
- * externally opaque (no need to initialize or clean up externally).  mq_fini()
- * does not perform any cleanup of messages, since it knows nothing of their
- * payloads.
- */
-#define	mq_msg(a_mq_msg_type)	ql_elm(a_mq_msg_type)
-
-#define	mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field)	\
-typedef struct {							\
-	mtx_t			lock;					\
-	ql_head(a_mq_msg_type)	msgs;					\
-	unsigned		count;					\
-} a_mq_type;								\
-a_attr bool								\
-a_prefix##init(a_mq_type *mq) {						\
-									\
-	if (mtx_init(&mq->lock))					\
-		return (true);						\
-	ql_new(&mq->msgs);						\
-	mq->count = 0;							\
-	return (false);							\
-}									\
-a_attr void								\
-a_prefix##fini(a_mq_type *mq)						\
-{									\
-	mtx_fini(&mq->lock);						\
-}									\
-a_attr unsigned								\
-a_prefix##count(a_mq_type *mq)						\
-{									\
-	unsigned count;							\
-									\
-	mtx_lock(&mq->lock);						\
-	count = mq->count;						\
-	mtx_unlock(&mq->lock);						\
-	return (count);							\
-}									\
-a_attr a_mq_msg_type *							\
-a_prefix##tryget(a_mq_type *mq)						\
-{									\
-	a_mq_msg_type *msg;						\
-									\
-	mtx_lock(&mq->lock);						\
-	msg = ql_first(&mq->msgs);					\
-	if (msg != NULL) {						\
-		ql_head_remove(&mq->msgs, a_mq_msg_type, a_field);	\
-		mq->count--;						\
-	}								\
-	mtx_unlock(&mq->lock);						\
-	return (msg);							\
-}									\
-a_attr a_mq_msg_type *							\
-a_prefix##get(a_mq_type *mq)						\
-{									\
-	a_mq_msg_type *msg;						\
-	unsigned ns;							\
-									\
-	msg = a_prefix##tryget(mq);					\
-	if (msg != NULL)						\
-		return (msg);						\
-									\
-	ns = 1;								\
-	while (true) {							\
-		mq_nanosleep(ns);					\
-		msg = a_prefix##tryget(mq);				\
-		if (msg != NULL)					\
-			return (msg);					\
-		if (ns < 1000*1000*1000) {				\
-			/* Double sleep time, up to max 1 second. */	\
-			ns <<= 1;					\
-			if (ns > 1000*1000*1000)			\
-				ns = 1000*1000*1000;			\
-		}							\
-	}								\
-}									\
-a_attr void								\
-a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg)			\
-{									\
-									\
-	mtx_lock(&mq->lock);						\
-	ql_elm_new(msg, a_field);					\
-	ql_tail_insert(&mq->msgs, msg, a_field);			\
-	mq->count++;							\
-	mtx_unlock(&mq->lock);						\
-}
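
For reference, a minimal usage sketch of the removed message-queue template, following the setup and API spelled out in the header comment above; the payload field, function name, and message value are illustrative only, and the assert macros come from the deleted test harness:

#include "test/jemalloc_test.h"

typedef struct mq_msg_s mq_msg_t;
struct mq_msg_s {
	mq_msg(mq_msg_t)	link;
	int			payload;	/* Example message data. */
};
mq_gen(, mq_, mq_t, mq_msg_t, link)

static void
mq_example(void)
{
	mq_t mq;
	mq_msg_t msg, *m;

	assert_false(mq_init(&mq), "Unexpected mq_init() failure");
	msg.payload = 42;
	mq_put(&mq, &msg);	/* The embedded linkage needs no setup. */
	assert_u_eq(mq_count(&mq), 1, "Queue should hold one message");
	m = mq_get(&mq);	/* Blocks until a message is available. */
	assert_ptr_eq(m, &msg, "Expected the message enqueued above");
	mq_fini(&mq);		/* Does not free any queued messages. */
}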
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/mtx.h b/zircon/third_party/ulib/jemalloc/test/include/test/mtx.h
deleted file mode 100644
index 58afbc3..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/mtx.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * mtx is a slightly simplified version of malloc_mutex.  This code duplication
- * is unfortunate, but there are allocator bootstrapping considerations that
- * would leak into the test infrastructure if malloc_mutex were used directly
- * in tests.
- */
-
-typedef struct {
-#ifdef _WIN32
-	CRITICAL_SECTION	lock;
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-	os_unfair_lock		lock;
-#elif (defined(JEMALLOC_OSSPIN))
-	OSSpinLock		lock;
-#else
-	pthread_mutex_t		lock;
-#endif
-} mtx_t;
-
-bool	mtx_init(mtx_t *mtx);
-void	mtx_fini(mtx_t *mtx);
-void	mtx_lock(mtx_t *mtx);
-void	mtx_unlock(mtx_t *mtx);
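
A minimal sketch of how this mutex wrapper was meant to be used, assuming the thd_* helpers declared in test/include/test/thd.h further down; the counter and thread body are illustrative:

static mtx_t	counter_lock;
static unsigned	counter;

static void *
incr_thd(void *arg)
{
	mtx_lock(&counter_lock);
	counter++;
	mtx_unlock(&counter_lock);
	return (NULL);
}

static void
mtx_example(void)
{
	thd_t thd;

	assert_false(mtx_init(&counter_lock), "Unexpected mtx_init() failure");
	thd_create(&thd, incr_thd, NULL);
	thd_join(thd, NULL);
	assert_u_eq(counter, 1, "Unexpected counter value");
	mtx_fini(&counter_lock);
}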
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/test.h b/zircon/third_party/ulib/jemalloc/test/include/test/test.h
deleted file mode 100644
index 8c69fc2..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/test.h
+++ /dev/null
@@ -1,333 +0,0 @@
-#define	ASSERT_BUFSIZE	256
-
-#define	assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do {		\
-	t a_ = (a);							\
-	t b_ = (b);							\
-	if (!(a_ cmp b_)) {						\
-		char prefix[ASSERT_BUFSIZE];				\
-		char message[ASSERT_BUFSIZE];				\
-		malloc_snprintf(prefix, sizeof(prefix),			\
-		    "%s:%s:%d: Failed assertion: "			\
-		    "(%s) " #cmp " (%s) --> "				\
-		    "%" pri " " #neg_cmp " %" pri ": ",			\
-		    __func__, __FILE__, __LINE__,			\
-		    #a, #b, a_, b_);					\
-		malloc_snprintf(message, sizeof(message), __VA_ARGS__);	\
-		p_test_fail(prefix, message);				\
-	}								\
-} while (0)
-
-#define	assert_ptr_eq(a, b, ...)	assert_cmp(void *, a, b, ==,	\
-    !=, "p", __VA_ARGS__)
-#define	assert_ptr_ne(a, b, ...)	assert_cmp(void *, a, b, !=,	\
-    ==, "p", __VA_ARGS__)
-#define	assert_ptr_null(a, ...)		assert_cmp(void *, a, NULL, ==,	\
-    !=, "p", __VA_ARGS__)
-#define	assert_ptr_not_null(a, ...)	assert_cmp(void *, a, NULL, !=,	\
-    ==, "p", __VA_ARGS__)
-
-#define	assert_c_eq(a, b, ...)	assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__)
-#define	assert_c_ne(a, b, ...)	assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__)
-#define	assert_c_lt(a, b, ...)	assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__)
-#define	assert_c_le(a, b, ...)	assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__)
-#define	assert_c_ge(a, b, ...)	assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__)
-#define	assert_c_gt(a, b, ...)	assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__)
-
-#define	assert_x_eq(a, b, ...)	assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__)
-#define	assert_x_ne(a, b, ...)	assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__)
-#define	assert_x_lt(a, b, ...)	assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__)
-#define	assert_x_le(a, b, ...)	assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__)
-#define	assert_x_ge(a, b, ...)	assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__)
-#define	assert_x_gt(a, b, ...)	assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__)
-
-#define	assert_d_eq(a, b, ...)	assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__)
-#define	assert_d_ne(a, b, ...)	assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__)
-#define	assert_d_lt(a, b, ...)	assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__)
-#define	assert_d_le(a, b, ...)	assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__)
-#define	assert_d_ge(a, b, ...)	assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__)
-#define	assert_d_gt(a, b, ...)	assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__)
-
-#define	assert_u_eq(a, b, ...)	assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__)
-#define	assert_u_ne(a, b, ...)	assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__)
-#define	assert_u_lt(a, b, ...)	assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__)
-#define	assert_u_le(a, b, ...)	assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__)
-#define	assert_u_ge(a, b, ...)	assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__)
-#define	assert_u_gt(a, b, ...)	assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__)
-
-#define	assert_ld_eq(a, b, ...)	assert_cmp(long, a, b, ==,	\
-    !=, "ld", __VA_ARGS__)
-#define	assert_ld_ne(a, b, ...)	assert_cmp(long, a, b, !=,	\
-    ==, "ld", __VA_ARGS__)
-#define	assert_ld_lt(a, b, ...)	assert_cmp(long, a, b, <,	\
-    >=, "ld", __VA_ARGS__)
-#define	assert_ld_le(a, b, ...)	assert_cmp(long, a, b, <=,	\
-    >, "ld", __VA_ARGS__)
-#define	assert_ld_ge(a, b, ...)	assert_cmp(long, a, b, >=,	\
-    <, "ld", __VA_ARGS__)
-#define	assert_ld_gt(a, b, ...)	assert_cmp(long, a, b, >,	\
-    <=, "ld", __VA_ARGS__)
-
-#define	assert_lu_eq(a, b, ...)	assert_cmp(unsigned long,	\
-    a, b, ==, !=, "lu", __VA_ARGS__)
-#define	assert_lu_ne(a, b, ...)	assert_cmp(unsigned long,	\
-    a, b, !=, ==, "lu", __VA_ARGS__)
-#define	assert_lu_lt(a, b, ...)	assert_cmp(unsigned long,	\
-    a, b, <, >=, "lu", __VA_ARGS__)
-#define	assert_lu_le(a, b, ...)	assert_cmp(unsigned long,	\
-    a, b, <=, >, "lu", __VA_ARGS__)
-#define	assert_lu_ge(a, b, ...)	assert_cmp(unsigned long,	\
-    a, b, >=, <, "lu", __VA_ARGS__)
-#define	assert_lu_gt(a, b, ...)	assert_cmp(unsigned long,	\
-    a, b, >, <=, "lu", __VA_ARGS__)
-
-#define	assert_qd_eq(a, b, ...)	assert_cmp(long long, a, b, ==,	\
-    !=, "qd", __VA_ARGS__)
-#define	assert_qd_ne(a, b, ...)	assert_cmp(long long, a, b, !=,	\
-    ==, "qd", __VA_ARGS__)
-#define	assert_qd_lt(a, b, ...)	assert_cmp(long long, a, b, <,	\
-    >=, "qd", __VA_ARGS__)
-#define	assert_qd_le(a, b, ...)	assert_cmp(long long, a, b, <=,	\
-    >, "qd", __VA_ARGS__)
-#define	assert_qd_ge(a, b, ...)	assert_cmp(long long, a, b, >=,	\
-    <, "qd", __VA_ARGS__)
-#define	assert_qd_gt(a, b, ...)	assert_cmp(long long, a, b, >,	\
-    <=, "qd", __VA_ARGS__)
-
-#define	assert_qu_eq(a, b, ...)	assert_cmp(unsigned long long,	\
-    a, b, ==, !=, "qu", __VA_ARGS__)
-#define	assert_qu_ne(a, b, ...)	assert_cmp(unsigned long long,	\
-    a, b, !=, ==, "qu", __VA_ARGS__)
-#define	assert_qu_lt(a, b, ...)	assert_cmp(unsigned long long,	\
-    a, b, <, >=, "qu", __VA_ARGS__)
-#define	assert_qu_le(a, b, ...)	assert_cmp(unsigned long long,	\
-    a, b, <=, >, "qu", __VA_ARGS__)
-#define	assert_qu_ge(a, b, ...)	assert_cmp(unsigned long long,	\
-    a, b, >=, <, "qu", __VA_ARGS__)
-#define	assert_qu_gt(a, b, ...)	assert_cmp(unsigned long long,	\
-    a, b, >, <=, "qu", __VA_ARGS__)
-
-#define	assert_jd_eq(a, b, ...)	assert_cmp(intmax_t, a, b, ==,	\
-    !=, "jd", __VA_ARGS__)
-#define	assert_jd_ne(a, b, ...)	assert_cmp(intmax_t, a, b, !=,	\
-    ==, "jd", __VA_ARGS__)
-#define	assert_jd_lt(a, b, ...)	assert_cmp(intmax_t, a, b, <,	\
-    >=, "jd", __VA_ARGS__)
-#define	assert_jd_le(a, b, ...)	assert_cmp(intmax_t, a, b, <=,	\
-    >, "jd", __VA_ARGS__)
-#define	assert_jd_ge(a, b, ...)	assert_cmp(intmax_t, a, b, >=,	\
-    <, "jd", __VA_ARGS__)
-#define	assert_jd_gt(a, b, ...)	assert_cmp(intmax_t, a, b, >,	\
-    <=, "jd", __VA_ARGS__)
-
-#define	assert_ju_eq(a, b, ...)	assert_cmp(uintmax_t, a, b, ==,	\
-    !=, "ju", __VA_ARGS__)
-#define	assert_ju_ne(a, b, ...)	assert_cmp(uintmax_t, a, b, !=,	\
-    ==, "ju", __VA_ARGS__)
-#define	assert_ju_lt(a, b, ...)	assert_cmp(uintmax_t, a, b, <,	\
-    >=, "ju", __VA_ARGS__)
-#define	assert_ju_le(a, b, ...)	assert_cmp(uintmax_t, a, b, <=,	\
-    >, "ju", __VA_ARGS__)
-#define	assert_ju_ge(a, b, ...)	assert_cmp(uintmax_t, a, b, >=,	\
-    <, "ju", __VA_ARGS__)
-#define	assert_ju_gt(a, b, ...)	assert_cmp(uintmax_t, a, b, >,	\
-    <=, "ju", __VA_ARGS__)
-
-#define	assert_zd_eq(a, b, ...)	assert_cmp(ssize_t, a, b, ==,	\
-    !=, "zd", __VA_ARGS__)
-#define	assert_zd_ne(a, b, ...)	assert_cmp(ssize_t, a, b, !=,	\
-    ==, "zd", __VA_ARGS__)
-#define	assert_zd_lt(a, b, ...)	assert_cmp(ssize_t, a, b, <,	\
-    >=, "zd", __VA_ARGS__)
-#define	assert_zd_le(a, b, ...)	assert_cmp(ssize_t, a, b, <=,	\
-    >, "zd", __VA_ARGS__)
-#define	assert_zd_ge(a, b, ...)	assert_cmp(ssize_t, a, b, >=,	\
-    <, "zd", __VA_ARGS__)
-#define	assert_zd_gt(a, b, ...)	assert_cmp(ssize_t, a, b, >,	\
-    <=, "zd", __VA_ARGS__)
-
-#define	assert_zu_eq(a, b, ...)	assert_cmp(size_t, a, b, ==,	\
-    !=, "zu", __VA_ARGS__)
-#define	assert_zu_ne(a, b, ...)	assert_cmp(size_t, a, b, !=,	\
-    ==, "zu", __VA_ARGS__)
-#define	assert_zu_lt(a, b, ...)	assert_cmp(size_t, a, b, <,	\
-    >=, "zu", __VA_ARGS__)
-#define	assert_zu_le(a, b, ...)	assert_cmp(size_t, a, b, <=,	\
-    >, "zu", __VA_ARGS__)
-#define	assert_zu_ge(a, b, ...)	assert_cmp(size_t, a, b, >=,	\
-    <, "zu", __VA_ARGS__)
-#define	assert_zu_gt(a, b, ...)	assert_cmp(size_t, a, b, >,	\
-    <=, "zu", __VA_ARGS__)
-
-#define	assert_d32_eq(a, b, ...)	assert_cmp(int32_t, a, b, ==,	\
-    !=, FMTd32, __VA_ARGS__)
-#define	assert_d32_ne(a, b, ...)	assert_cmp(int32_t, a, b, !=,	\
-    ==, FMTd32, __VA_ARGS__)
-#define	assert_d32_lt(a, b, ...)	assert_cmp(int32_t, a, b, <,	\
-    >=, FMTd32, __VA_ARGS__)
-#define	assert_d32_le(a, b, ...)	assert_cmp(int32_t, a, b, <=,	\
-    >, FMTd32, __VA_ARGS__)
-#define	assert_d32_ge(a, b, ...)	assert_cmp(int32_t, a, b, >=,	\
-    <, FMTd32, __VA_ARGS__)
-#define	assert_d32_gt(a, b, ...)	assert_cmp(int32_t, a, b, >,	\
-    <=, FMTd32, __VA_ARGS__)
-
-#define	assert_u32_eq(a, b, ...)	assert_cmp(uint32_t, a, b, ==,	\
-    !=, FMTu32, __VA_ARGS__)
-#define	assert_u32_ne(a, b, ...)	assert_cmp(uint32_t, a, b, !=,	\
-    ==, FMTu32, __VA_ARGS__)
-#define	assert_u32_lt(a, b, ...)	assert_cmp(uint32_t, a, b, <,	\
-    >=, FMTu32, __VA_ARGS__)
-#define	assert_u32_le(a, b, ...)	assert_cmp(uint32_t, a, b, <=,	\
-    >, FMTu32, __VA_ARGS__)
-#define	assert_u32_ge(a, b, ...)	assert_cmp(uint32_t, a, b, >=,	\
-    <, FMTu32, __VA_ARGS__)
-#define	assert_u32_gt(a, b, ...)	assert_cmp(uint32_t, a, b, >,	\
-    <=, FMTu32, __VA_ARGS__)
-
-#define	assert_d64_eq(a, b, ...)	assert_cmp(int64_t, a, b, ==,	\
-    !=, FMTd64, __VA_ARGS__)
-#define	assert_d64_ne(a, b, ...)	assert_cmp(int64_t, a, b, !=,	\
-    ==, FMTd64, __VA_ARGS__)
-#define	assert_d64_lt(a, b, ...)	assert_cmp(int64_t, a, b, <,	\
-    >=, FMTd64, __VA_ARGS__)
-#define	assert_d64_le(a, b, ...)	assert_cmp(int64_t, a, b, <=,	\
-    >, FMTd64, __VA_ARGS__)
-#define	assert_d64_ge(a, b, ...)	assert_cmp(int64_t, a, b, >=,	\
-    <, FMTd64, __VA_ARGS__)
-#define	assert_d64_gt(a, b, ...)	assert_cmp(int64_t, a, b, >,	\
-    <=, FMTd64, __VA_ARGS__)
-
-#define	assert_u64_eq(a, b, ...)	assert_cmp(uint64_t, a, b, ==,	\
-    !=, FMTu64, __VA_ARGS__)
-#define	assert_u64_ne(a, b, ...)	assert_cmp(uint64_t, a, b, !=,	\
-    ==, FMTu64, __VA_ARGS__)
-#define	assert_u64_lt(a, b, ...)	assert_cmp(uint64_t, a, b, <,	\
-    >=, FMTu64, __VA_ARGS__)
-#define	assert_u64_le(a, b, ...)	assert_cmp(uint64_t, a, b, <=,	\
-    >, FMTu64, __VA_ARGS__)
-#define	assert_u64_ge(a, b, ...)	assert_cmp(uint64_t, a, b, >=,	\
-    <, FMTu64, __VA_ARGS__)
-#define	assert_u64_gt(a, b, ...)	assert_cmp(uint64_t, a, b, >,	\
-    <=, FMTu64, __VA_ARGS__)
-
-#define	assert_b_eq(a, b, ...) do {					\
-	bool a_ = (a);							\
-	bool b_ = (b);							\
-	if (!(a_ == b_)) {						\
-		char prefix[ASSERT_BUFSIZE];				\
-		char message[ASSERT_BUFSIZE];				\
-		malloc_snprintf(prefix, sizeof(prefix),			\
-		    "%s:%s:%d: Failed assertion: "			\
-		    "(%s) == (%s) --> %s != %s: ",			\
-		    __func__, __FILE__, __LINE__,			\
-		    #a, #b, a_ ? "true" : "false",			\
-		    b_ ? "true" : "false");				\
-		malloc_snprintf(message, sizeof(message), __VA_ARGS__);	\
-		p_test_fail(prefix, message);				\
-	}								\
-} while (0)
-#define	assert_b_ne(a, b, ...) do {					\
-	bool a_ = (a);							\
-	bool b_ = (b);							\
-	if (!(a_ != b_)) {						\
-		char prefix[ASSERT_BUFSIZE];				\
-		char message[ASSERT_BUFSIZE];				\
-		malloc_snprintf(prefix, sizeof(prefix),			\
-		    "%s:%s:%d: Failed assertion: "			\
-		    "(%s) != (%s) --> %s == %s: ",			\
-		    __func__, __FILE__, __LINE__,			\
-		    #a, #b, a_ ? "true" : "false",			\
-		    b_ ? "true" : "false");				\
-		malloc_snprintf(message, sizeof(message), __VA_ARGS__);	\
-		p_test_fail(prefix, message);				\
-	}								\
-} while (0)
-#define	assert_true(a, ...)	assert_b_eq(a, true, __VA_ARGS__)
-#define	assert_false(a, ...)	assert_b_eq(a, false, __VA_ARGS__)
-
-#define	assert_str_eq(a, b, ...) do {				\
-	if (strcmp((a), (b))) {						\
-		char prefix[ASSERT_BUFSIZE];				\
-		char message[ASSERT_BUFSIZE];				\
-		malloc_snprintf(prefix, sizeof(prefix),			\
-		    "%s:%s:%d: Failed assertion: "			\
-		    "(%s) same as (%s) --> "				\
-		    "\"%s\" differs from \"%s\": ",			\
-		    __func__, __FILE__, __LINE__, #a, #b, a, b);	\
-		malloc_snprintf(message, sizeof(message), __VA_ARGS__);	\
-		p_test_fail(prefix, message);				\
-	}								\
-} while (0)
-#define	assert_str_ne(a, b, ...) do {				\
-	if (!strcmp((a), (b))) {					\
-		char prefix[ASSERT_BUFSIZE];				\
-		char message[ASSERT_BUFSIZE];				\
-		malloc_snprintf(prefix, sizeof(prefix),			\
-		    "%s:%s:%d: Failed assertion: "			\
-		    "(%s) differs from (%s) --> "			\
-		    "\"%s\" same as \"%s\": ",				\
-		    __func__, __FILE__, __LINE__, #a, #b, a, b);	\
-		malloc_snprintf(message, sizeof(message), __VA_ARGS__);	\
-		p_test_fail(prefix, message);				\
-	}								\
-} while (0)
-
-#define	assert_not_reached(...) do {					\
-	char prefix[ASSERT_BUFSIZE];					\
-	char message[ASSERT_BUFSIZE];					\
-	malloc_snprintf(prefix, sizeof(prefix),				\
-	    "%s:%s:%d: Unreachable code reached: ",			\
-	    __func__, __FILE__, __LINE__);				\
-	malloc_snprintf(message, sizeof(message), __VA_ARGS__);		\
-	p_test_fail(prefix, message);					\
-} while (0)
-
-/*
- * If this enum changes, corresponding changes in test/test.sh.in are also
- * necessary.
- */
-typedef enum {
-	test_status_pass = 0,
-	test_status_skip = 1,
-	test_status_fail = 2,
-
-	test_status_count = 3
-} test_status_t;
-
-typedef void (test_t)(void);
-
-#define	TEST_BEGIN(f)							\
-static void								\
-f(void)									\
-{									\
-	p_test_init(#f);
-
-#define	TEST_END							\
-	goto label_test_end;						\
-label_test_end:								\
-	p_test_fini();							\
-}
-
-#define	test(...)							\
-	p_test(__VA_ARGS__, NULL)
-
-#define	test_no_malloc_init(...)					\
-	p_test_no_malloc_init(__VA_ARGS__, NULL)
-
-#define	test_skip_if(e) do {						\
-	if (e) {							\
-		test_skip("%s:%s:%d: Test skipped: (%s)",		\
-		    __func__, __FILE__, __LINE__, #e);			\
-		goto label_test_end;					\
-	}								\
-} while (0)
-
-void	test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
-void	test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
-
-/* For private use by macros. */
-test_status_t	p_test(test_t *t, ...);
-test_status_t	p_test_no_malloc_init(test_t *t, ...);
-void	p_test_init(const char *name);
-void	p_test_fini(void);
-void	p_test_fail(const char *prefix, const char *message);
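
The macros above compose into the pattern every integration test below follows; a minimal, illustrative skeleton (assuming the usual test/jemalloc_test.h include) looks like:

#include "test/jemalloc_test.h"

TEST_BEGIN(test_example)
{
	void *p = malloc(1);

	assert_ptr_not_null(p, "Unexpected malloc() failure");
	assert_zu_ge(malloc_usable_size(p), 1,
	    "Usable size should cover the request");
	free(p);
}
TEST_END

int
main(void)
{
	/* test() aggregates the status of every listed test function. */
	return (test(
	    test_example));
}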
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/thd.h b/zircon/third_party/ulib/jemalloc/test/include/test/thd.h
deleted file mode 100644
index 47a5126..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/thd.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* Abstraction layer for threading in tests. */
-#ifdef _WIN32
-typedef HANDLE thd_t;
-#else
-typedef pthread_t thd_t;
-#endif
-
-void	thd_create(thd_t *thd, void *(*proc)(void *), void *arg);
-void	thd_join(thd_t thd, void **ret);
diff --git a/zircon/third_party/ulib/jemalloc/test/include/test/timer.h b/zircon/third_party/ulib/jemalloc/test/include/test/timer.h
deleted file mode 100644
index ace6191..0000000
--- a/zircon/third_party/ulib/jemalloc/test/include/test/timer.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* Simple timer, for use in benchmark reporting. */
-
-typedef struct {
-	nstime_t t0;
-	nstime_t t1;
-} timedelta_t;
-
-void	timer_start(timedelta_t *timer);
-void	timer_stop(timedelta_t *timer);
-uint64_t	timer_usec(const timedelta_t *timer);
-void	timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen);
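
A brief sketch of how the timer API was intended to be used in benchmark reporting; the loop bodies, iteration counts, and buffer size are arbitrary:

static void
timer_example(void)
{
	timedelta_t small_timer, large_timer;
	char ratio_buf[32];
	unsigned i;

	timer_start(&small_timer);
	for (i = 0; i < 1000; i++)
		free(malloc(1));
	timer_stop(&small_timer);

	timer_start(&large_timer);
	for (i = 0; i < 1000; i++)
		free(malloc(4096));
	timer_stop(&large_timer);

	/* Elapsed time of the first loop, in microseconds. */
	(void)timer_usec(&small_timer);
	/* Format the a/b ratio as a string for a benchmark report. */
	timer_ratio(&small_timer, &large_timer, ratio_buf, sizeof(ratio_buf));
}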
diff --git a/zircon/third_party/ulib/jemalloc/test/integration/MALLOCX_ARENA.c b/zircon/third_party/ulib/jemalloc/test/integration/MALLOCX_ARENA.c
deleted file mode 100644
index 1d9e423..0000000
--- a/zircon/third_party/ulib/jemalloc/test/integration/MALLOCX_ARENA.c
+++ /dev/null
@@ -1,68 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define	NTHREADS 10
-
-static bool have_dss =
-#ifdef JEMALLOC_DSS
-    true
-#else
-    false
-#endif
-    ;
-
-void *
-thd_start(void *arg)
-{
-	unsigned thread_ind = (unsigned)(uintptr_t)arg;
-	unsigned arena_ind;
-	void *p;
-	size_t sz;
-
-	sz = sizeof(arena_ind);
-	assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
-	    0, "Error in arenas.create");
-
-	if (thread_ind % 4 != 3) {
-		size_t mib[3];
-		size_t miblen = sizeof(mib) / sizeof(size_t);
-		const char *dss_precs[] = {"disabled", "primary", "secondary"};
-		unsigned prec_ind = thread_ind %
-		    (sizeof(dss_precs)/sizeof(char*));
-		const char *dss = dss_precs[prec_ind];
-		int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT;
-		assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
-		    "Error in mallctlnametomib()");
-		mib[1] = arena_ind;
-		assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
-		    sizeof(const char *)), expected_err,
-		    "Error in mallctlbymib()");
-	}
-
-	p = mallocx(1, MALLOCX_ARENA(arena_ind));
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-	dallocx(p, 0);
-
-	return (NULL);
-}
-
-TEST_BEGIN(test_MALLOCX_ARENA)
-{
-	thd_t thds[NTHREADS];
-	unsigned i;
-
-	for (i = 0; i < NTHREADS; i++) {
-		thd_create(&thds[i], thd_start,
-		    (void *)(uintptr_t)i);
-	}
-
-	for (i = 0; i < NTHREADS; i++)
-		thd_join(thds[i], NULL);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_MALLOCX_ARENA));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/integration/aligned_alloc.c b/zircon/third_party/ulib/jemalloc/test/integration/aligned_alloc.c
deleted file mode 100644
index 52b69ac..0000000
--- a/zircon/third_party/ulib/jemalloc/test/integration/aligned_alloc.c
+++ /dev/null
@@ -1,136 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define	MAXALIGN (((size_t)1) << 23)
-
-/*
- * On systems which can't merge extents, tests that call this function generate
- * a lot of dirty memory very quickly.  Purging between cycles mitigates
- * potential OOM on e.g. 32-bit Windows.
- */
-static void
-purge(void)
-{
-	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctl error");
-}
-
-TEST_BEGIN(test_alignment_errors)
-{
-	size_t alignment;
-	void *p;
-
-	alignment = 0;
-	set_errno(0);
-	p = aligned_alloc(alignment, 1);
-	assert_false(p != NULL || get_errno() != EINVAL,
-	    "Expected error for invalid alignment %zu", alignment);
-
-	for (alignment = sizeof(size_t); alignment < MAXALIGN;
-	    alignment <<= 1) {
-		set_errno(0);
-		p = aligned_alloc(alignment + 1, 1);
-		assert_false(p != NULL || get_errno() != EINVAL,
-		    "Expected error for invalid alignment %zu",
-		    alignment + 1);
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_oom_errors)
-{
-	size_t alignment, size;
-	void *p;
-
-#if LG_SIZEOF_PTR == 3
-	alignment = UINT64_C(0x8000000000000000);
-	size      = UINT64_C(0x8000000000000000);
-#else
-	alignment = 0x80000000LU;
-	size      = 0x80000000LU;
-#endif
-	set_errno(0);
-	p = aligned_alloc(alignment, size);
-	assert_false(p != NULL || get_errno() != ENOMEM,
-	    "Expected error for aligned_alloc(%zu, %zu)",
-	    alignment, size);
-
-#if LG_SIZEOF_PTR == 3
-	alignment = UINT64_C(0x4000000000000000);
-	size      = UINT64_C(0xc000000000000001);
-#else
-	alignment = 0x40000000LU;
-	size      = 0xc0000001LU;
-#endif
-	set_errno(0);
-	p = aligned_alloc(alignment, size);
-	assert_false(p != NULL || get_errno() != ENOMEM,
-	    "Expected error for aligned_alloc(%zu, %zu)",
-	    alignment, size);
-
-	alignment = 0x10LU;
-#if LG_SIZEOF_PTR == 3
-	size = UINT64_C(0xfffffffffffffff0);
-#else
-	size = 0xfffffff0LU;
-#endif
-	set_errno(0);
-	p = aligned_alloc(alignment, size);
-	assert_false(p != NULL || get_errno() != ENOMEM,
-	    "Expected error for aligned_alloc(&p, %zu, %zu)",
-	    alignment, size);
-}
-TEST_END
-
-TEST_BEGIN(test_alignment_and_size)
-{
-#define	NITER 4
-	size_t alignment, size, total;
-	unsigned i;
-	void *ps[NITER];
-
-	for (i = 0; i < NITER; i++)
-		ps[i] = NULL;
-
-	for (alignment = 8;
-	    alignment <= MAXALIGN;
-	    alignment <<= 1) {
-		total = 0;
-		for (size = 1;
-		    size < 3 * alignment && size < (1U << 31);
-		    size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
-			for (i = 0; i < NITER; i++) {
-				ps[i] = aligned_alloc(alignment, size);
-				if (ps[i] == NULL) {
-					char buf[BUFERROR_BUF];
-
-					buferror(get_errno(), buf, sizeof(buf));
-					test_fail(
-					    "Error for alignment=%zu, "
-					    "size=%zu (%#zx): %s",
-					    alignment, size, size, buf);
-				}
-				total += malloc_usable_size(ps[i]);
-				if (total >= (MAXALIGN << 1))
-					break;
-			}
-			for (i = 0; i < NITER; i++) {
-				if (ps[i] != NULL) {
-					free(ps[i]);
-					ps[i] = NULL;
-				}
-			}
-		}
-		purge();
-	}
-#undef NITER
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_alignment_errors,
-	    test_oom_errors,
-	    test_alignment_and_size));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/integration/allocated.c b/zircon/third_party/ulib/jemalloc/test/integration/allocated.c
deleted file mode 100644
index 7570c52..0000000
--- a/zircon/third_party/ulib/jemalloc/test/integration/allocated.c
+++ /dev/null
@@ -1,124 +0,0 @@
-#include "test/jemalloc_test.h"
-
-static const bool config_stats =
-#ifdef JEMALLOC_STATS
-    true
-#else
-    false
-#endif
-    ;
-
-void *
-thd_start(void *arg)
-{
-	int err;
-	void *p;
-	uint64_t a0, a1, d0, d1;
-	uint64_t *ap0, *ap1, *dp0, *dp1;
-	size_t sz, usize;
-
-	sz = sizeof(a0);
-	if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) {
-		if (err == ENOENT)
-			goto label_ENOENT;
-		test_fail("%s(): Error in mallctl(): %s", __func__,
-		    strerror(err));
-	}
-	sz = sizeof(ap0);
-	if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) {
-		if (err == ENOENT)
-			goto label_ENOENT;
-		test_fail("%s(): Error in mallctl(): %s", __func__,
-		    strerror(err));
-	}
-	assert_u64_eq(*ap0, a0,
-	    "\"thread.allocatedp\" should provide a pointer to internal "
-	    "storage");
-
-	sz = sizeof(d0);
-	if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) {
-		if (err == ENOENT)
-			goto label_ENOENT;
-		test_fail("%s(): Error in mallctl(): %s", __func__,
-		    strerror(err));
-	}
-	sz = sizeof(dp0);
-	if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL,
-	    0))) {
-		if (err == ENOENT)
-			goto label_ENOENT;
-		test_fail("%s(): Error in mallctl(): %s", __func__,
-		    strerror(err));
-	}
-	assert_u64_eq(*dp0, d0,
-	    "\"thread.deallocatedp\" should provide a pointer to internal "
-	    "storage");
-
-	p = malloc(1);
-	assert_ptr_not_null(p, "Unexpected malloc() error");
-
-	sz = sizeof(a1);
-	mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0);
-	sz = sizeof(ap1);
-	mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0);
-	assert_u64_eq(*ap1, a1,
-	    "Dereferenced \"thread.allocatedp\" value should equal "
-	    "\"thread.allocated\" value");
-	assert_ptr_eq(ap0, ap1,
-	    "Pointer returned by \"thread.allocatedp\" should not change");
-
-	usize = malloc_usable_size(p);
-	assert_u64_le(a0 + usize, a1,
-	    "Allocated memory counter should increase by at least the amount "
-	    "explicitly allocated");
-
-	free(p);
-
-	sz = sizeof(d1);
-	mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0);
-	sz = sizeof(dp1);
-	mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0);
-	assert_u64_eq(*dp1, d1,
-	    "Dereferenced \"thread.deallocatedp\" value should equal "
-	    "\"thread.deallocated\" value");
-	assert_ptr_eq(dp0, dp1,
-	    "Pointer returned by \"thread.deallocatedp\" should not change");
-
-	assert_u64_le(d0 + usize, d1,
-	    "Deallocated memory counter should increase by at least the amount "
-	    "explicitly deallocated");
-
-	return (NULL);
-label_ENOENT:
-	assert_false(config_stats,
-	    "ENOENT should only be returned if stats are disabled");
-	test_skip("\"thread.allocated\" mallctl not available");
-	return (NULL);
-}
-
-TEST_BEGIN(test_main_thread)
-{
-	thd_start(NULL);
-}
-TEST_END
-
-TEST_BEGIN(test_subthread)
-{
-	thd_t thd;
-
-	thd_create(&thd, thd_start, NULL);
-	thd_join(thd, NULL);
-}
-TEST_END
-
-int
-main(void)
-{
-	/* Run tests multiple times to check for bad interactions. */
-	return (test(
-	    test_main_thread,
-	    test_subthread,
-	    test_main_thread,
-	    test_subthread,
-	    test_main_thread));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/integration/extent.c b/zircon/third_party/ulib/jemalloc/test/integration/extent.c
deleted file mode 100644
index 30849b0..0000000
--- a/zircon/third_party/ulib/jemalloc/test/integration/extent.c
+++ /dev/null
@@ -1,182 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifdef JEMALLOC_FILL
-const char *malloc_conf = "junk:false";
-#endif
-
-#include "test/extent_hooks.h"
-
-static void
-test_extent_body(unsigned arena_ind)
-{
-	void *p;
-	size_t large0, large1, large2, sz;
-	size_t purge_mib[3];
-	size_t purge_miblen;
-	int flags;
-	bool xallocx_success_a, xallocx_success_b, xallocx_success_c;
-
-	flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
-
-	/* Get large size classes. */
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
-	    0), 0, "Unexpected arenas.lextent.0.size failure");
-	assert_d_eq(mallctl("arenas.lextent.1.size", (void *)&large1, &sz, NULL,
-	    0), 0, "Unexpected arenas.lextent.1.size failure");
-	assert_d_eq(mallctl("arenas.lextent.2.size", (void *)&large2, &sz, NULL,
-	    0), 0, "Unexpected arenas.lextent.2.size failure");
-
-	/* Test dalloc/decommit/purge cascade. */
-	purge_miblen = sizeof(purge_mib)/sizeof(size_t);
-	assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen),
-	    0, "Unexpected mallctlnametomib() failure");
-	purge_mib[1] = (size_t)arena_ind;
-	try_dalloc = false;
-	try_decommit = false;
-	p = mallocx(large0 * 2, flags);
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-	called_dalloc = false;
-	called_decommit = false;
-	did_purge_lazy = false;
-	did_purge_forced = false;
-	called_split = false;
-	xallocx_success_a = (xallocx(p, large0, 0, flags) == large0);
-	assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
-	    0, "Unexpected arena.%u.purge error", arena_ind);
-	if (xallocx_success_a) {
-		assert_true(called_dalloc, "Expected dalloc call");
-		assert_true(called_decommit, "Expected decommit call");
-		assert_true(did_purge_lazy || did_purge_forced,
-		    "Expected purge");
-	}
-	assert_true(called_split, "Expected split call");
-	dallocx(p, flags);
-	try_dalloc = true;
-
-	/* Test decommit/commit and observe split/merge. */
-	try_dalloc = false;
-	try_decommit = true;
-	p = mallocx(large0 * 2, flags);
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-	did_decommit = false;
-	did_commit = false;
-	called_split = false;
-	did_split = false;
-	did_merge = false;
-	xallocx_success_b = (xallocx(p, large0, 0, flags) == large0);
-	assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
-	    0, "Unexpected arena.%u.purge error", arena_ind);
-	if (xallocx_success_b)
-		assert_true(did_split, "Expected split");
-	xallocx_success_c = (xallocx(p, large0 * 2, 0, flags) == large0 * 2);
-	if (did_split) {
-		assert_b_eq(did_decommit, did_commit,
-		    "Expected decommit/commit match");
-	}
-	if (xallocx_success_b && xallocx_success_c)
-		assert_true(did_merge, "Expected merge");
-	dallocx(p, flags);
-	try_dalloc = true;
-	try_decommit = false;
-
-	/* Make sure non-large allocation succeeds. */
-	p = mallocx(42, flags);
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-	dallocx(p, flags);
-}
-
-TEST_BEGIN(test_extent_manual_hook)
-{
-	unsigned arena_ind;
-	size_t old_size, new_size, sz;
-	size_t hooks_mib[3];
-	size_t hooks_miblen;
-	extent_hooks_t *new_hooks, *old_hooks;
-
-	extent_hooks_prep();
-
-	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
-	    0, "Unexpected mallctl() failure");
-
-	/* Install custom extent hooks. */
-	hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
-	assert_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib,
-	    &hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
-	hooks_mib[1] = (size_t)arena_ind;
-	old_size = sizeof(extent_hooks_t *);
-	new_hooks = &hooks;
-	new_size = sizeof(extent_hooks_t *);
-	assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
-	    &old_size, (void *)&new_hooks, new_size), 0,
-	    "Unexpected extent_hooks error");
-	assert_ptr_ne(old_hooks->alloc, extent_alloc_hook,
-	    "Unexpected extent_hooks error");
-	assert_ptr_ne(old_hooks->dalloc, extent_dalloc_hook,
-	    "Unexpected extent_hooks error");
-	assert_ptr_ne(old_hooks->commit, extent_commit_hook,
-	    "Unexpected extent_hooks error");
-	assert_ptr_ne(old_hooks->decommit, extent_decommit_hook,
-	    "Unexpected extent_hooks error");
-	assert_ptr_ne(old_hooks->purge_lazy, extent_purge_lazy_hook,
-	    "Unexpected extent_hooks error");
-	assert_ptr_ne(old_hooks->purge_forced, extent_purge_forced_hook,
-	    "Unexpected extent_hooks error");
-	assert_ptr_ne(old_hooks->split, extent_split_hook,
-	    "Unexpected extent_hooks error");
-	assert_ptr_ne(old_hooks->merge, extent_merge_hook,
-	    "Unexpected extent_hooks error");
-
-	test_extent_body(arena_ind);
-
-	/* Restore extent hooks. */
-	assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL,
-	    (void *)&old_hooks, new_size), 0, "Unexpected extent_hooks error");
-	assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
-	    &old_size, NULL, 0), 0, "Unexpected extent_hooks error");
-	assert_ptr_eq(old_hooks, default_hooks, "Unexpected extent_hooks error");
-	assert_ptr_eq(old_hooks->alloc, default_hooks->alloc,
-	    "Unexpected extent_hooks error");
-	assert_ptr_eq(old_hooks->dalloc, default_hooks->dalloc,
-	    "Unexpected extent_hooks error");
-	assert_ptr_eq(old_hooks->commit, default_hooks->commit,
-	    "Unexpected extent_hooks error");
-	assert_ptr_eq(old_hooks->decommit, default_hooks->decommit,
-	    "Unexpected extent_hooks error");
-	assert_ptr_eq(old_hooks->purge_lazy, default_hooks->purge_lazy,
-	    "Unexpected extent_hooks error");
-	assert_ptr_eq(old_hooks->purge_forced, default_hooks->purge_forced,
-	    "Unexpected extent_hooks error");
-	assert_ptr_eq(old_hooks->split, default_hooks->split,
-	    "Unexpected extent_hooks error");
-	assert_ptr_eq(old_hooks->merge, default_hooks->merge,
-	    "Unexpected extent_hooks error");
-}
-TEST_END
-
-TEST_BEGIN(test_extent_auto_hook)
-{
-	unsigned arena_ind;
-	size_t new_size, sz;
-	extent_hooks_t *new_hooks;
-
-	extent_hooks_prep();
-
-	sz = sizeof(unsigned);
-	new_hooks = &hooks;
-	new_size = sizeof(extent_hooks_t *);
-	assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
-	    (void *)&new_hooks, new_size), 0, "Unexpected mallctl() failure");
-
-	test_extent_body(arena_ind);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_extent_manual_hook,
-	    test_extent_auto_hook));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/integration/mallocx.c b/zircon/third_party/ulib/jemalloc/test/integration/mallocx.c
deleted file mode 100644
index 7617b1b..0000000
--- a/zircon/third_party/ulib/jemalloc/test/integration/mallocx.c
+++ /dev/null
@@ -1,230 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifdef JEMALLOC_FILL
-const char *malloc_conf = "junk:false";
-#endif
-
-static unsigned
-get_nsizes_impl(const char *cmd)
-{
-	unsigned ret;
-	size_t z;
-
-	z = sizeof(unsigned);
-	assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
-	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
-
-	return (ret);
-}
-
-static unsigned
-get_nlarge(void)
-{
-	return (get_nsizes_impl("arenas.nlextents"));
-}
-
-static size_t
-get_size_impl(const char *cmd, size_t ind)
-{
-	size_t ret;
-	size_t z;
-	size_t mib[4];
-	size_t miblen = 4;
-
-	z = sizeof(size_t);
-	assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
-	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
-	mib[2] = ind;
-	z = sizeof(size_t);
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
-	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
-
-	return (ret);
-}
-
-static size_t
-get_large_size(size_t ind)
-{
-	return (get_size_impl("arenas.lextent.0.size", ind));
-}
-
-/*
- * On systems which can't merge extents, tests that call this function generate
- * a lot of dirty memory very quickly.  Purging between cycles mitigates
- * potential OOM on e.g. 32-bit Windows.
- */
-static void
-purge(void)
-{
-	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctl error");
-}
-
-TEST_BEGIN(test_overflow)
-{
-	size_t largemax;
-
-	largemax = get_large_size(get_nlarge()-1);
-
-	assert_ptr_null(mallocx(largemax+1, 0),
-	    "Expected OOM for mallocx(size=%#zx, 0)", largemax+1);
-
-	assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
-	    "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
-
-	assert_ptr_null(mallocx(SIZE_T_MAX, 0),
-	    "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX);
-
-	assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
-	    "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))",
-	    ZU(PTRDIFF_MAX)+1);
-}
-TEST_END
-
-TEST_BEGIN(test_oom)
-{
-	size_t largemax;
-	bool oom;
-	void *ptrs[3];
-	unsigned i;
-
-	/*
-	 * It should be impossible to allocate three objects that each consume
-	 * nearly half the virtual address space.
-	 */
-	largemax = get_large_size(get_nlarge()-1);
-	oom = false;
-	for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
-		ptrs[i] = mallocx(largemax, 0);
-		if (ptrs[i] == NULL)
-			oom = true;
-	}
-	assert_true(oom,
-	    "Expected OOM during series of calls to mallocx(size=%zu, 0)",
-	    largemax);
-	for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
-		if (ptrs[i] != NULL)
-			dallocx(ptrs[i], 0);
-	}
-	purge();
-
-#if LG_SIZEOF_PTR == 3
-	assert_ptr_null(mallocx(0x8000000000000000ULL,
-	    MALLOCX_ALIGN(0x8000000000000000ULL)),
-	    "Expected OOM for mallocx()");
-	assert_ptr_null(mallocx(0x8000000000000000ULL,
-	    MALLOCX_ALIGN(0x80000000)),
-	    "Expected OOM for mallocx()");
-#else
-	assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)),
-	    "Expected OOM for mallocx()");
-#endif
-}
-TEST_END
-
-TEST_BEGIN(test_basic)
-{
-#define	MAXSZ (((size_t)1) << 23)
-	size_t sz;
-
-	for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
-		size_t nsz, rsz;
-		void *p;
-		nsz = nallocx(sz, 0);
-		assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
-		p = mallocx(sz, 0);
-		assert_ptr_not_null(p,
-		    "Unexpected mallocx(size=%zx, flags=0) error", sz);
-		rsz = sallocx(p, 0);
-		assert_zu_ge(rsz, sz, "Real size smaller than expected");
-		assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
-		dallocx(p, 0);
-
-		p = mallocx(sz, 0);
-		assert_ptr_not_null(p,
-		    "Unexpected mallocx(size=%zx, flags=0) error", sz);
-		dallocx(p, 0);
-
-		nsz = nallocx(sz, MALLOCX_ZERO);
-		assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
-		p = mallocx(sz, MALLOCX_ZERO);
-		assert_ptr_not_null(p,
-		    "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error",
-		    nsz);
-		rsz = sallocx(p, 0);
-		assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
-		dallocx(p, 0);
-		purge();
-	}
-#undef MAXSZ
-}
-TEST_END
-
-TEST_BEGIN(test_alignment_and_size)
-{
-#define	MAXALIGN (((size_t)1) << 23)
-#define	NITER 4
-	size_t nsz, rsz, sz, alignment, total;
-	unsigned i;
-	void *ps[NITER];
-
-	for (i = 0; i < NITER; i++)
-		ps[i] = NULL;
-
-	for (alignment = 8;
-	    alignment <= MAXALIGN;
-	    alignment <<= 1) {
-		total = 0;
-		for (sz = 1;
-		    sz < 3 * alignment && sz < (1U << 31);
-		    sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
-			for (i = 0; i < NITER; i++) {
-				nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
-				    MALLOCX_ZERO);
-				assert_zu_ne(nsz, 0,
-				    "nallocx() error for alignment=%zu, "
-				    "size=%zu (%#zx)", alignment, sz, sz);
-				ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
-				    MALLOCX_ZERO);
-				assert_ptr_not_null(ps[i],
-				    "mallocx() error for alignment=%zu, "
-				    "size=%zu (%#zx)", alignment, sz, sz);
-				rsz = sallocx(ps[i], 0);
-				assert_zu_ge(rsz, sz,
-				    "Real size smaller than expected for "
-				    "alignment=%zu, size=%zu", alignment, sz);
-				assert_zu_eq(nsz, rsz,
-				    "nallocx()/sallocx() size mismatch for "
-				    "alignment=%zu, size=%zu", alignment, sz);
-				assert_ptr_null(
-				    (void *)((uintptr_t)ps[i] & (alignment-1)),
-				    "%p inadequately aligned for"
-				    " alignment=%zu, size=%zu", ps[i],
-				    alignment, sz);
-				total += rsz;
-				if (total >= (MAXALIGN << 1))
-					break;
-			}
-			for (i = 0; i < NITER; i++) {
-				if (ps[i] != NULL) {
-					dallocx(ps[i], 0);
-					ps[i] = NULL;
-				}
-			}
-		}
-		purge();
-	}
-#undef MAXALIGN
-#undef NITER
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_overflow,
-	    test_oom,
-	    test_basic,
-	    test_alignment_and_size));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/integration/overflow.c b/zircon/third_party/ulib/jemalloc/test/integration/overflow.c
deleted file mode 100644
index ad867e7..0000000
--- a/zircon/third_party/ulib/jemalloc/test/integration/overflow.c
+++ /dev/null
@@ -1,48 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_overflow)
-{
-	unsigned nlextents;
-	size_t mib[4];
-	size_t sz, miblen, max_size_class;
-	void *p;
-
-	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
-	    0), 0, "Unexpected mallctl() error");
-
-	miblen = sizeof(mib) / sizeof(size_t);
-	assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() error");
-	mib[2] = nlextents - 1;
-
-	sz = sizeof(size_t);
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
-	    NULL, 0), 0, "Unexpected mallctlbymib() error");
-
-	assert_ptr_null(malloc(max_size_class + 1),
-	    "Expected OOM due to over-sized allocation request");
-	assert_ptr_null(malloc(SIZE_T_MAX),
-	    "Expected OOM due to over-sized allocation request");
-
-	assert_ptr_null(calloc(1, max_size_class + 1),
-	    "Expected OOM due to over-sized allocation request");
-	assert_ptr_null(calloc(1, SIZE_T_MAX),
-	    "Expected OOM due to over-sized allocation request");
-
-	p = malloc(1);
-	assert_ptr_not_null(p, "Unexpected malloc() OOM");
-	assert_ptr_null(realloc(p, max_size_class + 1),
-	    "Expected OOM due to over-sized allocation request");
-	assert_ptr_null(realloc(p, SIZE_T_MAX),
-	    "Expected OOM due to over-sized allocation request");
-	free(p);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_overflow));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/integration/posix_memalign.c b/zircon/third_party/ulib/jemalloc/test/integration/posix_memalign.c
deleted file mode 100644
index dace10f..0000000
--- a/zircon/third_party/ulib/jemalloc/test/integration/posix_memalign.c
+++ /dev/null
@@ -1,130 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define	MAXALIGN (((size_t)1) << 23)
-
-/*
- * On systems which can't merge extents, tests that call this function generate
- * a lot of dirty memory very quickly.  Purging between cycles mitigates
- * potential OOM on e.g. 32-bit Windows.
- */
-static void
-purge(void)
-{
-	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctl error");
-}
-
-TEST_BEGIN(test_alignment_errors)
-{
-	size_t alignment;
-	void *p;
-
-	for (alignment = 0; alignment < sizeof(void *); alignment++) {
-		assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL,
-		    "Expected error for invalid alignment %zu",
-		    alignment);
-	}
-
-	for (alignment = sizeof(size_t); alignment < MAXALIGN;
-	    alignment <<= 1) {
-		assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0,
-		    "Expected error for invalid alignment %zu",
-		    alignment + 1);
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_oom_errors)
-{
-	size_t alignment, size;
-	void *p;
-
-#if LG_SIZEOF_PTR == 3
-	alignment = UINT64_C(0x8000000000000000);
-	size      = UINT64_C(0x8000000000000000);
-#else
-	alignment = 0x80000000LU;
-	size      = 0x80000000LU;
-#endif
-	assert_d_ne(posix_memalign(&p, alignment, size), 0,
-	    "Expected error for posix_memalign(&p, %zu, %zu)",
-	    alignment, size);
-
-#if LG_SIZEOF_PTR == 3
-	alignment = UINT64_C(0x4000000000000000);
-	size      = UINT64_C(0xc000000000000001);
-#else
-	alignment = 0x40000000LU;
-	size      = 0xc0000001LU;
-#endif
-	assert_d_ne(posix_memalign(&p, alignment, size), 0,
-	    "Expected error for posix_memalign(&p, %zu, %zu)",
-	    alignment, size);
-
-	alignment = 0x10LU;
-#if LG_SIZEOF_PTR == 3
-	size = UINT64_C(0xfffffffffffffff0);
-#else
-	size = 0xfffffff0LU;
-#endif
-	assert_d_ne(posix_memalign(&p, alignment, size), 0,
-	    "Expected error for posix_memalign(&p, %zu, %zu)",
-	    alignment, size);
-}
-TEST_END
-
-TEST_BEGIN(test_alignment_and_size)
-{
-#define	NITER 4
-	size_t alignment, size, total;
-	unsigned i;
-	int err;
-	void *ps[NITER];
-
-	for (i = 0; i < NITER; i++)
-		ps[i] = NULL;
-
-	for (alignment = 8;
-	    alignment <= MAXALIGN;
-	    alignment <<= 1) {
-		total = 0;
-		for (size = 1;
-		    size < 3 * alignment && size < (1U << 31);
-		    size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
-			for (i = 0; i < NITER; i++) {
-				err = posix_memalign(&ps[i],
-				    alignment, size);
-				if (err) {
-					char buf[BUFERROR_BUF];
-
-					buferror(get_errno(), buf, sizeof(buf));
-					test_fail(
-					    "Error for alignment=%zu, "
-					    "size=%zu (%#zx): %s",
-					    alignment, size, size, buf);
-				}
-				total += malloc_usable_size(ps[i]);
-				if (total >= (MAXALIGN << 1))
-					break;
-			}
-			for (i = 0; i < NITER; i++) {
-				if (ps[i] != NULL) {
-					free(ps[i]);
-					ps[i] = NULL;
-				}
-			}
-		}
-		purge();
-	}
-#undef NITER
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_alignment_errors,
-	    test_oom_errors,
-	    test_alignment_and_size));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/integration/rallocx.c b/zircon/third_party/ulib/jemalloc/test/integration/rallocx.c
deleted file mode 100644
index 0a8b50c..0000000
--- a/zircon/third_party/ulib/jemalloc/test/integration/rallocx.c
+++ /dev/null
@@ -1,256 +0,0 @@
-#include "test/jemalloc_test.h"
-
-static unsigned
-get_nsizes_impl(const char *cmd)
-{
-	unsigned ret;
-	size_t z;
-
-	z = sizeof(unsigned);
-	assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
-	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
-
-	return (ret);
-}
-
-static unsigned
-get_nlarge(void)
-{
-	return (get_nsizes_impl("arenas.nlextents"));
-}
-
-static size_t
-get_size_impl(const char *cmd, size_t ind)
-{
-	size_t ret;
-	size_t z;
-	size_t mib[4];
-	size_t miblen = 4;
-
-	z = sizeof(size_t);
-	assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
-	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
-	mib[2] = ind;
-	z = sizeof(size_t);
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
-	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
-
-	return (ret);
-}
-
-static size_t
-get_large_size(size_t ind)
-{
-	return (get_size_impl("arenas.lextent.0.size", ind));
-}
-
-TEST_BEGIN(test_grow_and_shrink)
-{
-	void *p, *q;
-	size_t tsz;
-#define	NCYCLES 3
-	unsigned i, j;
-#define	NSZS 1024
-	size_t szs[NSZS];
-#define	MAXSZ ZU(12 * 1024 * 1024)
-
-	p = mallocx(1, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-	szs[0] = sallocx(p, 0);
-
-	for (i = 0; i < NCYCLES; i++) {
-		for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
-			q = rallocx(p, szs[j-1]+1, 0);
-			assert_ptr_not_null(q,
-			    "Unexpected rallocx() error for size=%zu-->%zu",
-			    szs[j-1], szs[j-1]+1);
-			szs[j] = sallocx(q, 0);
-			assert_zu_ne(szs[j], szs[j-1]+1,
-			    "Expected size to be at least: %zu", szs[j-1]+1);
-			p = q;
-		}
-
-		for (j--; j > 0; j--) {
-			q = rallocx(p, szs[j-1], 0);
-			assert_ptr_not_null(q,
-			    "Unexpected rallocx() error for size=%zu-->%zu",
-			    szs[j], szs[j-1]);
-			tsz = sallocx(q, 0);
-			assert_zu_eq(tsz, szs[j-1],
-			    "Expected size=%zu, got size=%zu", szs[j-1], tsz);
-			p = q;
-		}
-	}
-
-	dallocx(p, 0);
-#undef MAXSZ
-#undef NSZS
-#undef NCYCLES
-}
-TEST_END
-
-static bool
-validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
-{
-	bool ret = false;
-	const uint8_t *buf = (const uint8_t *)p;
-	size_t i;
-
-	for (i = 0; i < len; i++) {
-		uint8_t b = buf[offset+i];
-		if (b != c) {
-			test_fail("Allocation at %p (len=%zu) contains %#x "
-			    "rather than %#x at offset %zu", p, len, b, c,
-			    offset+i);
-			ret = true;
-		}
-	}
-
-	return (ret);
-}
-
-TEST_BEGIN(test_zero)
-{
-	void *p, *q;
-	size_t psz, qsz, i, j;
-	size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024};
-#define	FILL_BYTE 0xaaU
-#define	RANGE 2048
-
-	for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) {
-		size_t start_size = start_sizes[i];
-		p = mallocx(start_size, MALLOCX_ZERO);
-		assert_ptr_not_null(p, "Unexpected mallocx() error");
-		psz = sallocx(p, 0);
-
-		assert_false(validate_fill(p, 0, 0, psz),
-		    "Expected zeroed memory");
-		memset(p, FILL_BYTE, psz);
-		assert_false(validate_fill(p, FILL_BYTE, 0, psz),
-		    "Expected filled memory");
-
-		for (j = 1; j < RANGE; j++) {
-			q = rallocx(p, start_size+j, MALLOCX_ZERO);
-			assert_ptr_not_null(q, "Unexpected rallocx() error");
-			qsz = sallocx(q, 0);
-			if (q != p || qsz != psz) {
-				assert_false(validate_fill(q, FILL_BYTE, 0,
-				    psz), "Expected filled memory");
-				assert_false(validate_fill(q, 0, psz, qsz-psz),
-				    "Expected zeroed memory");
-			}
-			if (psz != qsz) {
-				memset((void *)((uintptr_t)q+psz), FILL_BYTE,
-				    qsz-psz);
-				psz = qsz;
-			}
-			p = q;
-		}
-		assert_false(validate_fill(p, FILL_BYTE, 0, psz),
-		    "Expected filled memory");
-		dallocx(p, 0);
-	}
-#undef FILL_BYTE
-}
-TEST_END
-
-TEST_BEGIN(test_align)
-{
-	void *p, *q;
-	size_t align;
-#define	MAX_ALIGN (ZU(1) << 25)
-
-	align = ZU(1);
-	p = mallocx(1, MALLOCX_ALIGN(align));
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-
-	for (align <<= 1; align <= MAX_ALIGN; align <<= 1) {
-		q = rallocx(p, 1, MALLOCX_ALIGN(align));
-		assert_ptr_not_null(q,
-		    "Unexpected rallocx() error for align=%zu", align);
-		assert_ptr_null(
-		    (void *)((uintptr_t)q & (align-1)),
-		    "%p inadequately aligned for align=%zu",
-		    q, align);
-		p = q;
-	}
-	dallocx(p, 0);
-#undef MAX_ALIGN
-}
-TEST_END
-
-TEST_BEGIN(test_lg_align_and_zero)
-{
-	void *p, *q;
-	unsigned lg_align;
-	size_t sz;
-#define	MAX_LG_ALIGN 25
-#define	MAX_VALIDATE (ZU(1) << 22)
-
-	lg_align = 0;
-	p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-
-	for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
-		q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
-		assert_ptr_not_null(q,
-		    "Unexpected rallocx() error for lg_align=%u", lg_align);
-		assert_ptr_null(
-		    (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
-		    "%p inadequately aligned for lg_align=%u", q, lg_align);
-		sz = sallocx(q, 0);
-		if ((sz << 1) <= MAX_VALIDATE) {
-			assert_false(validate_fill(q, 0, 0, sz),
-			    "Expected zeroed memory");
-		} else {
-			assert_false(validate_fill(q, 0, 0, MAX_VALIDATE),
-			    "Expected zeroed memory");
-			assert_false(validate_fill(
-			    (void *)((uintptr_t)q+sz-MAX_VALIDATE),
-			    0, 0, MAX_VALIDATE), "Expected zeroed memory");
-		}
-		p = q;
-	}
-	dallocx(p, 0);
-#undef MAX_VALIDATE
-#undef MAX_LG_ALIGN
-}
-TEST_END
-
-TEST_BEGIN(test_overflow)
-{
-	size_t largemax;
-	void *p;
-
-	largemax = get_large_size(get_nlarge()-1);
-
-	p = mallocx(1, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
-
-	assert_ptr_null(rallocx(p, largemax+1, 0),
-	    "Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1);
-
-	assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0),
-	    "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
-
-	assert_ptr_null(rallocx(p, SIZE_T_MAX, 0),
-	    "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX);
-
-	assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
-	    "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))",
-	    ZU(PTRDIFF_MAX)+1);
-
-	dallocx(p, 0);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_grow_and_shrink,
-	    test_zero,
-	    test_align,
-	    test_lg_align_and_zero,
-	    test_overflow));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/integration/sdallocx.c b/zircon/third_party/ulib/jemalloc/test/integration/sdallocx.c
deleted file mode 100644
index 5d0a8f8..0000000
--- a/zircon/third_party/ulib/jemalloc/test/integration/sdallocx.c
+++ /dev/null
@@ -1,56 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define	MAXALIGN (((size_t)1) << 22)
-#define	NITER 3
-
-TEST_BEGIN(test_basic)
-{
-	void *ptr = mallocx(64, 0);
-	sdallocx(ptr, 64, 0);
-}
-TEST_END
-
-TEST_BEGIN(test_alignment_and_size)
-{
-	size_t nsz, sz, alignment, total;
-	unsigned i;
-	void *ps[NITER];
-
-	for (i = 0; i < NITER; i++)
-		ps[i] = NULL;
-
-	for (alignment = 8;
-	    alignment <= MAXALIGN;
-	    alignment <<= 1) {
-		total = 0;
-		for (sz = 1;
-		    sz < 3 * alignment && sz < (1U << 31);
-		    sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
-			for (i = 0; i < NITER; i++) {
-				nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
-				    MALLOCX_ZERO);
-				ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
-				    MALLOCX_ZERO);
-				total += nsz;
-				if (total >= (MAXALIGN << 1))
-					break;
-			}
-			for (i = 0; i < NITER; i++) {
-				if (ps[i] != NULL) {
-					sdallocx(ps[i], sz,
-					    MALLOCX_ALIGN(alignment));
-					ps[i] = NULL;
-				}
-			}
-		}
-	}
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_basic,
-	    test_alignment_and_size));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/integration/thread_arena.c b/zircon/third_party/ulib/jemalloc/test/integration/thread_arena.c
deleted file mode 100644
index cf8240d1..0000000
--- a/zircon/third_party/ulib/jemalloc/test/integration/thread_arena.c
+++ /dev/null
@@ -1,80 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define	NTHREADS 10
-
-void *
-thd_start(void *arg)
-{
-	unsigned main_arena_ind = *(unsigned *)arg;
-	void *p;
-	unsigned arena_ind;
-	size_t size;
-	int err;
-
-	p = malloc(1);
-	assert_ptr_not_null(p, "Error in malloc()");
-	free(p);
-
-	size = sizeof(arena_ind);
-	if ((err = mallctl("thread.arena", (void *)&arena_ind, &size,
-	    (void *)&main_arena_ind, sizeof(main_arena_ind)))) {
-		char buf[BUFERROR_BUF];
-
-		buferror(err, buf, sizeof(buf));
-		test_fail("Error in mallctl(): %s", buf);
-	}
-
-	size = sizeof(arena_ind);
-	if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL,
-	    0))) {
-		char buf[BUFERROR_BUF];
-
-		buferror(err, buf, sizeof(buf));
-		test_fail("Error in mallctl(): %s", buf);
-	}
-	assert_u_eq(arena_ind, main_arena_ind,
-	    "Arena index should be same as for main thread");
-
-	return (NULL);
-}
-
-TEST_BEGIN(test_thread_arena)
-{
-	void *p;
-	unsigned arena_ind;
-	size_t size;
-	int err;
-	thd_t thds[NTHREADS];
-	unsigned i;
-
-	p = malloc(1);
-	assert_ptr_not_null(p, "Error in malloc()");
-
-	size = sizeof(arena_ind);
-	if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL,
-	    0))) {
-		char buf[BUFERROR_BUF];
-
-		buferror(err, buf, sizeof(buf));
-		test_fail("Error in mallctl(): %s", buf);
-	}
-
-	for (i = 0; i < NTHREADS; i++) {
-		thd_create(&thds[i], thd_start,
-		    (void *)&arena_ind);
-	}
-
-	for (i = 0; i < NTHREADS; i++) {
-		intptr_t join_ret;
-		thd_join(thds[i], (void *)&join_ret);
-		assert_zd_eq(join_ret, 0, "Unexpected thread join error");
-	}
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_thread_arena));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/integration/thread_tcache_enabled.c b/zircon/third_party/ulib/jemalloc/test/integration/thread_tcache_enabled.c
deleted file mode 100644
index 1394371b..0000000
--- a/zircon/third_party/ulib/jemalloc/test/integration/thread_tcache_enabled.c
+++ /dev/null
@@ -1,112 +0,0 @@
-#include "test/jemalloc_test.h"
-
-static const bool config_tcache =
-#ifdef JEMALLOC_TCACHE
-    true
-#else
-    false
-#endif
-    ;
-
-void *
-thd_start(void *arg)
-{
-	int err;
-	size_t sz;
-	bool e0, e1;
-
-	sz = sizeof(bool);
-	if ((err = mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL,
-	    0))) {
-		if (err == ENOENT) {
-			assert_false(config_tcache,
-			    "ENOENT should only be returned if tcache is "
-			    "disabled");
-		}
-		goto label_ENOENT;
-	}
-
-	if (e0) {
-		e1 = false;
-		assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
-		    (void *)&e1, sz), 0, "Unexpected mallctl() error");
-		assert_true(e0, "tcache should be enabled");
-	}
-
-	e1 = true;
-	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
-	assert_false(e0, "tcache should be disabled");
-
-	e1 = true;
-	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
-	assert_true(e0, "tcache should be enabled");
-
-	e1 = false;
-	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
-	assert_true(e0, "tcache should be enabled");
-
-	e1 = false;
-	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
-	assert_false(e0, "tcache should be disabled");
-
-	free(malloc(1));
-	e1 = true;
-	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
-	assert_false(e0, "tcache should be disabled");
-
-	free(malloc(1));
-	e1 = true;
-	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
-	assert_true(e0, "tcache should be enabled");
-
-	free(malloc(1));
-	e1 = false;
-	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
-	assert_true(e0, "tcache should be enabled");
-
-	free(malloc(1));
-	e1 = false;
-	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
-	assert_false(e0, "tcache should be disabled");
-
-	free(malloc(1));
-	return (NULL);
-label_ENOENT:
-	test_skip("\"thread.tcache.enabled\" mallctl not available");
-	return (NULL);
-}
-
-TEST_BEGIN(test_main_thread)
-{
-	thd_start(NULL);
-}
-TEST_END
-
-TEST_BEGIN(test_subthread)
-{
-	thd_t thd;
-
-	thd_create(&thd, thd_start, NULL);
-	thd_join(thd, NULL);
-}
-TEST_END
-
-int
-main(void)
-{
-	/* Run tests multiple times to check for bad interactions. */
-	return (test(
-	    test_main_thread,
-	    test_subthread,
-	    test_main_thread,
-	    test_subthread,
-	    test_main_thread));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/integration/xallocx.c b/zircon/third_party/ulib/jemalloc/test/integration/xallocx.c
deleted file mode 100644
index 647404a..0000000
--- a/zircon/third_party/ulib/jemalloc/test/integration/xallocx.c
+++ /dev/null
@@ -1,405 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifdef JEMALLOC_FILL
-const char *malloc_conf = "junk:false";
-#endif
-
-/*
- * Use a separate arena for xallocx() extension/contraction tests so that
- * internal allocation e.g. by heap profiling can't interpose allocations where
- * xallocx() would ordinarily be able to extend.
- */
-static unsigned
-arena_ind(void)
-{
-	static unsigned ind = 0;
-
-	if (ind == 0) {
-		size_t sz = sizeof(ind);
-		assert_d_eq(mallctl("arenas.create", (void *)&ind, &sz, NULL,
-		    0), 0, "Unexpected mallctl failure creating arena");
-	}
-
-	return (ind);
-}
-
-TEST_BEGIN(test_same_size)
-{
-	void *p;
-	size_t sz, tsz;
-
-	p = mallocx(42, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-	sz = sallocx(p, 0);
-
-	tsz = xallocx(p, sz, 0, 0);
-	assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
-
-	dallocx(p, 0);
-}
-TEST_END
-
-TEST_BEGIN(test_extra_no_move)
-{
-	void *p;
-	size_t sz, tsz;
-
-	p = mallocx(42, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-	sz = sallocx(p, 0);
-
-	tsz = xallocx(p, sz, sz-42, 0);
-	assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
-
-	dallocx(p, 0);
-}
-TEST_END
-
-TEST_BEGIN(test_no_move_fail)
-{
-	void *p;
-	size_t sz, tsz;
-
-	p = mallocx(42, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-	sz = sallocx(p, 0);
-
-	tsz = xallocx(p, sz + 5, 0, 0);
-	assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
-
-	dallocx(p, 0);
-}
-TEST_END
-
-static unsigned
-get_nsizes_impl(const char *cmd)
-{
-	unsigned ret;
-	size_t z;
-
-	z = sizeof(unsigned);
-	assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
-	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
-
-	return (ret);
-}
-
-static unsigned
-get_nsmall(void)
-{
-	return (get_nsizes_impl("arenas.nbins"));
-}
-
-static unsigned
-get_nlarge(void)
-{
-	return (get_nsizes_impl("arenas.nlextents"));
-}
-
-static size_t
-get_size_impl(const char *cmd, size_t ind)
-{
-	size_t ret;
-	size_t z;
-	size_t mib[4];
-	size_t miblen = 4;
-
-	z = sizeof(size_t);
-	assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
-	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
-	mib[2] = ind;
-	z = sizeof(size_t);
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
-	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
-
-	return (ret);
-}
-
-static size_t
-get_small_size(size_t ind)
-{
-	return (get_size_impl("arenas.bin.0.size", ind));
-}
-
-static size_t
-get_large_size(size_t ind)
-{
-	return (get_size_impl("arenas.lextent.0.size", ind));
-}
-
-TEST_BEGIN(test_size)
-{
-	size_t small0, largemax;
-	void *p;
-
-	/* Get size classes. */
-	small0 = get_small_size(0);
-	largemax = get_large_size(get_nlarge()-1);
-
-	p = mallocx(small0, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-
-	/* Test smallest supported size. */
-	assert_zu_eq(xallocx(p, 1, 0, 0), small0,
-	    "Unexpected xallocx() behavior");
-
-	/* Test largest supported size. */
-	assert_zu_le(xallocx(p, largemax, 0, 0), largemax,
-	    "Unexpected xallocx() behavior");
-
-	/* Test size overflow. */
-	assert_zu_le(xallocx(p, largemax+1, 0, 0), largemax,
-	    "Unexpected xallocx() behavior");
-	assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), largemax,
-	    "Unexpected xallocx() behavior");
-
-	dallocx(p, 0);
-}
-TEST_END
-
-TEST_BEGIN(test_size_extra_overflow)
-{
-	size_t small0, largemax;
-	void *p;
-
-	/* Get size classes. */
-	small0 = get_small_size(0);
-	largemax = get_large_size(get_nlarge()-1);
-
-	p = mallocx(small0, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-
-	/* Test overflows that can be resolved by clamping extra. */
-	assert_zu_le(xallocx(p, largemax-1, 2, 0), largemax,
-	    "Unexpected xallocx() behavior");
-	assert_zu_le(xallocx(p, largemax, 1, 0), largemax,
-	    "Unexpected xallocx() behavior");
-
-	/* Test overflow such that largemax-size underflows. */
-	assert_zu_le(xallocx(p, largemax+1, 2, 0), largemax,
-	    "Unexpected xallocx() behavior");
-	assert_zu_le(xallocx(p, largemax+2, 3, 0), largemax,
-	    "Unexpected xallocx() behavior");
-	assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), largemax,
-	    "Unexpected xallocx() behavior");
-	assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), largemax,
-	    "Unexpected xallocx() behavior");
-
-	dallocx(p, 0);
-}
-TEST_END
-
-TEST_BEGIN(test_extra_small)
-{
-	size_t small0, small1, largemax;
-	void *p;
-
-	/* Get size classes. */
-	small0 = get_small_size(0);
-	small1 = get_small_size(1);
-	largemax = get_large_size(get_nlarge()-1);
-
-	p = mallocx(small0, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-
-	assert_zu_eq(xallocx(p, small1, 0, 0), small0,
-	    "Unexpected xallocx() behavior");
-
-	assert_zu_eq(xallocx(p, small1, 0, 0), small0,
-	    "Unexpected xallocx() behavior");
-
-	assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0,
-	    "Unexpected xallocx() behavior");
-
-	/* Test size+extra overflow. */
-	assert_zu_eq(xallocx(p, small0, largemax - small0 + 1, 0), small0,
-	    "Unexpected xallocx() behavior");
-	assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0,
-	    "Unexpected xallocx() behavior");
-
-	dallocx(p, 0);
-}
-TEST_END
-
-TEST_BEGIN(test_extra_large)
-{
-	int flags = MALLOCX_ARENA(arena_ind());
-	size_t smallmax, large1, large2, large3, largemax;
-	void *p;
-
-	/* Get size classes. */
-	smallmax = get_small_size(get_nsmall()-1);
-	large1 = get_large_size(1);
-	large2 = get_large_size(2);
-	large3 = get_large_size(3);
-	largemax = get_large_size(get_nlarge()-1);
-
-	p = mallocx(large3, flags);
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-
-	assert_zu_eq(xallocx(p, large3, 0, flags), large3,
-	    "Unexpected xallocx() behavior");
-	/* Test size decrease with zero extra. */
-	assert_zu_ge(xallocx(p, large1, 0, flags), large1,
-	    "Unexpected xallocx() behavior");
-	assert_zu_ge(xallocx(p, smallmax, 0, flags), large1,
-	    "Unexpected xallocx() behavior");
-
-	if (xallocx(p, large3, 0, flags) != large3) {
-		p = rallocx(p, large3, flags);
-		assert_ptr_not_null(p, "Unexpected rallocx() failure");
-	}
-	/* Test size decrease with non-zero extra. */
-	assert_zu_eq(xallocx(p, large1, large3 - large1, flags), large3,
-	    "Unexpected xallocx() behavior");
-	assert_zu_eq(xallocx(p, large2, large3 - large2, flags), large3,
-	    "Unexpected xallocx() behavior");
-	assert_zu_ge(xallocx(p, large1, large2 - large1, flags), large2,
-	    "Unexpected xallocx() behavior");
-	assert_zu_ge(xallocx(p, smallmax, large1 - smallmax, flags), large1,
-	    "Unexpected xallocx() behavior");
-
-	assert_zu_ge(xallocx(p, large1, 0, flags), large1,
-	    "Unexpected xallocx() behavior");
-	/* Test size increase with zero extra. */
-	assert_zu_le(xallocx(p, large3, 0, flags), large3,
-	    "Unexpected xallocx() behavior");
-	assert_zu_le(xallocx(p, largemax+1, 0, flags), large3,
-	    "Unexpected xallocx() behavior");
-
-	assert_zu_ge(xallocx(p, large1, 0, flags), large1,
-	    "Unexpected xallocx() behavior");
-	/* Test size increase with non-zero extra. */
-	assert_zu_le(xallocx(p, large1, SIZE_T_MAX - large1, flags), largemax,
-	    "Unexpected xallocx() behavior");
-
-	assert_zu_ge(xallocx(p, large1, 0, flags), large1,
-	    "Unexpected xallocx() behavior");
-	/* Test size increase with non-zero extra. */
-	assert_zu_le(xallocx(p, large1, large3 - large1, flags), large3,
-	    "Unexpected xallocx() behavior");
-
-	if (xallocx(p, large3, 0, flags) != large3) {
-		p = rallocx(p, large3, flags);
-		assert_ptr_not_null(p, "Unexpected rallocx() failure");
-	}
-	/* Test size+extra overflow. */
-	assert_zu_le(xallocx(p, large3, largemax - large3 + 1, flags), largemax,
-	    "Unexpected xallocx() behavior");
-
-	dallocx(p, flags);
-}
-TEST_END
-
-static void
-print_filled_extents(const void *p, uint8_t c, size_t len)
-{
-	const uint8_t *pc = (const uint8_t *)p;
-	size_t i, range0;
-	uint8_t c0;
-
-	malloc_printf("  p=%p, c=%#x, len=%zu:", p, c, len);
-	range0 = 0;
-	c0 = pc[0];
-	for (i = 0; i < len; i++) {
-		if (pc[i] != c0) {
-			malloc_printf(" %#x[%zu..%zu)", c0, range0, i);
-			range0 = i;
-			c0 = pc[i];
-		}
-	}
-	malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i);
-}
-
-static bool
-validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
-{
-	const uint8_t *pc = (const uint8_t *)p;
-	bool err;
-	size_t i;
-
-	for (i = offset, err = false; i < offset+len; i++) {
-		if (pc[i] != c)
-			err = true;
-	}
-
-	if (err)
-		print_filled_extents(p, c, offset + len);
-
-	return (err);
-}
-
-static void
-test_zero(size_t szmin, size_t szmax)
-{
-	int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO;
-	size_t sz, nsz;
-	void *p;
-#define	FILL_BYTE 0x7aU
-
-	sz = szmax;
-	p = mallocx(sz, flags);
-	assert_ptr_not_null(p, "Unexpected mallocx() error");
-	assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu",
-	    sz);
-
-	/*
-	 * Fill with non-zero so that non-debug builds are more likely to detect
-	 * errors.
-	 */
-	memset(p, FILL_BYTE, sz);
-	assert_false(validate_fill(p, FILL_BYTE, 0, sz),
-	    "Memory not filled: sz=%zu", sz);
-
-	/* Shrink in place so that we can expect growing in place to succeed. */
-	sz = szmin;
-	if (xallocx(p, sz, 0, flags) != sz) {
-		p = rallocx(p, sz, flags);
-		assert_ptr_not_null(p, "Unexpected rallocx() failure");
-	}
-	assert_false(validate_fill(p, FILL_BYTE, 0, sz),
-	    "Memory not filled: sz=%zu", sz);
-
-	for (sz = szmin; sz < szmax; sz = nsz) {
-		nsz = nallocx(sz+1, flags);
-		if (xallocx(p, sz+1, 0, flags) != nsz) {
-			p = rallocx(p, sz+1, flags);
-			assert_ptr_not_null(p, "Unexpected rallocx() failure");
-		}
-		assert_false(validate_fill(p, FILL_BYTE, 0, sz),
-		    "Memory not filled: sz=%zu", sz);
-		assert_false(validate_fill(p, 0x00, sz, nsz-sz),
-		    "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz);
-		memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz);
-		assert_false(validate_fill(p, FILL_BYTE, 0, nsz),
-		    "Memory not filled: nsz=%zu", nsz);
-	}
-
-	dallocx(p, flags);
-}
-
-TEST_BEGIN(test_zero_large)
-{
-	size_t large0, large1;
-
-	/* Get size classes. */
-	large0 = get_large_size(0);
-	large1 = get_large_size(1);
-
-	test_zero(large1, large0 * 2);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_same_size,
-	    test_extra_no_move,
-	    test_no_move_fail,
-	    test_size,
-	    test_size_extra_overflow,
-	    test_extra_small,
-	    test_extra_large,
-	    test_zero_large));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/src/SFMT.c b/zircon/third_party/ulib/jemalloc/test/src/SFMT.c
deleted file mode 100644
index 80cabe0..0000000
--- a/zircon/third_party/ulib/jemalloc/test/src/SFMT.c
+++ /dev/null
@@ -1,719 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-/** 
- * @file  SFMT.c
- * @brief SIMD oriented Fast Mersenne Twister(SFMT)
- *
- * @author Mutsuo Saito (Hiroshima University)
- * @author Makoto Matsumoto (Hiroshima University)
- *
- * Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- * University. All rights reserved.
- *
- * The new BSD License is applied to this software, see LICENSE.txt
- */
-#define	SFMT_C_
-#include "test/jemalloc_test.h"
-#include "test/SFMT-params.h"
-
-#if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64)
-#define BIG_ENDIAN64 1
-#endif
-#if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64)
-#define BIG_ENDIAN64 1
-#endif
-#if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64)
-#define BIG_ENDIAN64 1
-#endif
-#if defined(ONLY64) && !defined(BIG_ENDIAN64)
-  #if defined(__GNUC__)
-    #error "-DONLY64 must be specified with -DBIG_ENDIAN64"
-  #endif
-#undef ONLY64
-#endif
-/*------------------------------------------------------
-  128-bit SIMD data type for Altivec, SSE2 or standard C
-  ------------------------------------------------------*/
-#if defined(HAVE_ALTIVEC)
-/** 128-bit data structure */
-union W128_T {
-    vector unsigned int s;
-    uint32_t u[4];
-};
-/** 128-bit data type */
-typedef union W128_T w128_t;
-
-#elif defined(HAVE_SSE2)
-/** 128-bit data structure */
-union W128_T {
-    __m128i si;
-    uint32_t u[4];
-};
-/** 128-bit data type */
-typedef union W128_T w128_t;
-
-#else
-
-/** 128-bit data structure */
-struct W128_T {
-    uint32_t u[4];
-};
-/** 128-bit data type */
-typedef struct W128_T w128_t;
-
-#endif
-
-struct sfmt_s {
-    /** the 128-bit internal state array */
-    w128_t sfmt[N];
-    /** index counter to the 32-bit internal state array */
-    int idx;
-    /** a flag: it is 0 if and only if the internal state is not yet
-     * initialized. */
-    int initialized;
-};
-
-/*--------------------------------------
-  FILE GLOBAL VARIABLES
-  internal state, index counter and flag 
-  --------------------------------------*/
-
-/** a parity check vector which certificate the period of 2^{MEXP} */
-static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4};
-
-/*----------------
-  STATIC FUNCTIONS
-  ----------------*/
-JEMALLOC_INLINE_C int idxof(int i);
-#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
-JEMALLOC_INLINE_C void rshift128(w128_t *out,  w128_t const *in, int shift);
-JEMALLOC_INLINE_C void lshift128(w128_t *out,  w128_t const *in, int shift);
-#endif
-JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx);
-JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size);
-JEMALLOC_INLINE_C uint32_t func1(uint32_t x);
-JEMALLOC_INLINE_C uint32_t func2(uint32_t x);
-static void period_certification(sfmt_t *ctx);
-#if defined(BIG_ENDIAN64) && !defined(ONLY64)
-JEMALLOC_INLINE_C void swap(w128_t *array, int size);
-#endif
-
-#if defined(HAVE_ALTIVEC)
-  #include "test/SFMT-alti.h"
-#elif defined(HAVE_SSE2)
-  #include "test/SFMT-sse2.h"
-#endif
-
-/**
- * This function simulate a 64-bit index of LITTLE ENDIAN 
- * in BIG ENDIAN machine.
- */
-#ifdef ONLY64
-JEMALLOC_INLINE_C int idxof(int i) {
-    return i ^ 1;
-}
-#else
-JEMALLOC_INLINE_C int idxof(int i) {
-    return i;
-}
-#endif
-/**
- * This function simulates SIMD 128-bit right shift by the standard C.
- * The 128-bit integer given in in is shifted by (shift * 8) bits.
- * This function simulates the LITTLE ENDIAN SIMD.
- * @param out the output of this function
- * @param in the 128-bit data to be shifted
- * @param shift the shift value
- */
-#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
-#ifdef ONLY64
-JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
-    uint64_t th, tl, oh, ol;
-
-    th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
-    tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
-
-    oh = th >> (shift * 8);
-    ol = tl >> (shift * 8);
-    ol |= th << (64 - shift * 8);
-    out->u[0] = (uint32_t)(ol >> 32);
-    out->u[1] = (uint32_t)ol;
-    out->u[2] = (uint32_t)(oh >> 32);
-    out->u[3] = (uint32_t)oh;
-}
-#else
-JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
-    uint64_t th, tl, oh, ol;
-
-    th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
-    tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
-
-    oh = th >> (shift * 8);
-    ol = tl >> (shift * 8);
-    ol |= th << (64 - shift * 8);
-    out->u[1] = (uint32_t)(ol >> 32);
-    out->u[0] = (uint32_t)ol;
-    out->u[3] = (uint32_t)(oh >> 32);
-    out->u[2] = (uint32_t)oh;
-}
-#endif
-/**
- * This function simulates SIMD 128-bit left shift by the standard C.
- * The 128-bit integer given in in is shifted by (shift * 8) bits.
- * This function simulates the LITTLE ENDIAN SIMD.
- * @param out the output of this function
- * @param in the 128-bit data to be shifted
- * @param shift the shift value
- */
-#ifdef ONLY64
-JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
-    uint64_t th, tl, oh, ol;
-
-    th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
-    tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
-
-    oh = th << (shift * 8);
-    ol = tl << (shift * 8);
-    oh |= tl >> (64 - shift * 8);
-    out->u[0] = (uint32_t)(ol >> 32);
-    out->u[1] = (uint32_t)ol;
-    out->u[2] = (uint32_t)(oh >> 32);
-    out->u[3] = (uint32_t)oh;
-}
-#else
-JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
-    uint64_t th, tl, oh, ol;
-
-    th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
-    tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
-
-    oh = th << (shift * 8);
-    ol = tl << (shift * 8);
-    oh |= tl >> (64 - shift * 8);
-    out->u[1] = (uint32_t)(ol >> 32);
-    out->u[0] = (uint32_t)ol;
-    out->u[3] = (uint32_t)(oh >> 32);
-    out->u[2] = (uint32_t)oh;
-}
-#endif
-#endif
-
-/**
- * This function represents the recursion formula.
- * @param r output
- * @param a a 128-bit part of the internal state array
- * @param b a 128-bit part of the internal state array
- * @param c a 128-bit part of the internal state array
- * @param d a 128-bit part of the internal state array
- */
-#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
-#ifdef ONLY64
-JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
-				w128_t *d) {
-    w128_t x;
-    w128_t y;
-
-    lshift128(&x, a, SL2);
-    rshift128(&y, c, SR2);
-    r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] 
-	^ (d->u[0] << SL1);
-    r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] 
-	^ (d->u[1] << SL1);
-    r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] 
-	^ (d->u[2] << SL1);
-    r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] 
-	^ (d->u[3] << SL1);
-}
-#else
-JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
-				w128_t *d) {
-    w128_t x;
-    w128_t y;
-
-    lshift128(&x, a, SL2);
-    rshift128(&y, c, SR2);
-    r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] 
-	^ (d->u[0] << SL1);
-    r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] 
-	^ (d->u[1] << SL1);
-    r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] 
-	^ (d->u[2] << SL1);
-    r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] 
-	^ (d->u[3] << SL1);
-}
-#endif
-#endif
-
-#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
-/**
- * This function fills the internal state array with pseudorandom
- * integers.
- */
-JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) {
-    int i;
-    w128_t *r1, *r2;
-
-    r1 = &ctx->sfmt[N - 2];
-    r2 = &ctx->sfmt[N - 1];
-    for (i = 0; i < N - POS1; i++) {
-	do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1,
-	  r2);
-	r1 = r2;
-	r2 = &ctx->sfmt[i];
-    }
-    for (; i < N; i++) {
-	do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1,
-	  r2);
-	r1 = r2;
-	r2 = &ctx->sfmt[i];
-    }
-}
-
-/**
- * This function fills the user-specified array with pseudorandom
- * integers.
- *
- * @param array an 128-bit array to be filled by pseudorandom numbers.  
- * @param size number of 128-bit pseudorandom numbers to be generated.
- */
-JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
-    int i, j;
-    w128_t *r1, *r2;
-
-    r1 = &ctx->sfmt[N - 2];
-    r2 = &ctx->sfmt[N - 1];
-    for (i = 0; i < N - POS1; i++) {
-	do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2);
-	r1 = r2;
-	r2 = &array[i];
-    }
-    for (; i < N; i++) {
-	do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2);
-	r1 = r2;
-	r2 = &array[i];
-    }
-    for (; i < size - N; i++) {
-	do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
-	r1 = r2;
-	r2 = &array[i];
-    }
-    for (j = 0; j < 2 * N - size; j++) {
-	ctx->sfmt[j] = array[j + size - N];
-    }
-    for (; i < size; i++, j++) {
-	do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
-	r1 = r2;
-	r2 = &array[i];
-	ctx->sfmt[j] = array[i];
-    }
-}
-#endif
-
-#if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC)
-JEMALLOC_INLINE_C void swap(w128_t *array, int size) {
-    int i;
-    uint32_t x, y;
-
-    for (i = 0; i < size; i++) {
-	x = array[i].u[0];
-	y = array[i].u[2];
-	array[i].u[0] = array[i].u[1];
-	array[i].u[2] = array[i].u[3];
-	array[i].u[1] = x;
-	array[i].u[3] = y;
-    }
-}
-#endif
-/**
- * This function represents a function used in the initialization
- * by init_by_array
- * @param x 32-bit integer
- * @return 32-bit integer
- */
-static uint32_t func1(uint32_t x) {
-    return (x ^ (x >> 27)) * (uint32_t)1664525UL;
-}
-
-/**
- * This function represents a function used in the initialization
- * by init_by_array
- * @param x 32-bit integer
- * @return 32-bit integer
- */
-static uint32_t func2(uint32_t x) {
-    return (x ^ (x >> 27)) * (uint32_t)1566083941UL;
-}
-
-/**
- * This function certificate the period of 2^{MEXP}
- */
-static void period_certification(sfmt_t *ctx) {
-    int inner = 0;
-    int i, j;
-    uint32_t work;
-    uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
-
-    for (i = 0; i < 4; i++)
-	inner ^= psfmt32[idxof(i)] & parity[i];
-    for (i = 16; i > 0; i >>= 1)
-	inner ^= inner >> i;
-    inner &= 1;
-    /* check OK */
-    if (inner == 1) {
-	return;
-    }
-    /* check NG, and modification */
-    for (i = 0; i < 4; i++) {
-	work = 1;
-	for (j = 0; j < 32; j++) {
-	    if ((work & parity[i]) != 0) {
-		psfmt32[idxof(i)] ^= work;
-		return;
-	    }
-	    work = work << 1;
-	}
-    }
-}
-
-/*----------------
-  PUBLIC FUNCTIONS
-  ----------------*/
-/**
- * This function returns the identification string.
- * The string shows the word size, the Mersenne exponent,
- * and all parameters of this generator.
- */
-const char *get_idstring(void) {
-    return IDSTR;
-}
-
-/**
- * This function returns the minimum size of array used for \b
- * fill_array32() function.
- * @return minimum size of array used for fill_array32() function.
- */
-int get_min_array_size32(void) {
-    return N32;
-}
-
-/**
- * This function returns the minimum size of array used for \b
- * fill_array64() function.
- * @return minimum size of array used for fill_array64() function.
- */
-int get_min_array_size64(void) {
-    return N64;
-}
-
-#ifndef ONLY64
-/**
- * This function generates and returns 32-bit pseudorandom number.
- * init_gen_rand or init_by_array must be called before this function.
- * @return 32-bit pseudorandom number
- */
-uint32_t gen_rand32(sfmt_t *ctx) {
-    uint32_t r;
-    uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
-
-    assert(ctx->initialized);
-    if (ctx->idx >= N32) {
-	gen_rand_all(ctx);
-	ctx->idx = 0;
-    }
-    r = psfmt32[ctx->idx++];
-    return r;
-}
-
-/* Generate a random integer in [0..limit). */
-uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) {
-    uint32_t ret, above;
-
-    above = 0xffffffffU - (0xffffffffU % limit);
-    while (1) {
-	ret = gen_rand32(ctx);
-	if (ret < above) {
-	    ret %= limit;
-	    break;
-	}
-    }
-    return ret;
-}
-#endif
-/**
- * This function generates and returns 64-bit pseudorandom number.
- * init_gen_rand or init_by_array must be called before this function.
- * The function gen_rand64 should not be called after gen_rand32,
- * unless an initialization is again executed. 
- * @return 64-bit pseudorandom number
- */
-uint64_t gen_rand64(sfmt_t *ctx) {
-#if defined(BIG_ENDIAN64) && !defined(ONLY64)
-    uint32_t r1, r2;
-    uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
-#else
-    uint64_t r;
-    uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0];
-#endif
-
-    assert(ctx->initialized);
-    assert(ctx->idx % 2 == 0);
-
-    if (ctx->idx >= N32) {
-	gen_rand_all(ctx);
-	ctx->idx = 0;
-    }
-#if defined(BIG_ENDIAN64) && !defined(ONLY64)
-    r1 = psfmt32[ctx->idx];
-    r2 = psfmt32[ctx->idx + 1];
-    ctx->idx += 2;
-    return ((uint64_t)r2 << 32) | r1;
-#else
-    r = psfmt64[ctx->idx / 2];
-    ctx->idx += 2;
-    return r;
-#endif
-}
-
-/* Generate a random integer in [0..limit). */
-uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) {
-    uint64_t ret, above;
-
-    above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit);
-    while (1) {
-	ret = gen_rand64(ctx);
-	if (ret < above) {
-	    ret %= limit;
-	    break;
-	}
-    }
-    return ret;
-}
-
-#ifndef ONLY64
-/**
- * This function generates pseudorandom 32-bit integers in the
- * specified array[] by one call. The number of pseudorandom integers
- * is specified by the argument size, which must be at least 624 and a
- * multiple of four.  The generation by this function is much faster
- * than the following gen_rand function.
- *
- * For initialization, init_gen_rand or init_by_array must be called
- * before the first call of this function. This function can not be
- * used after calling gen_rand function, without initialization.
- *
- * @param array an array where pseudorandom 32-bit integers are filled
- * by this function.  The pointer to the array must be \b "aligned"
- * (namely, must be a multiple of 16) in the SIMD version, since it
- * refers to the address of a 128-bit integer.  In the standard C
- * version, the pointer is arbitrary.
- *
- * @param size the number of 32-bit pseudorandom integers to be
- * generated.  size must be a multiple of 4, and greater than or equal
- * to (MEXP / 128 + 1) * 4.
- *
- * @note \b memalign or \b posix_memalign is available to get aligned
- * memory. Mac OSX doesn't have these functions, but \b malloc of OSX
- * returns the pointer to the aligned memory block.
- */
-void fill_array32(sfmt_t *ctx, uint32_t *array, int size) {
-    assert(ctx->initialized);
-    assert(ctx->idx == N32);
-    assert(size % 4 == 0);
-    assert(size >= N32);
-
-    gen_rand_array(ctx, (w128_t *)array, size / 4);
-    ctx->idx = N32;
-}
-#endif
-
-/**
- * This function generates pseudorandom 64-bit integers in the
- * specified array[] by one call. The number of pseudorandom integers
- * is specified by the argument size, which must be at least 312 and a
- * multiple of two.  The generation by this function is much faster
- * than the following gen_rand function.
- *
- * For initialization, init_gen_rand or init_by_array must be called
- * before the first call of this function. This function can not be
- * used after calling gen_rand function, without initialization.
- *
- * @param array an array where pseudorandom 64-bit integers are filled
- * by this function.  The pointer to the array must be "aligned"
- * (namely, must be a multiple of 16) in the SIMD version, since it
- * refers to the address of a 128-bit integer.  In the standard C
- * version, the pointer is arbitrary.
- *
- * @param size the number of 64-bit pseudorandom integers to be
- * generated.  size must be a multiple of 2, and greater than or equal
- * to (MEXP / 128 + 1) * 2
- *
- * @note \b memalign or \b posix_memalign is available to get aligned
- * memory. Mac OSX doesn't have these functions, but \b malloc of OSX
- * returns the pointer to the aligned memory block.
- */
-void fill_array64(sfmt_t *ctx, uint64_t *array, int size) {
-    assert(ctx->initialized);
-    assert(ctx->idx == N32);
-    assert(size % 2 == 0);
-    assert(size >= N64);
-
-    gen_rand_array(ctx, (w128_t *)array, size / 2);
-    ctx->idx = N32;
-
-#if defined(BIG_ENDIAN64) && !defined(ONLY64)
-    swap((w128_t *)array, size /2);
-#endif
-}
-
-/**
- * This function initializes the internal state array with a 32-bit
- * integer seed.
- *
- * @param seed a 32-bit integer used as the seed.
- */
-sfmt_t *init_gen_rand(uint32_t seed) {
-    void *p;
-    sfmt_t *ctx;
-    int i;
-    uint32_t *psfmt32;
-
-    if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
-	return NULL;
-    }
-    ctx = (sfmt_t *)p;
-    psfmt32 = &ctx->sfmt[0].u[0];
-
-    psfmt32[idxof(0)] = seed;
-    for (i = 1; i < N32; i++) {
-	psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)] 
-					    ^ (psfmt32[idxof(i - 1)] >> 30))
-	    + i;
-    }
-    ctx->idx = N32;
-    period_certification(ctx);
-    ctx->initialized = 1;
-
-    return ctx;
-}
-
-/**
- * This function initializes the internal state array,
- * with an array of 32-bit integers used as the seeds
- * @param init_key the array of 32-bit integers, used as a seed.
- * @param key_length the length of init_key.
- */
-sfmt_t *init_by_array(uint32_t *init_key, int key_length) {
-    void *p;
-    sfmt_t *ctx;
-    int i, j, count;
-    uint32_t r;
-    int lag;
-    int mid;
-    int size = N * 4;
-    uint32_t *psfmt32;
-
-    if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
-	return NULL;
-    }
-    ctx = (sfmt_t *)p;
-    psfmt32 = &ctx->sfmt[0].u[0];
-
-    if (size >= 623) {
-	lag = 11;
-    } else if (size >= 68) {
-	lag = 7;
-    } else if (size >= 39) {
-	lag = 5;
-    } else {
-	lag = 3;
-    }
-    mid = (size - lag) / 2;
-
-    memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt));
-    if (key_length + 1 > N32) {
-	count = key_length + 1;
-    } else {
-	count = N32;
-    }
-    r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] 
-	      ^ psfmt32[idxof(N32 - 1)]);
-    psfmt32[idxof(mid)] += r;
-    r += key_length;
-    psfmt32[idxof(mid + lag)] += r;
-    psfmt32[idxof(0)] = r;
-
-    count--;
-    for (i = 1, j = 0; (j < count) && (j < key_length); j++) {
-	r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] 
-		  ^ psfmt32[idxof((i + N32 - 1) % N32)]);
-	psfmt32[idxof((i + mid) % N32)] += r;
-	r += init_key[j] + i;
-	psfmt32[idxof((i + mid + lag) % N32)] += r;
-	psfmt32[idxof(i)] = r;
-	i = (i + 1) % N32;
-    }
-    for (; j < count; j++) {
-	r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] 
-		  ^ psfmt32[idxof((i + N32 - 1) % N32)]);
-	psfmt32[idxof((i + mid) % N32)] += r;
-	r += i;
-	psfmt32[idxof((i + mid + lag) % N32)] += r;
-	psfmt32[idxof(i)] = r;
-	i = (i + 1) % N32;
-    }
-    for (j = 0; j < N32; j++) {
-	r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] 
-		  + psfmt32[idxof((i + N32 - 1) % N32)]);
-	psfmt32[idxof((i + mid) % N32)] ^= r;
-	r -= i;
-	psfmt32[idxof((i + mid + lag) % N32)] ^= r;
-	psfmt32[idxof(i)] = r;
-	i = (i + 1) % N32;
-    }
-
-    ctx->idx = N32;
-    period_certification(ctx);
-    ctx->initialized = 1;
-
-    return ctx;
-}
-
-void fini_gen_rand(sfmt_t *ctx) {
-    assert(ctx != NULL);
-
-    ctx->initialized = 0;
-    free(ctx);
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/src/btalloc.c b/zircon/third_party/ulib/jemalloc/test/src/btalloc.c
deleted file mode 100644
index a78cb89..0000000
--- a/zircon/third_party/ulib/jemalloc/test/src/btalloc.c
+++ /dev/null
@@ -1,7 +0,0 @@
-#include "test/jemalloc_test.h"
-
-void *
-btalloc(size_t size, unsigned bits)
-{
-	return (btalloc_0(size, bits));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/src/btalloc_0.c b/zircon/third_party/ulib/jemalloc/test/src/btalloc_0.c
deleted file mode 100644
index 77d8904..0000000
--- a/zircon/third_party/ulib/jemalloc/test/src/btalloc_0.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "test/jemalloc_test.h"
-
-btalloc_n_gen(0)
diff --git a/zircon/third_party/ulib/jemalloc/test/src/btalloc_1.c b/zircon/third_party/ulib/jemalloc/test/src/btalloc_1.c
deleted file mode 100644
index 4c126c3..0000000
--- a/zircon/third_party/ulib/jemalloc/test/src/btalloc_1.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "test/jemalloc_test.h"
-
-btalloc_n_gen(1)
diff --git a/zircon/third_party/ulib/jemalloc/test/src/math.c b/zircon/third_party/ulib/jemalloc/test/src/math.c
deleted file mode 100644
index 887a363..0000000
--- a/zircon/third_party/ulib/jemalloc/test/src/math.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define	MATH_C_
-#include "test/jemalloc_test.h"
diff --git a/zircon/third_party/ulib/jemalloc/test/src/mq.c b/zircon/third_party/ulib/jemalloc/test/src/mq.c
deleted file mode 100644
index 47f362c..0000000
--- a/zircon/third_party/ulib/jemalloc/test/src/mq.c
+++ /dev/null
@@ -1,28 +0,0 @@
-#include "test/jemalloc_test.h"
-
-/*
- * Sleep for approximately ns nanoseconds.  No lower *nor* upper bound on sleep
- * time is guaranteed.
- */
-void
-mq_nanosleep(unsigned ns)
-{
-	assert(ns <= 1000*1000*1000);
-
-#ifdef _WIN32
-	Sleep(ns / 1000);
-#else
-	{
-		struct timespec timeout;
-
-		if (ns < 1000*1000*1000) {
-			timeout.tv_sec = 0;
-			timeout.tv_nsec = ns;
-		} else {
-			timeout.tv_sec = 1;
-			timeout.tv_nsec = 0;
-		}
-		nanosleep(&timeout, NULL);
-	}
-#endif
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/src/mtx.c b/zircon/third_party/ulib/jemalloc/test/src/mtx.c
deleted file mode 100644
index bbfec4a..0000000
--- a/zircon/third_party/ulib/jemalloc/test/src/mtx.c
+++ /dev/null
@@ -1,69 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifndef _CRT_SPINCOUNT
-#define	_CRT_SPINCOUNT 4000
-#endif
-
-bool
-mtx_init(mtx_t *mtx)
-{
-#ifdef _WIN32
-	if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT))
-		return (true);
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-	mtx->lock = OS_UNFAIR_LOCK_INIT;
-#elif (defined(JEMALLOC_OSSPIN))
-	mtx->lock = 0;
-#else
-	pthread_mutexattr_t attr;
-
-	if (pthread_mutexattr_init(&attr) != 0)
-		return (true);
-	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
-	if (pthread_mutex_init(&mtx->lock, &attr) != 0) {
-		pthread_mutexattr_destroy(&attr);
-		return (true);
-	}
-	pthread_mutexattr_destroy(&attr);
-#endif
-	return (false);
-}
-
-void
-mtx_fini(mtx_t *mtx)
-{
-#ifdef _WIN32
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-#elif (defined(JEMALLOC_OSSPIN))
-#else
-	pthread_mutex_destroy(&mtx->lock);
-#endif
-}
-
-void
-mtx_lock(mtx_t *mtx)
-{
-#ifdef _WIN32
-	EnterCriticalSection(&mtx->lock);
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-	os_unfair_lock_lock(&mtx->lock);
-#elif (defined(JEMALLOC_OSSPIN))
-	OSSpinLockLock(&mtx->lock);
-#else
-	pthread_mutex_lock(&mtx->lock);
-#endif
-}
-
-void
-mtx_unlock(mtx_t *mtx)
-{
-#ifdef _WIN32
-	LeaveCriticalSection(&mtx->lock);
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-	os_unfair_lock_unlock(&mtx->lock);
-#elif (defined(JEMALLOC_OSSPIN))
-	OSSpinLockUnlock(&mtx->lock);
-#else
-	pthread_mutex_unlock(&mtx->lock);
-#endif
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/src/test.c b/zircon/third_party/ulib/jemalloc/test/src/test.c
deleted file mode 100644
index 345cc1c..0000000
--- a/zircon/third_party/ulib/jemalloc/test/src/test.c
+++ /dev/null
@@ -1,129 +0,0 @@
-#include "test/jemalloc_test.h"
-
-static unsigned		test_count = 0;
-static test_status_t	test_counts[test_status_count] = {0, 0, 0};
-static test_status_t	test_status = test_status_pass;
-static const char *	test_name = "";
-
-JEMALLOC_FORMAT_PRINTF(1, 2)
-void
-test_skip(const char *format, ...)
-{
-	va_list ap;
-
-	va_start(ap, format);
-	malloc_vcprintf(NULL, NULL, format, ap);
-	va_end(ap);
-	malloc_printf("\n");
-	test_status = test_status_skip;
-}
-
-JEMALLOC_FORMAT_PRINTF(1, 2)
-void
-test_fail(const char *format, ...)
-{
-	va_list ap;
-
-	va_start(ap, format);
-	malloc_vcprintf(NULL, NULL, format, ap);
-	va_end(ap);
-	malloc_printf("\n");
-	test_status = test_status_fail;
-}
-
-static const char *
-test_status_string(test_status_t test_status)
-{
-	switch (test_status) {
-	case test_status_pass: return "pass";
-	case test_status_skip: return "skip";
-	case test_status_fail: return "fail";
-	default: not_reached();
-	}
-}
-
-void
-p_test_init(const char *name)
-{
-	test_count++;
-	test_status = test_status_pass;
-	test_name = name;
-}
-
-void
-p_test_fini(void)
-{
-	test_counts[test_status]++;
-	malloc_printf("%s: %s\n", test_name, test_status_string(test_status));
-}
-
-static test_status_t
-p_test_impl(bool do_malloc_init, test_t *t, va_list ap)
-{
-	test_status_t ret;
-
-	if (do_malloc_init) {
-		/*
-		 * Make sure initialization occurs prior to running tests.
-		 * Tests are special because they may use internal facilities
-		 * prior to triggering initialization as a side effect of
-		 * calling into the public API.
-		 */
-		if (nallocx(1, 0) == 0) {
-			malloc_printf("Initialization error");
-			return (test_status_fail);
-		}
-	}
-
-	ret = test_status_pass;
-	for (; t != NULL; t = va_arg(ap, test_t *)) {
-		t();
-		if (test_status > ret)
-			ret = test_status;
-	}
-
-	malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
-	    test_status_string(test_status_pass),
-	    test_counts[test_status_pass], test_count,
-	    test_status_string(test_status_skip),
-	    test_counts[test_status_skip], test_count,
-	    test_status_string(test_status_fail),
-	    test_counts[test_status_fail], test_count);
-
-	return (ret);
-}
-
-test_status_t
-p_test(test_t *t, ...)
-{
-	test_status_t ret;
-	va_list ap;
-
-	ret = test_status_pass;
-	va_start(ap, t);
-	ret = p_test_impl(true, t, ap);
-	va_end(ap);
-
-	return (ret);
-}
-
-test_status_t
-p_test_no_malloc_init(test_t *t, ...)
-{
-	test_status_t ret;
-	va_list ap;
-
-	ret = test_status_pass;
-	va_start(ap, t);
-	ret = p_test_impl(false, t, ap);
-	va_end(ap);
-
-	return (ret);
-}
-
-void
-p_test_fail(const char *prefix, const char *message)
-{
-	malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message);
-	test_status = test_status_fail;
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/src/thd.c b/zircon/third_party/ulib/jemalloc/test/src/thd.c
deleted file mode 100644
index e316708..0000000
--- a/zircon/third_party/ulib/jemalloc/test/src/thd.c
+++ /dev/null
@@ -1,36 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifdef _WIN32
-void
-thd_create(thd_t *thd, void *(*proc)(void *), void *arg)
-{
-	LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc;
-	*thd = CreateThread(NULL, 0, routine, arg, 0, NULL);
-	if (*thd == NULL)
-		test_fail("Error in CreateThread()\n");
-}
-
-void
-thd_join(thd_t thd, void **ret)
-{
-	if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) {
-		DWORD exit_code;
-		GetExitCodeThread(thd, (LPDWORD) &exit_code);
-		*ret = (void *)(uintptr_t)exit_code;
-	}
-}
-
-#else
-void
-thd_create(thd_t *thd, void *(*proc)(void *), void *arg)
-{
-	if (pthread_create(thd, NULL, proc, arg) != 0)
-		test_fail("Error in pthread_create()\n");
-}
-
-void
-thd_join(thd_t thd, void **ret)
-{
-	pthread_join(thd, ret);
-}
-#endif
diff --git a/zircon/third_party/ulib/jemalloc/test/src/timer.c b/zircon/third_party/ulib/jemalloc/test/src/timer.c
deleted file mode 100644
index 82f69d0..0000000
--- a/zircon/third_party/ulib/jemalloc/test/src/timer.c
+++ /dev/null
@@ -1,58 +0,0 @@
-#include "test/jemalloc_test.h"
-
-void
-timer_start(timedelta_t *timer)
-{
-	nstime_init(&timer->t0, 0);
-	nstime_update(&timer->t0);
-}
-
-void
-timer_stop(timedelta_t *timer)
-{
-	nstime_copy(&timer->t1, &timer->t0);
-	nstime_update(&timer->t1);
-}
-
-uint64_t
-timer_usec(const timedelta_t *timer)
-{
-	nstime_t delta;
-
-	nstime_copy(&delta, &timer->t1);
-	nstime_subtract(&delta, &timer->t0);
-	return (nstime_ns(&delta) / 1000);
-}
-
-void
-timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen)
-{
-	uint64_t t0 = timer_usec(a);
-	uint64_t t1 = timer_usec(b);
-	uint64_t mult;
-	size_t i = 0;
-	size_t j, n;
-
-	/* Whole. */
-	n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1);
-	i += n;
-	if (i >= buflen)
-		return;
-	mult = 1;
-	for (j = 0; j < n; j++)
-		mult *= 10;
-
-	/* Decimal. */
-	n = malloc_snprintf(&buf[i], buflen-i, ".");
-	i += n;
-
-	/* Fraction. */
-	while (i < buflen-1) {
-		uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10
-		    >= 5)) ? 1 : 0;
-		n = malloc_snprintf(&buf[i], buflen-i,
-		    "%"FMTu64, (t0 * mult / t1) % 10 + round);
-		i += n;
-		mult *= 10;
-	}
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/stress/microbench.c b/zircon/third_party/ulib/jemalloc/test/stress/microbench.c
deleted file mode 100644
index c599d9d..0000000
--- a/zircon/third_party/ulib/jemalloc/test/stress/microbench.c
+++ /dev/null
@@ -1,176 +0,0 @@
-#include "test/jemalloc_test.h"
-
-JEMALLOC_INLINE_C void
-time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter,
-    void (*func)(void))
-{
-	uint64_t i;
-
-	for (i = 0; i < nwarmup; i++)
-		func();
-	timer_start(timer);
-	for (i = 0; i < niter; i++)
-		func();
-	timer_stop(timer);
-}
-
-void
-compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a,
-    void (*func_a), const char *name_b, void (*func_b))
-{
-	timedelta_t timer_a, timer_b;
-	char ratio_buf[6];
-	void *p;
-
-	p = mallocx(1, 0);
-	if (p == NULL) {
-		test_fail("Unexpected mallocx() failure");
-		return;
-	}
-
-	time_func(&timer_a, nwarmup, niter, func_a);
-	time_func(&timer_b, nwarmup, niter, func_b);
-
-	timer_ratio(&timer_a, &timer_b, ratio_buf, sizeof(ratio_buf));
-	malloc_printf("%"FMTu64" iterations, %s=%"FMTu64"us, "
-	    "%s=%"FMTu64"us, ratio=1:%s\n",
-	    niter, name_a, timer_usec(&timer_a), name_b, timer_usec(&timer_b),
-	    ratio_buf);
-
-	dallocx(p, 0);
-}
-
-static void
-malloc_free(void)
-{
-	/* The compiler can optimize away free(malloc(1))! */
-	void *p = malloc(1);
-	if (p == NULL) {
-		test_fail("Unexpected malloc() failure");
-		return;
-	}
-	free(p);
-}
-
-static void
-mallocx_free(void)
-{
-	void *p = mallocx(1, 0);
-	if (p == NULL) {
-		test_fail("Unexpected mallocx() failure");
-		return;
-	}
-	free(p);
-}
-
-TEST_BEGIN(test_malloc_vs_mallocx)
-{
-	compare_funcs(10*1000*1000, 100*1000*1000, "malloc",
-	    malloc_free, "mallocx", mallocx_free);
-}
-TEST_END
-
-static void
-malloc_dallocx(void)
-{
-	void *p = malloc(1);
-	if (p == NULL) {
-		test_fail("Unexpected malloc() failure");
-		return;
-	}
-	dallocx(p, 0);
-}
-
-static void
-malloc_sdallocx(void)
-{
-	void *p = malloc(1);
-	if (p == NULL) {
-		test_fail("Unexpected malloc() failure");
-		return;
-	}
-	sdallocx(p, 1, 0);
-}
-
-TEST_BEGIN(test_free_vs_dallocx)
-{
-	compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free,
-	    "dallocx", malloc_dallocx);
-}
-TEST_END
-
-TEST_BEGIN(test_dallocx_vs_sdallocx)
-{
-	compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx,
-	    "sdallocx", malloc_sdallocx);
-}
-TEST_END
-
-static void
-malloc_mus_free(void)
-{
-	void *p;
-
-	p = malloc(1);
-	if (p == NULL) {
-		test_fail("Unexpected malloc() failure");
-		return;
-	}
-	malloc_usable_size(p);
-	free(p);
-}
-
-static void
-malloc_sallocx_free(void)
-{
-	void *p;
-
-	p = malloc(1);
-	if (p == NULL) {
-		test_fail("Unexpected malloc() failure");
-		return;
-	}
-	if (sallocx(p, 0) < 1)
-		test_fail("Unexpected sallocx() failure");
-	free(p);
-}
-
-TEST_BEGIN(test_mus_vs_sallocx)
-{
-	compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size",
-	    malloc_mus_free, "sallocx", malloc_sallocx_free);
-}
-TEST_END
-
-static void
-malloc_nallocx_free(void)
-{
-	void *p;
-
-	p = malloc(1);
-	if (p == NULL) {
-		test_fail("Unexpected malloc() failure");
-		return;
-	}
-	if (nallocx(1, 0) < 1)
-		test_fail("Unexpected nallocx() failure");
-	free(p);
-}
-
-TEST_BEGIN(test_sallocx_vs_nallocx)
-{
-	compare_funcs(10*1000*1000, 100*1000*1000, "sallocx",
-	    malloc_sallocx_free, "nallocx", malloc_nallocx_free);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_malloc_vs_mallocx,
-	    test_free_vs_dallocx,
-	    test_dallocx_vs_sdallocx,
-	    test_mus_vs_sallocx,
-	    test_sallocx_vs_nallocx));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/test.sh.in b/zircon/third_party/ulib/jemalloc/test/test.sh.in
deleted file mode 100644
index a39f99f..0000000
--- a/zircon/third_party/ulib/jemalloc/test/test.sh.in
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/sh
-
-case @abi@ in
-  macho)
-    export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib"
-    ;;
-  pecoff)
-    export PATH="${PATH}:@objroot@lib"
-    ;;
-  *)
-    ;;
-esac
-
-# Corresponds to test_status_t.
-pass_code=0
-skip_code=1
-fail_code=2
-
-pass_count=0
-skip_count=0
-fail_count=0
-for t in $@; do
-  if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then
-    echo
-  fi
-  echo "=== ${t} ==="
-  ${t}@exe@ @abs_srcroot@ @abs_objroot@
-  result_code=$?
-  case ${result_code} in
-    ${pass_code})
-      pass_count=$((pass_count+1))
-      ;;
-    ${skip_code})
-      skip_count=$((skip_count+1))
-      ;;
-    ${fail_code})
-      fail_count=$((fail_count+1))
-      ;;
-    *)
-      echo "Test harness error" 1>&2
-      exit 1
-  esac
-done
-
-total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}`
-echo
-echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}"
-
-if [ ${fail_count} -eq 0 ] ; then
-  exit 0
-else
-  exit 1
-fi
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/SFMT.c b/zircon/third_party/ulib/jemalloc/test/unit/SFMT.c
deleted file mode 100644
index cf52670..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/SFMT.c
+++ /dev/null
@@ -1,1604 +0,0 @@
-/*
- * This file derives from SFMT 1.3.3
- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
- * released under the terms of the following license:
- *
- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
- *   University. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are
- *   met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials provided
- *         with the distribution.
- *       * Neither the name of the Hiroshima University nor the names of
- *         its contributors may be used to endorse or promote products
- *         derived from this software without specific prior written
- *         permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include "test/jemalloc_test.h"
-
-#define	BLOCK_SIZE 10000
-#define	BLOCK_SIZE64 (BLOCK_SIZE / 2)
-#define	COUNT_1 1000
-#define	COUNT_2 700
-
-static const uint32_t init_gen_rand_32_expected[] = {
-	3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U,
-	3796268453U,  423124208U, 2143818589U, 3827219408U, 2987036003U,
-	2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U,
-	 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U,
-	1764869045U,  824597505U,  862581900U, 2469764249U,  812862514U,
-	 359318673U,  116957936U, 3367389672U, 2327178354U, 1898245200U,
-	3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U,
-	1417665896U,  964324147U, 2282797708U, 2456269299U,  313400376U,
-	2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U,
-	 463679346U, 3721104591U, 3475064196U,  856141236U, 1499559719U,
-	3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U,
-	1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U,
-	 462466863U, 1037679449U, 1228140306U,  922298197U, 1205109853U,
-	1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U,
-	 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U,
-	2997311961U, 1189931652U, 3981543765U, 2826166703U,   87159245U,
-	1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U,
-	2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U,
-	3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U,
-	 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U,
-	3212265810U,  984692259U,  346590253U, 1804179199U, 3298543443U,
-	 750108141U, 2880257022U,  243310542U, 1869036465U, 1588062513U,
-	2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U,
-	2539522841U,  127965585U, 3992448871U,  913388237U,  559130076U,
-	1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U,
-	1013913783U, 1155864921U, 2715773730U,  915061862U, 1948766573U,
-	2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U,
-	3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U,
-	3237090058U, 3880063844U,  862416318U, 4039923869U, 2303788317U,
-	3073590536U,  701653667U, 2131530884U, 3169309950U, 2028486980U,
-	 747196777U, 3620218225U,  432016035U, 1449580595U, 2772266392U,
-	 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U,
-	1104864179U,  342430307U, 1350510923U, 3024656237U, 1028417492U,
-	2870772950U,  290847558U, 3675663500U,  508431529U, 4264340390U,
-	2263569913U, 1669302976U,  519511383U, 2706411211U, 3764615828U,
-	3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U,
-	2348767304U, 2664054906U, 3763609282U,  593943581U, 3757090046U,
-	2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U,
-	2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U,
-	4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U,
-	3038334120U,  547099465U,  771105860U, 3199983734U, 4282046461U,
-	2298388363U,  934810218U, 2837827901U, 3952500708U, 2095130248U,
-	3083335297U,   26885281U, 3932155283U, 1531751116U, 1425227133U,
-	 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U,
-	 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U,
-	1729880440U,  805571298U,  448971099U, 2726757106U, 2749436461U,
-	2485987104U,  175337042U, 3235477922U, 3882114302U, 2020970972U,
-	 943926109U, 2762587195U, 1904195558U, 3452650564U,  108432281U,
-	3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U,
-	4258854744U,  980047703U, 4057175418U, 3890008292U,  145653646U,
-	3141868989U, 3293216228U, 1194331837U, 1254570642U, 3049934521U,
-	2868313360U, 2886032750U, 1110873820U,  279553524U, 3007258565U,
-	1104807822U, 3186961098U,  315764646U, 2163680838U, 3574508994U,
-	3099755655U,  191957684U, 3642656737U, 3317946149U, 3522087636U,
-	 444526410U,  779157624U, 1088229627U, 1092460223U, 1856013765U,
-	3659877367U,  368270451U,  503570716U, 3000984671U, 2742789647U,
-	 928097709U, 2914109539U,  308843566U, 2816161253U, 3667192079U,
-	2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U,
-	3787782576U, 2894104823U, 1296880455U, 1253636503U,  989959407U,
-	2291560361U, 2776790436U, 1913178042U, 1584677829U,  689637520U,
-	1898406878U,  688391508U, 3385234998U,  845493284U, 1943591856U,
-	2720472050U,  222695101U, 1653320868U, 2904632120U, 4084936008U,
-	1080720688U, 3938032556U,  387896427U, 2650839632U,   99042991U,
-	1720913794U, 1047186003U, 1877048040U, 2090457659U,  517087501U,
-	4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U,
-	1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U,
-	3576797776U, 2074552772U,  832002644U, 3097122623U, 2464859298U,
-	2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U,
-	2306631119U,  639727358U, 3369698270U,  226902796U, 2099920751U,
-	1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U,
-	 841660320U, 3974501451U, 3360949056U, 1676829340U,  728899254U,
-	2047809627U, 2390948962U,  670165943U, 3412951831U, 4189320049U,
-	1911595255U, 2055363086U,  507170575U,  418219594U, 4141495280U,
-	2692088692U, 4203630654U, 3540093932U,  791986533U, 2237921051U,
-	2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U,
-	 591653646U,  228432437U, 1611046598U, 3007736357U, 1040040725U,
-	2726180733U, 2789804360U, 4263568405U,  829098158U, 3847722805U,
-	1123578029U, 1804276347U,  997971319U, 4203797076U, 4185199713U,
-	2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U,
-	1967077982U, 1585223204U, 1097475516U, 1903944948U,  740382444U,
-	1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U,
-	3519754455U, 2866243923U,  341865437U,  645498576U, 2690735853U,
-	1046963033U, 2493178460U, 1187604696U, 1619577821U,  488503634U,
-	3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U,
-	3794467088U, 1796415981U, 3657173746U,  409136296U, 1387122342U,
-	1297726519U,  219544855U, 4270285558U,  437578827U, 1444698679U,
-	2258519491U,  963109892U, 3982244073U, 3351535275U,  385328496U,
-	1804784013U,  698059346U, 3920535147U,  708331212U,  784338163U,
-	 785678147U, 1238376158U, 1557298846U, 2037809321U,  271576218U,
-	4145155269U, 1913481602U, 2763691931U,  588981080U, 1201098051U,
-	3717640232U, 1509206239U,  662536967U, 3180523616U, 1133105435U,
-	2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U,
-	3034720222U, 1090798544U, 2942170004U, 4036187520U,  686972531U,
-	2610990302U, 2641437026U, 1837562420U,  722096247U, 1315333033U,
-	2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U,
-	 814384596U,  385649582U, 1916643285U, 1924625106U, 2512905582U,
-	2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U,
-	2969208552U,  884750901U,  102992576U,  227844301U, 3681442994U,
-	3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U,
-	1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U,
-	1629323443U,    3233815U, 2003823032U, 3083834263U, 2379264872U,
-	3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U,
-	 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U,
-	4018728324U,  983712955U,  440071928U, 3710838677U, 2001027698U,
-	3994702151U,   22493119U, 3584400918U, 3446253670U, 4254789085U,
-	1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U,
-	3403623451U,   67092802U, 2609352193U, 3914150340U, 1814842761U,
-	3610830847U,  591531412U, 3880232807U, 1673505890U, 2585326991U,
-	1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U,
-	 154025329U,  121678860U, 1164915738U,  973873761U,  269116100U,
-	  52087970U,  744015362U,  498556057U,   94298882U, 1563271621U,
-	2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U,
-	1097742433U, 3924840517U,  264557272U, 2292287003U, 3203307984U,
-	4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U,
-	3412254904U,  583538222U, 2390557166U, 4140459427U, 2810357445U,
-	 226777499U, 2496151295U, 2207301712U, 3283683112U,  611630281U,
-	1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U,
-	1313998161U, 2365383016U, 3146941060U, 1801206260U,  796124080U,
-	2076248581U, 1747472464U, 3254365145U,  595543130U, 3573909503U,
-	3758250204U, 2020768540U, 2439254210U,   93368951U, 3155792250U,
-	2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U,
-	1443493395U, 2581621665U, 3086506297U, 2443465861U,  558107211U,
-	1519367835U,  249149686U,  908102264U, 2588765675U, 1232743965U,
-	1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U,
-	 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U,
-	1633861986U, 1636979509U, 1438500431U,   58463278U, 2823485629U,
-	2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U,
-	1444292075U,  437703973U, 1060414512U,  189705863U,  910018135U,
-	4077357964U,  884213423U, 2644986052U, 3973488374U, 1187906116U,
-	2331207875U,  780463700U, 3713351662U, 3854611290U,  412805574U,
-	2978462572U, 2176222820U,  829424696U, 2790788332U, 2750819108U,
-	1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U,
-	  14130042U,  192292500U,  947227076U,   90719497U, 3854230320U,
-	 784028434U, 2142399787U, 1563449646U, 2844400217U,  819143172U,
-	2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U,
-	 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U,
-	1412424497U, 2981395985U, 1418359660U, 2925902456U,   52752784U,
-	3713667988U, 3924669405U,  648975707U, 1145520213U, 4018650664U,
-	3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U,
-	1114540067U, 3728768081U, 2396958768U,  590672271U,  904818725U,
-	4263660715U,  700754408U, 1042601829U, 4094111823U, 4274838909U,
-	2512692617U, 2774300207U, 2057306915U, 3470942453U,   99333088U,
-	1142661026U, 2889931380U,   14316674U, 2201179167U,  415289459U,
-	 448265759U, 3515142743U, 3254903683U,  246633281U, 1184307224U,
-	2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U,
-	1544150531U,  399010405U, 1513946097U,  499682937U,  461167460U,
-	3045570638U, 1633669705U,  851492362U, 4052801922U, 2055266765U,
-	 635556996U,  368266356U, 2385737383U, 3218202352U, 2603772408U,
-	 349178792U,  226482567U, 3102426060U, 3575998268U, 2103001871U,
-	3243137071U,  225500688U, 1634718593U, 4283311431U, 4292122923U,
-	3842802787U,  811735523U,  105712518U,  663434053U, 1855889273U,
-	2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U,
-	3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U,
-	 353001539U,  459496439U, 3799690868U, 1293777660U, 2761079737U,
-	 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U,
-	4134660419U, 3903444024U, 3576494993U,  203682175U, 3321164857U,
-	2747963611U,   79749085U, 2992890370U, 1240278549U, 1772175713U,
-	2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U,
-	2489884874U,  756853961U, 2298874501U, 4013448667U, 4143996022U,
-	2948306858U, 4132920035U, 1283299272U,  995592228U, 3450508595U,
-	1027845759U, 1766942720U, 3861411826U, 1446861231U,   95974993U,
-	3502263554U, 1487532194U,  601502472U, 4129619129U,  250131773U,
-	2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U,
-	2276901713U,  365637751U, 2260718426U, 1394775634U, 1791172338U,
-	2690503163U, 2952737846U, 1568710462U,  732623190U, 2980358000U,
-	1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U,
-	3204031934U,  735775531U,  107468620U, 3734611984U,  631009402U,
-	3083622457U, 4109580626U,  159373458U, 1301970201U, 4132389302U,
-	1293255004U,  847182752U, 4170022737U,   96712900U, 2641406755U,
-	1381727755U,  405608287U, 4287919625U, 1703554290U, 3589580244U,
-	2911403488U,    2166565U, 2647306451U, 2330535117U, 1200815358U,
-	1165916754U,  245060911U, 4040679071U, 3684908771U, 2452834126U,
-	2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U,
-	4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U,
-	2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U,
-	4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U,
-	1046178638U,  144496770U,  230725846U, 3349637149U,   17065717U,
-	2809932048U, 2054581785U, 3608424964U, 3259628808U,  134897388U,
-	3743067463U,  257685904U, 3795656590U, 1562468719U, 3589103904U,
-	3120404710U,  254684547U, 2653661580U, 3663904795U, 2631942758U,
-	1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U,
-	1935868586U, 4110970440U,  296706371U, 2128666368U, 1319875791U,
-	1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U,
-	1244491489U, 3533770203U,  567496053U, 2757924305U, 2781639343U,
-	2818420107U,  560404889U, 2619609724U, 4176035430U, 2511289753U,
-	2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U,
-	 330725126U,  367400677U,  888239854U,  545570454U, 4259590525U,
-	 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U,
-	3631537963U, 3342671457U, 1301549147U, 2083739356U,  146593792U,
-	3217959080U,  652755743U, 2032187193U, 3898758414U, 1021358093U,
-	4037409230U, 2176407931U, 3427391950U, 2883553603U,  985613827U,
-	3105265092U, 3423168427U, 3387507672U,  467170288U, 2141266163U,
-	3723870208U,  916410914U, 1293987799U, 2652584950U,  769160137U,
-	3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U,
-	 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U,
-	2352279820U,  401870217U, 2625822463U, 1529125296U, 2981801895U,
-	1191285226U, 4027725437U, 3432700217U, 4098835661U,  971182783U,
-	2443861173U, 3881457123U, 3874386651U,  457276199U, 2638294160U,
-	4002809368U,  421169044U, 1112642589U, 3076213779U, 3387033971U,
-	2499610950U, 3057240914U, 1662679783U,  461224431U, 1168395933U
-};
-static const uint32_t init_by_array_32_expected[] = {
-	2920711183U, 3885745737U, 3501893680U,  856470934U, 1421864068U,
-	 277361036U, 1518638004U, 2328404353U, 3355513634U,   64329189U,
-	1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U,
-	2913275699U,  882658412U,  384641219U,  422202002U, 1873384891U,
-	2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U,
-	4195470535U,  779207191U, 1577721373U, 1390469554U, 2928648150U,
-	 121399709U, 3170839019U, 4044347501U,  953953814U, 3821710850U,
-	3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U,
-	4044408017U,  433600965U, 1637785608U, 1798509764U,  860770589U,
-	3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U,
-	3357389386U, 2154596123U,  496568176U, 2650035164U, 2472361850U,
-	   3438299U, 2150366101U, 1577256676U, 3802546413U, 1787774626U,
-	4078331588U, 3706103141U,  170391138U, 3806085154U, 1680970100U,
-	1961637521U, 3316029766U,  890610272U, 1453751581U, 1430283664U,
-	3051057411U, 3597003186U,  542563954U, 3796490244U, 1690016688U,
-	3448752238U,  440702173U,  347290497U, 1121336647U, 2540588620U,
-	 280881896U, 2495136428U,  213707396U,   15104824U, 2946180358U,
-	 659000016U,  566379385U, 2614030979U, 2855760170U,  334526548U,
-	2315569495U, 2729518615U,  564745877U, 1263517638U, 3157185798U,
-	1604852056U, 1011639885U, 2950579535U, 2524219188U,  312951012U,
-	1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U,
-	 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U,
-	3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U,
-	2631577923U, 3027156164U,  842334259U, 3353446843U, 1226432104U,
-	1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U,
-	2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U,
-	1625156629U, 3669701987U,  615211810U, 3294791649U, 4131143784U,
-	2590843588U, 3207422808U, 3275066464U,  561592872U, 3957205738U,
-	3396578098U,   48410678U, 3505556445U, 1005764855U, 3920606528U,
-	2936980473U, 2378918600U, 2404449845U, 1649515163U,  701203563U,
-	3705256349U,   83714199U, 3586854132U,  922978446U, 2863406304U,
-	3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U,
-	3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U,
-	3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U,
-	 980754888U, 1302782359U, 1765252468U,  459245755U, 3728923860U,
-	1512894209U, 2046491914U,  207860527U,  514188684U, 2288713615U,
-	1597354672U, 3349636117U, 2357291114U, 3995796221U,  945364213U,
-	1893326518U, 3770814016U, 1691552714U, 2397527410U,  967486361U,
-	 776416472U, 4197661421U,  951150819U, 1852770983U, 4044624181U,
-	1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U,
-	1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U,
-	2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U,
-	  20815612U, 3727008608U, 3871996229U,  824061249U, 1932503978U,
-	3404541726U,  758428924U, 2609331364U, 1223966026U, 1299179808U,
-	 648499352U, 2180134401U,  880821170U, 3781130950U,  113491270U,
-	1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U,
-	4165586898U, 1629813212U, 2887821158U,  415045333U,  628926856U,
-	2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U,
-	1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U,
-	2052479531U, 1695809164U, 3176587306U, 2632770465U,   81634404U,
-	1603220563U,  644238487U,  302857763U,  897352968U, 2613146653U,
-	1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U,
-	3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U,
-	 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U,
-	3111154986U, 2929478371U,  668346391U, 1152241381U, 2632029711U,
-	3004150659U, 2135025926U,  948690501U, 2799119116U, 4228829406U,
-	1981197489U, 4209064138U,  684318751U, 3459397845U,  201790843U,
-	4022541136U, 3043635877U,  492509624U, 3263466772U, 1509148086U,
-	 921459029U, 3198857146U,  705479721U, 3835966910U, 3603356465U,
-	 576159741U, 1742849431U,  594214882U, 2055294343U, 3634861861U,
-	 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U,
-	2464815318U, 3960178104U, 1784261920U,   18311476U, 3627135050U,
-	 644609697U,  424968996U,  919890700U, 2986824110U,  816423214U,
-	4003562844U, 1392714305U, 1757384428U, 2569030598U,  995949559U,
-	3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U,
-	2770783427U, 2775406005U, 2777781742U, 1931292655U,  472147933U,
-	3865853827U, 2726470545U, 2668412860U, 2887008249U,  408979190U,
-	3578063323U, 3242082049U, 1778193530U,   27981909U, 2362826515U,
-	 389875677U, 1043878156U,  581653903U, 3830568952U,  389535942U,
-	3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U,
-	3951172488U, 1098005003U,  906275699U, 3446228002U, 2220677963U,
-	2059306445U,  132199571U,  476838790U, 1868039399U, 3097344807U,
-	 857300945U,  396345050U, 2835919916U, 1782168828U, 1419519470U,
-	4288137521U,  819087232U,  596301494U,  872823172U, 1526888217U,
-	 805161465U, 1116186205U, 2829002754U, 2352620120U,  620121516U,
-	 354159268U, 3601949785U,  209568138U, 1352371732U, 2145977349U,
-	4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U,
-	3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U,
-	1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U,
-	3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U,
-	3574970923U, 4057131421U,  589224178U, 1000098193U,  171190718U,
-	2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U,
-	3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U,
-	2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U,
-	1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U,
-	2419700818U,  971242709U, 1361975763U, 1096842482U, 3271045537U,
-	  81165449U,  612438025U, 3912966678U, 1356929810U,  733545735U,
-	 537003843U, 1282953084U,  884458241U,  588930090U, 3930269801U,
-	2961472450U, 1219535534U, 3632251943U,  268183903U, 1441240533U,
-	3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U,
-	2105543911U, 1764085217U, 3246183186U,  482438805U,  888317895U,
-	2628314765U, 2466219854U,  717546004U, 2322237039U,  416725234U,
-	1544049923U, 1797944973U, 3398652364U, 3111909456U,  485742908U,
-	2277491072U, 1056355088U, 3181001278U,  129695079U, 2693624550U,
-	1764438564U, 3797785470U,  195503713U, 3266519725U, 2053389444U,
-	1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U,
-	4094406648U, 2171410785U, 1781151386U, 1378577117U,  654643266U,
-	3424024173U, 3385813322U,  679385799U,  479380913U,  681715441U,
-	3096225905U,  276813409U, 3854398070U, 2721105350U,  831263315U,
-	3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U,
-	1412672743U,  820330404U, 3491501010U,  942735832U,  710652807U,
-	3972652090U,  679881088U,   40577009U, 3705286397U, 2815423480U,
-	3566262429U,  663396513U, 3777887429U, 4016670678U,  404539370U,
-	1142712925U, 1140173408U, 2913248352U, 2872321286U,  263751841U,
-	3175196073U, 3162557581U, 2878996619U,   75498548U, 3836833140U,
-	3284664959U, 1157523805U,  112847376U,  207855609U, 1337979698U,
-	1222578451U,  157107174U,  901174378U, 3883717063U, 1618632639U,
-	1767889440U, 4264698824U, 1582999313U,  884471997U, 2508825098U,
-	3756370771U, 2457213553U, 3565776881U, 3709583214U,  915609601U,
-	 460833524U, 1091049576U,   85522880U,    2553251U,  132102809U,
-	2429882442U, 2562084610U, 1386507633U, 4112471229U,   21965213U,
-	1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U,
-	1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U,
-	3463052265U,  802340101U, 1912886800U, 4031997367U, 3550640406U,
-	1596096923U,  610150600U,  431464457U, 2541325046U,  486478003U,
-	 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U,
-	1228993498U,  510075548U, 3424962587U, 2458689681U,  818934833U,
-	4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U,
-	3317854285U,  631986188U, 3008214764U,  613826412U, 3567358221U,
-	3512343882U, 1552467474U, 3316162670U, 1275841024U, 4142173454U,
-	 565267881U,  768644821U,  198310105U, 2396688616U, 1837659011U,
-	 203429334U,  854539004U, 4235811518U, 3338304926U, 3730418692U,
-	3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U,
-	3894665932U,  145835667U,  249563655U, 1932210840U, 2431696407U,
-	3312636759U,  214962629U, 2092026914U, 3020145527U, 4073039873U,
-	2739105705U, 1308336752U,  855104522U, 2391715321U,   67448785U,
-	 547989482U,  854411802U, 3608633740U,  431731530U,  537375589U,
-	3888005760U,  696099141U,  397343236U, 1864511780U,   44029739U,
-	1729526891U, 1993398655U, 2010173426U, 2591546756U,  275223291U,
-	1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U,
-	 681707194U, 3260479338U,  933579397U, 2983029282U, 2505504587U,
-	2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U,
-	2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U,
-	3917185129U, 2155660174U, 3280524475U, 1503827867U,  674380765U,
-	1918468193U, 3843983676U,  634358221U, 2538335643U, 1873351298U,
-	3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U,
-	2516284287U,   24437745U, 1118381474U, 2816314867U, 2448576035U,
-	4281989654U,  217287825U,  165872888U, 2628995722U, 3533525116U,
-	2721669106U,  872340568U, 3429930655U, 3309047304U, 3916704967U,
-	3270160355U, 1348884255U, 1634797670U,  881214967U, 4259633554U,
-	 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U,
-	3853082619U, 4073196549U, 1189620777U,  637238656U,  930241537U,
-	4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U,
-	1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U,
-	3307479609U, 3849322257U, 3507445699U, 4247696636U,  758393720U,
-	 967665141U, 1095244571U, 1319812152U,  407678762U, 2640605208U,
-	2170766134U, 3663594275U, 4039329364U, 2512175520U,  725523154U,
-	2249807004U, 3312617979U, 2414634172U, 1278482215U,  349206484U,
-	1573063308U, 1196429124U, 3873264116U, 2400067801U,  268795167U,
-	 226175489U, 2961367263U, 1968719665U,   42656370U, 1010790699U,
-	 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U,
-	3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U,
-	1087362407U, 3625200291U,  361937537U,  580441897U, 1520043666U,
-	2270875402U, 1009161260U, 2502355842U, 4278769785U,  473902412U,
-	1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U,
-	1806991954U, 2194674403U, 3455972205U,  807207678U, 3655655687U,
-	 674112918U,  195425752U, 3917890095U, 1874364234U, 1837892715U,
-	3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U,
-	 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U,
-	2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U,
-	1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U,
-	3494770452U, 3332626671U,   55327267U,  478030603U,  411080625U,
-	3419529010U, 1604767823U, 3513468014U,  570668510U,  913790824U,
-	2283967995U,  695159462U, 3825542932U, 4150698144U, 1829758699U,
-	 202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U,
-	2477423819U, 3932081579U,  900879979U, 2145588390U, 2670007504U,
-	 580819444U, 1864996828U, 2526325979U, 1019124258U,  815508628U,
-	2765933989U, 1277301341U, 3006021786U,  855540956U,  288025710U,
-	1919594237U, 2331223864U,  177452412U, 2475870369U, 2689291749U,
-	 865194284U,  253432152U, 2628531804U, 2861208555U, 2361597573U,
-	1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U,
-	2596878672U, 2041442161U,   31164696U, 2662962485U, 3665637339U,
-	1678115244U, 2699839832U, 3651968520U, 3521595541U,  458433303U,
-	2423096824U,   21831741U,  380011703U, 2498168716U,  861806087U,
-	1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U,
-	4171949898U, 3886039621U, 1661344005U, 3424285243U,  992588372U,
-	2500984144U, 2993248497U, 3590193895U, 1535327365U,  515645636U,
-	 131633450U, 3729760261U, 1613045101U, 3254194278U,   15889678U,
-	1493590689U,  244148718U, 2991472662U, 1401629333U,  777349878U,
-	2501401703U, 4285518317U, 3794656178U,  955526526U, 3442142820U,
-	3970298374U,  736025417U, 2737370764U, 1271509744U,  440570731U,
-	 136141826U, 1596189518U,  923399175U,  257541519U, 3505774281U,
-	2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U,
-	1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U,
-	1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U,
-	2994377493U, 4038063986U, 1918458672U, 2983166794U, 4200449033U,
-	 353294540U, 1609232588U,  243926648U, 2332803291U,  507996832U,
-	2392838793U, 4075145196U, 2060984340U, 4287475136U,   88232602U,
-	2491531140U, 4159725633U, 2272075455U,  759298618U,  201384554U,
-	 838356250U, 1416268324U,  674476934U,   90795364U,  141672229U,
-	3660399588U, 4196417251U, 3249270244U, 3774530247U,   59587265U,
-	3683164208U,   19392575U, 1463123697U, 1882205379U,  293780489U,
-	2553160622U, 2933904694U,  675638239U, 2851336944U, 1435238743U,
-	2448730183U,  804436302U, 2119845972U,  322560608U, 4097732704U,
-	2987802540U,  641492617U, 2575442710U, 4217822703U, 3271835300U,
-	2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U,
-	3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U,
-	 584489368U, 3431165766U,  897384869U, 2062537737U, 2847889234U,
-	3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U,
-	2750138839U, 3518055702U,  733072558U, 4169325400U,  788493625U
-};
-static const uint64_t init_gen_rand_64_expected[] = {
-	KQU(16924766246869039260), KQU( 8201438687333352714),
-	KQU( 2265290287015001750), KQU(18397264611805473832),
-	KQU( 3375255223302384358), KQU( 6345559975416828796),
-	KQU(18229739242790328073), KQU( 7596792742098800905),
-	KQU(  255338647169685981), KQU( 2052747240048610300),
-	KQU(18328151576097299343), KQU(12472905421133796567),
-	KQU(11315245349717600863), KQU(16594110197775871209),
-	KQU(15708751964632456450), KQU(10452031272054632535),
-	KQU(11097646720811454386), KQU( 4556090668445745441),
-	KQU(17116187693090663106), KQU(14931526836144510645),
-	KQU( 9190752218020552591), KQU( 9625800285771901401),
-	KQU(13995141077659972832), KQU( 5194209094927829625),
-	KQU( 4156788379151063303), KQU( 8523452593770139494),
-	KQU(14082382103049296727), KQU( 2462601863986088483),
-	KQU( 3030583461592840678), KQU( 5221622077872827681),
-	KQU( 3084210671228981236), KQU(13956758381389953823),
-	KQU(13503889856213423831), KQU(15696904024189836170),
-	KQU( 4612584152877036206), KQU( 6231135538447867881),
-	KQU(10172457294158869468), KQU( 6452258628466708150),
-	KQU(14044432824917330221), KQU(  370168364480044279),
-	KQU(10102144686427193359), KQU(  667870489994776076),
-	KQU( 2732271956925885858), KQU(18027788905977284151),
-	KQU(15009842788582923859), KQU( 7136357960180199542),
-	KQU(15901736243475578127), KQU(16951293785352615701),
-	KQU(10551492125243691632), KQU(17668869969146434804),
-	KQU(13646002971174390445), KQU( 9804471050759613248),
-	KQU( 5511670439655935493), KQU(18103342091070400926),
-	KQU(17224512747665137533), KQU(15534627482992618168),
-	KQU( 1423813266186582647), KQU(15821176807932930024),
-	KQU(   30323369733607156), KQU(11599382494723479403),
-	KQU(  653856076586810062), KQU( 3176437395144899659),
-	KQU(14028076268147963917), KQU(16156398271809666195),
-	KQU( 3166955484848201676), KQU( 5746805620136919390),
-	KQU(17297845208891256593), KQU(11691653183226428483),
-	KQU(17900026146506981577), KQU(15387382115755971042),
-	KQU(16923567681040845943), KQU( 8039057517199388606),
-	KQU(11748409241468629263), KQU(  794358245539076095),
-	KQU(13438501964693401242), KQU(14036803236515618962),
-	KQU( 5252311215205424721), KQU(17806589612915509081),
-	KQU( 6802767092397596006), KQU(14212120431184557140),
-	KQU( 1072951366761385712), KQU(13098491780722836296),
-	KQU( 9466676828710797353), KQU(12673056849042830081),
-	KQU(12763726623645357580), KQU(16468961652999309493),
-	KQU(15305979875636438926), KQU(17444713151223449734),
-	KQU( 5692214267627883674), KQU(13049589139196151505),
-	KQU(  880115207831670745), KQU( 1776529075789695498),
-	KQU(16695225897801466485), KQU(10666901778795346845),
-	KQU( 6164389346722833869), KQU( 2863817793264300475),
-	KQU( 9464049921886304754), KQU( 3993566636740015468),
-	KQU( 9983749692528514136), KQU(16375286075057755211),
-	KQU(16042643417005440820), KQU(11445419662923489877),
-	KQU( 7999038846885158836), KQU( 6721913661721511535),
-	KQU( 5363052654139357320), KQU( 1817788761173584205),
-	KQU(13290974386445856444), KQU( 4650350818937984680),
-	KQU( 8219183528102484836), KQU( 1569862923500819899),
-	KQU( 4189359732136641860), KQU(14202822961683148583),
-	KQU( 4457498315309429058), KQU(13089067387019074834),
-	KQU(11075517153328927293), KQU(10277016248336668389),
-	KQU( 7070509725324401122), KQU(17808892017780289380),
-	KQU(13143367339909287349), KQU( 1377743745360085151),
-	KQU( 5749341807421286485), KQU(14832814616770931325),
-	KQU( 7688820635324359492), KQU(10960474011539770045),
-	KQU(   81970066653179790), KQU(12619476072607878022),
-	KQU( 4419566616271201744), KQU(15147917311750568503),
-	KQU( 5549739182852706345), KQU( 7308198397975204770),
-	KQU(13580425496671289278), KQU(17070764785210130301),
-	KQU( 8202832846285604405), KQU( 6873046287640887249),
-	KQU( 6927424434308206114), KQU( 6139014645937224874),
-	KQU(10290373645978487639), KQU(15904261291701523804),
-	KQU( 9628743442057826883), KQU(18383429096255546714),
-	KQU( 4977413265753686967), KQU( 7714317492425012869),
-	KQU( 9025232586309926193), KQU(14627338359776709107),
-	KQU(14759849896467790763), KQU(10931129435864423252),
-	KQU( 4588456988775014359), KQU(10699388531797056724),
-	KQU(  468652268869238792), KQU( 5755943035328078086),
-	KQU( 2102437379988580216), KQU( 9986312786506674028),
-	KQU( 2654207180040945604), KQU( 8726634790559960062),
-	KQU(  100497234871808137), KQU( 2800137176951425819),
-	KQU( 6076627612918553487), KQU( 5780186919186152796),
-	KQU( 8179183595769929098), KQU( 6009426283716221169),
-	KQU( 2796662551397449358), KQU( 1756961367041986764),
-	KQU( 6972897917355606205), KQU(14524774345368968243),
-	KQU( 2773529684745706940), KQU( 4853632376213075959),
-	KQU( 4198177923731358102), KQU( 8271224913084139776),
-	KQU( 2741753121611092226), KQU(16782366145996731181),
-	KQU(15426125238972640790), KQU(13595497100671260342),
-	KQU( 3173531022836259898), KQU( 6573264560319511662),
-	KQU(18041111951511157441), KQU( 2351433581833135952),
-	KQU( 3113255578908173487), KQU( 1739371330877858784),
-	KQU(16046126562789165480), KQU( 8072101652214192925),
-	KQU(15267091584090664910), KQU( 9309579200403648940),
-	KQU( 5218892439752408722), KQU(14492477246004337115),
-	KQU(17431037586679770619), KQU( 7385248135963250480),
-	KQU( 9580144956565560660), KQU( 4919546228040008720),
-	KQU(15261542469145035584), KQU(18233297270822253102),
-	KQU( 5453248417992302857), KQU( 9309519155931460285),
-	KQU(10342813012345291756), KQU(15676085186784762381),
-	KQU(15912092950691300645), KQU( 9371053121499003195),
-	KQU( 9897186478226866746), KQU(14061858287188196327),
-	KQU(  122575971620788119), KQU(12146750969116317754),
-	KQU( 4438317272813245201), KQU( 8332576791009527119),
-	KQU(13907785691786542057), KQU(10374194887283287467),
-	KQU( 2098798755649059566), KQU( 3416235197748288894),
-	KQU( 8688269957320773484), KQU( 7503964602397371571),
-	KQU(16724977015147478236), KQU( 9461512855439858184),
-	KQU(13259049744534534727), KQU( 3583094952542899294),
-	KQU( 8764245731305528292), KQU(13240823595462088985),
-	KQU(13716141617617910448), KQU(18114969519935960955),
-	KQU( 2297553615798302206), KQU( 4585521442944663362),
-	KQU(17776858680630198686), KQU( 4685873229192163363),
-	KQU(  152558080671135627), KQU(15424900540842670088),
-	KQU(13229630297130024108), KQU(17530268788245718717),
-	KQU(16675633913065714144), KQU( 3158912717897568068),
-	KQU(15399132185380087288), KQU( 7401418744515677872),
-	KQU(13135412922344398535), KQU( 6385314346100509511),
-	KQU(13962867001134161139), KQU(10272780155442671999),
-	KQU(12894856086597769142), KQU(13340877795287554994),
-	KQU(12913630602094607396), KQU(12543167911119793857),
-	KQU(17343570372251873096), KQU(10959487764494150545),
-	KQU( 6966737953093821128), KQU(13780699135496988601),
-	KQU( 4405070719380142046), KQU(14923788365607284982),
-	KQU( 2869487678905148380), KQU( 6416272754197188403),
-	KQU(15017380475943612591), KQU( 1995636220918429487),
-	KQU( 3402016804620122716), KQU(15800188663407057080),
-	KQU(11362369990390932882), KQU(15262183501637986147),
-	KQU(10239175385387371494), KQU( 9352042420365748334),
-	KQU( 1682457034285119875), KQU( 1724710651376289644),
-	KQU( 2038157098893817966), KQU( 9897825558324608773),
-	KQU( 1477666236519164736), KQU(16835397314511233640),
-	KQU(10370866327005346508), KQU(10157504370660621982),
-	KQU(12113904045335882069), KQU(13326444439742783008),
-	KQU(11302769043000765804), KQU(13594979923955228484),
-	KQU(11779351762613475968), KQU( 3786101619539298383),
-	KQU( 8021122969180846063), KQU(15745904401162500495),
-	KQU(10762168465993897267), KQU(13552058957896319026),
-	KQU(11200228655252462013), KQU( 5035370357337441226),
-	KQU( 7593918984545500013), KQU( 5418554918361528700),
-	KQU( 4858270799405446371), KQU( 9974659566876282544),
-	KQU(18227595922273957859), KQU( 2772778443635656220),
-	KQU(14285143053182085385), KQU( 9939700992429600469),
-	KQU(12756185904545598068), KQU( 2020783375367345262),
-	KQU(   57026775058331227), KQU(  950827867930065454),
-	KQU( 6602279670145371217), KQU( 2291171535443566929),
-	KQU( 5832380724425010313), KQU( 1220343904715982285),
-	KQU(17045542598598037633), KQU(15460481779702820971),
-	KQU(13948388779949365130), KQU(13975040175430829518),
-	KQU(17477538238425541763), KQU(11104663041851745725),
-	KQU(15860992957141157587), KQU(14529434633012950138),
-	KQU( 2504838019075394203), KQU( 7512113882611121886),
-	KQU( 4859973559980886617), KQU( 1258601555703250219),
-	KQU(15594548157514316394), KQU( 4516730171963773048),
-	KQU(11380103193905031983), KQU( 6809282239982353344),
-	KQU(18045256930420065002), KQU( 2453702683108791859),
-	KQU(  977214582986981460), KQU( 2006410402232713466),
-	KQU( 6192236267216378358), KQU( 3429468402195675253),
-	KQU(18146933153017348921), KQU(17369978576367231139),
-	KQU( 1246940717230386603), KQU(11335758870083327110),
-	KQU(14166488801730353682), KQU( 9008573127269635732),
-	KQU(10776025389820643815), KQU(15087605441903942962),
-	KQU( 1359542462712147922), KQU(13898874411226454206),
-	KQU(17911176066536804411), KQU( 9435590428600085274),
-	KQU(  294488509967864007), KQU( 8890111397567922046),
-	KQU( 7987823476034328778), KQU(13263827582440967651),
-	KQU( 7503774813106751573), KQU(14974747296185646837),
-	KQU( 8504765037032103375), KQU(17340303357444536213),
-	KQU( 7704610912964485743), KQU( 8107533670327205061),
-	KQU( 9062969835083315985), KQU(16968963142126734184),
-	KQU(12958041214190810180), KQU( 2720170147759570200),
-	KQU( 2986358963942189566), KQU(14884226322219356580),
-	KQU(  286224325144368520), KQU(11313800433154279797),
-	KQU(18366849528439673248), KQU(17899725929482368789),
-	KQU( 3730004284609106799), KQU( 1654474302052767205),
-	KQU( 5006698007047077032), KQU( 8196893913601182838),
-	KQU(15214541774425211640), KQU(17391346045606626073),
-	KQU( 8369003584076969089), KQU( 3939046733368550293),
-	KQU(10178639720308707785), KQU( 2180248669304388697),
-	KQU(   62894391300126322), KQU( 9205708961736223191),
-	KQU( 6837431058165360438), KQU( 3150743890848308214),
-	KQU(17849330658111464583), KQU(12214815643135450865),
-	KQU(13410713840519603402), KQU( 3200778126692046802),
-	KQU(13354780043041779313), KQU(  800850022756886036),
-	KQU(15660052933953067433), KQU( 6572823544154375676),
-	KQU(11030281857015819266), KQU(12682241941471433835),
-	KQU(11654136407300274693), KQU( 4517795492388641109),
-	KQU( 9757017371504524244), KQU(17833043400781889277),
-	KQU(12685085201747792227), KQU(10408057728835019573),
-	KQU(   98370418513455221), KQU( 6732663555696848598),
-	KQU(13248530959948529780), KQU( 3530441401230622826),
-	KQU(18188251992895660615), KQU( 1847918354186383756),
-	KQU( 1127392190402660921), KQU(11293734643143819463),
-	KQU( 3015506344578682982), KQU(13852645444071153329),
-	KQU( 2121359659091349142), KQU( 1294604376116677694),
-	KQU( 5616576231286352318), KQU( 7112502442954235625),
-	KQU(11676228199551561689), KQU(12925182803007305359),
-	KQU( 7852375518160493082), KQU( 1136513130539296154),
-	KQU( 5636923900916593195), KQU( 3221077517612607747),
-	KQU(17784790465798152513), KQU( 3554210049056995938),
-	KQU(17476839685878225874), KQU( 3206836372585575732),
-	KQU( 2765333945644823430), KQU(10080070903718799528),
-	KQU( 5412370818878286353), KQU( 9689685887726257728),
-	KQU( 8236117509123533998), KQU( 1951139137165040214),
-	KQU( 4492205209227980349), KQU(16541291230861602967),
-	KQU( 1424371548301437940), KQU( 9117562079669206794),
-	KQU(14374681563251691625), KQU(13873164030199921303),
-	KQU( 6680317946770936731), KQU(15586334026918276214),
-	KQU(10896213950976109802), KQU( 9506261949596413689),
-	KQU( 9903949574308040616), KQU( 6038397344557204470),
-	KQU(  174601465422373648), KQU(15946141191338238030),
-	KQU(17142225620992044937), KQU( 7552030283784477064),
-	KQU( 2947372384532947997), KQU(  510797021688197711),
-	KQU( 4962499439249363461), KQU(   23770320158385357),
-	KQU(  959774499105138124), KQU( 1468396011518788276),
-	KQU( 2015698006852312308), KQU( 4149400718489980136),
-	KQU( 5992916099522371188), KQU(10819182935265531076),
-	KQU(16189787999192351131), KQU(  342833961790261950),
-	KQU(12470830319550495336), KQU(18128495041912812501),
-	KQU( 1193600899723524337), KQU( 9056793666590079770),
-	KQU( 2154021227041669041), KQU( 4963570213951235735),
-	KQU( 4865075960209211409), KQU( 2097724599039942963),
-	KQU( 2024080278583179845), KQU(11527054549196576736),
-	KQU(10650256084182390252), KQU( 4808408648695766755),
-	KQU( 1642839215013788844), KQU(10607187948250398390),
-	KQU( 7076868166085913508), KQU(  730522571106887032),
-	KQU(12500579240208524895), KQU( 4484390097311355324),
-	KQU(15145801330700623870), KQU( 8055827661392944028),
-	KQU( 5865092976832712268), KQU(15159212508053625143),
-	KQU( 3560964582876483341), KQU( 4070052741344438280),
-	KQU( 6032585709886855634), KQU(15643262320904604873),
-	KQU( 2565119772293371111), KQU(  318314293065348260),
-	KQU(15047458749141511872), KQU( 7772788389811528730),
-	KQU( 7081187494343801976), KQU( 6465136009467253947),
-	KQU(10425940692543362069), KQU(  554608190318339115),
-	KQU(14796699860302125214), KQU( 1638153134431111443),
-	KQU(10336967447052276248), KQU( 8412308070396592958),
-	KQU( 4004557277152051226), KQU( 8143598997278774834),
-	KQU(16413323996508783221), KQU(13139418758033994949),
-	KQU( 9772709138335006667), KQU( 2818167159287157659),
-	KQU(17091740573832523669), KQU(14629199013130751608),
-	KQU(18268322711500338185), KQU( 8290963415675493063),
-	KQU( 8830864907452542588), KQU( 1614839084637494849),
-	KQU(14855358500870422231), KQU( 3472996748392519937),
-	KQU(15317151166268877716), KQU( 5825895018698400362),
-	KQU(16730208429367544129), KQU(10481156578141202800),
-	KQU( 4746166512382823750), KQU(12720876014472464998),
-	KQU( 8825177124486735972), KQU(13733447296837467838),
-	KQU( 6412293741681359625), KQU( 8313213138756135033),
-	KQU(11421481194803712517), KQU( 7997007691544174032),
-	KQU( 6812963847917605930), KQU( 9683091901227558641),
-	KQU(14703594165860324713), KQU( 1775476144519618309),
-	KQU( 2724283288516469519), KQU(  717642555185856868),
-	KQU( 8736402192215092346), KQU(11878800336431381021),
-	KQU( 4348816066017061293), KQU( 6115112756583631307),
-	KQU( 9176597239667142976), KQU(12615622714894259204),
-	KQU(10283406711301385987), KQU( 5111762509485379420),
-	KQU( 3118290051198688449), KQU( 7345123071632232145),
-	KQU( 9176423451688682359), KQU( 4843865456157868971),
-	KQU(12008036363752566088), KQU(12058837181919397720),
-	KQU( 2145073958457347366), KQU( 1526504881672818067),
-	KQU( 3488830105567134848), KQU(13208362960674805143),
-	KQU( 4077549672899572192), KQU( 7770995684693818365),
-	KQU( 1398532341546313593), KQU(12711859908703927840),
-	KQU( 1417561172594446813), KQU(17045191024194170604),
-	KQU( 4101933177604931713), KQU(14708428834203480320),
-	KQU(17447509264469407724), KQU(14314821973983434255),
-	KQU(17990472271061617265), KQU( 5087756685841673942),
-	KQU(12797820586893859939), KQU( 1778128952671092879),
-	KQU( 3535918530508665898), KQU( 9035729701042481301),
-	KQU(14808661568277079962), KQU(14587345077537747914),
-	KQU(11920080002323122708), KQU( 6426515805197278753),
-	KQU( 3295612216725984831), KQU(11040722532100876120),
-	KQU(12305952936387598754), KQU(16097391899742004253),
-	KQU( 4908537335606182208), KQU(12446674552196795504),
-	KQU(16010497855816895177), KQU( 9194378874788615551),
-	KQU( 3382957529567613384), KQU( 5154647600754974077),
-	KQU( 9801822865328396141), KQU( 9023662173919288143),
-	KQU(17623115353825147868), KQU( 8238115767443015816),
-	KQU(15811444159859002560), KQU( 9085612528904059661),
-	KQU( 6888601089398614254), KQU(  258252992894160189),
-	KQU( 6704363880792428622), KQU( 6114966032147235763),
-	KQU(11075393882690261875), KQU( 8797664238933620407),
-	KQU( 5901892006476726920), KQU( 5309780159285518958),
-	KQU(14940808387240817367), KQU(14642032021449656698),
-	KQU( 9808256672068504139), KQU( 3670135111380607658),
-	KQU(11211211097845960152), KQU( 1474304506716695808),
-	KQU(15843166204506876239), KQU( 7661051252471780561),
-	KQU(10170905502249418476), KQU( 7801416045582028589),
-	KQU( 2763981484737053050), KQU( 9491377905499253054),
-	KQU(16201395896336915095), KQU( 9256513756442782198),
-	KQU( 5411283157972456034), KQU( 5059433122288321676),
-	KQU( 4327408006721123357), KQU( 9278544078834433377),
-	KQU( 7601527110882281612), KQU(11848295896975505251),
-	KQU(12096998801094735560), KQU(14773480339823506413),
-	KQU(15586227433895802149), KQU(12786541257830242872),
-	KQU( 6904692985140503067), KQU( 5309011515263103959),
-	KQU(12105257191179371066), KQU(14654380212442225037),
-	KQU( 2556774974190695009), KQU( 4461297399927600261),
-	KQU(14888225660915118646), KQU(14915459341148291824),
-	KQU( 2738802166252327631), KQU( 6047155789239131512),
-	KQU(12920545353217010338), KQU(10697617257007840205),
-	KQU( 2751585253158203504), KQU(13252729159780047496),
-	KQU(14700326134672815469), KQU(14082527904374600529),
-	KQU(16852962273496542070), KQU(17446675504235853907),
-	KQU(15019600398527572311), KQU(12312781346344081551),
-	KQU(14524667935039810450), KQU( 5634005663377195738),
-	KQU(11375574739525000569), KQU( 2423665396433260040),
-	KQU( 5222836914796015410), KQU( 4397666386492647387),
-	KQU( 4619294441691707638), KQU(  665088602354770716),
-	KQU(13246495665281593610), KQU( 6564144270549729409),
-	KQU(10223216188145661688), KQU( 3961556907299230585),
-	KQU(11543262515492439914), KQU(16118031437285993790),
-	KQU( 7143417964520166465), KQU(13295053515909486772),
-	KQU(   40434666004899675), KQU(17127804194038347164),
-	KQU( 8599165966560586269), KQU( 8214016749011284903),
-	KQU(13725130352140465239), KQU( 5467254474431726291),
-	KQU( 7748584297438219877), KQU(16933551114829772472),
-	KQU( 2169618439506799400), KQU( 2169787627665113463),
-	KQU(17314493571267943764), KQU(18053575102911354912),
-	KQU(11928303275378476973), KQU(11593850925061715550),
-	KQU(17782269923473589362), KQU( 3280235307704747039),
-	KQU( 6145343578598685149), KQU(17080117031114086090),
-	KQU(18066839902983594755), KQU( 6517508430331020706),
-	KQU( 8092908893950411541), KQU(12558378233386153732),
-	KQU( 4476532167973132976), KQU(16081642430367025016),
-	KQU( 4233154094369139361), KQU( 8693630486693161027),
-	KQU(11244959343027742285), KQU(12273503967768513508),
-	KQU(14108978636385284876), KQU( 7242414665378826984),
-	KQU( 6561316938846562432), KQU( 8601038474994665795),
-	KQU(17532942353612365904), KQU(17940076637020912186),
-	KQU( 7340260368823171304), KQU( 7061807613916067905),
-	KQU(10561734935039519326), KQU(17990796503724650862),
-	KQU( 6208732943911827159), KQU(  359077562804090617),
-	KQU(14177751537784403113), KQU(10659599444915362902),
-	KQU(15081727220615085833), KQU(13417573895659757486),
-	KQU(15513842342017811524), KQU(11814141516204288231),
-	KQU( 1827312513875101814), KQU( 2804611699894603103),
-	KQU(17116500469975602763), KQU(12270191815211952087),
-	KQU(12256358467786024988), KQU(18435021722453971267),
-	KQU(  671330264390865618), KQU(  476504300460286050),
-	KQU(16465470901027093441), KQU( 4047724406247136402),
-	KQU( 1322305451411883346), KQU( 1388308688834322280),
-	KQU( 7303989085269758176), KQU( 9323792664765233642),
-	KQU( 4542762575316368936), KQU(17342696132794337618),
-	KQU( 4588025054768498379), KQU(13415475057390330804),
-	KQU(17880279491733405570), KQU(10610553400618620353),
-	KQU( 3180842072658960139), KQU(13002966655454270120),
-	KQU( 1665301181064982826), KQU( 7083673946791258979),
-	KQU(  190522247122496820), KQU(17388280237250677740),
-	KQU( 8430770379923642945), KQU(12987180971921668584),
-	KQU( 2311086108365390642), KQU( 2870984383579822345),
-	KQU(14014682609164653318), KQU(14467187293062251484),
-	KQU(  192186361147413298), KQU(15171951713531796524),
-	KQU( 9900305495015948728), KQU(17958004775615466344),
-	KQU(14346380954498606514), KQU(18040047357617407096),
-	KQU( 5035237584833424532), KQU(15089555460613972287),
-	KQU( 4131411873749729831), KQU( 1329013581168250330),
-	KQU(10095353333051193949), KQU(10749518561022462716),
-	KQU( 9050611429810755847), KQU(15022028840236655649),
-	KQU( 8775554279239748298), KQU(13105754025489230502),
-	KQU(15471300118574167585), KQU(   89864764002355628),
-	KQU( 8776416323420466637), KQU( 5280258630612040891),
-	KQU( 2719174488591862912), KQU( 7599309137399661994),
-	KQU(15012887256778039979), KQU(14062981725630928925),
-	KQU(12038536286991689603), KQU( 7089756544681775245),
-	KQU(10376661532744718039), KQU( 1265198725901533130),
-	KQU(13807996727081142408), KQU( 2935019626765036403),
-	KQU( 7651672460680700141), KQU( 3644093016200370795),
-	KQU( 2840982578090080674), KQU(17956262740157449201),
-	KQU(18267979450492880548), KQU(11799503659796848070),
-	KQU( 9942537025669672388), KQU(11886606816406990297),
-	KQU( 5488594946437447576), KQU( 7226714353282744302),
-	KQU( 3784851653123877043), KQU(  878018453244803041),
-	KQU(12110022586268616085), KQU(  734072179404675123),
-	KQU(11869573627998248542), KQU(  469150421297783998),
-	KQU(  260151124912803804), KQU(11639179410120968649),
-	KQU( 9318165193840846253), KQU(12795671722734758075),
-	KQU(15318410297267253933), KQU(  691524703570062620),
-	KQU( 5837129010576994601), KQU(15045963859726941052),
-	KQU( 5850056944932238169), KQU(12017434144750943807),
-	KQU( 7447139064928956574), KQU( 3101711812658245019),
-	KQU(16052940704474982954), KQU(18195745945986994042),
-	KQU( 8932252132785575659), KQU(13390817488106794834),
-	KQU(11582771836502517453), KQU( 4964411326683611686),
-	KQU( 2195093981702694011), KQU(14145229538389675669),
-	KQU(16459605532062271798), KQU(  866316924816482864),
-	KQU( 4593041209937286377), KQU( 8415491391910972138),
-	KQU( 4171236715600528969), KQU(16637569303336782889),
-	KQU( 2002011073439212680), KQU(17695124661097601411),
-	KQU( 4627687053598611702), KQU( 7895831936020190403),
-	KQU( 8455951300917267802), KQU( 2923861649108534854),
-	KQU( 8344557563927786255), KQU( 6408671940373352556),
-	KQU(12210227354536675772), KQU(14294804157294222295),
-	KQU(10103022425071085127), KQU(10092959489504123771),
-	KQU( 6554774405376736268), KQU(12629917718410641774),
-	KQU( 6260933257596067126), KQU( 2460827021439369673),
-	KQU( 2541962996717103668), KQU(  597377203127351475),
-	KQU( 5316984203117315309), KQU( 4811211393563241961),
-	KQU(13119698597255811641), KQU( 8048691512862388981),
-	KQU(10216818971194073842), KQU( 4612229970165291764),
-	KQU(10000980798419974770), KQU( 6877640812402540687),
-	KQU( 1488727563290436992), KQU( 2227774069895697318),
-	KQU(11237754507523316593), KQU(13478948605382290972),
-	KQU( 1963583846976858124), KQU( 5512309205269276457),
-	KQU( 3972770164717652347), KQU( 3841751276198975037),
-	KQU(10283343042181903117), KQU( 8564001259792872199),
-	KQU(16472187244722489221), KQU( 8953493499268945921),
-	KQU( 3518747340357279580), KQU( 4003157546223963073),
-	KQU( 3270305958289814590), KQU( 3966704458129482496),
-	KQU( 8122141865926661939), KQU(14627734748099506653),
-	KQU(13064426990862560568), KQU( 2414079187889870829),
-	KQU( 5378461209354225306), KQU(10841985740128255566),
-	KQU(  538582442885401738), KQU( 7535089183482905946),
-	KQU(16117559957598879095), KQU( 8477890721414539741),
-	KQU( 1459127491209533386), KQU(17035126360733620462),
-	KQU( 8517668552872379126), KQU(10292151468337355014),
-	KQU(17081267732745344157), KQU(13751455337946087178),
-	KQU(14026945459523832966), KQU( 6653278775061723516),
-	KQU(10619085543856390441), KQU( 2196343631481122885),
-	KQU(10045966074702826136), KQU(10082317330452718282),
-	KQU( 5920859259504831242), KQU( 9951879073426540617),
-	KQU( 7074696649151414158), KQU(15808193543879464318),
-	KQU( 7385247772746953374), KQU( 3192003544283864292),
-	KQU(18153684490917593847), KQU(12423498260668568905),
-	KQU(10957758099756378169), KQU(11488762179911016040),
-	KQU( 2099931186465333782), KQU(11180979581250294432),
-	KQU( 8098916250668367933), KQU( 3529200436790763465),
-	KQU(12988418908674681745), KQU( 6147567275954808580),
-	KQU( 3207503344604030989), KQU(10761592604898615360),
-	KQU(  229854861031893504), KQU( 8809853962667144291),
-	KQU(13957364469005693860), KQU( 7634287665224495886),
-	KQU(12353487366976556874), KQU( 1134423796317152034),
-	KQU( 2088992471334107068), KQU( 7393372127190799698),
-	KQU( 1845367839871058391), KQU(  207922563987322884),
-	KQU(11960870813159944976), KQU(12182120053317317363),
-	KQU(17307358132571709283), KQU(13871081155552824936),
-	KQU(18304446751741566262), KQU( 7178705220184302849),
-	KQU(10929605677758824425), KQU(16446976977835806844),
-	KQU(13723874412159769044), KQU( 6942854352100915216),
-	KQU( 1726308474365729390), KQU( 2150078766445323155),
-	KQU(15345558947919656626), KQU(12145453828874527201),
-	KQU( 2054448620739726849), KQU( 2740102003352628137),
-	KQU(11294462163577610655), KQU(  756164283387413743),
-	KQU(17841144758438810880), KQU(10802406021185415861),
-	KQU( 8716455530476737846), KQU( 6321788834517649606),
-	KQU(14681322910577468426), KQU(17330043563884336387),
-	KQU(12701802180050071614), KQU(14695105111079727151),
-	KQU( 5112098511654172830), KQU( 4957505496794139973),
-	KQU( 8270979451952045982), KQU(12307685939199120969),
-	KQU(12425799408953443032), KQU( 8376410143634796588),
-	KQU(16621778679680060464), KQU( 3580497854566660073),
-	KQU( 1122515747803382416), KQU(  857664980960597599),
-	KQU( 6343640119895925918), KQU(12878473260854462891),
-	KQU(10036813920765722626), KQU(14451335468363173812),
-	KQU( 5476809692401102807), KQU(16442255173514366342),
-	KQU(13060203194757167104), KQU(14354124071243177715),
-	KQU(15961249405696125227), KQU(13703893649690872584),
-	KQU(  363907326340340064), KQU( 6247455540491754842),
-	KQU(12242249332757832361), KQU(  156065475679796717),
-	KQU( 9351116235749732355), KQU( 4590350628677701405),
-	KQU( 1671195940982350389), KQU(13501398458898451905),
-	KQU( 6526341991225002255), KQU( 1689782913778157592),
-	KQU( 7439222350869010334), KQU(13975150263226478308),
-	KQU(11411961169932682710), KQU(17204271834833847277),
-	KQU(  541534742544435367), KQU( 6591191931218949684),
-	KQU( 2645454775478232486), KQU( 4322857481256485321),
-	KQU( 8477416487553065110), KQU(12902505428548435048),
-	KQU(  971445777981341415), KQU(14995104682744976712),
-	KQU( 4243341648807158063), KQU( 8695061252721927661),
-	KQU( 5028202003270177222), KQU( 2289257340915567840),
-	KQU(13870416345121866007), KQU(13994481698072092233),
-	KQU( 6912785400753196481), KQU( 2278309315841980139),
-	KQU( 4329765449648304839), KQU( 5963108095785485298),
-	KQU( 4880024847478722478), KQU(16015608779890240947),
-	KQU( 1866679034261393544), KQU(  914821179919731519),
-	KQU( 9643404035648760131), KQU( 2418114953615593915),
-	KQU(  944756836073702374), KQU(15186388048737296834),
-	KQU( 7723355336128442206), KQU( 7500747479679599691),
-	KQU(18013961306453293634), KQU( 2315274808095756456),
-	KQU(13655308255424029566), KQU(17203800273561677098),
-	KQU( 1382158694422087756), KQU( 5090390250309588976),
-	KQU(  517170818384213989), KQU( 1612709252627729621),
-	KQU( 1330118955572449606), KQU(  300922478056709885),
-	KQU(18115693291289091987), KQU(13491407109725238321),
-	KQU(15293714633593827320), KQU( 5151539373053314504),
-	KQU( 5951523243743139207), KQU(14459112015249527975),
-	KQU( 5456113959000700739), KQU( 3877918438464873016),
-	KQU(12534071654260163555), KQU(15871678376893555041),
-	KQU(11005484805712025549), KQU(16353066973143374252),
-	KQU( 4358331472063256685), KQU( 8268349332210859288),
-	KQU(12485161590939658075), KQU(13955993592854471343),
-	KQU( 5911446886848367039), KQU(14925834086813706974),
-	KQU( 6590362597857994805), KQU( 1280544923533661875),
-	KQU( 1637756018947988164), KQU( 4734090064512686329),
-	KQU(16693705263131485912), KQU( 6834882340494360958),
-	KQU( 8120732176159658505), KQU( 2244371958905329346),
-	KQU(10447499707729734021), KQU( 7318742361446942194),
-	KQU( 8032857516355555296), KQU(14023605983059313116),
-	KQU( 1032336061815461376), KQU( 9840995337876562612),
-	KQU( 9869256223029203587), KQU(12227975697177267636),
-	KQU(12728115115844186033), KQU( 7752058479783205470),
-	KQU(  729733219713393087), KQU(12954017801239007622)
-};
-static const uint64_t init_by_array_64_expected[] = {
-	KQU( 2100341266307895239), KQU( 8344256300489757943),
-	KQU(15687933285484243894), KQU( 8268620370277076319),
-	KQU(12371852309826545459), KQU( 8800491541730110238),
-	KQU(18113268950100835773), KQU( 2886823658884438119),
-	KQU( 3293667307248180724), KQU( 9307928143300172731),
-	KQU( 7688082017574293629), KQU(  900986224735166665),
-	KQU( 9977972710722265039), KQU( 6008205004994830552),
-	KQU(  546909104521689292), KQU( 7428471521869107594),
-	KQU(14777563419314721179), KQU(16116143076567350053),
-	KQU( 5322685342003142329), KQU( 4200427048445863473),
-	KQU( 4693092150132559146), KQU(13671425863759338582),
-	KQU( 6747117460737639916), KQU( 4732666080236551150),
-	KQU( 5912839950611941263), KQU( 3903717554504704909),
-	KQU( 2615667650256786818), KQU(10844129913887006352),
-	KQU(13786467861810997820), KQU(14267853002994021570),
-	KQU(13767807302847237439), KQU(16407963253707224617),
-	KQU( 4802498363698583497), KQU( 2523802839317209764),
-	KQU( 3822579397797475589), KQU( 8950320572212130610),
-	KQU( 3745623504978342534), KQU(16092609066068482806),
-	KQU( 9817016950274642398), KQU(10591660660323829098),
-	KQU(11751606650792815920), KQU( 5122873818577122211),
-	KQU(17209553764913936624), KQU( 6249057709284380343),
-	KQU(15088791264695071830), KQU(15344673071709851930),
-	KQU( 4345751415293646084), KQU( 2542865750703067928),
-	KQU(13520525127852368784), KQU(18294188662880997241),
-	KQU( 3871781938044881523), KQU( 2873487268122812184),
-	KQU(15099676759482679005), KQU(15442599127239350490),
-	KQU( 6311893274367710888), KQU( 3286118760484672933),
-	KQU( 4146067961333542189), KQU(13303942567897208770),
-	KQU( 8196013722255630418), KQU( 4437815439340979989),
-	KQU(15433791533450605135), KQU( 4254828956815687049),
-	KQU( 1310903207708286015), KQU(10529182764462398549),
-	KQU(14900231311660638810), KQU( 9727017277104609793),
-	KQU( 1821308310948199033), KQU(11628861435066772084),
-	KQU( 9469019138491546924), KQU( 3145812670532604988),
-	KQU( 9938468915045491919), KQU( 1562447430672662142),
-	KQU(13963995266697989134), KQU( 3356884357625028695),
-	KQU( 4499850304584309747), KQU( 8456825817023658122),
-	KQU(10859039922814285279), KQU( 8099512337972526555),
-	KQU(  348006375109672149), KQU(11919893998241688603),
-	KQU( 1104199577402948826), KQU(16689191854356060289),
-	KQU(10992552041730168078), KQU( 7243733172705465836),
-	KQU( 5668075606180319560), KQU(18182847037333286970),
-	KQU( 4290215357664631322), KQU( 4061414220791828613),
-	KQU(13006291061652989604), KQU( 7140491178917128798),
-	KQU(12703446217663283481), KQU( 5500220597564558267),
-	KQU(10330551509971296358), KQU(15958554768648714492),
-	KQU( 5174555954515360045), KQU( 1731318837687577735),
-	KQU( 3557700801048354857), KQU(13764012341928616198),
-	KQU(13115166194379119043), KQU( 7989321021560255519),
-	KQU( 2103584280905877040), KQU( 9230788662155228488),
-	KQU(16396629323325547654), KQU(  657926409811318051),
-	KQU(15046700264391400727), KQU( 5120132858771880830),
-	KQU( 7934160097989028561), KQU( 6963121488531976245),
-	KQU(17412329602621742089), KQU(15144843053931774092),
-	KQU(17204176651763054532), KQU(13166595387554065870),
-	KQU( 8590377810513960213), KQU( 5834365135373991938),
-	KQU( 7640913007182226243), KQU( 3479394703859418425),
-	KQU(16402784452644521040), KQU( 4993979809687083980),
-	KQU(13254522168097688865), KQU(15643659095244365219),
-	KQU( 5881437660538424982), KQU(11174892200618987379),
-	KQU(  254409966159711077), KQU(17158413043140549909),
-	KQU( 3638048789290376272), KQU( 1376816930299489190),
-	KQU( 4622462095217761923), KQU(15086407973010263515),
-	KQU(13253971772784692238), KQU( 5270549043541649236),
-	KQU(11182714186805411604), KQU(12283846437495577140),
-	KQU( 5297647149908953219), KQU(10047451738316836654),
-	KQU( 4938228100367874746), KQU(12328523025304077923),
-	KQU( 3601049438595312361), KQU( 9313624118352733770),
-	KQU(13322966086117661798), KQU(16660005705644029394),
-	KQU(11337677526988872373), KQU(13869299102574417795),
-	KQU(15642043183045645437), KQU( 3021755569085880019),
-	KQU( 4979741767761188161), KQU(13679979092079279587),
-	KQU( 3344685842861071743), KQU(13947960059899588104),
-	KQU(  305806934293368007), KQU( 5749173929201650029),
-	KQU(11123724852118844098), KQU(15128987688788879802),
-	KQU(15251651211024665009), KQU( 7689925933816577776),
-	KQU(16732804392695859449), KQU(17087345401014078468),
-	KQU(14315108589159048871), KQU( 4820700266619778917),
-	KQU(16709637539357958441), KQU( 4936227875177351374),
-	KQU( 2137907697912987247), KQU(11628565601408395420),
-	KQU( 2333250549241556786), KQU( 5711200379577778637),
-	KQU( 5170680131529031729), KQU(12620392043061335164),
-	KQU(   95363390101096078), KQU( 5487981914081709462),
-	KQU( 1763109823981838620), KQU( 3395861271473224396),
-	KQU( 1300496844282213595), KQU( 6894316212820232902),
-	KQU(10673859651135576674), KQU( 5911839658857903252),
-	KQU(17407110743387299102), KQU( 8257427154623140385),
-	KQU(11389003026741800267), KQU( 4070043211095013717),
-	KQU(11663806997145259025), KQU(15265598950648798210),
-	KQU(  630585789434030934), KQU( 3524446529213587334),
-	KQU( 7186424168495184211), KQU(10806585451386379021),
-	KQU(11120017753500499273), KQU( 1586837651387701301),
-	KQU(17530454400954415544), KQU( 9991670045077880430),
-	KQU( 7550997268990730180), KQU( 8640249196597379304),
-	KQU( 3522203892786893823), KQU(10401116549878854788),
-	KQU(13690285544733124852), KQU( 8295785675455774586),
-	KQU(15535716172155117603), KQU( 3112108583723722511),
-	KQU(17633179955339271113), KQU(18154208056063759375),
-	KQU( 1866409236285815666), KQU(13326075895396412882),
-	KQU( 8756261842948020025), KQU( 6281852999868439131),
-	KQU(15087653361275292858), KQU(10333923911152949397),
-	KQU( 5265567645757408500), KQU(12728041843210352184),
-	KQU( 6347959327507828759), KQU(  154112802625564758),
-	KQU(18235228308679780218), KQU( 3253805274673352418),
-	KQU( 4849171610689031197), KQU(17948529398340432518),
-	KQU(13803510475637409167), KQU(13506570190409883095),
-	KQU(15870801273282960805), KQU( 8451286481299170773),
-	KQU( 9562190620034457541), KQU( 8518905387449138364),
-	KQU(12681306401363385655), KQU( 3788073690559762558),
-	KQU( 5256820289573487769), KQU( 2752021372314875467),
-	KQU( 6354035166862520716), KQU( 4328956378309739069),
-	KQU(  449087441228269600), KQU( 5533508742653090868),
-	KQU( 1260389420404746988), KQU(18175394473289055097),
-	KQU( 1535467109660399420), KQU( 8818894282874061442),
-	KQU(12140873243824811213), KQU(15031386653823014946),
-	KQU( 1286028221456149232), KQU( 6329608889367858784),
-	KQU( 9419654354945132725), KQU( 6094576547061672379),
-	KQU(17706217251847450255), KQU( 1733495073065878126),
-	KQU(16918923754607552663), KQU( 8881949849954945044),
-	KQU(12938977706896313891), KQU(14043628638299793407),
-	KQU(18393874581723718233), KQU( 6886318534846892044),
-	KQU(14577870878038334081), KQU(13541558383439414119),
-	KQU(13570472158807588273), KQU(18300760537910283361),
-	KQU(  818368572800609205), KQU( 1417000585112573219),
-	KQU(12337533143867683655), KQU(12433180994702314480),
-	KQU(  778190005829189083), KQU(13667356216206524711),
-	KQU( 9866149895295225230), KQU(11043240490417111999),
-	KQU( 1123933826541378598), KQU( 6469631933605123610),
-	KQU(14508554074431980040), KQU(13918931242962026714),
-	KQU( 2870785929342348285), KQU(14786362626740736974),
-	KQU(13176680060902695786), KQU( 9591778613541679456),
-	KQU( 9097662885117436706), KQU(  749262234240924947),
-	KQU( 1944844067793307093), KQU( 4339214904577487742),
-	KQU( 8009584152961946551), KQU(16073159501225501777),
-	KQU( 3335870590499306217), KQU(17088312653151202847),
-	KQU( 3108893142681931848), KQU(16636841767202792021),
-	KQU(10423316431118400637), KQU( 8008357368674443506),
-	KQU(11340015231914677875), KQU(17687896501594936090),
-	KQU(15173627921763199958), KQU(  542569482243721959),
-	KQU(15071714982769812975), KQU( 4466624872151386956),
-	KQU( 1901780715602332461), KQU( 9822227742154351098),
-	KQU( 1479332892928648780), KQU( 6981611948382474400),
-	KQU( 7620824924456077376), KQU(14095973329429406782),
-	KQU( 7902744005696185404), KQU(15830577219375036920),
-	KQU(10287076667317764416), KQU(12334872764071724025),
-	KQU( 4419302088133544331), KQU(14455842851266090520),
-	KQU(12488077416504654222), KQU( 7953892017701886766),
-	KQU( 6331484925529519007), KQU( 4902145853785030022),
-	KQU(17010159216096443073), KQU(11945354668653886087),
-	KQU(15112022728645230829), KQU(17363484484522986742),
-	KQU( 4423497825896692887), KQU( 8155489510809067471),
-	KQU(  258966605622576285), KQU( 5462958075742020534),
-	KQU( 6763710214913276228), KQU( 2368935183451109054),
-	KQU(14209506165246453811), KQU( 2646257040978514881),
-	KQU( 3776001911922207672), KQU( 1419304601390147631),
-	KQU(14987366598022458284), KQU( 3977770701065815721),
-	KQU(  730820417451838898), KQU( 3982991703612885327),
-	KQU( 2803544519671388477), KQU(17067667221114424649),
-	KQU( 2922555119737867166), KQU( 1989477584121460932),
-	KQU(15020387605892337354), KQU( 9293277796427533547),
-	KQU(10722181424063557247), KQU(16704542332047511651),
-	KQU( 5008286236142089514), KQU(16174732308747382540),
-	KQU(17597019485798338402), KQU(13081745199110622093),
-	KQU( 8850305883842258115), KQU(12723629125624589005),
-	KQU( 8140566453402805978), KQU(15356684607680935061),
-	KQU(14222190387342648650), KQU(11134610460665975178),
-	KQU( 1259799058620984266), KQU(13281656268025610041),
-	KQU(  298262561068153992), KQU(12277871700239212922),
-	KQU(13911297774719779438), KQU(16556727962761474934),
-	KQU(17903010316654728010), KQU( 9682617699648434744),
-	KQU(14757681836838592850), KQU( 1327242446558524473),
-	KQU(11126645098780572792), KQU( 1883602329313221774),
-	KQU( 2543897783922776873), KQU(15029168513767772842),
-	KQU(12710270651039129878), KQU(16118202956069604504),
-	KQU(15010759372168680524), KQU( 2296827082251923948),
-	KQU(10793729742623518101), KQU(13829764151845413046),
-	KQU(17769301223184451213), KQU( 3118268169210783372),
-	KQU(17626204544105123127), KQU( 7416718488974352644),
-	KQU(10450751996212925994), KQU( 9352529519128770586),
-	KQU(  259347569641110140), KQU( 8048588892269692697),
-	KQU( 1774414152306494058), KQU(10669548347214355622),
-	KQU(13061992253816795081), KQU(18432677803063861659),
-	KQU( 8879191055593984333), KQU(12433753195199268041),
-	KQU(14919392415439730602), KQU( 6612848378595332963),
-	KQU( 6320986812036143628), KQU(10465592420226092859),
-	KQU( 4196009278962570808), KQU( 3747816564473572224),
-	KQU(17941203486133732898), KQU( 2350310037040505198),
-	KQU( 5811779859134370113), KQU(10492109599506195126),
-	KQU( 7699650690179541274), KQU( 1954338494306022961),
-	KQU(14095816969027231152), KQU( 5841346919964852061),
-	KQU(14945969510148214735), KQU( 3680200305887550992),
-	KQU( 6218047466131695792), KQU( 8242165745175775096),
-	KQU(11021371934053307357), KQU( 1265099502753169797),
-	KQU( 4644347436111321718), KQU( 3609296916782832859),
-	KQU( 8109807992218521571), KQU(18387884215648662020),
-	KQU(14656324896296392902), KQU(17386819091238216751),
-	KQU(17788300878582317152), KQU( 7919446259742399591),
-	KQU( 4466613134576358004), KQU(12928181023667938509),
-	KQU(13147446154454932030), KQU(16552129038252734620),
-	KQU( 8395299403738822450), KQU(11313817655275361164),
-	KQU(  434258809499511718), KQU( 2074882104954788676),
-	KQU( 7929892178759395518), KQU( 9006461629105745388),
-	KQU( 5176475650000323086), KQU(11128357033468341069),
-	KQU(12026158851559118955), KQU(14699716249471156500),
-	KQU(  448982497120206757), KQU( 4156475356685519900),
-	KQU( 6063816103417215727), KQU(10073289387954971479),
-	KQU( 8174466846138590962), KQU( 2675777452363449006),
-	KQU( 9090685420572474281), KQU( 6659652652765562060),
-	KQU(12923120304018106621), KQU(11117480560334526775),
-	KQU(  937910473424587511), KQU( 1838692113502346645),
-	KQU(11133914074648726180), KQU( 7922600945143884053),
-	KQU(13435287702700959550), KQU( 5287964921251123332),
-	KQU(11354875374575318947), KQU(17955724760748238133),
-	KQU(13728617396297106512), KQU( 4107449660118101255),
-	KQU( 1210269794886589623), KQU(11408687205733456282),
-	KQU( 4538354710392677887), KQU(13566803319341319267),
-	KQU(17870798107734050771), KQU( 3354318982568089135),
-	KQU( 9034450839405133651), KQU(13087431795753424314),
-	KQU(  950333102820688239), KQU( 1968360654535604116),
-	KQU(16840551645563314995), KQU( 8867501803892924995),
-	KQU(11395388644490626845), KQU( 1529815836300732204),
-	KQU(13330848522996608842), KQU( 1813432878817504265),
-	KQU( 2336867432693429560), KQU(15192805445973385902),
-	KQU( 2528593071076407877), KQU(  128459777936689248),
-	KQU( 9976345382867214866), KQU( 6208885766767996043),
-	KQU(14982349522273141706), KQU( 3099654362410737822),
-	KQU(13776700761947297661), KQU( 8806185470684925550),
-	KQU( 8151717890410585321), KQU(  640860591588072925),
-	KQU(14592096303937307465), KQU( 9056472419613564846),
-	KQU(14861544647742266352), KQU(12703771500398470216),
-	KQU( 3142372800384138465), KQU( 6201105606917248196),
-	KQU(18337516409359270184), KQU(15042268695665115339),
-	KQU(15188246541383283846), KQU(12800028693090114519),
-	KQU( 5992859621101493472), KQU(18278043971816803521),
-	KQU( 9002773075219424560), KQU( 7325707116943598353),
-	KQU( 7930571931248040822), KQU( 5645275869617023448),
-	KQU( 7266107455295958487), KQU( 4363664528273524411),
-	KQU(14313875763787479809), KQU(17059695613553486802),
-	KQU( 9247761425889940932), KQU(13704726459237593128),
-	KQU( 2701312427328909832), KQU(17235532008287243115),
-	KQU(14093147761491729538), KQU( 6247352273768386516),
-	KQU( 8268710048153268415), KQU( 7985295214477182083),
-	KQU(15624495190888896807), KQU( 3772753430045262788),
-	KQU( 9133991620474991698), KQU( 5665791943316256028),
-	KQU( 7551996832462193473), KQU(13163729206798953877),
-	KQU( 9263532074153846374), KQU( 1015460703698618353),
-	KQU(17929874696989519390), KQU(18257884721466153847),
-	KQU(16271867543011222991), KQU( 3905971519021791941),
-	KQU(16814488397137052085), KQU( 1321197685504621613),
-	KQU( 2870359191894002181), KQU(14317282970323395450),
-	KQU(13663920845511074366), KQU( 2052463995796539594),
-	KQU(14126345686431444337), KQU( 1727572121947022534),
-	KQU(17793552254485594241), KQU( 6738857418849205750),
-	KQU( 1282987123157442952), KQU(16655480021581159251),
-	KQU( 6784587032080183866), KQU(14726758805359965162),
-	KQU( 7577995933961987349), KQU(12539609320311114036),
-	KQU(10789773033385439494), KQU( 8517001497411158227),
-	KQU(10075543932136339710), KQU(14838152340938811081),
-	KQU( 9560840631794044194), KQU(17445736541454117475),
-	KQU(10633026464336393186), KQU(15705729708242246293),
-	KQU( 1117517596891411098), KQU( 4305657943415886942),
-	KQU( 4948856840533979263), KQU(16071681989041789593),
-	KQU(13723031429272486527), KQU( 7639567622306509462),
-	KQU(12670424537483090390), KQU( 9715223453097197134),
-	KQU( 5457173389992686394), KQU(  289857129276135145),
-	KQU(17048610270521972512), KQU(  692768013309835485),
-	KQU(14823232360546632057), KQU(18218002361317895936),
-	KQU( 3281724260212650204), KQU(16453957266549513795),
-	KQU( 8592711109774511881), KQU(  929825123473369579),
-	KQU(15966784769764367791), KQU( 9627344291450607588),
-	KQU(10849555504977813287), KQU( 9234566913936339275),
-	KQU( 6413807690366911210), KQU(10862389016184219267),
-	KQU(13842504799335374048), KQU( 1531994113376881174),
-	KQU( 2081314867544364459), KQU(16430628791616959932),
-	KQU( 8314714038654394368), KQU( 9155473892098431813),
-	KQU(12577843786670475704), KQU( 4399161106452401017),
-	KQU( 1668083091682623186), KQU( 1741383777203714216),
-	KQU( 2162597285417794374), KQU(15841980159165218736),
-	KQU( 1971354603551467079), KQU( 1206714764913205968),
-	KQU( 4790860439591272330), KQU(14699375615594055799),
-	KQU( 8374423871657449988), KQU(10950685736472937738),
-	KQU(  697344331343267176), KQU(10084998763118059810),
-	KQU(12897369539795983124), KQU(12351260292144383605),
-	KQU( 1268810970176811234), KQU( 7406287800414582768),
-	KQU(  516169557043807831), KQU( 5077568278710520380),
-	KQU( 3828791738309039304), KQU( 7721974069946943610),
-	KQU( 3534670260981096460), KQU( 4865792189600584891),
-	KQU(16892578493734337298), KQU( 9161499464278042590),
-	KQU(11976149624067055931), KQU(13219479887277343990),
-	KQU(14161556738111500680), KQU(14670715255011223056),
-	KQU( 4671205678403576558), KQU(12633022931454259781),
-	KQU(14821376219869187646), KQU(  751181776484317028),
-	KQU( 2192211308839047070), KQU(11787306362361245189),
-	KQU(10672375120744095707), KQU( 4601972328345244467),
-	KQU(15457217788831125879), KQU( 8464345256775460809),
-	KQU(10191938789487159478), KQU( 6184348739615197613),
-	KQU(11425436778806882100), KQU( 2739227089124319793),
-	KQU(  461464518456000551), KQU( 4689850170029177442),
-	KQU( 6120307814374078625), KQU(11153579230681708671),
-	KQU( 7891721473905347926), KQU(10281646937824872400),
-	KQU( 3026099648191332248), KQU( 8666750296953273818),
-	KQU(14978499698844363232), KQU(13303395102890132065),
-	KQU( 8182358205292864080), KQU(10560547713972971291),
-	KQU(11981635489418959093), KQU( 3134621354935288409),
-	KQU(11580681977404383968), KQU(14205530317404088650),
-	KQU( 5997789011854923157), KQU(13659151593432238041),
-	KQU(11664332114338865086), KQU( 7490351383220929386),
-	KQU( 7189290499881530378), KQU(15039262734271020220),
-	KQU( 2057217285976980055), KQU(  555570804905355739),
-	KQU(11235311968348555110), KQU(13824557146269603217),
-	KQU(16906788840653099693), KQU( 7222878245455661677),
-	KQU( 5245139444332423756), KQU( 4723748462805674292),
-	KQU(12216509815698568612), KQU(17402362976648951187),
-	KQU(17389614836810366768), KQU( 4880936484146667711),
-	KQU( 9085007839292639880), KQU(13837353458498535449),
-	KQU(11914419854360366677), KQU(16595890135313864103),
-	KQU( 6313969847197627222), KQU(18296909792163910431),
-	KQU(10041780113382084042), KQU( 2499478551172884794),
-	KQU(11057894246241189489), KQU( 9742243032389068555),
-	KQU(12838934582673196228), KQU(13437023235248490367),
-	KQU(13372420669446163240), KQU( 6752564244716909224),
-	KQU( 7157333073400313737), KQU(12230281516370654308),
-	KQU( 1182884552219419117), KQU( 2955125381312499218),
-	KQU(10308827097079443249), KQU( 1337648572986534958),
-	KQU(16378788590020343939), KQU(  108619126514420935),
-	KQU( 3990981009621629188), KQU( 5460953070230946410),
-	KQU( 9703328329366531883), KQU(13166631489188077236),
-	KQU( 1104768831213675170), KQU( 3447930458553877908),
-	KQU( 8067172487769945676), KQU( 5445802098190775347),
-	KQU( 3244840981648973873), KQU(17314668322981950060),
-	KQU( 5006812527827763807), KQU(18158695070225526260),
-	KQU( 2824536478852417853), KQU(13974775809127519886),
-	KQU( 9814362769074067392), KQU(17276205156374862128),
-	KQU(11361680725379306967), KQU( 3422581970382012542),
-	KQU(11003189603753241266), KQU(11194292945277862261),
-	KQU( 6839623313908521348), KQU(11935326462707324634),
-	KQU( 1611456788685878444), KQU(13112620989475558907),
-	KQU(  517659108904450427), KQU(13558114318574407624),
-	KQU(15699089742731633077), KQU( 4988979278862685458),
-	KQU( 8111373583056521297), KQU( 3891258746615399627),
-	KQU( 8137298251469718086), KQU(12748663295624701649),
-	KQU( 4389835683495292062), KQU( 5775217872128831729),
-	KQU( 9462091896405534927), KQU( 8498124108820263989),
-	KQU( 8059131278842839525), KQU(10503167994254090892),
-	KQU(11613153541070396656), KQU(18069248738504647790),
-	KQU(  570657419109768508), KQU( 3950574167771159665),
-	KQU( 5514655599604313077), KQU( 2908460854428484165),
-	KQU(10777722615935663114), KQU(12007363304839279486),
-	KQU( 9800646187569484767), KQU( 8795423564889864287),
-	KQU(14257396680131028419), KQU( 6405465117315096498),
-	KQU( 7939411072208774878), KQU(17577572378528990006),
-	KQU(14785873806715994850), KQU(16770572680854747390),
-	KQU(18127549474419396481), KQU(11637013449455757750),
-	KQU(14371851933996761086), KQU( 3601181063650110280),
-	KQU( 4126442845019316144), KQU(10198287239244320669),
-	KQU(18000169628555379659), KQU(18392482400739978269),
-	KQU( 6219919037686919957), KQU( 3610085377719446052),
-	KQU( 2513925039981776336), KQU(16679413537926716955),
-	KQU(12903302131714909434), KQU( 5581145789762985009),
-	KQU(12325955044293303233), KQU(17216111180742141204),
-	KQU( 6321919595276545740), KQU( 3507521147216174501),
-	KQU( 9659194593319481840), KQU(11473976005975358326),
-	KQU(14742730101435987026), KQU(  492845897709954780),
-	KQU(16976371186162599676), KQU(17712703422837648655),
-	KQU( 9881254778587061697), KQU( 8413223156302299551),
-	KQU( 1563841828254089168), KQU( 9996032758786671975),
-	KQU(  138877700583772667), KQU(13003043368574995989),
-	KQU( 4390573668650456587), KQU( 8610287390568126755),
-	KQU(15126904974266642199), KQU( 6703637238986057662),
-	KQU( 2873075592956810157), KQU( 6035080933946049418),
-	KQU(13382846581202353014), KQU( 7303971031814642463),
-	KQU(18418024405307444267), KQU( 5847096731675404647),
-	KQU( 4035880699639842500), KQU(11525348625112218478),
-	KQU( 3041162365459574102), KQU( 2604734487727986558),
-	KQU(15526341771636983145), KQU(14556052310697370254),
-	KQU(12997787077930808155), KQU( 9601806501755554499),
-	KQU(11349677952521423389), KQU(14956777807644899350),
-	KQU(16559736957742852721), KQU(12360828274778140726),
-	KQU( 6685373272009662513), KQU(16932258748055324130),
-	KQU(15918051131954158508), KQU( 1692312913140790144),
-	KQU(  546653826801637367), KQU( 5341587076045986652),
-	KQU(14975057236342585662), KQU(12374976357340622412),
-	KQU(10328833995181940552), KQU(12831807101710443149),
-	KQU(10548514914382545716), KQU( 2217806727199715993),
-	KQU(12627067369242845138), KQU( 4598965364035438158),
-	KQU(  150923352751318171), KQU(14274109544442257283),
-	KQU( 4696661475093863031), KQU( 1505764114384654516),
-	KQU(10699185831891495147), KQU( 2392353847713620519),
-	KQU( 3652870166711788383), KQU( 8640653276221911108),
-	KQU( 3894077592275889704), KQU( 4918592872135964845),
-	KQU(16379121273281400789), KQU(12058465483591683656),
-	KQU(11250106829302924945), KQU( 1147537556296983005),
-	KQU( 6376342756004613268), KQU(14967128191709280506),
-	KQU(18007449949790627628), KQU( 9497178279316537841),
-	KQU( 7920174844809394893), KQU(10037752595255719907),
-	KQU(15875342784985217697), KQU(15311615921712850696),
-	KQU( 9552902652110992950), KQU(14054979450099721140),
-	KQU( 5998709773566417349), KQU(18027910339276320187),
-	KQU( 8223099053868585554), KQU( 7842270354824999767),
-	KQU( 4896315688770080292), KQU(12969320296569787895),
-	KQU( 2674321489185759961), KQU( 4053615936864718439),
-	KQU(11349775270588617578), KQU( 4743019256284553975),
-	KQU( 5602100217469723769), KQU(14398995691411527813),
-	KQU( 7412170493796825470), KQU(  836262406131744846),
-	KQU( 8231086633845153022), KQU( 5161377920438552287),
-	KQU( 8828731196169924949), KQU(16211142246465502680),
-	KQU( 3307990879253687818), KQU( 5193405406899782022),
-	KQU( 8510842117467566693), KQU( 6070955181022405365),
-	KQU(14482950231361409799), KQU(12585159371331138077),
-	KQU( 3511537678933588148), KQU( 2041849474531116417),
-	KQU(10944936685095345792), KQU(18303116923079107729),
-	KQU( 2720566371239725320), KQU( 4958672473562397622),
-	KQU( 3032326668253243412), KQU(13689418691726908338),
-	KQU( 1895205511728843996), KQU( 8146303515271990527),
-	KQU(16507343500056113480), KQU(  473996939105902919),
-	KQU( 9897686885246881481), KQU(14606433762712790575),
-	KQU( 6732796251605566368), KQU( 1399778120855368916),
-	KQU(  935023885182833777), KQU(16066282816186753477),
-	KQU( 7291270991820612055), KQU(17530230393129853844),
-	KQU(10223493623477451366), KQU(15841725630495676683),
-	KQU(17379567246435515824), KQU( 8588251429375561971),
-	KQU(18339511210887206423), KQU(17349587430725976100),
-	KQU(12244876521394838088), KQU( 6382187714147161259),
-	KQU(12335807181848950831), KQU(16948885622305460665),
-	KQU(13755097796371520506), KQU(14806740373324947801),
-	KQU( 4828699633859287703), KQU( 8209879281452301604),
-	KQU(12435716669553736437), KQU(13970976859588452131),
-	KQU( 6233960842566773148), KQU(12507096267900505759),
-	KQU( 1198713114381279421), KQU(14989862731124149015),
-	KQU(15932189508707978949), KQU( 2526406641432708722),
-	KQU(   29187427817271982), KQU( 1499802773054556353),
-	KQU(10816638187021897173), KQU( 5436139270839738132),
-	KQU( 6659882287036010082), KQU( 2154048955317173697),
-	KQU(10887317019333757642), KQU(16281091802634424955),
-	KQU(10754549879915384901), KQU(10760611745769249815),
-	KQU( 2161505946972504002), KQU( 5243132808986265107),
-	KQU(10129852179873415416), KQU(  710339480008649081),
-	KQU( 7802129453068808528), KQU(17967213567178907213),
-	KQU(15730859124668605599), KQU(13058356168962376502),
-	KQU( 3701224985413645909), KQU(14464065869149109264),
-	KQU( 9959272418844311646), KQU(10157426099515958752),
-	KQU(14013736814538268528), KQU(17797456992065653951),
-	KQU(17418878140257344806), KQU(15457429073540561521),
-	KQU( 2184426881360949378), KQU( 2062193041154712416),
-	KQU( 8553463347406931661), KQU( 4913057625202871854),
-	KQU( 2668943682126618425), KQU(17064444737891172288),
-	KQU( 4997115903913298637), KQU(12019402608892327416),
-	KQU(17603584559765897352), KQU(11367529582073647975),
-	KQU( 8211476043518436050), KQU( 8676849804070323674),
-	KQU(18431829230394475730), KQU(10490177861361247904),
-	KQU( 9508720602025651349), KQU( 7409627448555722700),
-	KQU( 5804047018862729008), KQU(11943858176893142594),
-	KQU(11908095418933847092), KQU( 5415449345715887652),
-	KQU( 1554022699166156407), KQU( 9073322106406017161),
-	KQU( 7080630967969047082), KQU(18049736940860732943),
-	KQU(12748714242594196794), KQU( 1226992415735156741),
-	KQU(17900981019609531193), KQU(11720739744008710999),
-	KQU( 3006400683394775434), KQU(11347974011751996028),
-	KQU( 3316999628257954608), KQU( 8384484563557639101),
-	KQU(18117794685961729767), KQU( 1900145025596618194),
-	KQU(17459527840632892676), KQU( 5634784101865710994),
-	KQU( 7918619300292897158), KQU( 3146577625026301350),
-	KQU( 9955212856499068767), KQU( 1873995843681746975),
-	KQU( 1561487759967972194), KQU( 8322718804375878474),
-	KQU(11300284215327028366), KQU( 4667391032508998982),
-	KQU( 9820104494306625580), KQU(17922397968599970610),
-	KQU( 1784690461886786712), KQU(14940365084341346821),
-	KQU( 5348719575594186181), KQU(10720419084507855261),
-	KQU(14210394354145143274), KQU( 2426468692164000131),
-	KQU(16271062114607059202), KQU(14851904092357070247),
-	KQU( 6524493015693121897), KQU( 9825473835127138531),
-	KQU(14222500616268569578), KQU(15521484052007487468),
-	KQU(14462579404124614699), KQU(11012375590820665520),
-	KQU(11625327350536084927), KQU(14452017765243785417),
-	KQU( 9989342263518766305), KQU( 3640105471101803790),
-	KQU( 4749866455897513242), KQU(13963064946736312044),
-	KQU(10007416591973223791), KQU(18314132234717431115),
-	KQU( 3286596588617483450), KQU( 7726163455370818765),
-	KQU( 7575454721115379328), KQU( 5308331576437663422),
-	KQU(18288821894903530934), KQU( 8028405805410554106),
-	KQU(15744019832103296628), KQU(  149765559630932100),
-	KQU( 6137705557200071977), KQU(14513416315434803615),
-	KQU(11665702820128984473), KQU(  218926670505601386),
-	KQU( 6868675028717769519), KQU(15282016569441512302),
-	KQU( 5707000497782960236), KQU( 6671120586555079567),
-	KQU( 2194098052618985448), KQU(16849577895477330978),
-	KQU(12957148471017466283), KQU( 1997805535404859393),
-	KQU( 1180721060263860490), KQU(13206391310193756958),
-	KQU(12980208674461861797), KQU( 3825967775058875366),
-	KQU(17543433670782042631), KQU( 1518339070120322730),
-	KQU(16344584340890991669), KQU( 2611327165318529819),
-	KQU(11265022723283422529), KQU( 4001552800373196817),
-	KQU(14509595890079346161), KQU( 3528717165416234562),
-	KQU(18153222571501914072), KQU( 9387182977209744425),
-	KQU(10064342315985580021), KQU(11373678413215253977),
-	KQU( 2308457853228798099), KQU( 9729042942839545302),
-	KQU( 7833785471140127746), KQU( 6351049900319844436),
-	KQU(14454610627133496067), KQU(12533175683634819111),
-	KQU(15570163926716513029), KQU(13356980519185762498)
-};
-
-TEST_BEGIN(test_gen_rand_32)
-{
-	uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
-	uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
-	int i;
-	uint32_t r32;
-	sfmt_t *ctx;
-
-	assert_d_le(get_min_array_size32(), BLOCK_SIZE,
-	    "Array size too small");
-	ctx = init_gen_rand(1234);
-	fill_array32(ctx, array32, BLOCK_SIZE);
-	fill_array32(ctx, array32_2, BLOCK_SIZE);
-	fini_gen_rand(ctx);
-
-	ctx = init_gen_rand(1234);
-	for (i = 0; i < BLOCK_SIZE; i++) {
-		if (i < COUNT_1) {
-			assert_u32_eq(array32[i], init_gen_rand_32_expected[i],
-			    "Output mismatch for i=%d", i);
-		}
-		r32 = gen_rand32(ctx);
-		assert_u32_eq(r32, array32[i],
-		    "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
-	}
-	for (i = 0; i < COUNT_2; i++) {
-		r32 = gen_rand32(ctx);
-		assert_u32_eq(r32, array32_2[i],
-		    "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
-		    r32);
-	}
-	fini_gen_rand(ctx);
-}
-TEST_END
-
-TEST_BEGIN(test_by_array_32)
-{
-	uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
-	uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
-	int i;
-	uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0};
-	uint32_t r32;
-	sfmt_t *ctx;
-
-	assert_d_le(get_min_array_size32(), BLOCK_SIZE,
-	    "Array size too small");
-	ctx = init_by_array(ini, 4);
-	fill_array32(ctx, array32, BLOCK_SIZE);
-	fill_array32(ctx, array32_2, BLOCK_SIZE);
-	fini_gen_rand(ctx);
-
-	ctx = init_by_array(ini, 4);
-	for (i = 0; i < BLOCK_SIZE; i++) {
-		if (i < COUNT_1) {
-			assert_u32_eq(array32[i], init_by_array_32_expected[i],
-			    "Output mismatch for i=%d", i);
-		}
-		r32 = gen_rand32(ctx);
-		assert_u32_eq(r32, array32[i],
-		    "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
-	}
-	for (i = 0; i < COUNT_2; i++) {
-		r32 = gen_rand32(ctx);
-		assert_u32_eq(r32, array32_2[i],
-		    "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
-		    r32);
-	}
-	fini_gen_rand(ctx);
-}
-TEST_END
-
-TEST_BEGIN(test_gen_rand_64)
-{
-	uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
-	uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
-	int i;
-	uint64_t r;
-	sfmt_t *ctx;
-
-	assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
-	    "Array size too small");
-	ctx = init_gen_rand(4321);
-	fill_array64(ctx, array64, BLOCK_SIZE64);
-	fill_array64(ctx, array64_2, BLOCK_SIZE64);
-	fini_gen_rand(ctx);
-
-	ctx = init_gen_rand(4321);
-	for (i = 0; i < BLOCK_SIZE64; i++) {
-		if (i < COUNT_1) {
-			assert_u64_eq(array64[i], init_gen_rand_64_expected[i],
-			    "Output mismatch for i=%d", i);
-		}
-		r = gen_rand64(ctx);
-		assert_u64_eq(r, array64[i],
-		    "Mismatch at array64[%d]=%"FMTx64", gen=%"FMTx64, i,
-		    array64[i], r);
-	}
-	for (i = 0; i < COUNT_2; i++) {
-		r = gen_rand64(ctx);
-		assert_u64_eq(r, array64_2[i],
-		    "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i,
-		    array64_2[i], r);
-	}
-	fini_gen_rand(ctx);
-}
-TEST_END
-
-TEST_BEGIN(test_by_array_64)
-{
-	uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
-	uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
-	int i;
-	uint64_t r;
-	uint32_t ini[] = {5, 4, 3, 2, 1};
-	sfmt_t *ctx;
-
-	assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
-	    "Array size too small");
-	ctx = init_by_array(ini, 5);
-	fill_array64(ctx, array64, BLOCK_SIZE64);
-	fill_array64(ctx, array64_2, BLOCK_SIZE64);
-	fini_gen_rand(ctx);
-
-	ctx = init_by_array(ini, 5);
-	for (i = 0; i < BLOCK_SIZE64; i++) {
-		if (i < COUNT_1) {
-			assert_u64_eq(array64[i], init_by_array_64_expected[i],
-			    "Output mismatch for i=%d", i);
-		}
-		r = gen_rand64(ctx);
-		assert_u64_eq(r, array64[i],
-		    "Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i,
-		    array64[i], r);
-	}
-	for (i = 0; i < COUNT_2; i++) {
-		r = gen_rand64(ctx);
-		assert_u64_eq(r, array64_2[i],
-		    "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i,
-		    array64_2[i], r);
-	}
-	fini_gen_rand(ctx);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_gen_rand_32,
-	    test_by_array_32,
-	    test_gen_rand_64,
-	    test_by_array_64));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/a0.c b/zircon/third_party/ulib/jemalloc/test/unit/a0.c
deleted file mode 100644
index 87f7e52..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/a0.c
+++ /dev/null
@@ -1,18 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_a0)
-{
-	void *p;
-
-	p = a0malloc(1);
-	assert_ptr_not_null(p, "Unexpected a0malloc() error");
-	a0dalloc(p);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test_no_malloc_init(
-	    test_a0));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/arena_reset.c b/zircon/third_party/ulib/jemalloc/test/unit/arena_reset.c
deleted file mode 100644
index 257f972..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/arena_reset.c
+++ /dev/null
@@ -1,338 +0,0 @@
-#ifndef ARENA_RESET_PROF_C_
-#include "test/jemalloc_test.h"
-#endif
-
-#include "test/extent_hooks.h"
-
-static unsigned
-get_nsizes_impl(const char *cmd)
-{
-	unsigned ret;
-	size_t z;
-
-	z = sizeof(unsigned);
-	assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
-	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
-
-	return (ret);
-}
-
-static unsigned
-get_nsmall(void)
-{
-	return (get_nsizes_impl("arenas.nbins"));
-}
-
-static unsigned
-get_nlarge(void)
-{
-	return (get_nsizes_impl("arenas.nlextents"));
-}
-
-static size_t
-get_size_impl(const char *cmd, size_t ind)
-{
-	size_t ret;
-	size_t z;
-	size_t mib[4];
-	size_t miblen = 4;
-
-	z = sizeof(size_t);
-	assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
-	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
-	mib[2] = ind;
-	z = sizeof(size_t);
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
-	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
-
-	return (ret);
-}
-
-static size_t
-get_small_size(size_t ind)
-{
-	return (get_size_impl("arenas.bin.0.size", ind));
-}
-
-static size_t
-get_large_size(size_t ind)
-{
-	return (get_size_impl("arenas.lextent.0.size", ind));
-}
-
-/* Like ivsalloc(), but safe to call on discarded allocations. */
-static size_t
-vsalloc(tsdn_t *tsdn, const void *ptr)
-{
-	extent_t *extent;
-
-	extent = extent_lookup(tsdn, ptr, false);
-	if (extent == NULL)
-		return (0);
-	if (!extent_active_get(extent))
-		return (0);
-
-	return (isalloc(tsdn, extent, ptr));
-}
-
-static unsigned
-do_arena_create(extent_hooks_t *h)
-{
-	unsigned arena_ind;
-	size_t sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
-	    (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
-	    "Unexpected mallctl() failure");
-	return (arena_ind);
-}
-
-static void
-do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs)
-{
-#define	NLARGE	32
-	unsigned nsmall, nlarge, i;
-	size_t sz;
-	int flags;
-	tsdn_t *tsdn;
-
-	flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
-
-	nsmall = get_nsmall();
-	nlarge = get_nlarge() > NLARGE ? NLARGE : get_nlarge();
-	*nptrs = nsmall + nlarge;
-	*ptrs = (void **)malloc(*nptrs * sizeof(void *));
-	assert_ptr_not_null(*ptrs, "Unexpected malloc() failure");
-
-	/* Allocate objects with a wide range of sizes. */
-	for (i = 0; i < nsmall; i++) {
-		sz = get_small_size(i);
-		(*ptrs)[i] = mallocx(sz, flags);
-		assert_ptr_not_null((*ptrs)[i],
-		    "Unexpected mallocx(%zu, %#x) failure", sz, flags);
-	}
-	for (i = 0; i < nlarge; i++) {
-		sz = get_large_size(i);
-		(*ptrs)[nsmall + i] = mallocx(sz, flags);
-		assert_ptr_not_null((*ptrs)[i],
-		    "Unexpected mallocx(%zu, %#x) failure", sz, flags);
-	}
-
-	tsdn = tsdn_fetch();
-
-	/* Verify allocations. */
-	for (i = 0; i < *nptrs; i++) {
-		assert_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0,
-		    "Allocation should have queryable size");
-	}
-}
-
-static void
-do_arena_reset_post(void **ptrs, unsigned nptrs)
-{
-	tsdn_t *tsdn;
-	unsigned i;
-
-	tsdn = tsdn_fetch();
-
-	/* Verify allocations no longer exist. */
-	for (i = 0; i < nptrs; i++) {
-		assert_zu_eq(vsalloc(tsdn, ptrs[i]), 0,
-		    "Allocation should no longer exist");
-	}
-
-	free(ptrs);
-}
-
-static void
-do_arena_reset_destroy(const char *name, unsigned arena_ind)
-{
-	size_t mib[3];
-	size_t miblen;
-
-	miblen = sizeof(mib)/sizeof(size_t);
-	assert_d_eq(mallctlnametomib(name, mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-	mib[1] = (size_t)arena_ind;
-	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctlbymib() failure");
-}
-
-static void
-do_arena_reset(unsigned arena_ind)
-{
-	do_arena_reset_destroy("arena.0.reset", arena_ind);
-}
-
-static void
-do_arena_destroy(unsigned arena_ind)
-{
-	do_arena_reset_destroy("arena.0.destroy", arena_ind);
-}
-
-TEST_BEGIN(test_arena_reset)
-{
-	unsigned arena_ind;
-	void **ptrs;
-	unsigned nptrs;
-
-	arena_ind = do_arena_create(NULL);
-	do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
-	do_arena_reset(arena_ind);
-	do_arena_reset_post(ptrs, nptrs);
-}
-TEST_END
-
-static bool
-arena_i_initialized(unsigned arena_ind, bool refresh)
-{
-	bool initialized;
-	size_t mib[3];
-	size_t miblen, sz;
-
-	if (refresh) {
-		uint64_t epoch = 1;
-		assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
-		    sizeof(epoch)), 0, "Unexpected mallctl() failure");
-	}
-
-	miblen = sizeof(mib)/sizeof(size_t);
-	assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-	mib[1] = (size_t)arena_ind;
-	sz = sizeof(initialized);
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL,
-	    0), 0, "Unexpected mallctlbymib() failure");
-
-	return (initialized);
-}
-
-TEST_BEGIN(test_arena_destroy_initial)
-{
-	assert_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
-	    "Destroyed arena stats should not be initialized");
-}
-TEST_END
-
-TEST_BEGIN(test_arena_destroy_hooks_default)
-{
-	unsigned arena_ind, arena_ind_another, arena_ind_prev;
-	void **ptrs;
-	unsigned nptrs;
-
-	arena_ind = do_arena_create(NULL);
-	do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
-
-	assert_false(arena_i_initialized(arena_ind, false),
-	    "Arena stats should not be initialized");
-	assert_true(arena_i_initialized(arena_ind, true),
-	    "Arena stats should be initialized");
-
-	/*
-	 * Create another arena before destroying one, to better verify arena
-	 * index reuse.
-	 */
-	arena_ind_another = do_arena_create(NULL);
-
-	do_arena_destroy(arena_ind);
-
-	assert_false(arena_i_initialized(arena_ind, true),
-	    "Arena stats should not be initialized");
-	assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
-	    "Destroyed arena stats should be initialized");
-
-	do_arena_reset_post(ptrs, nptrs);
-
-	arena_ind_prev = arena_ind;
-	arena_ind = do_arena_create(NULL);
-	do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
-	assert_u_eq(arena_ind, arena_ind_prev,
-	    "Arena index should have been recycled");
-	do_arena_destroy(arena_ind);
-	do_arena_reset_post(ptrs, nptrs);
-
-	do_arena_destroy(arena_ind_another);
-}
-TEST_END
-
-/*
- * Actually unmap extents, regardless of config_munmap, so that attempts to
- * access a destroyed arena's memory will segfault.
- */
-static bool
-extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
-    bool committed, unsigned arena_ind)
-{
-	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
-	    "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
-	    "true" : "false", arena_ind);
-	assert_ptr_eq(extent_hooks, &hooks,
-	    "extent_hooks should be same as pointer used to set hooks");
-	assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap,
-	    "Wrong hook function");
-	called_dalloc = true;
-	if (!try_dalloc)
-		return (true);
-	pages_unmap(addr, size);
-	did_dalloc = true;
-	return (false);
-}
-
-static extent_hooks_t hooks_orig;
-
-static extent_hooks_t hooks_unmap = {
-	extent_alloc_hook,
-	extent_dalloc_unmap, /* dalloc */
-	extent_commit_hook,
-	extent_decommit_hook,
-	extent_purge_lazy_hook,
-	extent_purge_forced_hook,
-	extent_split_hook,
-	extent_merge_hook
-};
-
-TEST_BEGIN(test_arena_destroy_hooks_unmap)
-{
-	unsigned arena_ind;
-	void **ptrs;
-	unsigned nptrs;
-
-	extent_hooks_prep();
-	try_decommit = false;
-	memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
-	memcpy(&hooks, &hooks_unmap, sizeof(extent_hooks_t));
-
-	did_alloc = false;
-	arena_ind = do_arena_create(&hooks);
-	do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
-
-	assert_true(did_alloc, "Expected alloc");
-
-	assert_false(arena_i_initialized(arena_ind, false),
-	    "Arena stats should not be initialized");
-	assert_true(arena_i_initialized(arena_ind, true),
-	    "Arena stats should be initialized");
-
-	did_dalloc = false;
-	do_arena_destroy(arena_ind);
-	assert_true(did_dalloc, "Expected dalloc");
-
-	assert_false(arena_i_initialized(arena_ind, true),
-	    "Arena stats should not be initialized");
-	assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
-	    "Destroyed arena stats should be initialized");
-
-	do_arena_reset_post(ptrs, nptrs);
-
-	memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_arena_reset,
-	    test_arena_destroy_initial,
-	    test_arena_destroy_hooks_default,
-	    test_arena_destroy_hooks_unmap));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/arena_reset_prof.c b/zircon/third_party/ulib/jemalloc/test/unit/arena_reset_prof.c
deleted file mode 100644
index 0fd362e..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/arena_reset_prof.c
+++ /dev/null
@@ -1,5 +0,0 @@
-#include "test/jemalloc_test.h"
-#define	ARENA_RESET_PROF_C_
-
-const char *malloc_conf = "prof:true,lg_prof_sample:0";
-#include "arena_reset.c"
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/atomic.c b/zircon/third_party/ulib/jemalloc/test/unit/atomic.c
deleted file mode 100644
index 1d14368..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/atomic.c
+++ /dev/null
@@ -1,116 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define	TEST_STRUCT(p, t)						\
-struct p##_test_s {							\
-	t	accum0;							\
-	t	x;							\
-	t	s;							\
-};									\
-typedef struct p##_test_s p##_test_t;
-
-#define	TEST_BODY(p, t, tc, ta, FMT) do {				\
-	const p##_test_t tests[] = {					\
-		{(t)-1, (t)-1, (t)-2},					\
-		{(t)-1, (t) 0, (t)-2},					\
-		{(t)-1, (t) 1, (t)-2},					\
-									\
-		{(t) 0, (t)-1, (t)-2},					\
-		{(t) 0, (t) 0, (t)-2},					\
-		{(t) 0, (t) 1, (t)-2},					\
-									\
-		{(t) 1, (t)-1, (t)-2},					\
-		{(t) 1, (t) 0, (t)-2},					\
-		{(t) 1, (t) 1, (t)-2},					\
-									\
-		{(t)0, (t)-(1 << 22), (t)-2},				\
-		{(t)0, (t)(1 << 22), (t)-2},				\
-		{(t)(1 << 22), (t)-(1 << 22), (t)-2},			\
-		{(t)(1 << 22), (t)(1 << 22), (t)-2}			\
-	};								\
-	unsigned i;							\
-									\
-	for (i = 0; i < sizeof(tests)/sizeof(p##_test_t); i++) {	\
-		bool err;						\
-		t accum = tests[i].accum0;				\
-		assert_##ta##_eq(atomic_read_##p(&accum),		\
-		    tests[i].accum0,					\
-		    "Erroneous read, i=%u", i);				\
-									\
-		assert_##ta##_eq(atomic_add_##p(&accum, tests[i].x),	\
-		    (t)((tc)tests[i].accum0 + (tc)tests[i].x),		\
-		    "i=%u, accum=%"FMT", x=%"FMT,			\
-		    i, tests[i].accum0, tests[i].x);			\
-		assert_##ta##_eq(atomic_read_##p(&accum), accum,	\
-		    "Erroneous add, i=%u", i);				\
-									\
-		accum = tests[i].accum0;				\
-		assert_##ta##_eq(atomic_sub_##p(&accum, tests[i].x),	\
-		    (t)((tc)tests[i].accum0 - (tc)tests[i].x),		\
-		    "i=%u, accum=%"FMT", x=%"FMT,			\
-		    i, tests[i].accum0, tests[i].x);			\
-		assert_##ta##_eq(atomic_read_##p(&accum), accum,	\
-		    "Erroneous sub, i=%u", i);				\
-									\
-		accum = tests[i].accum0;				\
-		err = atomic_cas_##p(&accum, tests[i].x, tests[i].s);	\
-		assert_b_eq(err, tests[i].accum0 != tests[i].x,		\
-		    "Erroneous cas success/failure result");		\
-		assert_##ta##_eq(accum, err ? tests[i].accum0 :		\
-		    tests[i].s, "Erroneous cas effect, i=%u", i);	\
-									\
-		accum = tests[i].accum0;				\
-		atomic_write_##p(&accum, tests[i].s);			\
-		assert_##ta##_eq(accum, tests[i].s,			\
-		    "Erroneous write, i=%u", i);			\
-	}								\
-} while (0)
-
-TEST_STRUCT(u64, uint64_t)
-TEST_BEGIN(test_atomic_u64)
-{
-#if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
-	test_skip("64-bit atomic operations not supported");
-#else
-	TEST_BODY(u64, uint64_t, uint64_t, u64, FMTx64);
-#endif
-}
-TEST_END
-
-TEST_STRUCT(u32, uint32_t)
-TEST_BEGIN(test_atomic_u32)
-{
-	TEST_BODY(u32, uint32_t, uint32_t, u32, "#"FMTx32);
-}
-TEST_END
-
-TEST_STRUCT(p, void *)
-TEST_BEGIN(test_atomic_p)
-{
-	TEST_BODY(p, void *, uintptr_t, ptr, "p");
-}
-TEST_END
-
-TEST_STRUCT(zu, size_t)
-TEST_BEGIN(test_atomic_zu)
-{
-	TEST_BODY(zu, size_t, size_t, zu, "#zx");
-}
-TEST_END
-
-TEST_STRUCT(u, unsigned)
-TEST_BEGIN(test_atomic_u)
-{
-	TEST_BODY(u, unsigned, unsigned, u, "#x");
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_atomic_u64,
-	    test_atomic_u32,
-	    test_atomic_p,
-	    test_atomic_zu,
-	    test_atomic_u));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/base.c b/zircon/third_party/ulib/jemalloc/test/unit/base.c
deleted file mode 100644
index 76e96da..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/base.c
+++ /dev/null
@@ -1,223 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#include "test/extent_hooks.h"
-
-static extent_hooks_t hooks_null = {
-	extent_alloc_hook,
-	NULL, /* dalloc */
-	NULL, /* commit */
-	NULL, /* decommit */
-	NULL, /* purge_lazy */
-	NULL, /* purge_forced */
-	NULL, /* split */
-	NULL /* merge */
-};
-
-static extent_hooks_t hooks_not_null = {
-	extent_alloc_hook,
-	extent_dalloc_hook,
-	NULL, /* commit */
-	extent_decommit_hook,
-	extent_purge_lazy_hook,
-	extent_purge_forced_hook,
-	NULL, /* split */
-	NULL /* merge */
-};
-
-TEST_BEGIN(test_base_hooks_default)
-{
-	tsdn_t *tsdn;
-	base_t *base;
-	size_t allocated0, allocated1, resident, mapped;
-
-	tsdn = tsdn_fetch();
-	base = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
-
-	if (config_stats) {
-		base_stats_get(tsdn, base, &allocated0, &resident, &mapped);
-		assert_zu_ge(allocated0, sizeof(base_t),
-		    "Base header should count as allocated");
-	}
-
-	assert_ptr_not_null(base_alloc(tsdn, base, 42, 1),
-	    "Unexpected base_alloc() failure");
-
-	if (config_stats) {
-		base_stats_get(tsdn, base, &allocated1, &resident, &mapped);
-		assert_zu_ge(allocated1 - allocated0, 42,
-		    "At least 42 bytes were allocated by base_alloc()");
-	}
-
-	base_delete(base);
-}
-TEST_END
-
-TEST_BEGIN(test_base_hooks_null)
-{
-	extent_hooks_t hooks_orig;
-	tsdn_t *tsdn;
-	base_t *base;
-	size_t allocated0, allocated1, resident, mapped;
-
-	extent_hooks_prep();
-	try_dalloc = false;
-	try_decommit = false;
-	try_purge_lazy = false;
-	try_purge_forced = false;
-	memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
-	memcpy(&hooks, &hooks_null, sizeof(extent_hooks_t));
-
-	tsdn = tsdn_fetch();
-	base = base_new(tsdn, 0, &hooks);
-	assert_ptr_not_null(base, "Unexpected base_new() failure");
-
-	if (config_stats) {
-		base_stats_get(tsdn, base, &allocated0, &resident, &mapped);
-		assert_zu_ge(allocated0, sizeof(base_t),
-		    "Base header should count as allocated");
-	}
-
-	assert_ptr_not_null(base_alloc(tsdn, base, 42, 1),
-	    "Unexpected base_alloc() failure");
-
-	if (config_stats) {
-		base_stats_get(tsdn, base, &allocated1, &resident, &mapped);
-		assert_zu_ge(allocated1 - allocated0, 42,
-		    "At least 42 bytes were allocated by base_alloc()");
-	}
-
-	base_delete(base);
-
-	memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
-}
-TEST_END
-
-TEST_BEGIN(test_base_hooks_not_null)
-{
-	extent_hooks_t hooks_orig;
-	tsdn_t *tsdn;
-	base_t *base;
-	void *p, *q, *r, *r_exp;
-
-	extent_hooks_prep();
-	try_dalloc = false;
-	try_decommit = false;
-	try_purge_lazy = false;
-	try_purge_forced = false;
-	memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
-	memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t));
-
-	tsdn = tsdn_fetch();
-	did_alloc = false;
-	base = base_new(tsdn, 0, &hooks);
-	assert_ptr_not_null(base, "Unexpected base_new() failure");
-	assert_true(did_alloc, "Expected alloc");
-
-	/*
-	 * Check for tight packing at specified alignment under simple
-	 * conditions.
-	 */
-	{
-		const size_t alignments[] = {
-			1,
-			QUANTUM,
-			QUANTUM << 1,
-			CACHELINE,
-			CACHELINE << 1,
-		};
-		unsigned i;
-
-		for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) {
-			size_t alignment = alignments[i];
-			size_t align_ceil = ALIGNMENT_CEILING(alignment,
-			    QUANTUM);
-			p = base_alloc(tsdn, base, 1, alignment);
-			assert_ptr_not_null(p,
-			    "Unexpected base_alloc() failure");
-			assert_ptr_eq(p,
-			    (void *)(ALIGNMENT_CEILING((uintptr_t)p,
-			    alignment)), "Expected quantum alignment");
-			q = base_alloc(tsdn, base, alignment, alignment);
-			assert_ptr_not_null(q,
-			    "Unexpected base_alloc() failure");
-			assert_ptr_eq((void *)((uintptr_t)p + align_ceil), q,
-			    "Minimal allocation should take up %zu bytes",
-			    align_ceil);
-			r = base_alloc(tsdn, base, 1, alignment);
-			assert_ptr_not_null(r,
-			    "Unexpected base_alloc() failure");
-			assert_ptr_eq((void *)((uintptr_t)q + align_ceil), r,
-			    "Minimal allocation should take up %zu bytes",
-			    align_ceil);
-		}
-	}
-
-	/*
-	 * Allocate an object that cannot fit in the first block, then verify
-	 * that the first block's remaining space is considered for subsequent
-	 * allocation.
-	 */
-	assert_zu_ge(extent_size_get(&base->blocks->extent), QUANTUM,
-	    "Remainder insufficient for test");
-	/* Use up all but one quantum of block. */
-	while (extent_size_get(&base->blocks->extent) > QUANTUM) {
-		p = base_alloc(tsdn, base, QUANTUM, QUANTUM);
-		assert_ptr_not_null(p, "Unexpected base_alloc() failure");
-	}
-	r_exp = extent_addr_get(&base->blocks->extent);
-	assert_zu_eq(base->extent_sn_next, 1, "One extant block expected");
-	q = base_alloc(tsdn, base, QUANTUM + 1, QUANTUM);
-	assert_ptr_not_null(q, "Unexpected base_alloc() failure");
-	assert_ptr_ne(q, r_exp, "Expected allocation from new block");
-	assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
-	r = base_alloc(tsdn, base, QUANTUM, QUANTUM);
-	assert_ptr_not_null(r, "Unexpected base_alloc() failure");
-	assert_ptr_eq(r, r_exp, "Expected allocation from first block");
-	assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
-
-	/*
-	 * Check for proper alignment support when normal blocks are too small.
-	 */
-	{
-		const size_t alignments[] = {
-			HUGEPAGE,
-			HUGEPAGE << 1
-		};
-		unsigned i;
-
-		for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) {
-			size_t alignment = alignments[i];
-			p = base_alloc(tsdn, base, QUANTUM, alignment);
-			assert_ptr_not_null(p,
-			    "Unexpected base_alloc() failure");
-			assert_ptr_eq(p,
-			    (void *)(ALIGNMENT_CEILING((uintptr_t)p,
-			    alignment)), "Expected %zu-byte alignment",
-			    alignment);
-		}
-	}
-
-	called_dalloc = called_decommit = called_purge_lazy =
-	    called_purge_forced = false;
-	base_delete(base);
-	assert_true(called_dalloc, "Expected dalloc call");
-	assert_true(called_decommit, "Expected decommit call");
-	assert_true(called_purge_lazy, "Expected purge_lazy call");
-	assert_true(called_purge_forced, "Expected purge_forced call");
-
-	try_dalloc = true;
-	try_decommit = true;
-	try_purge_lazy = true;
-	try_purge_forced = true;
-	memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_base_hooks_default,
-	    test_base_hooks_null,
-	    test_base_hooks_not_null));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/bitmap.c b/zircon/third_party/ulib/jemalloc/test/unit/bitmap.c
deleted file mode 100644
index b502bfea..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/bitmap.c
+++ /dev/null
@@ -1,348 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define	NBITS_TAB \
-    NB( 1) \
-    NB( 2) \
-    NB( 3) \
-    NB( 4) \
-    NB( 5) \
-    NB( 6) \
-    NB( 7) \
-    NB( 8) \
-    NB( 9) \
-    NB(10) \
-    NB(11) \
-    NB(12) \
-    NB(13) \
-    NB(14) \
-    NB(15) \
-    NB(16) \
-    NB(17) \
-    NB(18) \
-    NB(19) \
-    NB(20) \
-    NB(21) \
-    NB(22) \
-    NB(23) \
-    NB(24) \
-    NB(25) \
-    NB(26) \
-    NB(27) \
-    NB(28) \
-    NB(29) \
-    NB(30) \
-    NB(31) \
-    NB(32) \
-    \
-    NB(33) \
-    NB(34) \
-    NB(35) \
-    NB(36) \
-    NB(37) \
-    NB(38) \
-    NB(39) \
-    NB(40) \
-    NB(41) \
-    NB(42) \
-    NB(43) \
-    NB(44) \
-    NB(45) \
-    NB(46) \
-    NB(47) \
-    NB(48) \
-    NB(49) \
-    NB(50) \
-    NB(51) \
-    NB(52) \
-    NB(53) \
-    NB(54) \
-    NB(55) \
-    NB(56) \
-    NB(57) \
-    NB(58) \
-    NB(59) \
-    NB(60) \
-    NB(61) \
-    NB(62) \
-    NB(63) \
-    NB(64) \
-    NB(65) \
-    \
-    NB(126) \
-    NB(127) \
-    NB(128) \
-    NB(129) \
-    NB(130) \
-    \
-    NB(254) \
-    NB(255) \
-    NB(256) \
-    NB(257) \
-    NB(258) \
-    \
-    NB(510) \
-    NB(511) \
-    NB(512) \
-    NB(513) \
-    NB(514) \
-    \
-    NB(1024) \
-    NB(2048) \
-    NB(4096) \
-    NB(8192) \
-    NB(16384) \
-
-static void
-test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits)
-{
-	bitmap_info_t binfo_dyn;
-	bitmap_info_init(&binfo_dyn, nbits);
-
-	assert_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn),
-	    "Unexpected difference between static and dynamic initialization, "
-	    "nbits=%zu", nbits);
-	assert_zu_eq(binfo->nbits, binfo_dyn.nbits,
-	    "Unexpected difference between static and dynamic initialization, "
-	    "nbits=%zu", nbits);
-#ifdef BITMAP_USE_TREE
-	assert_u_eq(binfo->nlevels, binfo_dyn.nlevels,
-	    "Unexpected difference between static and dynamic initialization, "
-	    "nbits=%zu", nbits);
-	{
-		unsigned i;
-
-		for (i = 0; i < binfo->nlevels; i++) {
-			assert_zu_eq(binfo->levels[i].group_offset,
-			    binfo_dyn.levels[i].group_offset,
-			    "Unexpected difference between static and dynamic "
-			    "initialization, nbits=%zu, level=%u", nbits, i);
-		}
-	}
-#else
-	assert_zu_eq(binfo->ngroups, binfo_dyn.ngroups,
-	    "Unexpected difference between static and dynamic initialization");
-#endif
-}
-
-TEST_BEGIN(test_bitmap_initializer)
-{
-#define	NB(nbits) {							\
-		if (nbits <= BITMAP_MAXBITS) {				\
-			bitmap_info_t binfo =				\
-			    BITMAP_INFO_INITIALIZER(nbits);		\
-			test_bitmap_initializer_body(&binfo, nbits);	\
-		}							\
-	}
-	NBITS_TAB
-#undef NB
-}
-TEST_END
-
-static size_t
-test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits,
-    size_t prev_size)
-{
-	size_t size = bitmap_size(binfo);
-	assert_zu_ge(size, (nbits >> 3),
-	    "Bitmap size is smaller than expected");
-	assert_zu_ge(size, prev_size, "Bitmap size is smaller than expected");
-	return (size);
-}
-
-TEST_BEGIN(test_bitmap_size)
-{
-	size_t nbits, prev_size;
-
-	prev_size = 0;
-	for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
-		bitmap_info_t binfo;
-		bitmap_info_init(&binfo, nbits);
-		prev_size = test_bitmap_size_body(&binfo, nbits, prev_size);
-	}
-#define	NB(nbits) {							\
-		bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits);	\
-		prev_size = test_bitmap_size_body(&binfo, nbits,	\
-		    prev_size);						\
-	}
-	prev_size = 0;
-	NBITS_TAB
-#undef NB
-}
-TEST_END
-
-static void
-test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits)
-{
-	size_t i;
-	bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
-	assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
-	bitmap_init(bitmap, binfo);
-
-	for (i = 0; i < nbits; i++) {
-		assert_false(bitmap_get(bitmap, binfo, i),
-		    "Bit should be unset");
-	}
-	free(bitmap);
-}
-
-TEST_BEGIN(test_bitmap_init)
-{
-	size_t nbits;
-
-	for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
-		bitmap_info_t binfo;
-		bitmap_info_init(&binfo, nbits);
-		test_bitmap_init_body(&binfo, nbits);
-	}
-#define	NB(nbits) {							\
-		bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits);	\
-		test_bitmap_init_body(&binfo, nbits);			\
-	}
-	NBITS_TAB
-#undef NB
-}
-TEST_END
-
-static void
-test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits)
-{
-	size_t i;
-	bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
-	assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
-	bitmap_init(bitmap, binfo);
-
-	for (i = 0; i < nbits; i++)
-		bitmap_set(bitmap, binfo, i);
-	assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
-	free(bitmap);
-}
-
-TEST_BEGIN(test_bitmap_set)
-{
-	size_t nbits;
-
-	for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
-		bitmap_info_t binfo;
-		bitmap_info_init(&binfo, nbits);
-		test_bitmap_set_body(&binfo, nbits);
-	}
-#define	NB(nbits) {							\
-		bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits);	\
-		test_bitmap_set_body(&binfo, nbits);			\
-	}
-	NBITS_TAB
-#undef NB
-}
-TEST_END
-
-static void
-test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits)
-{
-	size_t i;
-	bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
-	assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
-	bitmap_init(bitmap, binfo);
-
-	for (i = 0; i < nbits; i++)
-		bitmap_set(bitmap, binfo, i);
-	assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
-	for (i = 0; i < nbits; i++)
-		bitmap_unset(bitmap, binfo, i);
-	for (i = 0; i < nbits; i++)
-		bitmap_set(bitmap, binfo, i);
-	assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
-	free(bitmap);
-}
-
-TEST_BEGIN(test_bitmap_unset)
-{
-	size_t nbits;
-
-	for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
-		bitmap_info_t binfo;
-		bitmap_info_init(&binfo, nbits);
-		test_bitmap_unset_body(&binfo, nbits);
-	}
-#define	NB(nbits) {							\
-		bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits);	\
-		test_bitmap_unset_body(&binfo, nbits);			\
-	}
-	NBITS_TAB
-#undef NB
-}
-TEST_END
-
-static void
-test_bitmap_sfu_body(const bitmap_info_t *binfo, size_t nbits)
-{
-	size_t i;
-	bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
-	assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
-	bitmap_init(bitmap, binfo);
-
-	/* Iteratively set bits starting at the beginning. */
-	for (i = 0; i < nbits; i++) {
-		assert_zd_eq(bitmap_sfu(bitmap, binfo), i,
-		    "First unset bit should be just after previous first unset "
-		    "bit");
-	}
-	assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
-
-	/*
-	 * Iteratively unset bits starting at the end, and verify that
-	 * bitmap_sfu() reaches the unset bits.
-	 */
-	for (i = nbits - 1; i < nbits; i--) { /* (nbits..0] */
-		bitmap_unset(bitmap, binfo, i);
-		assert_zd_eq(bitmap_sfu(bitmap, binfo), i,
-		    "First unset bit should the bit previously unset");
-		bitmap_unset(bitmap, binfo, i);
-	}
-	assert_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset");
-
-	/*
-	 * Iteratively set bits starting at the beginning, and verify that
-	 * bitmap_sfu() looks past them.
-	 */
-	for (i = 1; i < nbits; i++) {
-		bitmap_set(bitmap, binfo, i - 1);
-		assert_zd_eq(bitmap_sfu(bitmap, binfo), i,
-		    "First unset bit should be just after the bit previously "
-		    "set");
-		bitmap_unset(bitmap, binfo, i);
-	}
-	assert_zd_eq(bitmap_sfu(bitmap, binfo), nbits - 1,
-	    "First unset bit should be the last bit");
-	assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
-	free(bitmap);
-}
-
-TEST_BEGIN(test_bitmap_sfu)
-{
-	size_t nbits;
-
-	for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
-		bitmap_info_t binfo;
-		bitmap_info_init(&binfo, nbits);
-		test_bitmap_sfu_body(&binfo, nbits);
-	}
-#define	NB(nbits) {							\
-		bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits);	\
-		test_bitmap_sfu_body(&binfo, nbits);			\
-	}
-	NBITS_TAB
-#undef NB
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_bitmap_initializer,
-	    test_bitmap_size,
-	    test_bitmap_init,
-	    test_bitmap_set,
-	    test_bitmap_unset,
-	    test_bitmap_sfu));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/ckh.c b/zircon/third_party/ulib/jemalloc/test/unit/ckh.c
deleted file mode 100644
index 1f57668..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/ckh.c
+++ /dev/null
@@ -1,213 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_new_delete)
-{
-	tsd_t *tsd;
-	ckh_t ckh;
-
-	tsd = tsd_fetch();
-
-	assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
-	    ckh_string_keycomp), "Unexpected ckh_new() error");
-	ckh_delete(tsd, &ckh);
-
-	assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash,
-	    ckh_pointer_keycomp), "Unexpected ckh_new() error");
-	ckh_delete(tsd, &ckh);
-}
-TEST_END
-
-TEST_BEGIN(test_count_insert_search_remove)
-{
-	tsd_t *tsd;
-	ckh_t ckh;
-	const char *strs[] = {
-	    "a string",
-	    "A string",
-	    "a string.",
-	    "A string."
-	};
-	const char *missing = "A string not in the hash table.";
-	size_t i;
-
-	tsd = tsd_fetch();
-
-	assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
-	    ckh_string_keycomp), "Unexpected ckh_new() error");
-	assert_zu_eq(ckh_count(&ckh), 0,
-	    "ckh_count() should return %zu, but it returned %zu", ZU(0),
-	    ckh_count(&ckh));
-
-	/* Insert. */
-	for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
-		ckh_insert(tsd, &ckh, strs[i], strs[i]);
-		assert_zu_eq(ckh_count(&ckh), i+1,
-		    "ckh_count() should return %zu, but it returned %zu", i+1,
-		    ckh_count(&ckh));
-	}
-
-	/* Search. */
-	for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
-		union {
-			void *p;
-			const char *s;
-		} k, v;
-		void **kp, **vp;
-		const char *ks, *vs;
-
-		kp = (i & 1) ? &k.p : NULL;
-		vp = (i & 2) ? &v.p : NULL;
-		k.p = NULL;
-		v.p = NULL;
-		assert_false(ckh_search(&ckh, strs[i], kp, vp),
-		    "Unexpected ckh_search() error");
-
-		ks = (i & 1) ? strs[i] : (const char *)NULL;
-		vs = (i & 2) ? strs[i] : (const char *)NULL;
-		assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
-		    i);
-		assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
-		    i);
-	}
-	assert_true(ckh_search(&ckh, missing, NULL, NULL),
-	    "Unexpected ckh_search() success");
-
-	/* Remove. */
-	for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
-		union {
-			void *p;
-			const char *s;
-		} k, v;
-		void **kp, **vp;
-		const char *ks, *vs;
-
-		kp = (i & 1) ? &k.p : NULL;
-		vp = (i & 2) ? &v.p : NULL;
-		k.p = NULL;
-		v.p = NULL;
-		assert_false(ckh_remove(tsd, &ckh, strs[i], kp, vp),
-		    "Unexpected ckh_remove() error");
-
-		ks = (i & 1) ? strs[i] : (const char *)NULL;
-		vs = (i & 2) ? strs[i] : (const char *)NULL;
-		assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
-		    i);
-		assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
-		    i);
-		assert_zu_eq(ckh_count(&ckh),
-		    sizeof(strs)/sizeof(const char *) - i - 1,
-		    "ckh_count() should return %zu, but it returned %zu",
-		        sizeof(strs)/sizeof(const char *) - i - 1,
-		    ckh_count(&ckh));
-	}
-
-	ckh_delete(tsd, &ckh);
-}
-TEST_END
-
-TEST_BEGIN(test_insert_iter_remove)
-{
-#define	NITEMS ZU(1000)
-	tsd_t *tsd;
-	ckh_t ckh;
-	void **p[NITEMS];
-	void *q, *r;
-	size_t i;
-
-	tsd = tsd_fetch();
-
-	assert_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash,
-	    ckh_pointer_keycomp), "Unexpected ckh_new() error");
-
-	for (i = 0; i < NITEMS; i++) {
-		p[i] = mallocx(i+1, 0);
-		assert_ptr_not_null(p[i], "Unexpected mallocx() failure");
-	}
-
-	for (i = 0; i < NITEMS; i++) {
-		size_t j;
-
-		for (j = i; j < NITEMS; j++) {
-			assert_false(ckh_insert(tsd, &ckh, p[j], p[j]),
-			    "Unexpected ckh_insert() failure");
-			assert_false(ckh_search(&ckh, p[j], &q, &r),
-			    "Unexpected ckh_search() failure");
-			assert_ptr_eq(p[j], q, "Key pointer mismatch");
-			assert_ptr_eq(p[j], r, "Value pointer mismatch");
-		}
-
-		assert_zu_eq(ckh_count(&ckh), NITEMS,
-		    "ckh_count() should return %zu, but it returned %zu",
-		    NITEMS, ckh_count(&ckh));
-
-		for (j = i + 1; j < NITEMS; j++) {
-			assert_false(ckh_search(&ckh, p[j], NULL, NULL),
-			    "Unexpected ckh_search() failure");
-			assert_false(ckh_remove(tsd, &ckh, p[j], &q, &r),
-			    "Unexpected ckh_remove() failure");
-			assert_ptr_eq(p[j], q, "Key pointer mismatch");
-			assert_ptr_eq(p[j], r, "Value pointer mismatch");
-			assert_true(ckh_search(&ckh, p[j], NULL, NULL),
-			    "Unexpected ckh_search() success");
-			assert_true(ckh_remove(tsd, &ckh, p[j], &q, &r),
-			    "Unexpected ckh_remove() success");
-		}
-
-		{
-			bool seen[NITEMS];
-			size_t tabind;
-
-			memset(seen, 0, sizeof(seen));
-
-			for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) {
-				size_t k;
-
-				assert_ptr_eq(q, r, "Key and val not equal");
-
-				for (k = 0; k < NITEMS; k++) {
-					if (p[k] == q) {
-						assert_false(seen[k],
-						    "Item %zu already seen", k);
-						seen[k] = true;
-						break;
-					}
-				}
-			}
-
-			for (j = 0; j < i + 1; j++)
-				assert_true(seen[j], "Item %zu not seen", j);
-			for (; j < NITEMS; j++)
-				assert_false(seen[j], "Item %zu seen", j);
-		}
-	}
-
-	for (i = 0; i < NITEMS; i++) {
-		assert_false(ckh_search(&ckh, p[i], NULL, NULL),
-		    "Unexpected ckh_search() failure");
-		assert_false(ckh_remove(tsd, &ckh, p[i], &q, &r),
-		    "Unexpected ckh_remove() failure");
-		assert_ptr_eq(p[i], q, "Key pointer mismatch");
-		assert_ptr_eq(p[i], r, "Value pointer mismatch");
-		assert_true(ckh_search(&ckh, p[i], NULL, NULL),
-		    "Unexpected ckh_search() success");
-		assert_true(ckh_remove(tsd, &ckh, p[i], &q, &r),
-		    "Unexpected ckh_remove() success");
-		dallocx(p[i], 0);
-	}
-
-	assert_zu_eq(ckh_count(&ckh), 0,
-	    "ckh_count() should return %zu, but it returned %zu",
-	    ZU(0), ckh_count(&ckh));
-	ckh_delete(tsd, &ckh);
-#undef NITEMS
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_new_delete,
-	    test_count_insert_search_remove,
-	    test_insert_iter_remove));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/decay.c b/zircon/third_party/ulib/jemalloc/test/unit/decay.c
deleted file mode 100644
index b3b1dd9..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/decay.c
+++ /dev/null
@@ -1,362 +0,0 @@
-#include "test/jemalloc_test.h"
-
-const char *malloc_conf = "decay_time:1,lg_tcache_max:0";
-
-static nstime_monotonic_t *nstime_monotonic_orig;
-static nstime_update_t *nstime_update_orig;
-
-static unsigned nupdates_mock;
-static nstime_t time_mock;
-static bool monotonic_mock;
-
-static bool
-nstime_monotonic_mock(void)
-{
-	return (monotonic_mock);
-}
-
-static bool
-nstime_update_mock(nstime_t *time)
-{
-	nupdates_mock++;
-	if (monotonic_mock)
-		nstime_copy(time, &time_mock);
-	return (!monotonic_mock);
-}
-
-TEST_BEGIN(test_decay_ticks)
-{
-	ticker_t *decay_ticker;
-	unsigned tick0, tick1;
-	size_t sz, large0;
-	void *p;
-
-	decay_ticker = decay_ticker_get(tsd_fetch(), 0);
-	assert_ptr_not_null(decay_ticker,
-	    "Unexpected failure getting decay ticker");
-
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
-	    0), 0, "Unexpected mallctl failure");
-
-	/*
-	 * Test the standard APIs using a large size class, since we can't
-	 * control tcache interactions for small size classes (except by
-	 * completely disabling tcache for the entire test program).
-	 */
-
-	/* malloc(). */
-	tick0 = ticker_read(decay_ticker);
-	p = malloc(large0);
-	assert_ptr_not_null(p, "Unexpected malloc() failure");
-	tick1 = ticker_read(decay_ticker);
-	assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
-	/* free(). */
-	tick0 = ticker_read(decay_ticker);
-	free(p);
-	tick1 = ticker_read(decay_ticker);
-	assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");
-
-	/* calloc(). */
-	tick0 = ticker_read(decay_ticker);
-	p = calloc(1, large0);
-	assert_ptr_not_null(p, "Unexpected calloc() failure");
-	tick1 = ticker_read(decay_ticker);
-	assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
-	free(p);
-
-	/* posix_memalign(). */
-	tick0 = ticker_read(decay_ticker);
-	assert_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
-	    "Unexpected posix_memalign() failure");
-	tick1 = ticker_read(decay_ticker);
-	assert_u32_ne(tick1, tick0,
-	    "Expected ticker to tick during posix_memalign()");
-	free(p);
-
-	/* aligned_alloc(). */
-	tick0 = ticker_read(decay_ticker);
-	p = aligned_alloc(sizeof(size_t), large0);
-	assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
-	tick1 = ticker_read(decay_ticker);
-	assert_u32_ne(tick1, tick0,
-	    "Expected ticker to tick during aligned_alloc()");
-	free(p);
-
-	/* realloc(). */
-	/* Allocate. */
-	tick0 = ticker_read(decay_ticker);
-	p = realloc(NULL, large0);
-	assert_ptr_not_null(p, "Unexpected realloc() failure");
-	tick1 = ticker_read(decay_ticker);
-	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
-	/* Reallocate. */
-	tick0 = ticker_read(decay_ticker);
-	p = realloc(p, large0);
-	assert_ptr_not_null(p, "Unexpected realloc() failure");
-	tick1 = ticker_read(decay_ticker);
-	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
-	/* Deallocate. */
-	tick0 = ticker_read(decay_ticker);
-	realloc(p, 0);
-	tick1 = ticker_read(decay_ticker);
-	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
-
-	/*
-	 * Test the *allocx() APIs using large and small size classes, with
-	 * tcache explicitly disabled.
-	 */
-	{
-		unsigned i;
-		size_t allocx_sizes[2];
-		allocx_sizes[0] = large0;
-		allocx_sizes[1] = 1;
-
-		for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
-			sz = allocx_sizes[i];
-
-			/* mallocx(). */
-			tick0 = ticker_read(decay_ticker);
-			p = mallocx(sz, MALLOCX_TCACHE_NONE);
-			assert_ptr_not_null(p, "Unexpected mallocx() failure");
-			tick1 = ticker_read(decay_ticker);
-			assert_u32_ne(tick1, tick0,
-			    "Expected ticker to tick during mallocx() (sz=%zu)",
-			    sz);
-			/* rallocx(). */
-			tick0 = ticker_read(decay_ticker);
-			p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
-			assert_ptr_not_null(p, "Unexpected rallocx() failure");
-			tick1 = ticker_read(decay_ticker);
-			assert_u32_ne(tick1, tick0,
-			    "Expected ticker to tick during rallocx() (sz=%zu)",
-			    sz);
-			/* xallocx(). */
-			tick0 = ticker_read(decay_ticker);
-			xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
-			tick1 = ticker_read(decay_ticker);
-			assert_u32_ne(tick1, tick0,
-			    "Expected ticker to tick during xallocx() (sz=%zu)",
-			    sz);
-			/* dallocx(). */
-			tick0 = ticker_read(decay_ticker);
-			dallocx(p, MALLOCX_TCACHE_NONE);
-			tick1 = ticker_read(decay_ticker);
-			assert_u32_ne(tick1, tick0,
-			    "Expected ticker to tick during dallocx() (sz=%zu)",
-			    sz);
-			/* sdallocx(). */
-			p = mallocx(sz, MALLOCX_TCACHE_NONE);
-			assert_ptr_not_null(p, "Unexpected mallocx() failure");
-			tick0 = ticker_read(decay_ticker);
-			sdallocx(p, sz, MALLOCX_TCACHE_NONE);
-			tick1 = ticker_read(decay_ticker);
-			assert_u32_ne(tick1, tick0,
-			    "Expected ticker to tick during sdallocx() "
-			    "(sz=%zu)", sz);
-		}
-	}
-
-	/*
-	 * Test tcache fill/flush interactions for large and small size classes,
-	 * using an explicit tcache.
-	 */
-	if (config_tcache) {
-		unsigned tcache_ind, i;
-		size_t tcache_sizes[2];
-		tcache_sizes[0] = large0;
-		tcache_sizes[1] = 1;
-
-		sz = sizeof(unsigned);
-		assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
-		    NULL, 0), 0, "Unexpected mallctl failure");
-
-		for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
-			sz = tcache_sizes[i];
-
-			/* tcache fill. */
-			tick0 = ticker_read(decay_ticker);
-			p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
-			assert_ptr_not_null(p, "Unexpected mallocx() failure");
-			tick1 = ticker_read(decay_ticker);
-			assert_u32_ne(tick1, tick0,
-			    "Expected ticker to tick during tcache fill "
-			    "(sz=%zu)", sz);
-			/* tcache flush. */
-			dallocx(p, MALLOCX_TCACHE(tcache_ind));
-			tick0 = ticker_read(decay_ticker);
-			assert_d_eq(mallctl("tcache.flush", NULL, NULL,
-			    (void *)&tcache_ind, sizeof(unsigned)), 0,
-			    "Unexpected mallctl failure");
-			tick1 = ticker_read(decay_ticker);
-			assert_u32_ne(tick1, tick0,
-			    "Expected ticker to tick during tcache flush "
-			    "(sz=%zu)", sz);
-		}
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_decay_ticker)
-{
-#define	NPS 1024
-	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
-	void *ps[NPS];
-	uint64_t epoch;
-	uint64_t npurge0 = 0;
-	uint64_t npurge1 = 0;
-	size_t sz, large;
-	unsigned i, nupdates0;
-	nstime_t time, decay_time, deadline;
-
-	/*
-	 * Allocate a bunch of large objects, pause the clock, deallocate the
-	 * objects, restore the clock, then [md]allocx() in a tight loop to
-	 * verify the ticker triggers purging.
-	 */
-
-	if (config_tcache) {
-		size_t tcache_max;
-
-		sz = sizeof(size_t);
-		assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
-		    &sz, NULL, 0), 0, "Unexpected mallctl failure");
-		large = nallocx(tcache_max + 1, flags);
-	}  else {
-		sz = sizeof(size_t);
-		assert_d_eq(mallctl("arenas.lextent.0.size", &large, &sz, NULL,
-		    0), 0, "Unexpected mallctl failure");
-	}
-
-	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctl failure");
-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
-	    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
-	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz,
-	    NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");
-
-	for (i = 0; i < NPS; i++) {
-		ps[i] = mallocx(large, flags);
-		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
-	}
-
-	nupdates_mock = 0;
-	nstime_init(&time_mock, 0);
-	nstime_update(&time_mock);
-	monotonic_mock = true;
-
-	nstime_monotonic_orig = nstime_monotonic;
-	nstime_update_orig = nstime_update;
-	nstime_monotonic = nstime_monotonic_mock;
-	nstime_update = nstime_update_mock;
-
-	for (i = 0; i < NPS; i++) {
-		dallocx(ps[i], flags);
-		nupdates0 = nupdates_mock;
-		assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
-		    "Unexpected arena.0.decay failure");
-		assert_u_gt(nupdates_mock, nupdates0,
-		    "Expected nstime_update() to be called");
-	}
-
-	nstime_monotonic = nstime_monotonic_orig;
-	nstime_update = nstime_update_orig;
-
-	nstime_init(&time, 0);
-	nstime_update(&time);
-	nstime_init2(&decay_time, opt_decay_time, 0);
-	nstime_copy(&deadline, &time);
-	nstime_add(&deadline, &decay_time);
-	do {
-		for (i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) {
-			void *p = mallocx(1, flags);
-			assert_ptr_not_null(p, "Unexpected mallocx() failure");
-			dallocx(p, flags);
-		}
-		assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
-		    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
-		sz = sizeof(uint64_t);
-		assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1,
-		    &sz, NULL, 0), config_stats ? 0 : ENOENT,
-		    "Unexpected mallctl result");
-
-		nstime_update(&time);
-	} while (nstime_compare(&time, &deadline) <= 0 && npurge1 == npurge0);
-
-	if (config_stats)
-		assert_u64_gt(npurge1, npurge0, "Expected purging to occur");
-#undef NPS
-}
-TEST_END
-
-TEST_BEGIN(test_decay_nonmonotonic)
-{
-#define	NPS (SMOOTHSTEP_NSTEPS + 1)
-	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
-	void *ps[NPS];
-	uint64_t epoch;
-	uint64_t npurge0 = 0;
-	uint64_t npurge1 = 0;
-	size_t sz, large0;
-	unsigned i, nupdates0;
-
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
-	    0), 0, "Unexpected mallctl failure");
-
-	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctl failure");
-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
-	    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
-	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz,
-	    NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");
-
-	nupdates_mock = 0;
-	nstime_init(&time_mock, 0);
-	nstime_update(&time_mock);
-	monotonic_mock = false;
-
-	nstime_monotonic_orig = nstime_monotonic;
-	nstime_update_orig = nstime_update;
-	nstime_monotonic = nstime_monotonic_mock;
-	nstime_update = nstime_update_mock;
-
-	for (i = 0; i < NPS; i++) {
-		ps[i] = mallocx(large0, flags);
-		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
-	}
-
-	for (i = 0; i < NPS; i++) {
-		dallocx(ps[i], flags);
-		nupdates0 = nupdates_mock;
-		assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
-		    "Unexpected arena.0.decay failure");
-		assert_u_gt(nupdates_mock, nupdates0,
-		    "Expected nstime_update() to be called");
-	}
-
-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
-	    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
-	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1, &sz,
-	    NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");
-
-	if (config_stats)
-		assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
-
-	nstime_monotonic = nstime_monotonic_orig;
-	nstime_update = nstime_update_orig;
-#undef NPS
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_decay_ticks,
-	    test_decay_ticker,
-	    test_decay_nonmonotonic));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/extent_quantize.c b/zircon/third_party/ulib/jemalloc/test/unit/extent_quantize.c
deleted file mode 100644
index a5c1b7a..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/extent_quantize.c
+++ /dev/null
@@ -1,145 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_small_extent_size)
-{
-	unsigned nbins, i;
-	size_t sz, extent_size;
-	size_t mib[4];
-	size_t miblen = sizeof(mib) / sizeof(size_t);
-
-	/*
-	 * Iterate over all small size classes, get their extent sizes, and
-	 * verify that the quantized size is the same as the extent size.
-	 */
-
-	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
-	    "Unexpected mallctl failure");
-
-	assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib failure");
-	for (i = 0; i < nbins; i++) {
-		mib[2] = i;
-		sz = sizeof(size_t);
-		assert_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz,
-		    NULL, 0), 0, "Unexpected mallctlbymib failure");
-		assert_zu_eq(extent_size,
-		    extent_size_quantize_floor(extent_size),
-		    "Small extent quantization should be a no-op "
-		    "(extent_size=%zu)", extent_size);
-		assert_zu_eq(extent_size,
-		    extent_size_quantize_ceil(extent_size),
-		    "Small extent quantization should be a no-op "
-		    "(extent_size=%zu)", extent_size);
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_large_extent_size)
-{
-	bool cache_oblivious;
-	unsigned nlextents, i;
-	size_t sz, extent_size_prev, ceil_prev;
-	size_t mib[4];
-	size_t miblen = sizeof(mib) / sizeof(size_t);
-
-	/*
-	 * Iterate over all large size classes, get their extent sizes, and
-	 * verify that the quantized size is the same as the extent size.
-	 */
-
-	sz = sizeof(bool);
-	assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious,
-	    &sz, NULL, 0), 0, "Unexpected mallctl failure");
-
-	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
-	    0), 0, "Unexpected mallctl failure");
-
-	assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib failure");
-	for (i = 0; i < nlextents; i++) {
-		size_t lextent_size, extent_size, floor, ceil;
-
-		mib[2] = i;
-		sz = sizeof(size_t);
-		assert_d_eq(mallctlbymib(mib, miblen, (void *)&lextent_size,
-		    &sz, NULL, 0), 0, "Unexpected mallctlbymib failure");
-		extent_size = cache_oblivious ? lextent_size + PAGE :
-		    lextent_size;
-		floor = extent_size_quantize_floor(extent_size);
-		ceil = extent_size_quantize_ceil(extent_size);
-
-		assert_zu_eq(extent_size, floor,
-		    "Extent quantization should be a no-op for precise size "
-		    "(lextent_size=%zu, extent_size=%zu)", lextent_size,
-		    extent_size);
-		assert_zu_eq(extent_size, ceil,
-		    "Extent quantization should be a no-op for precise size "
-		    "(lextent_size=%zu, extent_size=%zu)", lextent_size,
-		    extent_size);
-
-		if (i > 0) {
-			assert_zu_eq(extent_size_prev,
-			    extent_size_quantize_floor(extent_size - PAGE),
-			    "Floor should be a precise size");
-			if (extent_size_prev < ceil_prev) {
-				assert_zu_eq(ceil_prev, extent_size,
-				    "Ceiling should be a precise size "
-				    "(extent_size_prev=%zu, ceil_prev=%zu, "
-				    "extent_size=%zu)", extent_size_prev,
-				    ceil_prev, extent_size);
-			}
-		}
-		if (i + 1 < nlextents) {
-			extent_size_prev = floor;
-			ceil_prev = extent_size_quantize_ceil(extent_size +
-			    PAGE);
-		}
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_monotonic)
-{
-#define	SZ_MAX	ZU(4 * 1024 * 1024)
-	unsigned i;
-	size_t floor_prev, ceil_prev;
-
-	floor_prev = 0;
-	ceil_prev = 0;
-	for (i = 1; i <= SZ_MAX >> LG_PAGE; i++) {
-		size_t extent_size, floor, ceil;
-
-		extent_size = i << LG_PAGE;
-		floor = extent_size_quantize_floor(extent_size);
-		ceil = extent_size_quantize_ceil(extent_size);
-
-		assert_zu_le(floor, extent_size,
-		    "Floor should be <= (floor=%zu, extent_size=%zu, ceil=%zu)",
-		    floor, extent_size, ceil);
-		assert_zu_ge(ceil, extent_size,
-		    "Ceiling should be >= (floor=%zu, extent_size=%zu, "
-		    "ceil=%zu)", floor, extent_size, ceil);
-
-		assert_zu_le(floor_prev, floor, "Floor should be monotonic "
-		    "(floor_prev=%zu, floor=%zu, extent_size=%zu, ceil=%zu)",
-		    floor_prev, floor, extent_size, ceil);
-		assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
-		    "(floor=%zu, extent_size=%zu, ceil_prev=%zu, ceil=%zu)",
-		    floor, extent_size, ceil_prev, ceil);
-
-		floor_prev = floor;
-		ceil_prev = ceil;
-	}
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_small_extent_size,
-	    test_large_extent_size,
-	    test_monotonic));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/fork.c b/zircon/third_party/ulib/jemalloc/test/unit/fork.c
deleted file mode 100644
index 58091c6..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/fork.c
+++ /dev/null
@@ -1,63 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifndef _WIN32
-#include <sys/wait.h>
-#endif
-
-TEST_BEGIN(test_fork)
-{
-#ifndef _WIN32
-	void *p;
-	pid_t pid;
-
-	p = malloc(1);
-	assert_ptr_not_null(p, "Unexpected malloc() failure");
-
-	pid = fork();
-
-	free(p);
-
-	p = malloc(64);
-	assert_ptr_not_null(p, "Unexpected malloc() failure");
-	free(p);
-
-	if (pid == -1) {
-		/* Error. */
-		test_fail("Unexpected fork() failure");
-	} else if (pid == 0) {
-		/* Child. */
-		_exit(0);
-	} else {
-		int status;
-
-		/* Parent. */
-		while (true) {
-			if (waitpid(pid, &status, 0) == -1)
-				test_fail("Unexpected waitpid() failure");
-			if (WIFSIGNALED(status)) {
-				test_fail("Unexpected child termination due to "
-				    "signal %d", WTERMSIG(status));
-				break;
-			}
-			if (WIFEXITED(status)) {
-				if (WEXITSTATUS(status) != 0) {
-					test_fail(
-					    "Unexpected child exit value %d",
-					    WEXITSTATUS(status));
-				}
-				break;
-			}
-		}
-	}
-#else
-	test_skip("fork(2) is irrelevant to Windows");
-#endif
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_fork));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/hash.c b/zircon/third_party/ulib/jemalloc/test/unit/hash.c
deleted file mode 100644
index ff23777..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/hash.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * This file is based on code that is part of SMHasher
- * (https://code.google.com/p/smhasher/), and is subject to the MIT license
- * (http://www.opensource.org/licenses/mit-license.php).  Both email addresses
- * associated with the source code's revision history belong to Austin Appleby,
- * and the revision history ranges from 2010 to 2012.  Therefore the copyright
- * and license are here taken to be:
- *
- * Copyright (c) 2010-2012 Austin Appleby
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "test/jemalloc_test.h"
-
-typedef enum {
-	hash_variant_x86_32,
-	hash_variant_x86_128,
-	hash_variant_x64_128
-} hash_variant_t;
-
-static int
-hash_variant_bits(hash_variant_t variant)
-{
-	switch (variant) {
-	case hash_variant_x86_32: return (32);
-	case hash_variant_x86_128: return (128);
-	case hash_variant_x64_128: return (128);
-	default: not_reached();
-	}
-}
-
-static const char *
-hash_variant_string(hash_variant_t variant)
-{
-	switch (variant) {
-	case hash_variant_x86_32: return ("hash_x86_32");
-	case hash_variant_x86_128: return ("hash_x86_128");
-	case hash_variant_x64_128: return ("hash_x64_128");
-	default: not_reached();
-	}
-}
-
-#define	KEY_SIZE	256
-static void
-hash_variant_verify_key(hash_variant_t variant, uint8_t *key)
-{
-	const int hashbytes = hash_variant_bits(variant) / 8;
-	const int hashes_size = hashbytes * 256;
-	VARIABLE_ARRAY(uint8_t, hashes, hashes_size);
-	VARIABLE_ARRAY(uint8_t, final, hashbytes);
-	unsigned i;
-	uint32_t computed, expected;
-
-	memset(key, 0, KEY_SIZE);
-	memset(hashes, 0, hashes_size);
-	memset(final, 0, hashbytes);
-
-	/*
-	 * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the
-	 * seed.
-	 */
-	for (i = 0; i < 256; i++) {
-		key[i] = (uint8_t)i;
-		switch (variant) {
-		case hash_variant_x86_32: {
-			uint32_t out;
-			out = hash_x86_32(key, i, 256-i);
-			memcpy(&hashes[i*hashbytes], &out, hashbytes);
-			break;
-		} case hash_variant_x86_128: {
-			uint64_t out[2];
-			hash_x86_128(key, i, 256-i, out);
-			memcpy(&hashes[i*hashbytes], out, hashbytes);
-			break;
-		} case hash_variant_x64_128: {
-			uint64_t out[2];
-			hash_x64_128(key, i, 256-i, out);
-			memcpy(&hashes[i*hashbytes], out, hashbytes);
-			break;
-		} default: not_reached();
-		}
-	}
-
-	/* Hash the result array. */
-	switch (variant) {
-	case hash_variant_x86_32: {
-		uint32_t out = hash_x86_32(hashes, hashes_size, 0);
-		memcpy(final, &out, sizeof(out));
-		break;
-	} case hash_variant_x86_128: {
-		uint64_t out[2];
-		hash_x86_128(hashes, hashes_size, 0, out);
-		memcpy(final, out, sizeof(out));
-		break;
-	} case hash_variant_x64_128: {
-		uint64_t out[2];
-		hash_x64_128(hashes, hashes_size, 0, out);
-		memcpy(final, out, sizeof(out));
-		break;
-	} default: not_reached();
-	}
-
-	computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) |
-	    (final[3] << 24);
-
-	switch (variant) {
-#ifdef JEMALLOC_BIG_ENDIAN
-	case hash_variant_x86_32: expected = 0x6213303eU; break;
-	case hash_variant_x86_128: expected = 0x266820caU; break;
-	case hash_variant_x64_128: expected = 0xcc622b6fU; break;
-#else
-	case hash_variant_x86_32: expected = 0xb0f57ee3U; break;
-	case hash_variant_x86_128: expected = 0xb3ece62aU; break;
-	case hash_variant_x64_128: expected = 0x6384ba69U; break;
-#endif
-	default: not_reached();
-	}
-
-	assert_u32_eq(computed, expected,
-	    "Hash mismatch for %s(): expected %#x but got %#x",
-	    hash_variant_string(variant), expected, computed);
-}
-
-static void
-hash_variant_verify(hash_variant_t variant)
-{
-#define	MAX_ALIGN	16
-	uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)];
-	unsigned i;
-
-	for (i = 0; i < MAX_ALIGN; i++)
-		hash_variant_verify_key(variant, &key[i]);
-#undef MAX_ALIGN
-}
-#undef KEY_SIZE
-
-TEST_BEGIN(test_hash_x86_32)
-{
-	hash_variant_verify(hash_variant_x86_32);
-}
-TEST_END
-
-TEST_BEGIN(test_hash_x86_128)
-{
-	hash_variant_verify(hash_variant_x86_128);
-}
-TEST_END
-
-TEST_BEGIN(test_hash_x64_128)
-{
-	hash_variant_verify(hash_variant_x64_128);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_hash_x86_32,
-	    test_hash_x86_128,
-	    test_hash_x64_128));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/junk.c b/zircon/third_party/ulib/jemalloc/test/unit/junk.c
deleted file mode 100644
index 5f34d05..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/junk.c
+++ /dev/null
@@ -1,149 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifdef JEMALLOC_FILL
-#  ifndef JEMALLOC_TEST_JUNK_OPT
-#    define JEMALLOC_TEST_JUNK_OPT "junk:true"
-#  endif
-const char *malloc_conf =
-    "abort:false,zero:false," JEMALLOC_TEST_JUNK_OPT;
-#endif
-
-static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
-static large_dalloc_junk_t *large_dalloc_junk_orig;
-static large_dalloc_maybe_junk_t *large_dalloc_maybe_junk_orig;
-static void *watch_for_junking;
-static bool saw_junking;
-
-static void
-watch_junking(void *p)
-{
-	watch_for_junking = p;
-	saw_junking = false;
-}
-
-static void
-arena_dalloc_junk_small_intercept(void *ptr, const arena_bin_info_t *bin_info)
-{
-	size_t i;
-
-	arena_dalloc_junk_small_orig(ptr, bin_info);
-	for (i = 0; i < bin_info->reg_size; i++) {
-		assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
-		    "Missing junk fill for byte %zu/%zu of deallocated region",
-		    i, bin_info->reg_size);
-	}
-	if (ptr == watch_for_junking)
-		saw_junking = true;
-}
-
-static void
-large_dalloc_junk_intercept(void *ptr, size_t usize)
-{
-	size_t i;
-
-	large_dalloc_junk_orig(ptr, usize);
-	for (i = 0; i < usize; i++) {
-		assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
-		    "Missing junk fill for byte %zu/%zu of deallocated region",
-		    i, usize);
-	}
-	if (ptr == watch_for_junking)
-		saw_junking = true;
-}
-
-static void
-large_dalloc_maybe_junk_intercept(void *ptr, size_t usize)
-{
-	large_dalloc_maybe_junk_orig(ptr, usize);
-	if (ptr == watch_for_junking)
-		saw_junking = true;
-}
-
-static void
-test_junk(size_t sz_min, size_t sz_max)
-{
-	uint8_t *s;
-	size_t sz_prev, sz, i;
-
-	if (opt_junk_free) {
-		arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
-		arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
-		large_dalloc_junk_orig = large_dalloc_junk;
-		large_dalloc_junk = large_dalloc_junk_intercept;
-		large_dalloc_maybe_junk_orig = large_dalloc_maybe_junk;
-		large_dalloc_maybe_junk = large_dalloc_maybe_junk_intercept;
-	}
-
-	sz_prev = 0;
-	s = (uint8_t *)mallocx(sz_min, 0);
-	assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
-
-	for (sz = sallocx(s, 0); sz <= sz_max;
-	    sz_prev = sz, sz = sallocx(s, 0)) {
-		if (sz_prev > 0) {
-			assert_u_eq(s[0], 'a',
-			    "Previously allocated byte %zu/%zu is corrupted",
-			    ZU(0), sz_prev);
-			assert_u_eq(s[sz_prev-1], 'a',
-			    "Previously allocated byte %zu/%zu is corrupted",
-			    sz_prev-1, sz_prev);
-		}
-
-		for (i = sz_prev; i < sz; i++) {
-			if (opt_junk_alloc) {
-				assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK,
-				    "Newly allocated byte %zu/%zu isn't "
-				    "junk-filled", i, sz);
-			}
-			s[i] = 'a';
-		}
-
-		if (xallocx(s, sz+1, 0, 0) == sz) {
-			uint8_t *t;
-			watch_junking(s);
-			t = (uint8_t *)rallocx(s, sz+1, 0);
-			assert_ptr_not_null((void *)t,
-			    "Unexpected rallocx() failure");
-			assert_ptr_ne(s, t, "Unexpected in-place rallocx()");
-			assert_zu_ge(sallocx(t, 0), sz+1,
-			    "Unexpectedly small rallocx() result");
-			assert_true(!opt_junk_free || saw_junking,
-			    "Expected region of size %zu to be junk-filled",
-			    sz);
-			s = t;
-		}
-	}
-
-	watch_junking(s);
-	dallocx(s, 0);
-	assert_true(!opt_junk_free || saw_junking,
-	    "Expected region of size %zu to be junk-filled", sz);
-
-	if (opt_junk_free) {
-		arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
-		large_dalloc_junk = large_dalloc_junk_orig;
-		large_dalloc_maybe_junk = large_dalloc_maybe_junk_orig;
-	}
-}
-
-TEST_BEGIN(test_junk_small)
-{
-	test_skip_if(!config_fill);
-	test_junk(1, SMALL_MAXCLASS-1);
-}
-TEST_END
-
-TEST_BEGIN(test_junk_large)
-{
-	test_skip_if(!config_fill);
-	test_junk(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1)));
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_junk_small,
-	    test_junk_large));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/junk_alloc.c b/zircon/third_party/ulib/jemalloc/test/unit/junk_alloc.c
deleted file mode 100644
index a5895b5..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/junk_alloc.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#define	JEMALLOC_TEST_JUNK_OPT "junk:alloc"
-#include "junk.c"
-#undef JEMALLOC_TEST_JUNK_OPT
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/junk_free.c b/zircon/third_party/ulib/jemalloc/test/unit/junk_free.c
deleted file mode 100644
index bb5183c..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/junk_free.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#define	JEMALLOC_TEST_JUNK_OPT "junk:free"
-#include "junk.c"
-#undef JEMALLOC_TEST_JUNK_OPT
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/mallctl.c b/zircon/third_party/ulib/jemalloc/test/unit/mallctl.c
deleted file mode 100644
index 5b734e1..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/mallctl.c
+++ /dev/null
@@ -1,668 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_mallctl_errors)
-{
-	uint64_t epoch;
-	size_t sz;
-
-	assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
-	    "mallctl() should return ENOENT for non-existent names");
-
-	assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
-	    EPERM, "mallctl() should return EPERM on attempt to write "
-	    "read-only value");
-
-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
-	    sizeof(epoch)-1), EINVAL,
-	    "mallctl() should return EINVAL for input size mismatch");
-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
-	    sizeof(epoch)+1), EINVAL,
-	    "mallctl() should return EINVAL for input size mismatch");
-
-	sz = sizeof(epoch)-1;
-	assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
-	    "mallctl() should return EINVAL for output size mismatch");
-	sz = sizeof(epoch)+1;
-	assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
-	    "mallctl() should return EINVAL for output size mismatch");
-}
-TEST_END
-
-TEST_BEGIN(test_mallctlnametomib_errors)
-{
-	size_t mib[1];
-	size_t miblen;
-
-	miblen = sizeof(mib)/sizeof(size_t);
-	assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
-	    "mallctlnametomib() should return ENOENT for non-existent names");
-}
-TEST_END
-
-TEST_BEGIN(test_mallctlbymib_errors)
-{
-	uint64_t epoch;
-	size_t sz;
-	size_t mib[1];
-	size_t miblen;
-
-	miblen = sizeof(mib)/sizeof(size_t);
-	assert_d_eq(mallctlnametomib("version", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-
-	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
-	    strlen("0.0.0")), EPERM, "mallctl() should return EPERM on "
-	    "attempt to write read-only value");
-
-	miblen = sizeof(mib)/sizeof(size_t);
-	assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-
-	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
-	    sizeof(epoch)-1), EINVAL,
-	    "mallctlbymib() should return EINVAL for input size mismatch");
-	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
-	    sizeof(epoch)+1), EINVAL,
-	    "mallctlbymib() should return EINVAL for input size mismatch");
-
-	sz = sizeof(epoch)-1;
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
-	    EINVAL,
-	    "mallctlbymib() should return EINVAL for output size mismatch");
-	sz = sizeof(epoch)+1;
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
-	    EINVAL,
-	    "mallctlbymib() should return EINVAL for output size mismatch");
-}
-TEST_END
-
-TEST_BEGIN(test_mallctl_read_write)
-{
-	uint64_t old_epoch, new_epoch;
-	size_t sz = sizeof(old_epoch);
-
-	/* Blind. */
-	assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
-	assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
-
-	/* Read. */
-	assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
-	assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
-
-	/* Write. */
-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch,
-	    sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
-	assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
-
-	/* Read+write. */
-	assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz,
-	    (void *)&new_epoch, sizeof(new_epoch)), 0,
-	    "Unexpected mallctl() failure");
-	assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
-}
-TEST_END
-
-TEST_BEGIN(test_mallctlnametomib_short_mib)
-{
-	size_t mib[4];
-	size_t miblen;
-
-	miblen = 3;
-	mib[3] = 42;
-	assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-	assert_zu_eq(miblen, 3, "Unexpected mib output length");
-	assert_zu_eq(mib[3], 42,
-	    "mallctlnametomib() wrote past the end of the input mib");
-}
-TEST_END
-
-TEST_BEGIN(test_mallctl_config)
-{
-#define	TEST_MALLCTL_CONFIG(config, t) do {				\
-	t oldval;							\
-	size_t sz = sizeof(oldval);					\
-	assert_d_eq(mallctl("config."#config, (void *)&oldval, &sz,	\
-	    NULL, 0), 0, "Unexpected mallctl() failure");		\
-	assert_b_eq(oldval, config_##config, "Incorrect config value");	\
-	assert_zu_eq(sz, sizeof(oldval), "Unexpected output size");	\
-} while (0)
-
-	TEST_MALLCTL_CONFIG(cache_oblivious, bool);
-	TEST_MALLCTL_CONFIG(debug, bool);
-	TEST_MALLCTL_CONFIG(fill, bool);
-	TEST_MALLCTL_CONFIG(lazy_lock, bool);
-	TEST_MALLCTL_CONFIG(malloc_conf, const char *);
-	TEST_MALLCTL_CONFIG(munmap, bool);
-	TEST_MALLCTL_CONFIG(prof, bool);
-	TEST_MALLCTL_CONFIG(prof_libgcc, bool);
-	TEST_MALLCTL_CONFIG(prof_libunwind, bool);
-	TEST_MALLCTL_CONFIG(stats, bool);
-	TEST_MALLCTL_CONFIG(tcache, bool);
-	TEST_MALLCTL_CONFIG(tls, bool);
-	TEST_MALLCTL_CONFIG(utrace, bool);
-	TEST_MALLCTL_CONFIG(xmalloc, bool);
-
-#undef TEST_MALLCTL_CONFIG
-}
-TEST_END
-
-TEST_BEGIN(test_mallctl_opt)
-{
-	bool config_always = true;
-
-#define	TEST_MALLCTL_OPT(t, opt, config) do {				\
-	t oldval;							\
-	size_t sz = sizeof(oldval);					\
-	int expected = config_##config ? 0 : ENOENT;			\
-	int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL,	\
-	    0);								\
-	assert_d_eq(result, expected,					\
-	    "Unexpected mallctl() result for opt."#opt);		\
-	assert_zu_eq(sz, sizeof(oldval), "Unexpected output size");	\
-} while (0)
-
-	TEST_MALLCTL_OPT(bool, abort, always);
-	TEST_MALLCTL_OPT(const char *, dss, always);
-	TEST_MALLCTL_OPT(unsigned, narenas, always);
-	TEST_MALLCTL_OPT(ssize_t, decay_time, always);
-	TEST_MALLCTL_OPT(bool, stats_print, always);
-	TEST_MALLCTL_OPT(const char *, junk, fill);
-	TEST_MALLCTL_OPT(bool, zero, fill);
-	TEST_MALLCTL_OPT(bool, utrace, utrace);
-	TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
-	TEST_MALLCTL_OPT(bool, tcache, tcache);
-	TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache);
-	TEST_MALLCTL_OPT(bool, prof, prof);
-	TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
-	TEST_MALLCTL_OPT(bool, prof_active, prof);
-	TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof);
-	TEST_MALLCTL_OPT(bool, prof_accum, prof);
-	TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof);
-	TEST_MALLCTL_OPT(bool, prof_gdump, prof);
-	TEST_MALLCTL_OPT(bool, prof_final, prof);
-	TEST_MALLCTL_OPT(bool, prof_leak, prof);
-
-#undef TEST_MALLCTL_OPT
-}
-TEST_END
-
-TEST_BEGIN(test_manpage_example)
-{
-	unsigned nbins, i;
-	size_t mib[4];
-	size_t len, miblen;
-
-	len = sizeof(nbins);
-	assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
-
-	miblen = 4;
-	assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-	for (i = 0; i < nbins; i++) {
-		size_t bin_size;
-
-		mib[2] = i;
-		len = sizeof(bin_size);
-		assert_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len,
-		    NULL, 0), 0, "Unexpected mallctlbymib() failure");
-		/* Do something with bin_size... */
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_tcache_none)
-{
-	void *p0, *q, *p1;
-
-	test_skip_if(!config_tcache);
-
-	/* Allocate p and q. */
-	p0 = mallocx(42, 0);
-	assert_ptr_not_null(p0, "Unexpected mallocx() failure");
-	q = mallocx(42, 0);
-	assert_ptr_not_null(q, "Unexpected mallocx() failure");
-
-	/* Deallocate p and q, but bypass the tcache for q. */
-	dallocx(p0, 0);
-	dallocx(q, MALLOCX_TCACHE_NONE);
-
-	/* Make sure that tcache-based allocation returns p, not q. */
-	p1 = mallocx(42, 0);
-	assert_ptr_not_null(p1, "Unexpected mallocx() failure");
-	assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region");
-
-	/* Clean up. */
-	dallocx(p1, MALLOCX_TCACHE_NONE);
-}
-TEST_END
-
-TEST_BEGIN(test_tcache)
-{
-#define	NTCACHES	10
-	unsigned tis[NTCACHES];
-	void *ps[NTCACHES];
-	void *qs[NTCACHES];
-	unsigned i;
-	size_t sz, psz, qsz;
-
-	test_skip_if(!config_tcache);
-
-	psz = 42;
-	qsz = nallocx(psz, 0) + 1;
-
-	/* Create tcaches. */
-	for (i = 0; i < NTCACHES; i++) {
-		sz = sizeof(unsigned);
-		assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
-		    0), 0, "Unexpected mallctl() failure, i=%u", i);
-	}
-
-	/* Exercise tcache ID recycling. */
-	for (i = 0; i < NTCACHES; i++) {
-		assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
-		    (void *)&tis[i], sizeof(unsigned)), 0,
-		    "Unexpected mallctl() failure, i=%u", i);
-	}
-	for (i = 0; i < NTCACHES; i++) {
-		sz = sizeof(unsigned);
-		assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
-		    0), 0, "Unexpected mallctl() failure, i=%u", i);
-	}
-
-	/* Flush empty tcaches. */
-	for (i = 0; i < NTCACHES; i++) {
-		assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
-		    sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
-		    i);
-	}
-
-	/* Cache some allocations. */
-	for (i = 0; i < NTCACHES; i++) {
-		ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
-		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
-		    i);
-		dallocx(ps[i], MALLOCX_TCACHE(tis[i]));
-
-		qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i]));
-		assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u",
-		    i);
-		dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
-	}
-
-	/* Verify that tcaches allocate cached regions. */
-	for (i = 0; i < NTCACHES; i++) {
-		void *p0 = ps[i];
-		ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
-		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
-		    i);
-		assert_ptr_eq(ps[i], p0,
-		    "Expected mallocx() to allocate cached region, i=%u", i);
-	}
-
-	/* Verify that reallocation uses cached regions. */
-	for (i = 0; i < NTCACHES; i++) {
-		void *q0 = qs[i];
-		qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i]));
-		assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u",
-		    i);
-		assert_ptr_eq(qs[i], q0,
-		    "Expected rallocx() to allocate cached region, i=%u", i);
-		/* Avoid undefined behavior in case of test failure. */
-		if (qs[i] == NULL)
-			qs[i] = ps[i];
-	}
-	for (i = 0; i < NTCACHES; i++)
-		dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
-
-	/* Flush some non-empty tcaches. */
-	for (i = 0; i < NTCACHES/2; i++) {
-		assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
-		    sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
-		    i);
-	}
-
-	/* Destroy tcaches. */
-	for (i = 0; i < NTCACHES; i++) {
-		assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
-		    (void *)&tis[i], sizeof(unsigned)), 0,
-		    "Unexpected mallctl() failure, i=%u", i);
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_thread_arena)
-{
-	unsigned arena_old, arena_new, narenas;
-	size_t sz = sizeof(unsigned);
-
-	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
-	    0, "Unexpected mallctl() failure");
-	assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
-	arena_new = narenas - 1;
-	assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz,
-	    (void *)&arena_new, sizeof(unsigned)), 0,
-	    "Unexpected mallctl() failure");
-	arena_new = 0;
-	assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz,
-	    (void *)&arena_new, sizeof(unsigned)), 0,
-	    "Unexpected mallctl() failure");
-}
-TEST_END
-
-TEST_BEGIN(test_arena_i_initialized)
-{
-	unsigned narenas, i;
-	size_t sz;
-	size_t mib[3];
-	size_t miblen = sizeof(mib) / sizeof(size_t);
-	bool initialized;
-
-	sz = sizeof(narenas);
-	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
-	    0, "Unexpected mallctl() failure");
-
-	assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-	for (i = 0; i < narenas; i++) {
-		mib[1] = i;
-		sz = sizeof(initialized);
-		assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL,
-		    0), 0, "Unexpected mallctl() failure");
-	}
-
-	mib[1] = MALLCTL_ARENAS_ALL;
-	sz = sizeof(initialized);
-	assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
-	assert_true(initialized,
-	    "Merged arena statistics should always be initialized");
-
-	/* Equivalent to the above but using mallctl() directly. */
-	sz = sizeof(initialized);
-	assert_d_eq(mallctl(
-	    "arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized",
-	    (void *)&initialized, &sz, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
-	assert_true(initialized,
-	    "Merged arena statistics should always be initialized");
-}
-TEST_END
-
-TEST_BEGIN(test_arena_i_decay_time)
-{
-	ssize_t decay_time, orig_decay_time, prev_decay_time;
-	size_t sz = sizeof(ssize_t);
-
-	assert_d_eq(mallctl("arena.0.decay_time", (void *)&orig_decay_time, &sz,
-	    NULL, 0), 0, "Unexpected mallctl() failure");
-
-	decay_time = -2;
-	assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
-	    (void *)&decay_time, sizeof(ssize_t)), EFAULT,
-	    "Unexpected mallctl() success");
-
-	decay_time = 0x7fffffff;
-	assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
-	    (void *)&decay_time, sizeof(ssize_t)), 0,
-	    "Unexpected mallctl() failure");
-
-	for (prev_decay_time = decay_time, decay_time = -1;
-	    decay_time < 20; prev_decay_time = decay_time, decay_time++) {
-		ssize_t old_decay_time;
-
-		assert_d_eq(mallctl("arena.0.decay_time", (void *)&old_decay_time,
-		    &sz, (void *)&decay_time, sizeof(ssize_t)), 0,
-		    "Unexpected mallctl() failure");
-		assert_zd_eq(old_decay_time, prev_decay_time,
-		    "Unexpected old arena.0.decay_time");
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_arena_i_purge)
-{
-	unsigned narenas;
-	size_t sz = sizeof(unsigned);
-	size_t mib[3];
-	size_t miblen = 3;
-
-	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
-
-	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
-	    0, "Unexpected mallctl() failure");
-	assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-	mib[1] = narenas;
-	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctlbymib() failure");
-
-	mib[1] = MALLCTL_ARENAS_ALL;
-	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctlbymib() failure");
-}
-TEST_END
-
-TEST_BEGIN(test_arena_i_decay)
-{
-	unsigned narenas;
-	size_t sz = sizeof(unsigned);
-	size_t mib[3];
-	size_t miblen = 3;
-
-	assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
-
-	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
-	    0, "Unexpected mallctl() failure");
-	assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-	mib[1] = narenas;
-	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctlbymib() failure");
-
-	mib[1] = MALLCTL_ARENAS_ALL;
-	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctlbymib() failure");
-}
-TEST_END
-
-TEST_BEGIN(test_arena_i_dss)
-{
-	const char *dss_prec_old, *dss_prec_new;
-	size_t sz = sizeof(dss_prec_old);
-	size_t mib[3];
-	size_t miblen;
-
-	miblen = sizeof(mib)/sizeof(size_t);
-	assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() error");
-
-	dss_prec_new = "disabled";
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
-	    (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
-	    "Unexpected mallctl() failure");
-	assert_str_ne(dss_prec_old, "primary",
-	    "Unexpected default for dss precedence");
-
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
-	    (void *)&dss_prec_old, sizeof(dss_prec_old)), 0,
-	    "Unexpected mallctl() failure");
-
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
-	    0), 0, "Unexpected mallctl() failure");
-	assert_str_ne(dss_prec_old, "primary",
-	    "Unexpected value for dss precedence");
-
-	mib[1] = narenas_total_get();
-	dss_prec_new = "disabled";
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
-	    (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
-	    "Unexpected mallctl() failure");
-	assert_str_ne(dss_prec_old, "primary",
-	    "Unexpected default for dss precedence");
-
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
-	    (void *)&dss_prec_old, sizeof(dss_prec_new)), 0,
-	    "Unexpected mallctl() failure");
-
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
-	    0), 0, "Unexpected mallctl() failure");
-	assert_str_ne(dss_prec_old, "primary",
-	    "Unexpected value for dss precedence");
-}
-TEST_END
-
-TEST_BEGIN(test_arenas_decay_time)
-{
-	ssize_t decay_time, orig_decay_time, prev_decay_time;
-	size_t sz = sizeof(ssize_t);
-
-	assert_d_eq(mallctl("arenas.decay_time", (void *)&orig_decay_time, &sz,
-	    NULL, 0), 0, "Unexpected mallctl() failure");
-
-	decay_time = -2;
-	assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
-	    (void *)&decay_time, sizeof(ssize_t)), EFAULT,
-	    "Unexpected mallctl() success");
-
-	decay_time = 0x7fffffff;
-	assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
-	    (void *)&decay_time, sizeof(ssize_t)), 0,
-	    "Expected mallctl() failure");
-
-	for (prev_decay_time = decay_time, decay_time = -1;
-	    decay_time < 20; prev_decay_time = decay_time, decay_time++) {
-		ssize_t old_decay_time;
-
-		assert_d_eq(mallctl("arenas.decay_time",
-		    (void *)&old_decay_time, &sz, (void *)&decay_time,
-		    sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
-		assert_zd_eq(old_decay_time, prev_decay_time,
-		    "Unexpected old arenas.decay_time");
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_arenas_constants)
-{
-#define	TEST_ARENAS_CONSTANT(t, name, expected) do {			\
-	t name;								\
-	size_t sz = sizeof(t);						\
-	assert_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL,	\
-	    0), 0, "Unexpected mallctl() failure");			\
-	assert_zu_eq(name, expected, "Incorrect "#name" size");		\
-} while (0)
-
-	TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
-	TEST_ARENAS_CONSTANT(size_t, page, PAGE);
-	TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS);
-	TEST_ARENAS_CONSTANT(unsigned, nlextents, NSIZES - NBINS);
-
-#undef TEST_ARENAS_CONSTANT
-}
-TEST_END
-
-TEST_BEGIN(test_arenas_bin_constants)
-{
-#define	TEST_ARENAS_BIN_CONSTANT(t, name, expected) do {		\
-	t name;								\
-	size_t sz = sizeof(t);						\
-	assert_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz,	\
-	    NULL, 0), 0, "Unexpected mallctl() failure");		\
-	assert_zu_eq(name, expected, "Incorrect "#name" size");		\
-} while (0)
-
-	TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size);
-	TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs);
-	TEST_ARENAS_BIN_CONSTANT(size_t, slab_size,
-	    arena_bin_info[0].slab_size);
-
-#undef TEST_ARENAS_BIN_CONSTANT
-}
-TEST_END
-
-TEST_BEGIN(test_arenas_lextent_constants)
-{
-#define	TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do {		\
-	t name;								\
-	size_t sz = sizeof(t);						\
-	assert_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name,	\
-	    &sz, NULL, 0), 0, "Unexpected mallctl() failure");		\
-	assert_zu_eq(name, expected, "Incorrect "#name" size");		\
-} while (0)
-
-	TEST_ARENAS_LEXTENT_CONSTANT(size_t, size, LARGE_MINCLASS);
-
-#undef TEST_ARENAS_LEXTENT_CONSTANT
-}
-TEST_END
-
-TEST_BEGIN(test_arenas_create)
-{
-	unsigned narenas_before, arena, narenas_after;
-	size_t sz = sizeof(unsigned);
-
-	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz,
-	    NULL, 0), 0, "Unexpected mallctl() failure");
-	assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
-	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL,
-	    0), 0, "Unexpected mallctl() failure");
-
-	assert_u_eq(narenas_before+1, narenas_after,
-	    "Unexpected number of arenas before versus after extension");
-	assert_u_eq(arena, narenas_after-1, "Unexpected arena index");
-}
-TEST_END
-
-TEST_BEGIN(test_stats_arenas)
-{
-#define	TEST_STATS_ARENAS(t, name) do {					\
-	t name;								\
-	size_t sz = sizeof(t);						\
-	assert_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz,	\
-	    NULL, 0), 0, "Unexpected mallctl() failure");		\
-} while (0)
-
-	TEST_STATS_ARENAS(unsigned, nthreads);
-	TEST_STATS_ARENAS(const char *, dss);
-	TEST_STATS_ARENAS(ssize_t, decay_time);
-	TEST_STATS_ARENAS(size_t, pactive);
-	TEST_STATS_ARENAS(size_t, pdirty);
-
-#undef TEST_STATS_ARENAS
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_mallctl_errors,
-	    test_mallctlnametomib_errors,
-	    test_mallctlbymib_errors,
-	    test_mallctl_read_write,
-	    test_mallctlnametomib_short_mib,
-	    test_mallctl_config,
-	    test_mallctl_opt,
-	    test_manpage_example,
-	    test_tcache_none,
-	    test_tcache,
-	    test_thread_arena,
-	    test_arena_i_initialized,
-	    test_arena_i_decay_time,
-	    test_arena_i_purge,
-	    test_arena_i_decay,
-	    test_arena_i_dss,
-	    test_arenas_decay_time,
-	    test_arenas_constants,
-	    test_arenas_bin_constants,
-	    test_arenas_lextent_constants,
-	    test_arenas_create,
-	    test_stats_arenas));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/math.c b/zircon/third_party/ulib/jemalloc/test/unit/math.c
deleted file mode 100644
index 8e5ec61..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/math.c
+++ /dev/null
@@ -1,397 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define	MAX_REL_ERR 1.0e-9
-#define	MAX_ABS_ERR 1.0e-9
-
-#include <float.h>
-
-#ifdef __PGI
-#undef INFINITY
-#endif
-
-#ifndef INFINITY
-#define	INFINITY (DBL_MAX + DBL_MAX)
-#endif
-
-static bool
-double_eq_rel(double a, double b, double max_rel_err, double max_abs_err)
-{
-	double rel_err;
-
-	if (fabs(a - b) < max_abs_err)
-		return (true);
-	rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a);
-	return (rel_err < max_rel_err);
-}
-
-static uint64_t
-factorial(unsigned x)
-{
-	uint64_t ret = 1;
-	unsigned i;
-
-	for (i = 2; i <= x; i++)
-		ret *= (uint64_t)i;
-
-	return (ret);
-}
-
-TEST_BEGIN(test_ln_gamma_factorial)
-{
-	unsigned x;
-
-	/* exp(ln_gamma(x)) == (x-1)! for integer x. */
-	for (x = 1; x <= 21; x++) {
-		assert_true(double_eq_rel(exp(ln_gamma(x)),
-		    (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR),
-		    "Incorrect factorial result for x=%u", x);
-	}
-}
-TEST_END
-
-/* Expected ln_gamma([0.0..100.0] increment=0.25). */
-static const double ln_gamma_misc_expected[] = {
-	INFINITY,
-	1.28802252469807743, 0.57236494292470008, 0.20328095143129538,
-	0.00000000000000000, -0.09827183642181320, -0.12078223763524518,
-	-0.08440112102048555, 0.00000000000000000, 0.12487171489239651,
-	0.28468287047291918, 0.47521466691493719, 0.69314718055994529,
-	0.93580193110872523, 1.20097360234707429, 1.48681557859341718,
-	1.79175946922805496, 2.11445692745037128, 2.45373657084244234,
-	2.80857141857573644, 3.17805383034794575, 3.56137591038669710,
-	3.95781396761871651, 4.36671603662228680, 4.78749174278204581,
-	5.21960398699022932, 5.66256205985714178, 6.11591589143154568,
-	6.57925121201010121, 7.05218545073853953, 7.53436423675873268,
-	8.02545839631598312, 8.52516136106541467, 9.03318691960512332,
-	9.54926725730099690, 10.07315123968123949, 10.60460290274525086,
-	11.14340011995171231, 11.68933342079726856, 12.24220494005076176,
-	12.80182748008146909, 13.36802367147604720, 13.94062521940376342,
-	14.51947222506051816, 15.10441257307551943, 15.69530137706046524,
-	16.29200047656724237, 16.89437797963419285, 17.50230784587389010,
-	18.11566950571089407, 18.73434751193644843, 19.35823122022435427,
-	19.98721449566188468, 20.62119544270163018, 21.26007615624470048,
-	21.90376249182879320, 22.55216385312342098, 23.20519299513386002,
-	23.86276584168908954, 24.52480131594137802, 25.19122118273868338,
-	25.86194990184851861, 26.53691449111561340, 27.21604439872720604,
-	27.89927138384089389, 28.58652940490193828, 29.27775451504081516,
-	29.97288476399884871, 30.67186010608067548, 31.37462231367769050,
-	32.08111489594735843, 32.79128302226991565, 33.50507345013689076,
-	34.22243445715505317, 34.94331577687681545, 35.66766853819134298,
-	36.39544520803305261, 37.12659953718355865, 37.86108650896109395,
-	38.59886229060776230, 39.33988418719949465, 40.08411059791735198,
-	40.83150097453079752, 41.58201578195490100, 42.33561646075348506,
-	43.09226539146988699, 43.85192586067515208, 44.61456202863158893,
-	45.38013889847690052, 46.14862228684032885, 46.91997879580877395,
-	47.69417578616628361, 48.47118135183522014, 49.25096429545256882,
-	50.03349410501914463, 50.81874093156324790, 51.60667556776436982,
-	52.39726942748592364, 53.19049452616926743, 53.98632346204390586,
-	54.78472939811231157, 55.58568604486942633, 56.38916764371992940,
-	57.19514895105859864, 58.00360522298051080, 58.81451220059079787,
-	59.62784609588432261, 60.44358357816834371, 61.26170176100199427,
-	62.08217818962842927, 62.90499082887649962, 63.73011805151035958,
-	64.55753862700632340, 65.38723171073768015, 66.21917683354901385,
-	67.05335389170279825, 67.88974313718154008, 68.72832516833013017,
-	69.56908092082363737, 70.41199165894616385, 71.25703896716800045,
-	72.10420474200799390, 72.95347118416940191, 73.80482079093779646,
-	74.65823634883015814, 75.51370092648485866, 76.37119786778275454,
-	77.23071078519033961, 78.09222355331530707, 78.95572030266725960,
-	79.82118541361435859, 80.68860351052903468, 81.55795945611502873,
-	82.42923834590904164, 83.30242550295004378, 84.17750647261028973,
-	85.05446701758152983, 85.93329311301090456, 86.81397094178107920,
-	87.69648688992882057, 88.58082754219766741, 89.46697967771913795,
-	90.35493026581838194, 91.24466646193963015, 92.13617560368709292,
-	93.02944520697742803, 93.92446296229978486, 94.82121673107967297,
-	95.71969454214321615, 96.61988458827809723, 97.52177522288820910,
-	98.42535495673848800, 99.33061245478741341, 100.23753653310367895,
-	101.14611615586458981, 102.05634043243354370, 102.96819861451382394,
-	103.88168009337621811, 104.79677439715833032, 105.71347118823287303,
-	106.63176026064346047, 107.55163153760463501, 108.47307506906540198,
-	109.39608102933323153, 110.32063971475740516, 111.24674154146920557,
-	112.17437704317786995, 113.10353686902013237, 114.03421178146170689,
-	114.96639265424990128, 115.90007047041454769, 116.83523632031698014,
-	117.77188139974506953, 118.70999700805310795, 119.64957454634490830,
-	120.59060551569974962, 121.53308151543865279, 122.47699424143097247,
-	123.42233548443955726, 124.36909712850338394, 125.31727114935689826,
-	126.26684961288492559, 127.21782467361175861, 128.17018857322420899,
-	129.12393363912724453, 130.07905228303084755, 131.03553699956862033,
-	131.99338036494577864, 132.95257503561629164, 133.91311374698926784,
-	134.87498931216194364, 135.83819462068046846, 136.80272263732638294,
-	137.76856640092901785, 138.73571902320256299, 139.70417368760718091,
-	140.67392364823425055, 141.64496222871400732, 142.61728282114600574,
-	143.59087888505104047, 144.56574394634486680, 145.54187159633210058,
-	146.51925549072063859, 147.49788934865566148, 148.47776695177302031,
-	149.45888214327129617, 150.44122882700193600, 151.42480096657754984,
-	152.40959258449737490, 153.39559776128982094, 154.38281063467164245,
-	155.37122539872302696, 156.36083630307879844, 157.35163765213474107,
-	158.34362380426921391, 159.33678917107920370, 160.33112821663092973,
-	161.32663545672428995, 162.32330545817117695, 163.32113283808695314,
-	164.32011226319519892, 165.32023844914485267, 166.32150615984036790,
-	167.32391020678358018, 168.32744544842768164, 169.33210678954270634,
-	170.33788918059275375, 171.34478761712384198, 172.35279713916281707,
-	173.36191283062726143, 174.37212981874515094, 175.38344327348534080,
-	176.39584840699734514, 177.40934047306160437, 178.42391476654847793,
-	179.43956662288721304, 180.45629141754378111, 181.47408456550741107,
-	182.49294152078630304, 183.51285777591152737, 184.53382886144947861,
-	185.55585034552262869, 186.57891783333786861, 187.60302696672312095,
-	188.62817342367162610, 189.65435291789341932, 190.68156119837468054,
-	191.70979404894376330, 192.73904728784492590, 193.76931676731820176,
-	194.80059837318714244, 195.83288802445184729, 196.86618167288995096,
-	197.90047530266301123, 198.93576492992946214, 199.97204660246373464,
-	201.00931639928148797, 202.04757043027063901, 203.08680483582807597,
-	204.12701578650228385, 205.16819948264117102, 206.21035215404597807,
-	207.25347005962987623, 208.29754948708190909, 209.34258675253678916,
-	210.38857820024875878, 211.43552020227099320, 212.48340915813977858,
-	213.53224149456323744, 214.58201366511514152, 215.63272214993284592,
-	216.68436345542014010, 217.73693411395422004, 218.79043068359703739,
-	219.84484974781133815, 220.90018791517996988, 221.95644181913033322,
-	223.01360811766215875, 224.07168349307951871, 225.13066465172661879,
-	226.19054832372759734, 227.25133126272962159, 228.31301024565024704,
-	229.37558207242807384, 230.43904356577689896, 231.50339157094342113,
-	232.56862295546847008, 233.63473460895144740, 234.70172344281823484,
-	235.76958639009222907, 236.83832040516844586, 237.90792246359117712,
-	238.97838956183431947, 240.04971871708477238, 241.12190696702904802,
-	242.19495136964280846, 243.26884900298270509, 244.34359696498191283,
-	245.41919237324782443, 246.49563236486270057, 247.57291409618682110,
-	248.65103474266476269, 249.72999149863338175, 250.80978157713354904,
-	251.89040220972316320, 252.97185064629374551, 254.05412415488834199,
-	255.13722002152300661, 256.22113555000953511, 257.30586806178126835,
-	258.39141489572085675, 259.47777340799029844, 260.56494097186322279,
-	261.65291497755913497, 262.74169283208021852, 263.83127195904967266,
-	264.92164979855277807, 266.01282380697938379, 267.10479145686849733,
-	268.19755023675537586, 269.29109765101975427, 270.38543121973674488,
-	271.48054847852881721, 272.57644697842033565, 273.67312428569374561,
-	274.77057798174683967, 275.86880566295326389, 276.96780494052313770,
-	278.06757344036617496, 279.16810880295668085, 280.26940868320008349,
-	281.37147075030043197, 282.47429268763045229, 283.57787219260217171,
-	284.68220697654078322, 285.78729476455760050, 286.89313329542699194,
-	287.99972032146268930, 289.10705360839756395, 290.21513093526289140,
-	291.32395009427028754, 292.43350889069523646, 293.54380514276073200,
-	294.65483668152336350, 295.76660135076059532, 296.87909700685889902,
-	297.99232151870342022, 299.10627276756946458, 300.22094864701409733,
-	301.33634706277030091, 302.45246593264130297, 303.56930318639643929,
-	304.68685676566872189, 305.80512462385280514, 306.92410472600477078,
-	308.04379504874236773, 309.16419358014690033, 310.28529831966631036,
-	311.40710727801865687, 312.52961847709792664, 313.65282994987899201,
-	314.77673974032603610, 315.90134590329950015, 317.02664650446632777,
-	318.15263962020929966, 319.27932333753892635, 320.40669575400545455,
-	321.53475497761127144, 322.66349912672620803, 323.79292633000159185,
-	324.92303472628691452, 326.05382246454587403, 327.18528770377525916,
-	328.31742861292224234, 329.45024337080525356, 330.58373016603343331,
-	331.71788719692847280, 332.85271267144611329, 333.98820480709991898,
-	335.12436183088397001, 336.26118197919845443, 337.39866349777429377,
-	338.53680464159958774, 339.67560367484657036, 340.81505887079896411,
-	341.95516851178109619, 343.09593088908627578, 344.23734430290727460,
-	345.37940706226686416, 346.52211748494903532, 347.66547389743118401,
-	348.80947463481720661, 349.95411804077025408, 351.09940246744753267,
-	352.24532627543504759, 353.39188783368263103, 354.53908551944078908,
-	355.68691771819692349, 356.83538282361303118, 357.98447923746385868,
-	359.13420536957539753
-};
-
-TEST_BEGIN(test_ln_gamma_misc)
-{
-	unsigned i;
-
-	for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) {
-		double x = (double)i * 0.25;
-		assert_true(double_eq_rel(ln_gamma(x),
-		    ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR),
-		    "Incorrect ln_gamma result for i=%u", i);
-	}
-}
-TEST_END
-
-/* Expected pt_norm([0.01..0.99] increment=0.01). */
-static const double pt_norm_expected[] = {
-	-INFINITY,
-	-2.32634787404084076, -2.05374891063182252, -1.88079360815125085,
-	-1.75068607125216946, -1.64485362695147264, -1.55477359459685305,
-	-1.47579102817917063, -1.40507156030963221, -1.34075503369021654,
-	-1.28155156554460081, -1.22652812003661049, -1.17498679206608991,
-	-1.12639112903880045, -1.08031934081495606, -1.03643338949378938,
-	-0.99445788320975281, -0.95416525314619416, -0.91536508784281390,
-	-0.87789629505122846, -0.84162123357291418, -0.80642124701824025,
-	-0.77219321418868492, -0.73884684918521371, -0.70630256284008752,
-	-0.67448975019608171, -0.64334540539291685, -0.61281299101662701,
-	-0.58284150727121620, -0.55338471955567281, -0.52440051270804067,
-	-0.49585034734745320, -0.46769879911450812, -0.43991316567323380,
-	-0.41246312944140462, -0.38532046640756751, -0.35845879325119373,
-	-0.33185334643681652, -0.30548078809939738, -0.27931903444745404,
-	-0.25334710313579978, -0.22754497664114931, -0.20189347914185077,
-	-0.17637416478086135, -0.15096921549677725, -0.12566134685507399,
-	-0.10043372051146975, -0.07526986209982976, -0.05015358346473352,
-	-0.02506890825871106, 0.00000000000000000, 0.02506890825871106,
-	0.05015358346473366, 0.07526986209982990, 0.10043372051146990,
-	0.12566134685507413, 0.15096921549677739, 0.17637416478086146,
-	0.20189347914185105, 0.22754497664114931, 0.25334710313579978,
-	0.27931903444745404, 0.30548078809939738, 0.33185334643681652,
-	0.35845879325119373, 0.38532046640756762, 0.41246312944140484,
-	0.43991316567323391, 0.46769879911450835, 0.49585034734745348,
-	0.52440051270804111, 0.55338471955567303, 0.58284150727121620,
-	0.61281299101662701, 0.64334540539291685, 0.67448975019608171,
-	0.70630256284008752, 0.73884684918521371, 0.77219321418868492,
-	0.80642124701824036, 0.84162123357291441, 0.87789629505122879,
-	0.91536508784281423, 0.95416525314619460, 0.99445788320975348,
-	1.03643338949378938, 1.08031934081495606, 1.12639112903880045,
-	1.17498679206608991, 1.22652812003661049, 1.28155156554460081,
-	1.34075503369021654, 1.40507156030963265, 1.47579102817917085,
-	1.55477359459685394, 1.64485362695147308, 1.75068607125217102,
-	1.88079360815125041, 2.05374891063182208, 2.32634787404084076
-};
-
-TEST_BEGIN(test_pt_norm)
-{
-	unsigned i;
-
-	for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) {
-		double p = (double)i * 0.01;
-		assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i],
-		    MAX_REL_ERR, MAX_ABS_ERR),
-		    "Incorrect pt_norm result for i=%u", i);
-	}
-}
-TEST_END
-
-/*
- * Expected pt_chi2(p=[0.01..0.99] increment=0.07,
- *                  df={0.1, 1.1, 10.1, 100.1, 1000.1}).
- */
-static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1};
-static const double pt_chi2_expected[] = {
-	1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17,
-	8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09,
-	5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05,
-	1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03,
-	4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00,
-
-	0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113,
-	0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931,
-	0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259,
-	0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304,
-	2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839,
-
-	2.606673548632508, 4.602913725294877, 5.646152813924212,
-	6.488971315540869, 7.249823275816285, 7.977314231410841,
-	8.700354939944047, 9.441728024225892, 10.224338321374127,
-	11.076435368801061, 12.039320937038386, 13.183878752697167,
-	14.657791935084575, 16.885728216339373, 23.361991680031817,
-
-	70.14844087392152, 80.92379498849355, 85.53325420085891,
-	88.94433120715347, 91.83732712857017, 94.46719943606301,
-	96.96896479994635, 99.43412843510363, 101.94074719829733,
-	104.57228644307247, 107.43900093448734, 110.71844673417287,
-	114.76616819871325, 120.57422505959563, 135.92318818757556,
-
-	899.0072447849649, 937.9271278858220, 953.8117189560207,
-	965.3079371501154, 974.8974061207954, 983.4936235182347,
-	991.5691170518946, 999.4334123954690, 1007.3391826856553,
-	1015.5445154999951, 1024.3777075619569, 1034.3538789836223,
-	1046.4872561869577, 1063.5717461999654, 1107.0741966053859
-};
-
-TEST_BEGIN(test_pt_chi2)
-{
-	unsigned i, j;
-	unsigned e = 0;
-
-	for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) {
-		double df = pt_chi2_df[i];
-		double ln_gamma_df = ln_gamma(df * 0.5);
-		for (j = 1; j < 100; j += 7) {
-			double p = (double)j * 0.01;
-			assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df),
-			    pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR),
-			    "Incorrect pt_chi2 result for i=%u, j=%u", i, j);
-			e++;
-		}
-	}
-}
-TEST_END
-
-/*
- * Expected pt_gamma(p=[0.1..0.99] increment=0.07,
- *                   shape=[0.5..3.0] increment=0.5).
- */
-static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0};
-static const double pt_gamma_expected[] = {
-	7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02,
-	3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01,
-	1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01,
-	4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01,
-	1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00,
-
-	0.01005033585350144, 0.08338160893905107, 0.16251892949777497,
-	0.24846135929849966, 0.34249030894677596, 0.44628710262841947,
-	0.56211891815354142, 0.69314718055994529, 0.84397007029452920,
-	1.02165124753198167, 1.23787435600161766, 1.51412773262977574,
-	1.89711998488588196, 2.52572864430825783, 4.60517018598809091,
-
-	0.05741590094955853, 0.24747378084860744, 0.39888572212236084,
-	0.54394139997444901, 0.69048812513915159, 0.84311389861296104,
-	1.00580622221479898, 1.18298694218766931, 1.38038096305861213,
-	1.60627736383027453, 1.87396970522337947, 2.20749220408081070,
-	2.65852391865854942, 3.37934630984842244, 5.67243336507218476,
-
-	0.1485547402532659, 0.4657458011640391, 0.6832386130709406,
-	0.8794297834672100, 1.0700752852474524, 1.2629614217350744,
-	1.4638400448580779, 1.6783469900166610, 1.9132338090606940,
-	2.1778589228618777, 2.4868823970010991, 2.8664695666264195,
-	3.3724415436062114, 4.1682658512758071, 6.6383520679938108,
-
-	0.2771490383641385, 0.7195001279643727, 0.9969081732265243,
-	1.2383497880608061, 1.4675206597269927, 1.6953064251816552,
-	1.9291243435606809, 2.1757300955477641, 2.4428032131216391,
-	2.7406534569230616, 3.0851445039665513, 3.5043101122033367,
-	4.0575997065264637, 4.9182956424675286, 7.5431362346944937,
-
-	0.4360451650782932, 0.9983600902486267, 1.3306365880734528,
-	1.6129750834753802, 1.8767241606994294, 2.1357032436097660,
-	2.3988853336865565, 2.6740603137235603, 2.9697561737517959,
-	3.2971457713883265, 3.6731795898504660, 4.1275751617770631,
-	4.7230515633946677, 5.6417477865306020, 8.4059469148854635
-};
-
-TEST_BEGIN(test_pt_gamma_shape)
-{
-	unsigned i, j;
-	unsigned e = 0;
-
-	for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) {
-		double shape = pt_gamma_shape[i];
-		double ln_gamma_shape = ln_gamma(shape);
-		for (j = 1; j < 100; j += 7) {
-			double p = (double)j * 0.01;
-			assert_true(double_eq_rel(pt_gamma(p, shape, 1.0,
-			    ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR,
-			    MAX_ABS_ERR),
-			    "Incorrect pt_gamma result for i=%u, j=%u", i, j);
-			e++;
-		}
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_pt_gamma_scale)
-{
-	double shape = 1.0;
-	double ln_gamma_shape = ln_gamma(shape);
-
-	assert_true(double_eq_rel(
-	    pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0,
-	    pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR,
-	    MAX_ABS_ERR),
-	    "Scale should be trivially equivalent to external multiplication");
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_ln_gamma_factorial,
-	    test_ln_gamma_misc,
-	    test_pt_norm,
-	    test_pt_chi2,
-	    test_pt_gamma_shape,
-	    test_pt_gamma_scale));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/mq.c b/zircon/third_party/ulib/jemalloc/test/unit/mq.c
deleted file mode 100644
index bd289c5..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/mq.c
+++ /dev/null
@@ -1,92 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define	NSENDERS	3
-#define	NMSGS		100000
-
-typedef struct mq_msg_s mq_msg_t;
-struct mq_msg_s {
-	mq_msg(mq_msg_t)	link;
-};
-mq_gen(static, mq_, mq_t, mq_msg_t, link)
-
-TEST_BEGIN(test_mq_basic)
-{
-	mq_t mq;
-	mq_msg_t msg;
-
-	assert_false(mq_init(&mq), "Unexpected mq_init() failure");
-	assert_u_eq(mq_count(&mq), 0, "mq should be empty");
-	assert_ptr_null(mq_tryget(&mq),
-	    "mq_tryget() should fail when the queue is empty");
-
-	mq_put(&mq, &msg);
-	assert_u_eq(mq_count(&mq), 1, "mq should contain one message");
-	assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg");
-
-	mq_put(&mq, &msg);
-	assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg");
-
-	mq_fini(&mq);
-}
-TEST_END
-
-static void *
-thd_receiver_start(void *arg)
-{
-	mq_t *mq = (mq_t *)arg;
-	unsigned i;
-
-	for (i = 0; i < (NSENDERS * NMSGS); i++) {
-		mq_msg_t *msg = mq_get(mq);
-		assert_ptr_not_null(msg, "mq_get() should never return NULL");
-		dallocx(msg, 0);
-	}
-	return (NULL);
-}
-
-static void *
-thd_sender_start(void *arg)
-{
-	mq_t *mq = (mq_t *)arg;
-	unsigned i;
-
-	for (i = 0; i < NMSGS; i++) {
-		mq_msg_t *msg;
-		void *p;
-		p = mallocx(sizeof(mq_msg_t), 0);
-		assert_ptr_not_null(p, "Unexpected mallocx() failure");
-		msg = (mq_msg_t *)p;
-		mq_put(mq, msg);
-	}
-	return (NULL);
-}
-
-TEST_BEGIN(test_mq_threaded)
-{
-	mq_t mq;
-	thd_t receiver;
-	thd_t senders[NSENDERS];
-	unsigned i;
-
-	assert_false(mq_init(&mq), "Unexpected mq_init() failure");
-
-	thd_create(&receiver, thd_receiver_start, (void *)&mq);
-	for (i = 0; i < NSENDERS; i++)
-		thd_create(&senders[i], thd_sender_start, (void *)&mq);
-
-	thd_join(receiver, NULL);
-	for (i = 0; i < NSENDERS; i++)
-		thd_join(senders[i], NULL);
-
-	mq_fini(&mq);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_mq_basic,
-	    test_mq_threaded));
-}
-
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/mtx.c b/zircon/third_party/ulib/jemalloc/test/unit/mtx.c
deleted file mode 100644
index 2eccc98..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/mtx.c
+++ /dev/null
@@ -1,59 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define	NTHREADS	2
-#define	NINCRS		2000000
-
-TEST_BEGIN(test_mtx_basic)
-{
-	mtx_t mtx;
-
-	assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure");
-	mtx_lock(&mtx);
-	mtx_unlock(&mtx);
-	mtx_fini(&mtx);
-}
-TEST_END
-
-typedef struct {
-	mtx_t		mtx;
-	unsigned	x;
-} thd_start_arg_t;
-
-static void *
-thd_start(void *varg)
-{
-	thd_start_arg_t *arg = (thd_start_arg_t *)varg;
-	unsigned i;
-
-	for (i = 0; i < NINCRS; i++) {
-		mtx_lock(&arg->mtx);
-		arg->x++;
-		mtx_unlock(&arg->mtx);
-	}
-	return (NULL);
-}
-
-TEST_BEGIN(test_mtx_race)
-{
-	thd_start_arg_t arg;
-	thd_t thds[NTHREADS];
-	unsigned i;
-
-	assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure");
-	arg.x = 0;
-	for (i = 0; i < NTHREADS; i++)
-		thd_create(&thds[i], thd_start, (void *)&arg);
-	for (i = 0; i < NTHREADS; i++)
-		thd_join(thds[i], NULL);
-	assert_u_eq(arg.x, NTHREADS * NINCRS,
-	    "Race-related counter corruption");
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_mtx_basic,
-	    test_mtx_race));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/nstime.c b/zircon/third_party/ulib/jemalloc/test/unit/nstime.c
deleted file mode 100644
index 6548ba2..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/nstime.c
+++ /dev/null
@@ -1,225 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define	BILLION	UINT64_C(1000000000)
-
-TEST_BEGIN(test_nstime_init)
-{
-	nstime_t nst;
-
-	nstime_init(&nst, 42000000043);
-	assert_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read");
-	assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
-	assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_init2)
-{
-	nstime_t nst;
-
-	nstime_init2(&nst, 42, 43);
-	assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
-	assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_copy)
-{
-	nstime_t nsta, nstb;
-
-	nstime_init2(&nsta, 42, 43);
-	nstime_init(&nstb, 0);
-	nstime_copy(&nstb, &nsta);
-	assert_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied");
-	assert_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_compare)
-{
-	nstime_t nsta, nstb;
-
-	nstime_init2(&nsta, 42, 43);
-	nstime_copy(&nstb, &nsta);
-	assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal");
-	assert_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal");
-
-	nstime_init2(&nstb, 42, 42);
-	assert_d_eq(nstime_compare(&nsta, &nstb), 1,
-	    "nsta should be greater than nstb");
-	assert_d_eq(nstime_compare(&nstb, &nsta), -1,
-	    "nstb should be less than nsta");
-
-	nstime_init2(&nstb, 42, 44);
-	assert_d_eq(nstime_compare(&nsta, &nstb), -1,
-	    "nsta should be less than nstb");
-	assert_d_eq(nstime_compare(&nstb, &nsta), 1,
-	    "nstb should be greater than nsta");
-
-	nstime_init2(&nstb, 41, BILLION - 1);
-	assert_d_eq(nstime_compare(&nsta, &nstb), 1,
-	    "nsta should be greater than nstb");
-	assert_d_eq(nstime_compare(&nstb, &nsta), -1,
-	    "nstb should be less than nsta");
-
-	nstime_init2(&nstb, 43, 0);
-	assert_d_eq(nstime_compare(&nsta, &nstb), -1,
-	    "nsta should be less than nstb");
-	assert_d_eq(nstime_compare(&nstb, &nsta), 1,
-	    "nstb should be greater than nsta");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_add)
-{
-	nstime_t nsta, nstb;
-
-	nstime_init2(&nsta, 42, 43);
-	nstime_copy(&nstb, &nsta);
-	nstime_add(&nsta, &nstb);
-	nstime_init2(&nstb, 84, 86);
-	assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-	    "Incorrect addition result");
-
-	nstime_init2(&nsta, 42, BILLION - 1);
-	nstime_copy(&nstb, &nsta);
-	nstime_add(&nsta, &nstb);
-	nstime_init2(&nstb, 85, BILLION - 2);
-	assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-	    "Incorrect addition result");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_subtract)
-{
-	nstime_t nsta, nstb;
-
-	nstime_init2(&nsta, 42, 43);
-	nstime_copy(&nstb, &nsta);
-	nstime_subtract(&nsta, &nstb);
-	nstime_init(&nstb, 0);
-	assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-	    "Incorrect subtraction result");
-
-	nstime_init2(&nsta, 42, 43);
-	nstime_init2(&nstb, 41, 44);
-	nstime_subtract(&nsta, &nstb);
-	nstime_init2(&nstb, 0, BILLION - 1);
-	assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-	    "Incorrect subtraction result");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_imultiply)
-{
-	nstime_t nsta, nstb;
-
-	nstime_init2(&nsta, 42, 43);
-	nstime_imultiply(&nsta, 10);
-	nstime_init2(&nstb, 420, 430);
-	assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-	    "Incorrect multiplication result");
-
-	nstime_init2(&nsta, 42, 666666666);
-	nstime_imultiply(&nsta, 3);
-	nstime_init2(&nstb, 127, 999999998);
-	assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-	    "Incorrect multiplication result");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_idivide)
-{
-	nstime_t nsta, nstb;
-
-	nstime_init2(&nsta, 42, 43);
-	nstime_copy(&nstb, &nsta);
-	nstime_imultiply(&nsta, 10);
-	nstime_idivide(&nsta, 10);
-	assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-	    "Incorrect division result");
-
-	nstime_init2(&nsta, 42, 666666666);
-	nstime_copy(&nstb, &nsta);
-	nstime_imultiply(&nsta, 3);
-	nstime_idivide(&nsta, 3);
-	assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-	    "Incorrect division result");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_divide)
-{
-	nstime_t nsta, nstb, nstc;
-
-	nstime_init2(&nsta, 42, 43);
-	nstime_copy(&nstb, &nsta);
-	nstime_imultiply(&nsta, 10);
-	assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
-	    "Incorrect division result");
-
-	nstime_init2(&nsta, 42, 43);
-	nstime_copy(&nstb, &nsta);
-	nstime_imultiply(&nsta, 10);
-	nstime_init(&nstc, 1);
-	nstime_add(&nsta, &nstc);
-	assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
-	    "Incorrect division result");
-
-	nstime_init2(&nsta, 42, 43);
-	nstime_copy(&nstb, &nsta);
-	nstime_imultiply(&nsta, 10);
-	nstime_init(&nstc, 1);
-	nstime_subtract(&nsta, &nstc);
-	assert_u64_eq(nstime_divide(&nsta, &nstb), 9,
-	    "Incorrect division result");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_monotonic)
-{
-	nstime_monotonic();
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_update)
-{
-	nstime_t nst;
-
-	nstime_init(&nst, 0);
-
-	assert_false(nstime_update(&nst), "Basic time update failed.");
-
-	/* Only Rip Van Winkle sleeps this long. */
-	{
-		nstime_t addend;
-		nstime_init2(&addend, 631152000, 0);
-		nstime_add(&nst, &addend);
-	}
-	{
-		nstime_t nst0;
-		nstime_copy(&nst0, &nst);
-		assert_true(nstime_update(&nst),
-		    "Update should detect time roll-back.");
-		assert_d_eq(nstime_compare(&nst, &nst0), 0,
-		    "Time should not have been modified");
-	}
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_nstime_init,
-	    test_nstime_init2,
-	    test_nstime_copy,
-	    test_nstime_compare,
-	    test_nstime_add,
-	    test_nstime_subtract,
-	    test_nstime_imultiply,
-	    test_nstime_idivide,
-	    test_nstime_divide,
-	    test_nstime_monotonic,
-	    test_nstime_update));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/pack.c b/zircon/third_party/ulib/jemalloc/test/unit/pack.c
deleted file mode 100644
index 316b6df..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/pack.c
+++ /dev/null
@@ -1,166 +0,0 @@
-#include "test/jemalloc_test.h"
-
-/* Immediately purge to minimize fragmentation. */
-const char *malloc_conf = "decay_time:-1";
-
-/*
- * Size class that is a divisor of the page size, ideally 4+ regions per run.
- */
-#if LG_PAGE <= 14
-#define	SZ	(ZU(1) << (LG_PAGE - 2))
-#else
-#define	SZ	4096
-#endif
-
-/*
- * Number of slabs to consume at high water mark.  Should be at least 2 so that
- * if mmap()ed memory grows downward, downward growth of mmap()ed memory is
- * tested.
- */
-#define	NSLABS	8
-
-static unsigned
-binind_compute(void)
-{
-	size_t sz;
-	unsigned nbins, i;
-
-	sz = sizeof(nbins);
-	assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
-	    "Unexpected mallctl failure");
-
-	for (i = 0; i < nbins; i++) {
-		size_t mib[4];
-		size_t miblen = sizeof(mib)/sizeof(size_t);
-		size_t size;
-
-		assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
-		    &miblen), 0, "Unexpected mallctlnametomb failure");
-		mib[2] = (size_t)i;
-
-		sz = sizeof(size);
-		assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
-		    0), 0, "Unexpected mallctlbymib failure");
-		if (size == SZ)
-			return (i);
-	}
-
-	test_fail("Unable to compute nregs_per_run");
-	return (0);
-}
-
-static size_t
-nregs_per_run_compute(void)
-{
-	uint32_t nregs;
-	size_t sz;
-	unsigned binind = binind_compute();
-	size_t mib[4];
-	size_t miblen = sizeof(mib)/sizeof(size_t);
-
-	assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
-	    "Unexpected mallctlnametomb failure");
-	mib[2] = (size_t)binind;
-	sz = sizeof(nregs);
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
-	    0), 0, "Unexpected mallctlbymib failure");
-	return (nregs);
-}
-
-static unsigned
-arenas_create_mallctl(void)
-{
-	unsigned arena_ind;
-	size_t sz;
-
-	sz = sizeof(arena_ind);
-	assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
-	    0, "Error in arenas.create");
-
-	return (arena_ind);
-}
-
-static void
-arena_reset_mallctl(unsigned arena_ind)
-{
-	size_t mib[3];
-	size_t miblen = sizeof(mib)/sizeof(size_t);
-
-	assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-	mib[1] = (size_t)arena_ind;
-	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctlbymib() failure");
-}
-
-TEST_BEGIN(test_pack)
-{
-	unsigned arena_ind = arenas_create_mallctl();
-	size_t nregs_per_run = nregs_per_run_compute();
-	size_t nregs = nregs_per_run * NSLABS;
-	VARIABLE_ARRAY(void *, ptrs, nregs);
-	size_t i, j, offset;
-
-	/* Fill matrix. */
-	for (i = offset = 0; i < NSLABS; i++) {
-		for (j = 0; j < nregs_per_run; j++) {
-			void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
-			    MALLOCX_TCACHE_NONE);
-			assert_ptr_not_null(p,
-			    "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |"
-			    " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu",
-			    SZ, arena_ind, i, j);
-			ptrs[(i * nregs_per_run) + j] = p;
-		}
-	}
-
-	/*
-	 * Free all but one region of each run, but rotate which region is
-	 * preserved, so that subsequent allocations exercise the within-run
-	 * layout policy.
-	 */
-	offset = 0;
-	for (i = offset = 0;
-	    i < NSLABS;
-	    i++, offset = (offset + 1) % nregs_per_run) {
-		for (j = 0; j < nregs_per_run; j++) {
-			void *p = ptrs[(i * nregs_per_run) + j];
-			if (offset == j)
-				continue;
-			dallocx(p, MALLOCX_ARENA(arena_ind) |
-			    MALLOCX_TCACHE_NONE);
-		}
-	}
-
-	/*
-	 * Logically refill matrix, skipping preserved regions and verifying
-	 * that the matrix is unmodified.
-	 */
-	offset = 0;
-	for (i = offset = 0;
-	    i < NSLABS;
-	    i++, offset = (offset + 1) % nregs_per_run) {
-		for (j = 0; j < nregs_per_run; j++) {
-			void *p;
-
-			if (offset == j)
-				continue;
-			p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
-			    MALLOCX_TCACHE_NONE);
-			assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
-			    "Unexpected refill discrepancy, run=%zu, reg=%zu\n",
-			    i, j);
-		}
-	}
-
-	/* Clean up. */
-	arena_reset_mallctl(arena_ind);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_pack));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/pages.c b/zircon/third_party/ulib/jemalloc/test/unit/pages.c
deleted file mode 100644
index 1e6add9..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/pages.c
+++ /dev/null
@@ -1,29 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_pages_huge)
-{
-	size_t alloc_size;
-	bool commit;
-	void *pages, *hugepage;
-
-	alloc_size = HUGEPAGE * 2 - PAGE;
-	commit = true;
-	pages = pages_map(NULL, alloc_size, &commit);
-	assert_ptr_not_null(pages, "Unexpected pages_map() error");
-
-	hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE));
-	assert_b_ne(pages_huge(hugepage, HUGEPAGE), have_thp,
-	    "Unexpected pages_huge() result");
-	assert_false(pages_nohuge(hugepage, HUGEPAGE),
-	    "Unexpected pages_nohuge() result");
-
-	pages_unmap(pages, alloc_size);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_pages_huge));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/ph.c b/zircon/third_party/ulib/jemalloc/test/unit/ph.c
deleted file mode 100644
index 10bf99e..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/ph.c
+++ /dev/null
@@ -1,288 +0,0 @@
-#include "test/jemalloc_test.h"
-
-typedef struct node_s node_t;
-
-struct node_s {
-#define	NODE_MAGIC 0x9823af7e
-	uint32_t magic;
-	phn(node_t) link;
-	uint64_t key;
-};
-
-static int
-node_cmp(const node_t *a, const node_t *b)
-{
-	int ret;
-
-	ret = (a->key > b->key) - (a->key < b->key);
-	if (ret == 0) {
-		/*
-		 * Duplicates are not allowed in the heap, so force an
-		 * arbitrary ordering for non-identical items with equal keys.
-		 */
-		ret = (((uintptr_t)a) > ((uintptr_t)b))
-		    - (((uintptr_t)a) < ((uintptr_t)b));
-	}
-	return (ret);
-}
-
-static int
-node_cmp_magic(const node_t *a, const node_t *b) {
-
-	assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
-	assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
-
-	return (node_cmp(a, b));
-}
-
-typedef ph(node_t) heap_t;
-ph_gen(static, heap_, heap_t, node_t, link, node_cmp_magic);
-
-static void
-node_print(const node_t *node, unsigned depth)
-{
-	unsigned i;
-	node_t *leftmost_child, *sibling;
-
-	for (i = 0; i < depth; i++)
-		malloc_printf("\t");
-	malloc_printf("%2"FMTu64"\n", node->key);
-
-	leftmost_child = phn_lchild_get(node_t, link, node);
-	if (leftmost_child == NULL)
-		return;
-	node_print(leftmost_child, depth + 1);
-
-	for (sibling = phn_next_get(node_t, link, leftmost_child); sibling !=
-	    NULL; sibling = phn_next_get(node_t, link, sibling)) {
-		node_print(sibling, depth + 1);
-	}
-}
-
-static void
-heap_print(const heap_t *heap)
-{
-	node_t *auxelm;
-
-	malloc_printf("vvv heap %p vvv\n", heap);
-	if (heap->ph_root == NULL)
-		goto label_return;
-
-	node_print(heap->ph_root, 0);
-
-	for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL;
-	    auxelm = phn_next_get(node_t, link, auxelm)) {
-		assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
-		    link, auxelm)), auxelm,
-		    "auxelm's prev doesn't link to auxelm");
-		node_print(auxelm, 0);
-	}
-
-label_return:
-	malloc_printf("^^^ heap %p ^^^\n", heap);
-}
-
-static unsigned
-node_validate(const node_t *node, const node_t *parent)
-{
-	unsigned nnodes = 1;
-	node_t *leftmost_child, *sibling;
-
-	if (parent != NULL) {
-		assert_d_ge(node_cmp_magic(node, parent), 0,
-		    "Child is less than parent");
-	}
-
-	leftmost_child = phn_lchild_get(node_t, link, node);
-	if (leftmost_child == NULL)
-		return (nnodes);
-	assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child),
-	    (void *)node, "Leftmost child does not link to node");
-	nnodes += node_validate(leftmost_child, node);
-
-	for (sibling = phn_next_get(node_t, link, leftmost_child); sibling !=
-	    NULL; sibling = phn_next_get(node_t, link, sibling)) {
-		assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
-		    link, sibling)), sibling,
-		    "sibling's prev doesn't link to sibling");
-		nnodes += node_validate(sibling, node);
-	}
-	return (nnodes);
-}
-
-static unsigned
-heap_validate(const heap_t *heap)
-{
-	unsigned nnodes = 0;
-	node_t *auxelm;
-
-	if (heap->ph_root == NULL)
-		goto label_return;
-
-	nnodes += node_validate(heap->ph_root, NULL);
-
-	for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL;
-	    auxelm = phn_next_get(node_t, link, auxelm)) {
-		assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
-		    link, auxelm)), auxelm,
-		    "auxelm's prev doesn't link to auxelm");
-		nnodes += node_validate(auxelm, NULL);
-	}
-
-label_return:
-	if (false)
-		heap_print(heap);
-	return (nnodes);
-}
-
-TEST_BEGIN(test_ph_empty)
-{
-	heap_t heap;
-
-	heap_new(&heap);
-	assert_true(heap_empty(&heap), "Heap should be empty");
-	assert_ptr_null(heap_first(&heap), "Unexpected node");
-}
-TEST_END
-
-static void
-node_remove(heap_t *heap, node_t *node)
-{
-	heap_remove(heap, node);
-
-	node->magic = 0;
-}
-
-static node_t *
-node_remove_first(heap_t *heap)
-{
-	node_t *node = heap_remove_first(heap);
-	node->magic = 0;
-	return (node);
-}
-
-TEST_BEGIN(test_ph_random)
-{
-#define	NNODES 25
-#define	NBAGS 250
-#define	SEED 42
-	sfmt_t *sfmt;
-	uint64_t bag[NNODES];
-	heap_t heap;
-	node_t nodes[NNODES];
-	unsigned i, j, k;
-
-	sfmt = init_gen_rand(SEED);
-	for (i = 0; i < NBAGS; i++) {
-		switch (i) {
-		case 0:
-			/* Insert in order. */
-			for (j = 0; j < NNODES; j++)
-				bag[j] = j;
-			break;
-		case 1:
-			/* Insert in reverse order. */
-			for (j = 0; j < NNODES; j++)
-				bag[j] = NNODES - j - 1;
-			break;
-		default:
-			for (j = 0; j < NNODES; j++)
-				bag[j] = gen_rand64_range(sfmt, NNODES);
-		}
-
-		for (j = 1; j <= NNODES; j++) {
-			/* Initialize heap and nodes. */
-			heap_new(&heap);
-			assert_u_eq(heap_validate(&heap), 0,
-			    "Incorrect node count");
-			for (k = 0; k < j; k++) {
-				nodes[k].magic = NODE_MAGIC;
-				nodes[k].key = bag[k];
-			}
-
-			/* Insert nodes. */
-			for (k = 0; k < j; k++) {
-				heap_insert(&heap, &nodes[k]);
-				if (i % 13 == 12) {
-					/* Trigger merging. */
-					assert_ptr_not_null(heap_first(&heap),
-					    "Heap should not be empty");
-				}
-				assert_u_eq(heap_validate(&heap), k + 1,
-				    "Incorrect node count");
-			}
-
-			assert_false(heap_empty(&heap),
-			    "Heap should not be empty");
-
-			/* Remove nodes. */
-			switch (i % 4) {
-			case 0:
-				for (k = 0; k < j; k++) {
-					assert_u_eq(heap_validate(&heap), j - k,
-					    "Incorrect node count");
-					node_remove(&heap, &nodes[k]);
-					assert_u_eq(heap_validate(&heap), j - k
-					    - 1, "Incorrect node count");
-				}
-				break;
-			case 1:
-				for (k = j; k > 0; k--) {
-					node_remove(&heap, &nodes[k-1]);
-					assert_u_eq(heap_validate(&heap), k - 1,
-					    "Incorrect node count");
-				}
-				break;
-			case 2: {
-				node_t *prev = NULL;
-				for (k = 0; k < j; k++) {
-					node_t *node = node_remove_first(&heap);
-					assert_u_eq(heap_validate(&heap), j - k
-					    - 1, "Incorrect node count");
-					if (prev != NULL) {
-						assert_d_ge(node_cmp(node,
-						    prev), 0,
-						    "Bad removal order");
-					}
-					prev = node;
-				}
-				break;
-			} case 3: {
-				node_t *prev = NULL;
-				for (k = 0; k < j; k++) {
-					node_t *node = heap_first(&heap);
-					assert_u_eq(heap_validate(&heap), j - k,
-					    "Incorrect node count");
-					if (prev != NULL) {
-						assert_d_ge(node_cmp(node,
-						    prev), 0,
-						    "Bad removal order");
-					}
-					node_remove(&heap, node);
-					assert_u_eq(heap_validate(&heap), j - k
-					    - 1, "Incorrect node count");
-					prev = node;
-				}
-				break;
-			} default:
-				not_reached();
-			}
-
-			assert_ptr_null(heap_first(&heap),
-			    "Heap should be empty");
-			assert_true(heap_empty(&heap), "Heap should be empty");
-		}
-	}
-	fini_gen_rand(sfmt);
-#undef NNODES
-#undef SEED
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_ph_empty,
-	    test_ph_random));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/prng.c b/zircon/third_party/ulib/jemalloc/test/unit/prng.c
deleted file mode 100644
index f32d82a..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/prng.c
+++ /dev/null
@@ -1,252 +0,0 @@
-#include "test/jemalloc_test.h"
-
-static void
-test_prng_lg_range_u32(bool atomic)
-{
-	uint32_t sa, sb, ra, rb;
-	unsigned lg_range;
-
-	sa = 42;
-	ra = prng_lg_range_u32(&sa, 32, atomic);
-	sa = 42;
-	rb = prng_lg_range_u32(&sa, 32, atomic);
-	assert_u32_eq(ra, rb,
-	    "Repeated generation should produce repeated results");
-
-	sb = 42;
-	rb = prng_lg_range_u32(&sb, 32, atomic);
-	assert_u32_eq(ra, rb,
-	    "Equivalent generation should produce equivalent results");
-
-	sa = 42;
-	ra = prng_lg_range_u32(&sa, 32, atomic);
-	rb = prng_lg_range_u32(&sa, 32, atomic);
-	assert_u32_ne(ra, rb,
-	    "Full-width results must not immediately repeat");
-
-	sa = 42;
-	ra = prng_lg_range_u32(&sa, 32, atomic);
-	for (lg_range = 31; lg_range > 0; lg_range--) {
-		sb = 42;
-		rb = prng_lg_range_u32(&sb, lg_range, atomic);
-		assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
-		    0, "High order bits should be 0, lg_range=%u", lg_range);
-		assert_u32_eq(rb, (ra >> (32 - lg_range)),
-		    "Expected high order bits of full-width result, "
-		    "lg_range=%u", lg_range);
-	}
-}
-
-static void
-test_prng_lg_range_u64(void)
-{
-	uint64_t sa, sb, ra, rb;
-	unsigned lg_range;
-
-	sa = 42;
-	ra = prng_lg_range_u64(&sa, 64);
-	sa = 42;
-	rb = prng_lg_range_u64(&sa, 64);
-	assert_u64_eq(ra, rb,
-	    "Repeated generation should produce repeated results");
-
-	sb = 42;
-	rb = prng_lg_range_u64(&sb, 64);
-	assert_u64_eq(ra, rb,
-	    "Equivalent generation should produce equivalent results");
-
-	sa = 42;
-	ra = prng_lg_range_u64(&sa, 64);
-	rb = prng_lg_range_u64(&sa, 64);
-	assert_u64_ne(ra, rb,
-	    "Full-width results must not immediately repeat");
-
-	sa = 42;
-	ra = prng_lg_range_u64(&sa, 64);
-	for (lg_range = 63; lg_range > 0; lg_range--) {
-		sb = 42;
-		rb = prng_lg_range_u64(&sb, lg_range);
-		assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
-		    0, "High order bits should be 0, lg_range=%u", lg_range);
-		assert_u64_eq(rb, (ra >> (64 - lg_range)),
-		    "Expected high order bits of full-width result, "
-		    "lg_range=%u", lg_range);
-	}
-}
-
-static void
-test_prng_lg_range_zu(bool atomic)
-{
-	size_t sa, sb, ra, rb;
-	unsigned lg_range;
-
-	sa = 42;
-	ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
-	sa = 42;
-	rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
-	assert_zu_eq(ra, rb,
-	    "Repeated generation should produce repeated results");
-
-	sb = 42;
-	rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
-	assert_zu_eq(ra, rb,
-	    "Equivalent generation should produce equivalent results");
-
-	sa = 42;
-	ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
-	rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
-	assert_zu_ne(ra, rb,
-	    "Full-width results must not immediately repeat");
-
-	sa = 42;
-	ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
-	for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0;
-	    lg_range--) {
-		sb = 42;
-		rb = prng_lg_range_zu(&sb, lg_range, atomic);
-		assert_zu_eq((rb & (SIZE_T_MAX << lg_range)),
-		    0, "High order bits should be 0, lg_range=%u", lg_range);
-		assert_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) -
-		    lg_range)), "Expected high order bits of full-width "
-		    "result, lg_range=%u", lg_range);
-	}
-}
-
-TEST_BEGIN(test_prng_lg_range_u32_nonatomic)
-{
-	test_prng_lg_range_u32(false);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_lg_range_u32_atomic)
-{
-	test_prng_lg_range_u32(true);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_lg_range_u64_nonatomic)
-{
-	test_prng_lg_range_u64();
-}
-TEST_END
-
-TEST_BEGIN(test_prng_lg_range_zu_nonatomic)
-{
-	test_prng_lg_range_zu(false);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_lg_range_zu_atomic)
-{
-	test_prng_lg_range_zu(true);
-}
-TEST_END
-
-static void
-test_prng_range_u32(bool atomic)
-{
-	uint32_t range;
-#define	MAX_RANGE	10000000
-#define	RANGE_STEP	97
-#define	NREPS		10
-
-	for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
-		uint32_t s;
-		unsigned rep;
-
-		s = range;
-		for (rep = 0; rep < NREPS; rep++) {
-			uint32_t r = prng_range_u32(&s, range, atomic);
-
-			assert_u32_lt(r, range, "Out of range");
-		}
-	}
-}
-
-static void
-test_prng_range_u64(void)
-{
-	uint64_t range;
-#define	MAX_RANGE	10000000
-#define	RANGE_STEP	97
-#define	NREPS		10
-
-	for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
-		uint64_t s;
-		unsigned rep;
-
-		s = range;
-		for (rep = 0; rep < NREPS; rep++) {
-			uint64_t r = prng_range_u64(&s, range);
-
-			assert_u64_lt(r, range, "Out of range");
-		}
-	}
-}
-
-static void
-test_prng_range_zu(bool atomic)
-{
-	size_t range;
-#define	MAX_RANGE	10000000
-#define	RANGE_STEP	97
-#define	NREPS		10
-
-	for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
-		size_t s;
-		unsigned rep;
-
-		s = range;
-		for (rep = 0; rep < NREPS; rep++) {
-			size_t r = prng_range_zu(&s, range, atomic);
-
-			assert_zu_lt(r, range, "Out of range");
-		}
-	}
-}
-
-TEST_BEGIN(test_prng_range_u32_nonatomic)
-{
-	test_prng_range_u32(false);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_u32_atomic)
-{
-	test_prng_range_u32(true);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_u64_nonatomic)
-{
-	test_prng_range_u64();
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_zu_nonatomic)
-{
-	test_prng_range_zu(false);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_zu_atomic)
-{
-	test_prng_range_zu(true);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_prng_lg_range_u32_nonatomic,
-	    test_prng_lg_range_u32_atomic,
-	    test_prng_lg_range_u64_nonatomic,
-	    test_prng_lg_range_zu_nonatomic,
-	    test_prng_lg_range_zu_atomic,
-	    test_prng_range_u32_nonatomic,
-	    test_prng_range_u32_atomic,
-	    test_prng_range_u64_nonatomic,
-	    test_prng_range_zu_nonatomic,
-	    test_prng_range_zu_atomic));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/prof_accum.c b/zircon/third_party/ulib/jemalloc/test/unit/prof_accum.c
deleted file mode 100644
index 41ebeea..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/prof_accum.c
+++ /dev/null
@@ -1,90 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define	NTHREADS		4
-#define	NALLOCS_PER_THREAD	50
-#define	DUMP_INTERVAL		1
-#define	BT_COUNT_CHECK_INTERVAL	5
-
-#ifdef JEMALLOC_PROF
-const char *malloc_conf =
-    "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0";
-#endif
-
-static int
-prof_dump_open_intercept(bool propagate_err, const char *filename)
-{
-	int fd;
-
-	fd = open("/dev/null", O_WRONLY);
-	assert_d_ne(fd, -1, "Unexpected open() failure");
-
-	return (fd);
-}
-
-static void *
-alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration)
-{
-	return (btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration));
-}
-
-static void *
-thd_start(void *varg)
-{
-	unsigned thd_ind = *(unsigned *)varg;
-	size_t bt_count_prev, bt_count;
-	unsigned i_prev, i;
-
-	i_prev = 0;
-	bt_count_prev = 0;
-	for (i = 0; i < NALLOCS_PER_THREAD; i++) {
-		void *p = alloc_from_permuted_backtrace(thd_ind, i);
-		dallocx(p, 0);
-		if (i % DUMP_INTERVAL == 0) {
-			assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
-			    0, "Unexpected error while dumping heap profile");
-		}
-
-		if (i % BT_COUNT_CHECK_INTERVAL == 0 ||
-		    i+1 == NALLOCS_PER_THREAD) {
-			bt_count = prof_bt_count();
-			assert_zu_le(bt_count_prev+(i-i_prev), bt_count,
-			    "Expected larger backtrace count increase");
-			i_prev = i;
-			bt_count_prev = bt_count;
-		}
-	}
-
-	return (NULL);
-}
-
-TEST_BEGIN(test_idump)
-{
-	bool active;
-	thd_t thds[NTHREADS];
-	unsigned thd_args[NTHREADS];
-	unsigned i;
-
-	test_skip_if(!config_prof);
-
-	active = true;
-	assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
-	    sizeof(active)), 0,
-	    "Unexpected mallctl failure while activating profiling");
-
-	prof_dump_open = prof_dump_open_intercept;
-
-	for (i = 0; i < NTHREADS; i++) {
-		thd_args[i] = i;
-		thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
-	}
-	for (i = 0; i < NTHREADS; i++)
-		thd_join(thds[i], NULL);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_idump));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/prof_active.c b/zircon/third_party/ulib/jemalloc/test/unit/prof_active.c
deleted file mode 100644
index d3b341d..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/prof_active.c
+++ /dev/null
@@ -1,131 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifdef JEMALLOC_PROF
-const char *malloc_conf =
-    "prof:true,prof_thread_active_init:false,lg_prof_sample:0";
-#endif
-
-static void
-mallctl_bool_get(const char *name, bool expected, const char *func, int line)
-{
-	bool old;
-	size_t sz;
-
-	sz = sizeof(old);
-	assert_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0,
-	    "%s():%d: Unexpected mallctl failure reading %s", func, line, name);
-	assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line,
-	    name);
-}
-
-static void
-mallctl_bool_set(const char *name, bool old_expected, bool val_new,
-    const char *func, int line)
-{
-	bool old;
-	size_t sz;
-
-	sz = sizeof(old);
-	assert_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new,
-	    sizeof(val_new)), 0,
-	    "%s():%d: Unexpected mallctl failure reading/writing %s", func,
-	    line, name);
-	assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func,
-	    line, name);
-}
-
-static void
-mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func,
-    int line)
-{
-	mallctl_bool_get("prof.active", prof_active_old_expected, func, line);
-}
-#define	mallctl_prof_active_get(a)					\
-	mallctl_prof_active_get_impl(a, __func__, __LINE__)
-
-static void
-mallctl_prof_active_set_impl(bool prof_active_old_expected,
-    bool prof_active_new, const char *func, int line)
-{
-	mallctl_bool_set("prof.active", prof_active_old_expected,
-	    prof_active_new, func, line);
-}
-#define	mallctl_prof_active_set(a, b)					\
-	mallctl_prof_active_set_impl(a, b, __func__, __LINE__)
-
-static void
-mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected,
-    const char *func, int line)
-{
-	mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected,
-	    func, line);
-}
-#define	mallctl_thread_prof_active_get(a)				\
-	mallctl_thread_prof_active_get_impl(a, __func__, __LINE__)
-
-static void
-mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected,
-    bool thread_prof_active_new, const char *func, int line)
-{
-	mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected,
-	    thread_prof_active_new, func, line);
-}
-#define	mallctl_thread_prof_active_set(a, b)				\
-	mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__)
-
-static void
-prof_sampling_probe_impl(bool expect_sample, const char *func, int line)
-{
-	void *p;
-	size_t expected_backtraces = expect_sample ? 1 : 0;
-
-	assert_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func,
-	    line);
-	p = mallocx(1, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
-	assert_zu_eq(prof_bt_count(), expected_backtraces,
-	    "%s():%d: Unexpected backtrace count", func, line);
-	dallocx(p, 0);
-}
-#define	prof_sampling_probe(a)						\
-	prof_sampling_probe_impl(a, __func__, __LINE__)
-
-TEST_BEGIN(test_prof_active)
-{
-	test_skip_if(!config_prof);
-
-	mallctl_prof_active_get(true);
-	mallctl_thread_prof_active_get(false);
-
-	mallctl_prof_active_set(true, true);
-	mallctl_thread_prof_active_set(false, false);
-	/* prof.active, !thread.prof.active. */
-	prof_sampling_probe(false);
-
-	mallctl_prof_active_set(true, false);
-	mallctl_thread_prof_active_set(false, false);
-	/* !prof.active, !thread.prof.active. */
-	prof_sampling_probe(false);
-
-	mallctl_prof_active_set(false, false);
-	mallctl_thread_prof_active_set(false, true);
-	/* !prof.active, thread.prof.active. */
-	prof_sampling_probe(false);
-
-	mallctl_prof_active_set(false, true);
-	mallctl_thread_prof_active_set(true, true);
-	/* prof.active, thread.prof.active. */
-	prof_sampling_probe(true);
-
-	/* Restore settings. */
-	mallctl_prof_active_set(true, true);
-	mallctl_thread_prof_active_set(true, false);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_prof_active));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/prof_gdump.c b/zircon/third_party/ulib/jemalloc/test/unit/prof_gdump.c
deleted file mode 100644
index 53f7cad..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/prof_gdump.c
+++ /dev/null
@@ -1,81 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifdef JEMALLOC_PROF
-const char *malloc_conf = "prof:true,prof_active:false,prof_gdump:true";
-#endif
-
-static bool did_prof_dump_open;
-
-static int
-prof_dump_open_intercept(bool propagate_err, const char *filename)
-{
-	int fd;
-
-	did_prof_dump_open = true;
-
-	fd = open("/dev/null", O_WRONLY);
-	assert_d_ne(fd, -1, "Unexpected open() failure");
-
-	return (fd);
-}
-
-TEST_BEGIN(test_gdump)
-{
-	bool active, gdump, gdump_old;
-	void *p, *q, *r, *s;
-	size_t sz;
-
-	test_skip_if(!config_prof);
-
-	active = true;
-	assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
-	    sizeof(active)), 0,
-	    "Unexpected mallctl failure while activating profiling");
-
-	prof_dump_open = prof_dump_open_intercept;
-
-	did_prof_dump_open = false;
-	p = mallocx((1U << LG_LARGE_MINCLASS), 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
-	assert_true(did_prof_dump_open, "Expected a profile dump");
-
-	did_prof_dump_open = false;
-	q = mallocx((1U << LG_LARGE_MINCLASS), 0);
-	assert_ptr_not_null(q, "Unexpected mallocx() failure");
-	assert_true(did_prof_dump_open, "Expected a profile dump");
-
-	gdump = false;
-	sz = sizeof(gdump_old);
-	assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
-	    (void *)&gdump, sizeof(gdump)), 0,
-	    "Unexpected mallctl failure while disabling prof.gdump");
-	assert(gdump_old);
-	did_prof_dump_open = false;
-	r = mallocx((1U << LG_LARGE_MINCLASS), 0);
-	assert_ptr_not_null(q, "Unexpected mallocx() failure");
-	assert_false(did_prof_dump_open, "Unexpected profile dump");
-
-	gdump = true;
-	sz = sizeof(gdump_old);
-	assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
-	    (void *)&gdump, sizeof(gdump)), 0,
-	    "Unexpected mallctl failure while enabling prof.gdump");
-	assert(!gdump_old);
-	did_prof_dump_open = false;
-	s = mallocx((1U << LG_LARGE_MINCLASS), 0);
-	assert_ptr_not_null(q, "Unexpected mallocx() failure");
-	assert_true(did_prof_dump_open, "Expected a profile dump");
-
-	dallocx(p, 0);
-	dallocx(q, 0);
-	dallocx(r, 0);
-	dallocx(s, 0);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_gdump));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/prof_idump.c b/zircon/third_party/ulib/jemalloc/test/unit/prof_idump.c
deleted file mode 100644
index 43824c6..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/prof_idump.c
+++ /dev/null
@@ -1,58 +0,0 @@
-#include "test/jemalloc_test.h"
-
-const char *malloc_conf = ""
-#ifdef JEMALLOC_PROF
-    "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0"
-    ",lg_prof_interval:0"
-#  ifdef JEMALLOC_TCACHE
-    ","
-#  endif
-#endif
-#ifdef JEMALLOC_TCACHE
-    "tcache:false"
-#endif
-    ;
-
-static bool did_prof_dump_open;
-
-static int
-prof_dump_open_intercept(bool propagate_err, const char *filename)
-{
-	int fd;
-
-	did_prof_dump_open = true;
-
-	fd = open("/dev/null", O_WRONLY);
-	assert_d_ne(fd, -1, "Unexpected open() failure");
-
-	return (fd);
-}
-
-TEST_BEGIN(test_idump)
-{
-	bool active;
-	void *p;
-
-	test_skip_if(!config_prof);
-
-	active = true;
-	assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
-	    sizeof(active)), 0,
-	    "Unexpected mallctl failure while activating profiling");
-
-	prof_dump_open = prof_dump_open_intercept;
-
-	did_prof_dump_open = false;
-	p = mallocx(1, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
-	dallocx(p, 0);
-	assert_true(did_prof_dump_open, "Expected a profile dump");
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_idump));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/prof_reset.c b/zircon/third_party/ulib/jemalloc/test/unit/prof_reset.c
deleted file mode 100644
index cc13e37..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/prof_reset.c
+++ /dev/null
@@ -1,301 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifdef JEMALLOC_PROF
-const char *malloc_conf =
-    "prof:true,prof_active:false,lg_prof_sample:0";
-#endif
-
-static int
-prof_dump_open_intercept(bool propagate_err, const char *filename)
-{
-	int fd;
-
-	fd = open("/dev/null", O_WRONLY);
-	assert_d_ne(fd, -1, "Unexpected open() failure");
-
-	return (fd);
-}
-
-static void
-set_prof_active(bool active)
-{
-	assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
-	    sizeof(active)), 0, "Unexpected mallctl failure");
-}
-
-static size_t
-get_lg_prof_sample(void)
-{
-	size_t lg_prof_sample;
-	size_t sz = sizeof(size_t);
-
-	assert_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz,
-	    NULL, 0), 0,
-	    "Unexpected mallctl failure while reading profiling sample rate");
-	return (lg_prof_sample);
-}
-
-static void
-do_prof_reset(size_t lg_prof_sample)
-{
-	assert_d_eq(mallctl("prof.reset", NULL, NULL,
-	    (void *)&lg_prof_sample, sizeof(size_t)), 0,
-	    "Unexpected mallctl failure while resetting profile data");
-	assert_zu_eq(lg_prof_sample, get_lg_prof_sample(),
-	    "Expected profile sample rate change");
-}
-
-TEST_BEGIN(test_prof_reset_basic)
-{
-	size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next;
-	size_t sz;
-	unsigned i;
-
-	test_skip_if(!config_prof);
-
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig,
-	    &sz, NULL, 0), 0,
-	    "Unexpected mallctl failure while reading profiling sample rate");
-	assert_zu_eq(lg_prof_sample_orig, 0,
-	    "Unexpected profiling sample rate");
-	lg_prof_sample = get_lg_prof_sample();
-	assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
-	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
-	    "\"prof.lg_sample\"");
-
-	/* Test simple resets. */
-	for (i = 0; i < 2; i++) {
-		assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
-		    "Unexpected mallctl failure while resetting profile data");
-		lg_prof_sample = get_lg_prof_sample();
-		assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
-		    "Unexpected profile sample rate change");
-	}
-
-	/* Test resets with prof.lg_sample changes. */
-	lg_prof_sample_next = 1;
-	for (i = 0; i < 2; i++) {
-		do_prof_reset(lg_prof_sample_next);
-		lg_prof_sample = get_lg_prof_sample();
-		assert_zu_eq(lg_prof_sample, lg_prof_sample_next,
-		    "Expected profile sample rate change");
-		lg_prof_sample_next = lg_prof_sample_orig;
-	}
-
-	/* Make sure the test code restored prof.lg_sample. */
-	lg_prof_sample = get_lg_prof_sample();
-	assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
-	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
-	    "\"prof.lg_sample\"");
-}
-TEST_END
-
-bool prof_dump_header_intercepted = false;
-prof_cnt_t cnt_all_copy = {0, 0, 0, 0};
-static bool
-prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err,
-    const prof_cnt_t *cnt_all)
-{
-	prof_dump_header_intercepted = true;
-	memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t));
-
-	return (false);
-}
-
-TEST_BEGIN(test_prof_reset_cleanup)
-{
-	void *p;
-	prof_dump_header_t *prof_dump_header_orig;
-
-	test_skip_if(!config_prof);
-
-	set_prof_active(true);
-
-	assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
-	p = mallocx(1, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
-	assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
-
-	prof_dump_header_orig = prof_dump_header;
-	prof_dump_header = prof_dump_header_intercept;
-	assert_false(prof_dump_header_intercepted, "Unexpected intercept");
-
-	assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
-	    0, "Unexpected error while dumping heap profile");
-	assert_true(prof_dump_header_intercepted, "Expected intercept");
-	assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation");
-
-	assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
-	    "Unexpected error while resetting heap profile data");
-	assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
-	    0, "Unexpected error while dumping heap profile");
-	assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations");
-	assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
-
-	prof_dump_header = prof_dump_header_orig;
-
-	dallocx(p, 0);
-	assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
-
-	set_prof_active(false);
-}
-TEST_END
-
-#define	NTHREADS		4
-#define	NALLOCS_PER_THREAD	(1U << 13)
-#define	OBJ_RING_BUF_COUNT	1531
-#define	RESET_INTERVAL		(1U << 10)
-#define	DUMP_INTERVAL		3677
-static void *
-thd_start(void *varg)
-{
-	unsigned thd_ind = *(unsigned *)varg;
-	unsigned i;
-	void *objs[OBJ_RING_BUF_COUNT];
-
-	memset(objs, 0, sizeof(objs));
-
-	for (i = 0; i < NALLOCS_PER_THREAD; i++) {
-		if (i % RESET_INTERVAL == 0) {
-			assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
-			    0, "Unexpected error while resetting heap profile "
-			    "data");
-		}
-
-		if (i % DUMP_INTERVAL == 0) {
-			assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
-			    0, "Unexpected error while dumping heap profile");
-		}
-
-		{
-			void **pp = &objs[i % OBJ_RING_BUF_COUNT];
-			if (*pp != NULL) {
-				dallocx(*pp, 0);
-				*pp = NULL;
-			}
-			*pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i);
-			assert_ptr_not_null(*pp,
-			    "Unexpected btalloc() failure");
-		}
-	}
-
-	/* Clean up any remaining objects. */
-	for (i = 0; i < OBJ_RING_BUF_COUNT; i++) {
-		void **pp = &objs[i % OBJ_RING_BUF_COUNT];
-		if (*pp != NULL) {
-			dallocx(*pp, 0);
-			*pp = NULL;
-		}
-	}
-
-	return (NULL);
-}
-
-TEST_BEGIN(test_prof_reset)
-{
-	size_t lg_prof_sample_orig;
-	thd_t thds[NTHREADS];
-	unsigned thd_args[NTHREADS];
-	unsigned i;
-	size_t bt_count, tdata_count;
-
-	test_skip_if(!config_prof);
-
-	bt_count = prof_bt_count();
-	assert_zu_eq(bt_count, 0,
-	    "Unexpected pre-existing tdata structures");
-	tdata_count = prof_tdata_count();
-
-	lg_prof_sample_orig = get_lg_prof_sample();
-	do_prof_reset(5);
-
-	set_prof_active(true);
-
-	for (i = 0; i < NTHREADS; i++) {
-		thd_args[i] = i;
-		thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
-	}
-	for (i = 0; i < NTHREADS; i++)
-		thd_join(thds[i], NULL);
-
-	assert_zu_eq(prof_bt_count(), bt_count,
-	    "Unexpected bactrace count change");
-	assert_zu_eq(prof_tdata_count(), tdata_count,
-	    "Unexpected remaining tdata structures");
-
-	set_prof_active(false);
-
-	do_prof_reset(lg_prof_sample_orig);
-}
-TEST_END
-#undef NTHREADS
-#undef NALLOCS_PER_THREAD
-#undef OBJ_RING_BUF_COUNT
-#undef RESET_INTERVAL
-#undef DUMP_INTERVAL
-
-/* Test sampling at the same allocation site across resets. */
-#define	NITER 10
-TEST_BEGIN(test_xallocx)
-{
-	size_t lg_prof_sample_orig;
-	unsigned i;
-	void *ptrs[NITER];
-
-	test_skip_if(!config_prof);
-
-	lg_prof_sample_orig = get_lg_prof_sample();
-	set_prof_active(true);
-
-	/* Reset profiling. */
-	do_prof_reset(0);
-
-	for (i = 0; i < NITER; i++) {
-		void *p;
-		size_t sz, nsz;
-
-		/* Reset profiling. */
-		do_prof_reset(0);
-
-		/* Allocate small object (which will be promoted). */
-		p = ptrs[i] = mallocx(1, 0);
-		assert_ptr_not_null(p, "Unexpected mallocx() failure");
-
-		/* Reset profiling. */
-		do_prof_reset(0);
-
-		/* Perform successful xallocx(). */
-		sz = sallocx(p, 0);
-		assert_zu_eq(xallocx(p, sz, 0, 0), sz,
-		    "Unexpected xallocx() failure");
-
-		/* Perform unsuccessful xallocx(). */
-		nsz = nallocx(sz+1, 0);
-		assert_zu_eq(xallocx(p, nsz, 0, 0), sz,
-		    "Unexpected xallocx() success");
-	}
-
-	for (i = 0; i < NITER; i++) {
-		/* dallocx. */
-		dallocx(ptrs[i], 0);
-	}
-
-	set_prof_active(false);
-	do_prof_reset(lg_prof_sample_orig);
-}
-TEST_END
-#undef NITER
-
-int
-main(void)
-{
-	/* Intercept dumping prior to running any tests. */
-	prof_dump_open = prof_dump_open_intercept;
-
-	return (test(
-	    test_prof_reset_basic,
-	    test_prof_reset_cleanup,
-	    test_prof_reset,
-	    test_xallocx));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/prof_tctx.c b/zircon/third_party/ulib/jemalloc/test/unit/prof_tctx.c
deleted file mode 100644
index 8f928eb..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/prof_tctx.c
+++ /dev/null
@@ -1,57 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifdef JEMALLOC_PROF
-const char *malloc_conf = "prof:true,lg_prof_sample:0";
-#endif
-
-TEST_BEGIN(test_prof_realloc)
-{
-	tsdn_t *tsdn;
-	int flags;
-	void *p, *q;
-	extent_t *extent_p, *extent_q;
-	prof_tctx_t *tctx_p, *tctx_q;
-	uint64_t curobjs_0, curobjs_1, curobjs_2, curobjs_3;
-
-	test_skip_if(!config_prof);
-
-	tsdn = tsdn_fetch();
-	flags = MALLOCX_TCACHE_NONE;
-
-	prof_cnt_all(&curobjs_0, NULL, NULL, NULL);
-	p = mallocx(1024, flags);
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
-	extent_p = iealloc(tsdn, p);
-	assert_ptr_not_null(extent_p, "Unexpected iealloc() failure");
-	tctx_p = prof_tctx_get(tsdn, extent_p, p);
-	assert_ptr_ne(tctx_p, (prof_tctx_t *)(uintptr_t)1U,
-	    "Expected valid tctx");
-	prof_cnt_all(&curobjs_1, NULL, NULL, NULL);
-	assert_u64_eq(curobjs_0 + 1, curobjs_1,
-	    "Allocation should have increased sample size");
-
-	q = rallocx(p, 2048, flags);
-	assert_ptr_ne(p, q, "Expected move");
-	assert_ptr_not_null(p, "Unexpected rmallocx() failure");
-	extent_q = iealloc(tsdn, q);
-	assert_ptr_not_null(extent_q, "Unexpected iealloc() failure");
-	tctx_q = prof_tctx_get(tsdn, extent_q, q);
-	assert_ptr_ne(tctx_q, (prof_tctx_t *)(uintptr_t)1U,
-	    "Expected valid tctx");
-	prof_cnt_all(&curobjs_2, NULL, NULL, NULL);
-	assert_u64_eq(curobjs_1, curobjs_2,
-	    "Reallocation should not have changed sample size");
-
-	dallocx(q, flags);
-	prof_cnt_all(&curobjs_3, NULL, NULL, NULL);
-	assert_u64_eq(curobjs_0, curobjs_3,
-	    "Sample size should have returned to base level");
-}
-TEST_END
-
-int
-main(void)
-{
-	return test(
-	    test_prof_realloc);
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/prof_thread_name.c b/zircon/third_party/ulib/jemalloc/test/unit/prof_thread_name.c
deleted file mode 100644
index 8699936..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/prof_thread_name.c
+++ /dev/null
@@ -1,129 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifdef JEMALLOC_PROF
-const char *malloc_conf = "prof:true,prof_active:false";
-#endif
-
-static void
-mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
-    int line)
-{
-	const char *thread_name_old;
-	size_t sz;
-
-	sz = sizeof(thread_name_old);
-	assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz,
-	    NULL, 0), 0,
-	    "%s():%d: Unexpected mallctl failure reading thread.prof.name",
-	    func, line);
-	assert_str_eq(thread_name_old, thread_name_expected,
-	    "%s():%d: Unexpected thread.prof.name value", func, line);
-}
-#define	mallctl_thread_name_get(a)					\
-	mallctl_thread_name_get_impl(a, __func__, __LINE__)
-
-static void
-mallctl_thread_name_set_impl(const char *thread_name, const char *func,
-    int line)
-{
-	assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
-	    (void *)&thread_name, sizeof(thread_name)), 0,
-	    "%s():%d: Unexpected mallctl failure reading thread.prof.name",
-	    func, line);
-	mallctl_thread_name_get_impl(thread_name, func, line);
-}
-#define	mallctl_thread_name_set(a)					\
-	mallctl_thread_name_set_impl(a, __func__, __LINE__)
-
-TEST_BEGIN(test_prof_thread_name_validation)
-{
-	const char *thread_name;
-
-	test_skip_if(!config_prof);
-
-	mallctl_thread_name_get("");
-	mallctl_thread_name_set("hi there");
-
-	/* NULL input shouldn't be allowed. */
-	thread_name = NULL;
-	assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
-	    (void *)&thread_name, sizeof(thread_name)), EFAULT,
-	    "Unexpected mallctl result writing \"%s\" to thread.prof.name",
-	    thread_name);
-
-	/* '\n' shouldn't be allowed. */
-	thread_name = "hi\nthere";
-	assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
-	    (void *)&thread_name, sizeof(thread_name)), EFAULT,
-	    "Unexpected mallctl result writing \"%s\" to thread.prof.name",
-	    thread_name);
-
-	/* Simultaneous read/write shouldn't be allowed. */
-	{
-		const char *thread_name_old;
-		size_t sz;
-
-		sz = sizeof(thread_name_old);
-		assert_d_eq(mallctl("thread.prof.name",
-		    (void *)&thread_name_old, &sz, (void *)&thread_name,
-		    sizeof(thread_name)), EPERM,
-		    "Unexpected mallctl result writing \"%s\" to "
-		    "thread.prof.name", thread_name);
-	}
-
-	mallctl_thread_name_set("");
-}
-TEST_END
-
-#define	NTHREADS	4
-#define	NRESET		25
-static void *
-thd_start(void *varg)
-{
-	unsigned thd_ind = *(unsigned *)varg;
-	char thread_name[16] = "";
-	unsigned i;
-
-	malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind);
-
-	mallctl_thread_name_get("");
-	mallctl_thread_name_set(thread_name);
-
-	for (i = 0; i < NRESET; i++) {
-		assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
-		    "Unexpected error while resetting heap profile data");
-		mallctl_thread_name_get(thread_name);
-	}
-
-	mallctl_thread_name_set(thread_name);
-	mallctl_thread_name_set("");
-
-	return (NULL);
-}
-
-TEST_BEGIN(test_prof_thread_name_threaded)
-{
-	thd_t thds[NTHREADS];
-	unsigned thd_args[NTHREADS];
-	unsigned i;
-
-	test_skip_if(!config_prof);
-
-	for (i = 0; i < NTHREADS; i++) {
-		thd_args[i] = i;
-		thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
-	}
-	for (i = 0; i < NTHREADS; i++)
-		thd_join(thds[i], NULL);
-}
-TEST_END
-#undef NTHREADS
-#undef NRESET
-
-int
-main(void)
-{
-	return (test(
-	    test_prof_thread_name_validation,
-	    test_prof_thread_name_threaded));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/ql.c b/zircon/third_party/ulib/jemalloc/test/unit/ql.c
deleted file mode 100644
index 2ebb450..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/ql.c
+++ /dev/null
@@ -1,208 +0,0 @@
-#include "test/jemalloc_test.h"
-
-/* Number of ring entries, in [2..26]. */
-#define	NENTRIES 9
-
-typedef struct list_s list_t;
-typedef ql_head(list_t) list_head_t;
-
-struct list_s {
-	ql_elm(list_t) link;
-	char id;
-};
-
-static void
-test_empty_list(list_head_t *head)
-{
-	list_t *t;
-	unsigned i;
-
-	assert_ptr_null(ql_first(head), "Unexpected element for empty list");
-	assert_ptr_null(ql_last(head, link),
-	    "Unexpected element for empty list");
-
-	i = 0;
-	ql_foreach(t, head, link) {
-		i++;
-	}
-	assert_u_eq(i, 0, "Unexpected element for empty list");
-
-	i = 0;
-	ql_reverse_foreach(t, head, link) {
-		i++;
-	}
-	assert_u_eq(i, 0, "Unexpected element for empty list");
-}
-
-TEST_BEGIN(test_ql_empty)
-{
-	list_head_t head;
-
-	ql_new(&head);
-	test_empty_list(&head);
-}
-TEST_END
-
-static void
-init_entries(list_t *entries, unsigned nentries)
-{
-	unsigned i;
-
-	for (i = 0; i < nentries; i++) {
-		entries[i].id = 'a' + i;
-		ql_elm_new(&entries[i], link);
-	}
-}
-
-static void
-test_entries_list(list_head_t *head, list_t *entries, unsigned nentries)
-{
-	list_t *t;
-	unsigned i;
-
-	assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch");
-	assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id,
-	    "Element id mismatch");
-
-	i = 0;
-	ql_foreach(t, head, link) {
-		assert_c_eq(t->id, entries[i].id, "Element id mismatch");
-		i++;
-	}
-
-	i = 0;
-	ql_reverse_foreach(t, head, link) {
-		assert_c_eq(t->id, entries[nentries-i-1].id,
-		    "Element id mismatch");
-		i++;
-	}
-
-	for (i = 0; i < nentries-1; i++) {
-		t = ql_next(head, &entries[i], link);
-		assert_c_eq(t->id, entries[i+1].id, "Element id mismatch");
-	}
-	assert_ptr_null(ql_next(head, &entries[nentries-1], link),
-	    "Unexpected element");
-
-	assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element");
-	for (i = 1; i < nentries; i++) {
-		t = ql_prev(head, &entries[i], link);
-		assert_c_eq(t->id, entries[i-1].id, "Element id mismatch");
-	}
-}
-
-TEST_BEGIN(test_ql_tail_insert)
-{
-	list_head_t head;
-	list_t entries[NENTRIES];
-	unsigned i;
-
-	ql_new(&head);
-	init_entries(entries, sizeof(entries)/sizeof(list_t));
-	for (i = 0; i < NENTRIES; i++)
-		ql_tail_insert(&head, &entries[i], link);
-
-	test_entries_list(&head, entries, NENTRIES);
-}
-TEST_END
-
-TEST_BEGIN(test_ql_tail_remove)
-{
-	list_head_t head;
-	list_t entries[NENTRIES];
-	unsigned i;
-
-	ql_new(&head);
-	init_entries(entries, sizeof(entries)/sizeof(list_t));
-	for (i = 0; i < NENTRIES; i++)
-		ql_tail_insert(&head, &entries[i], link);
-
-	for (i = 0; i < NENTRIES; i++) {
-		test_entries_list(&head, entries, NENTRIES-i);
-		ql_tail_remove(&head, list_t, link);
-	}
-	test_empty_list(&head);
-}
-TEST_END
-
-TEST_BEGIN(test_ql_head_insert)
-{
-	list_head_t head;
-	list_t entries[NENTRIES];
-	unsigned i;
-
-	ql_new(&head);
-	init_entries(entries, sizeof(entries)/sizeof(list_t));
-	for (i = 0; i < NENTRIES; i++)
-		ql_head_insert(&head, &entries[NENTRIES-i-1], link);
-
-	test_entries_list(&head, entries, NENTRIES);
-}
-TEST_END
-
-TEST_BEGIN(test_ql_head_remove)
-{
-	list_head_t head;
-	list_t entries[NENTRIES];
-	unsigned i;
-
-	ql_new(&head);
-	init_entries(entries, sizeof(entries)/sizeof(list_t));
-	for (i = 0; i < NENTRIES; i++)
-		ql_head_insert(&head, &entries[NENTRIES-i-1], link);
-
-	for (i = 0; i < NENTRIES; i++) {
-		test_entries_list(&head, &entries[i], NENTRIES-i);
-		ql_head_remove(&head, list_t, link);
-	}
-	test_empty_list(&head);
-}
-TEST_END
-
-TEST_BEGIN(test_ql_insert)
-{
-	list_head_t head;
-	list_t entries[8];
-	list_t *a, *b, *c, *d, *e, *f, *g, *h;
-
-	ql_new(&head);
-	init_entries(entries, sizeof(entries)/sizeof(list_t));
-	a = &entries[0];
-	b = &entries[1];
-	c = &entries[2];
-	d = &entries[3];
-	e = &entries[4];
-	f = &entries[5];
-	g = &entries[6];
-	h = &entries[7];
-
-	/*
-	 * ql_remove(), ql_before_insert(), and ql_after_insert() are used
-	 * internally by other macros that are already tested, so there's no
-	 * need to test them completely.  However, insertion/deletion from the
-	 * middle of lists is not otherwise tested; do so here.
-	 */
-	ql_tail_insert(&head, f, link);
-	ql_before_insert(&head, f, b, link);
-	ql_before_insert(&head, f, c, link);
-	ql_after_insert(f, h, link);
-	ql_after_insert(f, g, link);
-	ql_before_insert(&head, b, a, link);
-	ql_after_insert(c, d, link);
-	ql_before_insert(&head, f, e, link);
-
-	test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t));
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_ql_empty,
-	    test_ql_tail_insert,
-	    test_ql_tail_remove,
-	    test_ql_head_insert,
-	    test_ql_head_remove,
-	    test_ql_insert));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/qr.c b/zircon/third_party/ulib/jemalloc/test/unit/qr.c
deleted file mode 100644
index 7c9c102..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/qr.c
+++ /dev/null
@@ -1,247 +0,0 @@
-#include "test/jemalloc_test.h"
-
-/* Number of ring entries, in [2..26]. */
-#define	NENTRIES 9
-/* Split index, in [1..NENTRIES). */
-#define	SPLIT_INDEX 5
-
-typedef struct ring_s ring_t;
-
-struct ring_s {
-	qr(ring_t) link;
-	char id;
-};
-
-static void
-init_entries(ring_t *entries)
-{
-	unsigned i;
-
-	for (i = 0; i < NENTRIES; i++) {
-		qr_new(&entries[i], link);
-		entries[i].id = 'a' + i;
-	}
-}
-
-static void
-test_independent_entries(ring_t *entries)
-{
-	ring_t *t;
-	unsigned i, j;
-
-	for (i = 0; i < NENTRIES; i++) {
-		j = 0;
-		qr_foreach(t, &entries[i], link) {
-			j++;
-		}
-		assert_u_eq(j, 1,
-		    "Iteration over single-element ring should visit precisely "
-		    "one element");
-	}
-	for (i = 0; i < NENTRIES; i++) {
-		j = 0;
-		qr_reverse_foreach(t, &entries[i], link) {
-			j++;
-		}
-		assert_u_eq(j, 1,
-		    "Iteration over single-element ring should visit precisely "
-		    "one element");
-	}
-	for (i = 0; i < NENTRIES; i++) {
-		t = qr_next(&entries[i], link);
-		assert_ptr_eq(t, &entries[i],
-		    "Next element in single-element ring should be same as "
-		    "current element");
-	}
-	for (i = 0; i < NENTRIES; i++) {
-		t = qr_prev(&entries[i], link);
-		assert_ptr_eq(t, &entries[i],
-		    "Previous element in single-element ring should be same as "
-		    "current element");
-	}
-}
-
-TEST_BEGIN(test_qr_one)
-{
-	ring_t entries[NENTRIES];
-
-	init_entries(entries);
-	test_independent_entries(entries);
-}
-TEST_END
-
-static void
-test_entries_ring(ring_t *entries)
-{
-	ring_t *t;
-	unsigned i, j;
-
-	for (i = 0; i < NENTRIES; i++) {
-		j = 0;
-		qr_foreach(t, &entries[i], link) {
-			assert_c_eq(t->id, entries[(i+j) % NENTRIES].id,
-			    "Element id mismatch");
-			j++;
-		}
-	}
-	for (i = 0; i < NENTRIES; i++) {
-		j = 0;
-		qr_reverse_foreach(t, &entries[i], link) {
-			assert_c_eq(t->id, entries[(NENTRIES+i-j-1) %
-			    NENTRIES].id, "Element id mismatch");
-			j++;
-		}
-	}
-	for (i = 0; i < NENTRIES; i++) {
-		t = qr_next(&entries[i], link);
-		assert_c_eq(t->id, entries[(i+1) % NENTRIES].id,
-		    "Element id mismatch");
-	}
-	for (i = 0; i < NENTRIES; i++) {
-		t = qr_prev(&entries[i], link);
-		assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
-		    "Element id mismatch");
-	}
-}
-
-TEST_BEGIN(test_qr_after_insert)
-{
-	ring_t entries[NENTRIES];
-	unsigned i;
-
-	init_entries(entries);
-	for (i = 1; i < NENTRIES; i++)
-		qr_after_insert(&entries[i - 1], &entries[i], link);
-	test_entries_ring(entries);
-}
-TEST_END
-
-TEST_BEGIN(test_qr_remove)
-{
-	ring_t entries[NENTRIES];
-	ring_t *t;
-	unsigned i, j;
-
-	init_entries(entries);
-	for (i = 1; i < NENTRIES; i++)
-		qr_after_insert(&entries[i - 1], &entries[i], link);
-
-	for (i = 0; i < NENTRIES; i++) {
-		j = 0;
-		qr_foreach(t, &entries[i], link) {
-			assert_c_eq(t->id, entries[i+j].id,
-			    "Element id mismatch");
-			j++;
-		}
-		j = 0;
-		qr_reverse_foreach(t, &entries[i], link) {
-			assert_c_eq(t->id, entries[NENTRIES - 1 - j].id,
-			"Element id mismatch");
-			j++;
-		}
-		qr_remove(&entries[i], link);
-	}
-	test_independent_entries(entries);
-}
-TEST_END
-
-TEST_BEGIN(test_qr_before_insert)
-{
-	ring_t entries[NENTRIES];
-	ring_t *t;
-	unsigned i, j;
-
-	init_entries(entries);
-	for (i = 1; i < NENTRIES; i++)
-		qr_before_insert(&entries[i - 1], &entries[i], link);
-	for (i = 0; i < NENTRIES; i++) {
-		j = 0;
-		qr_foreach(t, &entries[i], link) {
-			assert_c_eq(t->id, entries[(NENTRIES+i-j) %
-			    NENTRIES].id, "Element id mismatch");
-			j++;
-		}
-	}
-	for (i = 0; i < NENTRIES; i++) {
-		j = 0;
-		qr_reverse_foreach(t, &entries[i], link) {
-			assert_c_eq(t->id, entries[(i+j+1) % NENTRIES].id,
-			    "Element id mismatch");
-			j++;
-		}
-	}
-	for (i = 0; i < NENTRIES; i++) {
-		t = qr_next(&entries[i], link);
-		assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
-		    "Element id mismatch");
-	}
-	for (i = 0; i < NENTRIES; i++) {
-		t = qr_prev(&entries[i], link);
-		assert_c_eq(t->id, entries[(i+1) % NENTRIES].id,
-		    "Element id mismatch");
-	}
-}
-TEST_END
-
-static void
-test_split_entries(ring_t *entries)
-{
-	ring_t *t;
-	unsigned i, j;
-
-	for (i = 0; i < NENTRIES; i++) {
-		j = 0;
-		qr_foreach(t, &entries[i], link) {
-			if (i < SPLIT_INDEX) {
-				assert_c_eq(t->id,
-				    entries[(i+j) % SPLIT_INDEX].id,
-				    "Element id mismatch");
-			} else {
-				assert_c_eq(t->id, entries[(i+j-SPLIT_INDEX) %
-				    (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id,
-				    "Element id mismatch");
-			}
-			j++;
-		}
-	}
-}
-
-TEST_BEGIN(test_qr_meld_split)
-{
-	ring_t entries[NENTRIES];
-	unsigned i;
-
-	init_entries(entries);
-	for (i = 1; i < NENTRIES; i++)
-		qr_after_insert(&entries[i - 1], &entries[i], link);
-
-	qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link);
-	test_split_entries(entries);
-
-	qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link);
-	test_entries_ring(entries);
-
-	qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link);
-	test_split_entries(entries);
-
-	qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link);
-	test_entries_ring(entries);
-
-	qr_split(&entries[0], &entries[0], ring_t, link);
-	test_entries_ring(entries);
-
-	qr_meld(&entries[0], &entries[0], ring_t, link);
-	test_entries_ring(entries);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_qr_one,
-	    test_qr_after_insert,
-	    test_qr_remove,
-	    test_qr_before_insert,
-	    test_qr_meld_split));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/rb.c b/zircon/third_party/ulib/jemalloc/test/unit/rb.c
deleted file mode 100644
index 56e0021..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/rb.c
+++ /dev/null
@@ -1,353 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define	rbtn_black_height(a_type, a_field, a_rbt, r_height) do {	\
-    a_type *rbp_bh_t;							\
-    for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0;			\
-	 rbp_bh_t != NULL;						\
-      rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) {		\
-	if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) {			\
-	    (r_height)++;						\
-	}								\
-    }									\
-} while (0)
-
-typedef struct node_s node_t;
-
-struct node_s {
-#define	NODE_MAGIC 0x9823af7e
-	uint32_t magic;
-	rb_node(node_t) link;
-	uint64_t key;
-};
-
-static int
-node_cmp(const node_t *a, const node_t *b) {
-	int ret;
-
-	assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
-	assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
-
-	ret = (a->key > b->key) - (a->key < b->key);
-	if (ret == 0) {
-		/*
-		 * Duplicates are not allowed in the tree, so force an
-		 * arbitrary ordering for non-identical items with equal keys.
-		 */
-		ret = (((uintptr_t)a) > ((uintptr_t)b))
-		    - (((uintptr_t)a) < ((uintptr_t)b));
-	}
-	return (ret);
-}
-
-typedef rb_tree(node_t) tree_t;
-rb_gen(static, tree_, tree_t, node_t, link, node_cmp);
-
-TEST_BEGIN(test_rb_empty)
-{
-	tree_t tree;
-	node_t key;
-
-	tree_new(&tree);
-
-	assert_true(tree_empty(&tree), "Tree should be empty");
-	assert_ptr_null(tree_first(&tree), "Unexpected node");
-	assert_ptr_null(tree_last(&tree), "Unexpected node");
-
-	key.key = 0;
-	key.magic = NODE_MAGIC;
-	assert_ptr_null(tree_search(&tree, &key), "Unexpected node");
-
-	key.key = 0;
-	key.magic = NODE_MAGIC;
-	assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node");
-
-	key.key = 0;
-	key.magic = NODE_MAGIC;
-	assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node");
-}
-TEST_END
-
-static unsigned
-tree_recurse(node_t *node, unsigned black_height, unsigned black_depth)
-{
-	unsigned ret = 0;
-	node_t *left_node;
-	node_t *right_node;
-
-	if (node == NULL)
-		return (ret);
-
-	left_node = rbtn_left_get(node_t, link, node);
-	right_node = rbtn_right_get(node_t, link, node);
-
-	if (!rbtn_red_get(node_t, link, node))
-		black_depth++;
-
-	/* Red nodes must be interleaved with black nodes. */
-	if (rbtn_red_get(node_t, link, node)) {
-		if (left_node != NULL)
-			assert_false(rbtn_red_get(node_t, link, left_node),
-				"Node should be black");
-		if (right_node != NULL)
-			assert_false(rbtn_red_get(node_t, link, right_node),
-			    "Node should be black");
-	}
-
-	/* Self. */
-	assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
-
-	/* Left subtree. */
-	if (left_node != NULL)
-		ret += tree_recurse(left_node, black_height, black_depth);
-	else
-		ret += (black_depth != black_height);
-
-	/* Right subtree. */
-	if (right_node != NULL)
-		ret += tree_recurse(right_node, black_height, black_depth);
-	else
-		ret += (black_depth != black_height);
-
-	return (ret);
-}
-
-static node_t *
-tree_iterate_cb(tree_t *tree, node_t *node, void *data)
-{
-	unsigned *i = (unsigned *)data;
-	node_t *search_node;
-
-	assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
-
-	/* Test rb_search(). */
-	search_node = tree_search(tree, node);
-	assert_ptr_eq(search_node, node,
-	    "tree_search() returned unexpected node");
-
-	/* Test rb_nsearch(). */
-	search_node = tree_nsearch(tree, node);
-	assert_ptr_eq(search_node, node,
-	    "tree_nsearch() returned unexpected node");
-
-	/* Test rb_psearch(). */
-	search_node = tree_psearch(tree, node);
-	assert_ptr_eq(search_node, node,
-	    "tree_psearch() returned unexpected node");
-
-	(*i)++;
-
-	return (NULL);
-}
-
-static unsigned
-tree_iterate(tree_t *tree)
-{
-	unsigned i;
-
-	i = 0;
-	tree_iter(tree, NULL, tree_iterate_cb, (void *)&i);
-
-	return (i);
-}
-
-static unsigned
-tree_iterate_reverse(tree_t *tree)
-{
-	unsigned i;
-
-	i = 0;
-	tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i);
-
-	return (i);
-}
-
-static void
-node_remove(tree_t *tree, node_t *node, unsigned nnodes)
-{
-	node_t *search_node;
-	unsigned black_height, imbalances;
-
-	tree_remove(tree, node);
-
-	/* Test rb_nsearch(). */
-	search_node = tree_nsearch(tree, node);
-	if (search_node != NULL) {
-		assert_u64_ge(search_node->key, node->key,
-		    "Key ordering error");
-	}
-
-	/* Test rb_psearch(). */
-	search_node = tree_psearch(tree, node);
-	if (search_node != NULL) {
-		assert_u64_le(search_node->key, node->key,
-		    "Key ordering error");
-	}
-
-	node->magic = 0;
-
-	rbtn_black_height(node_t, link, tree, black_height);
-	imbalances = tree_recurse(tree->rbt_root, black_height, 0);
-	assert_u_eq(imbalances, 0, "Tree is unbalanced");
-	assert_u_eq(tree_iterate(tree), nnodes-1,
-	    "Unexpected node iteration count");
-	assert_u_eq(tree_iterate_reverse(tree), nnodes-1,
-	    "Unexpected node iteration count");
-}
-
-static node_t *
-remove_iterate_cb(tree_t *tree, node_t *node, void *data)
-{
-	unsigned *nnodes = (unsigned *)data;
-	node_t *ret = tree_next(tree, node);
-
-	node_remove(tree, node, *nnodes);
-
-	return (ret);
-}
-
-static node_t *
-remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data)
-{
-	unsigned *nnodes = (unsigned *)data;
-	node_t *ret = tree_prev(tree, node);
-
-	node_remove(tree, node, *nnodes);
-
-	return (ret);
-}
-
-static void
-destroy_cb(node_t *node, void *data)
-{
-	unsigned *nnodes = (unsigned *)data;
-
-	assert_u_gt(*nnodes, 0, "Destruction removed too many nodes");
-	(*nnodes)--;
-}
-
-TEST_BEGIN(test_rb_random)
-{
-#define	NNODES 25
-#define	NBAGS 250
-#define	SEED 42
-	sfmt_t *sfmt;
-	uint64_t bag[NNODES];
-	tree_t tree;
-	node_t nodes[NNODES];
-	unsigned i, j, k, black_height, imbalances;
-
-	sfmt = init_gen_rand(SEED);
-	for (i = 0; i < NBAGS; i++) {
-		switch (i) {
-		case 0:
-			/* Insert in order. */
-			for (j = 0; j < NNODES; j++)
-				bag[j] = j;
-			break;
-		case 1:
-			/* Insert in reverse order. */
-			for (j = 0; j < NNODES; j++)
-				bag[j] = NNODES - j - 1;
-			break;
-		default:
-			for (j = 0; j < NNODES; j++)
-				bag[j] = gen_rand64_range(sfmt, NNODES);
-		}
-
-		for (j = 1; j <= NNODES; j++) {
-			/* Initialize tree and nodes. */
-			tree_new(&tree);
-			for (k = 0; k < j; k++) {
-				nodes[k].magic = NODE_MAGIC;
-				nodes[k].key = bag[k];
-			}
-
-			/* Insert nodes. */
-			for (k = 0; k < j; k++) {
-				tree_insert(&tree, &nodes[k]);
-
-				rbtn_black_height(node_t, link, &tree,
-				    black_height);
-				imbalances = tree_recurse(tree.rbt_root,
-				    black_height, 0);
-				assert_u_eq(imbalances, 0,
-				    "Tree is unbalanced");
-
-				assert_u_eq(tree_iterate(&tree), k+1,
-				    "Unexpected node iteration count");
-				assert_u_eq(tree_iterate_reverse(&tree), k+1,
-				    "Unexpected node iteration count");
-
-				assert_false(tree_empty(&tree),
-				    "Tree should not be empty");
-				assert_ptr_not_null(tree_first(&tree),
-				    "Tree should not be empty");
-				assert_ptr_not_null(tree_last(&tree),
-				    "Tree should not be empty");
-
-				tree_next(&tree, &nodes[k]);
-				tree_prev(&tree, &nodes[k]);
-			}
-
-			/* Remove nodes. */
-			switch (i % 5) {
-			case 0:
-				for (k = 0; k < j; k++)
-					node_remove(&tree, &nodes[k], j - k);
-				break;
-			case 1:
-				for (k = j; k > 0; k--)
-					node_remove(&tree, &nodes[k-1], k);
-				break;
-			case 2: {
-				node_t *start;
-				unsigned nnodes = j;
-
-				start = NULL;
-				do {
-					start = tree_iter(&tree, start,
-					    remove_iterate_cb, (void *)&nnodes);
-					nnodes--;
-				} while (start != NULL);
-				assert_u_eq(nnodes, 0,
-				    "Removal terminated early");
-				break;
-			} case 3: {
-				node_t *start;
-				unsigned nnodes = j;
-
-				start = NULL;
-				do {
-					start = tree_reverse_iter(&tree, start,
-					    remove_reverse_iterate_cb,
-					    (void *)&nnodes);
-					nnodes--;
-				} while (start != NULL);
-				assert_u_eq(nnodes, 0,
-				    "Removal terminated early");
-				break;
-			} case 4: {
-				unsigned nnodes = j;
-				tree_destroy(&tree, destroy_cb, &nnodes);
-				assert_u_eq(nnodes, 0,
-				    "Destruction terminated early");
-				break;
-			} default:
-				not_reached();
-			}
-		}
-	}
-	fini_gen_rand(sfmt);
-#undef NNODES
-#undef NBAGS
-#undef SEED
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_rb_empty,
-	    test_rb_random));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/rtree.c b/zircon/third_party/ulib/jemalloc/test/unit/rtree.c
deleted file mode 100644
index d2f3705..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/rtree.c
+++ /dev/null
@@ -1,297 +0,0 @@
-#include "test/jemalloc_test.h"
-
-rtree_node_alloc_t *rtree_node_alloc_orig;
-rtree_node_dalloc_t *rtree_node_dalloc_orig;
-
-rtree_t *test_rtree;
-
-static rtree_elm_t *
-rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms)
-{
-	rtree_elm_t *node;
-
-	if (rtree != test_rtree)
-		return rtree_node_alloc_orig(tsdn, rtree, nelms);
-
-	malloc_mutex_unlock(tsdn, &rtree->init_lock);
-	node = (rtree_elm_t *)calloc(nelms, sizeof(rtree_elm_t));
-	assert_ptr_not_null(node, "Unexpected calloc() failure");
-	malloc_mutex_lock(tsdn, &rtree->init_lock);
-
-	return (node);
-}
-
-static void
-rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node)
-{
-	if (rtree != test_rtree) {
-		rtree_node_dalloc_orig(tsdn, rtree, node);
-		return;
-	}
-
-	free(node);
-}
-
-TEST_BEGIN(test_rtree_read_empty)
-{
-	tsdn_t *tsdn;
-	unsigned i;
-
-	tsdn = tsdn_fetch();
-
-	for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
-		rtree_t rtree;
-		rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
-		test_rtree = &rtree;
-		assert_false(rtree_new(&rtree, i),
-		    "Unexpected rtree_new() failure");
-		assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx, 0, false),
-		    "rtree_read() should return NULL for empty tree");
-		rtree_delete(tsdn, &rtree);
-		test_rtree = NULL;
-	}
-}
-TEST_END
-
-#define	NTHREADS	8
-#define	MAX_NBITS	18
-#define	NITERS		1000
-#define	SEED		42
-
-typedef struct {
-	unsigned	nbits;
-	rtree_t		rtree;
-	uint32_t	seed;
-} thd_start_arg_t;
-
-static void *
-thd_start(void *varg)
-{
-	thd_start_arg_t *arg = (thd_start_arg_t *)varg;
-	rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
-	sfmt_t *sfmt;
-	extent_t *extent;
-	tsdn_t *tsdn;
-	unsigned i;
-
-	sfmt = init_gen_rand(arg->seed);
-	extent = (extent_t *)malloc(sizeof(extent));
-	assert_ptr_not_null(extent, "Unexpected malloc() failure");
-	tsdn = tsdn_fetch();
-
-	for (i = 0; i < NITERS; i++) {
-		uintptr_t key = (uintptr_t)gen_rand64(sfmt);
-		if (i % 2 == 0) {
-			rtree_elm_t *elm;
-
-			elm = rtree_elm_acquire(tsdn, &arg->rtree, &rtree_ctx,
-			    key, false, true);
-			assert_ptr_not_null(elm,
-			    "Unexpected rtree_elm_acquire() failure");
-			rtree_elm_write_acquired(tsdn, &arg->rtree, elm,
-			    extent);
-			rtree_elm_release(tsdn, &arg->rtree, elm);
-
-			elm = rtree_elm_acquire(tsdn, &arg->rtree, &rtree_ctx,
-			    key, true, false);
-			assert_ptr_not_null(elm,
-			    "Unexpected rtree_elm_acquire() failure");
-			rtree_elm_read_acquired(tsdn, &arg->rtree, elm);
-			rtree_elm_release(tsdn, &arg->rtree, elm);
-		} else
-			rtree_read(tsdn, &arg->rtree, &rtree_ctx, key, false);
-	}
-
-	free(extent);
-	fini_gen_rand(sfmt);
-	return (NULL);
-}
-
-TEST_BEGIN(test_rtree_concurrent)
-{
-	thd_start_arg_t arg;
-	thd_t thds[NTHREADS];
-	sfmt_t *sfmt;
-	tsdn_t *tsdn;
-	unsigned i, j;
-
-	sfmt = init_gen_rand(SEED);
-	tsdn = tsdn_fetch();
-	for (i = 1; i < MAX_NBITS; i++) {
-		arg.nbits = i;
-		test_rtree = &arg.rtree;
-		assert_false(rtree_new(&arg.rtree, arg.nbits),
-		    "Unexpected rtree_new() failure");
-		arg.seed = gen_rand32(sfmt);
-		for (j = 0; j < NTHREADS; j++)
-			thd_create(&thds[j], thd_start, (void *)&arg);
-		for (j = 0; j < NTHREADS; j++)
-			thd_join(thds[j], NULL);
-		rtree_delete(tsdn, &arg.rtree);
-		test_rtree = NULL;
-	}
-	fini_gen_rand(sfmt);
-}
-TEST_END
-
-#undef NTHREADS
-#undef MAX_NBITS
-#undef NITERS
-#undef SEED
-
-TEST_BEGIN(test_rtree_extrema)
-{
-	unsigned i;
-	extent_t extent_a, extent_b;
-	tsdn_t *tsdn;
-
-	tsdn = tsdn_fetch();
-
-	for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
-		rtree_t rtree;
-		rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
-		test_rtree = &rtree;
-		assert_false(rtree_new(&rtree, i),
-		    "Unexpected rtree_new() failure");
-
-		assert_false(rtree_write(tsdn, &rtree, &rtree_ctx, 0,
-		    &extent_a), "Unexpected rtree_write() failure, i=%u", i);
-		assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx, 0, true),
-		    &extent_a,
-		    "rtree_read() should return previously set value, i=%u", i);
-
-		assert_false(rtree_write(tsdn, &rtree, &rtree_ctx,
-		    ~((uintptr_t)0), &extent_b),
-		    "Unexpected rtree_write() failure, i=%u", i);
-		assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx,
-		    ~((uintptr_t)0), true), &extent_b,
-		    "rtree_read() should return previously set value, i=%u", i);
-
-		rtree_delete(tsdn, &rtree);
-		test_rtree = NULL;
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_rtree_bits)
-{
-	tsdn_t *tsdn;
-	unsigned i, j, k;
-
-	tsdn = tsdn_fetch();
-
-	for (i = 1; i < (sizeof(uintptr_t) << 3); i++) {
-		uintptr_t keys[] = {0, 1,
-		    (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1};
-		extent_t extent;
-		rtree_t rtree;
-		rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
-
-		test_rtree = &rtree;
-		assert_false(rtree_new(&rtree, i),
-		    "Unexpected rtree_new() failure");
-
-		for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
-			assert_false(rtree_write(tsdn, &rtree, &rtree_ctx,
-			    keys[j], &extent),
-			    "Unexpected rtree_write() failure");
-			for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) {
-				assert_ptr_eq(rtree_read(tsdn, &rtree,
-				    &rtree_ctx, keys[k], true), &extent,
-				    "rtree_read() should return previously set "
-				    "value and ignore insignificant key bits; "
-				    "i=%u, j=%u, k=%u, set key=%#"FMTxPTR", "
-				    "get key=%#"FMTxPTR, i, j, k, keys[j],
-				    keys[k]);
-			}
-			assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx,
-			    (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)), false),
-			    "Only leftmost rtree leaf should be set; "
-			    "i=%u, j=%u", i, j);
-			rtree_clear(tsdn, &rtree, &rtree_ctx, keys[j]);
-		}
-
-		rtree_delete(tsdn, &rtree);
-		test_rtree = NULL;
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_rtree_random)
-{
-	unsigned i;
-	sfmt_t *sfmt;
-	tsdn_t *tsdn;
-#define	NSET 16
-#define	SEED 42
-
-	sfmt = init_gen_rand(SEED);
-	tsdn = tsdn_fetch();
-	for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
-		uintptr_t keys[NSET];
-		extent_t extent;
-		unsigned j;
-		rtree_t rtree;
-		rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
-		rtree_elm_t *elm;
-
-		test_rtree = &rtree;
-		assert_false(rtree_new(&rtree, i),
-		    "Unexpected rtree_new() failure");
-
-		for (j = 0; j < NSET; j++) {
-			keys[j] = (uintptr_t)gen_rand64(sfmt);
-			elm = rtree_elm_acquire(tsdn, &rtree, &rtree_ctx,
-			    keys[j], false, true);
-			assert_ptr_not_null(elm,
-			    "Unexpected rtree_elm_acquire() failure");
-			rtree_elm_write_acquired(tsdn, &rtree, elm, &extent);
-			rtree_elm_release(tsdn, &rtree, elm);
-			assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx,
-			    keys[j], true), &extent,
-			    "rtree_read() should return previously set value");
-		}
-		for (j = 0; j < NSET; j++) {
-			assert_ptr_eq(rtree_read(tsdn, &rtree, &rtree_ctx,
-			    keys[j], true), &extent,
-			    "rtree_read() should return previously set value, "
-			    "j=%u", j);
-		}
-
-		for (j = 0; j < NSET; j++) {
-			rtree_clear(tsdn, &rtree, &rtree_ctx, keys[j]);
-			assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx,
-			    keys[j], true),
-			    "rtree_read() should return previously set value");
-		}
-		for (j = 0; j < NSET; j++) {
-			assert_ptr_null(rtree_read(tsdn, &rtree, &rtree_ctx,
-			    keys[j], true),
-			    "rtree_read() should return previously set value");
-		}
-
-		rtree_delete(tsdn, &rtree);
-		test_rtree = NULL;
-	}
-	fini_gen_rand(sfmt);
-#undef NSET
-#undef SEED
-}
-TEST_END
-
-int
-main(void)
-{
-	rtree_node_alloc_orig = rtree_node_alloc;
-	rtree_node_alloc = rtree_node_alloc_intercept;
-	rtree_node_dalloc_orig = rtree_node_dalloc;
-	rtree_node_dalloc = rtree_node_dalloc_intercept;
-	test_rtree = NULL;
-
-	return (test(
-	    test_rtree_read_empty,
-	    test_rtree_concurrent,
-	    test_rtree_extrema,
-	    test_rtree_bits,
-	    test_rtree_random));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/size_classes.c b/zircon/third_party/ulib/jemalloc/test/unit/size_classes.c
deleted file mode 100644
index f7c14bc..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/size_classes.c
+++ /dev/null
@@ -1,185 +0,0 @@
-#include "test/jemalloc_test.h"
-
-static size_t
-get_max_size_class(void)
-{
-	unsigned nlextents;
-	size_t mib[4];
-	size_t sz, miblen, max_size_class;
-
-	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
-	    0), 0, "Unexpected mallctl() error");
-
-	miblen = sizeof(mib) / sizeof(size_t);
-	assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() error");
-	mib[2] = nlextents - 1;
-
-	sz = sizeof(size_t);
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
-	    NULL, 0), 0, "Unexpected mallctlbymib() error");
-
-	return (max_size_class);
-}
-
-TEST_BEGIN(test_size_classes)
-{
-	size_t size_class, max_size_class;
-	szind_t index, max_index;
-
-	max_size_class = get_max_size_class();
-	max_index = size2index(max_size_class);
-
-	for (index = 0, size_class = index2size(index); index < max_index ||
-	    size_class < max_size_class; index++, size_class =
-	    index2size(index)) {
-		assert_true(index < max_index,
-		    "Loop conditionals should be equivalent; index=%u, "
-		    "size_class=%zu (%#zx)", index, size_class, size_class);
-		assert_true(size_class < max_size_class,
-		    "Loop conditionals should be equivalent; index=%u, "
-		    "size_class=%zu (%#zx)", index, size_class, size_class);
-
-		assert_u_eq(index, size2index(size_class),
-		    "size2index() does not reverse index2size(): index=%u -->"
-		    " size_class=%zu --> index=%u --> size_class=%zu", index,
-		    size_class, size2index(size_class),
-		    index2size(size2index(size_class)));
-		assert_zu_eq(size_class, index2size(size2index(size_class)),
-		    "index2size() does not reverse size2index(): index=%u -->"
-		    " size_class=%zu --> index=%u --> size_class=%zu", index,
-		    size_class, size2index(size_class),
-		    index2size(size2index(size_class)));
-
-		assert_u_eq(index+1, size2index(size_class+1),
-		    "Next size_class does not round up properly");
-
-		assert_zu_eq(size_class, (index > 0) ?
-		    s2u(index2size(index-1)+1) : s2u(1),
-		    "s2u() does not round up to size class");
-		assert_zu_eq(size_class, s2u(size_class-1),
-		    "s2u() does not round up to size class");
-		assert_zu_eq(size_class, s2u(size_class),
-		    "s2u() does not compute same size class");
-		assert_zu_eq(s2u(size_class+1), index2size(index+1),
-		    "s2u() does not round up to next size class");
-	}
-
-	assert_u_eq(index, size2index(index2size(index)),
-	    "size2index() does not reverse index2size()");
-	assert_zu_eq(max_size_class, index2size(size2index(max_size_class)),
-	    "index2size() does not reverse size2index()");
-
-	assert_zu_eq(size_class, s2u(index2size(index-1)+1),
-	    "s2u() does not round up to size class");
-	assert_zu_eq(size_class, s2u(size_class-1),
-	    "s2u() does not round up to size class");
-	assert_zu_eq(size_class, s2u(size_class),
-	    "s2u() does not compute same size class");
-}
-TEST_END
-
-TEST_BEGIN(test_psize_classes)
-{
-	size_t size_class, max_psz;
-	pszind_t pind, max_pind;
-
-	max_psz = get_max_size_class() + PAGE;
-	max_pind = psz2ind(max_psz);
-
-	for (pind = 0, size_class = pind2sz(pind); pind < max_pind || size_class
-	    < max_psz; pind++, size_class = pind2sz(pind)) {
-		assert_true(pind < max_pind,
-		    "Loop conditionals should be equivalent; pind=%u, "
-		    "size_class=%zu (%#zx)", pind, size_class, size_class);
-		assert_true(size_class < max_psz,
-		    "Loop conditionals should be equivalent; pind=%u, "
-		    "size_class=%zu (%#zx)", pind, size_class, size_class);
-
-		assert_u_eq(pind, psz2ind(size_class),
-		    "psz2ind() does not reverse pind2sz(): pind=%u -->"
-		    " size_class=%zu --> pind=%u --> size_class=%zu", pind,
-		    size_class, psz2ind(size_class),
-		    pind2sz(psz2ind(size_class)));
-		assert_zu_eq(size_class, pind2sz(psz2ind(size_class)),
-		    "pind2sz() does not reverse psz2ind(): pind=%u -->"
-		    " size_class=%zu --> pind=%u --> size_class=%zu", pind,
-		    size_class, psz2ind(size_class),
-		    pind2sz(psz2ind(size_class)));
-
-		assert_u_eq(pind+1, psz2ind(size_class+1),
-		    "Next size_class does not round up properly");
-
-		assert_zu_eq(size_class, (pind > 0) ?
-		    psz2u(pind2sz(pind-1)+1) : psz2u(1),
-		    "psz2u() does not round up to size class");
-		assert_zu_eq(size_class, psz2u(size_class-1),
-		    "psz2u() does not round up to size class");
-		assert_zu_eq(size_class, psz2u(size_class),
-		    "psz2u() does not compute same size class");
-		assert_zu_eq(psz2u(size_class+1), pind2sz(pind+1),
-		    "psz2u() does not round up to next size class");
-	}
-
-	assert_u_eq(pind, psz2ind(pind2sz(pind)),
-	    "psz2ind() does not reverse pind2sz()");
-	assert_zu_eq(max_psz, pind2sz(psz2ind(max_psz)),
-	    "pind2sz() does not reverse psz2ind()");
-
-	assert_zu_eq(size_class, psz2u(pind2sz(pind-1)+1),
-	    "psz2u() does not round up to size class");
-	assert_zu_eq(size_class, psz2u(size_class-1),
-	    "psz2u() does not round up to size class");
-	assert_zu_eq(size_class, psz2u(size_class),
-	    "psz2u() does not compute same size class");
-}
-TEST_END
-
-TEST_BEGIN(test_overflow)
-{
-	size_t max_size_class, max_psz;
-
-	max_size_class = get_max_size_class();
-	max_psz = max_size_class + PAGE;
-
-	assert_u_eq(size2index(max_size_class+1), NSIZES,
-	    "size2index() should return NSIZES on overflow");
-	assert_u_eq(size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
-	    "size2index() should return NSIZES on overflow");
-	assert_u_eq(size2index(SIZE_T_MAX), NSIZES,
-	    "size2index() should return NSIZES on overflow");
-
-	assert_zu_eq(s2u(max_size_class+1), 0,
-	    "s2u() should return 0 for unsupported size");
-	assert_zu_eq(s2u(ZU(PTRDIFF_MAX)+1), 0,
-	    "s2u() should return 0 for unsupported size");
-	assert_zu_eq(s2u(SIZE_T_MAX), 0,
-	    "s2u() should return 0 on overflow");
-
-	assert_u_eq(psz2ind(max_size_class+1), NPSIZES,
-	    "psz2ind() should return NPSIZES on overflow");
-	assert_u_eq(psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES,
-	    "psz2ind() should return NPSIZES on overflow");
-	assert_u_eq(psz2ind(SIZE_T_MAX), NPSIZES,
-	    "psz2ind() should return NPSIZES on overflow");
-
-	assert_zu_eq(psz2u(max_size_class+1), max_psz,
-	    "psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported"
-	    " size");
-	assert_zu_eq(psz2u(ZU(PTRDIFF_MAX)+1), max_psz,
-	    "psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported "
-	    "size");
-	assert_zu_eq(psz2u(SIZE_T_MAX), max_psz,
-	    "psz2u() should return (LARGE_MAXCLASS + PAGE) on overflow");
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_size_classes,
-	    test_psize_classes,
-	    test_overflow));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/slab.c b/zircon/third_party/ulib/jemalloc/test/unit/slab.c
deleted file mode 100644
index 7e6a62f..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/slab.c
+++ /dev/null
@@ -1,34 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_arena_slab_regind)
-{
-	szind_t binind;
-
-	for (binind = 0; binind < NBINS; binind++) {
-		size_t regind;
-		extent_t slab;
-		const arena_bin_info_t *bin_info = &arena_bin_info[binind];
-		extent_init(&slab, NULL, mallocx(bin_info->slab_size,
-		    MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, 0, 0, true,
-		    false, true, true);
-		assert_ptr_not_null(extent_addr_get(&slab),
-		    "Unexpected malloc() failure");
-		for (regind = 0; regind < bin_info->nregs; regind++) {
-			void *reg = (void *)((uintptr_t)extent_addr_get(&slab) +
-			    (bin_info->reg_size * regind));
-			assert_zu_eq(arena_slab_regind(&slab, binind, reg),
-			    regind,
-			    "Incorrect region index computed for size %zu",
-			    bin_info->reg_size);
-		}
-		free(extent_addr_get(&slab));
-	}
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_arena_slab_regind));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/smoothstep.c b/zircon/third_party/ulib/jemalloc/test/unit/smoothstep.c
deleted file mode 100644
index 071aede..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/smoothstep.c
+++ /dev/null
@@ -1,105 +0,0 @@
-#include "test/jemalloc_test.h"
-
-static const uint64_t smoothstep_tab[] = {
-#define	STEP(step, h, x, y) \
-	h,
-	SMOOTHSTEP
-#undef STEP
-};
-
-TEST_BEGIN(test_smoothstep_integral)
-{
-	uint64_t sum, min, max;
-	unsigned i;
-
-	/*
-	 * The integral of smoothstep in the [0..1] range equals 1/2.  Verify
-	 * that the fixed point representation's integral is no more than
-	 * rounding error distant from 1/2.  Regarding rounding, each table
-	 * element is rounded down to the nearest fixed point value, so the
-	 * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps.
-	 */
-	sum = 0;
-	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
-		sum += smoothstep_tab[i];
-
-	max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1);
-	min = max - SMOOTHSTEP_NSTEPS;
-
-	assert_u64_ge(sum, min,
-	    "Integral too small, even accounting for truncation");
-	assert_u64_le(sum, max, "Integral exceeds 1/2");
-	if (false) {
-		malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n",
-		    max - sum, SMOOTHSTEP_NSTEPS);
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_smoothstep_monotonic)
-{
-	uint64_t prev_h;
-	unsigned i;
-
-	/*
-	 * The smoothstep function is monotonic in [0..1], i.e. its slope is
-	 * non-negative.  In practice we want to parametrize table generation
-	 * such that piecewise slope is greater than zero, but do not require
-	 * that here.
-	 */
-	prev_h = 0;
-	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
-		uint64_t h = smoothstep_tab[i];
-		assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
-		prev_h = h;
-	}
-	assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
-	    (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1");
-}
-TEST_END
-
-TEST_BEGIN(test_smoothstep_slope)
-{
-	uint64_t prev_h, prev_delta;
-	unsigned i;
-
-	/*
-	 * The smoothstep slope strictly increases until x=0.5, and then
-	 * strictly decreases until x=1.0.  Verify the slightly weaker
-	 * requirement of monotonicity, so that inadequate table precision does
-	 * not cause false test failures.
-	 */
-	prev_h = 0;
-	prev_delta = 0;
-	for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) {
-		uint64_t h = smoothstep_tab[i];
-		uint64_t delta = h - prev_h;
-		assert_u64_ge(delta, prev_delta,
-		    "Slope must monotonically increase in 0.0 <= x <= 0.5, "
-		    "i=%u", i);
-		prev_h = h;
-		prev_delta = delta;
-	}
-
-	prev_h = KQU(1) << SMOOTHSTEP_BFP;
-	prev_delta = 0;
-	for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) {
-		uint64_t h = smoothstep_tab[i];
-		uint64_t delta = prev_h - h;
-		assert_u64_ge(delta, prev_delta,
-		    "Slope must monotonically decrease in 0.5 <= x <= 1.0, "
-		    "i=%u", i);
-		prev_h = h;
-		prev_delta = delta;
-	}
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_smoothstep_integral,
-	    test_smoothstep_monotonic,
-	    test_smoothstep_slope));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/stats.c b/zircon/third_party/ulib/jemalloc/test/unit/stats.c
deleted file mode 100644
index 18856f1..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/stats.c
+++ /dev/null
@@ -1,360 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_stats_summary)
-{
-	size_t sz, allocated, active, resident, mapped;
-	int expected = config_stats ? 0 : ENOENT;
-
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
-	    0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
-
-	if (config_stats) {
-		assert_zu_le(allocated, active,
-		    "allocated should be no larger than active");
-		assert_zu_lt(active, resident,
-		    "active should be less than resident");
-		assert_zu_lt(active, mapped,
-		    "active should be less than mapped");
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_stats_large)
-{
-	void *p;
-	uint64_t epoch;
-	size_t allocated;
-	uint64_t nmalloc, ndalloc, nrequests;
-	size_t sz;
-	int expected = config_stats ? 0 : ENOENT;
-
-	p = mallocx(SMALL_MAXCLASS+1, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
-
-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-	    0, "Unexpected mallctl() failure");
-
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.large.allocated",
-	    (void *)&allocated, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
-	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.large.nrequests",
-	    (void *)&nrequests, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
-
-	if (config_stats) {
-		assert_zu_gt(allocated, 0,
-		    "allocated should be greater than zero");
-		assert_u64_ge(nmalloc, ndalloc,
-		    "nmalloc should be at least as large as ndalloc");
-		assert_u64_le(nmalloc, nrequests,
-		    "nmalloc should no larger than nrequests");
-	}
-
-	dallocx(p, 0);
-}
-TEST_END
-
-TEST_BEGIN(test_stats_arenas_summary)
-{
-	unsigned arena;
-	void *little, *large;
-	uint64_t epoch;
-	size_t sz;
-	int expected = config_stats ? 0 : ENOENT;
-	size_t mapped;
-	uint64_t npurge, nmadvise, purged;
-
-	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-	    sizeof(arena)), 0, "Unexpected mallctl() failure");
-
-	little = mallocx(SMALL_MAXCLASS, 0);
-	assert_ptr_not_null(little, "Unexpected mallocx() failure");
-	large = mallocx((1U << LG_LARGE_MINCLASS), 0);
-	assert_ptr_not_null(large, "Unexpected mallocx() failure");
-
-	dallocx(little, 0);
-	dallocx(large, 0);
-
-	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
-	    config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
-
-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-	    0, "Unexpected mallctl() failure");
-
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL,
-	    0), expected, "Unexepected mallctl() result");
-	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge, &sz, NULL,
-	    0), expected, "Unexepected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.nmadvise", (void *)&nmadvise, &sz,
-	    NULL, 0), expected, "Unexepected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.purged", (void *)&purged, &sz, NULL,
-	    0), expected, "Unexepected mallctl() result");
-
-	if (config_stats) {
-		assert_u64_gt(npurge, 0,
-		    "At least one purge should have occurred");
-		assert_u64_le(nmadvise, purged,
-		    "nmadvise should be no greater than purged");
-	}
-}
-TEST_END
-
-void *
-thd_start(void *arg)
-{
-	return (NULL);
-}
-
-static void
-no_lazy_lock(void)
-{
-	thd_t thd;
-
-	thd_create(&thd, thd_start, NULL);
-	thd_join(thd, NULL);
-}
-
-TEST_BEGIN(test_stats_arenas_small)
-{
-	unsigned arena;
-	void *p;
-	size_t sz, allocated;
-	uint64_t epoch, nmalloc, ndalloc, nrequests;
-	int expected = config_stats ? 0 : ENOENT;
-
-	no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
-
-	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-	    sizeof(arena)), 0, "Unexpected mallctl() failure");
-
-	p = mallocx(SMALL_MAXCLASS, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
-
-	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
-	    config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
-
-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-	    0, "Unexpected mallctl() failure");
-
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.small.allocated",
-	    (void *)&allocated, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
-	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.small.nrequests",
-	    (void *)&nrequests, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
-
-	if (config_stats) {
-		assert_zu_gt(allocated, 0,
-		    "allocated should be greater than zero");
-		assert_u64_gt(nmalloc, 0,
-		    "nmalloc should be no greater than zero");
-		assert_u64_ge(nmalloc, ndalloc,
-		    "nmalloc should be at least as large as ndalloc");
-		assert_u64_gt(nrequests, 0,
-		    "nrequests should be greater than zero");
-	}
-
-	dallocx(p, 0);
-}
-TEST_END
-
-TEST_BEGIN(test_stats_arenas_large)
-{
-	unsigned arena;
-	void *p;
-	size_t sz, allocated;
-	uint64_t epoch, nmalloc, ndalloc;
-	int expected = config_stats ? 0 : ENOENT;
-
-	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-	    sizeof(arena)), 0, "Unexpected mallctl() failure");
-
-	p = mallocx((1U << LG_LARGE_MINCLASS), 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
-
-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-	    0, "Unexpected mallctl() failure");
-
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.large.allocated",
-	    (void *)&allocated, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
-	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-
-	if (config_stats) {
-		assert_zu_gt(allocated, 0,
-		    "allocated should be greater than zero");
-		assert_u64_gt(nmalloc, 0,
-		    "nmalloc should be greater than zero");
-		assert_u64_ge(nmalloc, ndalloc,
-		    "nmalloc should be at least as large as ndalloc");
-	}
-
-	dallocx(p, 0);
-}
-TEST_END
-
-TEST_BEGIN(test_stats_arenas_bins)
-{
-	unsigned arena;
-	void *p;
-	size_t sz, curslabs, curregs;
-	uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
-	uint64_t nslabs, nreslabs;
-	int expected = config_stats ? 0 : ENOENT;
-
-	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-	    sizeof(arena)), 0, "Unexpected mallctl() failure");
-
-	p = mallocx(arena_bin_info[0].reg_size, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
-
-	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
-	    config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
-
-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-	    0, "Unexpected mallctl() failure");
-
-	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", (void *)&nmalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", (void *)&ndalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests",
-	    (void *)&nrequests, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", (void *)&curregs,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-
-	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", (void *)&nfills,
-	    &sz, NULL, 0), config_tcache ? expected : ENOENT,
-	    "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", (void *)&nflushes,
-	    &sz, NULL, 0), config_tcache ? expected : ENOENT,
-	    "Unexpected mallctl() result");
-
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nslabs", (void *)&nslabs,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nreslabs", (void *)&nreslabs,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.curslabs", (void *)&curslabs,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-
-	if (config_stats) {
-		assert_u64_gt(nmalloc, 0,
-		    "nmalloc should be greater than zero");
-		assert_u64_ge(nmalloc, ndalloc,
-		    "nmalloc should be at least as large as ndalloc");
-		assert_u64_gt(nrequests, 0,
-		    "nrequests should be greater than zero");
-		assert_zu_gt(curregs, 0,
-		    "allocated should be greater than zero");
-		if (config_tcache) {
-			assert_u64_gt(nfills, 0,
-			    "At least one fill should have occurred");
-			assert_u64_gt(nflushes, 0,
-			    "At least one flush should have occurred");
-		}
-		assert_u64_gt(nslabs, 0,
-		    "At least one slab should have been allocated");
-		assert_zu_gt(curslabs, 0,
-		    "At least one slab should be currently allocated");
-	}
-
-	dallocx(p, 0);
-}
-TEST_END
-
-TEST_BEGIN(test_stats_arenas_lextents)
-{
-	unsigned arena;
-	void *p;
-	uint64_t epoch, nmalloc, ndalloc;
-	size_t curlextents, sz, hsize;
-	int expected = config_stats ? 0 : ENOENT;
-
-	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-	    sizeof(arena)), 0, "Unexpected mallctl() failure");
-
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL,
-	    0), 0, "Unexpected mallctl() failure");
-
-	p = mallocx(hsize, 0);
-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
-
-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-	    0, "Unexpected mallctl() failure");
-
-	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc",
-	    (void *)&nmalloc, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc",
-	    (void *)&ndalloc, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
-	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents",
-	    (void *)&curlextents, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
-
-	if (config_stats) {
-		assert_u64_gt(nmalloc, 0,
-		    "nmalloc should be greater than zero");
-		assert_u64_ge(nmalloc, ndalloc,
-		    "nmalloc should be at least as large as ndalloc");
-		assert_zu_gt(curlextents, 0,
-		    "At least one extent should be currently allocated");
-	}
-
-	dallocx(p, 0);
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_stats_summary,
-	    test_stats_large,
-	    test_stats_arenas_summary,
-	    test_stats_arenas_small,
-	    test_stats_arenas_large,
-	    test_stats_arenas_bins,
-	    test_stats_arenas_lextents));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/stats_print.c b/zircon/third_party/ulib/jemalloc/test/unit/stats_print.c
deleted file mode 100644
index 5accd8e..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/stats_print.c
+++ /dev/null
@@ -1,1006 +0,0 @@
-#include "test/jemalloc_test.h"
-
-typedef enum {
-	TOKEN_TYPE_NONE,
-	TOKEN_TYPE_ERROR,
-	TOKEN_TYPE_EOI,
-	TOKEN_TYPE_NULL,
-	TOKEN_TYPE_FALSE,
-	TOKEN_TYPE_TRUE,
-	TOKEN_TYPE_LBRACKET,
-	TOKEN_TYPE_RBRACKET,
-	TOKEN_TYPE_LBRACE,
-	TOKEN_TYPE_RBRACE,
-	TOKEN_TYPE_COLON,
-	TOKEN_TYPE_COMMA,
-	TOKEN_TYPE_STRING,
-	TOKEN_TYPE_NUMBER
-} token_type_t;
-
-typedef struct parser_s parser_t;
-typedef struct {
-	parser_t	*parser;
-	token_type_t	token_type;
-	size_t		pos;
-	size_t		len;
-	size_t		line;
-	size_t		col;
-} token_t;
-
-struct parser_s {
-	bool verbose;
-	char	*buf; /* '\0'-terminated. */
-	size_t	len; /* Number of characters preceding '\0' in buf. */
-	size_t	pos;
-	size_t	line;
-	size_t	col;
-	token_t	token;
-};
-
-static void
-token_init(token_t *token, parser_t *parser, token_type_t token_type,
-    size_t pos, size_t len, size_t line, size_t col)
-{
-	token->parser = parser;
-	token->token_type = token_type;
-	token->pos = pos;
-	token->len = len;
-	token->line = line;
-	token->col = col;
-}
-
-static void
-token_error(token_t *token)
-{
-	if (!token->parser->verbose) {
-		return;
-	}
-	switch (token->token_type) {
-	case TOKEN_TYPE_NONE:
-		not_reached();
-	case TOKEN_TYPE_ERROR:
-		malloc_printf("%zu:%zu: Unexpected character in token: ",
-		    token->line, token->col);
-		break;
-	default:
-		malloc_printf("%zu:%zu: Unexpected token: ", token->line,
-		    token->col);
-		break;
-	}
-	write(STDERR_FILENO, &token->parser->buf[token->pos], token->len);
-	malloc_printf("\n");
-}
-
-static void
-parser_init(parser_t *parser, bool verbose)
-{
-	parser->verbose = verbose;
-	parser->buf = NULL;
-	parser->len = 0;
-	parser->pos = 0;
-	parser->line = 1;
-	parser->col = 0;
-}
-
-static void
-parser_fini(parser_t *parser)
-{
-	if (parser->buf != NULL) {
-		dallocx(parser->buf, MALLOCX_TCACHE_NONE);
-	}
-}
-
-static bool
-parser_append(parser_t *parser, const char *str)
-{
-	size_t len = strlen(str);
-	char *buf = (parser->buf == NULL) ? mallocx(len + 1,
-	    MALLOCX_TCACHE_NONE) : rallocx(parser->buf, parser->len + len + 1,
-	    MALLOCX_TCACHE_NONE);
-	if (buf == NULL) {
-		return true;
-	}
-	memcpy(&buf[parser->len], str, len + 1);
-	parser->buf = buf;
-	parser->len += len;
-	return false;
-}
-
-static bool
-parser_tokenize(parser_t *parser)
-{
-	enum {
-		STATE_START,
-		STATE_EOI,
-		STATE_N, STATE_NU, STATE_NUL, STATE_NULL,
-		STATE_F, STATE_FA, STATE_FAL, STATE_FALS, STATE_FALSE,
-		STATE_T, STATE_TR, STATE_TRU, STATE_TRUE,
-		STATE_LBRACKET,
-		STATE_RBRACKET,
-		STATE_LBRACE,
-		STATE_RBRACE,
-		STATE_COLON,
-		STATE_COMMA,
-		STATE_CHARS,
-		STATE_CHAR_ESCAPE,
-		STATE_CHAR_U, STATE_CHAR_UD, STATE_CHAR_UDD, STATE_CHAR_UDDD,
-		STATE_STRING,
-		STATE_MINUS,
-		STATE_LEADING_ZERO,
-		STATE_DIGITS,
-		STATE_DECIMAL,
-		STATE_FRAC_DIGITS,
-		STATE_EXP,
-		STATE_EXP_SIGN,
-		STATE_EXP_DIGITS,
-		STATE_ACCEPT
-	} state = STATE_START;
-	size_t token_pos, token_line, token_col;
-
-	assert_zu_le(parser->pos, parser->len,
-	    "Position is past end of buffer");
-
-	while (state != STATE_ACCEPT) {
-		char c = parser->buf[parser->pos];
-
-		switch (state) {
-		case STATE_START:
-			token_pos = parser->pos;
-			token_line = parser->line;
-			token_col = parser->col;
-			switch (c) {
-			case ' ': case '\b': case '\n': case '\r': case '\t':
-				break;
-			case '\0':
-				state = STATE_EOI;
-				break;
-			case 'n':
-				state = STATE_N;
-				break;
-			case 'f':
-				state = STATE_F;
-				break;
-			case 't':
-				state = STATE_T;
-				break;
-			case '[':
-				state = STATE_LBRACKET;
-				break;
-			case ']':
-				state = STATE_RBRACKET;
-				break;
-			case '{':
-				state = STATE_LBRACE;
-				break;
-			case '}':
-				state = STATE_RBRACE;
-				break;
-			case ':':
-				state = STATE_COLON;
-				break;
-			case ',':
-				state = STATE_COMMA;
-				break;
-			case '"':
-				state = STATE_CHARS;
-				break;
-			case '-':
-				state = STATE_MINUS;
-				break;
-			case '0':
-				state = STATE_LEADING_ZERO;
-				break;
-			case '1': case '2': case '3': case '4':
-			case '5': case '6': case '7': case '8': case '9':
-				state = STATE_DIGITS;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_EOI:
-			token_init(&parser->token, parser,
-			    TOKEN_TYPE_EOI, token_pos, parser->pos -
-			    token_pos, token_line, token_col);
-			state = STATE_ACCEPT;
-			break;
-		case STATE_N:
-			switch (c) {
-			case 'u':
-				state = STATE_NU;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_NU:
-			switch (c) {
-			case 'l':
-				state = STATE_NUL;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_NUL:
-			switch (c) {
-			case 'l':
-				state = STATE_NULL;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_NULL:
-			switch (c) {
-			case ' ': case '\b': case '\n': case '\r': case '\t':
-			case '\0':
-			case '[': case ']': case '{': case '}': case ':':
-			case ',':
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			token_init(&parser->token, parser, TOKEN_TYPE_NULL,
-			    token_pos, parser->pos - token_pos, token_line,
-			    token_col);
-			state = STATE_ACCEPT;
-			break;
-		case STATE_F:
-			switch (c) {
-			case 'a':
-				state = STATE_FA;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_FA:
-			switch (c) {
-			case 'l':
-				state = STATE_FAL;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_FAL:
-			switch (c) {
-			case 's':
-				state = STATE_FALS;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_FALS:
-			switch (c) {
-			case 'e':
-				state = STATE_FALSE;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_FALSE:
-			switch (c) {
-			case ' ': case '\b': case '\n': case '\r': case '\t':
-			case '\0':
-			case '[': case ']': case '{': case '}': case ':':
-			case ',':
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			token_init(&parser->token, parser,
-			    TOKEN_TYPE_FALSE, token_pos, parser->pos -
-			    token_pos, token_line, token_col);
-			state = STATE_ACCEPT;
-			break;
-		case STATE_T:
-			switch (c) {
-			case 'r':
-				state = STATE_TR;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_TR:
-			switch (c) {
-			case 'u':
-				state = STATE_TRU;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_TRU:
-			switch (c) {
-			case 'e':
-				state = STATE_TRUE;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_TRUE:
-			switch (c) {
-			case ' ': case '\b': case '\n': case '\r': case '\t':
-			case '\0':
-			case '[': case ']': case '{': case '}': case ':':
-			case ',':
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			token_init(&parser->token, parser, TOKEN_TYPE_TRUE,
-			    token_pos, parser->pos - token_pos, token_line,
-			    token_col);
-			state = STATE_ACCEPT;
-			break;
-		case STATE_LBRACKET:
-			token_init(&parser->token, parser, TOKEN_TYPE_LBRACKET,
-			    token_pos, parser->pos - token_pos, token_line,
-			    token_col);
-			state = STATE_ACCEPT;
-			break;
-		case STATE_RBRACKET:
-			token_init(&parser->token, parser, TOKEN_TYPE_RBRACKET,
-			    token_pos, parser->pos - token_pos, token_line,
-			    token_col);
-			state = STATE_ACCEPT;
-			break;
-		case STATE_LBRACE:
-			token_init(&parser->token, parser, TOKEN_TYPE_LBRACE,
-			    token_pos, parser->pos - token_pos, token_line,
-			    token_col);
-			state = STATE_ACCEPT;
-			break;
-		case STATE_RBRACE:
-			token_init(&parser->token, parser, TOKEN_TYPE_RBRACE,
-			    token_pos, parser->pos - token_pos, token_line,
-			    token_col);
-			state = STATE_ACCEPT;
-			break;
-		case STATE_COLON:
-			token_init(&parser->token, parser, TOKEN_TYPE_COLON,
-			    token_pos, parser->pos - token_pos, token_line,
-			    token_col);
-			state = STATE_ACCEPT;
-			break;
-		case STATE_COMMA:
-			token_init(&parser->token, parser, TOKEN_TYPE_COMMA,
-			    token_pos, parser->pos - token_pos, token_line,
-			    token_col);
-			state = STATE_ACCEPT;
-			break;
-		case STATE_CHARS:
-			switch (c) {
-			case '\\':
-				state = STATE_CHAR_ESCAPE;
-				break;
-			case '"':
-				state = STATE_STRING;
-				break;
-			case 0x00: case 0x01: case 0x02: case 0x03: case 0x04:
-			case 0x05: case 0x06: case 0x07: case 0x08: case 0x09:
-			case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e:
-			case 0x0f: case 0x10: case 0x11: case 0x12: case 0x13:
-			case 0x14: case 0x15: case 0x16: case 0x17: case 0x18:
-			case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d:
-			case 0x1e: case 0x1f:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			default:
-				break;
-			}
-			break;
-		case STATE_CHAR_ESCAPE:
-			switch (c) {
-			case '"': case '\\': case '/': case 'b': case 'n':
-			case 'r': case 't':
-				state = STATE_CHARS;
-				break;
-			case 'u':
-				state = STATE_CHAR_U;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_CHAR_U:
-			switch (c) {
-			case '0': case '1': case '2': case '3': case '4':
-			case '5': case '6': case '7': case '8': case '9':
-			case 'a': case 'b': case 'c': case 'd': case 'e':
-			case 'f':
-			case 'A': case 'B': case 'C': case 'D': case 'E':
-			case 'F':
-				state = STATE_CHAR_UD;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_CHAR_UD:
-			switch (c) {
-			case '0': case '1': case '2': case '3': case '4':
-			case '5': case '6': case '7': case '8': case '9':
-			case 'a': case 'b': case 'c': case 'd': case 'e':
-			case 'f':
-			case 'A': case 'B': case 'C': case 'D': case 'E':
-			case 'F':
-				state = STATE_CHAR_UDD;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_CHAR_UDD:
-			switch (c) {
-			case '0': case '1': case '2': case '3': case '4':
-			case '5': case '6': case '7': case '8': case '9':
-			case 'a': case 'b': case 'c': case 'd': case 'e':
-			case 'f':
-			case 'A': case 'B': case 'C': case 'D': case 'E':
-			case 'F':
-				state = STATE_CHAR_UDDD;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_CHAR_UDDD:
-			switch (c) {
-			case '0': case '1': case '2': case '3': case '4':
-			case '5': case '6': case '7': case '8': case '9':
-			case 'a': case 'b': case 'c': case 'd': case 'e':
-			case 'f':
-			case 'A': case 'B': case 'C': case 'D': case 'E':
-			case 'F':
-				state = STATE_CHARS;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_STRING:
-			token_init(&parser->token, parser, TOKEN_TYPE_STRING,
-			    token_pos, parser->pos - token_pos, token_line,
-			    token_col);
-			state = STATE_ACCEPT;
-			break;
-		case STATE_MINUS:
-			switch (c) {
-			case '0':
-				state = STATE_LEADING_ZERO;
-				break;
-			case '1': case '2': case '3': case '4':
-			case '5': case '6': case '7': case '8': case '9':
-				state = STATE_DIGITS;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_LEADING_ZERO:
-			switch (c) {
-			case '.':
-				state = STATE_DECIMAL;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_NUMBER, token_pos, parser->pos -
-				    token_pos, token_line, token_col);
-				state = STATE_ACCEPT;
-				break;
-			}
-			break;
-		case STATE_DIGITS:
-			switch (c) {
-			case '0': case '1': case '2': case '3': case '4':
-			case '5': case '6': case '7': case '8': case '9':
-				break;
-			case '.':
-				state = STATE_DECIMAL;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_NUMBER, token_pos, parser->pos -
-				    token_pos, token_line, token_col);
-				state = STATE_ACCEPT;
-				break;
-			}
-			break;
-		case STATE_DECIMAL:
-			switch (c) {
-			case '0': case '1': case '2': case '3': case '4':
-			case '5': case '6': case '7': case '8': case '9':
-				state = STATE_FRAC_DIGITS;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_FRAC_DIGITS:
-			switch (c) {
-			case '0': case '1': case '2': case '3': case '4':
-			case '5': case '6': case '7': case '8': case '9':
-				break;
-			case 'e': case 'E':
-				state = STATE_EXP;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_NUMBER, token_pos, parser->pos -
-				    token_pos, token_line, token_col);
-				state = STATE_ACCEPT;
-				break;
-			}
-			break;
-		case STATE_EXP:
-			switch (c) {
-			case '-': case '+':
-				state = STATE_EXP_SIGN;
-				break;
-			case '0': case '1': case '2': case '3': case '4':
-			case '5': case '6': case '7': case '8': case '9':
-				state = STATE_EXP_DIGITS;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_EXP_SIGN:
-			switch (c) {
-			case '0': case '1': case '2': case '3': case '4':
-			case '5': case '6': case '7': case '8': case '9':
-				state = STATE_EXP_DIGITS;
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
-				    - token_pos, token_line, token_col);
-				return true;
-			}
-			break;
-		case STATE_EXP_DIGITS:
-			switch (c) {
-			case '0': case '1': case '2': case '3': case '4':
-			case '5': case '6': case '7': case '8': case '9':
-				break;
-			default:
-				token_init(&parser->token, parser,
-				    TOKEN_TYPE_NUMBER, token_pos, parser->pos -
-				    token_pos, token_line, token_col);
-				state = STATE_ACCEPT;
-				break;
-			}
-			break;
-		default:
-			not_reached();
-		}
-
-		if (state != STATE_ACCEPT) {
-			if (c == '\n') {
-				parser->line++;
-				parser->col = 0;
-			} else {
-				parser->col++;
-			}
-			parser->pos++;
-		}
-	}
-	return false;
-}
-
-static bool	parser_parse_array(parser_t *parser);
-static bool	parser_parse_object(parser_t *parser);
-
-static bool
-parser_parse_value(parser_t *parser)
-{
-	switch (parser->token.token_type) {
-	case TOKEN_TYPE_NULL:
-	case TOKEN_TYPE_FALSE:
-	case TOKEN_TYPE_TRUE:
-	case TOKEN_TYPE_STRING:
-	case TOKEN_TYPE_NUMBER:
-		return false;
-	case TOKEN_TYPE_LBRACE:
-		return parser_parse_object(parser);
-	case TOKEN_TYPE_LBRACKET:
-		return parser_parse_array(parser);
-	default:
-		return true;
-	}
-	not_reached();
-}
-
-static bool
-parser_parse_pair(parser_t *parser)
-{
-	assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
-	    "Pair should start with string");
-	if (parser_tokenize(parser)) {
-		return true;
-	}
-	switch (parser->token.token_type) {
-	case TOKEN_TYPE_COLON:
-		if (parser_tokenize(parser)) {
-			return true;
-		}
-		return parser_parse_value(parser);
-	default:
-		return true;
-	}
-}
-
-static bool
-parser_parse_values(parser_t *parser)
-{
-	if (parser_parse_value(parser)) {
-		return true;
-	}
-
-	while (true) {
-		if (parser_tokenize(parser)) {
-			return true;
-		}
-		switch (parser->token.token_type) {
-		case TOKEN_TYPE_COMMA:
-			if (parser_tokenize(parser)) {
-				return true;
-			}
-			if (parser_parse_value(parser)) {
-				return true;
-			}
-			break;
-		case TOKEN_TYPE_RBRACKET:
-			return false;
-		default:
-			return true;
-		}
-	}
-}
-
-static bool
-parser_parse_array(parser_t *parser)
-{
-	assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACKET,
-	    "Array should start with [");
-	if (parser_tokenize(parser)) {
-		return true;
-	}
-	switch (parser->token.token_type) {
-	case TOKEN_TYPE_RBRACKET:
-		return false;
-	default:
-		return parser_parse_values(parser);
-	}
-	not_reached();
-}
-
-static bool
-parser_parse_pairs(parser_t *parser)
-{
-	assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
-	    "Object should start with string");
-	if (parser_parse_pair(parser)) {
-		return true;
-	}
-
-	while (true) {
-		if (parser_tokenize(parser)) {
-			return true;
-		}
-		switch (parser->token.token_type) {
-		case TOKEN_TYPE_COMMA:
-			if (parser_tokenize(parser)) {
-				return true;
-			}
-			switch (parser->token.token_type) {
-			case TOKEN_TYPE_STRING:
-				if (parser_parse_pair(parser)) {
-					return true;
-				}
-				break;
-			default:
-				return true;
-			}
-			break;
-		case TOKEN_TYPE_RBRACE:
-			return false;
-		default:
-			return true;
-		}
-	}
-}
-
-static bool
-parser_parse_object(parser_t *parser)
-{
-	assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACE,
-	    "Object should start with {");
-	if (parser_tokenize(parser)) {
-		return true;
-	}
-	switch (parser->token.token_type) {
-	case TOKEN_TYPE_STRING:
-		return parser_parse_pairs(parser);
-	case TOKEN_TYPE_RBRACE:
-		return false;
-	default:
-		return true;
-	}
-	not_reached();
-}
-
-static bool
-parser_parse(parser_t *parser)
-{
-	if (parser_tokenize(parser)) {
-		goto label_error;
-	}
-	if (parser_parse_value(parser)) {
-		goto label_error;
-	}
-
-	if (parser_tokenize(parser)) {
-		goto label_error;
-	}
-	switch (parser->token.token_type) {
-	case TOKEN_TYPE_EOI:
-		return false;
-	default:
-		goto label_error;
-	}
-	not_reached();
-
-label_error:
-	token_error(&parser->token);
-	return true;
-}
-
-TEST_BEGIN(test_json_parser)
-{
-	size_t i;
-	const char *invalid_inputs[] = {
-		/* Tokenizer error case tests. */
-		"{ \"string\": X }",
-		"{ \"string\": nXll }",
-		"{ \"string\": nuXl }",
-		"{ \"string\": nulX }",
-		"{ \"string\": nullX }",
-		"{ \"string\": fXlse }",
-		"{ \"string\": faXse }",
-		"{ \"string\": falXe }",
-		"{ \"string\": falsX }",
-		"{ \"string\": falseX }",
-		"{ \"string\": tXue }",
-		"{ \"string\": trXe }",
-		"{ \"string\": truX }",
-		"{ \"string\": trueX }",
-		"{ \"string\": \"\n\" }",
-		"{ \"string\": \"\\z\" }",
-		"{ \"string\": \"\\uX000\" }",
-		"{ \"string\": \"\\u0X00\" }",
-		"{ \"string\": \"\\u00X0\" }",
-		"{ \"string\": \"\\u000X\" }",
-		"{ \"string\": -X }",
-		"{ \"string\": 0.X }",
-		"{ \"string\": 0.0eX }",
-		"{ \"string\": 0.0e+X }",
-
-		/* Parser error test cases. */
-		"{\"string\": }",
-		"{\"string\" }",
-		"{\"string\": [ 0 }",
-		"{\"string\": {\"a\":0, 1 } }",
-		"{\"string\": {\"a\":0: } }",
-		"{",
-		"{}{",
-	};
-	const char *valid_inputs[] = {
-		/* Token tests. */
-		"null",
-		"false",
-		"true",
-		"{}",
-		"{\"a\": 0}",
-		"[]",
-		"[0, 1]",
-		"0",
-		"1",
-		"10",
-		"-10",
-		"10.23",
-		"10.23e4",
-		"10.23e-4",
-		"10.23e+4",
-		"10.23E4",
-		"10.23E-4",
-		"10.23E+4",
-		"-10.23",
-		"-10.23e4",
-		"-10.23e-4",
-		"-10.23e+4",
-		"-10.23E4",
-		"-10.23E-4",
-		"-10.23E+4",
-		"\"value\"",
-		"\" \\\" \\/ \\b \\n \\r \\t \\u0abc \\u1DEF \"",
-
-		/* Parser test with various nesting. */
-		"{\"a\":null, \"b\":[1,[{\"c\":2},3]], \"d\":{\"e\":true}}",
-	};
-
-	for (i = 0; i < sizeof(invalid_inputs)/sizeof(const char *); i++) {
-		const char *input = invalid_inputs[i];
-		parser_t parser;
-		parser_init(&parser, false);
-		assert_false(parser_append(&parser, input),
-		    "Unexpected input appending failure");
-		assert_true(parser_parse(&parser),
-		    "Unexpected parse success for input: %s", input);
-		parser_fini(&parser);
-	}
-
-	for (i = 0; i < sizeof(valid_inputs)/sizeof(const char *); i++) {
-		const char *input = valid_inputs[i];
-		parser_t parser;
-		parser_init(&parser, true);
-		assert_false(parser_append(&parser, input),
-		    "Unexpected input appending failure");
-		assert_false(parser_parse(&parser),
-		    "Unexpected parse error for input: %s", input);
-		parser_fini(&parser);
-	}
-}
-TEST_END
-
-void
-write_cb(void *opaque, const char *str)
-{
-	parser_t *parser = (parser_t *)opaque;
-	if (parser_append(parser, str)) {
-		test_fail("Unexpected input appending failure");
-	}
-}
-
-TEST_BEGIN(test_stats_print_json)
-{
-	const char *opts[] = {
-		"J",
-		"Jg",
-		"Jm",
-		"Jd",
-		"Jmd",
-		"Jgd",
-		"Jgm",
-		"Jgmd",
-		"Ja",
-		"Jb",
-		"Jl",
-		"Jbl",
-		"Jal",
-		"Jab",
-		"Jabl",
-		"Jgmdabl",
-	};
-	unsigned arena_ind, i;
-
-	for (i = 0; i < 3; i++) {
-		unsigned j;
-
-		switch (i) {
-		case 0:
-			break;
-		case 1: {
-			size_t sz = sizeof(arena_ind);
-			assert_d_eq(mallctl("arenas.create", (void *)&arena_ind,
-			    &sz, NULL, 0), 0, "Unexpected mallctl failure");
-			break;
-		} case 2: {
-			size_t mib[3];
-			size_t miblen = sizeof(mib)/sizeof(size_t);
-			assert_d_eq(mallctlnametomib("arena.0.destroy",
-			    mib, &miblen), 0,
-			    "Unexpected mallctlnametomib failure");
-			mib[1] = arena_ind;
-			assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL,
-			    0), 0, "Unexpected mallctlbymib failure");
-			break;
-		} default:
-			not_reached();
-		}
-
-		for (j = 0; j < sizeof(opts)/sizeof(const char *); j++) {
-			parser_t parser;
-
-			parser_init(&parser, true);
-			malloc_stats_print(write_cb, (void *)&parser, opts[j]);
-			assert_false(parser_parse(&parser),
-			    "Unexpected parse error, opts=\"%s\"", opts[j]);
-			parser_fini(&parser);
-		}
-	}
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_json_parser,
-	    test_stats_print_json));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/ticker.c b/zircon/third_party/ulib/jemalloc/test/unit/ticker.c
deleted file mode 100644
index b8af46c..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/ticker.c
+++ /dev/null
@@ -1,75 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_ticker_tick)
-{
-#define	NREPS 2
-#define	NTICKS 3
-	ticker_t ticker;
-	int32_t i, j;
-
-	ticker_init(&ticker, NTICKS);
-	for (i = 0; i < NREPS; i++) {
-		for (j = 0; j < NTICKS; j++) {
-			assert_u_eq(ticker_read(&ticker), NTICKS - j,
-			    "Unexpected ticker value (i=%d, j=%d)", i, j);
-			assert_false(ticker_tick(&ticker),
-			    "Unexpected ticker fire (i=%d, j=%d)", i, j);
-		}
-		assert_u32_eq(ticker_read(&ticker), 0,
-		    "Expected ticker depletion");
-		assert_true(ticker_tick(&ticker),
-		    "Expected ticker fire (i=%d)", i);
-		assert_u32_eq(ticker_read(&ticker), NTICKS,
-		    "Expected ticker reset");
-	}
-#undef NTICKS
-}
-TEST_END
-
-TEST_BEGIN(test_ticker_ticks)
-{
-#define	NTICKS 3
-	ticker_t ticker;
-
-	ticker_init(&ticker, NTICKS);
-
-	assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
-	assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire");
-	assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value");
-	assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire");
-	assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
-
-	assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire");
-	assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
-#undef NTICKS
-}
-TEST_END
-
-TEST_BEGIN(test_ticker_copy)
-{
-#define	NTICKS 3
-	ticker_t ta, tb;
-
-	ticker_init(&ta, NTICKS);
-	ticker_copy(&tb, &ta);
-	assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
-	assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire");
-	assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
-
-	ticker_tick(&ta);
-	ticker_copy(&tb, &ta);
-	assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value");
-	assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire");
-	assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
-#undef NTICKS
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_ticker_tick,
-	    test_ticker_ticks,
-	    test_ticker_copy));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/tsd.c b/zircon/third_party/ulib/jemalloc/test/unit/tsd.c
deleted file mode 100644
index 5313ef8..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/tsd.c
+++ /dev/null
@@ -1,110 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define	THREAD_DATA 0x72b65c10
-
-typedef unsigned int data_t;
-
-static bool data_cleanup_executed;
-
-malloc_tsd_types(data_, data_t)
-malloc_tsd_protos(, data_, data_t)
-
-void
-data_cleanup(void *arg)
-{
-	data_t *data = (data_t *)arg;
-
-	if (!data_cleanup_executed) {
-		assert_x_eq(*data, THREAD_DATA,
-		    "Argument passed into cleanup function should match tsd "
-		    "value");
-	}
-	data_cleanup_executed = true;
-
-	/*
-	 * Allocate during cleanup for two rounds, in order to assure that
-	 * jemalloc's internal tsd reinitialization happens.
-	 */
-	switch (*data) {
-	case THREAD_DATA:
-		*data = 1;
-		data_tsd_set(data);
-		break;
-	case 1:
-		*data = 2;
-		data_tsd_set(data);
-		break;
-	case 2:
-		return;
-	default:
-		not_reached();
-	}
-
-	{
-		void *p = mallocx(1, 0);
-		assert_ptr_not_null(p, "Unexpeced mallocx() failure");
-		dallocx(p, 0);
-	}
-}
-
-malloc_tsd_externs(data_, data_t)
-#define	DATA_INIT 0x12345678
-malloc_tsd_data(, data_, data_t, DATA_INIT)
-malloc_tsd_funcs(, data_, data_t, DATA_INIT, data_cleanup)
-
-static void *
-thd_start(void *arg)
-{
-	data_t d = (data_t)(uintptr_t)arg;
-	void *p;
-
-	assert_x_eq(*data_tsd_get(true), DATA_INIT,
-	    "Initial tsd get should return initialization value");
-
-	p = malloc(1);
-	assert_ptr_not_null(p, "Unexpected malloc() failure");
-
-	data_tsd_set(&d);
-	assert_x_eq(*data_tsd_get(true), d,
-	    "After tsd set, tsd get should return value that was set");
-
-	d = 0;
-	assert_x_eq(*data_tsd_get(true), (data_t)(uintptr_t)arg,
-	    "Resetting local data should have no effect on tsd");
-
-	free(p);
-	return (NULL);
-}
-
-TEST_BEGIN(test_tsd_main_thread)
-{
-	thd_start((void *)(uintptr_t)0xa5f3e329);
-}
-TEST_END
-
-TEST_BEGIN(test_tsd_sub_thread)
-{
-	thd_t thd;
-
-	data_cleanup_executed = false;
-	thd_create(&thd, thd_start, (void *)THREAD_DATA);
-	thd_join(thd, NULL);
-	assert_true(data_cleanup_executed,
-	    "Cleanup function should have executed");
-}
-TEST_END
-
-int
-main(void)
-{
-	/* Core tsd bootstrapping must happen prior to data_tsd_boot(). */
-	if (nallocx(1, 0) == 0) {
-		malloc_printf("Initialization error");
-		return (test_status_fail);
-	}
-	data_tsd_boot();
-
-	return (test(
-	    test_tsd_main_thread,
-	    test_tsd_sub_thread));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/util.c b/zircon/third_party/ulib/jemalloc/test/unit/util.c
deleted file mode 100644
index b891a19..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/util.c
+++ /dev/null
@@ -1,315 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define	TEST_POW2_CEIL(t, suf, pri) do {				\
-	unsigned i, pow2;						\
-	t x;								\
-									\
-	assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result");	\
-									\
-	for (i = 0; i < sizeof(t) * 8; i++) {				\
-		assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1)	\
-		    << i, "Unexpected result");				\
-	}								\
-									\
-	for (i = 2; i < sizeof(t) * 8; i++) {				\
-		assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1),	\
-		    ((t)1) << i, "Unexpected result");			\
-	}								\
-									\
-	for (i = 0; i < sizeof(t) * 8 - 1; i++) {			\
-		assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1),	\
-		    ((t)1) << (i+1), "Unexpected result");		\
-	}								\
-									\
-	for (pow2 = 1; pow2 < 25; pow2++) {				\
-		for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2;	\
-		    x++) {						\
-			assert_##suf##_eq(pow2_ceil_##suf(x),		\
-			    ((t)1) << pow2,				\
-			    "Unexpected result, x=%"pri, x);		\
-		}							\
-	}								\
-} while (0)
-
-TEST_BEGIN(test_pow2_ceil_u64)
-{
-	TEST_POW2_CEIL(uint64_t, u64, FMTu64);
-}
-TEST_END
-
-TEST_BEGIN(test_pow2_ceil_u32)
-{
-	TEST_POW2_CEIL(uint32_t, u32, FMTu32);
-}
-TEST_END
-
-TEST_BEGIN(test_pow2_ceil_zu)
-{
-	TEST_POW2_CEIL(size_t, zu, "zu");
-}
-TEST_END
-
-TEST_BEGIN(test_malloc_strtoumax_no_endptr)
-{
-	int err;
-
-	set_errno(0);
-	assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result");
-	err = get_errno();
-	assert_d_eq(err, 0, "Unexpected failure");
-}
-TEST_END
-
-TEST_BEGIN(test_malloc_strtoumax)
-{
-	struct test_s {
-		const char *input;
-		const char *expected_remainder;
-		int base;
-		int expected_errno;
-		const char *expected_errno_name;
-		uintmax_t expected_x;
-	};
-#define	ERR(e)		e, #e
-#define	KUMAX(x)	((uintmax_t)x##ULL)
-#define	KSMAX(x)	((uintmax_t)(intmax_t)x##LL)
-	struct test_s tests[] = {
-		{"0",		"0",	-1,	ERR(EINVAL),	UINTMAX_MAX},
-		{"0",		"0",	1,	ERR(EINVAL),	UINTMAX_MAX},
-		{"0",		"0",	37,	ERR(EINVAL),	UINTMAX_MAX},
-
-		{"",		"",	0,	ERR(EINVAL),	UINTMAX_MAX},
-		{"+",		"+",	0,	ERR(EINVAL),	UINTMAX_MAX},
-		{"++3",		"++3",	0,	ERR(EINVAL),	UINTMAX_MAX},
-		{"-",		"-",	0,	ERR(EINVAL),	UINTMAX_MAX},
-
-		{"42",		"",	0,	ERR(0),		KUMAX(42)},
-		{"+42",		"",	0,	ERR(0),		KUMAX(42)},
-		{"-42",		"",	0,	ERR(0),		KSMAX(-42)},
-		{"042",		"",	0,	ERR(0),		KUMAX(042)},
-		{"+042",	"",	0,	ERR(0),		KUMAX(042)},
-		{"-042",	"",	0,	ERR(0),		KSMAX(-042)},
-		{"0x42",	"",	0,	ERR(0),		KUMAX(0x42)},
-		{"+0x42",	"",	0,	ERR(0),		KUMAX(0x42)},
-		{"-0x42",	"",	0,	ERR(0),		KSMAX(-0x42)},
-
-		{"0",		"",	0,	ERR(0),		KUMAX(0)},
-		{"1",		"",	0,	ERR(0),		KUMAX(1)},
-
-		{"42",		"",	0,	ERR(0),		KUMAX(42)},
-		{" 42",		"",	0,	ERR(0),		KUMAX(42)},
-		{"42 ",		" ",	0,	ERR(0),		KUMAX(42)},
-		{"0x",		"x",	0,	ERR(0),		KUMAX(0)},
-		{"42x",		"x",	0,	ERR(0),		KUMAX(42)},
-
-		{"07",		"",	0,	ERR(0),		KUMAX(7)},
-		{"010",		"",	0,	ERR(0),		KUMAX(8)},
-		{"08",		"8",	0,	ERR(0),		KUMAX(0)},
-		{"0_",		"_",	0,	ERR(0),		KUMAX(0)},
-
-		{"0x",		"x",	0,	ERR(0),		KUMAX(0)},
-		{"0X",		"X",	0,	ERR(0),		KUMAX(0)},
-		{"0xg",		"xg",	0,	ERR(0),		KUMAX(0)},
-		{"0XA",		"",	0,	ERR(0),		KUMAX(10)},
-
-		{"010",		"",	10,	ERR(0),		KUMAX(10)},
-		{"0x3",		"x3",	10,	ERR(0),		KUMAX(0)},
-
-		{"12",		"2",	2,	ERR(0),		KUMAX(1)},
-		{"78",		"8",	8,	ERR(0),		KUMAX(7)},
-		{"9a",		"a",	10,	ERR(0),		KUMAX(9)},
-		{"9A",		"A",	10,	ERR(0),		KUMAX(9)},
-		{"fg",		"g",	16,	ERR(0),		KUMAX(15)},
-		{"FG",		"G",	16,	ERR(0),		KUMAX(15)},
-		{"0xfg",	"g",	16,	ERR(0),		KUMAX(15)},
-		{"0XFG",	"G",	16,	ERR(0),		KUMAX(15)},
-		{"z_",		"_",	36,	ERR(0),		KUMAX(35)},
-		{"Z_",		"_",	36,	ERR(0),		KUMAX(35)}
-	};
-#undef ERR
-#undef KUMAX
-#undef KSMAX
-	unsigned i;
-
-	for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {
-		struct test_s *test = &tests[i];
-		int err;
-		uintmax_t result;
-		char *remainder;
-
-		set_errno(0);
-		result = malloc_strtoumax(test->input, &remainder, test->base);
-		err = get_errno();
-		assert_d_eq(err, test->expected_errno,
-		    "Expected errno %s for \"%s\", base %d",
-		    test->expected_errno_name, test->input, test->base);
-		assert_str_eq(remainder, test->expected_remainder,
-		    "Unexpected remainder for \"%s\", base %d",
-		    test->input, test->base);
-		if (err == 0) {
-			assert_ju_eq(result, test->expected_x,
-			    "Unexpected result for \"%s\", base %d",
-			    test->input, test->base);
-		}
-	}
-}
-TEST_END
-
-TEST_BEGIN(test_malloc_snprintf_truncated)
-{
-#define	BUFLEN	15
-	char buf[BUFLEN];
-	size_t result;
-	size_t len;
-#define	TEST(expected_str_untruncated, ...) do {			\
-	result = malloc_snprintf(buf, len, __VA_ARGS__);		\
-	assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0,	\
-	    "Unexpected string inequality (\"%s\" vs \"%s\")",		\
-	    buf, expected_str_untruncated);				\
-	assert_zu_eq(result, strlen(expected_str_untruncated),		\
-	    "Unexpected result");					\
-} while (0)
-
-	for (len = 1; len < BUFLEN; len++) {
-		TEST("012346789",	"012346789");
-		TEST("a0123b",		"a%sb", "0123");
-		TEST("a01234567",	"a%s%s", "0123", "4567");
-		TEST("a0123  ",		"a%-6s", "0123");
-		TEST("a  0123",		"a%6s", "0123");
-		TEST("a   012",		"a%6.3s", "0123");
-		TEST("a   012",		"a%*.*s", 6, 3, "0123");
-		TEST("a 123b",		"a% db", 123);
-		TEST("a123b",		"a%-db", 123);
-		TEST("a-123b",		"a%-db", -123);
-		TEST("a+123b",		"a%+db", 123);
-	}
-#undef BUFLEN
-#undef TEST
-}
-TEST_END
-
-TEST_BEGIN(test_malloc_snprintf)
-{
-#define	BUFLEN	128
-	char buf[BUFLEN];
-	size_t result;
-#define	TEST(expected_str, ...) do {					\
-	result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__);	\
-	assert_str_eq(buf, expected_str, "Unexpected output");		\
-	assert_zu_eq(result, strlen(expected_str), "Unexpected result");\
-} while (0)
-
-	TEST("hello", "hello");
-
-	TEST("50%, 100%", "50%%, %d%%", 100);
-
-	TEST("a0123b", "a%sb", "0123");
-
-	TEST("a 0123b", "a%5sb", "0123");
-	TEST("a 0123b", "a%*sb", 5, "0123");
-
-	TEST("a0123 b", "a%-5sb", "0123");
-	TEST("a0123b", "a%*sb", -1, "0123");
-	TEST("a0123 b", "a%*sb", -5, "0123");
-	TEST("a0123 b", "a%-*sb", -5, "0123");
-
-	TEST("a012b", "a%.3sb", "0123");
-	TEST("a012b", "a%.*sb", 3, "0123");
-	TEST("a0123b", "a%.*sb", -3, "0123");
-
-	TEST("a  012b", "a%5.3sb", "0123");
-	TEST("a  012b", "a%5.*sb", 3, "0123");
-	TEST("a  012b", "a%*.3sb", 5, "0123");
-	TEST("a  012b", "a%*.*sb", 5, 3, "0123");
-	TEST("a 0123b", "a%*.*sb", 5, -3, "0123");
-
-	TEST("_abcd_", "_%x_", 0xabcd);
-	TEST("_0xabcd_", "_%#x_", 0xabcd);
-	TEST("_1234_", "_%o_", 01234);
-	TEST("_01234_", "_%#o_", 01234);
-	TEST("_1234_", "_%u_", 1234);
-
-	TEST("_1234_", "_%d_", 1234);
-	TEST("_ 1234_", "_% d_", 1234);
-	TEST("_+1234_", "_%+d_", 1234);
-	TEST("_-1234_", "_%d_", -1234);
-	TEST("_-1234_", "_% d_", -1234);
-	TEST("_-1234_", "_%+d_", -1234);
-
-	TEST("_-1234_", "_%d_", -1234);
-	TEST("_1234_", "_%d_", 1234);
-	TEST("_-1234_", "_%i_", -1234);
-	TEST("_1234_", "_%i_", 1234);
-	TEST("_01234_", "_%#o_", 01234);
-	TEST("_1234_", "_%u_", 1234);
-	TEST("_0x1234abc_", "_%#x_", 0x1234abc);
-	TEST("_0X1234ABC_", "_%#X_", 0x1234abc);
-	TEST("_c_", "_%c_", 'c');
-	TEST("_string_", "_%s_", "string");
-	TEST("_0x42_", "_%p_", ((void *)0x42));
-
-	TEST("_-1234_", "_%ld_", ((long)-1234));
-	TEST("_1234_", "_%ld_", ((long)1234));
-	TEST("_-1234_", "_%li_", ((long)-1234));
-	TEST("_1234_", "_%li_", ((long)1234));
-	TEST("_01234_", "_%#lo_", ((long)01234));
-	TEST("_1234_", "_%lu_", ((long)1234));
-	TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc));
-	TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC));
-
-	TEST("_-1234_", "_%lld_", ((long long)-1234));
-	TEST("_1234_", "_%lld_", ((long long)1234));
-	TEST("_-1234_", "_%lli_", ((long long)-1234));
-	TEST("_1234_", "_%lli_", ((long long)1234));
-	TEST("_01234_", "_%#llo_", ((long long)01234));
-	TEST("_1234_", "_%llu_", ((long long)1234));
-	TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc));
-	TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC));
-
-	TEST("_-1234_", "_%qd_", ((long long)-1234));
-	TEST("_1234_", "_%qd_", ((long long)1234));
-	TEST("_-1234_", "_%qi_", ((long long)-1234));
-	TEST("_1234_", "_%qi_", ((long long)1234));
-	TEST("_01234_", "_%#qo_", ((long long)01234));
-	TEST("_1234_", "_%qu_", ((long long)1234));
-	TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc));
-	TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC));
-
-	TEST("_-1234_", "_%jd_", ((intmax_t)-1234));
-	TEST("_1234_", "_%jd_", ((intmax_t)1234));
-	TEST("_-1234_", "_%ji_", ((intmax_t)-1234));
-	TEST("_1234_", "_%ji_", ((intmax_t)1234));
-	TEST("_01234_", "_%#jo_", ((intmax_t)01234));
-	TEST("_1234_", "_%ju_", ((intmax_t)1234));
-	TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc));
-	TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC));
-
-	TEST("_1234_", "_%td_", ((ptrdiff_t)1234));
-	TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234));
-	TEST("_1234_", "_%ti_", ((ptrdiff_t)1234));
-	TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234));
-
-	TEST("_-1234_", "_%zd_", ((ssize_t)-1234));
-	TEST("_1234_", "_%zd_", ((ssize_t)1234));
-	TEST("_-1234_", "_%zi_", ((ssize_t)-1234));
-	TEST("_1234_", "_%zi_", ((ssize_t)1234));
-	TEST("_01234_", "_%#zo_", ((ssize_t)01234));
-	TEST("_1234_", "_%zu_", ((ssize_t)1234));
-	TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc));
-	TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC));
-#undef BUFLEN
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_pow2_ceil_u64,
-	    test_pow2_ceil_u32,
-	    test_pow2_ceil_zu,
-	    test_malloc_strtoumax_no_endptr,
-	    test_malloc_strtoumax,
-	    test_malloc_snprintf_truncated,
-	    test_malloc_snprintf));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/witness.c b/zircon/third_party/ulib/jemalloc/test/unit/witness.c
deleted file mode 100644
index 1359398..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/witness.c
+++ /dev/null
@@ -1,277 +0,0 @@
-#include "test/jemalloc_test.h"
-
-static witness_lock_error_t *witness_lock_error_orig;
-static witness_owner_error_t *witness_owner_error_orig;
-static witness_not_owner_error_t *witness_not_owner_error_orig;
-static witness_lockless_error_t *witness_lockless_error_orig;
-
-static bool saw_lock_error;
-static bool saw_owner_error;
-static bool saw_not_owner_error;
-static bool saw_lockless_error;
-
-static void
-witness_lock_error_intercept(const witness_list_t *witnesses,
-    const witness_t *witness)
-{
-	saw_lock_error = true;
-}
-
-static void
-witness_owner_error_intercept(const witness_t *witness)
-{
-	saw_owner_error = true;
-}
-
-static void
-witness_not_owner_error_intercept(const witness_t *witness)
-{
-	saw_not_owner_error = true;
-}
-
-static void
-witness_lockless_error_intercept(const witness_list_t *witnesses)
-{
-	saw_lockless_error = true;
-}
-
-static int
-witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob)
-{
-	assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
-
-	assert(oa == (void *)a);
-	assert(ob == (void *)b);
-
-	return (strcmp(a->name, b->name));
-}
-
-static int
-witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b, void *ob)
-{
-	assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
-
-	assert(oa == (void *)a);
-	assert(ob == (void *)b);
-
-	return (-strcmp(a->name, b->name));
-}
-
-TEST_BEGIN(test_witness)
-{
-	witness_t a, b;
-	tsdn_t *tsdn;
-
-	test_skip_if(!config_debug);
-
-	tsdn = tsdn_fetch();
-
-	witness_assert_lockless(tsdn);
-
-	witness_init(&a, "a", 1, NULL, NULL);
-	witness_assert_not_owner(tsdn, &a);
-	witness_lock(tsdn, &a);
-	witness_assert_owner(tsdn, &a);
-
-	witness_init(&b, "b", 2, NULL, NULL);
-	witness_assert_not_owner(tsdn, &b);
-	witness_lock(tsdn, &b);
-	witness_assert_owner(tsdn, &b);
-
-	witness_unlock(tsdn, &a);
-	witness_unlock(tsdn, &b);
-
-	witness_assert_lockless(tsdn);
-}
-TEST_END
-
-TEST_BEGIN(test_witness_comp)
-{
-	witness_t a, b, c, d;
-	tsdn_t *tsdn;
-
-	test_skip_if(!config_debug);
-
-	tsdn = tsdn_fetch();
-
-	witness_assert_lockless(tsdn);
-
-	witness_init(&a, "a", 1, witness_comp, &a);
-	witness_assert_not_owner(tsdn, &a);
-	witness_lock(tsdn, &a);
-	witness_assert_owner(tsdn, &a);
-
-	witness_init(&b, "b", 1, witness_comp, &b);
-	witness_assert_not_owner(tsdn, &b);
-	witness_lock(tsdn, &b);
-	witness_assert_owner(tsdn, &b);
-	witness_unlock(tsdn, &b);
-
-	witness_lock_error_orig = witness_lock_error;
-	witness_lock_error = witness_lock_error_intercept;
-	saw_lock_error = false;
-
-	witness_init(&c, "c", 1, witness_comp_reverse, &c);
-	witness_assert_not_owner(tsdn, &c);
-	assert_false(saw_lock_error, "Unexpected witness lock error");
-	witness_lock(tsdn, &c);
-	assert_true(saw_lock_error, "Expected witness lock error");
-	witness_unlock(tsdn, &c);
-
-	saw_lock_error = false;
-
-	witness_init(&d, "d", 1, NULL, NULL);
-	witness_assert_not_owner(tsdn, &d);
-	assert_false(saw_lock_error, "Unexpected witness lock error");
-	witness_lock(tsdn, &d);
-	assert_true(saw_lock_error, "Expected witness lock error");
-	witness_unlock(tsdn, &d);
-
-	witness_unlock(tsdn, &a);
-
-	witness_assert_lockless(tsdn);
-
-	witness_lock_error = witness_lock_error_orig;
-}
-TEST_END
-
-TEST_BEGIN(test_witness_reversal)
-{
-	witness_t a, b;
-	tsdn_t *tsdn;
-
-	test_skip_if(!config_debug);
-
-	witness_lock_error_orig = witness_lock_error;
-	witness_lock_error = witness_lock_error_intercept;
-	saw_lock_error = false;
-
-	tsdn = tsdn_fetch();
-
-	witness_assert_lockless(tsdn);
-
-	witness_init(&a, "a", 1, NULL, NULL);
-	witness_init(&b, "b", 2, NULL, NULL);
-
-	witness_lock(tsdn, &b);
-	assert_false(saw_lock_error, "Unexpected witness lock error");
-	witness_lock(tsdn, &a);
-	assert_true(saw_lock_error, "Expected witness lock error");
-
-	witness_unlock(tsdn, &a);
-	witness_unlock(tsdn, &b);
-
-	witness_assert_lockless(tsdn);
-
-	witness_lock_error = witness_lock_error_orig;
-}
-TEST_END
-
-TEST_BEGIN(test_witness_recursive)
-{
-	witness_t a;
-	tsdn_t *tsdn;
-
-	test_skip_if(!config_debug);
-
-	witness_not_owner_error_orig = witness_not_owner_error;
-	witness_not_owner_error = witness_not_owner_error_intercept;
-	saw_not_owner_error = false;
-
-	witness_lock_error_orig = witness_lock_error;
-	witness_lock_error = witness_lock_error_intercept;
-	saw_lock_error = false;
-
-	tsdn = tsdn_fetch();
-
-	witness_assert_lockless(tsdn);
-
-	witness_init(&a, "a", 1, NULL, NULL);
-
-	witness_lock(tsdn, &a);
-	assert_false(saw_lock_error, "Unexpected witness lock error");
-	assert_false(saw_not_owner_error, "Unexpected witness not owner error");
-	witness_lock(tsdn, &a);
-	assert_true(saw_lock_error, "Expected witness lock error");
-	assert_true(saw_not_owner_error, "Expected witness not owner error");
-
-	witness_unlock(tsdn, &a);
-
-	witness_assert_lockless(tsdn);
-
-	witness_not_owner_error = witness_not_owner_error_orig;
-	witness_lock_error = witness_lock_error_orig;
-
-}
-TEST_END
-
-TEST_BEGIN(test_witness_unlock_not_owned)
-{
-	witness_t a;
-	tsdn_t *tsdn;
-
-	test_skip_if(!config_debug);
-
-	witness_owner_error_orig = witness_owner_error;
-	witness_owner_error = witness_owner_error_intercept;
-	saw_owner_error = false;
-
-	tsdn = tsdn_fetch();
-
-	witness_assert_lockless(tsdn);
-
-	witness_init(&a, "a", 1, NULL, NULL);
-
-	assert_false(saw_owner_error, "Unexpected owner error");
-	witness_unlock(tsdn, &a);
-	assert_true(saw_owner_error, "Expected owner error");
-
-	witness_assert_lockless(tsdn);
-
-	witness_owner_error = witness_owner_error_orig;
-}
-TEST_END
-
-TEST_BEGIN(test_witness_lockful)
-{
-	witness_t a;
-	tsdn_t *tsdn;
-
-	test_skip_if(!config_debug);
-
-	witness_lockless_error_orig = witness_lockless_error;
-	witness_lockless_error = witness_lockless_error_intercept;
-	saw_lockless_error = false;
-
-	tsdn = tsdn_fetch();
-
-	witness_assert_lockless(tsdn);
-
-	witness_init(&a, "a", 1, NULL, NULL);
-
-	assert_false(saw_lockless_error, "Unexpected lockless error");
-	witness_assert_lockless(tsdn);
-
-	witness_lock(tsdn, &a);
-	witness_assert_lockless(tsdn);
-	assert_true(saw_lockless_error, "Expected lockless error");
-
-	witness_unlock(tsdn, &a);
-
-	witness_assert_lockless(tsdn);
-
-	witness_lockless_error = witness_lockless_error_orig;
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_witness,
-	    test_witness_comp,
-	    test_witness_reversal,
-	    test_witness_recursive,
-	    test_witness_unlock_not_owned,
-	    test_witness_lockful));
-}
diff --git a/zircon/third_party/ulib/jemalloc/test/unit/zero.c b/zircon/third_party/ulib/jemalloc/test/unit/zero.c
deleted file mode 100644
index c752954..0000000
--- a/zircon/third_party/ulib/jemalloc/test/unit/zero.c
+++ /dev/null
@@ -1,68 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifdef JEMALLOC_FILL
-const char *malloc_conf =
-    "abort:false,junk:false,zero:true";
-#endif
-
-static void
-test_zero(size_t sz_min, size_t sz_max)
-{
-	uint8_t *s;
-	size_t sz_prev, sz, i;
-#define	MAGIC	((uint8_t)0x61)
-
-	sz_prev = 0;
-	s = (uint8_t *)mallocx(sz_min, 0);
-	assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
-
-	for (sz = sallocx(s, 0); sz <= sz_max;
-	    sz_prev = sz, sz = sallocx(s, 0)) {
-		if (sz_prev > 0) {
-			assert_u_eq(s[0], MAGIC,
-			    "Previously allocated byte %zu/%zu is corrupted",
-			    ZU(0), sz_prev);
-			assert_u_eq(s[sz_prev-1], MAGIC,
-			    "Previously allocated byte %zu/%zu is corrupted",
-			    sz_prev-1, sz_prev);
-		}
-
-		for (i = sz_prev; i < sz; i++) {
-			assert_u_eq(s[i], 0x0,
-			    "Newly allocated byte %zu/%zu isn't zero-filled",
-			    i, sz);
-			s[i] = MAGIC;
-		}
-
-		if (xallocx(s, sz+1, 0, 0) == sz) {
-			s = (uint8_t *)rallocx(s, sz+1, 0);
-			assert_ptr_not_null((void *)s,
-			    "Unexpected rallocx() failure");
-		}
-	}
-
-	dallocx(s, 0);
-#undef MAGIC
-}
-
-TEST_BEGIN(test_zero_small)
-{
-	test_skip_if(!config_fill);
-	test_zero(1, SMALL_MAXCLASS-1);
-}
-TEST_END
-
-TEST_BEGIN(test_zero_large)
-{
-	test_skip_if(!config_fill);
-	test_zero(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1)));
-}
-TEST_END
-
-int
-main(void)
-{
-	return (test(
-	    test_zero_small,
-	    test_zero_large));
-}