Merge branch 'rc-4.4.0'
diff --git a/ChangeLog b/ChangeLog
index 587685d..f75edd9 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -4,6 +4,33 @@
 
     https://github.com/jemalloc/jemalloc
 
+* 4.4.0 (December 3, 2016)
+
+  New features:
+  - Add configure support for *-*-linux-android.  (@cferris1000, @jasone)
+  - Add the --disable-syscall configure option, for use on systems that place
+    security-motivated limitations on syscall(2).  (@jasone)
+  - Add support for Debian GNU/kFreeBSD.  (@thesam)
+
+  Optimizations:
+  - Add extent serial numbers and use them where appropriate as a sort key that
+    is higher priority than address, so that the allocation policy prefers older
+    extents.  This tends to improve locality (decrease fragmentation) when
+    memory grows downward.  (@jasone)
+  - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized
+    on Linux 4.5 and newer.  (@jasone)
+  - Mark partially purged arena chunks as non-huge-page.  This improves
+    interaction with Linux's transparent huge page functionality.  (@jasone)
+
+  Bug fixes:
+  - Fix size class computations for edge conditions involving extremely large
+    allocations.  This regression was first released in 4.0.0.  (@jasone,
+    @ingvarha)
+  - Remove overly restrictive assertions related to the cactive statistic.  This
+    regression was first released in 4.1.0.  (@jasone)
+  - Implement a more reliable detection scheme for os_unfair_lock on macOS.
+    (@jszakmeister)
+
 * 4.3.1 (November 7, 2016)
 
   Bug fixes:
diff --git a/INSTALL b/INSTALL
index 6878716..cce3ed7 100644
--- a/INSTALL
+++ b/INSTALL
@@ -206,6 +206,11 @@
     most extreme case increases physical memory usage for the 16 KiB size class
     to 20 KiB.
 
+--disable-syscall
+    Disable use of syscall(2) in favor of {open,read,write,close}(2).  This is
+    intended as a workaround for systems that place security limitations on
+    syscall(2).
+
 --with-xslroot=<path>
     Specify where to find DocBook XSL stylesheets when building the
     documentation.
@@ -327,6 +332,15 @@
 PATH="?"
     'configure' uses this to find programs.
 
+In some cases it may be necessary to work around configuration results that do
+not match reality.  For example, Linux 4.5 added support for the MADV_FREE flag
+to madvise(2), which can cause problems if building on a host with MADV_FREE
+support and deploying to a target without.  To work around this, use a cache
+file to override the relevant configuration variable defined in configure.ac,
+e.g.:
+
+    echo "je_cv_madv_free=no" > config.cache && ./configure -C
+
 === Advanced compilation =======================================================
 
 To build only parts of jemalloc, use the following targets:
diff --git a/Makefile.in b/Makefile.in
index d13c7f1..c705363 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -166,6 +166,8 @@
 	$(srcroot)test/unit/math.c \
 	$(srcroot)test/unit/mq.c \
 	$(srcroot)test/unit/mtx.c \
+	$(srcroot)test/unit/pack.c \
+	$(srcroot)test/unit/pages.c \
 	$(srcroot)test/unit/ph.c \
 	$(srcroot)test/unit/prng.c \
 	$(srcroot)test/unit/prof_accum.c \
diff --git a/build-aux/config.guess b/build-aux/config.guess
index 1f5c50c..2e9ad7f 100755
--- a/build-aux/config.guess
+++ b/build-aux/config.guess
@@ -1,8 +1,8 @@
 #! /bin/sh
 # Attempt to guess a canonical system name.
-#   Copyright 1992-2014 Free Software Foundation, Inc.
+#   Copyright 1992-2016 Free Software Foundation, Inc.
 
-timestamp='2014-03-23'
+timestamp='2016-10-02'
 
 # This file is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by
@@ -24,12 +24,12 @@
 # program.  This Exception is an additional permission under section 7
 # of the GNU General Public License, version 3 ("GPLv3").
 #
-# Originally written by Per Bothner.
+# Originally written by Per Bothner; maintained since 2000 by Ben Elliston.
 #
 # You can get the latest version of this script from:
-# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
 #
-# Please send patches with a ChangeLog entry to config-patches@gnu.org.
+# Please send patches to <config-patches@gnu.org>.
 
 
 me=`echo "$0" | sed -e 's,.*/,,'`
@@ -50,7 +50,7 @@
 GNU config.guess ($timestamp)
 
 Originally written by Per Bothner.
-Copyright 1992-2014 Free Software Foundation, Inc.
+Copyright 1992-2016 Free Software Foundation, Inc.
 
 This is free software; see the source for copying conditions.  There is NO
 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -168,19 +168,29 @@
 	# Note: NetBSD doesn't particularly care about the vendor
 	# portion of the name.  We always set it to "unknown".
 	sysctl="sysctl -n hw.machine_arch"
-	UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
-	    /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+	UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \
+	    /sbin/$sysctl 2>/dev/null || \
+	    /usr/sbin/$sysctl 2>/dev/null || \
+	    echo unknown)`
 	case "${UNAME_MACHINE_ARCH}" in
 	    armeb) machine=armeb-unknown ;;
 	    arm*) machine=arm-unknown ;;
 	    sh3el) machine=shl-unknown ;;
 	    sh3eb) machine=sh-unknown ;;
 	    sh5el) machine=sh5le-unknown ;;
+	    earmv*)
+		arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'`
+		endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'`
+		machine=${arch}${endian}-unknown
+		;;
 	    *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
 	esac
 	# The Operating System including object format, if it has switched
-	# to ELF recently, or will in the future.
+	# to ELF recently (or will in the future) and ABI.
 	case "${UNAME_MACHINE_ARCH}" in
+	    earm*)
+		os=netbsdelf
+		;;
 	    arm*|i386|m68k|ns32k|sh3*|sparc|vax)
 		eval $set_cc_for_build
 		if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
@@ -197,6 +207,13 @@
 		os=netbsd
 		;;
 	esac
+	# Determine ABI tags.
+	case "${UNAME_MACHINE_ARCH}" in
+	    earm*)
+		expr='s/^earmv[0-9]/-eabi/;s/eb$//'
+		abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"`
+		;;
+	esac
 	# The OS release
 	# Debian GNU/NetBSD machines have a different userland, and
 	# thus, need a distinct triplet. However, they do not need
@@ -207,13 +224,13 @@
 		release='-gnu'
 		;;
 	    *)
-		release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+		release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2`
 		;;
 	esac
 	# Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
 	# contains redundant information, the shorter form:
 	# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
-	echo "${machine}-${os}${release}"
+	echo "${machine}-${os}${release}${abi}"
 	exit ;;
     *:Bitrig:*:*)
 	UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
@@ -223,6 +240,10 @@
 	UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
 	echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
 	exit ;;
+    *:LibertyBSD:*:*)
+	UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'`
+	echo ${UNAME_MACHINE_ARCH}-unknown-libertybsd${UNAME_RELEASE}
+	exit ;;
     *:ekkoBSD:*:*)
 	echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
 	exit ;;
@@ -235,6 +256,9 @@
     *:MirBSD:*:*)
 	echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
 	exit ;;
+    *:Sortix:*:*)
+	echo ${UNAME_MACHINE}-unknown-sortix
+	exit ;;
     alpha:OSF1:*:*)
 	case $UNAME_RELEASE in
 	*4.0)
@@ -251,42 +275,42 @@
 	ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^  The alpha \(.*\) processor.*$/\1/p' | head -n 1`
 	case "$ALPHA_CPU_TYPE" in
 	    "EV4 (21064)")
-		UNAME_MACHINE="alpha" ;;
+		UNAME_MACHINE=alpha ;;
 	    "EV4.5 (21064)")
-		UNAME_MACHINE="alpha" ;;
+		UNAME_MACHINE=alpha ;;
 	    "LCA4 (21066/21068)")
-		UNAME_MACHINE="alpha" ;;
+		UNAME_MACHINE=alpha ;;
 	    "EV5 (21164)")
-		UNAME_MACHINE="alphaev5" ;;
+		UNAME_MACHINE=alphaev5 ;;
 	    "EV5.6 (21164A)")
-		UNAME_MACHINE="alphaev56" ;;
+		UNAME_MACHINE=alphaev56 ;;
 	    "EV5.6 (21164PC)")
-		UNAME_MACHINE="alphapca56" ;;
+		UNAME_MACHINE=alphapca56 ;;
 	    "EV5.7 (21164PC)")
-		UNAME_MACHINE="alphapca57" ;;
+		UNAME_MACHINE=alphapca57 ;;
 	    "EV6 (21264)")
-		UNAME_MACHINE="alphaev6" ;;
+		UNAME_MACHINE=alphaev6 ;;
 	    "EV6.7 (21264A)")
-		UNAME_MACHINE="alphaev67" ;;
+		UNAME_MACHINE=alphaev67 ;;
 	    "EV6.8CB (21264C)")
-		UNAME_MACHINE="alphaev68" ;;
+		UNAME_MACHINE=alphaev68 ;;
 	    "EV6.8AL (21264B)")
-		UNAME_MACHINE="alphaev68" ;;
+		UNAME_MACHINE=alphaev68 ;;
 	    "EV6.8CX (21264D)")
-		UNAME_MACHINE="alphaev68" ;;
+		UNAME_MACHINE=alphaev68 ;;
 	    "EV6.9A (21264/EV69A)")
-		UNAME_MACHINE="alphaev69" ;;
+		UNAME_MACHINE=alphaev69 ;;
 	    "EV7 (21364)")
-		UNAME_MACHINE="alphaev7" ;;
+		UNAME_MACHINE=alphaev7 ;;
 	    "EV7.9 (21364A)")
-		UNAME_MACHINE="alphaev79" ;;
+		UNAME_MACHINE=alphaev79 ;;
 	esac
 	# A Pn.n version is a patched version.
 	# A Vn.n version is a released version.
 	# A Tn.n version is a released field test version.
 	# A Xn.n version is an unreleased experimental baselevel.
 	# 1.2 uses "1.2" for uname -r.
-	echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+	echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`
 	# Reset EXIT trap before exiting to avoid spurious non-zero exit code.
 	exitcode=$?
 	trap '' 0
@@ -359,16 +383,16 @@
 	exit ;;
     i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
 	eval $set_cc_for_build
-	SUN_ARCH="i386"
+	SUN_ARCH=i386
 	# If there is a compiler, see if it is configured for 64-bit objects.
 	# Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
 	# This test works for both compilers.
-	if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+	if [ "$CC_FOR_BUILD" != no_compiler_found ]; then
 	    if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
-		(CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+		(CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
 		grep IS_64BIT_ARCH >/dev/null
 	    then
-		SUN_ARCH="x86_64"
+		SUN_ARCH=x86_64
 	    fi
 	fi
 	echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
@@ -393,7 +417,7 @@
 	exit ;;
     sun*:*:4.2BSD:*)
 	UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
-	test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+	test "x${UNAME_RELEASE}" = x && UNAME_RELEASE=3
 	case "`/bin/arch`" in
 	    sun3)
 		echo m68k-sun-sunos${UNAME_RELEASE}
@@ -579,8 +603,9 @@
 	else
 		IBM_ARCH=powerpc
 	fi
-	if [ -x /usr/bin/oslevel ] ; then
-		IBM_REV=`/usr/bin/oslevel`
+	if [ -x /usr/bin/lslpp ] ; then
+		IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc |
+			   awk -F: '{ print $3 }' | sed s/[0-9]*$/0/`
 	else
 		IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
 	fi
@@ -617,13 +642,13 @@
 		    sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
 		    sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
 		    case "${sc_cpu_version}" in
-		      523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
-		      528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+		      523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0
+		      528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1
 		      532)                      # CPU_PA_RISC2_0
 			case "${sc_kernel_bits}" in
-			  32) HP_ARCH="hppa2.0n" ;;
-			  64) HP_ARCH="hppa2.0w" ;;
-			  '') HP_ARCH="hppa2.0" ;;   # HP-UX 10.20
+			  32) HP_ARCH=hppa2.0n ;;
+			  64) HP_ARCH=hppa2.0w ;;
+			  '') HP_ARCH=hppa2.0 ;;   # HP-UX 10.20
 			esac ;;
 		    esac
 		fi
@@ -662,11 +687,11 @@
 		    exit (0);
 		}
 EOF
-		    (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+		    (CCOPTS="" $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
 		    test -z "$HP_ARCH" && HP_ARCH=hppa
 		fi ;;
 	esac
-	if [ ${HP_ARCH} = "hppa2.0w" ]
+	if [ ${HP_ARCH} = hppa2.0w ]
 	then
 	    eval $set_cc_for_build
 
@@ -679,12 +704,12 @@
 	    # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
 	    # => hppa64-hp-hpux11.23
 
-	    if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+	    if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) |
 		grep -q __LP64__
 	    then
-		HP_ARCH="hppa2.0w"
+		HP_ARCH=hppa2.0w
 	    else
-		HP_ARCH="hppa64"
+		HP_ARCH=hppa64
 	    fi
 	fi
 	echo ${HP_ARCH}-hp-hpux${HPUX_REV}
@@ -789,14 +814,14 @@
 	echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
 	exit ;;
     F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
-	FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
-	FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+	FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`
+	FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
 	FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
 	echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
 	exit ;;
     5000:UNIX_System_V:4.*:*)
-	FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
-	FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+	FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
+	FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'`
 	echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
 	exit ;;
     i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
@@ -878,7 +903,7 @@
 	exit ;;
     *:GNU/*:*:*)
 	# other systems with GNU libc and userland
-	echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC}
+	echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC}
 	exit ;;
     i*86:Minix:*:*)
 	echo ${UNAME_MACHINE}-pc-minix
@@ -901,7 +926,7 @@
 	  EV68*) UNAME_MACHINE=alphaev68 ;;
 	esac
 	objdump --private-headers /bin/sh | grep -q ld.so.1
-	if test "$?" = 0 ; then LIBC="gnulibc1" ; fi
+	if test "$?" = 0 ; then LIBC=gnulibc1 ; fi
 	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
 	exit ;;
     arc:Linux:*:* | arceb:Linux:*:*)
@@ -932,6 +957,9 @@
     crisv32:Linux:*:*)
 	echo ${UNAME_MACHINE}-axis-linux-${LIBC}
 	exit ;;
+    e2k:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
     frv:Linux:*:*)
 	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
 	exit ;;
@@ -944,6 +972,9 @@
     ia64:Linux:*:*)
 	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
 	exit ;;
+    k1om:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
     m32r*:Linux:*:*)
 	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
 	exit ;;
@@ -969,6 +1000,9 @@
 	eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
 	test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; }
 	;;
+    mips64el:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
     openrisc*:Linux:*:*)
 	echo or1k-unknown-linux-${LIBC}
 	exit ;;
@@ -1001,6 +1035,9 @@
     ppcle:Linux:*:*)
 	echo powerpcle-unknown-linux-${LIBC}
 	exit ;;
+    riscv32:Linux:*:* | riscv64:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	exit ;;
     s390:Linux:*:* | s390x:Linux:*:*)
 	echo ${UNAME_MACHINE}-ibm-linux-${LIBC}
 	exit ;;
@@ -1020,7 +1057,7 @@
 	echo ${UNAME_MACHINE}-dec-linux-${LIBC}
 	exit ;;
     x86_64:Linux:*:*)
-	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+	echo ${UNAME_MACHINE}-pc-linux-${LIBC}
 	exit ;;
     xtensa*:Linux:*:*)
 	echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
@@ -1099,7 +1136,7 @@
 	# uname -m prints for DJGPP always 'pc', but it prints nothing about
 	# the processor, so we play safe by assuming i586.
 	# Note: whatever this is, it MUST be the same as what config.sub
-	# prints for the "djgpp" host, or else GDB configury will decide that
+	# prints for the "djgpp" host, or else GDB configure will decide that
 	# this is a cross-build.
 	echo i586-pc-msdosdjgpp
 	exit ;;
@@ -1248,6 +1285,9 @@
     SX-8R:SUPER-UX:*:*)
 	echo sx8r-nec-superux${UNAME_RELEASE}
 	exit ;;
+    SX-ACE:SUPER-UX:*:*)
+	echo sxace-nec-superux${UNAME_RELEASE}
+	exit ;;
     Power*:Rhapsody:*:*)
 	echo powerpc-apple-rhapsody${UNAME_RELEASE}
 	exit ;;
@@ -1261,9 +1301,9 @@
 	    UNAME_PROCESSOR=powerpc
 	fi
 	if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then
-	    if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+	    if [ "$CC_FOR_BUILD" != no_compiler_found ]; then
 		if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
-		    (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+		    (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
 		    grep IS_64BIT_ARCH >/dev/null
 		then
 		    case $UNAME_PROCESSOR in
@@ -1285,7 +1325,7 @@
 	exit ;;
     *:procnto*:*:* | *:QNX:[0123456789]*:*)
 	UNAME_PROCESSOR=`uname -p`
-	if test "$UNAME_PROCESSOR" = "x86"; then
+	if test "$UNAME_PROCESSOR" = x86; then
 		UNAME_PROCESSOR=i386
 		UNAME_MACHINE=pc
 	fi
@@ -1316,7 +1356,7 @@
 	# "uname -m" is not consistent, so use $cputype instead. 386
 	# is converted to i386 for consistency with other x86
 	# operating systems.
-	if test "$cputype" = "386"; then
+	if test "$cputype" = 386; then
 	    UNAME_MACHINE=i386
 	else
 	    UNAME_MACHINE="$cputype"
@@ -1358,7 +1398,7 @@
 	echo i386-pc-xenix
 	exit ;;
     i*86:skyos:*:*)
-	echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+	echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE} | sed -e 's/ .*$//'`
 	exit ;;
     i*86:rdos:*:*)
 	echo ${UNAME_MACHINE}-pc-rdos
@@ -1369,23 +1409,25 @@
     x86_64:VMkernel:*:*)
 	echo ${UNAME_MACHINE}-unknown-esx
 	exit ;;
+    amd64:Isilon\ OneFS:*:*)
+	echo x86_64-unknown-onefs
+	exit ;;
 esac
 
 cat >&2 <<EOF
 $0: unable to guess system type
 
-This script, last modified $timestamp, has failed to recognize
-the operating system you are using. It is advised that you
-download the most up to date version of the config scripts from
+This script (version $timestamp), has failed to recognize the
+operating system you are using. If your script is old, overwrite
+config.guess and config.sub with the latest versions from:
 
-  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
 and
-  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub
 
-If the version you run ($0) is already up to date, please
-send the following data and any information you think might be
-pertinent to <config-patches@gnu.org> in order to provide the needed
-information to handle your system.
+If $0 has already been updated, send the following data and any
+information you think might be pertinent to config-patches@gnu.org to
+provide the necessary information to handle your system.
 
 config.guess timestamp = $timestamp
 
diff --git a/build-aux/config.sub b/build-aux/config.sub
index 0ccff77..dd2ca93 100755
--- a/build-aux/config.sub
+++ b/build-aux/config.sub
@@ -1,8 +1,8 @@
 #! /bin/sh
 # Configuration validation subroutine script.
-#   Copyright 1992-2014 Free Software Foundation, Inc.
+#   Copyright 1992-2016 Free Software Foundation, Inc.
 
-timestamp='2014-05-01'
+timestamp='2016-11-04'
 
 # This file is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by
@@ -25,7 +25,7 @@
 # of the GNU General Public License, version 3 ("GPLv3").
 
 
-# Please send patches with a ChangeLog entry to config-patches@gnu.org.
+# Please send patches to <config-patches@gnu.org>.
 #
 # Configuration subroutine to validate and canonicalize a configuration type.
 # Supply the specified configuration type as an argument.
@@ -33,7 +33,7 @@
 # Otherwise, we print the canonical config type on stdout and succeed.
 
 # You can get the latest version of this script from:
-# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub
 
 # This file is supposed to be the same for all GNU packages
 # and recognize all the CPU types, system types and aliases
@@ -53,8 +53,7 @@
 me=`echo "$0" | sed -e 's,.*/,,'`
 
 usage="\
-Usage: $0 [OPTION] CPU-MFR-OPSYS
-       $0 [OPTION] ALIAS
+Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS
 
 Canonicalize a configuration name.
 
@@ -68,7 +67,7 @@
 version="\
 GNU config.sub ($timestamp)
 
-Copyright 1992-2014 Free Software Foundation, Inc.
+Copyright 1992-2016 Free Software Foundation, Inc.
 
 This is free software; see the source for copying conditions.  There is NO
 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -117,8 +116,8 @@
 case $maybe_os in
   nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
   linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
-  knetbsd*-gnu* | netbsd*-gnu* | \
-  kopensolaris*-gnu* | \
+  knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \
+  kopensolaris*-gnu* | cloudabi*-eabi* | \
   storm-chaos* | os2-emx* | rtmk-nova*)
     os=-$maybe_os
     basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
@@ -255,12 +254,13 @@
 	| arc | arceb \
 	| arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \
 	| avr | avr32 \
+	| ba \
 	| be32 | be64 \
 	| bfin \
 	| c4x | c8051 | clipper \
 	| d10v | d30v | dlx | dsp16xx \
-	| epiphany \
-	| fido | fr30 | frv \
+	| e2k | epiphany \
+	| fido | fr30 | frv | ft32 \
 	| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
 	| hexagon \
 	| i370 | i860 | i960 | ia64 \
@@ -301,10 +301,12 @@
 	| open8 | or1k | or1knd | or32 \
 	| pdp10 | pdp11 | pj | pjl \
 	| powerpc | powerpc64 | powerpc64le | powerpcle \
+	| pru \
 	| pyramid \
+	| riscv32 | riscv64 \
 	| rl78 | rx \
 	| score \
-	| sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+	| sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
 	| sh64 | sh64le \
 	| sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
 	| sparcv8 | sparcv9 | sparcv9b | sparcv9v \
@@ -312,6 +314,7 @@
 	| tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
 	| ubicom32 \
 	| v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
+	| visium \
 	| we32k \
 	| x86 | xc16x | xstormy16 | xtensa \
 	| z8k | z80)
@@ -326,6 +329,9 @@
 	c6x)
 		basic_machine=tic6x-unknown
 		;;
+	leon|leon[3-9])
+		basic_machine=sparc-$basic_machine
+		;;
 	m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip)
 		basic_machine=$basic_machine-unknown
 		os=-none
@@ -371,12 +377,13 @@
 	| alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \
 	| arm-*  | armbe-* | armle-* | armeb-* | armv*-* \
 	| avr-* | avr32-* \
+	| ba-* \
 	| be32-* | be64-* \
 	| bfin-* | bs2000-* \
 	| c[123]* | c30-* | [cjt]90-* | c4x-* \
 	| c8051-* | clipper-* | craynv-* | cydra-* \
 	| d10v-* | d30v-* | dlx-* \
-	| elxsi-* \
+	| e2k-* | elxsi-* \
 	| f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
 	| h8300-* | h8500-* \
 	| hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
@@ -422,13 +429,15 @@
 	| orion-* \
 	| pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
 	| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
+	| pru-* \
 	| pyramid-* \
+	| riscv32-* | riscv64-* \
 	| rl78-* | romp-* | rs6000-* | rx-* \
 	| sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
 	| shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
 	| sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
 	| sparclite-* \
-	| sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \
+	| sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \
 	| tahoe-* \
 	| tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
 	| tile*-* \
@@ -436,6 +445,7 @@
 	| ubicom32-* \
 	| v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
 	| vax-* \
+	| visium-* \
 	| we32k-* \
 	| x86-* | x86_64-* | xc16x-* | xps100-* \
 	| xstormy16-* | xtensa*-* \
@@ -512,6 +522,9 @@
 		basic_machine=i386-pc
 		os=-aros
 		;;
+	asmjs)
+		basic_machine=asmjs-unknown
+		;;
 	aux)
 		basic_machine=m68k-apple
 		os=-aux
@@ -632,6 +645,14 @@
 		basic_machine=m68k-bull
 		os=-sysv3
 		;;
+	e500v[12])
+		basic_machine=powerpc-unknown
+		os=$os"spe"
+		;;
+	e500v[12]-*)
+		basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+		os=$os"spe"
+		;;
 	ebmon29k)
 		basic_machine=a29k-amd
 		os=-ebmon
@@ -773,6 +794,9 @@
 		basic_machine=m68k-isi
 		os=-sysv
 		;;
+	leon-*|leon[3-9]-*)
+		basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'`
+		;;
 	m68knommu)
 		basic_machine=m68k-unknown
 		os=-linux
@@ -828,6 +852,10 @@
 		basic_machine=powerpc-unknown
 		os=-morphos
 		;;
+	moxiebox)
+		basic_machine=moxie-unknown
+		os=-moxiebox
+		;;
 	msdos)
 		basic_machine=i386-pc
 		os=-msdos
@@ -1004,7 +1032,7 @@
 	ppc-* | ppcbe-*)
 		basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
 		;;
-	ppcle | powerpclittle | ppc-le | powerpc-little)
+	ppcle | powerpclittle)
 		basic_machine=powerpcle-unknown
 		;;
 	ppcle-* | powerpclittle-*)
@@ -1014,7 +1042,7 @@
 		;;
 	ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
 		;;
-	ppc64le | powerpc64little | ppc64-le | powerpc64-little)
+	ppc64le | powerpc64little)
 		basic_machine=powerpc64le-unknown
 		;;
 	ppc64le-* | powerpc64little-*)
@@ -1360,27 +1388,28 @@
 	      | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
 	      | -sym* | -kopensolaris* | -plan9* \
 	      | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
-	      | -aos* | -aros* \
+	      | -aos* | -aros* | -cloudabi* | -sortix* \
 	      | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
 	      | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
 	      | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
-	      | -bitrig* | -openbsd* | -solidbsd* \
+	      | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \
 	      | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
 	      | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
 	      | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
 	      | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
 	      | -chorusos* | -chorusrdb* | -cegcc* \
 	      | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
-	      | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
+	      | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
 	      | -linux-newlib* | -linux-musl* | -linux-uclibc* \
-	      | -uxpv* | -beos* | -mpeix* | -udk* \
+	      | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \
 	      | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
 	      | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
 	      | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
 	      | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
 	      | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
 	      | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
-	      | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* | -tirtos*)
+	      | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \
+	      | -onefs* | -tirtos* | -phoenix* | -fuchsia*)
 	# Remember, each alternative MUST END IN *, to match a version number.
 		;;
 	-qnx*)
@@ -1404,9 +1433,6 @@
 	-mac*)
 		os=`echo $os | sed -e 's|mac|macos|'`
 		;;
-	# Apple iOS
-	-ios*)
-		;;
 	-linux-dietlibc)
 		os=-linux-dietlibc
 		;;
@@ -1515,6 +1541,8 @@
 		;;
 	-nacl*)
 		;;
+	-ios)
+		;;
 	-none)
 		;;
 	*)
diff --git a/configure.ac b/configure.ac
index 104fd99..9573c30 100644
--- a/configure.ac
+++ b/configure.ac
@@ -171,7 +171,6 @@
 if test "x$CFLAGS" = "x" ; then
   no_CFLAGS="yes"
   if test "x$GCC" = "xyes" ; then
-dnl    JE_CFLAGS_APPEND([-std=gnu99])
     JE_CFLAGS_APPEND([-std=gnu11])
     if test "x$je_cv_cflags_appended" = "x-std=gnu11" ; then
       AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
@@ -355,7 +354,6 @@
 case "${host}" in
   *-*-darwin* | *-*-ios*)
 	abi="macho"
-	AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
 	RPATH=""
 	LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES"
 	so="dylib"
@@ -368,29 +366,35 @@
   *-*-freebsd*)
 	abi="elf"
 	AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ])
-	AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
 	force_lazy_lock="1"
 	;;
   *-*-dragonfly*)
 	abi="elf"
-	AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
 	;;
   *-*-openbsd*)
 	abi="elf"
-	AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
 	force_tls="0"
 	;;
   *-*-bitrig*)
 	abi="elf"
-	AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
 	;;
-  *-*-linux*)
+  *-*-linux-android)
 	dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
 	CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
 	abi="elf"
 	AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
 	AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
-	AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ])
+	AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
+	AC_DEFINE([JEMALLOC_C11ATOMICS])
+	force_tls="0"
+	default_munmap="0"
+	;;
+  *-*-linux* | *-*-kfreebsd*)
+	dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
+	CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
+	abi="elf"
+	AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
+	AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
 	AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
 	AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ])
 	default_munmap="0"
@@ -407,11 +411,9 @@
                           [abi="elf"],
                           [abi="aout"])
 	AC_MSG_RESULT([$abi])
-	AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
 	;;
   *-*-solaris2*)
 	abi="elf"
-	AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
 	RPATH='-Wl,-R,$(1)'
 	dnl Solaris needs this for sigwait().
 	CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS"
@@ -1327,6 +1329,14 @@
   AC_CHECK_LIB([pthread], [pthread_create], [LIBS="$LIBS -lpthread"],
                [AC_SEARCH_LIBS([pthread_create], , ,
                                AC_MSG_ERROR([libpthread is missing]))])
+  JE_COMPILABLE([pthread_atfork(3)], [
+#include <pthread.h>
+], [
+  pthread_atfork((void *)0, (void *)0, (void *)0);
+], [je_cv_pthread_atfork])
+  if test "x${je_cv_pthread_atfork}" = "xyes" ; then
+    AC_DEFINE([JEMALLOC_HAVE_PTHREAD_ATFORK], [ ])
+  fi
 fi
 
 CPPFLAGS="$CPPFLAGS -D_REENTRANT"
@@ -1386,20 +1396,33 @@
   AC_DEFINE([JEMALLOC_HAVE_MACH_ABSOLUTE_TIME])
 fi
 
-dnl Check if syscall(2) is usable.  Treat warnings as errors, so that e.g. OS X
-dnl 10.12's deprecation warning prevents use.
-SAVED_CFLAGS="${CFLAGS}"
-JE_CFLAGS_APPEND([-Werror])
-JE_COMPILABLE([syscall(2)], [
+dnl Use syscall(2) (if available) by default.
+AC_ARG_ENABLE([syscall],
+  [AS_HELP_STRING([--disable-syscall], [Disable use of syscall(2)])],
+[if test "x$enable_syscall" = "xno" ; then
+  enable_syscall="0"
+else
+  enable_syscall="1"
+fi
+],
+[enable_syscall="1"]
+)
+if test "x$enable_syscall" = "x1" ; then
+  dnl Check if syscall(2) is usable.  Treat warnings as errors, so that e.g. OS
+  dnl X 10.12's deprecation warning prevents use.
+  SAVED_CFLAGS="${CFLAGS}"
+  JE_CFLAGS_APPEND([-Werror])
+  JE_COMPILABLE([syscall(2)], [
 #include <sys/syscall.h>
 #include <unistd.h>
 ], [
 	syscall(SYS_write, 2, "hello", 5);
 ],
-              [je_cv_syscall])
-CFLAGS="${SAVED_CFLAGS}"
-if test "x$je_cv_syscall" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_HAVE_SYSCALL], [ ])
+                [je_cv_syscall])
+  CFLAGS="${SAVED_CFLAGS}"
+  if test "x$je_cv_syscall" = "xyes" ; then
+    AC_DEFINE([JEMALLOC_USE_SYSCALL], [ ])
+  fi
 fi
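
The probe above is only attempted when --disable-syscall was not given; if it
succeeds, JEMALLOC_USE_SYSCALL is defined.  A minimal sketch of what that
definition gates, using a hypothetical helper (this is not jemalloc's actual
wrapper code):

#include <unistd.h>
#ifdef JEMALLOC_USE_SYSCALL
#include <sys/syscall.h>
#endif

/* Hypothetical helper: write to a file descriptor, preferring a raw
 * syscall(2) when the configure probe deemed it usable. */
static ssize_t
write_fd(int fd, const void *buf, size_t count)
{
#ifdef JEMALLOC_USE_SYSCALL
	return (syscall(SYS_write, fd, buf, count));
#else
	return (write(fd, buf, count));
#endif
}
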
 
 dnl Check if the GNU-specific secure_getenv function exists.
@@ -1599,12 +1622,41 @@
 JE_COMPILABLE([madvise(2)], [
 #include <sys/mman.h>
 ], [
-	{
-		madvise((void *)0, 0, 0);
-	}
+	madvise((void *)0, 0, 0);
 ], [je_cv_madvise])
 if test "x${je_cv_madvise}" = "xyes" ; then
   AC_DEFINE([JEMALLOC_HAVE_MADVISE], [ ])
+
+  dnl Check for madvise(..., MADV_FREE).
+  JE_COMPILABLE([madvise(..., MADV_FREE)], [
+#include <sys/mman.h>
+], [
+	madvise((void *)0, 0, MADV_FREE);
+], [je_cv_madv_free])
+  if test "x${je_cv_madv_free}" = "xyes" ; then
+    AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+  fi
+
+  dnl Check for madvise(..., MADV_DONTNEED).
+  JE_COMPILABLE([madvise(..., MADV_DONTNEED)], [
+#include <sys/mman.h>
+], [
+	madvise((void *)0, 0, MADV_DONTNEED);
+], [je_cv_madv_dontneed])
+  if test "x${je_cv_madv_dontneed}" = "xyes" ; then
+    AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ])
+  fi
+
+  dnl Check for madvise(..., MADV_[NO]HUGEPAGE).
+  JE_COMPILABLE([madvise(..., MADV_[[NO]]HUGEPAGE)], [
+#include <sys/mman.h>
+], [
+	madvise((void *)0, 0, MADV_HUGEPAGE);
+	madvise((void *)0, 0, MADV_NOHUGEPAGE);
+], [je_cv_thp])
+  if test "x${je_cv_thp}" = "xyes" ; then
+    AC_DEFINE([JEMALLOC_THP], [ ])
+  fi
 fi
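
Taken together, these checks produce the purge-related macros documented in
jemalloc_internal_defs.h.in below.  A hedged sketch of how a consumer could
select among them (a hypothetical helper, not the actual pages_purge()
implementation):

#include <stddef.h>
#include <sys/mman.h>

/* Prefer MADV_FREE (pages merely marked reusable) over MADV_DONTNEED
 * (pages discarded immediately and demand-zeroed on next touch). */
static int
purge_hint(void *addr, size_t size)
{
#if defined(JEMALLOC_PURGE_MADVISE_FREE)
	return (madvise(addr, size, MADV_FREE));
#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
	return (madvise(addr, size, MADV_DONTNEED));
#else
	(void)addr; (void)size;
	return (-1);	/* No purge hint available. */
#endif
}
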
 
 dnl ============================================================================
@@ -1669,10 +1721,15 @@
 
 JE_COMPILABLE([Darwin os_unfair_lock_*()], [
 #include <os/lock.h>
+#include <AvailabilityMacros.h>
 ], [
+	#if MAC_OS_X_VERSION_MIN_REQUIRED < 101200
+	#error "os_unfair_lock is not supported"
+	#else
 	os_unfair_lock lock = OS_UNFAIR_LOCK_INIT;
 	os_unfair_lock_lock(&lock);
 	os_unfair_lock_unlock(&lock);
+	#endif
 ], [je_cv_os_unfair_lock])
 if test "x${je_cv_os_unfair_lock}" = "xyes" ; then
   AC_DEFINE([JEMALLOC_OS_UNFAIR_LOCK], [ ])
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index 3d2e721..d9c8345 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -406,7 +406,7 @@
 
 	mib[2] = i;
 	len = sizeof(bin_size);
-	mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
+	mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0);
 	/* Do something with bin_size... */
 }]]></programlisting></para>
 
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index f39ce54..ce4e602 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -191,6 +191,14 @@
 	extent_node_t		node;
 
 	/*
+	 * True if memory could be backed by transparent huge pages.  This is
+	 * only directly relevant to Linux, since it is the only supported
+	 * platform on which jemalloc interacts with explicit transparent huge
+	 * page controls.
+	 */
+	bool			hugepage;
+
+	/*
 	 * Map of pages within chunk that keeps track of free/large/small.  The
 	 * first map_bias entries are omitted, since the chunk header does not
 	 * need to be tracked in the map.  This omission saves a header page
@@ -374,10 +382,12 @@
 
 	dss_prec_t		dss_prec;
 
-
 	/* Extant arena chunks. */
 	ql_head(extent_node_t)	achunks;
 
+	/* Extent serial number generator state. */
+	size_t			extent_sn_next;
+
 	/*
 	 * In order to avoid rapid chunk allocation/deallocation when an arena
 	 * oscillates right on the cusp of needing a new chunk, cache the most
@@ -453,9 +463,9 @@
 	 * orderings are needed, which is why there are two trees with the same
 	 * contents.
 	 */
-	extent_tree_t		chunks_szad_cached;
+	extent_tree_t		chunks_szsnad_cached;
 	extent_tree_t		chunks_ad_cached;
-	extent_tree_t		chunks_szad_retained;
+	extent_tree_t		chunks_szsnad_retained;
 	extent_tree_t		chunks_ad_retained;
 
 	malloc_mutex_t		chunks_mtx;
@@ -522,13 +532,13 @@
 extent_node_t	*arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
 void	arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
 void	*arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool *zero);
+    size_t alignment, size_t *sn, bool *zero);
 void	arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
-    size_t usize);
+    size_t usize, size_t sn);
 void	arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
     void *chunk, size_t oldsize, size_t usize);
 void	arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
-    void *chunk, size_t oldsize, size_t usize);
+    void *chunk, size_t oldsize, size_t usize, size_t sn);
 bool	arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
     void *chunk, size_t oldsize, size_t usize, bool *zero);
 ssize_t	arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
@@ -601,6 +611,7 @@
 unsigned	arena_nthreads_get(arena_t *arena, bool internal);
 void	arena_nthreads_inc(arena_t *arena, bool internal);
 void	arena_nthreads_dec(arena_t *arena, bool internal);
+size_t	arena_extent_sn_next(arena_t *arena);
 arena_t	*arena_new(tsdn_t *tsdn, unsigned ind);
 void	arena_boot(void);
 void	arena_prefork0(tsdn_t *tsdn, arena_t *arena);
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index 38c9a01..50b9904 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -58,15 +58,16 @@
 void	*chunk_alloc_base(size_t size);
 void	*chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool *commit, bool dalloc_node);
+    size_t *sn, bool *zero, bool *commit, bool dalloc_node);
 void	*chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool *commit);
+    size_t *sn, bool *zero, bool *commit);
 void	chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
-void	chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed,
+    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
     bool committed);
+void	chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
+    bool zeroed, bool committed);
 bool	chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
     size_t length);
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index 49d76a5..168ffe6 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -19,6 +19,20 @@
 	size_t			en_size;
 
 	/*
+	 * Serial number (potentially non-unique).
+	 *
+	 * In principle serial numbers can wrap around on 32-bit systems if
+	 * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
+	 * back on address comparison for equal serial numbers, stable (if
+	 * imperfect) ordering is maintained.
+	 *
+	 * Serial numbers may not be unique even in the absence of wrap-around,
+	 * e.g. when splitting an extent and assigning the same serial number to
+	 * both resulting adjacent extents.
+	 */
+	size_t			en_sn;
+
+	/*
 	 * The zeroed flag is used by chunk recycling code to track whether
 	 * memory is zero-filled.
 	 */
@@ -45,8 +59,8 @@
 	qr(extent_node_t)	cc_link;
 
 	union {
-		/* Linkage for the size/address-ordered tree. */
-		rb_node(extent_node_t)	szad_link;
+		/* Linkage for the size/sn/address-ordered tree. */
+		rb_node(extent_node_t)	szsnad_link;
 
 		/* Linkage for arena's achunks, huge, and node_cache lists. */
 		ql_elm(extent_node_t)	ql_link;
@@ -61,7 +75,7 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
+rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
 
 rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
 
@@ -73,6 +87,7 @@
 arena_t	*extent_node_arena_get(const extent_node_t *node);
 void	*extent_node_addr_get(const extent_node_t *node);
 size_t	extent_node_size_get(const extent_node_t *node);
+size_t	extent_node_sn_get(const extent_node_t *node);
 bool	extent_node_zeroed_get(const extent_node_t *node);
 bool	extent_node_committed_get(const extent_node_t *node);
 bool	extent_node_achunk_get(const extent_node_t *node);
@@ -80,12 +95,13 @@
 void	extent_node_arena_set(extent_node_t *node, arena_t *arena);
 void	extent_node_addr_set(extent_node_t *node, void *addr);
 void	extent_node_size_set(extent_node_t *node, size_t size);
+void	extent_node_sn_set(extent_node_t *node, size_t sn);
 void	extent_node_zeroed_set(extent_node_t *node, bool zeroed);
 void	extent_node_committed_set(extent_node_t *node, bool committed);
 void	extent_node_achunk_set(extent_node_t *node, bool achunk);
 void	extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
 void	extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
-    size_t size, bool zeroed, bool committed);
+    size_t size, size_t sn, bool zeroed, bool committed);
 void	extent_node_dirty_linkage_init(extent_node_t *node);
 void	extent_node_dirty_insert(extent_node_t *node,
     arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
@@ -114,6 +130,13 @@
 	return (node->en_size);
 }
 
+JEMALLOC_INLINE size_t
+extent_node_sn_get(const extent_node_t *node)
+{
+
+	return (node->en_sn);
+}
+
 JEMALLOC_INLINE bool
 extent_node_zeroed_get(const extent_node_t *node)
 {
@@ -165,6 +188,13 @@
 }
 
 JEMALLOC_INLINE void
+extent_node_sn_set(extent_node_t *node, size_t sn)
+{
+
+	node->en_sn = sn;
+}
+
+JEMALLOC_INLINE void
 extent_node_zeroed_set(extent_node_t *node, bool zeroed)
 {
 
@@ -194,12 +224,13 @@
 
 JEMALLOC_INLINE void
 extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
-    bool zeroed, bool committed)
+    size_t sn, bool zeroed, bool committed)
 {
 
 	extent_node_arena_set(node, arena);
 	extent_node_addr_set(node, addr);
 	extent_node_size_set(node, size);
+	extent_node_sn_set(node, sn);
 	extent_node_zeroed_set(node, zeroed);
 	extent_node_committed_set(node, committed);
 	extent_node_achunk_set(node, false);
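
The szad-to-szsnad renames reflect the new ordering: size first, then serial
number, then address, so that among equally sized extents the older one is
preferred.  A sketch of that comparison using the accessors declared above
(illustrative only; the actual comparator lives in src/extent.c and may
additionally quantize sizes):

static int
extent_szsnad_comp_sketch(const extent_node_t *a, const extent_node_t *b)
{
	size_t a_size = extent_node_size_get(a), b_size = extent_node_size_get(b);
	size_t a_sn = extent_node_sn_get(a), b_sn = extent_node_sn_get(b);
	uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
	uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
	int ret;

	ret = (a_size > b_size) - (a_size < b_size);
	if (ret != 0)
		return (ret);
	ret = (a_sn > b_sn) - (a_sn < b_sn);
	if (ret != 0)
		return (ret);
	return ((a_addr > b_addr) - (a_addr < b_addr));
}
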
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index fdc8fef..e7ace7d 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -337,7 +337,7 @@
 
 /* Return the nearest aligned address at or below a. */
 #define	ALIGNMENT_ADDR2BASE(a, alignment)				\
-	((void *)((uintptr_t)(a) & (-(alignment))))
+	((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
 
 /* Return the offset between a and the nearest aligned address at or below a. */
 #define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
@@ -345,7 +345,7 @@
 
 /* Return the smallest alignment multiple that is >= s. */
 #define	ALIGNMENT_CEILING(s, alignment)					\
-	(((s) + (alignment - 1)) & (-(alignment)))
+	(((s) + (alignment - 1)) & ((~(alignment)) + 1))
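
The masks are rewritten using the two's-complement identity ~x + 1 == -x,
presumably to avoid applying unary minus to an unsigned operand (which some
compilers warn about, e.g. MSVC C4146).  A self-contained check of the macros'
behavior, assuming the definitions above:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & ((~(alignment)) + 1))

int
main(void)
{
	size_t alignment = 64;

	assert((~alignment) + 1 == (size_t)0 - alignment);
	assert(ALIGNMENT_CEILING((size_t)100, alignment) == 128);
	assert((uintptr_t)ALIGNMENT_ADDR2BASE(0x12345, alignment) == 0x12340);
	return (0);
}
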
 
 /* Declare a variable-length array. */
 #if __STDC_VERSION__ < 199901L
diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in
index 9b3dca5..def4ba5 100644
--- a/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -56,11 +56,6 @@
 #undef JEMALLOC_HAVE_BUILTIN_CLZ
 
 /*
- * Defined if madvise(2) is available.
- */
-#undef JEMALLOC_HAVE_MADVISE
-
-/*
  * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
  */
 #undef JEMALLOC_OS_UNFAIR_LOCK
@@ -71,8 +66,8 @@
  */
 #undef JEMALLOC_OSSPIN
 
-/* Defined if syscall(2) is available. */
-#undef JEMALLOC_HAVE_SYSCALL
+/* Defined if syscall(2) is usable. */
+#undef JEMALLOC_USE_SYSCALL
 
 /*
  * Defined if secure_getenv(3) is available.
@@ -84,6 +79,9 @@
  */
 #undef JEMALLOC_HAVE_ISSETUGID
 
+/* Defined if pthread_atfork(3) is available. */
+#undef JEMALLOC_HAVE_PTHREAD_ATFORK
+
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
@@ -252,18 +250,26 @@
 #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
 #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
 
+/* Defined if madvise(2) is available. */
+#undef JEMALLOC_HAVE_MADVISE
+
 /*
  * Methods for purging unused pages differ between operating systems.
  *
- *   madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
- *                                 such that new pages will be demand-zeroed if
- *                                 the address region is later touched.
- *   madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
- *                             unused, such that they will be discarded rather
- *                             than swapped out.
+ *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
+ *                             will be discarded rather than swapped out.
+ *   madvise(..., MADV_DONTNEED) : This immediately discards pages, such that
+ *                                 new pages will be demand-zeroed if the
+ *                                 address region is later touched.
  */
-#undef JEMALLOC_PURGE_MADVISE_DONTNEED
 #undef JEMALLOC_PURGE_MADVISE_FREE
+#undef JEMALLOC_PURGE_MADVISE_DONTNEED
+
+/*
+ * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
+ * arguments to madvise(2).
+ */
+#undef JEMALLOC_THP
 
 /* Define if operating system has alloca.h header. */
 #undef JEMALLOC_HAS_ALLOCA_H
diff --git a/include/jemalloc/internal/pages.h b/include/jemalloc/internal/pages.h
index e21effd..4ae9f15 100644
--- a/include/jemalloc/internal/pages.h
+++ b/include/jemalloc/internal/pages.h
@@ -16,6 +16,8 @@
 bool	pages_commit(void *addr, size_t size);
 bool	pages_decommit(void *addr, size_t size);
 bool	pages_purge(void *addr, size_t size);
+bool	pages_huge(void *addr, size_t size);
+bool	pages_nohuge(void *addr, size_t size);
 void	pages_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
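
pages_huge() and pages_nohuge() are the new entry points behind the
non-huge-page marking described in the ChangeLog.  One plausible shape for
them, assuming the JEMALLOC_THP configure result added in this release (the
actual implementations live in src/pages.c and may differ, e.g. in their
return-value conventions):

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

bool
pages_nohuge(void *addr, size_t size)
{
#ifdef JEMALLOC_THP
	/* Assume the jemalloc convention that true indicates failure. */
	return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
#else
	(void)addr; (void)size;
	return (false);
#endif
}
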
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index 87c8c9b..c1c6c40 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -36,6 +36,7 @@
 arena_decay_time_set
 arena_dss_prec_get
 arena_dss_prec_set
+arena_extent_sn_next
 arena_get
 arena_ichoose
 arena_init
@@ -218,6 +219,8 @@
 extent_node_prof_tctx_set
 extent_node_size_get
 extent_node_size_set
+extent_node_sn_get
+extent_node_sn_set
 extent_node_zeroed_get
 extent_node_zeroed_set
 extent_tree_ad_destroy
@@ -239,25 +242,25 @@
 extent_tree_ad_reverse_iter_recurse
 extent_tree_ad_reverse_iter_start
 extent_tree_ad_search
-extent_tree_szad_destroy
-extent_tree_szad_destroy_recurse
-extent_tree_szad_empty
-extent_tree_szad_first
-extent_tree_szad_insert
-extent_tree_szad_iter
-extent_tree_szad_iter_recurse
-extent_tree_szad_iter_start
-extent_tree_szad_last
-extent_tree_szad_new
-extent_tree_szad_next
-extent_tree_szad_nsearch
-extent_tree_szad_prev
-extent_tree_szad_psearch
-extent_tree_szad_remove
-extent_tree_szad_reverse_iter
-extent_tree_szad_reverse_iter_recurse
-extent_tree_szad_reverse_iter_start
-extent_tree_szad_search
+extent_tree_szsnad_destroy
+extent_tree_szsnad_destroy_recurse
+extent_tree_szsnad_empty
+extent_tree_szsnad_first
+extent_tree_szsnad_insert
+extent_tree_szsnad_iter
+extent_tree_szsnad_iter_recurse
+extent_tree_szsnad_iter_start
+extent_tree_szsnad_last
+extent_tree_szsnad_new
+extent_tree_szsnad_next
+extent_tree_szsnad_nsearch
+extent_tree_szsnad_prev
+extent_tree_szsnad_psearch
+extent_tree_szsnad_remove
+extent_tree_szsnad_reverse_iter
+extent_tree_szsnad_reverse_iter_recurse
+extent_tree_szsnad_reverse_iter_start
+extent_tree_szsnad_search
 ffs_llu
 ffs_lu
 ffs_u
@@ -394,7 +397,9 @@
 pages_boot
 pages_commit
 pages_decommit
+pages_huge
 pages_map
+pages_nohuge
 pages_purge
 pages_trim
 pages_unmap
diff --git a/include/jemalloc/internal/stats.h b/include/jemalloc/internal/stats.h
index b621817..04e7dae 100644
--- a/include/jemalloc/internal/stats.h
+++ b/include/jemalloc/internal/stats.h
@@ -175,25 +175,21 @@
 JEMALLOC_INLINE void
 stats_cactive_add(size_t size)
 {
-	UNUSED size_t cactive;
 
 	assert(size > 0);
 	assert((size & chunksize_mask) == 0);
 
-	cactive = atomic_add_z(&stats_cactive, size);
-	assert(cactive - size < cactive);
+	atomic_add_z(&stats_cactive, size);
 }
 
 JEMALLOC_INLINE void
 stats_cactive_sub(size_t size)
 {
-	UNUSED size_t cactive;
 
 	assert(size > 0);
 	assert((size & chunksize_mask) == 0);
 
-	cactive = atomic_sub_z(&stats_cactive, size);
-	assert(cactive + size > cactive);
+	atomic_sub_z(&stats_cactive, size);
 }
 #endif
 
diff --git a/include/jemalloc/internal/util.h b/include/jemalloc/internal/util.h
index aee00d6..4b56d65 100644
--- a/include/jemalloc/internal/util.h
+++ b/include/jemalloc/internal/util.h
@@ -41,8 +41,12 @@
 #define	MALLOC_PRINTF_BUFSIZE	4096
 
 /* Junk fill patterns. */
-#define	JEMALLOC_ALLOC_JUNK	((uint8_t)0xa5)
-#define	JEMALLOC_FREE_JUNK	((uint8_t)0x5a)
+#ifndef JEMALLOC_ALLOC_JUNK
+#  define JEMALLOC_ALLOC_JUNK	((uint8_t)0xa5)
+#endif
+#ifndef JEMALLOC_FREE_JUNK
+#  define JEMALLOC_FREE_JUNK	((uint8_t)0x5a)
+#endif
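
Wrapping the junk-fill bytes in #ifndef guards makes them overridable at build
time; a hypothetical override (the 0xab/0xba values are made up for
illustration) could be injected before this header is processed, e.g. via
CPPFLAGS:

/* Hypothetical build-time override; must be seen before util.h. */
#define JEMALLOC_ALLOC_JUNK	((uint8_t)0xab)
#define JEMALLOC_FREE_JUNK	((uint8_t)0xba)
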
 
 /*
  * Wrap a cpp argument that contains commas such that it isn't broken up into
diff --git a/include/jemalloc/internal/valgrind.h b/include/jemalloc/internal/valgrind.h
index 1a86808..877a142 100644
--- a/include/jemalloc/internal/valgrind.h
+++ b/include/jemalloc/internal/valgrind.h
@@ -36,13 +36,25 @@
 		    zero);						\
 	}								\
 } while (0)
-#define	JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize,	\
-    ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null,	\
-    zero) do {								\
+#define	JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr)		\
+    (false)
+#define	JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr)		\
+    ((ptr) != (old_ptr))
+#define	JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr)			\
+    (false)
+#define	JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr)			\
+    (ptr == NULL)
+#define	JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr)		\
+    (false)
+#define	JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr)		\
+    (old_ptr == NULL)
+#define	JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null,	\
+    old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do {		\
 	if (unlikely(in_valgrind)) {					\
 		size_t rzsize = p2rz(tsdn, ptr);			\
 									\
-		if (!maybe_moved || ptr == old_ptr) {			\
+		if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr,	\
+		    old_ptr)) {						\
 			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
 			    usize, rzsize);				\
 			if (zero && old_usize < usize) {		\
@@ -51,11 +63,13 @@
 				    old_usize), usize - old_usize);	\
 			}						\
 		} else {						\
-			if (!old_ptr_maybe_null || old_ptr != NULL) {	\
+			if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_##	\
+			    old_ptr_null(old_ptr)) {			\
 				valgrind_freelike_block(old_ptr,	\
 				    old_rzsize);			\
 			}						\
-			if (!ptr_maybe_null || ptr != NULL) {		\
+			if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_##	\
+			    ptr_null(ptr)) {				\
 				size_t copy_size = (old_usize < usize)	\
 				    ?  old_usize : usize;		\
 				size_t tail_size = usize - copy_size;	\
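
The realloc macro now takes literal no/maybe tokens and pastes them onto
helper names, so impossible cases reduce to a compile-time constant false
rather than a runtime flag.  A small self-contained illustration of the same
pattern (hypothetical names, unrelated to the Valgrind hooks themselves):

#include <stdbool.h>
#include <stdio.h>

#define	MOVED_no(ptr, old_ptr)		(false)
#define	MOVED_maybe(ptr, old_ptr)	((ptr) != (old_ptr))
#define	DID_MOVE(moved, ptr, old_ptr)	MOVED_##moved(ptr, old_ptr)

int
main(void)
{
	int x, y;

	/* The "no" variant is a compile-time constant, so the compiler can
	 * drop the dependent branch entirely; "maybe" is a real comparison. */
	printf("%d %d\n", DID_MOVE(no, &x, &y), DID_MOVE(maybe, &x, &y));
	return (0);
}
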
diff --git a/msvc/projects/vc2015/test_threads/test_threads.cpp b/msvc/projects/vc2015/test_threads/test_threads.cpp
old mode 100644
new mode 100755
index c8cb7d6..a3d1a79
--- a/msvc/projects/vc2015/test_threads/test_threads.cpp
+++ b/msvc/projects/vc2015/test_threads/test_threads.cpp
@@ -21,7 +21,7 @@
   je_malloc_conf = "narenas:3";
   int narenas = 0;
   size_t sz = sizeof(narenas);
-  je_mallctl("opt.narenas", &narenas, &sz, NULL, 0);
+  je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0);
   if (narenas != 3) {
     printf("Error: unexpected number of arenas: %d\n", narenas);
     return 1;
@@ -33,7 +33,7 @@
   je_malloc_stats_print(NULL, NULL, NULL);
   size_t allocated1;
   size_t sz1 = sizeof(allocated1);
-  je_mallctl("stats.active", &allocated1, &sz1, NULL, 0);
+  je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0);
   printf("\nPress Enter to start threads...\n");
   getchar();
   printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2);
@@ -78,7 +78,7 @@
   }
   je_malloc_stats_print(NULL, NULL, NULL);
   size_t allocated2;
-  je_mallctl("stats.active", &allocated2, &sz1, NULL, 0);
+  je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0);
   size_t leaked = allocated2 - allocated1;
   printf("\nDone. Leaked: %zd bytes\n", leaked);
   bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet)
diff --git a/src/arena.c b/src/arena.c
index e196b13..648a8da 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -38,8 +38,8 @@
     bool dirty, bool cleaned, bool decommitted);
 static void	arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
     arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
-static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
-    arena_run_t *run, arena_bin_t *bin);
+static void	arena_bin_lower_run(arena_t *arena, arena_run_t *run,
+    arena_bin_t *bin);
 
 /******************************************************************************/
 
@@ -55,8 +55,31 @@
 	return (arena_mapbits_size_decode(mapbits));
 }
 
+JEMALLOC_INLINE_C const extent_node_t *
+arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
+{
+	arena_chunk_t *chunk;
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+	return (&chunk->node);
+}
+
 JEMALLOC_INLINE_C int
-arena_run_addr_comp(const arena_chunk_map_misc_t *a,
+arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
+{
+	size_t a_sn, b_sn;
+
+	assert(a != NULL);
+	assert(b != NULL);
+
+	a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
+	b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));
+
+	return ((a_sn > b_sn) - (a_sn < b_sn));
+}
+
+JEMALLOC_INLINE_C int
+arena_ad_comp(const arena_chunk_map_misc_t *a,
     const arena_chunk_map_misc_t *b)
 {
 	uintptr_t a_miscelm = (uintptr_t)a;
@@ -68,9 +91,26 @@
 	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
 }
 
+JEMALLOC_INLINE_C int
+arena_snad_comp(const arena_chunk_map_misc_t *a,
+    const arena_chunk_map_misc_t *b)
+{
+	int ret;
+
+	assert(a != NULL);
+	assert(b != NULL);
+
+	ret = arena_sn_comp(a, b);
+	if (ret != 0)
+		return (ret);
+
+	ret = arena_ad_comp(a, b);
+	return (ret);
+}
+
 /* Generate pairing heap functions. */
 ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
-    ph_link, arena_run_addr_comp)
+    ph_link, arena_snad_comp)
 
 #ifdef JEMALLOC_JET
 #undef run_quantize_floor
@@ -529,7 +569,7 @@
 
 static bool
 arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
-    bool zero)
+    size_t sn, bool zero)
 {
 
 	/*
@@ -538,7 +578,7 @@
 	 * of runs is tracked individually, and upon chunk deallocation the
 	 * entire chunk is in a consistent commit state.
 	 */
-	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
+	extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
 	extent_node_achunk_set(&chunk->node, true);
 	return (chunk_register(tsdn, chunk, &chunk->node));
 }
@@ -548,28 +588,30 @@
     chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
 {
 	arena_chunk_t *chunk;
+	size_t sn;
 
 	malloc_mutex_unlock(tsdn, &arena->lock);
 
 	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
-	    NULL, chunksize, chunksize, zero, commit);
+	    NULL, chunksize, chunksize, &sn, zero, commit);
 	if (chunk != NULL && !*commit) {
 		/* Commit header. */
 		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
 		    LG_PAGE, arena->ind)) {
 			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
-			    (void *)chunk, chunksize, *zero, *commit);
+			    (void *)chunk, chunksize, sn, *zero, *commit);
 			chunk = NULL;
 		}
 	}
-	if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
+	if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
+	    *zero)) {
 		if (!*commit) {
 			/* Undo commit of header. */
 			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
 			    LG_PAGE, arena->ind);
 		}
 		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
-		    chunksize, *zero, *commit);
+		    chunksize, sn, *zero, *commit);
 		chunk = NULL;
 	}
 
@@ -583,13 +625,14 @@
 {
 	arena_chunk_t *chunk;
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+	size_t sn;
 
 	chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
-	    chunksize, zero, commit, true);
+	    chunksize, &sn, zero, commit, true);
 	if (chunk != NULL) {
-		if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
+		if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
 			chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
-			    chunksize, true);
+			    chunksize, sn, true);
 			return (NULL);
 		}
 	}
@@ -621,6 +664,8 @@
 	if (chunk == NULL)
 		return (NULL);
 
+	chunk->hugepage = true;
+
 	/*
 	 * Initialize the map to contain one maximal free untouched run.  Mark
 	 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
@@ -684,11 +729,14 @@
 static void
 arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
 {
+	size_t sn, hugepage;
 	bool committed;
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 
 	chunk_deregister(chunk, &chunk->node);
 
+	sn = extent_node_sn_get(&chunk->node);
+	hugepage = chunk->hugepage;
 	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
 	if (!committed) {
 		/*
@@ -701,9 +749,17 @@
 		chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
 		    arena->ind);
 	}
+	if (!hugepage) {
+		/*
+		 * Convert chunk back to the default state, so that all
+		 * subsequent chunk allocations start out with chunks that can
+		 * be backed by transparent huge pages.
+		 */
+		pages_huge(chunk, chunksize);
+	}
 
 	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
-	    committed);
+	    sn, committed);
 
 	if (config_stats) {
 		arena->stats.mapped -= chunksize;
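
The hugepage flag used above gives each arena chunk a simple lifecycle: a freshly allocated chunk is THP-eligible, the first purge inside the chunk opts the whole chunk out (see the purge path later in this file), and arena_chunk_discard() restores the kernel default before the mapping is recycled. A compressed, self-contained sketch of that lifecycle, with pages_huge()/pages_nohuge() replaced by no-op stubs:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct { bool hugepage; } chunk_t;

    /* No-op stand-ins for the madvise(2)-based pages_huge()/pages_nohuge(). */
    static void pages_huge_stub(void *chunk, size_t size)   { (void)chunk; (void)size; }
    static void pages_nohuge_stub(void *chunk, size_t size) { (void)chunk; (void)size; }

    #define CHUNKSIZE ((size_t)(2 << 20))   /* Made-up 2 MiB chunk size. */

    int
    main(void)
    {
        chunk_t c;

        /* Allocation: a fresh chunk starts out THP-eligible. */
        c.hugepage = true;

        /* First run purged within the chunk: opt the whole chunk out. */
        if (c.hugepage) {
            pages_nohuge_stub(&c, CHUNKSIZE);
            c.hugepage = false;
        }

        /*
         * Discard: restore the default so the recycled mapping can be
         * THP-backed again; the flag is set anew on the next allocation.
         */
        if (!c.hugepage)
            pages_huge_stub(&c, CHUNKSIZE);
        return (0);
    }
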
@@ -859,14 +915,14 @@
 
 static void *
 arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
-    size_t csize)
+    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
+    bool *zero, size_t csize)
 {
 	void *ret;
 	bool commit = true;
 
 	ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
-	    alignment, zero, &commit);
+	    alignment, sn, zero, &commit);
 	if (ret == NULL) {
 		/* Revert optimistic stats updates. */
 		malloc_mutex_lock(tsdn, &arena->lock);
@@ -883,7 +939,7 @@
 
 void *
 arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool *zero)
+    size_t alignment, size_t *sn, bool *zero)
 {
 	void *ret;
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
@@ -900,18 +956,19 @@
 	arena_nactive_add(arena, usize >> LG_PAGE);
 
 	ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
-	    alignment, zero, &commit, true);
+	    alignment, sn, zero, &commit, true);
 	malloc_mutex_unlock(tsdn, &arena->lock);
 	if (ret == NULL) {
 		ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
-		    usize, alignment, zero, csize);
+		    usize, alignment, sn, zero, csize);
 	}
 
 	return (ret);
 }
 
 void
-arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
+arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
+    size_t sn)
 {
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 	size_t csize;
@@ -924,7 +981,7 @@
 	}
 	arena_nactive_sub(arena, usize >> LG_PAGE);
 
-	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
+	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
 	malloc_mutex_unlock(tsdn, &arena->lock);
 }
 
@@ -948,7 +1005,7 @@
 
 void
 arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
-    size_t oldsize, size_t usize)
+    size_t oldsize, size_t usize, size_t sn)
 {
 	size_t udiff = oldsize - usize;
 	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
@@ -967,7 +1024,7 @@
 		    CHUNK_CEILING(usize));
 
 		chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-		    true);
+		    sn, true);
 	}
 	malloc_mutex_unlock(tsdn, &arena->lock);
 }
@@ -975,13 +1032,13 @@
 static bool
 arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
-    bool *zero, void *nchunk, size_t udiff, size_t cdiff)
+    size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
 {
 	bool err;
 	bool commit = true;
 
 	err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
-	    chunksize, zero, &commit) == NULL);
+	    chunksize, sn, zero, &commit) == NULL);
 	if (err) {
 		/* Revert optimistic stats updates. */
 		malloc_mutex_lock(tsdn, &arena->lock);
@@ -995,7 +1052,7 @@
 	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
 	    cdiff, true, arena->ind)) {
 		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
-		    *zero, true);
+		    *sn, *zero, true);
 		err = true;
 	}
 	return (err);
@@ -1010,6 +1067,7 @@
 	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
 	size_t udiff = usize - oldsize;
 	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+	size_t sn;
 	bool commit = true;
 
 	malloc_mutex_lock(tsdn, &arena->lock);
@@ -1022,16 +1080,16 @@
 	arena_nactive_add(arena, udiff >> LG_PAGE);
 
 	err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-	    chunksize, zero, &commit, true) == NULL);
+	    chunksize, &sn, zero, &commit, true) == NULL);
 	malloc_mutex_unlock(tsdn, &arena->lock);
 	if (err) {
 		err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
-		    &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
-		    cdiff);
+		    &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
+		    udiff, cdiff);
 	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
 	    cdiff, true, arena->ind)) {
 		chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-		    *zero, true);
+		    sn, *zero, true);
 		err = true;
 	}
 
@@ -1519,6 +1577,7 @@
 
 		if (rdelm == &chunkselm->rd) {
 			extent_node_t *chunkselm_next;
+			size_t sn;
 			bool zero, commit;
 			UNUSED void *chunk;
 
@@ -1536,8 +1595,8 @@
 			commit = false;
 			chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
 			    extent_node_addr_get(chunkselm),
-			    extent_node_size_get(chunkselm), chunksize, &zero,
-			    &commit, false);
+			    extent_node_size_get(chunkselm), chunksize, &sn,
+			    &zero, &commit, false);
 			assert(chunk == extent_node_addr_get(chunkselm));
 			assert(zero == extent_node_zeroed_get(chunkselm));
 			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
@@ -1634,6 +1693,17 @@
 			run_size = arena_mapbits_large_size_get(chunk, pageind);
 			npages = run_size >> LG_PAGE;
 
+			/*
+			 * If this is the first run purged within chunk, mark
+			 * the chunk as non-huge.  This will prevent all use of
+			 * transparent huge pages for this chunk until the chunk
+			 * as a whole is deallocated.
+			 */
+			if (chunk->hugepage) {
+				pages_nohuge(chunk, chunksize);
+				chunk->hugepage = false;
+			}
+
 			assert(pageind + npages <= chunk_npages);
 			assert(!arena_mapbits_decommitted_get(chunk, pageind));
 			assert(!arena_mapbits_decommitted_get(chunk,
@@ -1703,13 +1773,14 @@
 			    cc_link);
 			void *addr = extent_node_addr_get(chunkselm);
 			size_t size = extent_node_size_get(chunkselm);
+			size_t sn = extent_node_sn_get(chunkselm);
 			bool zeroed = extent_node_zeroed_get(chunkselm);
 			bool committed = extent_node_committed_get(chunkselm);
 			extent_node_dirty_remove(chunkselm);
 			arena_node_dalloc(tsdn, arena, chunkselm);
 			chunkselm = chunkselm_next;
 			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
-			    size, zeroed, committed);
+			    size, sn, zeroed, committed);
 		} else {
 			arena_chunk_t *chunk =
 			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
@@ -2315,7 +2386,7 @@
 				arena_dalloc_bin_run(tsdn, arena, chunk, run,
 				    bin);
 			} else
-				arena_bin_lower_run(arena, chunk, run, bin);
+				arena_bin_lower_run(arena, run, bin);
 		}
 		return (ret);
 	}
@@ -2820,16 +2891,18 @@
 }
 
 static void
-arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
-    arena_bin_t *bin)
+arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
 {
 
 	/*
-	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
-	 * non-full run.  It is okay to NULL runcur out rather than proactively
-	 * keeping it pointing at the lowest non-full run.
+	 * Make sure that if bin->runcur is non-NULL, it refers to the
+	 * oldest/lowest non-full run.  It is okay to NULL runcur out rather
+	 * than proactively keeping it pointing at the oldest/lowest non-full
+	 * run.
 	 */
-	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
+	if (bin->runcur != NULL &&
+	    arena_snad_comp(arena_run_to_miscelm(bin->runcur),
+	    arena_run_to_miscelm(run)) > 0) {
 		/* Switch runcur. */
 		if (bin->runcur->nfree > 0)
 			arena_bin_runs_insert(bin, bin->runcur);
@@ -2865,7 +2938,7 @@
 		arena_dissociate_bin_run(chunk, run, bin);
 		arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
 	} else if (run->nfree == 1 && run != bin->runcur)
-		arena_bin_lower_run(arena, chunk, run, bin);
+		arena_bin_lower_run(arena, run, bin);
 
 	if (config_stats) {
 		bin->stats.ndalloc++;
@@ -3452,6 +3525,13 @@
 	atomic_sub_u(&arena->nthreads[internal], 1);
 }
 
+size_t
+arena_extent_sn_next(arena_t *arena)
+{
+
+	return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
+}
+
 arena_t *
 arena_new(tsdn_t *tsdn, unsigned ind)
 {
@@ -3511,6 +3591,8 @@
 
 	ql_new(&arena->achunks);
 
+	arena->extent_sn_next = 0;
+
 	arena->spare = NULL;
 
 	arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
@@ -3532,9 +3614,9 @@
 	    WITNESS_RANK_ARENA_HUGE))
 		return (NULL);
 
-	extent_tree_szad_new(&arena->chunks_szad_cached);
+	extent_tree_szsnad_new(&arena->chunks_szsnad_cached);
 	extent_tree_ad_new(&arena->chunks_ad_cached);
-	extent_tree_szad_new(&arena->chunks_szad_retained);
+	extent_tree_szsnad_new(&arena->chunks_szsnad_retained);
 	extent_tree_ad_new(&arena->chunks_ad_retained);
 	if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
 	    WITNESS_RANK_ARENA_CHUNKS))
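
arena_extent_sn_next() above is the source of the serial numbers the new szsnad ordering keys on: each newly mapped chunk is stamped with the next value (see chunk_alloc_wrapper() further down), while recycled extents keep the sn they were created with, so a smaller sn reliably means older memory. The atomic_add_z(x, 1) - 1 expression is just fetch-and-increment written with an add-that-returns-the-new-value primitive; a minimal C11 equivalent (not jemalloc's atomic API):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Illustrative counterpart of arena->extent_sn_next. */
    static atomic_size_t extent_sn_next;

    static size_t
    extent_sn_alloc(void)
    {
        /*
         * jemalloc's atomic_add_z() returns the post-add value, hence the
         * "- 1" in the patch; C11 atomic_fetch_add() already returns the
         * pre-add value.
         */
        return (atomic_fetch_add(&extent_sn_next, 1));
    }

    int
    main(void)
    {
        size_t a = extent_sn_alloc();
        size_t b = extent_sn_alloc();

        printf("%zu %zu\n", a, b);      /* Prints "0 1". */
        return (0);
    }
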
diff --git a/src/base.c b/src/base.c
index 81b0801..5681a3f 100644
--- a/src/base.c
+++ b/src/base.c
@@ -5,7 +5,8 @@
 /* Data. */
 
 static malloc_mutex_t	base_mtx;
-static extent_tree_t	base_avail_szad;
+static size_t		base_extent_sn_next;
+static extent_tree_t	base_avail_szsnad;
 static extent_node_t	*base_nodes;
 static size_t		base_allocated;
 static size_t		base_resident;
@@ -39,6 +40,14 @@
 	base_nodes = node;
 }
 
+static void
+base_extent_node_init(extent_node_t *node, void *addr, size_t size)
+{
+	size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1;
+
+	extent_node_init(node, NULL, addr, size, sn, true, true);
+}
+
 static extent_node_t *
 base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
 {
@@ -68,7 +77,7 @@
 			base_resident += PAGE_CEILING(nsize);
 		}
 	}
-	extent_node_init(node, NULL, addr, csize, true, true);
+	base_extent_node_init(node, addr, csize);
 	return (node);
 }
 
@@ -92,12 +101,12 @@
 	csize = CACHELINE_CEILING(size);
 
 	usize = s2u(csize);
-	extent_node_init(&key, NULL, NULL, usize, false, false);
+	extent_node_init(&key, NULL, NULL, usize, 0, false, false);
 	malloc_mutex_lock(tsdn, &base_mtx);
-	node = extent_tree_szad_nsearch(&base_avail_szad, &key);
+	node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
 	if (node != NULL) {
 		/* Use existing space. */
-		extent_tree_szad_remove(&base_avail_szad, node);
+		extent_tree_szsnad_remove(&base_avail_szsnad, node);
 	} else {
 		/* Try to allocate more space. */
 		node = base_chunk_alloc(tsdn, csize);
@@ -111,7 +120,7 @@
 	if (extent_node_size_get(node) > csize) {
 		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
 		extent_node_size_set(node, extent_node_size_get(node) - csize);
-		extent_tree_szad_insert(&base_avail_szad, node);
+		extent_tree_szsnad_insert(&base_avail_szsnad, node);
 	} else
 		base_node_dalloc(tsdn, node);
 	if (config_stats) {
@@ -149,7 +158,8 @@
 
 	if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
 		return (true);
-	extent_tree_szad_new(&base_avail_szad);
+	base_extent_sn_next = 0;
+	extent_tree_szsnad_new(&base_avail_szsnad);
 	base_nodes = NULL;
 
 	return (false);
diff --git a/src/chunk.c b/src/chunk.c
index 07e26f7..c1c514a 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -50,9 +50,9 @@
  */
 
 static void	chunk_record(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed,
-    bool committed);
+    chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
+    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
+    bool zeroed, bool committed);
 
 /******************************************************************************/
 
@@ -183,33 +183,35 @@
 }
 
 /*
- * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
- * fits.
+ * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
+ * best fits.
  */
 static extent_node_t *
-chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, size_t size)
+chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
 {
 	extent_node_t key;
 
 	assert(size == CHUNK_CEILING(size));
 
-	extent_node_init(&key, arena, NULL, size, false, false);
-	return (extent_tree_szad_nsearch(chunks_szad, &key));
+	extent_node_init(&key, arena, NULL, size, 0, false, false);
+	return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
 }
 
 static void *
 chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
-    bool dalloc_node)
+    extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
+    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+    bool *commit, bool dalloc_node)
 {
 	void *ret;
 	extent_node_t *node;
 	size_t alloc_size, leadsize, trailsize;
 	bool zeroed, committed;
 
+	assert(CHUNK_CEILING(size) == size);
+	assert(alignment > 0);
 	assert(new_addr == NULL || alignment == chunksize);
+	assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
 	/*
 	 * Cached chunks use the node linkage embedded in their headers, in
 	 * which case dalloc_node is true, and new_addr is non-NULL because
@@ -217,7 +219,7 @@
 	 */
 	assert(dalloc_node || new_addr != NULL);
 
-	alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
+	alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
@@ -225,12 +227,11 @@
 	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
 	if (new_addr != NULL) {
 		extent_node_t key;
-		extent_node_init(&key, arena, new_addr, alloc_size, false,
+		extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
 		    false);
 		node = extent_tree_ad_search(chunks_ad, &key);
 	} else {
-		node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
-		    alloc_size);
+		node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
 	}
 	if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
 	    size)) {
@@ -243,6 +244,7 @@
 	assert(extent_node_size_get(node) >= leadsize + size);
 	trailsize = extent_node_size_get(node) - leadsize - size;
 	ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
+	*sn = extent_node_sn_get(node);
 	zeroed = extent_node_zeroed_get(node);
 	if (zeroed)
 		*zero = true;
@@ -257,13 +259,13 @@
 		return (NULL);
 	}
 	/* Remove node from the tree. */
-	extent_tree_szad_remove(chunks_szad, node);
+	extent_tree_szsnad_remove(chunks_szsnad, node);
 	extent_tree_ad_remove(chunks_ad, node);
 	arena_chunk_cache_maybe_remove(arena, node, cache);
 	if (leadsize != 0) {
 		/* Insert the leading space as a smaller chunk. */
 		extent_node_size_set(node, leadsize);
-		extent_tree_szad_insert(chunks_szad, node);
+		extent_tree_szsnad_insert(chunks_szsnad, node);
 		extent_tree_ad_insert(chunks_ad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 		node = NULL;
@@ -275,9 +277,9 @@
 			if (dalloc_node && node != NULL)
 				arena_node_dalloc(tsdn, arena, node);
 			malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
-			chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
-			    chunks_ad, cache, ret, size + trailsize, zeroed,
-			    committed);
+			chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
+			    chunks_ad, cache, ret, size + trailsize, *sn,
+			    zeroed, committed);
 			return (NULL);
 		}
 		/* Insert the trailing space as a smaller chunk. */
@@ -286,22 +288,22 @@
 			if (node == NULL) {
 				malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
 				chunk_record(tsdn, arena, chunk_hooks,
-				    chunks_szad, chunks_ad, cache, ret, size +
-				    trailsize, zeroed, committed);
+				    chunks_szsnad, chunks_ad, cache, ret, size
+				    + trailsize, *sn, zeroed, committed);
 				return (NULL);
 			}
 		}
 		extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
-		    trailsize, zeroed, committed);
-		extent_tree_szad_insert(chunks_szad, node);
+		    trailsize, *sn, zeroed, committed);
+		extent_tree_szsnad_insert(chunks_szsnad, node);
 		extent_tree_ad_insert(chunks_ad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 		node = NULL;
 	}
 	if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
 		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
-		chunk_record(tsdn, arena, chunk_hooks, chunks_szad, chunks_ad,
-		    cache, ret, size, zeroed, committed);
+		chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
+		    cache, ret, size, *sn, zeroed, committed);
 		return (NULL);
 	}
 	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -385,8 +387,8 @@
 
 void *
 chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
-    bool dalloc_node)
+    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+    bool *commit, bool dalloc_node)
 {
 	void *ret;
 
@@ -396,8 +398,8 @@
 	assert((alignment & chunksize_mask) == 0);
 
 	ret = chunk_recycle(tsdn, arena, chunk_hooks,
-	    &arena->chunks_szad_cached, &arena->chunks_ad_cached, true,
-	    new_addr, size, alignment, zero, commit, dalloc_node);
+	    &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
+	    new_addr, size, alignment, sn, zero, commit, dalloc_node);
 	if (ret == NULL)
 		return (NULL);
 	if (config_valgrind)
@@ -451,7 +453,8 @@
 
 static void *
 chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
+    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+    bool *commit)
 {
 	void *ret;
 
@@ -461,8 +464,8 @@
 	assert((alignment & chunksize_mask) == 0);
 
 	ret = chunk_recycle(tsdn, arena, chunk_hooks,
-	    &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
-	    new_addr, size, alignment, zero, commit, true);
+	    &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
+	    new_addr, size, alignment, sn, zero, commit, true);
 
 	if (config_stats && ret != NULL)
 		arena->stats.retained -= size;
@@ -472,14 +475,15 @@
 
 void *
 chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
+    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+    bool *commit)
 {
 	void *ret;
 
 	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
 
 	ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
-	    alignment, zero, commit);
+	    alignment, sn, zero, commit);
 	if (ret == NULL) {
 		if (chunk_hooks->alloc == chunk_alloc_default) {
 			/* Call directly to propagate tsdn. */
@@ -493,6 +497,8 @@
 		if (ret == NULL)
 			return (NULL);
 
+		*sn = arena_extent_sn_next(arena);
+
 		if (config_valgrind && chunk_hooks->alloc !=
 		    chunk_alloc_default)
 			JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
@@ -503,8 +509,8 @@
 
 static void
 chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
-    void *chunk, size_t size, bool zeroed, bool committed)
+    extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
+    void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
 {
 	bool unzeroed;
 	extent_node_t *node, *prev;
@@ -516,7 +522,7 @@
 
 	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
 	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
-	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
+	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
 	    false, false);
 	node = extent_tree_ad_nsearch(chunks_ad, &key);
 	/* Try to coalesce forward. */
@@ -528,15 +534,17 @@
 		/*
 		 * Coalesce chunk with the following address range.  This does
 		 * not change the position within chunks_ad, so only
-		 * remove/insert from/into chunks_szad.
+		 * remove/insert from/into chunks_szsnad.
 		 */
-		extent_tree_szad_remove(chunks_szad, node);
+		extent_tree_szsnad_remove(chunks_szsnad, node);
 		arena_chunk_cache_maybe_remove(arena, node, cache);
 		extent_node_addr_set(node, chunk);
 		extent_node_size_set(node, size + extent_node_size_get(node));
+		if (sn < extent_node_sn_get(node))
+			extent_node_sn_set(node, sn);
 		extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
 		    !unzeroed);
-		extent_tree_szad_insert(chunks_szad, node);
+		extent_tree_szsnad_insert(chunks_szsnad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 	} else {
 		/* Coalescing forward failed, so insert a new node. */
@@ -554,10 +562,10 @@
 			}
 			goto label_return;
 		}
-		extent_node_init(node, arena, chunk, size, !unzeroed,
+		extent_node_init(node, arena, chunk, size, sn, !unzeroed,
 		    committed);
 		extent_tree_ad_insert(chunks_ad, node);
-		extent_tree_szad_insert(chunks_szad, node);
+		extent_tree_szsnad_insert(chunks_szsnad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 	}
 
@@ -571,19 +579,21 @@
 		/*
 		 * Coalesce chunk with the previous address range.  This does
 		 * not change the position within chunks_ad, so only
-		 * remove/insert node from/into chunks_szad.
+		 * remove/insert node from/into chunks_szsnad.
 		 */
-		extent_tree_szad_remove(chunks_szad, prev);
+		extent_tree_szsnad_remove(chunks_szsnad, prev);
 		extent_tree_ad_remove(chunks_ad, prev);
 		arena_chunk_cache_maybe_remove(arena, prev, cache);
-		extent_tree_szad_remove(chunks_szad, node);
+		extent_tree_szsnad_remove(chunks_szsnad, node);
 		arena_chunk_cache_maybe_remove(arena, node, cache);
 		extent_node_addr_set(node, extent_node_addr_get(prev));
 		extent_node_size_set(node, extent_node_size_get(prev) +
 		    extent_node_size_get(node));
+		if (extent_node_sn_get(prev) < extent_node_sn_get(node))
+			extent_node_sn_set(node, extent_node_sn_get(prev));
 		extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
 		    extent_node_zeroed_get(node));
-		extent_tree_szad_insert(chunks_szad, node);
+		extent_tree_szsnad_insert(chunks_szsnad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 
 		arena_node_dalloc(tsdn, arena, prev);
@@ -595,7 +605,7 @@
 
 void
 chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *chunk, size_t size, bool committed)
+    void *chunk, size_t size, size_t sn, bool committed)
 {
 
 	assert(chunk != NULL);
@@ -603,8 +613,9 @@
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
-	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached,
-	    &arena->chunks_ad_cached, true, chunk, size, false, committed);
+	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
+	    &arena->chunks_ad_cached, true, chunk, size, sn, false,
+	    committed);
 	arena_maybe_purge(tsdn, arena);
 }
 
@@ -627,7 +638,7 @@
 
 void
 chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *chunk, size_t size, bool zeroed, bool committed)
+    void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
 {
 	bool err;
 
@@ -653,8 +664,9 @@
 	}
 	zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
 	    arena->ind);
-	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_retained,
-	    &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
+	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
+	    &arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
+	    committed);
 
 	if (config_stats)
 		arena->stats.retained += size;
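
When chunk_record() above coalesces a freed chunk with an adjacent cached or retained extent, the merged node keeps the smaller of the two serial numbers, so its age reflects its oldest constituent and the szsnad tree keeps treating it as old memory. A tiny sketch of that rule (simplified node type, not jemalloc's extent_node_t):

    #include <assert.h>
    #include <stddef.h>

    typedef struct { size_t size, sn; } node_t;     /* sn: smaller == older. */

    /* Merge an adjacent following extent into node, keeping the older sn. */
    static void
    coalesce(node_t *node, const node_t *next)
    {
        node->size += next->size;
        if (next->sn < node->sn)
            node->sn = next->sn;
    }

    int
    main(void)
    {
        node_t node = {1 << 21, 7}, next = {1 << 21, 3};

        coalesce(&node, &next);
        assert(node.size == (2 << 21) && node.sn == 3);
        return (0);
    }
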
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index 85a1354..ee3f838 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -162,7 +162,8 @@
 					    CHUNK_HOOKS_INITIALIZER;
 					chunk_dalloc_wrapper(tsdn, arena,
 					    &chunk_hooks, cpad, cpad_size,
-					    false, true);
+					    arena_extent_sn_next(arena), false,
+					    true);
 				}
 				if (*zero) {
 					JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
diff --git a/src/extent.c b/src/extent.c
index 9f5146e..218156c 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -3,42 +3,45 @@
 
 /******************************************************************************/
 
+/*
+ * Round down to the nearest chunk size that can actually be requested during
+ * normal huge allocation.
+ */
 JEMALLOC_INLINE_C size_t
 extent_quantize(size_t size)
 {
+	size_t ret;
+	szind_t ind;
 
-	/*
-	 * Round down to the nearest chunk size that can actually be requested
-	 * during normal huge allocation.
-	 */
-	return (index2size(size2index(size + 1) - 1));
-}
+	assert(size > 0);
 
-JEMALLOC_INLINE_C int
-extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
-{
-	int ret;
-	size_t a_qsize = extent_quantize(extent_node_size_get(a));
-	size_t b_qsize = extent_quantize(extent_node_size_get(b));
-
-	/*
-	 * Compare based on quantized size rather than size, in order to sort
-	 * equally useful extents only by address.
-	 */
-	ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
-	if (ret == 0) {
-		uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
-		uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
-
-		ret = (a_addr > b_addr) - (a_addr < b_addr);
+	ind = size2index(size + 1);
+	if (ind == 0) {
+		/* Avoid underflow. */
+		return (index2size(0));
 	}
-
+	ret = index2size(ind - 1);
+	assert(ret <= size);
 	return (ret);
 }
 
-/* Generate red-black tree functions. */
-rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
-    extent_szad_comp)
+JEMALLOC_INLINE_C int
+extent_sz_comp(const extent_node_t *a, const extent_node_t *b)
+{
+	size_t a_qsize = extent_quantize(extent_node_size_get(a));
+	size_t b_qsize = extent_quantize(extent_node_size_get(b));
+
+	return ((a_qsize > b_qsize) - (a_qsize < b_qsize));
+}
+
+JEMALLOC_INLINE_C int
+extent_sn_comp(const extent_node_t *a, const extent_node_t *b)
+{
+	size_t a_sn = extent_node_sn_get(a);
+	size_t b_sn = extent_node_sn_get(b);
+
+	return ((a_sn > b_sn) - (a_sn < b_sn));
+}
 
 JEMALLOC_INLINE_C int
 extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
@@ -49,5 +52,26 @@
 	return ((a_addr > b_addr) - (a_addr < b_addr));
 }
 
+JEMALLOC_INLINE_C int
+extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b)
+{
+	int ret;
+
+	ret = extent_sz_comp(a, b);
+	if (ret != 0)
+		return (ret);
+
+	ret = extent_sn_comp(a, b);
+	if (ret != 0)
+		return (ret);
+
+	ret = extent_ad_comp(a, b);
+	return (ret);
+}
+
+/* Generate red-black tree functions. */
+rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link,
+    extent_szsnad_comp)
+
 /* Generate red-black tree functions. */
 rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
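
extent_quantize() above rounds an extent size down to the largest size class that does not exceed it, and the new ind == 0 branch guards against indexing below the smallest class. A much-simplified illustration with a made-up four-entry class table (jemalloc's real classes come from size2index()/index2size()):

    #include <assert.h>
    #include <stddef.h>

    /* Hypothetical, tiny size-class table, smallest to largest. */
    static const size_t classes[] = {4096, 8192, 12288, 16384};
    #define NCLASSES (sizeof(classes) / sizeof(classes[0]))

    /* Index of the smallest class that can hold size. */
    static size_t
    class_ceil_index(size_t size)
    {
        size_t i;

        for (i = 0; i < NCLASSES; i++) {
            if (classes[i] >= size)
                return (i);
        }
        return (NCLASSES - 1);
    }

    /* Round size down to the nearest class, mirroring extent_quantize(). */
    static size_t
    quantize_floor(size_t size)
    {
        size_t ind = class_ceil_index(size + 1);

        if (ind == 0)
            return (classes[0]);        /* Avoid index underflow. */
        return (classes[ind - 1]);
    }

    int
    main(void)
    {
        assert(quantize_floor(12288) == 12288);     /* Already a class size. */
        assert(quantize_floor(13000) == 12288);     /* Rounded down. */
        assert(quantize_floor(5000) == 4096);
        return (0);
    }
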
diff --git a/src/huge.c b/src/huge.c
index 62e6932..8abd8c0 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -56,6 +56,7 @@
 	size_t ausize;
 	arena_t *iarena;
 	extent_node_t *node;
+	size_t sn;
 	bool is_zeroed;
 
 	/* Allocate one or more contiguous chunks for this request. */
@@ -68,7 +69,8 @@
 	assert(ausize >= chunksize);
 
 	/* Allocate an extent node with which to track the chunk. */
-	iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) : a0get();
+	iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) :
+	    a0get();
 	node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
 	    CACHELINE, false, NULL, true, iarena);
 	if (node == NULL)
@@ -82,15 +84,15 @@
 	if (likely(!tsdn_null(tsdn)))
 		arena = arena_choose(tsdn_tsd(tsdn), arena);
 	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
-	    arena, usize, alignment, &is_zeroed)) == NULL) {
+	    arena, usize, alignment, &sn, &is_zeroed)) == NULL) {
 		idalloctm(tsdn, node, NULL, true, true);
 		return (NULL);
 	}
 
-	extent_node_init(node, arena, ret, usize, is_zeroed, true);
+	extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);
 
 	if (huge_node_set(tsdn, ret, node)) {
-		arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
+		arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
 		idalloctm(tsdn, node, NULL, true, true);
 		return (NULL);
 	}
@@ -245,7 +247,8 @@
 	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
 
 	/* Zap the excess chunks. */
-	arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize);
+	arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
+	    extent_node_sn_get(node));
 
 	return (false);
 }
@@ -407,7 +410,8 @@
 	huge_dalloc_junk(extent_node_addr_get(node),
 	    extent_node_size_get(node));
 	arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
-	    extent_node_addr_get(node), extent_node_size_get(node));
+	    extent_node_addr_get(node), extent_node_size_get(node),
+	    extent_node_sn_get(node));
 	idalloctm(tsdn, node, NULL, true, true);
 
 	arena_decay_tick(tsdn, arena);
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 38650ff..baead66 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1056,7 +1056,11 @@
 				if (cont)				\
 					continue;			\
 			}
-#define	CONF_HANDLE_T_U(t, o, n, min, max, clip)			\
+#define	CONF_MIN_no(um, min)	false
+#define	CONF_MIN_yes(um, min)	((um) < (min))
+#define	CONF_MAX_no(um, max)	false
+#define	CONF_MAX_yes(um, max)	((um) > (max))
+#define	CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip)	\
 			if (CONF_MATCH(n)) {				\
 				uintmax_t um;				\
 				char *end;				\
@@ -1069,15 +1073,19 @@
 					    "Invalid conf value",	\
 					    k, klen, v, vlen);		\
 				} else if (clip) {			\
-					if ((min) != 0 && um < (min))	\
+					if (CONF_MIN_##check_min(um,	\
+					    (min)))			\
 						o = (t)(min);		\
-					else if (um > (max))		\
+					else if (CONF_MAX_##check_max(	\
+					    um, (max)))			\
 						o = (t)(max);		\
 					else				\
 						o = (t)um;		\
 				} else {				\
-					if (((min) != 0 && um < (min))	\
-					    || um > (max)) {		\
+					if (CONF_MIN_##check_min(um,	\
+					    (min)) ||			\
+					    CONF_MAX_##check_max(um,	\
+					    (max))) {			\
 						malloc_conf_error(	\
 						    "Out-of-range "	\
 						    "conf value",	\
@@ -1087,10 +1095,13 @@
 				}					\
 				continue;				\
 			}
-#define	CONF_HANDLE_UNSIGNED(o, n, min, max, clip)			\
-			CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
-#define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
-			CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
+#define	CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max,	\
+    clip)								\
+			CONF_HANDLE_T_U(unsigned, o, n, min, max,	\
+			    check_min, check_max, clip)
+#define	CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip)	\
+			CONF_HANDLE_T_U(size_t, o, n, min, max,		\
+			    check_min, check_max, clip)
 #define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
 			if (CONF_MATCH(n)) {				\
 				long l;					\
@@ -1133,7 +1144,7 @@
 			 */
 			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
 			    LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
-			    (sizeof(size_t) << 3) - 1, true)
+			    (sizeof(size_t) << 3) - 1, yes, yes, true)
 			if (strncmp("dss", k, klen) == 0) {
 				int i;
 				bool match = false;
@@ -1159,7 +1170,7 @@
 				continue;
 			}
 			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
-			    UINT_MAX, false)
+			    UINT_MAX, yes, no, false)
 			if (strncmp("purge", k, klen) == 0) {
 				int i;
 				bool match = false;
@@ -1230,7 +1241,7 @@
 					continue;
 				}
 				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
-				    0, SIZE_T_MAX, false)
+				    0, SIZE_T_MAX, no, no, false)
 				CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
 				CONF_HANDLE_BOOL(opt_zero, "zero", true)
 			}
@@ -1267,8 +1278,8 @@
 				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
 				    "prof_thread_active_init", true)
 				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
-				    "lg_prof_sample", 0,
-				    (sizeof(uint64_t) << 3) - 1, true)
+				    "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
+				    - 1, no, yes, true)
 				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
 				    true)
 				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
@@ -1284,7 +1295,14 @@
 			malloc_conf_error("Invalid conf pair", k, klen, v,
 			    vlen);
 #undef CONF_MATCH
+#undef CONF_MATCH_VALUE
 #undef CONF_HANDLE_BOOL
+#undef CONF_MIN_no
+#undef CONF_MIN_yes
+#undef CONF_MAX_no
+#undef CONF_MAX_yes
+#undef CONF_HANDLE_T_U
+#undef CONF_HANDLE_UNSIGNED
 #undef CONF_HANDLE_SIZE_T
 #undef CONF_HANDLE_SSIZE_T
 #undef CONF_HANDLE_CHAR_P
@@ -1393,8 +1411,9 @@
 
 	ncpus = malloc_ncpus();
 
-#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
-    && !defined(_WIN32) && !defined(__native_client__))
+#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
+    && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
+    !defined(__native_client__))
 	/* LinuxThreads' pthread_atfork() allocates. */
 	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 	    jemalloc_postfork_child) != 0) {
@@ -1973,8 +1992,8 @@
 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
 	}
 	UTRACE(ptr, size, ret);
-	JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize,
-	    old_rzsize, true, false);
+	JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr,
+	    old_usize, old_rzsize, maybe, false);
 	witness_assert_lockless(tsdn);
 	return (ret);
 }
@@ -2400,8 +2419,8 @@
 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
 	}
 	UTRACE(ptr, size, p);
-	JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr,
-	    old_usize, old_rzsize, false, zero);
+	JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr,
+	    old_usize, old_rzsize, no, zero);
 	witness_assert_lockless(tsd_tsdn(tsd));
 	return (p);
 label_oom:
@@ -2543,8 +2562,8 @@
 		*tsd_thread_allocatedp_get(tsd) += usize;
 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
 	}
-	JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr,
-	    old_usize, old_rzsize, false, zero);
+	JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr,
+	    old_usize, old_rzsize, no, zero);
 label_not_resized:
 	UTRACE(ptr, size, ptr);
 	witness_assert_lockless(tsd_tsdn(tsd));
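
The reworked CONF_HANDLE_T_U() earlier in this file dispatches on the literal tokens no/yes: CONF_MIN_##check_min pastes to either a macro that expands to false (so a zero minimum generates no comparison at all and no tautological-compare warning) or to a real bounds check. A minimal sketch of the same token-pasting dispatch (macro names here are invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    #define CHECK_MIN_no(v, min)    false           /* Bound not enforced. */
    #define CHECK_MIN_yes(v, min)   ((v) < (min))

    /* check_min must be the literal token no or yes. */
    #define CLAMP_MIN(v, min, check_min) \
        (CHECK_MIN_##check_min(v, min) ? (min) : (v))

    int
    main(void)
    {
        /* Expands to ((3) < (10)) ? (10) : (3), i.e. 10. */
        printf("%d\n", CLAMP_MIN(3, 10, yes));
        /* Expands to false ? (0) : (3), i.e. 3; no "< 0" compare is emitted. */
        printf("%d\n", CLAMP_MIN(3, 0, no));
        return (0);
    }
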
diff --git a/src/pages.c b/src/pages.c
index 647952a..5f0c966 100644
--- a/src/pages.c
+++ b/src/pages.c
@@ -170,15 +170,16 @@
 #ifdef _WIN32
 	VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
 	unzeroed = true;
-#elif defined(JEMALLOC_HAVE_MADVISE)
-#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
-#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
-#    define JEMALLOC_MADV_ZEROS true
-#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \
+    defined(JEMALLOC_PURGE_MADVISE_DONTNEED))
+#  if defined(JEMALLOC_PURGE_MADVISE_FREE)
 #    define JEMALLOC_MADV_PURGE MADV_FREE
 #    define JEMALLOC_MADV_ZEROS false
+#  elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
+#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
+#    define JEMALLOC_MADV_ZEROS true
 #  else
-#    error "No madvise(2) flag defined for purging unused dirty pages."
+#    error No madvise(2) flag defined for purging unused dirty pages
 #  endif
 	int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
 	unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
@@ -191,6 +192,34 @@
 	return (unzeroed);
 }
 
+bool
+pages_huge(void *addr, size_t size)
+{
+
+	assert(PAGE_ADDR2BASE(addr) == addr);
+	assert(PAGE_CEILING(size) == size);
+
+#ifdef JEMALLOC_THP
+	return (madvise(addr, size, MADV_HUGEPAGE) != 0);
+#else
+	return (false);
+#endif
+}
+
+bool
+pages_nohuge(void *addr, size_t size)
+{
+
+	assert(PAGE_ADDR2BASE(addr) == addr);
+	assert(PAGE_CEILING(size) == size);
+
+#ifdef JEMALLOC_THP
+	return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
+#else
+	return (false);
+#endif
+}
+
 #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
 static bool
 os_overcommits_sysctl(void)
@@ -219,7 +248,7 @@
 	char buf[1];
 	ssize_t nread;
 
-#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_open)
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
 	fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
 #else
 	fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
@@ -227,13 +256,13 @@
 	if (fd == -1)
 		return (false); /* Error. */
 
-#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_read)
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
 	nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
 #else
 	nread = read(fd, &buf, sizeof(buf));
 #endif
 
-#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_close)
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
 	syscall(SYS_close, fd);
 #else
 	close(fd);
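
pages_huge() and pages_nohuge() above are thin wrappers around madvise(2) with MADV_HUGEPAGE/MADV_NOHUGEPAGE, compiled in only when configure defines JEMALLOC_THP. A rough standalone illustration of the underlying calls on Linux (error handling kept minimal; not jemalloc code):

    #include <sys/mman.h>
    #include <stddef.h>
    #include <stdio.h>

    int
    main(void)
    {
        size_t size = 2 << 20;      /* 2 MiB, a typical huge page size. */
        void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (addr == MAP_FAILED)
            return (1);
    #ifdef MADV_HUGEPAGE
        /* Ask the kernel to back this range with transparent huge pages. */
        if (madvise(addr, size, MADV_HUGEPAGE) != 0)
            perror("madvise(MADV_HUGEPAGE)");
        /* ...and later opt the range back out again. */
        if (madvise(addr, size, MADV_NOHUGEPAGE) != 0)
            perror("madvise(MADV_NOHUGEPAGE)");
    #endif
        munmap(addr, size);
        return (0);
    }
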
diff --git a/src/stats.c b/src/stats.c
old mode 100644
new mode 100755
index bd8af39..1360f3b
--- a/src/stats.c
+++ b/src/stats.c
@@ -3,7 +3,7 @@
 
 #define	CTL_GET(n, v, t) do {						\
 	size_t sz = sizeof(t);						\
-	xmallctl(n, v, &sz, NULL, 0);					\
+	xmallctl(n, (void *)v, &sz, NULL, 0);				\
 } while (0)
 
 #define	CTL_M2_GET(n, i, v, t) do {					\
@@ -12,7 +12,7 @@
 	size_t sz = sizeof(t);						\
 	xmallctlnametomib(n, mib, &miblen);				\
 	mib[2] = (i);							\
-	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
+	xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0);		\
 } while (0)
 
 #define	CTL_M2_M4_GET(n, i, j, v, t) do {				\
@@ -22,7 +22,7 @@
 	xmallctlnametomib(n, mib, &miblen);				\
 	mib[2] = (i);							\
 	mib[4] = (j);							\
-	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
+	xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0);		\
 } while (0)
 
 /******************************************************************************/
@@ -647,7 +647,7 @@
 #define	OPT_WRITE_BOOL_MUTABLE(n, m, c) {				\
 	bool bv2;							\
 	if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 &&	\
-	    je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) {			\
+	    je_mallctl(#m, (void *)&bv2, &bsz, NULL, 0) == 0) {		\
 		if (json) {						\
 			malloc_cprintf(write_cb, cbopaque,		\
 			    "\t\t\t\""#n"\": %s%s\n", bv ? "true" :	\
@@ -692,7 +692,7 @@
 #define	OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) {				\
 	ssize_t ssv2;							\
 	if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 &&	\
-	    je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) {		\
+	    je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) {	\
 		if (json) {						\
 			malloc_cprintf(write_cb, cbopaque,		\
 			    "\t\t\t\""#n"\": %zd%s\n", ssv, (c));	\
@@ -1084,7 +1084,8 @@
 	 * */
 	epoch = 1;
 	u64sz = sizeof(uint64_t);
-	err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
+	err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
+	    sizeof(uint64_t));
 	if (err != 0) {
 		if (err == EAGAIN) {
 			malloc_write("<jemalloc>: Memory allocation failure in "
diff --git a/src/tcache.c b/src/tcache.c
old mode 100644
new mode 100755
index f97aa42..21540ff
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -517,12 +517,12 @@
 	 * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
 	 * known.
 	 */
-	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
+	if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS)
 		tcache_maxclass = SMALL_MAXCLASS;
-	else if ((1U << opt_lg_tcache_max) > large_maxclass)
+	else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass)
 		tcache_maxclass = large_maxclass;
 	else
-		tcache_maxclass = (1U << opt_lg_tcache_max);
+		tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
 
 	nhbins = size2index(tcache_maxclass) + 1;
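
The ZU(1) change above matters because opt_lg_tcache_max may exceed 31 on LP64 targets: 1U is a 32-bit unsigned int, so shifting it by, say, 40 is undefined, whereas a size_t-typed one is well defined there. A minimal illustration (the ZU() below is a stand-in assumed to match jemalloc's size_t cast):

    #include <stddef.h>
    #include <stdio.h>

    #define ZU(z)   ((size_t)(z))       /* Stand-in for jemalloc's ZU(). */

    int
    main(void)
    {
        int lg = 40;

        /* Well-defined on LP64, where size_t is 64 bits wide. */
        printf("%zu\n", ZU(1) << lg);
        /* (1U << lg) would shift a 32-bit value by 40: undefined behavior. */
        return (0);
    }
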
 
diff --git a/src/util.c b/src/util.c
old mode 100644
new mode 100755
index 7905267..dd8c236
--- a/src/util.c
+++ b/src/util.c
@@ -49,7 +49,7 @@
 wrtmessage(void *cbopaque, const char *s)
 {
 
-#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_write)
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
 	/*
 	 * Use syscall(2) rather than write(2) when possible in order to avoid
 	 * the possibility of memory allocation within libc.  This is necessary
@@ -200,7 +200,7 @@
 		p++;
 	}
 	if (neg)
-		ret = -ret;
+		ret = (uintmax_t)(-((intmax_t)ret));
 
 	if (p == ns) {
 		/* No conversion performed. */
diff --git a/test/integration/MALLOCX_ARENA.c b/test/integration/MALLOCX_ARENA.c
old mode 100644
new mode 100755
index 30c203a..910a096
--- a/test/integration/MALLOCX_ARENA.c
+++ b/test/integration/MALLOCX_ARENA.c
@@ -19,8 +19,8 @@
 	size_t sz;
 
 	sz = sizeof(arena_ind);
-	assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
-	    "Error in arenas.extend");
+	assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0),
+	    0, "Error in arenas.extend");
 
 	if (thread_ind % 4 != 3) {
 		size_t mib[3];
diff --git a/test/integration/allocated.c b/test/integration/allocated.c
old mode 100644
new mode 100755
index 3630e80..6ce145b
--- a/test/integration/allocated.c
+++ b/test/integration/allocated.c
@@ -18,14 +18,14 @@
 	size_t sz, usize;
 
 	sz = sizeof(a0);
-	if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) {
+	if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) {
 		if (err == ENOENT)
 			goto label_ENOENT;
 		test_fail("%s(): Error in mallctl(): %s", __func__,
 		    strerror(err));
 	}
 	sz = sizeof(ap0);
-	if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) {
+	if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) {
 		if (err == ENOENT)
 			goto label_ENOENT;
 		test_fail("%s(): Error in mallctl(): %s", __func__,
@@ -36,14 +36,15 @@
 	    "storage");
 
 	sz = sizeof(d0);
-	if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) {
+	if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) {
 		if (err == ENOENT)
 			goto label_ENOENT;
 		test_fail("%s(): Error in mallctl(): %s", __func__,
 		    strerror(err));
 	}
 	sz = sizeof(dp0);
-	if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) {
+	if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL,
+	    0))) {
 		if (err == ENOENT)
 			goto label_ENOENT;
 		test_fail("%s(): Error in mallctl(): %s", __func__,
@@ -57,9 +58,9 @@
 	assert_ptr_not_null(p, "Unexpected malloc() error");
 
 	sz = sizeof(a1);
-	mallctl("thread.allocated", &a1, &sz, NULL, 0);
+	mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0);
 	sz = sizeof(ap1);
-	mallctl("thread.allocatedp", &ap1, &sz, NULL, 0);
+	mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0);
 	assert_u64_eq(*ap1, a1,
 	    "Dereferenced \"thread.allocatedp\" value should equal "
 	    "\"thread.allocated\" value");
@@ -74,9 +75,9 @@
 	free(p);
 
 	sz = sizeof(d1);
-	mallctl("thread.deallocated", &d1, &sz, NULL, 0);
+	mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0);
 	sz = sizeof(dp1);
-	mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0);
+	mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0);
 	assert_u64_eq(*dp1, d1,
 	    "Dereferenced \"thread.deallocatedp\" value should equal "
 	    "\"thread.deallocated\" value");
diff --git a/test/integration/chunk.c b/test/integration/chunk.c
index ff9bf96..94cf002 100644
--- a/test/integration/chunk.c
+++ b/test/integration/chunk.c
@@ -137,8 +137,8 @@
 	bool xallocx_success_a, xallocx_success_b, xallocx_success_c;
 
 	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0),
+	    0, "Unexpected mallctl() failure");
 	flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
 
 	/* Install custom chunk hooks. */
@@ -148,8 +148,9 @@
 	hooks_mib[1] = (size_t)arena_ind;
 	old_size = sizeof(chunk_hooks_t);
 	new_size = sizeof(chunk_hooks_t);
-	assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, &old_hooks, &old_size,
-	    &new_hooks, new_size), 0, "Unexpected chunk_hooks error");
+	assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
+	    &old_size, (void *)&new_hooks, new_size), 0,
+	    "Unexpected chunk_hooks error");
 	orig_hooks = old_hooks;
 	assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error");
 	assert_ptr_ne(old_hooks.dalloc, chunk_dalloc,
@@ -164,18 +165,18 @@
 
 	/* Get large size classes. */
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
-	    "Unexpected arenas.lrun.0.size failure");
-	assert_d_eq(mallctl("arenas.lrun.1.size", &large1, &sz, NULL, 0), 0,
-	    "Unexpected arenas.lrun.1.size failure");
+	assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL,
+	    0), 0, "Unexpected arenas.lrun.0.size failure");
+	assert_d_eq(mallctl("arenas.lrun.1.size", (void *)&large1, &sz, NULL,
+	    0), 0, "Unexpected arenas.lrun.1.size failure");
 
 	/* Get huge size classes. */
-	assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
-	    "Unexpected arenas.hchunk.0.size failure");
-	assert_d_eq(mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0), 0,
-	    "Unexpected arenas.hchunk.1.size failure");
-	assert_d_eq(mallctl("arenas.hchunk.2.size", &huge2, &sz, NULL, 0), 0,
-	    "Unexpected arenas.hchunk.2.size failure");
+	assert_d_eq(mallctl("arenas.hchunk.0.size", (void *)&huge0, &sz, NULL,
+	    0), 0, "Unexpected arenas.hchunk.0.size failure");
+	assert_d_eq(mallctl("arenas.hchunk.1.size", (void *)&huge1, &sz, NULL,
+	    0), 0, "Unexpected arenas.hchunk.1.size failure");
+	assert_d_eq(mallctl("arenas.hchunk.2.size", (void *)&huge2, &sz, NULL,
+	    0), 0, "Unexpected arenas.hchunk.2.size failure");
 
 	/* Test dalloc/decommit/purge cascade. */
 	purge_miblen = sizeof(purge_mib)/sizeof(size_t);
@@ -265,9 +266,9 @@
 
 	/* Restore chunk hooks. */
 	assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL,
-	    &old_hooks, new_size), 0, "Unexpected chunk_hooks error");
-	assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, &old_hooks, &old_size,
-	    NULL, 0), 0, "Unexpected chunk_hooks error");
+	    (void *)&old_hooks, new_size), 0, "Unexpected chunk_hooks error");
+	assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
+	    &old_size, NULL, 0), 0, "Unexpected chunk_hooks error");
 	assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc,
 	    "Unexpected alloc error");
 	assert_ptr_eq(old_hooks.dalloc, orig_hooks.dalloc,
diff --git a/test/integration/mallocx.c b/test/integration/mallocx.c
old mode 100644
new mode 100755
index 43b76eb..d709eb3
--- a/test/integration/mallocx.c
+++ b/test/integration/mallocx.c
@@ -11,7 +11,7 @@
 	size_t z;
 
 	z = sizeof(unsigned);
-	assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+	assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
 	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
 
 	return (ret);
@@ -37,7 +37,7 @@
 	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
 	mib[2] = ind;
 	z = sizeof(size_t);
-	assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
 	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
 
 	return (ret);
diff --git a/test/integration/overflow.c b/test/integration/overflow.c
old mode 100644
new mode 100755
index 303d9b2..84a3565
--- a/test/integration/overflow.c
+++ b/test/integration/overflow.c
@@ -8,8 +8,8 @@
 	void *p;
 
 	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
-	    "Unexpected mallctl() error");
+	assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL, 0),
+	    0, "Unexpected mallctl() error");
 
 	miblen = sizeof(mib) / sizeof(size_t);
 	assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
@@ -17,8 +17,8 @@
 	mib[2] = nhchunks - 1;
 
 	sz = sizeof(size_t);
-	assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0,
-	    "Unexpected mallctlbymib() error");
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
+	    NULL, 0), 0, "Unexpected mallctlbymib() error");
 
 	assert_ptr_null(malloc(max_size_class + 1),
 	    "Expected OOM due to over-sized allocation request");
diff --git a/test/integration/rallocx.c b/test/integration/rallocx.c
old mode 100644
new mode 100755
index 66ad866..506bf1c
--- a/test/integration/rallocx.c
+++ b/test/integration/rallocx.c
@@ -7,7 +7,7 @@
 	size_t z;
 
 	z = sizeof(unsigned);
-	assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+	assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
 	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
 
 	return (ret);
@@ -33,7 +33,7 @@
 	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
 	mib[2] = ind;
 	z = sizeof(size_t);
-	assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
 	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
 
 	return (ret);
diff --git a/test/integration/sdallocx.c b/test/integration/sdallocx.c
index b84817d..f92e058 100644
--- a/test/integration/sdallocx.c
+++ b/test/integration/sdallocx.c
@@ -1,7 +1,7 @@
 #include "test/jemalloc_test.h"
 
-#define	MAXALIGN (((size_t)1) << 25)
-#define	NITER 4
+#define	MAXALIGN (((size_t)1) << 22)
+#define	NITER 3
 
 TEST_BEGIN(test_basic)
 {
diff --git a/test/integration/thread_arena.c b/test/integration/thread_arena.c
old mode 100644
new mode 100755
index 67be535..7a35a63
--- a/test/integration/thread_arena.c
+++ b/test/integration/thread_arena.c
@@ -16,8 +16,8 @@
 	free(p);
 
 	size = sizeof(arena_ind);
-	if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind,
-	    sizeof(main_arena_ind)))) {
+	if ((err = mallctl("thread.arena", (void *)&arena_ind, &size,
+	    (void *)&main_arena_ind, sizeof(main_arena_ind)))) {
 		char buf[BUFERROR_BUF];
 
 		buferror(err, buf, sizeof(buf));
@@ -25,7 +25,8 @@
 	}
 
 	size = sizeof(arena_ind);
-	if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
+	if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL,
+	    0))) {
 		char buf[BUFERROR_BUF];
 
 		buferror(err, buf, sizeof(buf));
@@ -50,7 +51,8 @@
 	assert_ptr_not_null(p, "Error in malloc()");
 
 	size = sizeof(arena_ind);
-	if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
+	if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL,
+	    0))) {
 		char buf[BUFERROR_BUF];
 
 		buferror(err, buf, sizeof(buf));
diff --git a/test/integration/thread_tcache_enabled.c b/test/integration/thread_tcache_enabled.c
old mode 100644
new mode 100755
index f4e89c6..2c2825e
--- a/test/integration/thread_tcache_enabled.c
+++ b/test/integration/thread_tcache_enabled.c
@@ -16,7 +16,8 @@
 	bool e0, e1;
 
 	sz = sizeof(bool);
-	if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) {
+	if ((err = mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL,
+	    0))) {
 		if (err == ENOENT) {
 			assert_false(config_tcache,
 			    "ENOENT should only be returned if tcache is "
@@ -27,53 +28,53 @@
 
 	if (e0) {
 		e1 = false;
-		assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz),
-		    0, "Unexpected mallctl() error");
+		assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+		    (void *)&e1, sz), 0, "Unexpected mallctl() error");
 		assert_true(e0, "tcache should be enabled");
 	}
 
 	e1 = true;
-	assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
-	    "Unexpected mallctl() error");
+	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
 	assert_false(e0, "tcache should be disabled");
 
 	e1 = true;
-	assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
-	    "Unexpected mallctl() error");
+	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
 	assert_true(e0, "tcache should be enabled");
 
 	e1 = false;
-	assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
-	    "Unexpected mallctl() error");
+	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
 	assert_true(e0, "tcache should be enabled");
 
 	e1 = false;
-	assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
-	    "Unexpected mallctl() error");
+	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
 	assert_false(e0, "tcache should be disabled");
 
 	free(malloc(1));
 	e1 = true;
-	assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
-	    "Unexpected mallctl() error");
+	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
 	assert_false(e0, "tcache should be disabled");
 
 	free(malloc(1));
 	e1 = true;
-	assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
-	    "Unexpected mallctl() error");
+	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
 	assert_true(e0, "tcache should be enabled");
 
 	free(malloc(1));
 	e1 = false;
-	assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
-	    "Unexpected mallctl() error");
+	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
 	assert_true(e0, "tcache should be enabled");
 
 	free(malloc(1));
 	e1 = false;
-	assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
-	    "Unexpected mallctl() error");
+	assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
 	assert_false(e0, "tcache should be disabled");
 
 	free(malloc(1));
diff --git a/test/integration/xallocx.c b/test/integration/xallocx.c
old mode 100644
new mode 100755
index ad292bb..67e0a0e
--- a/test/integration/xallocx.c
+++ b/test/integration/xallocx.c
@@ -16,8 +16,8 @@
 
 	if (ind == 0) {
 		size_t sz = sizeof(ind);
-		assert_d_eq(mallctl("arenas.extend", &ind, &sz, NULL, 0), 0,
-		    "Unexpected mallctl failure creating arena");
+		assert_d_eq(mallctl("arenas.extend", (void *)&ind, &sz, NULL,
+		    0), 0, "Unexpected mallctl failure creating arena");
 	}
 
 	return (ind);
@@ -78,7 +78,7 @@
 	size_t z;
 
 	z = sizeof(unsigned);
-	assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+	assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
 	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
 
 	return (ret);
@@ -118,7 +118,7 @@
 	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
 	mib[2] = ind;
 	z = sizeof(size_t);
-	assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
 	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
 
 	return (ret);
diff --git a/test/unit/arena_reset.c b/test/unit/arena_reset.c
old mode 100644
new mode 100755
index 8ba36c2..adf9baa
--- a/test/unit/arena_reset.c
+++ b/test/unit/arena_reset.c
@@ -11,7 +11,7 @@
 	size_t z;
 
 	z = sizeof(unsigned);
-	assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+	assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
 	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
 
 	return (ret);
@@ -51,7 +51,7 @@
 	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
 	mib[2] = ind;
 	z = sizeof(size_t);
-	assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
 	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
 
 	return (ret);
@@ -92,8 +92,8 @@
 	    && unlikely(opt_quarantine)));
 
 	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0),
+	    0, "Unexpected mallctl() failure");
 
 	flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
 
diff --git a/test/unit/decay.c b/test/unit/decay.c
old mode 100644
new mode 100755
index e169ae2..5af8f80
--- a/test/unit/decay.c
+++ b/test/unit/decay.c
@@ -40,10 +40,10 @@
 	    "Unexpected failure getting decay ticker");
 
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
-	    "Unexpected mallctl failure");
-	assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
-	    "Unexpected mallctl failure");
+	assert_d_eq(mallctl("arenas.hchunk.0.size", (void *)&huge0, &sz, NULL,
+	    0), 0, "Unexpected mallctl failure");
+	assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL,
+	    0), 0, "Unexpected mallctl failure");
 
 	/*
 	 * Test the standard APIs using a huge size class, since we can't
@@ -175,8 +175,8 @@
 		tcache_sizes[1] = 1;
 
 		sz = sizeof(unsigned);
-		assert_d_eq(mallctl("tcache.create", &tcache_ind, &sz, NULL, 0),
-		    0, "Unexpected mallctl failure");
+		assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
+		    NULL, 0), 0, "Unexpected mallctl failure");
 
 		for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
 			sz = tcache_sizes[i];
@@ -193,7 +193,7 @@
 			dallocx(p, MALLOCX_TCACHE(tcache_ind));
 			tick0 = ticker_read(decay_ticker);
 			assert_d_eq(mallctl("tcache.flush", NULL, NULL,
-			    &tcache_ind, sizeof(unsigned)), 0,
+			    (void *)&tcache_ind, sizeof(unsigned)), 0,
 			    "Unexpected mallctl failure");
 			tick1 = ticker_read(decay_ticker);
 			assert_u32_ne(tick1, tick0,
@@ -228,22 +228,22 @@
 		size_t tcache_max;
 
 		sz = sizeof(size_t);
-		assert_d_eq(mallctl("arenas.tcache_max", &tcache_max, &sz, NULL,
-		    0), 0, "Unexpected mallctl failure");
+		assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
+		    &sz, NULL, 0), 0, "Unexpected mallctl failure");
 		large = nallocx(tcache_max + 1, flags);
 	}  else {
 		sz = sizeof(size_t);
-		assert_d_eq(mallctl("arenas.lrun.0.size", &large, &sz, NULL, 0),
-		    0, "Unexpected mallctl failure");
+		assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large, &sz,
+		    NULL, 0), 0, "Unexpected mallctl failure");
 	}
 
 	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
 	    "Unexpected mallctl failure");
-	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
-	    "Unexpected mallctl failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
+	    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0),
-	    config_stats ? 0 : ENOENT, "Unexpected mallctl result");
+	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz,
+	    NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");
 
 	for (i = 0; i < NPS; i++) {
 		ps[i] = mallocx(large, flags);
@@ -283,11 +283,11 @@
 			assert_ptr_not_null(p, "Unexpected mallocx() failure");
 			dallocx(p, flags);
 		}
-		assert_d_eq(mallctl("epoch", NULL, NULL, &epoch,
+		assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
 		    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
 		sz = sizeof(uint64_t);
-		assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz,
-		    NULL, 0), config_stats ? 0 : ENOENT,
+		assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1,
+		    &sz, NULL, 0), config_stats ? 0 : ENOENT,
 		    "Unexpected mallctl result");
 
 		nstime_update(&time);
@@ -313,16 +313,16 @@
 	test_skip_if(opt_purge != purge_mode_decay);
 
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
-	    "Unexpected mallctl failure");
+	assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL,
+	    0), 0, "Unexpected mallctl failure");
 
 	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
 	    "Unexpected mallctl failure");
-	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
-	    "Unexpected mallctl failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
+	    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0),
-	    config_stats ? 0 : ENOENT, "Unexpected mallctl result");
+	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz,
+	    NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");
 
 	nupdates_mock = 0;
 	nstime_init(&time_mock, 0);
@@ -348,11 +348,11 @@
 		    "Expected nstime_update() to be called");
 	}
 
-	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0,
-	    "Unexpected mallctl failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
+	    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz, NULL, 0),
-	    config_stats ? 0 : ENOENT, "Unexpected mallctl result");
+	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1, &sz,
+	    NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");
 
 	if (config_stats)
 		assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c
old mode 100644
new mode 100755
index 69f8c20..2353c92
--- a/test/unit/mallctl.c
+++ b/test/unit/mallctl.c
@@ -12,16 +12,18 @@
 	    EPERM, "mallctl() should return EPERM on attempt to write "
 	    "read-only value");
 
-	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)-1),
-	    EINVAL, "mallctl() should return EINVAL for input size mismatch");
-	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)+1),
-	    EINVAL, "mallctl() should return EINVAL for input size mismatch");
+	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
+	    sizeof(epoch)-1), EINVAL,
+	    "mallctl() should return EINVAL for input size mismatch");
+	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
+	    sizeof(epoch)+1), EINVAL,
+	    "mallctl() should return EINVAL for input size mismatch");
 
 	sz = sizeof(epoch)-1;
-	assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
+	assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
 	    "mallctl() should return EINVAL for output size mismatch");
 	sz = sizeof(epoch)+1;
-	assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
+	assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
 	    "mallctl() should return EINVAL for output size mismatch");
 }
 TEST_END
@@ -56,18 +58,20 @@
 	assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
 	    "Unexpected mallctlnametomib() failure");
 
-	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
+	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
 	    sizeof(epoch)-1), EINVAL,
 	    "mallctlbymib() should return EINVAL for input size mismatch");
-	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
+	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
 	    sizeof(epoch)+1), EINVAL,
 	    "mallctlbymib() should return EINVAL for input size mismatch");
 
 	sz = sizeof(epoch)-1;
-	assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
+	    EINVAL,
 	    "mallctlbymib() should return EINVAL for output size mismatch");
 	sz = sizeof(epoch)+1;
-	assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
+	    EINVAL,
 	    "mallctlbymib() should return EINVAL for output size mismatch");
 }
 TEST_END
@@ -83,18 +87,19 @@
 	assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
 
 	/* Read. */
-	assert_d_eq(mallctl("epoch", &old_epoch, &sz, NULL, 0), 0,
+	assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0,
 	    "Unexpected mallctl() failure");
 	assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
 
 	/* Write. */
-	assert_d_eq(mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch,
+	    sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
 	assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
 
 	/* Read+write. */
-	assert_d_eq(mallctl("epoch", &old_epoch, &sz, &new_epoch,
-	    sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz,
+	    (void *)&new_epoch, sizeof(new_epoch)), 0,
+	    "Unexpected mallctl() failure");
 	assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
 }
 TEST_END
@@ -120,8 +125,8 @@
 #define	TEST_MALLCTL_CONFIG(config, t) do {				\
 	t oldval;							\
 	size_t sz = sizeof(oldval);					\
-	assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0),	\
-	    0, "Unexpected mallctl() failure");				\
+	assert_d_eq(mallctl("config."#config, (void *)&oldval, &sz,	\
+	    NULL, 0), 0, "Unexpected mallctl() failure");		\
 	assert_b_eq(oldval, config_##config, "Incorrect config value");	\
 	assert_zu_eq(sz, sizeof(oldval), "Unexpected output size");	\
 } while (0)
@@ -154,7 +159,8 @@
 	t oldval;							\
 	size_t sz = sizeof(oldval);					\
 	int expected = config_##config ? 0 : ENOENT;			\
-	int result = mallctl("opt."#opt, &oldval, &sz, NULL, 0);	\
+	int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL,	\
+	    0);								\
 	assert_d_eq(result, expected,					\
 	    "Unexpected mallctl() result for opt."#opt);		\
 	assert_zu_eq(sz, sizeof(oldval), "Unexpected output size");	\
@@ -197,7 +203,7 @@
 	size_t len, miblen;
 
 	len = sizeof(nbins);
-	assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0,
+	assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
 	    "Unexpected mallctl() failure");
 
 	miblen = 4;
@@ -208,8 +214,8 @@
 
 		mib[2] = i;
 		len = sizeof(bin_size);
-		assert_d_eq(mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0),
-		    0, "Unexpected mallctlbymib() failure");
+		assert_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len,
+		    NULL, 0), 0, "Unexpected mallctlbymib() failure");
 		/* Do something with bin_size... */
 	}
 }
@@ -258,25 +264,25 @@
 	/* Create tcaches. */
 	for (i = 0; i < NTCACHES; i++) {
 		sz = sizeof(unsigned);
-		assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0,
-		    "Unexpected mallctl() failure, i=%u", i);
+		assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
+		    0), 0, "Unexpected mallctl() failure, i=%u", i);
 	}
 
 	/* Exercise tcache ID recycling. */
 	for (i = 0; i < NTCACHES; i++) {
-		assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i],
-		    sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
-		    i);
+		assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
+		    (void *)&tis[i], sizeof(unsigned)), 0,
+		    "Unexpected mallctl() failure, i=%u", i);
 	}
 	for (i = 0; i < NTCACHES; i++) {
 		sz = sizeof(unsigned);
-		assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0,
-		    "Unexpected mallctl() failure, i=%u", i);
+		assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
+		    0), 0, "Unexpected mallctl() failure, i=%u", i);
 	}
 
 	/* Flush empty tcaches. */
 	for (i = 0; i < NTCACHES; i++) {
-		assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i],
+		assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
 		    sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
 		    i);
 	}
@@ -321,16 +327,16 @@
 
 	/* Flush some non-empty tcaches. */
 	for (i = 0; i < NTCACHES/2; i++) {
-		assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i],
+		assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
 		    sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
 		    i);
 	}
 
 	/* Destroy tcaches. */
 	for (i = 0; i < NTCACHES; i++) {
-		assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i],
-		    sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
-		    i);
+		assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
+		    (void *)&tis[i], sizeof(unsigned)), 0,
+		    "Unexpected mallctl() failure, i=%u", i);
 	}
 }
 TEST_END
@@ -340,15 +346,17 @@
 	unsigned arena_old, arena_new, narenas;
 	size_t sz = sizeof(unsigned);
 
-	assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+	    0, "Unexpected mallctl() failure");
 	assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
 	arena_new = narenas - 1;
-	assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new,
-	    sizeof(unsigned)), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz,
+	    (void *)&arena_new, sizeof(unsigned)), 0,
+	    "Unexpected mallctl() failure");
 	arena_new = 0;
-	assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new,
-	    sizeof(unsigned)), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz,
+	    (void *)&arena_new, sizeof(unsigned)), 0,
+	    "Unexpected mallctl() failure");
 }
 TEST_END
 
@@ -359,17 +367,18 @@
 
 	test_skip_if(opt_purge != purge_mode_ratio);
 
-	assert_d_eq(mallctl("arena.0.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
-	    NULL, 0), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arena.0.lg_dirty_mult",
+	    (void *)&orig_lg_dirty_mult, &sz, NULL, 0), 0,
+	    "Unexpected mallctl() failure");
 
 	lg_dirty_mult = -2;
 	assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL,
-	    &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+	    (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
 	    "Unexpected mallctl() success");
 
 	lg_dirty_mult = (sizeof(size_t) << 3);
 	assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL,
-	    &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+	    (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
 	    "Unexpected mallctl() success");
 
 	for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1;
@@ -377,9 +386,9 @@
 	    = lg_dirty_mult, lg_dirty_mult++) {
 		ssize_t old_lg_dirty_mult;
 
-		assert_d_eq(mallctl("arena.0.lg_dirty_mult", &old_lg_dirty_mult,
-		    &sz, &lg_dirty_mult, sizeof(ssize_t)), 0,
-		    "Unexpected mallctl() failure");
+		assert_d_eq(mallctl("arena.0.lg_dirty_mult",
+		    (void *)&old_lg_dirty_mult, &sz, (void *)&lg_dirty_mult,
+		    sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
 		assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult,
 		    "Unexpected old arena.0.lg_dirty_mult");
 	}
@@ -393,25 +402,25 @@
 
 	test_skip_if(opt_purge != purge_mode_decay);
 
-	assert_d_eq(mallctl("arena.0.decay_time", &orig_decay_time, &sz,
+	assert_d_eq(mallctl("arena.0.decay_time", (void *)&orig_decay_time, &sz,
 	    NULL, 0), 0, "Unexpected mallctl() failure");
 
 	decay_time = -2;
 	assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
-	    &decay_time, sizeof(ssize_t)), EFAULT,
+	    (void *)&decay_time, sizeof(ssize_t)), EFAULT,
 	    "Unexpected mallctl() success");
 
 	decay_time = 0x7fffffff;
 	assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
-	    &decay_time, sizeof(ssize_t)), 0,
+	    (void *)&decay_time, sizeof(ssize_t)), 0,
 	    "Unexpected mallctl() failure");
 
 	for (prev_decay_time = decay_time, decay_time = -1;
 	    decay_time < 20; prev_decay_time = decay_time, decay_time++) {
 		ssize_t old_decay_time;
 
-		assert_d_eq(mallctl("arena.0.decay_time", &old_decay_time,
-		    &sz, &decay_time, sizeof(ssize_t)), 0,
+		assert_d_eq(mallctl("arena.0.decay_time", (void *)&old_decay_time,
+		    &sz, (void *)&decay_time, sizeof(ssize_t)), 0,
 		    "Unexpected mallctl() failure");
 		assert_zd_eq(old_decay_time, prev_decay_time,
 		    "Unexpected old arena.0.decay_time");
@@ -429,8 +438,8 @@
 	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
 	    "Unexpected mallctl() failure");
 
-	assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+	    0, "Unexpected mallctl() failure");
 	assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
 	    "Unexpected mallctlnametomib() failure");
 	mib[1] = narenas;
@@ -449,8 +458,8 @@
 	assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
 	    "Unexpected mallctl() failure");
 
-	assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+	    0, "Unexpected mallctl() failure");
 	assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
 	    "Unexpected mallctlnametomib() failure");
 	mib[1] = narenas;
@@ -471,31 +480,35 @@
 	    "Unexpected mallctlnametomib() error");
 
 	dss_prec_new = "disabled";
-	assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
-	    sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
+	    (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
+	    "Unexpected mallctl() failure");
 	assert_str_ne(dss_prec_old, "primary",
 	    "Unexpected default for dss precedence");
 
-	assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old,
-	    sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure");
-
-	assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0,
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
+	    (void *)&dss_prec_old, sizeof(dss_prec_old)), 0,
 	    "Unexpected mallctl() failure");
+
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
+	    0), 0, "Unexpected mallctl() failure");
 	assert_str_ne(dss_prec_old, "primary",
 	    "Unexpected value for dss precedence");
 
 	mib[1] = narenas_total_get();
 	dss_prec_new = "disabled";
-	assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
-	    sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
+	    (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
+	    "Unexpected mallctl() failure");
 	assert_str_ne(dss_prec_old, "primary",
 	    "Unexpected default for dss precedence");
 
-	assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old,
-	    sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
-
-	assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0,
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
+	    (void *)&dss_prec_old, sizeof(dss_prec_new)), 0,
 	    "Unexpected mallctl() failure");
+
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
+	    0), 0, "Unexpected mallctl() failure");
 	assert_str_ne(dss_prec_old, "primary",
 	    "Unexpected value for dss precedence");
 }
@@ -506,14 +519,14 @@
 	unsigned narenas;
 	size_t sz = sizeof(narenas);
 
-	assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+	    0, "Unexpected mallctl() failure");
 	{
 		VARIABLE_ARRAY(bool, initialized, narenas);
 
 		sz = narenas * sizeof(bool);
-		assert_d_eq(mallctl("arenas.initialized", initialized, &sz,
-		    NULL, 0), 0, "Unexpected mallctl() failure");
+		assert_d_eq(mallctl("arenas.initialized", (void *)initialized,
+		    &sz, NULL, 0), 0, "Unexpected mallctl() failure");
 	}
 }
 TEST_END
@@ -525,17 +538,17 @@
 
 	test_skip_if(opt_purge != purge_mode_ratio);
 
-	assert_d_eq(mallctl("arenas.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
-	    NULL, 0), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arenas.lg_dirty_mult", (void *)&orig_lg_dirty_mult,
+	    &sz, NULL, 0), 0, "Unexpected mallctl() failure");
 
 	lg_dirty_mult = -2;
 	assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL,
-	    &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+	    (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
 	    "Unexpected mallctl() success");
 
 	lg_dirty_mult = (sizeof(size_t) << 3);
 	assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL,
-	    &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+	    (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
 	    "Unexpected mallctl() success");
 
 	for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1;
@@ -543,9 +556,9 @@
 	    lg_dirty_mult, lg_dirty_mult++) {
 		ssize_t old_lg_dirty_mult;
 
-		assert_d_eq(mallctl("arenas.lg_dirty_mult", &old_lg_dirty_mult,
-		    &sz, &lg_dirty_mult, sizeof(ssize_t)), 0,
-		    "Unexpected mallctl() failure");
+		assert_d_eq(mallctl("arenas.lg_dirty_mult",
+		    (void *)&old_lg_dirty_mult, &sz, (void *)&lg_dirty_mult,
+		    sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
 		assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult,
 		    "Unexpected old arenas.lg_dirty_mult");
 	}
@@ -559,26 +572,26 @@
 
 	test_skip_if(opt_purge != purge_mode_decay);
 
-	assert_d_eq(mallctl("arenas.decay_time", &orig_decay_time, &sz,
+	assert_d_eq(mallctl("arenas.decay_time", (void *)&orig_decay_time, &sz,
 	    NULL, 0), 0, "Unexpected mallctl() failure");
 
 	decay_time = -2;
 	assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
-	    &decay_time, sizeof(ssize_t)), EFAULT,
+	    (void *)&decay_time, sizeof(ssize_t)), EFAULT,
 	    "Unexpected mallctl() success");
 
 	decay_time = 0x7fffffff;
 	assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
-	    &decay_time, sizeof(ssize_t)), 0,
+	    (void *)&decay_time, sizeof(ssize_t)), 0,
 	    "Expected mallctl() failure");
 
 	for (prev_decay_time = decay_time, decay_time = -1;
 	    decay_time < 20; prev_decay_time = decay_time, decay_time++) {
 		ssize_t old_decay_time;
 
-		assert_d_eq(mallctl("arenas.decay_time", &old_decay_time,
-		    &sz, &decay_time, sizeof(ssize_t)), 0,
-		    "Unexpected mallctl() failure");
+		assert_d_eq(mallctl("arenas.decay_time",
+		    (void *)&old_decay_time, &sz, (void *)&decay_time,
+		    sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
 		assert_zd_eq(old_decay_time, prev_decay_time,
 		    "Unexpected old arenas.decay_time");
 	}
@@ -591,8 +604,8 @@
 #define	TEST_ARENAS_CONSTANT(t, name, expected) do {			\
 	t name;								\
 	size_t sz = sizeof(t);						\
-	assert_d_eq(mallctl("arenas."#name, &name, &sz, NULL, 0), 0,	\
-	    "Unexpected mallctl() failure");				\
+	assert_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL,	\
+	    0), 0, "Unexpected mallctl() failure");			\
 	assert_zu_eq(name, expected, "Incorrect "#name" size");		\
 } while (0)
 
@@ -612,8 +625,8 @@
 #define	TEST_ARENAS_BIN_CONSTANT(t, name, expected) do {		\
 	t name;								\
 	size_t sz = sizeof(t);						\
-	assert_d_eq(mallctl("arenas.bin.0."#name, &name, &sz, NULL, 0),	\
-	    0, "Unexpected mallctl() failure");				\
+	assert_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz,	\
+	    NULL, 0), 0, "Unexpected mallctl() failure");		\
 	assert_zu_eq(name, expected, "Incorrect "#name" size");		\
 } while (0)
 
@@ -631,8 +644,8 @@
 #define	TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do {		\
 	t name;								\
 	size_t sz = sizeof(t);						\
-	assert_d_eq(mallctl("arenas.lrun.0."#name, &name, &sz, NULL,	\
-	    0), 0, "Unexpected mallctl() failure");			\
+	assert_d_eq(mallctl("arenas.lrun.0."#name, (void *)&name, &sz,	\
+	    NULL, 0), 0, "Unexpected mallctl() failure");		\
 	assert_zu_eq(name, expected, "Incorrect "#name" size");		\
 } while (0)
 
@@ -648,8 +661,8 @@
 #define	TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do {		\
 	t name;								\
 	size_t sz = sizeof(t);						\
-	assert_d_eq(mallctl("arenas.hchunk.0."#name, &name, &sz, NULL,	\
-	    0), 0, "Unexpected mallctl() failure");			\
+	assert_d_eq(mallctl("arenas.hchunk.0."#name, (void *)&name,	\
+	    &sz, NULL, 0), 0, "Unexpected mallctl() failure");		\
 	assert_zu_eq(name, expected, "Incorrect "#name" size");		\
 } while (0)
 
@@ -664,12 +677,12 @@
 	unsigned narenas_before, arena, narenas_after;
 	size_t sz = sizeof(unsigned);
 
-	assert_d_eq(mallctl("arenas.narenas", &narenas_before, &sz, NULL, 0), 0,
+	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz,
+	    NULL, 0), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arenas.extend", (void *)&arena, &sz, NULL, 0), 0,
 	    "Unexpected mallctl() failure");
-	assert_d_eq(mallctl("arenas.extend", &arena, &sz, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
-	assert_d_eq(mallctl("arenas.narenas", &narenas_after, &sz, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL,
+	    0), 0, "Unexpected mallctl() failure");
 
 	assert_u_eq(narenas_before+1, narenas_after,
 	    "Unexpected number of arenas before versus after extension");
@@ -683,8 +696,8 @@
 #define	TEST_STATS_ARENAS(t, name) do {					\
 	t name;								\
 	size_t sz = sizeof(t);						\
-	assert_d_eq(mallctl("stats.arenas.0."#name, &name, &sz, NULL,	\
-	    0), 0, "Unexpected mallctl() failure");			\
+	assert_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz,	\
+	    NULL, 0), 0, "Unexpected mallctl() failure");		\
 } while (0)
 
 	TEST_STATS_ARENAS(unsigned, nthreads);
diff --git a/test/unit/pack.c b/test/unit/pack.c
new file mode 100644
index 0000000..0b6ffcd
--- /dev/null
+++ b/test/unit/pack.c
@@ -0,0 +1,206 @@
+#include "test/jemalloc_test.h"
+
+const char *malloc_conf =
+    /* Use smallest possible chunk size. */
+    "lg_chunk:0"
+    /* Immediately purge to minimize fragmentation. */
+    ",lg_dirty_mult:-1"
+    ",decay_time:-1"
+    ;
+
+/*
+ * Size class that is a divisor of the page size, ideally 4+ regions per run.
+ */
+#if LG_PAGE <= 14
+#define	SZ	(ZU(1) << (LG_PAGE - 2))
+#else
+#define	SZ	4096
+#endif
+
+/*
+ * Number of chunks to consume at the high water mark.  Use at least 2 so that
+ * downward growth of mmap()ed memory is exercised on systems where mappings
+ * grow downward.
+ */
+#define	NCHUNKS	8
+
+static unsigned
+binind_compute(void)
+{
+	size_t sz;
+	unsigned nbins, i;
+
+	sz = sizeof(nbins);
+	assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
+	    "Unexpected mallctl failure");
+
+	for (i = 0; i < nbins; i++) {
+		size_t mib[4];
+		size_t miblen = sizeof(mib)/sizeof(size_t);
+		size_t size;
+
+		assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
+		    &miblen), 0, "Unexpected mallctlnametomib failure");
+		mib[2] = (size_t)i;
+
+		sz = sizeof(size);
+		assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
+		    0), 0, "Unexpected mallctlbymib failure");
+		if (size == SZ)
+			return (i);
+	}
+
+	test_fail("Unable to compute bin index for SZ");
+	return (0);
+}
+
+static size_t
+nregs_per_run_compute(void)
+{
+	uint32_t nregs;
+	size_t sz;
+	unsigned binind = binind_compute();
+	size_t mib[4];
+	size_t miblen = sizeof(mib)/sizeof(size_t);
+
+	assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
+	    "Unexpected mallctlnametomib failure");
+	mib[2] = (size_t)binind;
+	sz = sizeof(nregs);
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
+	    0), 0, "Unexpected mallctlbymib failure");
+	return (nregs);
+}
+
+static size_t
+npages_per_run_compute(void)
+{
+	size_t sz;
+	unsigned binind = binind_compute();
+	size_t mib[4];
+	size_t miblen = sizeof(mib)/sizeof(size_t);
+	size_t run_size;
+
+	assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
+	    "Unexpected mallctlnametomib failure");
+	mib[2] = (size_t)binind;
+	sz = sizeof(run_size);
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz, NULL,
+	    0), 0, "Unexpected mallctlbymib failure");
+	return (run_size >> LG_PAGE);
+}
+
+static size_t
+npages_per_chunk_compute(void)
+{
+
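+	/* Usable pages per chunk, excluding the map_bias header pages. */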
+	return ((chunksize >> LG_PAGE) - map_bias);
+}
+
+static size_t
+nruns_per_chunk_compute(void)
+{
+
+	return (npages_per_chunk_compute() / npages_per_run_compute());
+}
+
+static unsigned
+arenas_extend_mallctl(void)
+{
+	unsigned arena_ind;
+	size_t sz;
+
+	sz = sizeof(arena_ind);
+	assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0),
+	    0, "Error in arenas.extend");
+
+	return (arena_ind);
+}
+
+static void
+arena_reset_mallctl(unsigned arena_ind)
+{
+	size_t mib[3];
+	size_t miblen = sizeof(mib)/sizeof(size_t);
+
+	assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
+	    "Unexpected mallctlnametomib() failure");
+	mib[1] = (size_t)arena_ind;
+	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+	    "Unexpected mallctlbymib() failure");
+}
+
+TEST_BEGIN(test_pack)
+{
+	unsigned arena_ind = arenas_extend_mallctl();
+	size_t nregs_per_run = nregs_per_run_compute();
+	size_t nruns_per_chunk = nruns_per_chunk_compute();
+	size_t nruns = nruns_per_chunk * NCHUNKS;
+	size_t nregs = nregs_per_run * nruns;
+	VARIABLE_ARRAY(void *, ptrs, nregs);
+	size_t i, j, offset;
+
+	/* Fill matrix. */
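+	/*
+	 * MALLOCX_TCACHE_NONE bypasses the thread cache so that every region
+	 * is carved directly out of the new arena's runs.
+	 */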
+	for (i = offset = 0; i < nruns; i++) {
+		for (j = 0; j < nregs_per_run; j++) {
+			void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
+			    MALLOCX_TCACHE_NONE);
+			assert_ptr_not_null(p,
+			    "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |"
+			    " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu",
+			    SZ, arena_ind, i, j);
+			ptrs[(i * nregs_per_run) + j] = p;
+		}
+	}
+
+	/*
+	 * Free all but one region of each run, but rotate which region is
+	 * preserved, so that subsequent allocations exercise the within-run
+	 * layout policy.
+	 */
+	offset = 0;
+	for (i = offset = 0;
+	    i < nruns;
+	    i++, offset = (offset + 1) % nregs_per_run) {
+		for (j = 0; j < nregs_per_run; j++) {
+			void *p = ptrs[(i * nregs_per_run) + j];
+			if (offset == j)
+				continue;
+			dallocx(p, MALLOCX_ARENA(arena_ind) |
+			    MALLOCX_TCACHE_NONE);
+		}
+	}
+
+	/*
+	 * Logically refill matrix, skipping preserved regions and verifying
+	 * that the matrix is unmodified.
+	 */
+	offset = 0;
+	for (i = offset = 0;
+	    i < nruns;
+	    i++, offset = (offset + 1) % nregs_per_run) {
+		for (j = 0; j < nregs_per_run; j++) {
+			void *p;
+
+			if (offset == j)
+				continue;
+			p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
+			    MALLOCX_TCACHE_NONE);
+			assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
+			    "Unexpected refill discrepancy, run=%zu, reg=%zu\n",
+			    i, j);
+		}
+	}
+
+	/* Clean up. */
+	arena_reset_mallctl(arena_ind);
+}
+TEST_END
+
+int
+main(void)
+{
+
+	return (test(
+	    test_pack));
+}
diff --git a/test/unit/pages.c b/test/unit/pages.c
new file mode 100644
index 0000000..d31a35e
--- /dev/null
+++ b/test/unit/pages.c
@@ -0,0 +1,27 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_pages_huge)
+{
+	bool commit;
+	void *pages;
+
+	commit = true;
+	pages = pages_map(NULL, PAGE, &commit);
+	assert_ptr_not_null(pages, "Unexpected pages_map() error");
+
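+	/*
+	 * Toggling transparent huge page state on a freshly mapped page is
+	 * expected to succeed, i.e. both calls should return false.
+	 */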
+	assert_false(pages_huge(pages, PAGE),
+	    "Unexpected pages_huge() result");
+	assert_false(pages_nohuge(pages, PAGE),
+	    "Unexpected pages_nohuge() result");
+
+	pages_unmap(pages, PAGE);
+}
+TEST_END
+
+int
+main(void)
+{
+
+	return (test(
+	    test_pages_huge));
+}
diff --git a/test/unit/prof_accum.c b/test/unit/prof_accum.c
old mode 100644
new mode 100755
index fd229e0..d941b5b
--- a/test/unit/prof_accum.c
+++ b/test/unit/prof_accum.c
@@ -68,8 +68,9 @@
 	test_skip_if(!config_prof);
 
 	active = true;
-	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-	    0, "Unexpected mallctl failure while activating profiling");
+	assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+	    sizeof(active)), 0,
+	    "Unexpected mallctl failure while activating profiling");
 
 	prof_dump_open = prof_dump_open_intercept;
 
diff --git a/test/unit/prof_active.c b/test/unit/prof_active.c
old mode 100644
new mode 100755
index 8149095..d00943a
--- a/test/unit/prof_active.c
+++ b/test/unit/prof_active.c
@@ -12,7 +12,7 @@
 	size_t sz;
 
 	sz = sizeof(old);
-	assert_d_eq(mallctl(name, &old, &sz, NULL, 0), 0,
+	assert_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0,
 	    "%s():%d: Unexpected mallctl failure reading %s", func, line, name);
 	assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line,
 	    name);
@@ -26,7 +26,8 @@
 	size_t sz;
 
 	sz = sizeof(old);
-	assert_d_eq(mallctl(name, &old, &sz, &val_new, sizeof(val_new)), 0,
+	assert_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new,
+	    sizeof(val_new)), 0,
 	    "%s():%d: Unexpected mallctl failure reading/writing %s", func,
 	    line, name);
 	assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func,
diff --git a/test/unit/prof_gdump.c b/test/unit/prof_gdump.c
old mode 100644
new mode 100755
index a0e6ee9..996cb67
--- a/test/unit/prof_gdump.c
+++ b/test/unit/prof_gdump.c
@@ -28,8 +28,9 @@
 	test_skip_if(!config_prof);
 
 	active = true;
-	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-	    0, "Unexpected mallctl failure while activating profiling");
+	assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+	    sizeof(active)), 0,
+	    "Unexpected mallctl failure while activating profiling");
 
 	prof_dump_open = prof_dump_open_intercept;
 
@@ -45,8 +46,8 @@
 
 	gdump = false;
 	sz = sizeof(gdump_old);
-	assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
-	    sizeof(gdump)), 0,
+	assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
+	    (void *)&gdump, sizeof(gdump)), 0,
 	    "Unexpected mallctl failure while disabling prof.gdump");
 	assert(gdump_old);
 	did_prof_dump_open = false;
@@ -56,8 +57,8 @@
 
 	gdump = true;
 	sz = sizeof(gdump_old);
-	assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
-	    sizeof(gdump)), 0,
+	assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
+	    (void *)&gdump, sizeof(gdump)), 0,
 	    "Unexpected mallctl failure while enabling prof.gdump");
 	assert(!gdump_old);
 	did_prof_dump_open = false;
diff --git a/test/unit/prof_idump.c b/test/unit/prof_idump.c
old mode 100644
new mode 100755
index bdea53e..16c6462
--- a/test/unit/prof_idump.c
+++ b/test/unit/prof_idump.c
@@ -29,8 +29,9 @@
 	test_skip_if(!config_prof);
 
 	active = true;
-	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-	    0, "Unexpected mallctl failure while activating profiling");
+	assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+	    sizeof(active)), 0,
+	    "Unexpected mallctl failure while activating profiling");
 
 	prof_dump_open = prof_dump_open_intercept;
 
diff --git a/test/unit/prof_reset.c b/test/unit/prof_reset.c
old mode 100644
new mode 100755
index 5ae45fd..59d7079
--- a/test/unit/prof_reset.c
+++ b/test/unit/prof_reset.c
@@ -20,8 +20,8 @@
 set_prof_active(bool active)
 {
 
-	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-	    0, "Unexpected mallctl failure");
+	assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+	    sizeof(active)), 0, "Unexpected mallctl failure");
 }
 
 static size_t
@@ -30,7 +30,8 @@
 	size_t lg_prof_sample;
 	size_t sz = sizeof(size_t);
 
-	assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
+	assert_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz,
+	    NULL, 0), 0,
 	    "Unexpected mallctl failure while reading profiling sample rate");
 	return (lg_prof_sample);
 }
@@ -39,7 +40,7 @@
 do_prof_reset(size_t lg_prof_sample)
 {
 	assert_d_eq(mallctl("prof.reset", NULL, NULL,
-	    &lg_prof_sample, sizeof(size_t)), 0,
+	    (void *)&lg_prof_sample, sizeof(size_t)), 0,
 	    "Unexpected mallctl failure while resetting profile data");
 	assert_zu_eq(lg_prof_sample, get_lg_prof_sample(),
 	    "Expected profile sample rate change");
@@ -54,8 +55,8 @@
 	test_skip_if(!config_prof);
 
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("opt.lg_prof_sample", &lg_prof_sample_orig, &sz,
-	    NULL, 0), 0,
+	assert_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig,
+	    &sz, NULL, 0), 0,
 	    "Unexpected mallctl failure while reading profiling sample rate");
 	assert_zu_eq(lg_prof_sample_orig, 0,
 	    "Unexpected profiling sample rate");
diff --git a/test/unit/prof_thread_name.c b/test/unit/prof_thread_name.c
old mode 100644
new mode 100755
index f501158..9ec5497
--- a/test/unit/prof_thread_name.c
+++ b/test/unit/prof_thread_name.c
@@ -12,8 +12,9 @@
 	size_t sz;
 
 	sz = sizeof(thread_name_old);
-	assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, NULL, 0),
-	    0, "%s():%d: Unexpected mallctl failure reading thread.prof.name",
+	assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz,
+	    NULL, 0), 0,
+	    "%s():%d: Unexpected mallctl failure reading thread.prof.name",
 	    func, line);
 	assert_str_eq(thread_name_old, thread_name_expected,
 	    "%s():%d: Unexpected thread.prof.name value", func, line);
@@ -26,8 +27,8 @@
     int line)
 {
 
-	assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
-	    sizeof(thread_name)), 0,
+	assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
+	    (void *)&thread_name, sizeof(thread_name)), 0,
 	    "%s():%d: Unexpected mallctl failure reading thread.prof.name",
 	    func, line);
 	mallctl_thread_name_get_impl(thread_name, func, line);
@@ -46,15 +47,15 @@
 
 	/* NULL input shouldn't be allowed. */
 	thread_name = NULL;
-	assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
-	    sizeof(thread_name)), EFAULT,
+	assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
+	    (void *)&thread_name, sizeof(thread_name)), EFAULT,
 	    "Unexpected mallctl result writing \"%s\" to thread.prof.name",
 	    thread_name);
 
 	/* '\n' shouldn't be allowed. */
 	thread_name = "hi\nthere";
-	assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
-	    sizeof(thread_name)), EFAULT,
+	assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
+	    (void *)&thread_name, sizeof(thread_name)), EFAULT,
 	    "Unexpected mallctl result writing \"%s\" to thread.prof.name",
 	    thread_name);
 
@@ -64,8 +65,9 @@
 		size_t sz;
 
 		sz = sizeof(thread_name_old);
-		assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz,
-		    &thread_name, sizeof(thread_name)), EPERM,
+		assert_d_eq(mallctl("thread.prof.name",
+		    (void *)&thread_name_old, &sz, (void *)&thread_name,
+		    sizeof(thread_name)), EPERM,
 		    "Unexpected mallctl result writing \"%s\" to "
 		    "thread.prof.name", thread_name);
 	}
diff --git a/test/unit/run_quantize.c b/test/unit/run_quantize.c
index b1ca635..089176f 100644
--- a/test/unit/run_quantize.c
+++ b/test/unit/run_quantize.c
@@ -13,7 +13,7 @@
 	 */
 
 	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
+	assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
 	    "Unexpected mallctl failure");
 
 	assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
@@ -21,8 +21,8 @@
 	for (i = 0; i < nbins; i++) {
 		mib[2] = i;
 		sz = sizeof(size_t);
-		assert_d_eq(mallctlbymib(mib, miblen, &run_size, &sz, NULL, 0),
-		    0, "Unexpected mallctlbymib failure");
+		assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz,
+		    NULL, 0), 0, "Unexpected mallctlbymib failure");
 		assert_zu_eq(run_size, run_quantize_floor(run_size),
 		    "Small run quantization should be a no-op (run_size=%zu)",
 		    run_size);
@@ -47,11 +47,11 @@
 	 */
 
 	sz = sizeof(bool);
-	assert_d_eq(mallctl("config.cache_oblivious", &cache_oblivious, &sz,
-	    NULL, 0), 0, "Unexpected mallctl failure");
+	assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious,
+	    &sz, NULL, 0), 0, "Unexpected mallctl failure");
 
 	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
+	assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0,
 	    "Unexpected mallctl failure");
 
 	assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0,
@@ -61,8 +61,8 @@
 
 		mib[2] = i;
 		sz = sizeof(size_t);
-		assert_d_eq(mallctlbymib(mib, miblen, &lrun_size, &sz, NULL, 0),
-		    0, "Unexpected mallctlbymib failure");
+		assert_d_eq(mallctlbymib(mib, miblen, (void *)&lrun_size, &sz,
+		    NULL, 0), 0, "Unexpected mallctlbymib failure");
 		run_size = cache_oblivious ? lrun_size + PAGE : lrun_size;
 		floor = run_quantize_floor(run_size);
 		ceil = run_quantize_ceil(run_size);
@@ -102,11 +102,11 @@
 	 */
 
 	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
+	assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
 	    "Unexpected mallctl failure");
 
 	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
+	assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0,
 	    "Unexpected mallctl failure");
 
 	floor_prev = 0;
diff --git a/test/unit/size_classes.c b/test/unit/size_classes.c
old mode 100644
new mode 100755
index 4e1e0ce..81cc606
--- a/test/unit/size_classes.c
+++ b/test/unit/size_classes.c
@@ -8,8 +8,8 @@
 	size_t sz, miblen, max_size_class;
 
 	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
-	    "Unexpected mallctl() error");
+	assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL, 0),
+	    0, "Unexpected mallctl() error");
 
 	miblen = sizeof(mib) / sizeof(size_t);
 	assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
@@ -17,8 +17,8 @@
 	mib[2] = nhchunks - 1;
 
 	sz = sizeof(size_t);
-	assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0,
-	    "Unexpected mallctlbymib() error");
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
+	    NULL, 0), 0, "Unexpected mallctlbymib() error");
 
 	return (max_size_class);
 }
diff --git a/test/unit/stats.c b/test/unit/stats.c
old mode 100644
new mode 100755
index a9a3981..315717d
--- a/test/unit/stats.c
+++ b/test/unit/stats.c
@@ -7,18 +7,18 @@
 	int expected = config_stats ? 0 : ENOENT;
 
 	sz = sizeof(cactive);
-	assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.cactive", (void *)&cactive, &sz, NULL, 0),
+	    expected, "Unexpected mallctl() result");
 
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0),
+	assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
+	    0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
 	    expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.resident", &resident, &sz, NULL, 0),
+	assert_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
 	    expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
+	    expected, "Unexpected mallctl() result");
 
 	if (config_stats) {
 		assert_zu_le(active, *cactive,
@@ -45,19 +45,19 @@
 	p = mallocx(large_maxclass+1, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 
-	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+	    0, "Unexpected mallctl() failure");
 
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.allocated", (void *)&allocated,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL,
-	    0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL,
-	    0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", (void *)&nmalloc,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", (void *)&ndalloc,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", (void *)&nrequests,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
 
 	if (config_stats) {
 		assert_zu_gt(allocated, 0,
@@ -83,8 +83,8 @@
 	uint64_t npurge, nmadvise, purged;
 
 	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
+	    sizeof(arena)), 0, "Unexpected mallctl() failure");
 
 	little = mallocx(SMALL_MAXCLASS, 0);
 	assert_ptr_not_null(little, "Unexpected mallocx() failure");
@@ -100,19 +100,19 @@
 	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
 	    "Unexpected mallctl() failure");
 
-	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+	    0, "Unexpected mallctl() failure");
 
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.mapped", &mapped, &sz, NULL, 0),
-	    expected, "Unexepected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL,
+	    0), expected, "Unexpected mallctl() result");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge, &sz, NULL, 0),
-	    expected, "Unexepected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0),
-	    expected, "Unexepected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.purged", &purged, &sz, NULL, 0),
-	    expected, "Unexepected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge, &sz, NULL,
+	    0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.nmadvise", (void *)&nmadvise, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.purged", (void *)&purged, &sz, NULL,
+	    0), expected, "Unexpected mallctl() result");
 
 	if (config_stats) {
 		assert_u64_gt(npurge, 0,
@@ -150,8 +150,8 @@
 	no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
 
 	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
+	    sizeof(arena)), 0, "Unexpected mallctl() failure");
 
 	p = mallocx(SMALL_MAXCLASS, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
@@ -159,19 +159,21 @@
 	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
 	    config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
 
-	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+	    0, "Unexpected mallctl() failure");
 
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.small.allocated", &allocated, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.small.allocated",
+	    (void *)&allocated, &sz, NULL, 0), expected,
+	    "Unexpected mallctl() result");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", &nmalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", &ndalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.small.nrequests", &nrequests, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.small.nrequests",
+	    (void *)&nrequests, &sz, NULL, 0), expected,
+	    "Unexpected mallctl() result");
 
 	if (config_stats) {
 		assert_zu_gt(allocated, 0,
@@ -197,25 +199,27 @@
 	int expected = config_stats ? 0 : ENOENT;
 
 	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
+	    sizeof(arena)), 0, "Unexpected mallctl() failure");
 
 	p = mallocx(large_maxclass, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 
-	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+	    0, "Unexpected mallctl() failure");
 
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.large.allocated",
+	    (void *)&allocated, &sz, NULL, 0), expected,
+	    "Unexpected mallctl() result");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.large.nrequests",
+	    (void *)&nrequests, &sz, NULL, 0), expected,
+	    "Unexpected mallctl() result");
 
 	if (config_stats) {
 		assert_zu_gt(allocated, 0,
@@ -241,23 +245,23 @@
 	int expected = config_stats ? 0 : ENOENT;
 
 	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
+	    sizeof(arena)), 0, "Unexpected mallctl() failure");
 
 	p = mallocx(chunksize, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 
-	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+	    0, "Unexpected mallctl() failure");
 
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.allocated", (void *)&allocated,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", (void *)&nmalloc,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", (void *)&ndalloc,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
 
 	if (config_stats) {
 		assert_zu_gt(allocated, 0,
@@ -282,8 +286,8 @@
 	int expected = config_stats ? 0 : ENOENT;
 
 	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
+	    sizeof(arena)), 0, "Unexpected mallctl() failure");
 
 	p = mallocx(arena_bin_info[0].reg_size, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
@@ -291,35 +295,36 @@
 	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
 	    config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
 
-	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+	    0, "Unexpected mallctl() failure");
 
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", (void *)&nmalloc,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", (void *)&ndalloc,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests",
+	    (void *)&nrequests, &sz, NULL, 0), expected,
+	    "Unexpected mallctl() result");
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", &curregs, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", (void *)&curregs,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
 
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz,
-	    NULL, 0), config_tcache ? expected : ENOENT,
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", (void *)&nfills,
+	    &sz, NULL, 0), config_tcache ? expected : ENOENT,
 	    "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", &nflushes, &sz,
-	    NULL, 0), config_tcache ? expected : ENOENT,
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", (void *)&nflushes,
+	    &sz, NULL, 0), config_tcache ? expected : ENOENT,
 	    "Unexpected mallctl() result");
 
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", &nruns, &sz,
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", (void *)&nruns, &sz,
 	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", &nreruns, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", (void *)&nreruns,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", &curruns, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", (void *)&curruns,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
 
 	if (config_stats) {
 		assert_u64_gt(nmalloc, 0,
@@ -355,25 +360,26 @@
 	int expected = config_stats ? 0 : ENOENT;
 
 	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
+	    sizeof(arena)), 0, "Unexpected mallctl() failure");
 
 	p = mallocx(LARGE_MINCLASS, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 
-	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+	    0, "Unexpected mallctl() failure");
 
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", &nrequests, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", (void *)&nmalloc,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", (void *)&ndalloc,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests",
+	    (void *)&nrequests, &sz, NULL, 0), expected,
+	    "Unexpected mallctl() result");
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", &curruns, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", (void *)&curruns,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
 
 	if (config_stats) {
 		assert_u64_gt(nmalloc, 0,
@@ -399,23 +405,26 @@
 	int expected = config_stats ? 0 : ENOENT;
 
 	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
+	    sizeof(arena)), 0, "Unexpected mallctl() failure");
 
 	p = mallocx(chunksize, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 
-	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+	    0, "Unexpected mallctl() failure");
 
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", &nmalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", &ndalloc, &sz,
-	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc",
+	    (void *)&nmalloc, &sz, NULL, 0), expected,
+	    "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc",
+	    (void *)&ndalloc, &sz, NULL, 0), expected,
+	    "Unexpected mallctl() result");
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", &curhchunks,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks",
+	    (void *)&curhchunks, &sz, NULL, 0), expected,
+	    "Unexpected mallctl() result");
 
 	if (config_stats) {
 		assert_u64_gt(nmalloc, 0,
diff --git a/test/unit/tsd.c b/test/unit/tsd.c
index 4e2622a..d5f96ac 100644
--- a/test/unit/tsd.c
+++ b/test/unit/tsd.c
@@ -79,7 +79,7 @@
 TEST_BEGIN(test_tsd_main_thread)
 {
 
-	thd_start((void *) 0xa5f3e329);
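+	/* Cast via uintptr_t to silence int-to-pointer warnings. */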
+	thd_start((void *)(uintptr_t)0xa5f3e329);
 }
 TEST_END
 
diff --git a/test/unit/util.c b/test/unit/util.c
index c958dc0..b1f9abd 100644
--- a/test/unit/util.c
+++ b/test/unit/util.c
@@ -75,6 +75,7 @@
 	};
 #define	ERR(e)		e, #e
 #define	KUMAX(x)	((uintmax_t)x##ULL)
+#define	KSMAX(x)	((uintmax_t)(intmax_t)x##LL)
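+/* Keep negative literals signed, then convert to uintmax_t. */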
 	struct test_s tests[] = {
 		{"0",		"0",	-1,	ERR(EINVAL),	UINTMAX_MAX},
 		{"0",		"0",	1,	ERR(EINVAL),	UINTMAX_MAX},
@@ -87,13 +88,13 @@
 
 		{"42",		"",	0,	ERR(0),		KUMAX(42)},
 		{"+42",		"",	0,	ERR(0),		KUMAX(42)},
-		{"-42",		"",	0,	ERR(0),		KUMAX(-42)},
+		{"-42",		"",	0,	ERR(0),		KSMAX(-42)},
 		{"042",		"",	0,	ERR(0),		KUMAX(042)},
 		{"+042",	"",	0,	ERR(0),		KUMAX(042)},
-		{"-042",	"",	0,	ERR(0),		KUMAX(-042)},
+		{"-042",	"",	0,	ERR(0),		KSMAX(-042)},
 		{"0x42",	"",	0,	ERR(0),		KUMAX(0x42)},
 		{"+0x42",	"",	0,	ERR(0),		KUMAX(0x42)},
-		{"-0x42",	"",	0,	ERR(0),		KUMAX(-0x42)},
+		{"-0x42",	"",	0,	ERR(0),		KSMAX(-0x42)},
 
 		{"0",		"",	0,	ERR(0),		KUMAX(0)},
 		{"1",		"",	0,	ERR(0),		KUMAX(1)},
@@ -130,6 +131,7 @@
 	};
 #undef ERR
 #undef KUMAX
+#undef KSMAX
 	unsigned i;
 
 	for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {