Merge branch 'dev'
diff --git a/ChangeLog b/ChangeLog
index c0ca338..d56ee99 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -5,6 +5,30 @@
 
     https://github.com/jemalloc/jemalloc
 
+* 3.6.0 (March 31, 2014)
+
+  This version contains a critical bug fix for a regression present in 3.5.0 and
+  3.5.1.
+
+  Bug fixes:
+  - Fix a regression in arena_chunk_alloc() that caused crashes during
+    small/large allocation if chunk allocation failed.  In the absence of this
+    bug, chunk allocation failure would result in allocation failure, e.g.  NULL
+    return from malloc().  This regression was introduced in 3.5.0.
+  - Fix gcc intrinsics-based backtracing by specifying -fno-omit-frame-pointer
+    to gcc.  Note that the application (and all the libraries it links to)
+    must also be compiled with this option for backtracing to be reliable.
+  - Use dss allocation precedence for huge allocations as well as small/large
+    allocations.
+  - Fix test assertion failure message formatting.  This bug did not manifest on
+    x86_64 systems because of implementation subtleties in va_list.
+  - Fix inconsequential test failures for hash and SFMT code.
+
+  New features:
+  - Support heap profiling on FreeBSD.  This feature depends on the proc
+    filesystem being mounted during heap profile dumping.
+
 * 3.5.1 (February 25, 2014)
 
   This version primarily addresses minor bugs in test code.
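
As a concrete illustration of the caller-visible contract the arena_chunk_alloc() fix restores (this sketch is not part of the patch, and the request size is arbitrary): when chunk allocation fails, malloc() should report failure by returning NULL rather than crashing, so applications can keep their usual out-of-memory handling.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
	/* With the regression fixed, an unsatisfiable request fails cleanly:
	 * malloc() returns NULL (errno is typically ENOMEM on POSIX systems)
	 * instead of the allocator crashing when chunk allocation fails. */
	void *p = malloc((size_t)64 << 20);	/* arbitrary large request */
	if (p == NULL) {
		fprintf(stderr, "allocation failed: %s\n", strerror(errno));
		return 1;
	}
	free(p);
	return 0;
}
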
diff --git a/Makefile.in b/Makefile.in
index 7399f27..d6b7d6e 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -141,7 +141,7 @@
 	$(srcroot)test/integration/xallocx.c
 ifeq ($(enable_experimental), 1)
 TESTS_INTEGRATION += $(srcroot)test/integration/allocm.c \
-	$(srcroot)test/integration/ALLOCM_ARENA.c \
+	$(srcroot)test/integration/MALLOCX_ARENA.c \
 	$(srcroot)test/integration/rallocm.c
 endif
 TESTS_STRESS :=
diff --git a/bin/pprof b/bin/pprof
index 727eb43..a309943 100755
--- a/bin/pprof
+++ b/bin/pprof
@@ -4197,8 +4197,12 @@
 # For libc libraries, the copy in /usr/lib/debug contains debugging symbols
 sub DebuggingLibrary {
   my $file = shift;
-  if ($file =~ m|^/| && -f "/usr/lib/debug$file") {
-    return "/usr/lib/debug$file";
+  if ($file =~ m|^/|) {
+      if (-f "/usr/lib/debug$file") {
+        return "/usr/lib/debug$file";
+      } elsif (-f "/usr/lib/debug$file.debug") {
+        return "/usr/lib/debug$file.debug";
+      }
   }
   return undef;
 }
@@ -4360,6 +4364,19 @@
       $finish = HexExtend($2);
       $offset = $zero_offset;
       $lib = $3;
+    }
+    # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in
+    # function procfs_doprocmap (sys/fs/procfs/procfs_map.c)
+    #
+    # Example:
+    # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.s
+    # o.1 NCH -1
+    elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) {
+      $start = HexExtend($1);
+      $finish = HexExtend($2);
+      $offset = $zero_offset;
+      $lib = FindLibrary($5);
+
     } else {
       next;
     }
@@ -4382,6 +4399,7 @@
       }
     }
 
+    if($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; }
     push(@{$result}, [$lib, $start, $finish, $offset]);
   }
 
@@ -4589,6 +4607,12 @@
     my $finish = $lib->[2];
     my $offset = $lib->[3];
 
+    # Use debug library if it exists
+    my $debug_libname = DebuggingLibrary($libname);
+    if ($debug_libname) {
+        $libname = $debug_libname;
+    }
+
     # Get list of pcs that belong in this library.
     my $contained = [];
     my ($start_pc_index, $finish_pc_index);
@@ -5019,7 +5043,7 @@
 
       # Tag this routine with the starting address in case the image
       # has multiple occurrences of this routine.  We use a syntax
-      # that resembles template paramters that are automatically
+      # that resembles template parameters that are automatically
       # stripped out by ShortFunctionName()
       $this_routine .= "<$start_val>";
 
diff --git a/configure.ac b/configure.ac
index 3837a78..4de81dc 100644
--- a/configure.ac
+++ b/configure.ac
@@ -150,6 +150,11 @@
 fi
 AC_PROG_CPP
 
+AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0])
+if test "x${ac_cv_big_endian}" = "x1" ; then
+  AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ])
+fi
+
 AC_CHECK_SIZEOF([void *])
 if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
   LG_SIZEOF_PTR=3
@@ -742,22 +747,6 @@
      -a "x$GCC" = "xyes" ; then
   AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"])
   AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [LIBS="$LIBS -lgcc"], [enable_prof_libgcc="0"])
-  dnl The following is conservative, in that it only has entries for CPUs on
-  dnl which jemalloc has been tested.
-  AC_MSG_CHECKING([libgcc-based backtracing reliability on ${host_cpu}])
-  case "${host_cpu}" in
-    i[[3456]]86)
-      AC_MSG_RESULT([unreliable])
-      enable_prof_libgcc="0";
-      ;;
-    x86_64)
-      AC_MSG_RESULT([reliable])
-      ;;
-    *)
-      AC_MSG_RESULT([unreliable])
-      enable_prof_libgcc="0";
-      ;;
-  esac
   if test "x${enable_prof_libgcc}" = "x1" ; then
     backtrace_method="libgcc"
     AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ])
@@ -779,6 +768,7 @@
 )
 if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \
      -a "x$GCC" = "xyes" ; then
+  JE_CFLAGS_APPEND([-fno-omit-frame-pointer])
   backtrace_method="gcc intrinsics"
   AC_DEFINE([JEMALLOC_PROF_GCC], [ ])
 else
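
To make the -fno-omit-frame-pointer requirement concrete, here is a hedged standalone sketch (not from the patch) of the kind of return-address lookup the gcc-intrinsics backtracer relies on; walking frames beyond the immediate caller is only reliable when every object in the link is built with frame pointers, which is why JE_CFLAGS_APPEND adds the flag above.

/* Build with: gcc -O2 -fno-omit-frame-pointer frame_demo.c */
#include <stdio.h>

__attribute__((noinline)) static void leaf(void) {
	/* Level 0 is always safe; deeper levels need intact frame pointers
	 * throughout the call chain. */
	printf("caller pc: %p\n", __builtin_return_address(0));
}

int main(void) {
	leaf();
	return 0;
}
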
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index c7e2e87..d8e2e71 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -1439,8 +1439,12 @@
         <listitem><para>Set the precedence of dss allocation as related to mmap
         allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
         <link
-        linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.  See
-        <link linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
+        linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.  Note
+        that even during huge allocation this setting is read from the arena
+        that would be chosen for small or large allocation so that applications
+        can depend on consistent dss versus mmap allocation regardless of
+        allocation size.  See <link
+        linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
         settings.
         </para></listitem>
       </varlistentry>
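
A hedged usage sketch (not part of the patch) of the documented mallctl: create an arena, set its dss precedence, and then make a huge allocation through it. Error handling is abbreviated, the 8 MiB size is only chosen to exceed the default chunk size, and "primary" may be rejected on platforms without sbrk-based allocation.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	const char *dss = "primary";
	char ctl[32];
	void *p;

	/* Create a new arena and prefer dss over mmap for it. */
	if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
		return 1;
	snprintf(ctl, sizeof(ctl), "arena.%u.dss", arena_ind);
	if (mallctl(ctl, NULL, NULL, (void *)&dss, sizeof(const char *)) != 0)
		return 1;

	/* With 3.6.0, this huge (multi-chunk) allocation honors the same
	 * dss-versus-mmap preference as small/large allocations would. */
	p = mallocx((size_t)8 << 20, MALLOCX_ARENA(arena_ind));
	if (p != NULL)
		dallocx(p, 0);
	return (p == NULL);
}
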
diff --git a/include/jemalloc/internal/hash.h b/include/jemalloc/internal/hash.h
index 09b69df..c7183ed 100644
--- a/include/jemalloc/internal/hash.h
+++ b/include/jemalloc/internal/hash.h
@@ -320,7 +320,7 @@
 JEMALLOC_INLINE void
 hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
 {
-#if (LG_SIZEOF_PTR == 3)
+#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
 	hash_x64_128(key, len, seed, (uint64_t *)r_hash);
 #else
 	uint64_t hashes[2];
diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h
index ddf1313..a2b9c77 100644
--- a/include/jemalloc/internal/huge.h
+++ b/include/jemalloc/internal/huge.h
@@ -17,18 +17,20 @@
 /* Protects chunk-related data structures. */
 extern malloc_mutex_t	huge_mtx;
 
-void	*huge_malloc(size_t size, bool zero);
-void	*huge_palloc(size_t size, size_t alignment, bool zero);
+void	*huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
+void	*huge_palloc(size_t size, size_t alignment, bool zero,
+    dss_prec_t dss_prec);
 bool	huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
     size_t extra);
 void	*huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero, bool try_tcache_dalloc);
+    size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
 #ifdef JEMALLOC_JET
 typedef void (huge_dalloc_junk_t)(void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
 #endif
 void	huge_dalloc(void *ptr, bool unmap);
 size_t	huge_salloc(const void *ptr);
+dss_prec_t	huge_dss_prec_get(arena_t *arena);
 prof_ctx_t	*huge_prof_ctx_get(const void *ptr);
 void	huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
 bool	huge_boot(void);
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index d24a1fe..574bbb1 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -770,7 +770,7 @@
 	if (size <= arena_maxclass)
 		return (arena_malloc(arena, size, false, try_tcache));
 	else
-		return (huge_malloc(size, false));
+		return (huge_malloc(size, false, huge_dss_prec_get(arena)));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -787,7 +787,7 @@
 	if (size <= arena_maxclass)
 		return (arena_malloc(arena, size, true, try_tcache));
 	else
-		return (huge_malloc(size, true));
+		return (huge_malloc(size, true, huge_dss_prec_get(arena)));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -813,9 +813,9 @@
 			ret = arena_palloc(choose_arena(arena), usize,
 			    alignment, zero);
 		} else if (alignment <= chunksize)
-			ret = huge_malloc(usize, zero);
+			ret = huge_malloc(usize, zero, huge_dss_prec_get(arena));
 		else
-			ret = huge_palloc(usize, alignment, zero);
+			ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena));
 	}
 
 	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
@@ -984,7 +984,7 @@
 		    try_tcache_dalloc));
 	} else {
 		return (huge_ralloc(ptr, oldsize, size, extra,
-		    alignment, zero, try_tcache_dalloc));
+		    alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
 	}
 }
 
diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in
index e3758e4..c166fbd 100644
--- a/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -190,6 +190,9 @@
 /* C99 restrict keyword supported. */
 #undef JEMALLOC_HAS_RESTRICT
 
+/* For use by hash code. */
+#undef JEMALLOC_BIG_ENDIAN
+
 /* sizeof(int) == 2^LG_SIZEOF_INT. */
 #undef LG_SIZEOF_INT
 
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index 1e64ed5..93516d2 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -197,6 +197,7 @@
 huge_boot
 huge_dalloc
 huge_dalloc_junk
+huge_dss_prec_get
 huge_malloc
 huge_mtx
 huge_ndalloc
diff --git a/src/arena.c b/src/arena.c
index 390ab0f..dad707b 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -614,8 +614,11 @@
 
 	if (arena->spare != NULL)
 		chunk = arena_chunk_init_spare(arena);
-	else
+	else {
 		chunk = arena_chunk_init_hard(arena);
+		if (chunk == NULL)
+			return (NULL);
+	}
 
 	/* Insert the run into the runs_avail tree. */
 	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
diff --git a/src/huge.c b/src/huge.c
index 6d86aed..d72f213 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -16,14 +16,14 @@
 static extent_tree_t	huge;
 
 void *
-huge_malloc(size_t size, bool zero)
+huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
 {
 
-	return (huge_palloc(size, chunksize, zero));
+	return (huge_palloc(size, chunksize, zero, dss_prec));
 }
 
 void *
-huge_palloc(size_t size, size_t alignment, bool zero)
+huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
 {
 	void *ret;
 	size_t csize;
@@ -48,8 +48,7 @@
 	 * it is possible to make correct junk/zero fill decisions below.
 	 */
 	is_zeroed = zero;
-	ret = chunk_alloc(csize, alignment, false, &is_zeroed,
-	    chunk_dss_prec_get());
+	ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
 	if (ret == NULL) {
 		base_node_dealloc(node);
 		return (NULL);
@@ -98,7 +97,7 @@
 
 void *
 huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero, bool try_tcache_dalloc)
+    size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
 {
 	void *ret;
 	size_t copysize;
@@ -113,18 +112,18 @@
 	 * space and copying.
 	 */
 	if (alignment > chunksize)
-		ret = huge_palloc(size + extra, alignment, zero);
+		ret = huge_palloc(size + extra, alignment, zero, dss_prec);
 	else
-		ret = huge_malloc(size + extra, zero);
+		ret = huge_malloc(size + extra, zero, dss_prec);
 
 	if (ret == NULL) {
 		if (extra == 0)
 			return (NULL);
 		/* Try again, this time without extra. */
 		if (alignment > chunksize)
-			ret = huge_palloc(size, alignment, zero);
+			ret = huge_palloc(size, alignment, zero, dss_prec);
 		else
-			ret = huge_malloc(size, zero);
+			ret = huge_malloc(size, zero, dss_prec);
 
 		if (ret == NULL)
 			return (NULL);
@@ -264,6 +263,13 @@
 	return (ret);
 }
 
+dss_prec_t
+huge_dss_prec_get(arena_t *arena)
+{
+
+	return (arena_dss_prec_get(choose_arena(arena)));
+}
+
 prof_ctx_t *
 huge_prof_ctx_get(const void *ptr)
 {
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 563d99f..204778b 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -2076,7 +2076,7 @@
 	if (size <= arena_maxclass)
 		return (arena_malloc(arenas[0], size, zero, false));
 	else
-		return (huge_malloc(size, zero));
+		return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));
 }
 
 void *
diff --git a/src/prof.c b/src/prof.c
index 1d8ccbd..7722b7b 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -935,9 +935,12 @@
 	char filename[PATH_MAX + 1];
 
 	cassert(config_prof);
-
+#ifdef __FreeBSD__
+	malloc_snprintf(filename, sizeof(filename), "/proc/curproc/map");
+#else
 	malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
 	    (int)getpid());
+#endif
 	mfd = open(filename, O_RDONLY);
 	if (mfd != -1) {
 		ssize_t nread;
diff --git a/test/include/test/SFMT-alti.h b/test/include/test/SFMT-alti.h
index 2f86f67..0005df6 100644
--- a/test/include/test/SFMT-alti.h
+++ b/test/include/test/SFMT-alti.h
@@ -61,7 +61,7 @@
  * @return output
  */
 JEMALLOC_ALWAYS_INLINE
-static vector unsigned int vec_recursion(vector unsigned int a,
+vector unsigned int vec_recursion(vector unsigned int a,
 						vector unsigned int b,
 						vector unsigned int c,
 						vector unsigned int d) {
diff --git a/test/include/test/test.h b/test/include/test/test.h
index 8cc97af..a32ec07 100644
--- a/test/include/test/test.h
+++ b/test/include/test/test.h
@@ -1,13 +1,19 @@
+#define	ASSERT_BUFSIZE	256
+
 #define	assert_cmp(t, a, b, cmp, neg_cmp, pri, fmt...) do {		\
 	t a_ = (a);							\
 	t b_ = (b);							\
 	if (!(a_ cmp b_)) {						\
-		p_test_fail(						\
+		char prefix[ASSERT_BUFSIZE];				\
+		char message[ASSERT_BUFSIZE];				\
+		malloc_snprintf(prefix, sizeof(prefix),			\
 		    "%s:%s:%d: Failed assertion: "			\
 		    "(%s) "#cmp" (%s) --> "				\
 		    "%"pri" "#neg_cmp" %"pri": ",			\
 		    __func__, __FILE__, __LINE__,			\
-		    #a, #b, a_, b_, fmt);				\
+		    #a, #b, a_, b_);					\
+		malloc_snprintf(message, sizeof(message), fmt);		\
+		p_test_fail(prefix, message);				\
 	}								\
 } while (0)
 
@@ -208,24 +214,32 @@
 	bool a_ = (a);							\
 	bool b_ = (b);							\
 	if (!(a_ == b_)) {						\
-		p_test_fail(						\
+		char prefix[ASSERT_BUFSIZE];				\
+		char message[ASSERT_BUFSIZE];				\
+		malloc_snprintf(prefix, sizeof(prefix),			\
 		    "%s:%s:%d: Failed assertion: "			\
 		    "(%s) == (%s) --> %s != %s: ",			\
 		    __func__, __FILE__, __LINE__,			\
 		    #a, #b, a_ ? "true" : "false",			\
-		    b_ ? "true" : "false", fmt);			\
+		    b_ ? "true" : "false");				\
+		malloc_snprintf(message, sizeof(message), fmt);		\
+		p_test_fail(prefix, message);				\
 	}								\
 } while (0)
 #define	assert_b_ne(a, b, fmt...) do {					\
 	bool a_ = (a);							\
 	bool b_ = (b);							\
 	if (!(a_ != b_)) {						\
-		p_test_fail(						\
+		char prefix[ASSERT_BUFSIZE];				\
+		char message[ASSERT_BUFSIZE];				\
+		malloc_snprintf(prefix, sizeof(prefix),			\
 		    "%s:%s:%d: Failed assertion: "			\
 		    "(%s) != (%s) --> %s == %s: ",			\
 		    __func__, __FILE__, __LINE__,			\
 		    #a, #b, a_ ? "true" : "false",			\
-		    b_ ? "true" : "false", fmt);			\
+		    b_ ? "true" : "false");				\
+		malloc_snprintf(message, sizeof(message), fmt);		\
+		p_test_fail(prefix, message);				\
 	}								\
 } while (0)
 #define	assert_true(a, fmt...)	assert_b_eq(a, true, fmt)
@@ -233,26 +247,39 @@
 
 #define	assert_str_eq(a, b, fmt...) do {				\
 	if (strcmp((a), (b))) {						\
-		p_test_fail(						\
+		char prefix[ASSERT_BUFSIZE];				\
+		char message[ASSERT_BUFSIZE];				\
+		malloc_snprintf(prefix, sizeof(prefix),			\
 		    "%s:%s:%d: Failed assertion: "			\
 		    "(%s) same as (%s) --> "				\
 		    "\"%s\" differs from \"%s\": ",			\
-		    __func__, __FILE__, __LINE__, #a, #b, a, b, fmt);	\
+		    __func__, __FILE__, __LINE__, #a, #b, a, b);	\
+		malloc_snprintf(message, sizeof(message), fmt);		\
+		p_test_fail(prefix, message);				\
 	}								\
 } while (0)
 #define	assert_str_ne(a, b, fmt...) do {				\
 	if (!strcmp((a), (b))) {					\
-		p_test_fail(						\
+		char prefix[ASSERT_BUFSIZE];				\
+		char message[ASSERT_BUFSIZE];				\
+		malloc_snprintf(prefix, sizeof(prefix),			\
 		    "%s:%s:%d: Failed assertion: "			\
 		    "(%s) differs from (%s) --> "			\
 		    "\"%s\" same as \"%s\": ",				\
-		    __func__, __FILE__, __LINE__, #a, #b, a, b, fmt);	\
+		    __func__, __FILE__, __LINE__, #a, #b, a, b);	\
+		malloc_snprintf(message, sizeof(message), fmt);		\
+		p_test_fail(prefix, message);				\
 	}								\
 } while (0)
 
 #define	assert_not_reached(fmt...) do {					\
-	p_test_fail("%s:%s:%d: Unreachable code reached: ",		\
-	    __func__, __FILE__, __LINE__, fmt);				\
+	char prefix[ASSERT_BUFSIZE];					\
+	char message[ASSERT_BUFSIZE];					\
+	malloc_snprintf(prefix, sizeof(prefix),				\
+	    "%s:%s:%d: Unreachable code reached: ",			\
+	    __func__, __FILE__, __LINE__);				\
+	malloc_snprintf(message, sizeof(message), fmt);			\
+	p_test_fail(prefix, message);					\
 } while (0)
 
 /*
@@ -299,4 +326,4 @@
 test_status_t	p_test(test_t* t, ...);
 void	p_test_init(const char *name);
 void	p_test_fini(void);
-void	p_test_fail(const char *format, ...);
+void	p_test_fail(const char *prefix, const char *message);
diff --git a/test/integration/ALLOCM_ARENA.c b/test/integration/MALLOCX_ARENA.c
similarity index 88%
rename from test/integration/ALLOCM_ARENA.c
rename to test/integration/MALLOCX_ARENA.c
index 5bf3c4a..71cf6f2 100644
--- a/test/integration/ALLOCM_ARENA.c
+++ b/test/integration/MALLOCX_ARENA.c
@@ -8,7 +8,7 @@
 	unsigned thread_ind = (unsigned)(uintptr_t)arg;
 	unsigned arena_ind;
 	void *p;
-	size_t rsz, sz;
+	size_t sz;
 
 	sz = sizeof(arena_ind);
 	assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
@@ -27,9 +27,9 @@
 		    sizeof(const char *)), 0, "Error in mallctlbymib()");
 	}
 
-	assert_d_eq(allocm(&p, &rsz, 1, ALLOCM_ARENA(arena_ind)),
-	    ALLOCM_SUCCESS, "Unexpected allocm() error");
-	dallocm(p, 0);
+	p = mallocx(1, MALLOCX_ARENA(arena_ind));
+	assert_ptr_not_null(p, "Unexpected mallocx() error");
+	dallocx(p, 0);
 
 	return (NULL);
 }
diff --git a/test/integration/allocm.c b/test/integration/allocm.c
index 66ecf86..7b4ea0c 100644
--- a/test/integration/allocm.c
+++ b/test/integration/allocm.c
@@ -1,8 +1,7 @@
 #include "test/jemalloc_test.h"
 
 #define	CHUNK 0x400000
-/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
-#define	MAXALIGN ((size_t)0x2000000LU)
+#define	MAXALIGN (((size_t)1) << 25)
 #define	NITER 4
 
 TEST_BEGIN(test_basic)
diff --git a/test/integration/mallocx.c b/test/integration/mallocx.c
index f37a74b..123e041 100644
--- a/test/integration/mallocx.c
+++ b/test/integration/mallocx.c
@@ -1,8 +1,7 @@
 #include "test/jemalloc_test.h"
 
 #define	CHUNK 0x400000
-/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
-#define	MAXALIGN ((size_t)0x2000000LU)
+#define	MAXALIGN (((size_t)1) << 25)
 #define	NITER 4
 
 TEST_BEGIN(test_basic)
diff --git a/test/integration/rallocx.c b/test/integration/rallocx.c
index b4b6780..ee21aed 100644
--- a/test/integration/rallocx.c
+++ b/test/integration/rallocx.c
@@ -112,7 +112,7 @@
 {
 	void *p, *q;
 	size_t align;
-#define	MAX_ALIGN (ZU(1) << 29)
+#define	MAX_ALIGN (ZU(1) << 25)
 
 	align = ZU(1);
 	p = mallocx(1, MALLOCX_ALIGN(align));
@@ -137,7 +137,7 @@
 {
 	void *p, *q;
 	size_t lg_align, sz;
-#define	MAX_LG_ALIGN 29
+#define	MAX_LG_ALIGN 25
 #define	MAX_VALIDATE (ZU(1) << 22)
 
 	lg_align = ZU(0);
diff --git a/test/src/SFMT.c b/test/src/SFMT.c
index 433d7f6..e6f8dee 100644
--- a/test/src/SFMT.c
+++ b/test/src/SFMT.c
@@ -49,6 +49,9 @@
 #include "test/jemalloc_test.h"
 #include "test/SFMT-params.h"
 
+#if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64)
+#define BIG_ENDIAN64 1
+#endif
 #if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64)
 #define BIG_ENDIAN64 1
 #endif
diff --git a/test/src/test.c b/test/src/test.c
index 6552e37..528d858 100644
--- a/test/src/test.c
+++ b/test/src/test.c
@@ -86,15 +86,9 @@
 }
 
 void
-p_test_fail(const char *format, ...)
+p_test_fail(const char *prefix, const char *message)
 {
-	va_list ap;
 
-	va_start(ap, format);
-	malloc_vcprintf(NULL, NULL, format, ap);
-	format = va_arg(ap, const char *);
-	malloc_vcprintf(NULL, NULL, format, ap);
-	va_end(ap);
-	malloc_printf("\n");
+	malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message);
 	test_status = test_status_fail;
 }
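
The standalone sketch below (assumed names, not jemalloc's own code) mirrors the portable pattern the test harness switches to: each buffer is produced by one complete varargs pass, so no va_list is read again after being handed to a formatting function. Re-reading a va_list that way only happened to work on x86_64, where the ABI makes the callee's advances visible to the caller.

#include <stdarg.h>
#include <stdio.h>

#define BUFSIZE 256

static void fail(const char *prefix, const char *message) {
	fprintf(stderr, "%s%s\n", prefix, message);
}

static void report(const char *file, int line, const char *fmt, ...) {
	char prefix[BUFSIZE];
	char message[BUFSIZE];
	va_list ap;

	/* Fixed arguments go into one buffer... */
	snprintf(prefix, sizeof(prefix), "%s:%d: Failed assertion: ", file,
	    line);
	/* ...and the caller-supplied format consumes the va_list exactly
	 * once. */
	va_start(ap, fmt);
	vsnprintf(message, sizeof(message), fmt, ap);
	va_end(ap);
	fail(prefix, message);
}

int main(void) {
	report(__FILE__, __LINE__, "expected %d, got %d", 1, 2);
	return 0;
}
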
diff --git a/test/unit/SFMT.c b/test/unit/SFMT.c
index 4805f8e..c57bd68 100644
--- a/test/unit/SFMT.c
+++ b/test/unit/SFMT.c
@@ -1576,7 +1576,7 @@
 	for (i = 0; i < BLOCK_SIZE64; i++) {
 		if (i < COUNT_1) {
 			assert_u64_eq(array64[i], init_by_array_64_expected[i],
-			    "Output mismatch for i=%d");
+			    "Output mismatch for i=%d", i);
 		}
 		r = gen_rand64(ctx);
 		assert_u64_eq(r, array64[i],
diff --git a/test/unit/ckh.c b/test/unit/ckh.c
index 69fd7f5..b214c27 100644
--- a/test/unit/ckh.c
+++ b/test/unit/ckh.c
@@ -29,7 +29,7 @@
 	assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp),
 	    "Unexpected ckh_new() error");
 	assert_zu_eq(ckh_count(&ckh), 0,
-	    "ckh_count() should return %zu, but it returned %zu", 0,
+	    "ckh_count() should return %zu, but it returned %zu", ZU(0),
 	    ckh_count(&ckh));
 
 	/* Insert. */
@@ -101,11 +101,11 @@
 
 TEST_BEGIN(test_insert_iter_remove)
 {
-#define	NITEMS 1000
+#define	NITEMS ZU(1000)
 	ckh_t ckh;
 	void **p[NITEMS];
 	void *q, *r;
-	unsigned i;
+	size_t i;
 
 	assert_false(ckh_new(&ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp),
 	    "Unexpected ckh_new() error");
@@ -116,7 +116,7 @@
 	}
 
 	for (i = 0; i < NITEMS; i++) {
-		unsigned j;
+		size_t j;
 
 		for (j = i; j < NITEMS; j++) {
 			assert_false(ckh_insert(&ckh, p[j], p[j]),
@@ -152,7 +152,7 @@
 
 			for (tabind = 0; ckh_iter(&ckh, &tabind, &q, &r) ==
 			    false;) {
-				unsigned k;
+				size_t k;
 
 				assert_ptr_eq(q, r, "Key and val not equal");
 
@@ -188,7 +188,7 @@
 	}
 
 	assert_zu_eq(ckh_count(&ckh), 0,
-	    "ckh_count() should return %zu, but it returned %zu", 0,
+	    "ckh_count() should return %zu, but it returned %zu", ZU(0),
 	    ckh_count(&ckh));
 	ckh_delete(&ckh);
 #undef NITEMS
diff --git a/test/unit/hash.c b/test/unit/hash.c
index 0446e52..abb394a 100644
--- a/test/unit/hash.c
+++ b/test/unit/hash.c
@@ -122,9 +122,15 @@
 	    (final[3] << 24);
 
 	switch (variant) {
+#ifdef JEMALLOC_BIG_ENDIAN
+	case hash_variant_x86_32: expected = 0x6213303eU; break;
+	case hash_variant_x86_128: expected = 0x266820caU; break;
+	case hash_variant_x64_128: expected = 0xcc622b6fU; break;
+#else
 	case hash_variant_x86_32: expected = 0xb0f57ee3U; break;
 	case hash_variant_x86_128: expected = 0xb3ece62aU; break;
 	case hash_variant_x64_128: expected = 0x6384ba69U; break;
+#endif
 	default: not_reached();
 	}
 
diff --git a/test/unit/junk.c b/test/unit/junk.c
index ef8f9c1..85bbf9e 100644
--- a/test/unit/junk.c
+++ b/test/unit/junk.c
@@ -73,7 +73,7 @@
 		if (sz_prev > 0) {
 			assert_c_eq(s[0], 'a',
 			    "Previously allocated byte %zu/%zu is corrupted",
-			    0, sz_prev);
+			    ZU(0), sz_prev);
 			assert_c_eq(s[sz_prev-1], 'a',
 			    "Previously allocated byte %zu/%zu is corrupted",
 			    sz_prev-1, sz_prev);
diff --git a/test/unit/quarantine.c b/test/unit/quarantine.c
index 4534923..bbd48a5 100644
--- a/test/unit/quarantine.c
+++ b/test/unit/quarantine.c
@@ -21,7 +21,7 @@
 
 TEST_BEGIN(test_quarantine)
 {
-#define	SZ		256
+#define	SZ		ZU(256)
 #define	NQUARANTINED	(QUARANTINE_SIZE/SZ)
 	void *quarantined[NQUARANTINED+1];
 	size_t i, j;
diff --git a/test/unit/rtree.c b/test/unit/rtree.c
index 5e7a411..5463055 100644
--- a/test/unit/rtree.c
+++ b/test/unit/rtree.c
@@ -48,8 +48,9 @@
 				assert_u_eq(rtree_get(rtree, keys[k]), 1,
 				    "rtree_get() should return previously set "
 				    "value and ignore insignificant key bits; "
-				    "i=%u, j=%u, k=%u, set key=%#x, "
-				    "get key=%#x", i, j, k, keys[j], keys[k]);
+				    "i=%u, j=%u, k=%u, set key=%#"PRIxPTR", "
+				    "get key=%#"PRIxPTR, i, j, k, keys[j],
+				    keys[k]);
 			}
 			assert_u_eq(rtree_get(rtree,
 			    (((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), 0,
diff --git a/test/unit/zero.c b/test/unit/zero.c
index 2fdae2f..65a8f0c 100644
--- a/test/unit/zero.c
+++ b/test/unit/zero.c
@@ -20,7 +20,7 @@
 		if (sz_prev > 0) {
 			assert_c_eq(s[0], 'a',
 			    "Previously allocated byte %zu/%zu is corrupted",
-			    0, sz_prev);
+			    ZU(0), sz_prev);
 			assert_c_eq(s[sz_prev-1], 'a',
 			    "Previously allocated byte %zu/%zu is corrupted",
 			    sz_prev-1, sz_prev);