Merge branch 'dev'

Summary of the merged changes: chunk_alloc_mmap() gains a
MAP_NORESERVE-backed sibling, chunk_alloc_mmap_noreserve(), and the swap
placeholder mapping switches to it; tcache junk filling uses the object's
actual size rather than arena->bins[binind].reg_size; arena round-robin
assignment now starts at arena 1; and the opt_lg_prof_sample == 0 fast
path moves from prof_sample_accum_update() into its callers.
diff --git a/jemalloc/include/jemalloc/internal/chunk_mmap.h b/jemalloc/include/jemalloc/internal/chunk_mmap.h
index 8fb90b7..dc52448 100644
--- a/jemalloc/include/jemalloc/internal/chunk_mmap.h
+++ b/jemalloc/include/jemalloc/internal/chunk_mmap.h
@@ -10,6 +10,7 @@
 #ifdef JEMALLOC_H_EXTERNS
 
 void	*chunk_alloc_mmap(size_t size);
+void	*chunk_alloc_mmap_noreserve(size_t size);
 void	chunk_dealloc_mmap(void *chunk, size_t size);
 
 #endif /* JEMALLOC_H_EXTERNS */
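
The new chunk_alloc_mmap_noreserve() mirrors chunk_alloc_mmap() but asks the kernel to skip up-front swap accounting for the mapping. A minimal standalone sketch of what MAP_NORESERVE buys, assuming Linux (the flag is guarded below, as in the patch, because other platforms may not define it); this is illustrative, not jemalloc code:

#include <stdio.h>
#include <sys/mman.h>

int
main(void)
{
	size_t size = (size_t)1 << 30;	/* 1 GiB of address space. */
	int flags = MAP_PRIVATE | MAP_ANON;
	void *p;

#ifdef MAP_NORESERVE
	flags |= MAP_NORESERVE;	/* Don't charge swap until pages are touched. */
#endif
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	((char *)p)[0] = 1;	/* Backing is committed lazily, on first touch. */
	munmap(p, size);
	return (0);
}
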
diff --git a/jemalloc/include/jemalloc/internal/tcache.h b/jemalloc/include/jemalloc/internal/tcache.h
index fa6c53f..a8be436 100644
--- a/jemalloc/include/jemalloc/internal/tcache.h
+++ b/jemalloc/include/jemalloc/internal/tcache.h
@@ -353,7 +353,7 @@
 
 #ifdef JEMALLOC_FILL
 	if (opt_junk)
-		memset(ptr, 0x5a, arena->bins[binind].reg_size);
+		memset(ptr, 0x5a, size);
 #endif
 
 	tbin = &tcache->tbins[binind];
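
This memset sits in the tcache deallocation path, which also serves large objects; for those, binind indexes past the small bins, so the old arena->bins[binind].reg_size read out of bounds and junk-filled with a bogus length. Filling exactly size bytes is correct for every class. A toy sketch of the invariant the fix restores (free_junked() is a hypothetical helper, not jemalloc's API):

#include <stdlib.h>
#include <string.h>

#define JUNK_BYTE 0x5a	/* jemalloc's opt_junk fill pattern. */

/* Junk the caller-known usable size, never a size looked up from a
 * bin the object does not belong to. */
static void
free_junked(void *ptr, size_t usable_size)
{

	memset(ptr, JUNK_BYTE, usable_size);	/* Fill before releasing. */
	free(ptr);
}

int
main(void)
{
	char *p = malloc(4096);

	if (p == NULL)
		return (1);
	free_junked(p, 4096);
	return (0);
}
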
diff --git a/jemalloc/src/chunk_mmap.c b/jemalloc/src/chunk_mmap.c
index 8f07113..d9f9e86 100644
--- a/jemalloc/src/chunk_mmap.c
+++ b/jemalloc/src/chunk_mmap.c
@@ -23,14 +23,15 @@
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
-static void	*pages_map(void *addr, size_t size);
+static void	*pages_map(void *addr, size_t size, bool noreserve);
 static void	pages_unmap(void *addr, size_t size);
-static void	*chunk_alloc_mmap_slow(size_t size, bool unaligned);
+static void	*chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve);
+static void	*chunk_alloc_mmap_internal(size_t size, bool noreserve);
 
 /******************************************************************************/
 
 static void *
-pages_map(void *addr, size_t size)
+pages_map(void *addr, size_t size, bool noreserve)
 {
 	void *ret;
 
@@ -38,8 +39,12 @@
 	 * We don't use MAP_FIXED here, because it can cause the *replacement*
 	 * of existing mappings, and we only want to create new mappings.
 	 */
-	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-	    -1, 0);
+	int flags = MAP_PRIVATE | MAP_ANON;
+#ifdef MAP_NORESERVE
+	if (noreserve)
+		flags |= MAP_NORESERVE;
+#endif
+	ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
 	assert(ret != NULL);
 
 	if (ret == MAP_FAILED)
@@ -83,7 +88,7 @@
 }
 
 static void *
-chunk_alloc_mmap_slow(size_t size, bool unaligned)
+chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve)
 {
 	void *ret;
 	size_t offset;
 	if (size + chunksize <= size)
 		return (NULL);
 
-	ret = pages_map(NULL, size + chunksize);
+	ret = pages_map(NULL, size + chunksize, noreserve);
 	if (ret == NULL)
 		return (NULL);
 
@@ -128,8 +133,8 @@
 	return (ret);
 }
 
-void *
-chunk_alloc_mmap(size_t size)
+static void *
+chunk_alloc_mmap_internal(size_t size, bool noreserve)
 {
 	void *ret;
 
@@ -164,7 +169,7 @@
 	if (mmap_unaligned == false) {
 		size_t offset;
 
-		ret = pages_map(NULL, size);
+		ret = pages_map(NULL, size, noreserve);
 		if (ret == NULL)
 			return (NULL);
 
@@ -173,13 +178,13 @@
 			mmap_unaligned = true;
 			/* Try to extend chunk boundary. */
 			if (pages_map((void *)((uintptr_t)ret + size),
-			    chunksize - offset) == NULL) {
+			    chunksize - offset, noreserve) == NULL) {
 				/*
 				 * Extension failed.  Clean up, then revert to
 				 * the reliable-but-expensive method.
 				 */
 				pages_unmap(ret, size);
-				ret = chunk_alloc_mmap_slow(size, true);
+				ret = chunk_alloc_mmap_slow(size, true, noreserve);
 			} else {
 				/* Clean up unneeded leading space. */
 				pages_unmap(ret, chunksize - offset);
@@ -188,11 +193,23 @@
 			}
 		}
 	} else
-		ret = chunk_alloc_mmap_slow(size, false);
+		ret = chunk_alloc_mmap_slow(size, false, noreserve);
 
 	return (ret);
 }
 
+void *
+chunk_alloc_mmap(size_t size)
+{
+	return (chunk_alloc_mmap_internal(size, false));
+}
+
+void *
+chunk_alloc_mmap_noreserve(size_t size)
+{
+	return (chunk_alloc_mmap_internal(size, true));
+}
+
 void
 chunk_dealloc_mmap(void *chunk, size_t size)
 {
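
chunk_alloc_mmap_internal() first tries a plain pages_map() and falls back to chunk_alloc_mmap_slow() only when the kernel hands back a misaligned region; the slow path over-allocates by a full chunk and trims both ends. A condensed sketch of that trim arithmetic, assuming chunksize is a power of two (the real function additionally threads the noreserve flag through and maintains the mmap_unaligned hint):

#include <stdint.h>
#include <sys/mman.h>

static void *
map_aligned(size_t size, size_t chunksize)
{
	uintptr_t addr;
	size_t offset;
	void *ret;

	/* Beware size_t wrap-around. */
	if (size + chunksize <= size)
		return (NULL);

	ret = mmap(NULL, size + chunksize, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (ret == MAP_FAILED)
		return (NULL);

	addr = (uintptr_t)ret;
	offset = addr & (chunksize - 1);
	if (offset == 0) {
		/* Already aligned; drop the unneeded trailing chunk. */
		munmap((void *)(addr + size), chunksize);
	} else {
		/* Drop the leading space up to the next chunk boundary... */
		munmap(ret, chunksize - offset);
		addr += chunksize - offset;
		/* ...and whatever remains past size bytes. */
		munmap((void *)(addr + size), offset);
		ret = (void *)addr;
	}
	return (ret);
}
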
diff --git a/jemalloc/src/chunk_swap.c b/jemalloc/src/chunk_swap.c
index b8c880f..ed9e414 100644
--- a/jemalloc/src/chunk_swap.c
+++ b/jemalloc/src/chunk_swap.c
@@ -283,7 +283,7 @@
 	 * Allocate a chunk-aligned region of anonymous memory, which will
 	 * be the final location for the memory-mapped files.
 	 */
-	vaddr = chunk_alloc_mmap(cumsize);
+	vaddr = chunk_alloc_mmap_noreserve(cumsize);
 	if (vaddr == NULL) {
 		ret = true;
 		goto RETURN;
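
The region allocated here is pure address-space reservation: the swap files are subsequently mapped over it in place, so charging the anonymous placeholder against swap would double-count the memory. That is exactly what the noreserve variant avoids. A simplified sketch of the reserve-then-overlay pattern (overlay_file() and its parameters are illustrative; fd is assumed to be an open file of at least filesize bytes):

#include <sys/mman.h>

static void *
overlay_file(int fd, size_t filesize, size_t total)
{
	void *base, *chunk;
	int flags = MAP_PRIVATE | MAP_ANON;

#ifdef MAP_NORESERVE
	flags |= MAP_NORESERVE;	/* Placeholder only; no swap charge. */
#endif
	base = mmap(NULL, total, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (base == MAP_FAILED)
		return (NULL);

	/* MAP_FIXED replaces the placeholder pages with the file's. */
	chunk = mmap(base, filesize, PROT_READ | PROT_WRITE,
	    MAP_SHARED | MAP_FIXED, fd, 0);
	if (chunk == MAP_FAILED) {
		munmap(base, total);
		return (NULL);
	}
	return (base);
}
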
diff --git a/jemalloc/src/jemalloc.c b/jemalloc/src/jemalloc.c
index aeab140..bf2ace3 100644
--- a/jemalloc/src/jemalloc.c
+++ b/jemalloc/src/jemalloc.c
@@ -775,7 +775,7 @@
 #endif
 
 #ifndef NO_TLS
-	next_arena = 0;
+	next_arena = (narenas > 0) ? 1 : 0;
 #endif
 
 	/* Allocate and initialize arenas. */
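
next_arena drives the round-robin assignment of arenas to newly seen threads. By the time initialization reaches this point, the bootstrap thread has already claimed arena 0, so starting the rotation at 1 keeps the first spawned thread off the arena that is already loaded. A sketch of the scheme (names are illustrative; the real code takes a lock around the counter and creates arenas lazily):

static unsigned next_arena = 1;	/* Arena 0 is claimed at startup. */

/* Pick an arena index for a new thread, cycling through all of them. */
static unsigned
assign_arena(unsigned narenas)
{
	unsigned ret;

	if (narenas > 1) {
		ret = next_arena;
		next_arena = (next_arena + 1) % narenas;
	} else
		ret = 0;
	return (ret);
}
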
diff --git a/jemalloc/src/prof.c b/jemalloc/src/prof.c
index 93904b8..88e8f44 100644
--- a/jemalloc/src/prof.c
+++ b/jemalloc/src/prof.c
@@ -623,13 +623,8 @@
 prof_sample_accum_update(size_t size)
 {
 
-	if (opt_lg_prof_sample == 0) {
-		/*
-		 * Don't bother with sampling logic, since sampling interval is
-		 * 1.
-		 */
-		return;
-	}
+	/* Sampling logic is unnecessary if the interval is 1. */
+	assert(opt_lg_prof_sample != 0);
 
 	/* Take care to avoid integer overflow. */
 	if (size >= prof_sample_threshold - prof_sample_accum) {
@@ -647,11 +642,15 @@
 void
 prof_malloc(const void *ptr, prof_thr_cnt_t *cnt)
 {
-	size_t size = isalloc(ptr);
+	size_t size;
 
 	assert(ptr != NULL);
 
-	prof_sample_accum_update(size);
+	if (opt_lg_prof_sample != 0) {
+		size = isalloc(ptr);
+		prof_sample_accum_update(size);
+	} else if ((uintptr_t)cnt > (uintptr_t)1U)
+		size = isalloc(ptr);
 
 	if ((uintptr_t)cnt > (uintptr_t)1U) {
 		prof_ctx_set(ptr, cnt->ctx);
@@ -679,11 +678,18 @@
 prof_realloc(const void *ptr, prof_thr_cnt_t *cnt, const void *old_ptr,
     size_t old_size, prof_ctx_t *old_ctx)
 {
-	size_t size = isalloc(ptr);
+	size_t size;
 	prof_thr_cnt_t *told_cnt;
 
-	if (ptr != NULL)
-		prof_sample_accum_update(size);
+	assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
+
+	if (ptr != NULL) {
+		if (opt_lg_prof_sample != 0) {
+			size = isalloc(ptr);
+			prof_sample_accum_update(size);
+		} else if ((uintptr_t)cnt > (uintptr_t)1U)
+			size = isalloc(ptr);
+	}
 
 	if ((uintptr_t)old_ctx > (uintptr_t)1U) {
 		told_cnt = prof_lookup(old_ctx->bt);
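
With this restructuring, the callers skip prof_sample_accum_update() entirely when opt_lg_prof_sample is 0 (every allocation is sampled) and call isalloc() only when the size is actually consumed; the new assert records that the function is never entered with a sampling interval of 1. The accumulator test itself is written to dodge integer overflow: comparing size against the remaining headroom, prof_sample_threshold - prof_sample_accum, is the wrap-proof form of accum + size >= threshold. A fixed-threshold sketch (jemalloc redraws prof_sample_threshold after each sample, and a full version would loop when the excess alone exceeds the threshold):

#include <stdbool.h>
#include <stddef.h>

static size_t prof_sample_threshold = (size_t)1 << 19;
static size_t prof_sample_accum = 0;

/* Return true when accumulated bytes cross the sampling threshold. */
static bool
sample_accum_update(size_t size)
{

	/* Take care to avoid integer overflow. */
	if (size >= prof_sample_threshold - prof_sample_accum) {
		/* Carry the excess into the next sampling period. */
		prof_sample_accum = size -
		    (prof_sample_threshold - prof_sample_accum);
		return (true);
	}
	prof_sample_accum += size;
	return (false);
}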