Protect extents_dirty access with extents_mtx.

This fixes race conditions during purging: the purge path walked the
extents_dirty ring without holding extents_mtx.  The extents_dirty
sentinel now lives alongside the extent heaps and is covered by
extents_mtx, the ring/heap helpers take a tsdn_t and assert lock
ownership, and extent_alloc_cache_locked() is added for callers that
already hold the mutex.
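
The pattern applied throughout is the usual lock/assert split: a public
entry point that acquires extents_mtx, plus a *_locked variant that
only asserts ownership so callers already inside the critical section
can reuse it.  A minimal sketch of that discipline follows (the thing_*
names are illustrative only, not part of jemalloc's API; the mutex
calls are the internal ones used in this patch):

    /* Hypothetical helpers showing the lock/assert-owner split. */
    static void
    thing_insert_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
    {
    	/* Caller must already hold extents_mtx. */
    	malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
    	extent_ring_insert(&arena->extents_dirty, extent);
    }

    static void
    thing_insert(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
    {
    	malloc_mutex_lock(tsdn, &arena->extents_mtx);
    	thing_insert_locked(tsdn, arena, extent);
    	malloc_mutex_unlock(tsdn, &arena->extents_mtx);
    }
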
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 3c931c3..cee90b5 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -177,12 +177,6 @@
 	size_t			ndirty;
 
 	/*
-	 * Ring sentinel used to track unused dirty memory.  Dirty memory is
-	 * managed as an LRU of cached extents.
-	 */
-	extent_t		extents_dirty;
-
-	/*
 	 * Approximate time in seconds from the creation of a set of unused
 	 * dirty pages until an equivalent set of unused dirty pages is purged
 	 * and/or reused.
@@ -240,7 +234,12 @@
 	 */
 	extent_heap_t		extents_cached[NPSIZES];
 	extent_heap_t		extents_retained[NPSIZES];
-	/* Protects extents_cached and extents_retained. */
+	/*
+	 * Ring sentinel used to track unused dirty memory.  Dirty memory is
+	 * managed as an LRU of cached extents.
+	 */
+	extent_t		extents_dirty;
+	/* Protects extents_{cached,retained,dirty}. */
 	malloc_mutex_t		extents_mtx;
 
 	/* User-configurable extent hook functions. */
@@ -287,10 +286,10 @@
     size_t alignment, bool *zero);
 void	arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
     extent_hooks_t **r_extent_hooks, extent_t *extent);
-void	arena_extent_cache_maybe_insert(arena_t *arena, extent_t *extent,
-    bool cache);
-void	arena_extent_cache_maybe_remove(arena_t *arena, extent_t *extent,
-    bool cache);
+void	arena_extent_cache_maybe_insert(tsdn_t *tsdn, arena_t *arena,
+    extent_t *extent, bool cache);
+void	arena_extent_cache_maybe_remove(tsdn_t *tsdn, arena_t *arena,
+    extent_t *extent, bool cache);
 extent_t	*arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
     size_t usize, size_t alignment, bool *zero);
 void	arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena,
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index dbdc805..eeebdf0 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -99,6 +99,9 @@
 
 ph_proto(, extent_heap_, extent_heap_t, extent_t)
 
+extent_t	*extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
+    size_t alignment, bool *zero, bool slab);
 extent_t	*extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
     extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
     size_t alignment, bool *zero, bool slab);
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index a489e14..ae60f6c 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -145,6 +145,7 @@
 extent_addr_set
 extent_alloc
 extent_alloc_cache
+extent_alloc_cache_locked
 extent_alloc_dss
 extent_alloc_mmap
 extent_alloc_wrapper
diff --git a/src/arena.c b/src/arena.c
index 7dcf12d..da9e985 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -101,9 +101,12 @@
 }
 
 void
-arena_extent_cache_maybe_insert(arena_t *arena, extent_t *extent, bool cache)
+arena_extent_cache_maybe_insert(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+    bool cache)
 {
 
+	malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
+
 	if (cache) {
 		extent_ring_insert(&arena->extents_dirty, extent);
 		arena->ndirty += arena_extent_dirty_npages(extent);
@@ -111,9 +114,12 @@
 }
 
 void
-arena_extent_cache_maybe_remove(arena_t *arena, extent_t *extent, bool dirty)
+arena_extent_cache_maybe_remove(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+    bool dirty)
 {
 
+	malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
+
 	if (dirty) {
 		extent_ring_remove(extent);
 		assert(arena->ndirty >= arena_extent_dirty_npages(extent));
@@ -727,6 +733,8 @@
 	extent_t *extent;
 	size_t ndirty = 0;
 
+	malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
+
 	for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
 	    &arena->extents_dirty; extent = qr_next(extent, qr_link))
 		ndirty += extent_size_get(extent) >> LG_PAGE;
@@ -741,6 +749,8 @@
 	extent_t *extent, *next;
 	size_t nstashed = 0;
 
+	malloc_mutex_lock(tsdn, &arena->extents_mtx);
+
 	/* Stash extents according to ndirty_limit. */
 	for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
 	    &arena->extents_dirty; extent = next) {
@@ -756,9 +766,9 @@
 		next = qr_next(extent, qr_link);
 		/* Allocate. */
 		zero = false;
-		textent = arena_extent_cache_alloc_locked(tsdn, arena,
-		    r_extent_hooks, extent_base_get(extent),
-		    extent_size_get(extent), 0, CACHELINE, &zero, false);
+		textent = extent_alloc_cache_locked(tsdn, arena, r_extent_hooks,
+		    extent_base_get(extent), extent_size_get(extent), 0,
+		    CACHELINE, &zero, false);
 		assert(textent == extent);
 		assert(zero == extent_zeroed_get(extent));
 		extent_ring_remove(extent);
@@ -770,6 +780,7 @@
 			break;
 	}
 
+	malloc_mutex_unlock(tsdn, &arena->extents_mtx);
 	return (nstashed);
 }
 
@@ -1788,9 +1799,6 @@
 	arena->nactive = 0;
 	arena->ndirty = 0;
 
-	extent_init(&arena->extents_dirty, arena, NULL, 0, 0, false, false,
-	    false, false);
-
 	if (opt_purge == purge_mode_decay)
 		arena_decay_init(arena, arena_decay_time_default_get());
 
@@ -1804,12 +1812,15 @@
 		extent_heap_new(&arena->extents_retained[i]);
 	}
 
-	arena->extent_hooks = (extent_hooks_t *)&extent_hooks_default;
+	extent_init(&arena->extents_dirty, arena, NULL, 0, 0, false, false,
+	    false, false);
 
 	if (malloc_mutex_init(&arena->extents_mtx, "arena_extents",
 	    WITNESS_RANK_ARENA_EXTENTS))
 		return (NULL);
 
+	arena->extent_hooks = (extent_hooks_t *)&extent_hooks_default;
+
 	ql_new(&arena->extent_cache);
 	if (malloc_mutex_init(&arena->extent_cache_mtx, "arena_extent_cache",
 	    WITNESS_RANK_ARENA_EXTENT_CACHE))
diff --git a/src/extent.c b/src/extent.c
index 29c9d2b..522cbb9 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -191,18 +191,26 @@
 ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_ad_comp)
 
 static void
-extent_heaps_insert(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
+extent_heaps_insert(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES],
+    extent_t *extent)
 {
 	size_t psz = extent_size_quantize_floor(extent_size_get(extent));
 	pszind_t pind = psz2ind(psz);
+
+	malloc_mutex_assert_owner(tsdn, &extent_arena_get(extent)->extents_mtx);
+
 	extent_heap_insert(&extent_heaps[pind], extent);
 }
 
 static void
-extent_heaps_remove(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
+extent_heaps_remove(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES],
+    extent_t *extent)
 {
 	size_t psz = extent_size_quantize_floor(extent_size_get(extent));
 	pszind_t pind = psz2ind(psz);
+
+	malloc_mutex_assert_owner(tsdn, &extent_arena_get(extent)->extents_mtx);
+
 	extent_heap_remove(&extent_heaps[pind], extent);
 }
 
@@ -381,9 +389,9 @@
 
 static extent_t *
 extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
-    extent_heap_t extent_heaps[NPSIZES], bool cache, void *new_addr,
-    size_t usize, size_t pad, size_t alignment, bool *zero, bool *commit,
-    bool slab)
+    extent_heap_t extent_heaps[NPSIZES], bool locked, bool cache,
+    void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero,
+    bool *commit, bool slab)
 {
 	extent_t *extent;
 	rtree_ctx_t rtree_ctx_fallback;
@@ -398,7 +406,8 @@
 	/* Beware size_t wrap-around. */
 	if (alloc_size < usize)
 		return (NULL);
-	malloc_mutex_lock(tsdn, &arena->extents_mtx);
+	if (!locked)
+		malloc_mutex_lock(tsdn, &arena->extents_mtx);
 	extent_hooks_assure_initialized(arena, r_extent_hooks);
 	if (new_addr != NULL) {
 		rtree_elm_t *elm;
@@ -419,11 +428,12 @@
 		extent = extent_first_best_fit(arena, extent_heaps, alloc_size);
 	if (extent == NULL || (new_addr != NULL && extent_size_get(extent) <
 	    size)) {
-		malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+		if (!locked)
+			malloc_mutex_unlock(tsdn, &arena->extents_mtx);
 		return (NULL);
 	}
-	extent_heaps_remove(extent_heaps, extent);
-	arena_extent_cache_maybe_remove(arena, extent, cache);
+	extent_heaps_remove(tsdn, extent_heaps, extent);
+	arena_extent_cache_maybe_remove(tsdn, arena, extent, cache);
 
 	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent),
 	    PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent);
@@ -444,11 +454,12 @@
 		if (extent == NULL) {
 			extent_deregister(tsdn, lead);
 			extent_leak(tsdn, arena, r_extent_hooks, cache, lead);
-			malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+			if (!locked)
+				malloc_mutex_unlock(tsdn, &arena->extents_mtx);
 			return (NULL);
 		}
-		extent_heaps_insert(extent_heaps, lead);
-		arena_extent_cache_maybe_insert(arena, lead, cache);
+		extent_heaps_insert(tsdn, extent_heaps, lead);
+		arena_extent_cache_maybe_insert(tsdn, arena, lead, cache);
 	}
 
 	/* Split the trail. */
@@ -459,11 +470,12 @@
 			extent_deregister(tsdn, extent);
 			extent_leak(tsdn, arena, r_extent_hooks, cache,
 			    extent);
-			malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+			if (!locked)
+				malloc_mutex_unlock(tsdn, &arena->extents_mtx);
 			return (NULL);
 		}
-		extent_heaps_insert(extent_heaps, trail);
-		arena_extent_cache_maybe_insert(arena, trail, cache);
+		extent_heaps_insert(tsdn, extent_heaps, trail);
+		arena_extent_cache_maybe_insert(tsdn, arena, trail, cache);
 	} else if (leadsize == 0) {
 		/*
 		 * Splitting causes usize to be set as a side effect, but no
@@ -474,7 +486,8 @@
 
 	if (!extent_committed_get(extent) && extent_commit_wrapper(tsdn, arena,
 	    r_extent_hooks, extent, 0, extent_size_get(extent))) {
-		malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+		if (!locked)
+			malloc_mutex_unlock(tsdn, &arena->extents_mtx);
 		extent_record(tsdn, arena, r_extent_hooks, extent_heaps, cache,
 		    extent);
 		return (NULL);
@@ -488,7 +501,8 @@
 		extent_interior_register(tsdn, rtree_ctx, extent);
 	}
 
-	malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+	if (!locked)
+		malloc_mutex_unlock(tsdn, &arena->extents_mtx);
 
 	if (*zero) {
 		if (!extent_zeroed_get(extent)) {
@@ -540,27 +554,51 @@
 	return (NULL);
 }
 
-extent_t *
-extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
-    size_t alignment, bool *zero, bool slab)
+static extent_t *
+extent_alloc_cache_impl(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, bool locked, void *new_addr, size_t usize,
+    size_t pad, size_t alignment, bool *zero, bool slab)
 {
 	extent_t *extent;
 	bool commit;
 
 	assert(usize + pad != 0);
 	assert(alignment != 0);
+	if (locked)
+		malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
 
 	commit = true;
 	extent = extent_recycle(tsdn, arena, r_extent_hooks,
-	    arena->extents_cached, true, new_addr, usize, pad, alignment, zero,
-	    &commit, slab);
+	    arena->extents_cached, locked, true, new_addr, usize, pad,
+	    alignment, zero, &commit, slab);
 	if (extent == NULL)
 		return (NULL);
 	assert(commit);
 	return (extent);
 }
 
+extent_t *
+extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
+    size_t alignment, bool *zero, bool slab)
+{
+
+	malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
+
+	return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, true,
+	    new_addr, usize, pad, alignment, zero, slab));
+}
+
+extent_t *
+extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
+    size_t alignment, bool *zero, bool slab)
+{
+
+	return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, false,
+	    new_addr, usize, pad, alignment, zero, slab));
+}
+
 static void *
 extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
     size_t size, size_t alignment, bool *zero, bool *commit)
@@ -607,8 +645,8 @@
 	assert(alignment != 0);
 
 	extent = extent_recycle(tsdn, arena, r_extent_hooks,
-	    arena->extents_retained, false, new_addr, usize, pad, alignment,
-	    zero, commit, slab);
+	    arena->extents_retained, false, false, new_addr, usize, pad,
+	    alignment, zero, commit, slab);
 	if (extent != NULL && config_stats) {
 		size_t size = usize + pad;
 		arena->stats.retained -= size;
@@ -697,22 +735,24 @@
 	if (!extent_can_coalesce(a, b))
 		return;
 
-	extent_heaps_remove(extent_heaps, a);
-	extent_heaps_remove(extent_heaps, b);
+	extent_heaps_remove(tsdn, extent_heaps, a);
+	extent_heaps_remove(tsdn, extent_heaps, b);
 
-	arena_extent_cache_maybe_remove(extent_arena_get(a), a, cache);
-	arena_extent_cache_maybe_remove(extent_arena_get(b), b, cache);
+	arena_extent_cache_maybe_remove(tsdn, extent_arena_get(a), a, cache);
+	arena_extent_cache_maybe_remove(tsdn, extent_arena_get(b), b, cache);
 
 	if (extent_merge_wrapper(tsdn, arena, r_extent_hooks, a, b)) {
-		extent_heaps_insert(extent_heaps, a);
-		extent_heaps_insert(extent_heaps, b);
-		arena_extent_cache_maybe_insert(extent_arena_get(a), a, cache);
-		arena_extent_cache_maybe_insert(extent_arena_get(b), b, cache);
+		extent_heaps_insert(tsdn, extent_heaps, a);
+		extent_heaps_insert(tsdn, extent_heaps, b);
+		arena_extent_cache_maybe_insert(tsdn, extent_arena_get(a), a,
+		    cache);
+		arena_extent_cache_maybe_insert(tsdn, extent_arena_get(b), b,
+		    cache);
 		return;
 	}
 
-	extent_heaps_insert(extent_heaps, a);
-	arena_extent_cache_maybe_insert(extent_arena_get(a), a, cache);
+	extent_heaps_insert(tsdn, extent_heaps, a);
+	arena_extent_cache_maybe_insert(tsdn, extent_arena_get(a), a, cache);
 }
 
 static void
@@ -737,8 +777,8 @@
 	}
 
 	assert(extent_lookup(tsdn, extent_base_get(extent), true) == extent);
-	extent_heaps_insert(extent_heaps, extent);
-	arena_extent_cache_maybe_insert(arena, extent, cache);
+	extent_heaps_insert(tsdn, extent_heaps, extent);
+	arena_extent_cache_maybe_insert(tsdn, arena, extent, cache);
 
 	/* Try to coalesce forward. */
 	next = rtree_read(tsdn, &extents_rtree, rtree_ctx,
@@ -1021,7 +1061,7 @@
 		return (true);
 	if (have_dss && extent_in_dss(tsdn, addr_a) != extent_in_dss(tsdn,
 	    addr_b))
-			return (true);
+		return (true);
 
 	return (false);
 }