#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
#ifdef JEMALLOC_STATS
uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;
#endif
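/* Protects the stats above and the tree of huge allocations below. */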
malloc_mutex_t huge_mtx;
/******************************************************************************/
/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;
void *
huge_malloc(size_t size, bool zero)
{
void *ret;
size_t csize;
extent_node_t *node;
/* Allocate one or more contiguous chunks for this request. */
csize = CHUNK_CEILING(size);
if (csize == 0) {
/* size is large enough to cause size_t wrap-around. */
return (NULL);
}
/* Allocate an extent node with which to track the chunk. */
node = base_node_alloc();
if (node == NULL)
return (NULL);
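/*
 * zero is passed by address so that chunk_alloc() can report whether the
 * memory comes back already zeroed; the junk/zero fill below runs only when
 * it does not.
 */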
ret = chunk_alloc(csize, false, &zero);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
}
/* Insert node into huge. */
node->addr = ret;
node->size = csize;
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_add(csize);
huge_nmalloc++;
huge_allocated += csize;
#endif
malloc_mutex_unlock(&huge_mtx);
#ifdef JEMALLOC_FILL
if (zero == false) {
if (opt_junk)
memset(ret, 0xa5, csize);
else if (opt_zero)
memset(ret, 0, csize);
}
#endif
return (ret);
}
/* Only handles large allocations that require more than chunk alignment. */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
void *ret;
size_t alloc_size, chunk_size, offset;
extent_node_t *node;
/*
* This allocation requires alignment that is even larger than chunk
* alignment. This means that huge_malloc() isn't good enough.
*
* Allocate almost twice as many chunks as are demanded by the size or
* alignment, in order to assure the alignment can be achieved, then
* unmap leading and trailing chunks.
*/
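/*
 * For illustration: with alignment == 2*chunksize and size == chunksize,
 * alloc_size is (alignment << 1) - chunksize == 3 chunks.  Any chunk-aligned
 * run of 3 chunks contains an alignment-aligned chunk, so the trimming below
 * can always carve out a properly aligned region and unmap the excess.
 */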
assert(alignment > chunksize);
chunk_size = CHUNK_CEILING(size);
if (size >= alignment)
alloc_size = chunk_size + alignment - chunksize;
else
alloc_size = (alignment << 1) - chunksize;
/* Allocate an extent node with which to track the chunk. */
node = base_node_alloc();
if (node == NULL)
return (NULL);
ret = chunk_alloc(alloc_size, false, &zero);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
}
offset = (uintptr_t)ret & (alignment - 1);
assert((offset & chunksize_mask) == 0);
assert(offset < alloc_size);
if (offset == 0) {
/* Trim trailing space. */
chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
- chunk_size);
} else {
size_t trailsize;
/* Trim leading space. */
chunk_dealloc(ret, alignment - offset);
ret = (void *)((uintptr_t)ret + (alignment - offset));
trailsize = alloc_size - (alignment - offset) - chunk_size;
if (trailsize != 0) {
/* Trim trailing space. */
assert(trailsize < alloc_size);
chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
trailsize);
}
}
/* Insert node into huge. */
node->addr = ret;
node->size = chunk_size;
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_add(chunk_size);
huge_nmalloc++;
huge_allocated += chunk_size;
#endif
malloc_mutex_unlock(&huge_mtx);
#ifdef JEMALLOC_FILL
if (zero == false) {
if (opt_junk)
memset(ret, 0xa5, chunk_size);
else if (opt_zero)
memset(ret, 0, chunk_size);
}
#endif
return (ret);
}
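/*
 * Try to satisfy a resize request without moving the allocation: this only
 * succeeds when ptr is already a huge allocation whose chunk run covers
 * CHUNK_CEILING(size) without exceeding CHUNK_CEILING(size+extra).  Returns
 * NULL if reallocation would require a move.
 */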
void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{
/*
* Avoid moving the allocation if the size class can be left the same.
*/
if (oldsize > arena_maxclass
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
assert(CHUNK_CEILING(oldsize) == oldsize);
#ifdef JEMALLOC_FILL
if (opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size), 0x5a,
oldsize - size);
}
#endif
return (ptr);
}
/* Reallocation would require a move. */
return (NULL);
}
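/*
 * Reallocate to a huge size class: try to resize in place, otherwise allocate
 * new space (retrying without extra on failure), copy the old contents, and
 * free the old allocation; mremap(2) is used to avoid the copy where the
 * platform supports it.
 */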
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero)
{
void *ret;
size_t copysize;
/* Try to avoid moving the allocation. */
ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
if (ret != NULL)
return (ret);
/*
* size and oldsize are different enough that we need to use a
* different size class. In that case, fall back to allocating new
* space and copying.
*/
if (alignment > chunksize)
ret = huge_palloc(size + extra, alignment, zero);
else
ret = huge_malloc(size + extra, zero);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment > chunksize)
ret = huge_palloc(size, alignment, zero);
else
ret = huge_malloc(size, zero);
if (ret == NULL)
return (NULL);
}
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
/*
* Use mremap(2) if this is a huge-->huge reallocation, and neither the
* source nor the destination is in swap or dss.
*/
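/*
 * (Swap- and dss-backed chunks are recycled by their respective managers
 * rather than munmap()ed, so letting mremap() move those pages would
 * presumably corrupt that bookkeeping; hence both source and destination
 * must be ordinary mmap()ed chunks.)
 */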
#ifdef JEMALLOC_MREMAP_FIXED
if (oldsize >= chunksize
# ifdef JEMALLOC_SWAP
&& (swap_enabled == false || (chunk_in_swap(ptr) == false &&
chunk_in_swap(ret) == false))
# endif
# ifdef JEMALLOC_DSS
&& chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
# endif
) {
size_t newsize = huge_salloc(ret);
if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
ret) == MAP_FAILED) {
/*
* Assuming no chunk management bugs in the allocator,
* the only documented way an error can occur here is
* if the application changed the map type for a
* portion of the old allocation. This is firmly in
* undefined behavior territory, so write a diagnostic
* message, and optionally abort.
*/
char buf[BUFERROR_BUF];
buferror(errno, buf, sizeof(buf));
malloc_write("<jemalloc>: Error in mremap(): ");
malloc_write(buf);
malloc_write("\n");
if (opt_abort)
abort();
memcpy(ret, ptr, copysize);
idalloc(ptr);
} else
huge_dalloc(ptr, false);
} else
#endif
{
memcpy(ret, ptr, copysize);
idalloc(ptr);
}
return (ret);
}
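/*
 * Free a huge allocation.  unmap is false when the caller has already
 * disposed of the underlying pages (e.g. the mremap() path in huge_ralloc()),
 * in which case only the bookkeeping is torn down here.
 */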
void
huge_dalloc(void *ptr, bool unmap)
{
extent_node_t *node, key;
malloc_mutex_lock(&huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = ptr;
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
extent_tree_ad_remove(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_sub(node->size);
huge_ndalloc++;
huge_allocated -= node->size;
#endif
malloc_mutex_unlock(&huge_mtx);
if (unmap) {
/* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
if (opt_junk)
memset(node->addr, 0x5a, node->size);
#endif
#endif
chunk_dealloc(node->addr, node->size);
}
base_node_dealloc(node);
}
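/* Return the total chunk size backing the huge allocation at ptr. */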
size_t
huge_salloc(const void *ptr)
{
size_t ret;
extent_node_t *node, key;
malloc_mutex_lock(&huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
ret = node->size;
malloc_mutex_unlock(&huge_mtx);
return (ret);
}
#ifdef JEMALLOC_PROF
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
prof_ctx_t *ret;
extent_node_t *node, key;
malloc_mutex_lock(&huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
ret = node->prof_ctx;
malloc_mutex_unlock(&huge_mtx);
return (ret);
}
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
extent_node_t *node, key;
malloc_mutex_lock(&huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
node->prof_ctx = ctx;
malloc_mutex_unlock(&huge_mtx);
}
#endif
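/* One-time initialization of huge allocation bookkeeping; returns true on error. */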
bool
huge_boot(void)
{
/* Initialize chunks data. */
if (malloc_mutex_init(&huge_mtx))
return (true);
extent_tree_ad_new(&huge);
#ifdef JEMALLOC_STATS
huge_nmalloc = 0;
huge_ndalloc = 0;
huge_allocated = 0;
#endif
return (false);
}