Commit 5c988b33 authored by antirez

Jemalloc updated to 3.6.0.

Not a single bug reported in about 3 months, and our previous version
(3.2.0) was too old.
parent f94d46ba
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <limits.h>
#include <strings.h>
#define JEMALLOC_VERSION "@jemalloc_version@"
#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
#include "jemalloc_defs@install_suffix@.h"
#ifdef JEMALLOC_EXPERIMENTAL
#define ALLOCM_LG_ALIGN(la) (la)
#if LG_SIZEOF_PTR == 2
#define ALLOCM_ALIGN(a) (ffs(a)-1)
#else
#define ALLOCM_ALIGN(a) ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
#endif
#define ALLOCM_ZERO ((int)0x40)
#define ALLOCM_NO_MOVE ((int)0x80)
/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
#define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
#define ALLOCM_SUCCESS 0
#define ALLOCM_ERR_OOM 1
#define ALLOCM_ERR_NOT_MOVED 2
#endif
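/*
 * Illustrative sketch only (not part of this header), assuming the default
 * install path <jemalloc/jemalloc.h> and no symbol prefix: the experimental
 * flags above compose with bitwise OR, and ALLOCM_ALIGN(4096) encodes
 * lg(4096) == 12 in the low flag bits via ffs().
 */
#if 0	/* example, not compiled into the header */
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void *
aligned_zeroed_example(size_t size)
{
    void *p;
    size_t rsize;

    if (je_allocm(&p, &rsize, size, ALLOCM_ALIGN(4096) | ALLOCM_ZERO) !=
        ALLOCM_SUCCESS)
        return (NULL);
    return (p);	/* rsize now holds the real (usable) size. */
}
#endif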
/*
* The je_ prefix on the following public symbol declarations is an artifact of
* namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see below).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
JEMALLOC_EXPORT size_t je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size,
int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size,
size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
#endif
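/*
 * Illustrative sketch only, assuming a default (unprefixed) build where the
 * public API is reached directly through the je_* names declared above.
 */
#if 0	/* example, not compiled into the header */
static void
basic_usage_example(void)
{
    void *p = je_malloc(128);
    size_t usable;

    if (p == NULL)
        return;
    usable = je_malloc_usable_size(p);	/* >= 128 */
    (void)usable;
    je_free(p);
}
#endif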
/*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
#ifndef JEMALLOC_NO_DEMANGLE
#define JEMALLOC_NO_DEMANGLE
#endif
#define malloc_conf je_malloc_conf
#define malloc_message je_malloc_message
#define malloc je_malloc
#define calloc je_calloc
#define posix_memalign je_posix_memalign
#define aligned_alloc je_aligned_alloc
#define realloc je_realloc
#define free je_free
#define malloc_usable_size je_malloc_usable_size
#define malloc_stats_print je_malloc_stats_print
#define mallctl je_mallctl
#define mallctlnametomib je_mallctlnametomib
#define mallctlbymib je_mallctlbymib
#define memalign je_memalign
#define valloc je_valloc
#ifdef JEMALLOC_EXPERIMENTAL
#define allocm je_allocm
#define rallocm je_rallocm
#define sallocm je_sallocm
#define dallocm je_dallocm
#define nallocm je_nallocm
#endif
#endif
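/*
 * Illustrative sketch only: when JEMALLOC_MANGLE is defined before this header
 * is included, the standard allocator names above become macros for the je_*
 * (or otherwise prefixed) symbols, so unmodified application code is routed to
 * jemalloc.  The install path below is an assumption.
 */
#if 0	/* example, not compiled into the header */
#define JEMALLOC_MANGLE
#include <jemalloc/jemalloc.h>

static void
mangle_example(void)
{
    void *p = malloc(64);	/* expands to je_malloc(64) */

    free(p);			/* expands to je_free(p) */
}
#endif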
/*
* The je_* macros can be used as stable alternative names for the public
* jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily meant
* for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_malloc_usable_size
#undef je_malloc_stats_print
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_memalign
#undef je_valloc
#ifdef JEMALLOC_EXPERIMENTAL
#undef je_allocm
#undef je_rallocm
#undef je_sallocm
#undef je_dallocm
#undef je_nallocm
#endif
#endif
#ifdef __cplusplus
};
#endif
#endif /* JEMALLOC_H_ */
#!/bin/sh
objroot=$1
cat <<EOF
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
EOF
for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
jemalloc_protos.h jemalloc_mangle.h ; do
cat "${objroot}include/jemalloc/${hdr}" \
| grep -v 'Generated from .* by configure\.' \
| sed -e 's/^#define /#define /g' \
| sed -e 's/ $//g'
echo
done
cat <<EOF
#ifdef __cplusplus
};
#endif
#endif /* JEMALLOC_H_ */
EOF
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
#undef JEMALLOC_PREFIX
#undef JEMALLOC_CPREFIX
/*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_malloc_usable_size
#undef je_malloc_stats_print
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_memalign
#undef je_valloc
#undef je_allocm
#undef je_rallocm
#undef je_sallocm
#undef je_dallocm
#undef je_nallocm
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#undef JEMALLOC_PRIVATE_NAMESPACE
#undef JEMALLOC_N
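/*
 * Illustrative sketch only, using hypothetical EXAMPLE_* names: JEMALLOC_N()
 * is the usual two-level token-paste wrapper, so every library-private
 * identifier picks up the JEMALLOC_PRIVATE_NAMESPACE prefix and cannot collide
 * with application symbols in a static link.
 */
#if 0	/* example, not compiled into the header */
#define EXAMPLE_PRIVATE_NAMESPACE	je_internal_
#define EXAMPLE_CONCAT(a, b)		a##b
#define EXAMPLE_PASTE(prefix, n)	EXAMPLE_CONCAT(prefix, n)
#define EXAMPLE_N(n)			EXAMPLE_PASTE(EXAMPLE_PRIVATE_NAMESPACE, n)

extern int EXAMPLE_N(arenas_total);	/* -> extern int je_internal_arenas_total; */
#endif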
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#undef CPU_SPINWAIT
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
#undef JEMALLOC_ATOMIC9
/*
* Defined if OSAtomic*() functions are available, as provided by Darwin, and
* documented in the atomic(3) manual page.
*/
#undef JEMALLOC_OSATOMIC
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
#undef JEMALLOC_OSSPIN
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
#undef JEMALLOC_MALLOC_THREAD_CLEANUP
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
#undef JEMALLOC_THREADED_INIT
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
#undef JEMALLOC_MUTEX_INIT_CB
/* Defined if __attribute__((...)) syntax is supported. */
#undef JEMALLOC_HAVE_ATTR
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_NOINLINE __declspec(noinline)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_SECTION(s)
# define JEMALLOC_NOINLINE
#endif
/* Defined if sbrk() is supported. */
#undef JEMALLOC_HAVE_SBRK
/* Non-empty if the tls_model attribute is supported. */
#undef JEMALLOC_TLS_MODEL
/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
#undef JEMALLOC_CC_SILENCE
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
#undef JEMALLOC_DEBUG
/* JEMALLOC_STATS enables statistics calculation. */
#undef JEMALLOC_STATS
/* JEMALLOC_PROF enables allocation profiling. */
#undef JEMALLOC_PROF
/* Use libunwind for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBUNWIND
/* Use libgcc for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBGCC
/* Use gcc intrinsics for profile backtracing if defined. */
#undef JEMALLOC_PROF_GCC
/*
* JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
* This makes it possible to allocate/deallocate objects without any locking
* when the cache is in the steady state.
*/
#undef JEMALLOC_TCACHE
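/*
 * Illustrative sketch only: even when JEMALLOC_TCACHE is compiled in, the
 * thread cache can be tuned at run time through the malloc_conf string
 * (symbol name shown for an unprefixed build).
 */
#if 0	/* example, not compiled into the header */
const char *malloc_conf = "tcache:false,lg_tcache_max:16";
#endif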
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
* segment (DSS).
*/
#undef JEMALLOC_DSS
/* Support memory filling (junk/zero/quarantine/redzone). */
#undef JEMALLOC_FILL
/* Support the experimental API. */
#undef JEMALLOC_EXPERIMENTAL
/* Support utrace(2)-based tracing. */
#undef JEMALLOC_UTRACE
/* Support Valgrind. */
#undef JEMALLOC_VALGRIND
/* Support optional abort() on OOM. */
#undef JEMALLOC_XMALLOC
/* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK
/* One page is 2^STATIC_PAGE_SHIFT bytes. */
#undef STATIC_PAGE_SHIFT
/*
* If defined, use munmap() to unmap freed chunks, rather than storing them for
* later reuse. This is disabled by default on Linux because common sequences
* of mmap()/munmap() calls will cause virtual memory map holes.
*/
#undef JEMALLOC_MUNMAP
/*
* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is
* disabled by default because it is Linux-specific and it will cause virtual
* memory map holes, much like munmap(2) does.
*/
#undef JEMALLOC_MREMAP
/* TLS is used to map arenas and magazine caches to threads. */
#undef JEMALLOC_TLS
/*
 * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
 * within jemalloc-owned chunks before dereferencing them.
 */
#undef JEMALLOC_IVSALLOC
/*
 * Define overrides for non-standard allocator-related functions if they are
 * present on the system.
 */
#undef JEMALLOC_OVERRIDE_MEMALIGN
#undef JEMALLOC_OVERRIDE_VALLOC
@@ -230,33 +20,5 @@
 */
#undef JEMALLOC_USABLE_SIZE_CONST
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
#undef JEMALLOC_ZONE
#undef JEMALLOC_ZONE_VERSION
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
* such that new pages will be demand-zeroed if
* the address region is later touched.
* madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
* unused, such that they will be discarded rather
* than swapped out.
*/
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_FREE
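/*
 * Illustrative sketch only, not jemalloc code: the purging primitive that the
 * two macros above select between, shown for the Linux-style MADV_DONTNEED
 * case.
 */
#if 0	/* example, not compiled into the header */
#include <stddef.h>
#include <sys/mman.h>

static void
purge_example(void *addr, size_t length)
{

    /* Discard the physical pages; the mapping itself remains valid. */
    (void)madvise(addr, length, MADV_DONTNEED);
}
#endif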
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#undef LG_SIZEOF_PTR
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#undef LG_SIZEOF_INT
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T
#include <limits.h>
#include <strings.h>
#define JEMALLOC_VERSION "@jemalloc_version@"
#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
# define MALLOCX_ALIGN(a) \
((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
#ifdef JEMALLOC_EXPERIMENTAL
# define ALLOCM_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define ALLOCM_ALIGN(a) (ffs(a)-1)
# else
# define ALLOCM_ALIGN(a) \
((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
# endif
# define ALLOCM_ZERO ((int)0x40)
# define ALLOCM_NO_MOVE ((int)0x80)
/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
# define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
# define ALLOCM_SUCCESS 0
# define ALLOCM_ERR_OOM 1
# define ALLOCM_ERR_NOT_MOVED 2
#endif
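/*
 * Illustrative sketch only, assuming the default je_ prefix: the MALLOCX_*
 * flags above drive the new *allocx functions declared elsewhere in this
 * commit, e.g. a 64-byte-aligned, zeroed allocation.
 */
#if 0	/* example, not compiled into the header */
#include <stddef.h>

static void
mallocx_example(void)
{
    void *p = je_mallocx(256, MALLOCX_ALIGN(64) | MALLOCX_ZERO);

    if (p != NULL) {
        size_t usable = je_sallocx(p, 0);	/* real size backing p */

        (void)usable;
        je_dallocx(p, 0);
    }
}
#endif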
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_NOINLINE __declspec(noinline)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_SECTION(s)
# define JEMALLOC_NOINLINE
#endif
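/*
 * Illustrative expansion only: under a GCC-style toolchain where
 * JEMALLOC_HAVE_ATTR is defined, a prototype written with the macros above,
 * e.g. "JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);",
 * expands to the attribute form below.
 */
#if 0	/* example, not compiled into the header */
#include <stddef.h>

__attribute__((visibility("default"))) void *je_malloc(size_t size)
    __attribute__((malloc));
#endif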
#!/bin/sh
public_symbols_txt=$1
symbol_prefix=$2
cat <<EOF
/*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
EOF
for nm in `cat ${public_symbols_txt}` ; do
n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
echo "# define ${n} ${symbol_prefix}${n}"
done
cat <<EOF
#endif
/*
* The ${symbol_prefix}* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
EOF
for nm in `cat ${public_symbols_txt}` ; do
n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
echo "# undef ${symbol_prefix}${n}"
done
cat <<EOF
#endif
EOF
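/*
 * Illustrative output only: for a public_symbols_txt entry of the assumed form
 * "malloc:je_malloc" and a symbol_prefix of "je_", the loops above would emit
 * roughly the following into the generated mangling header.
 */
#if 0	/* example of generated output, not part of the script */
#ifdef JEMALLOC_MANGLE
#  ifndef JEMALLOC_NO_DEMANGLE
#    define JEMALLOC_NO_DEMANGLE
#  endif
#  define malloc je_malloc
#endif
#ifndef JEMALLOC_NO_DEMANGLE
#  undef je_malloc
#endif
#endif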
/*
* The @je_@ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h).
*/
extern JEMALLOC_EXPORT const char *@je_@malloc_conf;
extern JEMALLOC_EXPORT void (*@je_@malloc_message)(void *cbopaque,
const char *s);
JEMALLOC_EXPORT void *@je_@malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *@je_@calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int @je_@posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *@je_@aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *@je_@realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void @je_@free(void *ptr);
JEMALLOC_EXPORT void *@je_@mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *@je_@rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t @je_@xallocx(void *ptr, size_t size, size_t extra,
int flags);
JEMALLOC_EXPORT size_t @je_@sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void @je_@dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t @je_@nallocx(size_t size, int flags);
JEMALLOC_EXPORT int @je_@mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int @je_@mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int @je_@mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void @je_@malloc_stats_print(void (*write_cb)(void *,
const char *), void *@je_@cbopaque, const char *opts);
JEMALLOC_EXPORT size_t @je_@malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * @je_@memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * @je_@valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_EXPORT int @je_@allocm(void **ptr, size_t *rsize, size_t size,
int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int @je_@rallocm(void **ptr, size_t *rsize, size_t size,
size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int @je_@sallocm(const void *ptr, size_t *rsize, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int @je_@dallocm(void *ptr, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int @je_@nallocm(size_t *rsize, size_t size, int flags);
#endif
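/*
 * Illustrative sketch only, assuming the default je_ prefix and a build with
 * statistics enabled (JEMALLOC_STATS): typical use of the mallctl() interface
 * declared above to read a counter.
 */
#if 0	/* example, not compiled into the header */
#include <stddef.h>

static size_t
stats_allocated_example(void)
{
    size_t allocated = 0, sz = sizeof(allocated);

    if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0)
        return (0);
    return (allocated);
}
#endif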
#!/bin/sh
public_symbols_txt=$1
cat <<EOF
/*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#ifndef JEMALLOC_NO_RENAME
EOF
for nm in `cat ${public_symbols_txt}` ; do
n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
echo "# define je_${n} ${m}"
done
cat <<EOF
#endif
EOF
...@@ -38,52 +38,18 @@ const uint8_t small_size2bin[] = { ...@@ -38,52 +38,18 @@ const uint8_t small_size2bin[] = {
}; };
/******************************************************************************/ /******************************************************************************/
/* Function prototypes for non-inline static functions. */ /*
* Function prototypes for static functions that are referenced prior to
static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, * definition.
size_t pageind, size_t npages, bool maybe_adjac_pred, */
bool maybe_adjac_succ);
static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
size_t pageind, size_t npages, bool maybe_adjac_pred,
bool maybe_adjac_succ);
static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
bool large, size_t binind, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size,
bool large, size_t binind, bool zero);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
size_t binind, bool zero);
static arena_chunk_t *chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
arena_chunk_t *chunk, void *arg);
static void arena_purge(arena_t *arena, bool all); static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
bool cleaned); bool cleaned);
static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, size_t oldsize, size_t newsize);
static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t *arena_bin_runs_first(arena_bin_t *bin);
static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin); arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin); arena_run_t *run, arena_bin_t *bin);
static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t oldsize, size_t size);
static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
size_t min_run_size);
static void bin_info_init(void);
/******************************************************************************/ /******************************************************************************/
...@@ -359,60 +325,73 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr) ...@@ -359,60 +325,73 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
} }
static inline void static inline void
arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{
VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
LG_PAGE)), (npages << LG_PAGE));
memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
(npages << LG_PAGE));
}
static inline void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
LG_PAGE)), PAGE);
}
static inline void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{ {
size_t i; size_t i;
UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
arena_run_page_mark_zeroed(chunk, run_ind);
for (i = 0; i < PAGE / sizeof(size_t); i++) for (i = 0; i < PAGE / sizeof(size_t); i++)
assert(p[i] == 0); assert(p[i] == 0);
} }
static void static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large, arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
size_t binind, bool zero)
{ {
arena_chunk_t *chunk;
size_t run_ind, total_pages, need_pages, rem_pages, i;
size_t flag_dirty;
assert((large && binind == BININD_INVALID) || (large == false && binind if (config_stats) {
!= BININD_INVALID)); ssize_t cactive_diff = CHUNK_CEILING((arena->nactive +
add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive -
sub_pages) << LG_PAGE);
if (cactive_diff != 0)
stats_cactive_add(cactive_diff);
}
}
static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
size_t flag_dirty, size_t need_pages)
{
size_t total_pages, rem_pages;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
LG_PAGE; LG_PAGE;
assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
flag_dirty); flag_dirty);
need_pages = (size >> LG_PAGE);
assert(need_pages > 0);
assert(need_pages <= total_pages); assert(need_pages <= total_pages);
rem_pages = total_pages - need_pages; rem_pages = total_pages - need_pages;
arena_avail_remove(arena, chunk, run_ind, total_pages, true, true); arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
if (config_stats) { arena_cactive_update(arena, need_pages, 0);
/*
* Update stats_cactive if nactive is crossing a chunk
* multiple.
*/
size_t cactive_diff = CHUNK_CEILING((arena->nactive +
need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
LG_PAGE);
if (cactive_diff != 0)
stats_cactive_add(cactive_diff);
}
arena->nactive += need_pages; arena->nactive += need_pages;
/* Keep track of trailing unused pages for later use. */ /* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) { if (rem_pages > 0) {
if (flag_dirty != 0) { if (flag_dirty != 0) {
arena_mapbits_unallocated_set(chunk, run_ind+need_pages, arena_mapbits_unallocated_set(chunk,
(rem_pages << LG_PAGE), CHUNK_MAP_DIRTY); run_ind+need_pages, (rem_pages << LG_PAGE),
flag_dirty);
arena_mapbits_unallocated_set(chunk, arena_mapbits_unallocated_set(chunk,
run_ind+total_pages-1, (rem_pages << LG_PAGE), run_ind+total_pages-1, (rem_pages << LG_PAGE),
CHUNK_MAP_DIRTY); flag_dirty);
} else { } else {
arena_mapbits_unallocated_set(chunk, run_ind+need_pages, arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
(rem_pages << LG_PAGE), (rem_pages << LG_PAGE),
...@@ -426,97 +405,128 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large, ...@@ -426,97 +405,128 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages, arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
false, true); false, true);
} }
}
static void
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
bool remove, bool zero)
{
arena_chunk_t *chunk;
size_t flag_dirty, run_ind, need_pages, i;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
need_pages = (size >> LG_PAGE);
assert(need_pages > 0);
if (remove) {
arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
need_pages);
}
/*
* Update the page map separately for large vs. small runs, since it is
* possible to avoid iteration for large mallocs.
*/
if (large) {
if (zero) { if (zero) {
if (flag_dirty == 0) { if (flag_dirty == 0) {
/* /*
* The run is clean, so some pages may be * The run is clean, so some pages may be zeroed (i.e.
* zeroed (i.e. never before touched). * never before touched).
*/ */
for (i = 0; i < need_pages; i++) { for (i = 0; i < need_pages; i++) {
if (arena_mapbits_unzeroed_get(chunk, if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
run_ind+i) != 0) { != 0)
VALGRIND_MAKE_MEM_UNDEFINED( arena_run_zero(chunk, run_ind+i, 1);
(void *)((uintptr_t) else if (config_debug) {
chunk + ((run_ind+i) << arena_run_page_validate_zeroed(chunk,
LG_PAGE)), PAGE); run_ind+i);
memset((void *)((uintptr_t) } else {
chunk + ((run_ind+i) << arena_run_page_mark_zeroed(chunk,
LG_PAGE)), 0, PAGE); run_ind+i);
} else if (config_debug) {
VALGRIND_MAKE_MEM_DEFINED(
(void *)((uintptr_t)
chunk + ((run_ind+i) <<
LG_PAGE)), PAGE);
arena_chunk_validate_zeroed(
chunk, run_ind+i);
} }
} }
} else { } else {
/* /* The run is dirty, so all pages must be zeroed. */
* The run is dirty, so all pages must be arena_run_zero(chunk, run_ind, need_pages);
* zeroed.
*/
VALGRIND_MAKE_MEM_UNDEFINED((void
*)((uintptr_t)chunk + (run_ind <<
LG_PAGE)), (need_pages << LG_PAGE));
memset((void *)((uintptr_t)chunk + (run_ind <<
LG_PAGE)), 0, (need_pages << LG_PAGE));
} }
} else {
VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
(run_ind << LG_PAGE)), (need_pages << LG_PAGE));
} }
/* /*
* Set the last element first, in case the run only contains one * Set the last element first, in case the run only contains one page
* page (i.e. both statements set the same element). * (i.e. both statements set the same element).
*/ */
arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
flag_dirty);
arena_mapbits_large_set(chunk, run_ind, size, flag_dirty); arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
} else { }
assert(zero == false);
static void
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{
arena_run_split_large_helper(arena, run, size, true, zero);
}
static void
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{
arena_run_split_large_helper(arena, run, size, false, zero);
}
static void
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
size_t binind)
{
arena_chunk_t *chunk;
size_t flag_dirty, run_ind, need_pages, i;
assert(binind != BININD_INVALID);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
need_pages = (size >> LG_PAGE);
assert(need_pages > 0);
arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);
/* /*
* Propagate the dirty and unzeroed flags to the allocated * Propagate the dirty and unzeroed flags to the allocated small run,
* small run, so that arena_dalloc_bin_run() has the ability to * so that arena_dalloc_bin_run() has the ability to conditionally trim
* conditionally trim clean pages. * clean pages.
*/ */
arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty); arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
/* /*
* The first page will always be dirtied during small run * The first page will always be dirtied during small run
* initialization, so a validation failure here would not * initialization, so a validation failure here would not actually
* actually cause an observable failure. * cause an observable failure.
*/ */
if (config_debug && flag_dirty == 0 && if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
arena_mapbits_unzeroed_get(chunk, run_ind) == 0) run_ind) == 0)
arena_chunk_validate_zeroed(chunk, run_ind); arena_run_page_validate_zeroed(chunk, run_ind);
for (i = 1; i < need_pages - 1; i++) { for (i = 1; i < need_pages - 1; i++) {
arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0); arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
if (config_debug && flag_dirty == 0 && if (config_debug && flag_dirty == 0 &&
arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
arena_chunk_validate_zeroed(chunk, run_ind+i); arena_run_page_validate_zeroed(chunk, run_ind+i);
} }
arena_mapbits_small_set(chunk, run_ind+need_pages-1, arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1,
need_pages-1, binind, flag_dirty); binind, flag_dirty);
if (config_debug && flag_dirty == 0 && if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) == run_ind+need_pages-1) == 0)
0) { arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1);
arena_chunk_validate_zeroed(chunk, VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
run_ind+need_pages-1); (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}
}
} }
static arena_chunk_t * static arena_chunk_t *
arena_chunk_alloc(arena_t *arena) arena_chunk_init_spare(arena_t *arena)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
size_t i;
if (arena->spare != NULL) { assert(arena->spare != NULL);
chunk = arena->spare; chunk = arena->spare;
arena->spare = NULL; arena->spare = NULL;
...@@ -524,18 +534,27 @@ arena_chunk_alloc(arena_t *arena) ...@@ -524,18 +534,27 @@ arena_chunk_alloc(arena_t *arena)
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
arena_maxclass); arena_maxclass);
assert(arena_mapbits_unallocated_size_get(chunk, assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
chunk_npages-1) == arena_maxclass); arena_maxclass);
assert(arena_mapbits_dirty_get(chunk, map_bias) == assert(arena_mapbits_dirty_get(chunk, map_bias) ==
arena_mapbits_dirty_get(chunk, chunk_npages-1)); arena_mapbits_dirty_get(chunk, chunk_npages-1));
} else {
return (chunk);
}
static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
arena_chunk_t *chunk;
bool zero; bool zero;
size_t unzeroed; size_t unzeroed, i;
assert(arena->spare == NULL);
zero = false; zero = false;
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(&arena->lock);
chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false,
false, &zero, arena->dss_prec); &zero, arena->dss_prec);
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(&arena->lock);
if (chunk == NULL) if (chunk == NULL)
return (NULL); return (NULL);
...@@ -545,8 +564,7 @@ arena_chunk_alloc(arena_t *arena) ...@@ -545,8 +564,7 @@ arena_chunk_alloc(arena_t *arena)
chunk->arena = arena; chunk->arena = arena;
/* /*
* Claim that no pages are in use, since the header is merely * Claim that no pages are in use, since the header is merely overhead.
* overhead.
*/ */
chunk->ndirty = 0; chunk->ndirty = 0;
...@@ -554,28 +572,52 @@ arena_chunk_alloc(arena_t *arena) ...@@ -554,28 +572,52 @@ arena_chunk_alloc(arena_t *arena)
chunk->nruns_adjac = 0; chunk->nruns_adjac = 0;
/* /*
* Initialize the map to contain one maximal free untouched run. * Initialize the map to contain one maximal free untouched run. Mark
* Mark the pages as zeroed iff chunk_alloc() returned a zeroed * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
* chunk.
*/ */
unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass, arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
unzeroed); unzeroed);
/* /*
* There is no need to initialize the internal page map entries * There is no need to initialize the internal page map entries unless
* unless the chunk is not zeroed. * the chunk is not zeroed.
*/ */
if (zero == false) { if (zero == false) {
VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk,
map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++) for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_unzeroed_set(chunk, i, unzeroed); arena_mapbits_unzeroed_set(chunk, i, unzeroed);
} else if (config_debug) { } else {
VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
map_bias+1)));
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) { for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) == assert(arena_mapbits_unzeroed_get(chunk, i) ==
unzeroed); unzeroed);
} }
} }
arena_mapbits_unallocated_set(chunk, chunk_npages-1, }
arena_maxclass, unzeroed); arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass,
unzeroed);
return (chunk);
}
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
arena_chunk_t *chunk;
if (arena->spare != NULL)
chunk = arena_chunk_init_spare(arena);
else {
chunk = arena_chunk_init_hard(arena);
if (chunk == NULL)
return (NULL);
} }
/* Insert the run into the runs_avail tree. */ /* Insert the run into the runs_avail tree. */
...@@ -618,8 +660,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk) ...@@ -618,8 +660,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
} }
static arena_run_t * static arena_run_t *
arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind, arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
bool zero)
{ {
arena_run_t *run; arena_run_t *run;
arena_chunk_map_t *mapelm, key; arena_chunk_map_t *mapelm, key;
...@@ -634,7 +675,7 @@ arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind, ...@@ -634,7 +675,7 @@ arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE)); LG_PAGE));
arena_run_split(arena, run, size, large, binind, zero); arena_run_split_large(arena, run, size, zero);
return (run); return (run);
} }
...@@ -642,19 +683,16 @@ arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind, ...@@ -642,19 +683,16 @@ arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
} }
static arena_run_t * static arena_run_t *
arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind, arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
bool zero)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
arena_run_t *run; arena_run_t *run;
assert(size <= arena_maxclass); assert(size <= arena_maxclass);
assert((size & PAGE_MASK) == 0); assert((size & PAGE_MASK) == 0);
assert((large && binind == BININD_INVALID) || (large == false && binind
!= BININD_INVALID));
/* Search the arena's chunks for the lowest best fit. */ /* Search the arena's chunks for the lowest best fit. */
run = arena_run_alloc_helper(arena, size, large, binind, zero); run = arena_run_alloc_large_helper(arena, size, zero);
if (run != NULL) if (run != NULL)
return (run); return (run);
...@@ -664,7 +702,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind, ...@@ -664,7 +702,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
chunk = arena_chunk_alloc(arena); chunk = arena_chunk_alloc(arena);
if (chunk != NULL) { if (chunk != NULL) {
run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
arena_run_split(arena, run, size, large, binind, zero); arena_run_split_large(arena, run, size, zero);
return (run); return (run);
} }
...@@ -673,7 +711,63 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind, ...@@ -673,7 +711,63 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
* sufficient memory available while this one dropped arena->lock in * sufficient memory available while this one dropped arena->lock in
* arena_chunk_alloc(), so search one more time. * arena_chunk_alloc(), so search one more time.
*/ */
return (arena_run_alloc_helper(arena, size, large, binind, zero)); return (arena_run_alloc_large_helper(arena, size, zero));
}
static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
{
arena_run_t *run;
arena_chunk_map_t *mapelm, key;
key.bits = size | CHUNK_MAP_KEY;
mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
if (mapelm != NULL) {
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
size_t pageind = (((uintptr_t)mapelm -
(uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
+ map_bias;
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE));
arena_run_split_small(arena, run, size, binind);
return (run);
}
return (NULL);
}
static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
{
arena_chunk_t *chunk;
arena_run_t *run;
assert(size <= arena_maxclass);
assert((size & PAGE_MASK) == 0);
assert(binind != BININD_INVALID);
/* Search the arena's chunks for the lowest best fit. */
run = arena_run_alloc_small_helper(arena, size, binind);
if (run != NULL)
return (run);
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
arena_run_split_small(arena, run, size, binind);
return (run);
}
/*
* arena_chunk_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped arena->lock in
* arena_chunk_alloc(), so search one more time.
*/
return (arena_run_alloc_small_helper(arena, size, binind));
} }
static inline void static inline void
...@@ -699,48 +793,42 @@ arena_maybe_purge(arena_t *arena) ...@@ -699,48 +793,42 @@ arena_maybe_purge(arena_t *arena)
arena_purge(arena, false); arena_purge(arena, false);
} }
static inline size_t static arena_chunk_t *
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all) chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
{ {
size_t npurged; size_t *ndirty = (size_t *)arg;
ql_head(arena_chunk_map_t) mapelms;
arena_chunk_map_t *mapelm;
size_t pageind, npages;
size_t nmadvise;
ql_new(&mapelms); assert(chunk->ndirty != 0);
*ndirty += chunk->ndirty;
return (NULL);
}
static size_t
arena_compute_npurgatory(arena_t *arena, bool all)
{
size_t npurgatory, npurgeable;
/* /*
* If chunk is the spare, temporarily re-allocate it, 1) so that its * Compute the minimum number of pages that this thread should try to
* run is reinserted into runs_avail, and 2) so that it cannot be * purge.
* completely discarded by another thread while arena->lock is dropped
* by this thread. Note that the arena_run_dalloc() call will
* implicitly deallocate the chunk, so no explicit action is required
* in this function to deallocate the chunk.
*
* Note that once a chunk contains dirty pages, it cannot again contain
* a single run unless 1) it is a dirty run, or 2) this function purges
* dirty pages and causes the transition to a single clean run. Thus
* (chunk == arena->spare) is possible, but it is not possible for
* this function to be called on the spare unless it contains a dirty
* run.
*/ */
if (chunk == arena->spare) { npurgeable = arena->ndirty - arena->npurgatory;
assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
arena_chunk_alloc(arena); if (all == false) {
} size_t threshold = (arena->nactive >> opt_lg_dirty_mult);
if (config_stats) npurgatory = npurgeable - threshold;
arena->stats.purged += chunk->ndirty; } else
npurgatory = npurgeable;
/* return (npurgatory);
* Operate on all dirty runs if there is no clean/dirty run }
* fragmentation.
*/ static void
if (chunk->nruns_adjac == 0) arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all,
all = true; arena_chunk_mapelms_t *mapelms)
{
size_t pageind, npages;
/* /*
* Temporarily allocate free dirty runs within chunk. If all is false, * Temporarily allocate free dirty runs within chunk. If all is false,
...@@ -748,7 +836,7 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all) ...@@ -748,7 +836,7 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
* all dirty runs. * all dirty runs.
*/ */
for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
mapelm = arena_mapp_get(chunk, pageind); arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
if (arena_mapbits_allocated_get(chunk, pageind) == 0) { if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
size_t run_size = size_t run_size =
arena_mapbits_unallocated_size_get(chunk, pageind); arena_mapbits_unallocated_size_get(chunk, pageind);
...@@ -764,11 +852,11 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all) ...@@ -764,11 +852,11 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
arena_run_t *run = (arena_run_t *)((uintptr_t) arena_run_t *run = (arena_run_t *)((uintptr_t)
chunk + (uintptr_t)(pageind << LG_PAGE)); chunk + (uintptr_t)(pageind << LG_PAGE));
arena_run_split(arena, run, run_size, true, arena_run_split_large(arena, run, run_size,
BININD_INVALID, false); false);
/* Append to list for later processing. */ /* Append to list for later processing. */
ql_elm_new(mapelm, u.ql_link); ql_elm_new(mapelm, u.ql_link);
ql_tail_insert(&mapelms, mapelm, u.ql_link); ql_tail_insert(mapelms, mapelm, u.ql_link);
} }
} else { } else {
/* Skip run. */ /* Skip run. */
...@@ -792,12 +880,20 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all) ...@@ -792,12 +880,20 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
assert(pageind == chunk_npages); assert(pageind == chunk_npages);
assert(chunk->ndirty == 0 || all == false); assert(chunk->ndirty == 0 || all == false);
assert(chunk->nruns_adjac == 0); assert(chunk->nruns_adjac == 0);
}
static size_t
arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk,
arena_chunk_mapelms_t *mapelms)
{
size_t npurged, pageind, npages, nmadvise;
arena_chunk_map_t *mapelm;
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(&arena->lock);
if (config_stats) if (config_stats)
nmadvise = 0; nmadvise = 0;
npurged = 0; npurged = 0;
ql_foreach(mapelm, &mapelms, u.ql_link) { ql_foreach(mapelm, mapelms, u.ql_link) {
bool unzeroed; bool unzeroed;
size_t flag_unzeroed, i; size_t flag_unzeroed, i;
...@@ -831,30 +927,75 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all) ...@@ -831,30 +927,75 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
if (config_stats) if (config_stats)
arena->stats.nmadvise += nmadvise; arena->stats.nmadvise += nmadvise;
return (npurged);
}
static void
arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk,
arena_chunk_mapelms_t *mapelms)
{
arena_chunk_map_t *mapelm;
size_t pageind;
/* Deallocate runs. */ /* Deallocate runs. */
for (mapelm = ql_first(&mapelms); mapelm != NULL; for (mapelm = ql_first(mapelms); mapelm != NULL;
mapelm = ql_first(&mapelms)) { mapelm = ql_first(mapelms)) {
arena_run_t *run; arena_run_t *run;
pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
sizeof(arena_chunk_map_t)) + map_bias; sizeof(arena_chunk_map_t)) + map_bias;
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind << run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
LG_PAGE)); LG_PAGE));
ql_remove(&mapelms, mapelm, u.ql_link); ql_remove(mapelms, mapelm, u.ql_link);
arena_run_dalloc(arena, run, false, true); arena_run_dalloc(arena, run, false, true);
} }
return (npurged);
} }
static arena_chunk_t * static inline size_t
chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg) arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
{ {
size_t *ndirty = (size_t *)arg; size_t npurged;
arena_chunk_mapelms_t mapelms;
assert(chunk->ndirty != 0); ql_new(&mapelms);
*ndirty += chunk->ndirty;
return (NULL); /*
* If chunk is the spare, temporarily re-allocate it, 1) so that its
* run is reinserted into runs_avail, and 2) so that it cannot be
* completely discarded by another thread while arena->lock is dropped
* by this thread. Note that the arena_run_dalloc() call will
* implicitly deallocate the chunk, so no explicit action is required
* in this function to deallocate the chunk.
*
* Note that once a chunk contains dirty pages, it cannot again contain
* a single run unless 1) it is a dirty run, or 2) this function purges
* dirty pages and causes the transition to a single clean run. Thus
* (chunk == arena->spare) is possible, but it is not possible for
* this function to be called on the spare unless it contains a dirty
* run.
*/
if (chunk == arena->spare) {
assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
arena_chunk_alloc(arena);
}
if (config_stats)
arena->stats.purged += chunk->ndirty;
/*
* Operate on all dirty runs if there is no clean/dirty run
* fragmentation.
*/
if (chunk->nruns_adjac == 0)
all = true;
arena_chunk_stash_dirty(arena, chunk, all, &mapelms);
npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms);
arena_chunk_unstash_purged(arena, chunk, &mapelms);
return (npurged);
} }
static void static void
...@@ -877,21 +1018,11 @@ arena_purge(arena_t *arena, bool all) ...@@ -877,21 +1018,11 @@ arena_purge(arena_t *arena, bool all)
arena->stats.npurge++; arena->stats.npurge++;
/* /*
* Compute the minimum number of pages that this thread should try to * Add the minimum number of pages this thread should try to purge to
* purge, and add the result to arena->npurgatory. This will keep * arena->npurgatory. This will keep multiple threads from racing to
* multiple threads from racing to reduce ndirty below the threshold. * reduce ndirty below the threshold.
*/ */
{ npurgatory = arena_compute_npurgatory(arena, all);
size_t npurgeable = arena->ndirty - arena->npurgatory;
if (all == false) {
size_t threshold = (arena->nactive >>
opt_lg_dirty_mult);
npurgatory = npurgeable - threshold;
} else
npurgatory = npurgeable;
}
arena->npurgatory += npurgatory; arena->npurgatory += npurgatory;
while (npurgatory > 0) { while (npurgatory > 0) {
...@@ -958,61 +1089,12 @@ arena_purge_all(arena_t *arena) ...@@ -958,61 +1089,12 @@ arena_purge_all(arena_t *arena)
} }
static void static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
{ {
arena_chunk_t *chunk; size_t size = *p_size;
size_t size, run_ind, run_pages, flag_dirty; size_t run_ind = *p_run_ind;
size_t run_pages = *p_run_pages;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
assert(run_ind >= map_bias);
assert(run_ind < chunk_npages);
if (arena_mapbits_large_get(chunk, run_ind) != 0) {
size = arena_mapbits_large_size_get(chunk, run_ind);
assert(size == PAGE ||
arena_mapbits_large_size_get(chunk,
run_ind+(size>>LG_PAGE)-1) == 0);
} else {
size_t binind = arena_bin_index(arena, run->bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
size = bin_info->run_size;
}
run_pages = (size >> LG_PAGE);
if (config_stats) {
/*
* Update stats_cactive if nactive is crossing a chunk
* multiple.
*/
size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
if (cactive_diff != 0)
stats_cactive_sub(cactive_diff);
}
arena->nactive -= run_pages;
/*
* The run is dirty if the caller claims to have dirtied it, as well as
* if it was already dirty before being allocated and the caller
* doesn't claim to have cleaned it.
*/
assert(arena_mapbits_dirty_get(chunk, run_ind) ==
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
dirty = true;
flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
/* Mark pages as unallocated in the chunk map. */
if (dirty) {
arena_mapbits_unallocated_set(chunk, run_ind, size,
CHUNK_MAP_DIRTY);
arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
CHUNK_MAP_DIRTY);
} else {
arena_mapbits_unallocated_set(chunk, run_ind, size,
arena_mapbits_unzeroed_get(chunk, run_ind));
arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
}
/* Try to coalesce forward. */ /* Try to coalesce forward. */
if (run_ind + run_pages < chunk_npages && if (run_ind + run_pages < chunk_npages &&
...@@ -1042,8 +1124,9 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) ...@@ -1042,8 +1124,9 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
} }
/* Try to coalesce backward. */ /* Try to coalesce backward. */
if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1) if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
== 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) { run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
flag_dirty) {
size_t prun_size = arena_mapbits_unallocated_size_get(chunk, size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
run_ind-1); run_ind-1);
size_t prun_pages = prun_size >> LG_PAGE; size_t prun_pages = prun_size >> LG_PAGE;
...@@ -1068,6 +1151,62 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) ...@@ -1068,6 +1151,62 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
size); size);
} }
*p_size = size;
*p_run_ind = run_ind;
*p_run_pages = run_pages;
}
static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
{
arena_chunk_t *chunk;
size_t size, run_ind, run_pages, flag_dirty;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
assert(run_ind >= map_bias);
assert(run_ind < chunk_npages);
if (arena_mapbits_large_get(chunk, run_ind) != 0) {
size = arena_mapbits_large_size_get(chunk, run_ind);
assert(size == PAGE ||
arena_mapbits_large_size_get(chunk,
run_ind+(size>>LG_PAGE)-1) == 0);
} else {
size_t binind = arena_bin_index(arena, run->bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
size = bin_info->run_size;
}
run_pages = (size >> LG_PAGE);
arena_cactive_update(arena, 0, run_pages);
arena->nactive -= run_pages;
/*
* The run is dirty if the caller claims to have dirtied it, as well as
* if it was already dirty before being allocated and the caller
* doesn't claim to have cleaned it.
*/
assert(arena_mapbits_dirty_get(chunk, run_ind) ==
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
dirty = true;
flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
/* Mark pages as unallocated in the chunk map. */
if (dirty) {
arena_mapbits_unallocated_set(chunk, run_ind, size,
CHUNK_MAP_DIRTY);
arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
CHUNK_MAP_DIRTY);
} else {
arena_mapbits_unallocated_set(chunk, run_ind, size,
arena_mapbits_unzeroed_get(chunk, run_ind));
arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
}
arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
flag_dirty);
/* Insert into runs_avail, now that coalescing is complete. */ /* Insert into runs_avail, now that coalescing is complete. */
assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
...@@ -1235,14 +1374,12 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) ...@@ -1235,14 +1374,12 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
malloc_mutex_unlock(&bin->lock); malloc_mutex_unlock(&bin->lock);
/******************************/ /******************************/
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(&arena->lock);
run = arena_run_alloc(arena, bin_info->run_size, false, binind, false); run = arena_run_alloc_small(arena, bin_info->run_size, binind);
if (run != NULL) { if (run != NULL) {
bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
(uintptr_t)bin_info->bitmap_offset); (uintptr_t)bin_info->bitmap_offset);
/* Initialize run internals. */ /* Initialize run internals. */
VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset -
bin_info->redzone_size);
run->bin = bin;
run->nextind = 0;
run->nfree = bin_info->nregs;
@@ -1260,7 +1397,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
}
/*
* arena_run_alloc_small() failed, but another thread may have made
* sufficient memory available while this one dropped bin->lock above,
* so search one more time.
*/
@@ -1295,12 +1432,12 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
arena_chunk_t *chunk;
/*
* arena_run_alloc_small() may have allocated run, or
* it may have pulled run from the bin's run tree.
* Therefore it is unsafe to make any assumptions about
* how run has previously been used, and
* arena_bin_lower_run() must be called, as if a region
* were just deallocated from the run.
*/
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
if (run->nfree == bin_info->nregs)
@@ -1321,21 +1458,6 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
return (arena_run_reg_alloc(bin->runcur, bin_info));
}
void
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{
cassert(config_prof);
if (config_prof && prof_interval != 0) {
arena->prof_accumbytes += accumbytes;
if (arena->prof_accumbytes >= prof_interval) {
prof_idump();
arena->prof_accumbytes -= prof_interval;
}
}
}
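/*
 * Illustrative sketch, not jemalloc code: the updated callers below rely on
 * an accumulator that reports whether the profiling interval was crossed, so
 * the dump can be triggered after the lock is dropped.  All names here
 * (acc_t, acc_add, interval) are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t accumbytes; uint64_t interval; } acc_t;

/* Returns true when the caller should perform a dump (outside any lock). */
static bool
acc_add(acc_t *acc, uint64_t bytes)
{
	if (acc->interval == 0)
		return (false);
	acc->accumbytes += bytes;
	if (acc->accumbytes >= acc->interval) {
		acc->accumbytes -= acc->interval;
		return (true);
	}
	return (false);
}

int
main(void)
{
	acc_t acc = {0, 1024};
	/* lock(); ... */
	bool idump = acc_add(&acc, 2048);
	/* unlock(); */
	if (idump)
		printf("interval crossed: dump now, lock already dropped\n");
	return (0);
}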
void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
uint64_t prof_accumbytes)
@@ -1347,11 +1469,8 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
assert(tbin->ncached == 0);
if (config_prof && arena_prof_accum(arena, prof_accumbytes))
prof_idump();
bin = &arena->bins[binind];
malloc_mutex_lock(&bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
@@ -1396,8 +1515,28 @@ arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
}
}
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
#endif
static void
arena_redzone_corruption(void *ptr, size_t usize, bool after,
size_t offset, uint8_t byte)
{
malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
"(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
after ? "after" : "before", ptr, usize, byte);
}
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption =
JEMALLOC_N(arena_redzone_corruption_impl);
#endif
static void
arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
{
size_t size = bin_info->reg_size;
size_t redzone_size = bin_info->redzone_size;
@@ -1405,29 +1544,61 @@ arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
bool error = false;
for (i = 1; i <= redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
if (*byte != 0xa5) {
error = true;
arena_redzone_corruption(ptr, size, false, i, *byte);
if (reset)
*byte = 0xa5;
}
}
for (i = 0; i < redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
if (*byte != 0xa5) {
error = true;
arena_redzone_corruption(ptr, size, true, i, *byte);
if (reset)
*byte = 0xa5;
}
}
if (opt_abort && error)
abort();
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
#endif
void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
{
size_t redzone_size = bin_info->redzone_size;
arena_redzones_validate(ptr, bin_info, false);
memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
bin_info->reg_interval);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small =
JEMALLOC_N(arena_dalloc_junk_small_impl);
#endif
void
arena_quarantine_junk_small(void *ptr, size_t usize)
{
size_t binind;
arena_bin_info_t *bin_info;
cassert(config_fill);
assert(opt_junk);
assert(opt_quarantine);
assert(usize <= SMALL_MAXCLASS);
binind = SMALL_SIZE2BIN(usize);
bin_info = &arena_bin_info[binind];
arena_redzones_validate(ptr, bin_info, true);
}
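/*
 * Minimal sketch of the redzone idea used above (assumption: this is a
 * stand-alone illustration, not jemalloc code).  A region is surrounded by
 * guard bytes filled with 0xa5; validation scans both sides and reports any
 * byte that changed.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REDZONE 4
#define REGION  8

int
main(void)
{
	uint8_t buf[REDZONE + REGION + REDZONE];
	uint8_t *ptr = buf + REDZONE;
	size_t i;

	memset(buf, 0xa5, sizeof(buf));	/* Arm both redzones. */
	memset(ptr, 0, REGION);		/* Application data. */
	ptr[REGION] = 0x42;		/* Simulated one-byte overflow. */

	for (i = 0; i < REDZONE; i++) {
		if (ptr[REGION + i] != 0xa5)
			printf("redzone byte %zu after region is corrupt\n", i);
	}
	return (0);
}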
void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
@@ -1459,11 +1630,8 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
bin->stats.nrequests++;
}
malloc_mutex_unlock(&bin->lock);
if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
prof_idump();
if (zero == false) {
if (config_fill) {
@@ -1473,6 +1641,7 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
} else if (opt_zero)
memset(ret, 0, size);
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
if (config_fill && opt_junk) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
@@ -1489,11 +1658,12 @@ void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
void *ret;
UNUSED bool idump;
/* Large allocation. */
size = PAGE_CEILING(size);
malloc_mutex_lock(&arena->lock);
ret = (void *)arena_run_alloc_large(arena, size, zero);
if (ret == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
@@ -1507,8 +1677,10 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
}
if (config_prof)
idump = arena_prof_accum_locked(arena, size);
malloc_mutex_unlock(&arena->lock);
if (config_prof && idump)
prof_idump();
if (zero == false) {
if (config_fill) {
@@ -1537,7 +1709,7 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
alloc_size = size + alignment - PAGE;
malloc_mutex_lock(&arena->lock);
run = arena_run_alloc_large(arena, alloc_size, false);
if (run == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
@@ -1557,6 +1729,7 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
false);
}
arena_run_init_large(arena, (arena_run_t *)ret, size, zero);
if (config_stats) {
arena->stats.nmalloc_large++;
@@ -1760,21 +1933,38 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
#endif
static void
arena_dalloc_junk_large(void *ptr, size_t usize)
{
if (config_fill && opt_junk)
memset(ptr, 0x5a, usize);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
JEMALLOC_N(arena_dalloc_junk_large_impl);
#endif
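/*
 * Sketch of the testing-hook pattern used around arena_dalloc_junk_large
 * above (assumption: simplified, hypothetical names).  The default
 * implementation stays static; an exported function pointer lets a test
 * replace it at run time, which is what JEMALLOC_JET enables.
 */
#include <stddef.h>
#include <stdio.h>

typedef void (junk_hook_t)(void *ptr, size_t usize);

static void
junk_hook_impl(void *ptr, size_t usize)
{
	(void)ptr;
	printf("default junk fill of %zu bytes\n", usize);
}

/* Tests may overwrite this pointer to intercept the call. */
junk_hook_t *junk_hook = junk_hook_impl;

static void
test_hook(void *ptr, size_t usize)
{
	(void)ptr;
	printf("test observed dalloc of %zu bytes\n", usize);
}

int
main(void)
{
	char buf[16];

	junk_hook(buf, sizeof(buf));	/* Default behavior. */
	junk_hook = test_hook;		/* Swap in the test double. */
	junk_hook(buf, sizeof(buf));
	return (0);
}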
void
arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
if (config_fill || config_stats) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t usize = arena_mapbits_large_size_get(chunk, pageind);
arena_dalloc_junk_large(ptr, usize);
if (config_stats) {
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= usize;
arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++;
arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--;
}
}
@@ -1845,9 +2035,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t flag_dirty;
size_t splitsize = (oldsize + followsize <= size + extra)
? followsize : size + extra - oldsize;
arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk +
((pageind+npages) << LG_PAGE)), splitsize, zero);
size = oldsize + splitsize;
npages = size >> LG_PAGE;
@@ -1886,6 +2075,26 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
return (true);
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
#endif
static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{
if (config_fill && opt_junk) {
memset((void *)((uintptr_t)ptr + usize), 0x5a,
old_usize - usize);
}
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
JEMALLOC_N(arena_ralloc_junk_large_impl);
#endif
/*
* Try to resize a large allocation, in order to avoid copying. This will
* always fail if growing an object, and the following run is already in use.
@@ -1899,10 +2108,6 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
psize = PAGE_CEILING(size + extra);
if (psize == oldsize) {
/* Same size class. */
if (config_fill && opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
size);
}
return (false);
} else {
arena_chunk_t *chunk;
@@ -1913,10 +2118,7 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
if (psize < oldsize) {
/* Fill before shrinking in order avoid a race. */
arena_ralloc_junk_large(ptr, oldsize, psize);
arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
psize);
return (false);
@@ -1924,17 +2126,23 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
oldsize, PAGE_CEILING(size),
psize - PAGE_CEILING(size), zero);
if (config_fill && ret == false && zero == false) {
if (opt_junk) {
memset((void *)((uintptr_t)ptr +
oldsize), 0xa5, isalloc(ptr,
config_prof) - oldsize);
} else if (opt_zero) {
memset((void *)((uintptr_t)ptr +
oldsize), 0, isalloc(ptr,
config_prof) - oldsize);
}
}
return (ret);
}
}
}
bool
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{
@@ -1949,25 +2157,20 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
if ((size + extra <= SMALL_MAXCLASS &&
SMALL_SIZE2BIN(size + extra) ==
SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
size + extra >= oldsize))
return (false);
} else {
assert(size <= arena_maxclass);
if (size + extra > SMALL_MAXCLASS) {
if (arena_ralloc_large(ptr, oldsize, size,
extra, zero) == false)
return (false);
}
}
}
/* Reallocation would require a move. */
return (true);
}
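/*
 * Sketch (not jemalloc code) of the new return convention: false means the
 * allocation was resized in place, true means a move is required, so callers
 * fall back to allocate-copy-free as arena_ralloc() does below.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool
resize_no_move(size_t oldsize, size_t newsize)
{
	/* Pretend only shrinks succeed in place. */
	return (newsize > oldsize);
}

int
main(void)
{
	if (resize_no_move(4096, 2048) == false)
		printf("resized in place\n");
	else
		printf("would move: allocate new, copy, free old\n");
	return (0);
}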
void *
@@ -1979,9 +2182,8 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t copysize;
/* Try to avoid moving the allocation. */
if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false)
return (ptr);
/*
* size and oldsize are different enough that we need to move the
@@ -1992,7 +2194,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
} else
ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
@@ -2004,7 +2206,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
arena);
} else
ret = arena_malloc(arena, size, zero, try_tcache_alloc);
@@ -2022,7 +2224,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
copysize = (size < oldsize) ? size : oldsize;
VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
iqalloct(ptr, try_tcache_dalloc);
return (ret);
}
@@ -2277,7 +2479,6 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
bin_info->reg_interval) - pad_size;
} while (try_hdr_size > try_redzone0_offset);
} while (try_run_size <= arena_maxclass
&& RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
RUN_MAX_OVRHD_RELAX
&& (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
......
@@ -63,6 +63,7 @@ base_alloc(size_t size)
ret = base_next_addr;
base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
malloc_mutex_unlock(&base_mtx);
VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
return (ret);
}
@@ -88,6 +89,7 @@ base_node_alloc(void)
ret = base_nodes;
base_nodes = *(extent_node_t **)ret;
malloc_mutex_unlock(&base_mtx);
VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
} else {
malloc_mutex_unlock(&base_mtx);
ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
@@ -100,6 +102,7 @@ void
base_node_dealloc(extent_node_t *node)
{
VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
malloc_mutex_lock(&base_mtx);
*(extent_node_t **)node = base_nodes;
base_nodes = node;
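/*
 * Sketch of the base_nodes free list above (assumption: simplified, no
 * locking or Valgrind annotations).  A freed node's own memory holds the
 * pointer to the next free node, so no separate list structure is needed.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct node_s { void *payload[2]; } node_t;

static node_t *free_nodes = NULL;

static void
node_dealloc(node_t *node)
{
	*(node_t **)node = free_nodes;	/* Reuse the node's memory as a link. */
	free_nodes = node;
}

static node_t *
node_alloc(void)
{
	node_t *ret;

	if (free_nodes != NULL) {
		ret = free_nodes;
		free_nodes = *(node_t **)ret;
		return (ret);
	}
	return ((node_t *)malloc(sizeof(node_t)));
}

int
main(void)
{
	node_t *n = node_alloc();

	node_dealloc(n);
	printf("recycled: %d\n", node_alloc() == n);
	return (0);
}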
......
@@ -78,6 +78,9 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
assert(node->size >= leadsize + size);
trailsize = node->size - leadsize - size;
ret = (void *)((uintptr_t)node->addr + leadsize);
zeroed = node->zeroed;
if (zeroed)
*zero = true;
/* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node);
@@ -108,23 +111,26 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
}
node->addr = (void *)((uintptr_t)(ret) + size);
node->size = trailsize;
node->zeroed = zeroed;
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
node = NULL;
}
malloc_mutex_unlock(&chunks_mtx);
if (node != NULL)
base_node_dealloc(node);
if (*zero) {
if (zeroed == false)
memset(ret, 0, size);
else if (config_debug) {
size_t i;
size_t *p = (size_t *)(uintptr_t)ret;
VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
}
}
return (ret);
}
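/*
 * Sketch of the zeroed bookkeeping above (assumption: not jemalloc code).
 * A recycled extent remembers whether it is known to be zeroed; the
 * allocator only pays for memset() when zeroed memory was requested and the
 * extent cannot guarantee it.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char extent[64];
	bool extent_zeroed = false;	/* Tracked per extent node. */
	bool zero = true;		/* Caller wants zeroed memory. */

	if (zero && extent_zeroed == false)
		memset(extent, 0, sizeof(extent));
	else if (zero)
		printf("already zeroed: skip memset, optionally assert\n");
	printf("first byte: %d\n", extent[0]);
	return (0);
}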
@@ -172,20 +178,22 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
/* All strategies for allocation failed. */
ret = NULL;
label_return:
if (ret != NULL) {
if (config_ivsalloc && base == false) {
if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
chunk_dealloc(ret, size, true);
return (NULL);
}
}
if (config_stats || config_prof) {
bool gdump;
malloc_mutex_lock(&chunks_mtx);
if (config_stats)
stats_chunks.nchunks += (size / chunksize);
stats_chunks.curchunks += (size / chunksize);
if (stats_chunks.curchunks > stats_chunks.highchunks) {
stats_chunks.highchunks =
stats_chunks.curchunks;
if (config_prof)
gdump = true;
} else if (config_prof)
@@ -194,13 +202,8 @@ label_return:
if (config_prof && opt_prof && opt_prof_gdump && gdump)
prof_gdump();
}
if (config_valgrind)
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
@@ -211,9 +214,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
size_t size)
{
bool unzeroed;
extent_node_t *xnode, *node, *prev, *xprev, key;
unzeroed = pages_purge(chunk, size);
VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
/*
* Allocate a node before acquiring chunks_mtx even though it might not
@@ -222,6 +226,8 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
* held.
*/
xnode = base_node_alloc();
/* Use xprev to implement conditional deferred deallocation of prev. */
xprev = NULL;
malloc_mutex_lock(&chunks_mtx);
key.addr = (void *)((uintptr_t)chunk + size);
@@ -238,8 +244,6 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
node->size += size;
node->zeroed = (node->zeroed && (unzeroed == false));
extent_tree_szad_insert(chunks_szad, node);
} else {
/* Coalescing forward failed, so insert a new node. */
if (xnode == NULL) {
@@ -249,10 +253,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
* already been purged, so this is only a virtual
* memory leak.
*/
goto label_return;
}
node = xnode;
xnode = NULL; /* Prevent deallocation below. */
node->addr = chunk;
node->size = size;
node->zeroed = (unzeroed == false);
@@ -278,9 +282,19 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
node->zeroed = (node->zeroed && prev->zeroed);
extent_tree_szad_insert(chunks_szad, node);
xprev = prev;
}
label_return:
malloc_mutex_unlock(&chunks_mtx);
/*
* Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
* avoid potential deadlock.
*/
if (xnode != NULL)
base_node_dealloc(xnode);
if (xprev != NULL)
base_node_dealloc(xprev);
}
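/*
 * Sketch of the deferred deallocation above (assumption: pthreads used for
 * illustration only).  Freeing a node may take another mutex, so nodes are
 * only remembered while the list lock is held and released afterwards.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;

int
main(void)
{
	void *xnode = malloc(16), *xprev = NULL;

	pthread_mutex_lock(&list_mtx);
	/* ... tree manipulation; decide which nodes are no longer needed ... */
	pthread_mutex_unlock(&list_mtx);

	/* Deallocate outside the lock to avoid lock-order problems. */
	if (xnode != NULL)
		free(xnode);
	if (xprev != NULL)
		free(xprev);
	printf("nodes released after unlock\n");
	return (0);
}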
void
@@ -307,7 +321,7 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
assert((size & chunksize_mask) == 0);
if (config_ivsalloc)
rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
if (config_stats || config_prof) {
malloc_mutex_lock(&chunks_mtx);
assert(stats_chunks.curchunks >= (size / chunksize));
@@ -342,7 +356,7 @@ chunk_boot(void)
extent_tree_ad_new(&chunks_ad_dss);
if (config_ivsalloc) {
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk, base_alloc, NULL);
if (chunks_rtree == NULL)
return (true);
}
@@ -354,7 +368,7 @@ void
chunk_prefork(void)
{
malloc_mutex_prefork(&chunks_mtx);
if (config_ivsalloc)
rtree_prefork(chunks_rtree);
chunk_dss_prefork();
......
@@ -28,16 +28,17 @@ static void *dss_max;
/******************************************************************************/
static void *
chunk_dss_sbrk(intptr_t increment)
{
#ifdef JEMALLOC_HAVE_SBRK
return (sbrk(increment));
#else
not_implemented();
return (NULL);
#endif
}
dss_prec_t
chunk_dss_prec_get(void)
@@ -93,7 +94,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
*/
do {
/* Get the current end of the DSS. */
dss_max = chunk_dss_sbrk(0);
/*
* Calculate how much padding is necessary to
* chunk-align the end of the DSS.
@@ -117,7 +118,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
return (NULL);
}
incr = gap_size + cpad_size + size;
dss_prev = chunk_dss_sbrk(incr);
if (dss_prev == dss_max) {
/* Success. */
dss_max = dss_next;
@@ -163,7 +164,7 @@ chunk_dss_boot(void)
if (malloc_mutex_init(&dss_mtx))
return (true);
dss_base = chunk_dss_sbrk(0);
dss_prev = dss_base;
dss_max = dss_base;
......
@@ -43,7 +43,7 @@ pages_map(void *addr, size_t size)
if (munmap(ret, size) == -1) {
char buf[BUFERROR_BUF];
buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc: Error in munmap(): %s\n",
buf);
if (opt_abort)
@@ -69,7 +69,7 @@ pages_unmap(void *addr, size_t size)
{
char buf[BUFERROR_BUF];
buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
"VirtualFree"
......
@@ -49,7 +49,7 @@ static void ckh_shrink(ckh_t *ckh);
* Search bucket for key and return the cell number if found; SIZE_T_MAX
* otherwise.
*/
JEMALLOC_INLINE_C size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
{
ckhc_t *cell;
@@ -67,28 +67,28 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
/*
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
*/
JEMALLOC_INLINE_C size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
size_t hashes[2], bucket, cell;
assert(ckh != NULL);
ckh->hash(key, hashes);
/* Search primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
if (cell != SIZE_T_MAX)
return (cell);
/* Search secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
return (cell);
}
JEMALLOC_INLINE_C bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
const void *data)
{
@@ -120,13 +120,13 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* eviction/relocation procedure until either success or detection of an
* eviction/relocation bucket cycle.
*/
JEMALLOC_INLINE_C bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
void const **argdata)
{
const void *key, *data, *tkey, *tdata;
ckhc_t *cell;
size_t hashes[2], bucket, tbucket;
unsigned i;
bucket = argbucket;
@@ -155,10 +155,11 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
#endif
/* Find the alternate bucket for the evicted item. */
ckh->hash(key, hashes);
tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (tbucket == bucket) {
tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
- 1);
/*
* It may be that (tbucket == bucket) still, if the
* item's hashes both indicate this bucket. However,
@@ -189,22 +190,22 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
}
}
JEMALLOC_INLINE_C bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
{
size_t hashes[2], bucket;
const void *key = *argkey;
const void *data = *argdata;
ckh->hash(key, hashes);
/* Try to insert in primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
/* Try to insert in secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
@@ -218,7 +219,7 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
* Try to rebuild the hash table from scratch by inserting all items from the
* old table into the new.
*/
JEMALLOC_INLINE_C bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
{
size_t count, i, nins;
@@ -417,9 +418,8 @@ ckh_delete(ckh_t *ckh)
#endif
idalloc(ckh->tab);
if (config_debug)
memset(ckh, 0x5a, sizeof(ckh_t));
}
size_t
@@ -526,31 +526,10 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
}
void
ckh_string_hash(const void *key, size_t r_hash[2])
{
hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}
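/*
 * Sketch of the new hashing contract above (assumption: the mixing function
 * here is a stand-in, not jemalloc's hash()).  One call now produces two
 * values in r_hash[2]; the cuckoo table derives its primary and secondary
 * buckets from them.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void
toy_hash(const void *key, size_t len, uint32_t seed, size_t r_hash[2])
{
	const unsigned char *p = (const unsigned char *)key;
	uint64_t h = seed;
	size_t i;

	for (i = 0; i < len; i++)
		h = (h ^ p[i]) * 0x100000001b3ULL;	/* FNV-style mixing. */
	r_hash[0] = (size_t)h;
	r_hash[1] = (size_t)(h >> 32) ^ (size_t)(h << 1);
}

int
main(void)
{
	size_t hashes[2], lg_curbuckets = 4;
	const char *key = "example";

	toy_hash(key, strlen(key), 0x94122f33U, hashes);
	printf("primary bucket %zu, secondary bucket %zu\n",
	    hashes[0] & (((size_t)1 << lg_curbuckets) - 1),
	    hashes[1] & (((size_t)1 << lg_curbuckets) - 1));
	return (0);
}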
bool
@@ -564,41 +543,16 @@ ckh_string_keycomp(const void *k1, const void *k2)
}
void
ckh_pointer_hash(const void *key, size_t r_hash[2])
{
union {
const void *v;
size_t i;
} u;
assert(sizeof(u.v) == sizeof(u.i));
u.v = key;
hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
}
bool
......
@@ -546,43 +546,30 @@ ctl_arena_refresh(arena_t *arena, unsigned i)
static bool
ctl_grow(void)
{
ctl_arena_stats_t *astats;
arena_t **tarenas;
/* Allocate extended arena stats and arenas arrays. */
astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) *
sizeof(ctl_arena_stats_t));
if (astats == NULL)
return (true);
tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
sizeof(arena_t *));
if (tarenas == NULL) {
idalloc(astats);
return (true);
}
/* Initialize the new astats element. */
memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t));
memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
idalloc(tarenas);
idalloc(astats);
return (true);
}
/* Swap merged stats to their new location. */
{
ctl_arena_stats_t tstats;
@@ -593,13 +580,34 @@ ctl_grow(void)
memcpy(&astats[ctl_stats.narenas + 1], &tstats,
sizeof(ctl_arena_stats_t));
}
/* Initialize the new arenas element. */
tarenas[ctl_stats.narenas] = NULL;
{
arena_t **arenas_old = arenas;
/*
* Swap extended arenas array into place. Although ctl_mtx
* protects this function from other threads extending the
* array, it does not protect from other threads mutating it
* (i.e. initializing arenas and setting array elements to
* point to them). Therefore, array copying must happen under
* the protection of arenas_lock.
*/
malloc_mutex_lock(&arenas_lock);
arenas = tarenas;
memcpy(arenas, arenas_old, ctl_stats.narenas *
sizeof(arena_t *));
narenas_total++;
arenas_extend(narenas_total - 1);
malloc_mutex_unlock(&arenas_lock);
/*
* Deallocate arenas_old only if it came from imalloc() (not
* base_alloc()).
*/
if (ctl_stats.narenas != narenas_auto)
idalloc(arenas_old);
}
ctl_stats.arenas = astats;
ctl_stats.narenas++;
return (false);
}
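/*
 * Sketch of the grow-and-swap pattern above (assumption: pthreads for
 * illustration; error handling trimmed).  The larger array is allocated
 * before taking the lock, the copy and pointer swap happen under the lock,
 * and the superseded array is freed afterwards.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t arr_lock = PTHREAD_MUTEX_INITIALIZER;
static int *arr;
static size_t arr_len = 4;

int
main(void)
{
	int *old, *grown = calloc(arr_len + 1, sizeof(int));

	arr = calloc(arr_len, sizeof(int));

	pthread_mutex_lock(&arr_lock);
	memcpy(grown, arr, arr_len * sizeof(int));	/* Copy under the lock. */
	old = arr;
	arr = grown;					/* Publish the new array. */
	arr_len++;
	pthread_mutex_unlock(&arr_lock);

	free(old);	/* Old storage released only after the swap. */
	printf("grown to %zu slots\n", arr_len);
	return (0);
}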
@@ -921,7 +929,7 @@ void
ctl_prefork(void)
{
malloc_mutex_prefork(&ctl_mtx);
}
void
@@ -960,11 +968,11 @@ ctl_postfork_child(void)
if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \
? sizeof(t) : *oldlenp; \
memcpy(oldp, (void *)&(v), copylen); \
ret = EINVAL; \
goto label_return; \
} else \
*(t *)oldp = (v); \
} \
} while (0)
@@ -974,7 +982,7 @@ ctl_postfork_child(void)
ret = EINVAL; \
goto label_return; \
} \
(v) = *(t *)newp; \
} \
} while (0)
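/*
 * Small illustration (not jemalloc code) of why the macros above now wrap
 * their argument as (v): without the parentheses, an argument that is itself
 * an expression can parse differently after expansion.
 */
#include <stdio.h>

#define DOUBLE_BAD(v)	(v * 2)		/* "a + b" expands to a + b * 2 */
#define DOUBLE_GOOD(v)	((v) * 2)

int
main(void)
{
	int a = 1, b = 2;

	printf("bad: %d, good: %d\n", DOUBLE_BAD(a + b), DOUBLE_GOOD(a + b));
	return (0);
}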
@@ -995,7 +1003,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
if (l) \
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
@@ -1017,7 +1025,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
return (ENOENT); \
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
@@ -1036,7 +1044,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
\
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
@@ -1060,7 +1068,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
if ((c) == false) \
return (ENOENT); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
@@ -1077,7 +1085,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
t oldval; \
\
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
@@ -1102,6 +1110,8 @@ label_return: \
return (ret); \
}
/******************************************************************************/
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int
@@ -1109,7 +1119,7 @@ epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
UNUSED uint64_t newval;
malloc_mutex_lock(&ctl_mtx);
WRITE(newval, uint64_t);
@@ -1123,49 +1133,52 @@ label_return:
return (ret);
}
/******************************************************************************/
CTL_RO_BOOL_CONFIG_GEN(config_debug)
CTL_RO_BOOL_CONFIG_GEN(config_dss)
CTL_RO_BOOL_CONFIG_GEN(config_fill)
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
CTL_RO_BOOL_CONFIG_GEN(config_mremap)
CTL_RO_BOOL_CONFIG_GEN(config_munmap)
CTL_RO_BOOL_CONFIG_GEN(config_prof)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
CTL_RO_BOOL_CONFIG_GEN(config_stats)
CTL_RO_BOOL_CONFIG_GEN(config_tcache)
CTL_RO_BOOL_CONFIG_GEN(config_tls)
CTL_RO_BOOL_CONFIG_GEN(config_utrace)
CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
/******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
/******************************************************************************/
static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
@@ -1227,50 +1240,49 @@ CTL_RO_NL_CGEN(config_stats, thread_deallocated,
CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
&thread_allocated_tsd_get()->deallocated, uint64_t *)
static int
thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
bool oldval;
if (config_tcache == false)
return (ENOENT);
oldval = tcache_enabled_get();
if (newp != NULL) {
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
tcache_enabled_set(*(bool *)newp);
}
READ(oldval, bool);
ret = 0;
label_return:
return (ret);
}
static int
thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
if (config_tcache == false)
return (ENOENT);
READONLY();
WRITEONLY();
tcache_flush();
ret = 0;
label_return:
return (ret);
}
/******************************************************************************/
@@ -1382,31 +1394,8 @@ label_return:
return (ret);
}
/******************************************************************************/
static int
arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
@@ -1460,7 +1449,28 @@ CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
static const ctl_named_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{
if (i > NBINS)
return (NULL);
return (super_arenas_bin_i_node);
}
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
static const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{
if (i > nlclasses)
return (NULL);
return (super_arenas_lrun_i_node);
}
static int static int
arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
...@@ -1492,6 +1502,7 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1492,6 +1502,7 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen) void *newp, size_t newlen)
{ {
int ret; int ret;
unsigned narenas;
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(&ctl_mtx);
READONLY(); READONLY();
...@@ -1499,7 +1510,8 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1499,7 +1510,8 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
READ(ctl_stats.narenas - 1, unsigned); narenas = ctl_stats.narenas - 1;
READ(narenas, unsigned);
ret = 0; ret = 0;
label_return: label_return:
@@ -1565,6 +1577,11 @@ CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
/******************************************************************************/ /******************************************************************************/
CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current, CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
size_t) size_t)
CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t) CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
@@ -1572,6 +1589,20 @@ CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t) CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
ctl_stats.arenas[mib[2]].astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
ctl_stats.arenas[mib[2]].allocated_small, size_t) ctl_stats.arenas[mib[2]].allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
@@ -1635,19 +1666,6 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
return (super_stats_arenas_i_lruns_j_node); return (super_stats_arenas_i_lruns_j_node);
} }
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
ctl_stats.arenas[mib[2]].astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{ {
@@ -1664,8 +1682,3 @@ label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(&ctl_mtx);
return (ret); return (ret);
} }
CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
@@ -16,14 +16,14 @@ malloc_mutex_t huge_mtx;
static extent_tree_t huge; static extent_tree_t huge;
void * void *
huge_malloc(size_t size, bool zero) huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
{ {
return (huge_palloc(size, chunksize, zero)); return (huge_palloc(size, chunksize, zero, dss_prec));
} }
void * void *
huge_palloc(size_t size, size_t alignment, bool zero) huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
{ {
void *ret; void *ret;
size_t csize; size_t csize;
@@ -48,8 +48,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
* it is possible to make correct junk/zero fill decisions below. * it is possible to make correct junk/zero fill decisions below.
*/ */
is_zeroed = zero; is_zeroed = zero;
ret = chunk_alloc(csize, alignment, false, &is_zeroed, ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
chunk_dss_prec_get());
if (ret == NULL) { if (ret == NULL) {
base_node_dealloc(node); base_node_dealloc(node);
return (NULL); return (NULL);
@@ -78,7 +77,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
return (ret); return (ret);
} }
void * bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra) huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{ {
@@ -89,28 +88,23 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size) && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) { && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
assert(CHUNK_CEILING(oldsize) == oldsize); assert(CHUNK_CEILING(oldsize) == oldsize);
if (config_fill && opt_junk && size < oldsize) { return (false);
memset((void *)((uintptr_t)ptr + size), 0x5a,
oldsize - size);
}
return (ptr);
} }
/* Reallocation would require a move. */ /* Reallocation would require a move. */
return (NULL); return (true);
} }
void * void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_dalloc) size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
{ {
void *ret; void *ret;
size_t copysize; size_t copysize;
/* Try to avoid moving the allocation. */ /* Try to avoid moving the allocation. */
ret = huge_ralloc_no_move(ptr, oldsize, size, extra); if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
if (ret != NULL) return (ptr);
return (ret);
/* /*
* size and oldsize are different enough that we need to use a * size and oldsize are different enough that we need to use a
@@ -118,18 +112,18 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
* space and copying. * space and copying.
*/ */
if (alignment > chunksize) if (alignment > chunksize)
ret = huge_palloc(size + extra, alignment, zero); ret = huge_palloc(size + extra, alignment, zero, dss_prec);
else else
ret = huge_malloc(size + extra, zero); ret = huge_malloc(size + extra, zero, dss_prec);
if (ret == NULL) { if (ret == NULL) {
if (extra == 0) if (extra == 0)
return (NULL); return (NULL);
/* Try again, this time without extra. */ /* Try again, this time without extra. */
if (alignment > chunksize) if (alignment > chunksize)
ret = huge_palloc(size, alignment, zero); ret = huge_palloc(size, alignment, zero, dss_prec);
else else
ret = huge_malloc(size, zero); ret = huge_malloc(size, zero, dss_prec);
if (ret == NULL) if (ret == NULL)
return (NULL); return (NULL);
@@ -169,23 +163,56 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
*/ */
char buf[BUFERROR_BUF]; char buf[BUFERROR_BUF];
buferror(buf, sizeof(buf)); buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in mremap(): %s\n", malloc_printf("<jemalloc>: Error in mremap(): %s\n",
buf); buf);
if (opt_abort) if (opt_abort)
abort(); abort();
memcpy(ret, ptr, copysize); memcpy(ret, ptr, copysize);
chunk_dealloc_mmap(ptr, oldsize); chunk_dealloc_mmap(ptr, oldsize);
} else if (config_fill && zero == false && opt_junk && oldsize
< newsize) {
/*
* mremap(2) clobbers the original mapping, so
* junk/zero filling is not preserved. There is no
* need to zero fill here, since any trailing
* uninitialized memory is demand-zeroed by the
* kernel, but junk filling must be redone.
*/
memset(ret + oldsize, 0xa5, newsize - oldsize);
} }
} else } else
#endif #endif
{ {
memcpy(ret, ptr, copysize); memcpy(ret, ptr, copysize);
iqallocx(ptr, try_tcache_dalloc); iqalloct(ptr, try_tcache_dalloc);
} }
return (ret); return (ret);
} }
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{
if (config_fill && config_dss && opt_junk) {
/*
* Only bother junk filling if the chunk isn't about to be
* unmapped.
*/
if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
memset(ptr, 0x5a, usize);
}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
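/*
 * Illustrative sketch, not part of this commit: with JEMALLOC_JET defined,
 * huge_dalloc_junk is exposed as a writable function pointer, so a unit test
 * can interpose on the junk-filling step roughly like this (the *_orig and
 * *_intercept names are hypothetical, modeled on jemalloc's own test hooks):
 *
 *	static huge_dalloc_junk_t *huge_dalloc_junk_orig;
 *
 *	static void
 *	huge_dalloc_junk_intercept(void *ptr, size_t usize)
 *	{
 *
 *		huge_dalloc_junk_orig(ptr, usize);
 *		// record (ptr, usize) here for later assertions
 *	}
 *
 *	// in the test setup:
 *	huge_dalloc_junk_orig = huge_dalloc_junk;
 *	huge_dalloc_junk = huge_dalloc_junk_intercept;
 */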
void void
huge_dalloc(void *ptr, bool unmap) huge_dalloc(void *ptr, bool unmap)
{ {
@@ -208,8 +235,8 @@ huge_dalloc(void *ptr, bool unmap)
malloc_mutex_unlock(&huge_mtx); malloc_mutex_unlock(&huge_mtx);
if (unmap && config_fill && config_dss && opt_junk) if (unmap)
memset(node->addr, 0x5a, node->size); huge_dalloc_junk(node->addr, node->size);
chunk_dealloc(node->addr, node->size, unmap); chunk_dealloc(node->addr, node->size, unmap);
@@ -236,6 +263,13 @@ huge_salloc(const void *ptr)
return (ret); return (ret);
} }
dss_prec_t
huge_dss_prec_get(arena_t *arena)
{
return (arena_dss_prec_get(choose_arena(arena)));
}
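/*
 * Illustrative note, not part of this commit: dss precedence for huge
 * allocations now follows the owning arena instead of the global
 * chunk_dss_prec_get() setting, so a per-arena preference configured through
 * the documented mallctl namespace, e.g.
 *
 *	const char *dss = "primary";
 *	je_mallctl("arena.0.dss", NULL, NULL, &dss, sizeof(const char *));
 *
 * is what subsequent huge_malloc()/huge_palloc() calls routed through that
 * arena honor. The arena index 0 and the "primary" value are example choices
 * only.
 */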
prof_ctx_t * prof_ctx_t *
huge_prof_ctx_get(const void *ptr) huge_prof_ctx_get(const void *ptr)
{ {
......
@@ -10,17 +10,20 @@ malloc_tsd_data(, thread_allocated, thread_allocated_t,
/* Runtime configuration options. */ /* Runtime configuration options. */
const char *je_malloc_conf; const char *je_malloc_conf;
bool opt_abort =
#ifdef JEMALLOC_DEBUG #ifdef JEMALLOC_DEBUG
bool opt_abort = true; true
# ifdef JEMALLOC_FILL #else
bool opt_junk = true; false
# else #endif
bool opt_junk = false; ;
# endif bool opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
true
#else #else
bool opt_abort = false; false
bool opt_junk = false;
#endif #endif
;
size_t opt_quarantine = ZU(0); size_t opt_quarantine = ZU(0);
bool opt_redzone = false; bool opt_redzone = false;
bool opt_utrace = false; bool opt_utrace = false;
@@ -83,11 +86,13 @@ typedef struct {
#ifdef JEMALLOC_UTRACE #ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \ # define UTRACE(a, b, c) do { \
if (opt_utrace) { \ if (opt_utrace) { \
int utrace_serrno = errno; \
malloc_utrace_t ut; \ malloc_utrace_t ut; \
ut.p = (a); \ ut.p = (a); \
ut.s = (b); \ ut.s = (b); \
ut.r = (c); \ ut.r = (c); \
utrace(&ut, sizeof(ut)); \ utrace(&ut, sizeof(ut)); \
errno = utrace_serrno; \
} \ } \
} while (0) } while (0)
#else #else
@@ -95,18 +100,12 @@ typedef struct {
#endif #endif
/******************************************************************************/ /******************************************************************************/
/* Function prototypes for non-inline static functions. */ /*
* Function prototypes for static functions that are referenced prior to
static void stats_print_atexit(void); * definition.
static unsigned malloc_ncpus(void); */
static bool malloc_conf_next(char const **opts_p, char const **k_p,
size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void); static bool malloc_init_hard(void);
static int imemalign(void **memptr, size_t alignment, size_t size,
size_t min_alignment);
/******************************************************************************/ /******************************************************************************/
/* /*
@@ -247,7 +246,6 @@ stats_print_atexit(void)
static unsigned static unsigned
malloc_ncpus(void) malloc_ncpus(void)
{ {
unsigned ret;
long result; long result;
#ifdef _WIN32 #ifdef _WIN32
@@ -257,14 +255,7 @@ malloc_ncpus(void)
#else #else
result = sysconf(_SC_NPROCESSORS_ONLN); result = sysconf(_SC_NPROCESSORS_ONLN);
#endif #endif
if (result == -1) { return ((result == -1) ? 1 : (unsigned)result);
/* Error. */
ret = 1;
} else {
ret = (unsigned)result;
}
return (ret);
} }
void void
@@ -277,12 +268,30 @@ arenas_cleanup(void *arg)
malloc_mutex_unlock(&arenas_lock); malloc_mutex_unlock(&arenas_lock);
} }
static inline bool JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{
/*
* TSD initialization can't be safely done as a side effect of
* deallocation, because it is possible for a thread to do nothing but
* deallocate its TLS data via free(), in which case writing to TLS
* would cause write-after-free memory corruption. The quarantine
* facility *only* gets used as a side effect of deallocation, so make
* a best effort attempt at initializing its TSD by hooking all
* allocation events.
*/
if (config_fill && opt_quarantine)
quarantine_alloc_hook();
}
JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void) malloc_init(void)
{ {
if (malloc_initialized == false) if (malloc_initialized == false && malloc_init_hard())
return (malloc_init_hard()); return (true);
malloc_thread_init();
return (false); return (false);
} }
@@ -413,8 +422,9 @@ malloc_conf_init(void)
} }
break; break;
case 1: { case 1: {
int linklen = 0;
#ifndef _WIN32 #ifndef _WIN32
int linklen; int saved_errno = errno;
const char *linkname = const char *linkname =
# ifdef JEMALLOC_PREFIX # ifdef JEMALLOC_PREFIX
"/etc/"JEMALLOC_PREFIX"malloc.conf" "/etc/"JEMALLOC_PREFIX"malloc.conf"
@@ -423,21 +433,20 @@ malloc_conf_init(void)
# endif # endif
; ;
if ((linklen = readlink(linkname, buf,
sizeof(buf) - 1)) != -1) {
/* /*
* Use the contents of the "/etc/malloc.conf" * Try to use the contents of the "/etc/malloc.conf"
* symbolic link's name. * symbolic link's name.
*/ */
buf[linklen] = '\0'; linklen = readlink(linkname, buf, sizeof(buf) - 1);
opts = buf; if (linklen == -1) {
} else
#endif
{
/* No configuration specified. */ /* No configuration specified. */
buf[0] = '\0'; linklen = 0;
opts = buf; /* restore errno */
set_errno(saved_errno);
} }
#endif
buf[linklen] = '\0';
opts = buf;
break; break;
} case 2: { } case 2: {
const char *envname = const char *envname =
@@ -461,15 +470,14 @@ malloc_conf_init(void)
} }
break; break;
} default: } default:
/* NOTREACHED */ not_reached();
assert(false);
buf[0] = '\0'; buf[0] = '\0';
opts = buf; opts = buf;
} }
while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v, while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
&vlen) == false) { &vlen) == false) {
#define CONF_HANDLE_BOOL_HIT(o, n, hit) \ #define CONF_HANDLE_BOOL(o, n) \
if (sizeof(n)-1 == klen && strncmp(n, k, \ if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \ klen) == 0) { \
if (strncmp("true", v, vlen) == 0 && \ if (strncmp("true", v, vlen) == 0 && \
@@ -483,16 +491,9 @@ malloc_conf_init(void)
"Invalid conf value", \ "Invalid conf value", \
k, klen, v, vlen); \ k, klen, v, vlen); \
} \ } \
hit = true; \
} else \
hit = false;
#define CONF_HANDLE_BOOL(o, n) { \
bool hit; \
CONF_HANDLE_BOOL_HIT(o, n, hit); \
if (hit) \
continue; \ continue; \
} }
#define CONF_HANDLE_SIZE_T(o, n, min, max) \ #define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
if (sizeof(n)-1 == klen && strncmp(n, k, \ if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \ klen) == 0) { \
uintmax_t um; \ uintmax_t um; \
@@ -505,12 +506,23 @@ malloc_conf_init(void)
malloc_conf_error( \ malloc_conf_error( \
"Invalid conf value", \ "Invalid conf value", \
k, klen, v, vlen); \ k, klen, v, vlen); \
} else if (um < min || um > max) { \ } else if (clip) { \
if (min != 0 && um < min) \
o = min; \
else if (um > max) \
o = max; \
else \
o = um; \
} else { \
if ((min != 0 && um < min) || \
um > max) { \
malloc_conf_error( \ malloc_conf_error( \
"Out-of-range conf value", \ "Out-of-range " \
"conf value", \
k, klen, v, vlen); \ k, klen, v, vlen); \
} else \ } else \
o = um; \ o = um; \
} \
continue; \ continue; \
} }
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ #define CONF_HANDLE_SSIZE_T(o, n, min, max) \
@@ -555,7 +567,8 @@ malloc_conf_init(void)
* config_fill. * config_fill.
*/ */
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
(config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1) (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
true)
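/*
 * Illustrative note, not part of this commit: because "lg_chunk" is handled
 * with clip == true, an out-of-range setting such as
 *
 *	MALLOC_CONF="lg_chunk:12" ./app
 *
 * (or the prefixed variant of the environment variable when
 * --with-jemalloc-prefix is used) is now clamped to the permitted minimum
 * (LG_PAGE + 2 when fill support is compiled in; 14 assuming 4 KiB pages)
 * rather than rejected with an "Out-of-range conf value" message. Options
 * handled with clip == false, e.g. "narenas", still report the error. The
 * command line above is only an example invocation.
 */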
if (strncmp("dss", k, klen) == 0) { if (strncmp("dss", k, klen) == 0) {
int i; int i;
bool match = false; bool match = false;
@@ -581,14 +594,14 @@ malloc_conf_init(void)
continue; continue;
} }
CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1, CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
SIZE_T_MAX) SIZE_T_MAX, false)
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
-1, (sizeof(size_t) << 3) - 1) -1, (sizeof(size_t) << 3) - 1)
CONF_HANDLE_BOOL(opt_stats_print, "stats_print") CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
if (config_fill) { if (config_fill) {
CONF_HANDLE_BOOL(opt_junk, "junk") CONF_HANDLE_BOOL(opt_junk, "junk")
CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
0, SIZE_T_MAX) 0, SIZE_T_MAX, false)
CONF_HANDLE_BOOL(opt_redzone, "redzone") CONF_HANDLE_BOOL(opt_redzone, "redzone")
CONF_HANDLE_BOOL(opt_zero, "zero") CONF_HANDLE_BOOL(opt_zero, "zero")
} }
@@ -668,17 +681,6 @@ malloc_init_hard(void)
malloc_conf_init(); malloc_conf_init();
#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
&& !defined(_WIN32))
/* Register fork handlers. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) {
malloc_write("<jemalloc>: Error in pthread_atfork()\n");
if (opt_abort)
abort();
}
#endif
if (opt_stats_print) { if (opt_stats_print) {
/* Print statistics at exit. */ /* Print statistics at exit. */
if (atexit(stats_print_atexit) != 0) { if (atexit(stats_print_atexit) != 0) {
@@ -718,8 +720,10 @@ malloc_init_hard(void)
return (true); return (true);
} }
if (malloc_mutex_init(&arenas_lock)) if (malloc_mutex_init(&arenas_lock)) {
malloc_mutex_unlock(&init_lock);
return (true); return (true);
}
/* /*
* Create enough scaffolding to allow recursive allocation in * Create enough scaffolding to allow recursive allocation in
@@ -765,9 +769,25 @@ malloc_init_hard(void)
return (true); return (true);
} }
/* Get number of CPUs. */
malloc_mutex_unlock(&init_lock); malloc_mutex_unlock(&init_lock);
/**********************************************************************/
/* Recursive allocation may follow. */
ncpus = malloc_ncpus(); ncpus = malloc_ncpus();
#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
&& !defined(_WIN32))
/* LinuxThreads's pthread_atfork() allocates. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) {
malloc_write("<jemalloc>: Error in pthread_atfork()\n");
if (opt_abort)
abort();
}
#endif
/* Done recursively allocating. */
/**********************************************************************/
malloc_mutex_lock(&init_lock); malloc_mutex_lock(&init_lock);
if (mutex_boot()) { if (mutex_boot()) {
@@ -814,6 +834,7 @@ malloc_init_hard(void)
malloc_initialized = true; malloc_initialized = true;
malloc_mutex_unlock(&init_lock); malloc_mutex_unlock(&init_lock);
return (false); return (false);
} }
@@ -825,42 +846,88 @@ malloc_init_hard(void)
* Begin malloc(3)-compatible functions. * Begin malloc(3)-compatible functions.
*/ */
static void *
imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
if (cnt == NULL)
return (NULL);
if (prof_promote && usize <= SMALL_MAXCLASS) {
p = imalloc(SMALL_MAXCLASS+1);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
p = imalloc(usize);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
imalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
if ((uintptr_t)cnt != (uintptr_t)1U)
p = imalloc_prof_sample(usize, cnt);
else
p = imalloc(usize);
if (p == NULL)
return (NULL);
prof_malloc(p, usize, cnt);
return (p);
}
/*
* MALLOC_BODY() is a macro rather than a function because its contents are in
* the fast path, but inlining would cause reliability issues when determining
* how many frames to discard from heap profiling backtraces.
*/
#define MALLOC_BODY(ret, size, usize) do { \
if (malloc_init()) \
ret = NULL; \
else { \
if (config_prof && opt_prof) { \
prof_thr_cnt_t *cnt; \
\
usize = s2u(size); \
/* \
* Call PROF_ALLOC_PREP() here rather than in \
* imalloc_prof() so that imalloc_prof() can be \
* inlined without introducing uncertainty \
* about the number of backtrace frames to \
* ignore. imalloc_prof() is in the fast path \
* when heap profiling is enabled, so inlining \
* is critical to performance. (For \
* consistency all callers of PROF_ALLOC_PREP() \
* are structured similarly, even though e.g. \
* realloc() isn't called enough for inlining \
* to be critical.) \
*/ \
PROF_ALLOC_PREP(1, usize, cnt); \
ret = imalloc_prof(usize, cnt); \
} else { \
if (config_stats || (config_valgrind && \
opt_valgrind)) \
usize = s2u(size); \
ret = imalloc(size); \
} \
} \
} while (0)
void * void *
je_malloc(size_t size) je_malloc(size_t size)
{ {
void *ret; void *ret;
size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t usize JEMALLOC_CC_SILENCE_INIT(0);
prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
if (malloc_init()) {
ret = NULL;
goto label_oom;
}
if (size == 0) if (size == 0)
size = 1; size = 1;
if (config_prof && opt_prof) { MALLOC_BODY(ret, size, usize);
usize = s2u(size);
PROF_ALLOC_PREP(1, usize, cnt);
if (cnt == NULL) {
ret = NULL;
goto label_oom;
}
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
SMALL_MAXCLASS) {
ret = imalloc(SMALL_MAXCLASS+1);
if (ret != NULL)
arena_prof_promoted(ret, usize);
} else
ret = imalloc(size);
} else {
if (config_stats || (config_valgrind && opt_valgrind))
usize = s2u(size);
ret = imalloc(size);
}
label_oom:
if (ret == NULL) { if (ret == NULL) {
if (config_xmalloc && opt_xmalloc) { if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in malloc(): " malloc_write("<jemalloc>: Error in malloc(): "
@@ -869,8 +936,6 @@ label_oom:
} }
set_errno(ENOMEM); set_errno(ENOMEM);
} }
if (config_prof && opt_prof && ret != NULL)
prof_malloc(ret, usize, cnt);
if (config_stats && ret != NULL) { if (config_stats && ret != NULL) {
assert(usize == isalloc(ret, config_prof)); assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize; thread_allocated_tsd_get()->allocated += usize;
@@ -880,28 +945,63 @@ label_oom:
return (ret); return (ret);
} }
static void *
imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
if (cnt == NULL)
return (NULL);
if (prof_promote && usize <= SMALL_MAXCLASS) {
assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment,
false);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
p = ipalloc(usize, alignment, false);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
if ((uintptr_t)cnt != (uintptr_t)1U)
p = imemalign_prof_sample(alignment, usize, cnt);
else
p = ipalloc(usize, alignment, false);
if (p == NULL)
return (NULL);
prof_malloc(p, usize, cnt);
return (p);
}
JEMALLOC_ATTR(nonnull(1)) JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF #ifdef JEMALLOC_PROF
/* /*
* Avoid any uncertainty as to how many backtrace frames to ignore in * Avoid any uncertainty as to how many backtrace frames to ignore in
* PROF_ALLOC_PREP(). * PROF_ALLOC_PREP().
*/ */
JEMALLOC_ATTR(noinline) JEMALLOC_NOINLINE
#endif #endif
static int static int
imemalign(void **memptr, size_t alignment, size_t size, imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
size_t min_alignment)
{ {
int ret; int ret;
size_t usize; size_t usize;
void *result; void *result;
prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
assert(min_alignment != 0); assert(min_alignment != 0);
if (malloc_init()) if (malloc_init()) {
result = NULL; result = NULL;
else { goto label_oom;
} else {
if (size == 0) if (size == 0)
size = 1; size = 1;
@@ -921,57 +1021,38 @@ imemalign(void **memptr, size_t alignment, size_t size,
usize = sa2u(size, alignment); usize = sa2u(size, alignment);
if (usize == 0) { if (usize == 0) {
result = NULL; result = NULL;
ret = ENOMEM; goto label_oom;
goto label_return;
} }
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
PROF_ALLOC_PREP(2, usize, cnt); PROF_ALLOC_PREP(2, usize, cnt);
if (cnt == NULL) { result = imemalign_prof(alignment, usize, cnt);
result = NULL;
ret = EINVAL;
} else {
if (prof_promote && (uintptr_t)cnt !=
(uintptr_t)1U && usize <= SMALL_MAXCLASS) {
assert(sa2u(SMALL_MAXCLASS+1,
alignment) != 0);
result = ipalloc(sa2u(SMALL_MAXCLASS+1,
alignment), alignment, false);
if (result != NULL) {
arena_prof_promoted(result,
usize);
}
} else {
result = ipalloc(usize, alignment,
false);
}
}
} else } else
result = ipalloc(usize, alignment, false); result = ipalloc(usize, alignment, false);
} if (result == NULL)
goto label_oom;
if (result == NULL) {
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error allocating aligned "
"memory: out of memory\n");
abort();
}
ret = ENOMEM;
goto label_return;
} }
*memptr = result; *memptr = result;
ret = 0; ret = 0;
label_return: label_return:
if (config_stats && result != NULL) { if (config_stats && result != NULL) {
assert(usize == isalloc(result, config_prof)); assert(usize == isalloc(result, config_prof));
thread_allocated_tsd_get()->allocated += usize; thread_allocated_tsd_get()->allocated += usize;
} }
if (config_prof && opt_prof && result != NULL)
prof_malloc(result, usize, cnt);
UTRACE(0, size, result); UTRACE(0, size, result);
return (ret); return (ret);
label_oom:
assert(result == NULL);
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error allocating aligned memory: "
"out of memory\n");
abort();
}
ret = ENOMEM;
goto label_return;
} }
int int
@@ -998,13 +1079,46 @@ je_aligned_alloc(size_t alignment, size_t size)
return (ret); return (ret);
} }
static void *
icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
if (cnt == NULL)
return (NULL);
if (prof_promote && usize <= SMALL_MAXCLASS) {
p = icalloc(SMALL_MAXCLASS+1);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
p = icalloc(usize);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
icalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
if ((uintptr_t)cnt != (uintptr_t)1U)
p = icalloc_prof_sample(usize, cnt);
else
p = icalloc(usize);
if (p == NULL)
return (NULL);
prof_malloc(p, usize, cnt);
return (p);
}
void * void *
je_calloc(size_t num, size_t size) je_calloc(size_t num, size_t size)
{ {
void *ret; void *ret;
size_t num_size; size_t num_size;
size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t usize JEMALLOC_CC_SILENCE_INIT(0);
prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
if (malloc_init()) { if (malloc_init()) {
num_size = 0; num_size = 0;
@@ -1033,19 +1147,11 @@ je_calloc(size_t num, size_t size)
} }
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
usize = s2u(num_size); usize = s2u(num_size);
PROF_ALLOC_PREP(1, usize, cnt); PROF_ALLOC_PREP(1, usize, cnt);
if (cnt == NULL) { ret = icalloc_prof(usize, cnt);
ret = NULL;
goto label_return;
}
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
<= SMALL_MAXCLASS) {
ret = icalloc(SMALL_MAXCLASS+1);
if (ret != NULL)
arena_prof_promoted(ret, usize);
} else
ret = icalloc(num_size);
} else { } else {
if (config_stats || (config_valgrind && opt_valgrind)) if (config_stats || (config_valgrind && opt_valgrind))
usize = s2u(num_size); usize = s2u(num_size);
@@ -1061,9 +1167,6 @@ label_return:
} }
set_errno(ENOMEM); set_errno(ENOMEM);
} }
if (config_prof && opt_prof && ret != NULL)
prof_malloc(ret, usize, cnt);
if (config_stats && ret != NULL) { if (config_stats && ret != NULL) {
assert(usize == isalloc(ret, config_prof)); assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize; thread_allocated_tsd_get()->allocated += usize;
@@ -1073,126 +1176,106 @@ label_return:
return (ret); return (ret);
} }
static void *
irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
if (cnt == NULL)
return (NULL);
if (prof_promote && usize <= SMALL_MAXCLASS) {
p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
p = iralloc(oldptr, usize, 0, 0, false);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt)
{
void *p;
prof_ctx_t *old_ctx;
old_ctx = prof_ctx_get(oldptr);
if ((uintptr_t)cnt != (uintptr_t)1U)
p = irealloc_prof_sample(oldptr, usize, cnt);
else
p = iralloc(oldptr, usize, 0, 0, false);
if (p == NULL)
return (NULL);
prof_realloc(p, usize, cnt, old_usize, old_ctx);
return (p);
}
JEMALLOC_INLINE_C void
ifree(void *ptr)
{
size_t usize;
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
assert(ptr != NULL);
assert(malloc_initialized || IS_INITIALIZER);
if (config_prof && opt_prof) {
usize = isalloc(ptr, config_prof);
prof_free(ptr, usize);
} else if (config_stats || config_valgrind)
usize = isalloc(ptr, config_prof);
if (config_stats)
thread_allocated_tsd_get()->deallocated += usize;
if (config_valgrind && opt_valgrind)
rzsize = p2rz(ptr);
iqalloc(ptr);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
void * void *
je_realloc(void *ptr, size_t size) je_realloc(void *ptr, size_t size)
{ {
void *ret; void *ret;
size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_size = 0; size_t old_usize = 0;
size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
if (size == 0) { if (size == 0) {
if (ptr != NULL) { if (ptr != NULL) {
/* realloc(ptr, 0) is equivalent to free(p). */ /* realloc(ptr, 0) is equivalent to free(ptr). */
if (config_prof) { UTRACE(ptr, 0, 0);
old_size = isalloc(ptr, true); ifree(ptr);
if (config_valgrind && opt_valgrind) return (NULL);
old_rzsize = p2rz(ptr);
} else if (config_stats) {
old_size = isalloc(ptr, false);
if (config_valgrind && opt_valgrind)
old_rzsize = u2rz(old_size);
} else if (config_valgrind && opt_valgrind) {
old_size = isalloc(ptr, false);
old_rzsize = u2rz(old_size);
}
if (config_prof && opt_prof) {
old_ctx = prof_ctx_get(ptr);
cnt = NULL;
} }
iqalloc(ptr);
ret = NULL;
goto label_return;
} else
size = 1; size = 1;
} }
if (ptr != NULL) { if (ptr != NULL) {
assert(malloc_initialized || IS_INITIALIZER); assert(malloc_initialized || IS_INITIALIZER);
malloc_thread_init();
if (config_prof) { if ((config_prof && opt_prof) || config_stats ||
old_size = isalloc(ptr, true); (config_valgrind && opt_valgrind))
old_usize = isalloc(ptr, config_prof);
if (config_valgrind && opt_valgrind) if (config_valgrind && opt_valgrind)
old_rzsize = p2rz(ptr); old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
} else if (config_stats) {
old_size = isalloc(ptr, false);
if (config_valgrind && opt_valgrind)
old_rzsize = u2rz(old_size);
} else if (config_valgrind && opt_valgrind) {
old_size = isalloc(ptr, false);
old_rzsize = u2rz(old_size);
}
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
usize = s2u(size); prof_thr_cnt_t *cnt;
old_ctx = prof_ctx_get(ptr);
usize = s2u(size);
PROF_ALLOC_PREP(1, usize, cnt); PROF_ALLOC_PREP(1, usize, cnt);
if (cnt == NULL) { ret = irealloc_prof(ptr, old_usize, usize, cnt);
old_ctx = NULL;
ret = NULL;
goto label_oom;
}
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
usize <= SMALL_MAXCLASS) {
ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
false, false);
if (ret != NULL)
arena_prof_promoted(ret, usize);
else
old_ctx = NULL;
} else {
ret = iralloc(ptr, size, 0, 0, false, false);
if (ret == NULL)
old_ctx = NULL;
}
} else { } else {
if (config_stats || (config_valgrind && opt_valgrind)) if (config_stats || (config_valgrind && opt_valgrind))
usize = s2u(size); usize = s2u(size);
ret = iralloc(ptr, size, 0, 0, false, false); ret = iralloc(ptr, size, 0, 0, false);
}
label_oom:
if (ret == NULL) {
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in realloc(): "
"out of memory\n");
abort();
}
set_errno(ENOMEM);
} }
} else { } else {
/* realloc(NULL, size) is equivalent to malloc(size). */ /* realloc(NULL, size) is equivalent to malloc(size). */
if (config_prof && opt_prof) MALLOC_BODY(ret, size, usize);
old_ctx = NULL;
if (malloc_init()) {
if (config_prof && opt_prof)
cnt = NULL;
ret = NULL;
} else {
if (config_prof && opt_prof) {
usize = s2u(size);
PROF_ALLOC_PREP(1, usize, cnt);
if (cnt == NULL)
ret = NULL;
else {
if (prof_promote && (uintptr_t)cnt !=
(uintptr_t)1U && usize <=
SMALL_MAXCLASS) {
ret = imalloc(SMALL_MAXCLASS+1);
if (ret != NULL) {
arena_prof_promoted(ret,
usize);
}
} else
ret = imalloc(size);
}
} else {
if (config_stats || (config_valgrind &&
opt_valgrind))
usize = s2u(size);
ret = imalloc(size);
}
} }
if (ret == NULL) { if (ret == NULL) {
@@ -1203,20 +1286,16 @@ label_oom:
} }
set_errno(ENOMEM); set_errno(ENOMEM);
} }
}
label_return:
if (config_prof && opt_prof)
prof_realloc(ret, usize, cnt, old_size, old_ctx);
if (config_stats && ret != NULL) { if (config_stats && ret != NULL) {
thread_allocated_t *ta; thread_allocated_t *ta;
assert(usize == isalloc(ret, config_prof)); assert(usize == isalloc(ret, config_prof));
ta = thread_allocated_tsd_get(); ta = thread_allocated_tsd_get();
ta->allocated += usize; ta->allocated += usize;
ta->deallocated += old_size; ta->deallocated += old_usize;
} }
UTRACE(ptr, size, ret); UTRACE(ptr, size, ret);
JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false); JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_usize, old_rzsize,
false);
return (ret); return (ret);
} }
@@ -1225,24 +1304,8 @@ je_free(void *ptr)
{ {
UTRACE(ptr, 0, 0); UTRACE(ptr, 0, 0);
if (ptr != NULL) { if (ptr != NULL)
size_t usize; ifree(ptr);
size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
assert(malloc_initialized || IS_INITIALIZER);
if (config_prof && opt_prof) {
usize = isalloc(ptr, config_prof);
prof_free(ptr, usize);
} else if (config_stats || config_valgrind)
usize = isalloc(ptr, config_prof);
if (config_stats)
thread_allocated_tsd_get()->deallocated += usize;
if (config_valgrind && opt_valgrind)
rzsize = p2rz(ptr);
iqalloc(ptr);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
} }
/* /*
@@ -1308,99 +1371,75 @@ JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
* Begin non-standard functions. * Begin non-standard functions.
*/ */
size_t JEMALLOC_ALWAYS_INLINE_C void *
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
{ {
size_t ret;
assert(malloc_initialized || IS_INITIALIZER); assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
alignment)));
if (config_ivsalloc) if (alignment != 0)
ret = ivsalloc(ptr, config_prof); return (ipalloct(usize, alignment, zero, try_tcache, arena));
else if (zero)
return (icalloct(usize, try_tcache, arena));
else else
ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0; return (imalloct(usize, try_tcache, arena));
return (ret);
}
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
stats_print(write_cb, cbopaque, opts);
}
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
if (malloc_init())
return (EAGAIN);
return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
if (malloc_init())
return (EAGAIN);
return (ctl_nametomib(name, mibp, miblenp));
} }
int static void *
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache,
void *newp, size_t newlen) arena_t *arena, prof_thr_cnt_t *cnt)
{ {
void *p;
if (malloc_init()) if (cnt == NULL)
return (EAGAIN); return (NULL);
if (prof_promote && usize <= SMALL_MAXCLASS) {
size_t usize_promoted = (alignment == 0) ?
s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment);
assert(usize_promoted != 0);
p = imallocx(usize_promoted, alignment, zero, try_tcache,
arena);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
p = imallocx(usize, alignment, zero, try_tcache, arena);
return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); return (p);
} }
/* JEMALLOC_ALWAYS_INLINE_C void *
* End non-standard functions. imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache,
*/ arena_t *arena, prof_thr_cnt_t *cnt)
/******************************************************************************/
/*
* Begin experimental functions.
*/
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
{ {
void *p;
assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, if ((uintptr_t)cnt != (uintptr_t)1U) {
alignment))); p = imallocx_prof_sample(usize, alignment, zero, try_tcache,
arena, cnt);
} else
p = imallocx(usize, alignment, zero, try_tcache, arena);
if (p == NULL)
return (NULL);
prof_malloc(p, usize, cnt);
if (alignment != 0) return (p);
return (ipallocx(usize, alignment, zero, try_tcache, arena));
else if (zero)
return (icallocx(usize, try_tcache, arena));
else
return (imallocx(usize, try_tcache, arena));
} }
int void *
je_allocm(void **ptr, size_t *rsize, size_t size, int flags) je_mallocx(size_t size, int flags)
{ {
void *p; void *p;
size_t usize; size_t usize;
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1)); & (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO; bool zero = flags & MALLOCX_ZERO;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
arena_t *arena; arena_t *arena;
bool try_tcache; bool try_tcache;
assert(ptr != NULL);
assert(size != 0); assert(size != 0);
if (malloc_init()) if (malloc_init())
@@ -1415,85 +1454,117 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
} }
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
if (usize == 0) assert(usize != 0);
goto label_oom;
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt; prof_thr_cnt_t *cnt;
PROF_ALLOC_PREP(1, usize, cnt); PROF_ALLOC_PREP(1, usize, cnt);
if (cnt == NULL) p = imallocx_prof(usize, alignment, zero, try_tcache, arena,
goto label_oom; cnt);
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= } else
SMALL_MAXCLASS) { p = imallocx(usize, alignment, zero, try_tcache, arena);
size_t usize_promoted = (alignment == 0) ?
s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
alignment);
assert(usize_promoted != 0);
p = iallocm(usize_promoted, alignment, zero,
try_tcache, arena);
if (p == NULL)
goto label_oom;
arena_prof_promoted(p, usize);
} else {
p = iallocm(usize, alignment, zero, try_tcache, arena);
if (p == NULL)
goto label_oom;
}
prof_malloc(p, usize, cnt);
} else {
p = iallocm(usize, alignment, zero, try_tcache, arena);
if (p == NULL) if (p == NULL)
goto label_oom; goto label_oom;
}
if (rsize != NULL)
*rsize = usize;
*ptr = p;
if (config_stats) { if (config_stats) {
assert(usize == isalloc(p, config_prof)); assert(usize == isalloc(p, config_prof));
thread_allocated_tsd_get()->allocated += usize; thread_allocated_tsd_get()->allocated += usize;
} }
UTRACE(0, size, p); UTRACE(0, size, p);
JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero); JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
return (ALLOCM_SUCCESS); return (p);
label_oom: label_oom:
if (config_xmalloc && opt_xmalloc) { if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in allocm(): " malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
"out of memory\n");
abort(); abort();
} }
*ptr = NULL;
UTRACE(0, size, 0); UTRACE(0, size, 0);
return (ALLOCM_ERR_OOM); return (NULL);
} }
int static void *
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize,
bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena,
prof_thr_cnt_t *cnt)
{ {
void *p, *q; void *p;
size_t usize;
size_t old_size; if (cnt == NULL)
size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); return (NULL);
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) if (prof_promote && usize <= SMALL_MAXCLASS) {
p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero,
try_tcache_alloc, try_tcache_dalloc, arena);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else {
p = iralloct(oldptr, size, 0, alignment, zero,
try_tcache_alloc, try_tcache_dalloc, arena);
}
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
arena_t *arena, prof_thr_cnt_t *cnt)
{
void *p;
prof_ctx_t *old_ctx;
old_ctx = prof_ctx_get(oldptr);
if ((uintptr_t)cnt != (uintptr_t)1U)
p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
try_tcache_alloc, try_tcache_dalloc, arena, cnt);
else {
p = iralloct(oldptr, size, 0, alignment, zero,
try_tcache_alloc, try_tcache_dalloc, arena);
}
if (p == NULL)
return (NULL);
if (p == oldptr && alignment != 0) {
/*
* The allocation did not move, so it is possible that the size
* class is smaller than would guarantee the requested
* alignment, and that the alignment constraint was
* serendipitously satisfied. Additionally, old_usize may not
* be the same as the current usize because of in-place large
* reallocation. Therefore, query the actual value of usize.
*/
*usize = isalloc(p, config_prof);
}
prof_realloc(p, *usize, cnt, old_usize, old_ctx);
return (p);
}
void *
je_rallocx(void *ptr, size_t size, int flags)
{
void *p;
size_t usize, old_usize;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1)); & (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO; bool zero = flags & MALLOCX_ZERO;
bool no_move = flags & ALLOCM_NO_MOVE;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
bool try_tcache_alloc, try_tcache_dalloc; bool try_tcache_alloc, try_tcache_dalloc;
arena_t *arena; arena_t *arena;
assert(ptr != NULL); assert(ptr != NULL);
assert(*ptr != NULL);
assert(size != 0); assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || IS_INITIALIZER); assert(malloc_initialized || IS_INITIALIZER);
malloc_thread_init();
if (arena_ind != UINT_MAX) { if (arena_ind != UINT_MAX) {
arena_chunk_t *chunk; arena_chunk_t *chunk;
try_tcache_alloc = true; try_tcache_alloc = false;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
try_tcache_dalloc = (chunk == *ptr || chunk->arena != try_tcache_dalloc = (chunk == ptr || chunk->arena !=
arenas[arena_ind]); arenas[arena_ind]);
arena = arenas[arena_ind]; arena = arenas[arena_ind];
} else { } else {
@@ -1502,12 +1573,142 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
arena = NULL; arena = NULL;
} }
p = *ptr; if ((config_prof && opt_prof) || config_stats ||
(config_valgrind && opt_valgrind))
old_usize = isalloc(ptr, config_prof);
if (config_valgrind && opt_valgrind)
old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt; prof_thr_cnt_t *cnt;
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
assert(usize != 0);
PROF_ALLOC_PREP(1, usize, cnt);
p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero,
try_tcache_alloc, try_tcache_dalloc, arena, cnt);
if (p == NULL)
goto label_oom;
} else {
p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc,
try_tcache_dalloc, arena);
if (p == NULL)
goto label_oom;
if (config_stats || (config_valgrind && opt_valgrind))
usize = isalloc(p, config_prof);
}
if (config_stats) {
thread_allocated_t *ta;
ta = thread_allocated_tsd_get();
ta->allocated += usize;
ta->deallocated += old_usize;
}
UTRACE(ptr, size, p);
JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_usize, old_rzsize, zero);
return (p);
label_oom:
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
abort();
}
UTRACE(ptr, size, 0);
return (NULL);
}
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
size_t alignment, bool zero, arena_t *arena)
{
size_t usize;
if (ixalloc(ptr, size, extra, alignment, zero))
return (old_usize);
usize = isalloc(ptr, config_prof);
return (usize);
}
static size_t
ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
size_t alignment, size_t max_usize, bool zero, arena_t *arena,
prof_thr_cnt_t *cnt)
{
size_t usize;
if (cnt == NULL)
return (old_usize);
/* Use minimum usize to determine whether promotion may happen. */
if (prof_promote && ((alignment == 0) ? s2u(size) : sa2u(size,
alignment)) <= SMALL_MAXCLASS) {
if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
alignment, zero))
return (old_usize);
usize = isalloc(ptr, config_prof);
if (max_usize < PAGE)
arena_prof_promoted(ptr, usize);
} else {
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
zero, arena);
}
return (usize);
}
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
size_t alignment, size_t max_usize, bool zero, arena_t *arena,
prof_thr_cnt_t *cnt)
{
size_t usize;
prof_ctx_t *old_ctx;
old_ctx = prof_ctx_get(ptr);
if ((uintptr_t)cnt != (uintptr_t)1U) {
usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
alignment, zero, max_usize, arena, cnt);
} else {
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
zero, arena);
}
if (usize == old_usize)
return (usize);
prof_realloc(ptr, usize, cnt, old_usize, old_ctx);
return (usize);
}
size_t
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
size_t usize, old_usize;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & MALLOCX_ZERO;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
arena_t *arena;
assert(ptr != NULL);
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || IS_INITIALIZER);
malloc_thread_init();
if (arena_ind != UINT_MAX)
arena = arenas[arena_ind];
else
arena = NULL;
old_usize = isalloc(ptr, config_prof);
if (config_valgrind && opt_valgrind)
old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
/* /*
* usize isn't knowable before iralloc() returns when extra is * usize isn't knowable before ixalloc() returns when extra is
* non-zero. Therefore, compute its maximum possible value and * non-zero. Therefore, compute its maximum possible value and
* use that in PROF_ALLOC_PREP() to decide whether to capture a * use that in PROF_ALLOC_PREP() to decide whether to capture a
* backtrace. prof_realloc() will use the actual usize to * backtrace. prof_realloc() will use the actual usize to
@@ -1515,111 +1716,51 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
*/ */
size_t max_usize = (alignment == 0) ? s2u(size+extra) : size_t max_usize = (alignment == 0) ? s2u(size+extra) :
sa2u(size+extra, alignment); sa2u(size+extra, alignment);
prof_ctx_t *old_ctx = prof_ctx_get(p);
old_size = isalloc(p, true);
if (config_valgrind && opt_valgrind)
old_rzsize = p2rz(p);
PROF_ALLOC_PREP(1, max_usize, cnt); PROF_ALLOC_PREP(1, max_usize, cnt);
if (cnt == NULL) usize = ixallocx_prof(ptr, old_usize, size, extra, alignment,
goto label_oom; max_usize, zero, arena, cnt);
/*
* Use minimum usize to determine whether promotion may happen.
*/
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
&& ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
<= SMALL_MAXCLASS) {
q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
alignment, zero, no_move, try_tcache_alloc,
try_tcache_dalloc, arena);
if (q == NULL)
goto label_err;
if (max_usize < PAGE) {
usize = max_usize;
arena_prof_promoted(q, usize);
} else
usize = isalloc(q, config_prof);
} else {
q = irallocx(p, size, extra, alignment, zero, no_move,
try_tcache_alloc, try_tcache_dalloc, arena);
if (q == NULL)
goto label_err;
usize = isalloc(q, config_prof);
}
prof_realloc(q, usize, cnt, old_size, old_ctx);
if (rsize != NULL)
*rsize = usize;
} else { } else {
if (config_stats) { usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
old_size = isalloc(p, false); zero, arena);
if (config_valgrind && opt_valgrind)
old_rzsize = u2rz(old_size);
} else if (config_valgrind && opt_valgrind) {
old_size = isalloc(p, false);
old_rzsize = u2rz(old_size);
}
q = irallocx(p, size, extra, alignment, zero, no_move,
try_tcache_alloc, try_tcache_dalloc, arena);
if (q == NULL)
goto label_err;
if (config_stats)
usize = isalloc(q, config_prof);
if (rsize != NULL) {
if (config_stats == false)
usize = isalloc(q, config_prof);
*rsize = usize;
}
} }
if (usize == old_usize)
goto label_not_resized;
*ptr = q;
if (config_stats) { if (config_stats) {
thread_allocated_t *ta; thread_allocated_t *ta;
ta = thread_allocated_tsd_get(); ta = thread_allocated_tsd_get();
ta->allocated += usize; ta->allocated += usize;
ta->deallocated += old_size; ta->deallocated += old_usize;
} }
UTRACE(p, size, q); JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_usize, old_rzsize, zero);
JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero); label_not_resized:
return (ALLOCM_SUCCESS); UTRACE(ptr, size, ptr);
label_err: return (usize);
if (no_move) {
UTRACE(p, size, q);
return (ALLOCM_ERR_NOT_MOVED);
}
label_oom:
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in rallocm(): "
"out of memory\n");
abort();
}
UTRACE(p, size, 0);
return (ALLOCM_ERR_OOM);
} }
int size_t
je_sallocm(const void *ptr, size_t *rsize, int flags) je_sallocx(const void *ptr, int flags)
{ {
size_t sz; size_t usize;
assert(malloc_initialized || IS_INITIALIZER); assert(malloc_initialized || IS_INITIALIZER);
malloc_thread_init();
if (config_ivsalloc) if (config_ivsalloc)
sz = ivsalloc(ptr, config_prof); usize = ivsalloc(ptr, config_prof);
else { else {
assert(ptr != NULL); assert(ptr != NULL);
sz = isalloc(ptr, config_prof); usize = isalloc(ptr, config_prof);
} }
assert(rsize != NULL);
*rsize = sz;
return (ALLOCM_SUCCESS); return (usize);
} }
int void
je_dallocm(void *ptr, int flags) je_dallocx(void *ptr, int flags)
{ {
size_t usize; size_t usize;
size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
bool try_tcache; bool try_tcache;
@@ -1645,28 +1786,162 @@ je_dallocm(void *ptr, int flags)
thread_allocated_tsd_get()->deallocated += usize; thread_allocated_tsd_get()->deallocated += usize;
if (config_valgrind && opt_valgrind) if (config_valgrind && opt_valgrind)
rzsize = p2rz(ptr); rzsize = p2rz(ptr);
iqallocx(ptr, try_tcache); iqalloct(ptr, try_tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize); JEMALLOC_VALGRIND_FREE(ptr, rzsize);
return (ALLOCM_SUCCESS);
} }
int size_t
je_nallocm(size_t *rsize, size_t size, int flags) je_nallocx(size_t size, int flags)
{ {
size_t usize; size_t usize;
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1)); & (SIZE_T_MAX-1));
assert(size != 0); assert(size != 0);
if (malloc_init()) if (malloc_init())
return (ALLOCM_ERR_OOM); return (0);
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
if (usize == 0) assert(usize != 0);
return (usize);
}
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
if (malloc_init())
return (EAGAIN);
return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
if (malloc_init())
return (EAGAIN);
return (ctl_nametomib(name, mibp, miblenp));
}
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
if (malloc_init())
return (EAGAIN);
return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
stats_print(write_cb, cbopaque, opts);
}
size_t
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
size_t ret;
assert(malloc_initialized || IS_INITIALIZER);
malloc_thread_init();
if (config_ivsalloc)
ret = ivsalloc(ptr, config_prof);
else
ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
return (ret);
}
/*
* End non-standard functions.
*/
/******************************************************************************/
/*
* Begin experimental functions.
*/
#ifdef JEMALLOC_EXPERIMENTAL
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
void *p;
assert(ptr != NULL);
p = je_mallocx(size, flags);
if (p == NULL)
return (ALLOCM_ERR_OOM); return (ALLOCM_ERR_OOM);
if (rsize != NULL)
*rsize = isalloc(p, config_prof);
*ptr = p;
return (ALLOCM_SUCCESS);
}
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
int ret;
bool no_move = flags & ALLOCM_NO_MOVE;
assert(ptr != NULL);
assert(*ptr != NULL);
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
if (no_move) {
size_t usize = je_xallocx(*ptr, size, extra, flags);
ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
if (rsize != NULL)
*rsize = usize;
} else {
void *p = je_rallocx(*ptr, size+extra, flags);
if (p != NULL) {
*ptr = p;
ret = ALLOCM_SUCCESS;
} else
ret = ALLOCM_ERR_OOM;
if (rsize != NULL)
*rsize = isalloc(*ptr, config_prof);
}
return (ret);
}
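(Aside, not part of this diff: the wrapper above spells out the migration path for rallocm() callers: ALLOCM_NO_MOVE maps to an in-place xallocx() attempt, everything else to rallocx(). A hedged sketch of the same logic written directly against the new API:)

#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Grow buf to at least want bytes, preferring to do it in place; returns the
 * (possibly moved) pointer, or NULL if a moving reallocation failed. */
static void *
grow(void *buf, size_t want)
{
	if (xallocx(buf, want, 0, 0) >= want)
		return (buf);		/* resized in place */
	return (rallocx(buf, want, 0));	/* may move; NULL on OOM */
}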
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
assert(rsize != NULL);
*rsize = je_sallocx(ptr, flags);
return (ALLOCM_SUCCESS);
}
int
je_dallocm(void *ptr, int flags)
{
je_dallocx(ptr, flags);
return (ALLOCM_SUCCESS);
}
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
size_t usize;
usize = je_nallocx(size, flags);
if (usize == 0)
return (ALLOCM_ERR_OOM);
if (rsize != NULL)
*rsize = usize;
return (ALLOCM_SUCCESS);
@@ -1721,12 +1996,12 @@ _malloc_prefork(void)
/* Acquire all mutexes in a safe order. */
ctl_prefork();
-prof_prefork();
malloc_mutex_prefork(&arenas_lock);
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_prefork(arenas[i]);
}
+prof_prefork();
chunk_prefork();
base_prefork();
huge_prefork();
@@ -1752,12 +2027,12 @@ _malloc_postfork(void)
huge_postfork_parent();
base_postfork_parent();
chunk_postfork_parent();
-prof_postfork_parent();
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_parent(arenas[i]);
}
malloc_mutex_postfork_parent(&arenas_lock);
+prof_postfork_parent();
ctl_postfork_parent();
}
@@ -1772,12 +2047,12 @@ jemalloc_postfork_child(void)
huge_postfork_child();
base_postfork_child();
chunk_postfork_child();
-prof_postfork_child();
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_child(arenas[i]);
}
malloc_mutex_postfork_child(&arenas_lock);
+prof_postfork_child();
ctl_postfork_child();
}
@@ -1801,7 +2076,7 @@ a0alloc(size_t size, bool zero)
if (size <= arena_maxclass)
return (arena_malloc(arenas[0], size, zero, false));
else
-return (huge_malloc(size, zero));
+return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));
}
void *
@@ -24,9 +24,14 @@ bool opt_prof_gdump = false;
bool opt_prof_final = true;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
-char opt_prof_prefix[PATH_MAX + 1];
+char opt_prof_prefix[
+/* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+PATH_MAX +
+#endif
+1];
-uint64_t prof_interval;
+uint64_t prof_interval = 0;
bool prof_promote;
/*
@@ -54,47 +59,23 @@ static uint64_t prof_dump_useq;
/*
* This buffer is rather large for stack allocation, so use a single buffer for
-* all profile dumps. The buffer is implicitly protected by bt2ctx_mtx, since
-* it must be locked anyway during dumping.
+* all profile dumps.
*/
-static char prof_dump_buf[PROF_DUMP_BUFSIZE];
+static malloc_mutex_t prof_dump_mtx;
+static char prof_dump_buf[
+/* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+PROF_DUMP_BUFSIZE
+#else
+1
+#endif
+];
static unsigned prof_dump_buf_end;
static int prof_dump_fd;
/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static prof_bt_t *bt_dup(prof_bt_t *bt);
static void bt_destroy(prof_bt_t *bt);
#ifdef JEMALLOC_PROF_LIBGCC
static _Unwind_Reason_Code prof_unwind_init_callback(
struct _Unwind_Context *context, void *arg);
static _Unwind_Reason_Code prof_unwind_callback(
struct _Unwind_Context *context, void *arg);
#endif
static bool prof_flush(bool propagate_err);
static bool prof_write(bool propagate_err, const char *s);
static bool prof_printf(bool propagate_err, const char *format, ...)
JEMALLOC_ATTR(format(printf, 2, 3));
static void prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all,
size_t *leak_nctx);
static void prof_ctx_destroy(prof_ctx_t *ctx);
static void prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt);
static bool prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx,
prof_bt_t *bt);
static bool prof_dump_maps(bool propagate_err);
static bool prof_dump(bool propagate_err, const char *filename,
bool leakcheck);
static void prof_dump_filename(char *filename, char v, int64_t vseq);
static void prof_fdump(void);
static void prof_bt_hash(const void *key, unsigned minbits, size_t *hash1,
size_t *hash2);
static bool prof_bt_keycomp(const void *k1, const void *k2);
static malloc_mutex_t *prof_ctx_mutex_choose(void);
/******************************************************************************/
void
@@ -424,75 +405,155 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore)
{
cassert(config_prof);
-assert(false);
+not_reached();
}
#endif
-prof_thr_cnt_t *
-prof_lookup(prof_bt_t *bt)
+static malloc_mutex_t *
+prof_ctx_mutex_choose(void)
{
unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
}
static void
prof_ctx_init(prof_ctx_t *ctx, prof_bt_t *bt)
{
ctx->bt = bt;
ctx->lock = prof_ctx_mutex_choose();
/*
* Set nlimbo to 1, in order to avoid a race condition with
* prof_ctx_merge()/prof_ctx_destroy().
*/
ctx->nlimbo = 1;
ql_elm_new(ctx, dump_link);
memset(&ctx->cnt_merged, 0, sizeof(prof_cnt_t));
ql_new(&ctx->cnts_ql);
}
static void
prof_ctx_destroy(prof_ctx_t *ctx)
{
-union {
-prof_thr_cnt_t *p;
-void *v;
-} ret;
prof_tdata_t *prof_tdata;
cassert(config_prof);
-prof_tdata = prof_tdata_get();
-if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
-return (NULL);
+/*
+* Check that ctx is still unused by any thread cache before destroying
+* it. prof_lookup() increments ctx->nlimbo in order to avoid a race
+* condition with this function, as does prof_ctx_merge() in order to
+* avoid a race between the main body of prof_ctx_merge() and entry
+* into this function.
+*/
+prof_tdata = prof_tdata_get(false);
+assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
prof_enter(prof_tdata);
malloc_mutex_lock(ctx->lock);
if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 &&
ctx->nlimbo == 1) {
assert(ctx->cnt_merged.curbytes == 0);
assert(ctx->cnt_merged.accumobjs == 0);
assert(ctx->cnt_merged.accumbytes == 0);
/* Remove ctx from bt2ctx. */
if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
not_reached();
prof_leave(prof_tdata);
/* Destroy ctx. */
malloc_mutex_unlock(ctx->lock);
bt_destroy(ctx->bt);
idalloc(ctx);
} else {
/*
* Compensate for increment in prof_ctx_merge() or
* prof_lookup().
*/
ctx->nlimbo--;
malloc_mutex_unlock(ctx->lock);
prof_leave(prof_tdata);
}
}
-if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
-union {
-prof_bt_t *p;
-void *v;
-} btkey;
+static void
+prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
+{
+bool destroy;
cassert(config_prof);
/* Merge cnt stats and detach from ctx. */
malloc_mutex_lock(ctx->lock);
ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
ql_remove(&ctx->cnts_ql, cnt, cnts_link);
if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) {
/*
* Increment ctx->nlimbo in order to keep another thread from
* winning the race to destroy ctx while this one has ctx->lock
* dropped. Without this, it would be possible for another
* thread to:
*
* 1) Sample an allocation associated with ctx.
* 2) Deallocate the sampled object.
* 3) Successfully prof_ctx_destroy(ctx).
*
* The result would be that ctx no longer exists by the time
* this thread accesses it in prof_ctx_destroy().
*/
ctx->nlimbo++;
destroy = true;
} else
destroy = false;
malloc_mutex_unlock(ctx->lock);
if (destroy)
prof_ctx_destroy(ctx);
}
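(Aside, not part of this diff: the nlimbo bookkeeping above acts as a small pin count that keeps a ctx alive between dropping ctx->lock and re-entering prof_ctx_destroy(). A stripped-down sketch of the same pattern with hypothetical names, using plain pthreads rather than jemalloc's internal mutexes:)

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;
	int refs;	/* live references */
	int nlimbo;	/* pending destroy attempts holding no reference */
};

static void
obj_try_destroy(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	if (o->refs == 0 && o->nlimbo == 1) {
		/* Nobody revived or re-pinned it while the lock was dropped. */
		pthread_mutex_unlock(&o->lock);
		pthread_mutex_destroy(&o->lock);
		free(o);
	} else {
		o->nlimbo--;	/* hand the decision to whoever raced us */
		pthread_mutex_unlock(&o->lock);
	}
}

static void
obj_put(struct obj *o)
{
	bool destroy;

	pthread_mutex_lock(&o->lock);
	o->refs--;
	destroy = (o->refs == 0 && o->nlimbo == 0);
	if (destroy)
		o->nlimbo++;	/* pin before dropping the lock */
	pthread_mutex_unlock(&o->lock);
	if (destroy)
		obj_try_destroy(o);
}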
static bool
prof_lookup_global(prof_bt_t *bt, prof_tdata_t *prof_tdata, void **p_btkey,
prof_ctx_t **p_ctx, bool *p_new_ctx)
{
union {
prof_ctx_t *p;
void *v;
} ctx;
union {
prof_bt_t *p;
void *v;
} btkey;
bool new_ctx;
/*
* This thread's cache lacks bt. Look for it in the global
* cache.
*/
prof_enter(prof_tdata);
if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
/* bt has never been seen before. Insert it. */
ctx.v = imalloc(sizeof(prof_ctx_t));
if (ctx.v == NULL) {
prof_leave(prof_tdata);
-return (NULL);
+return (true);
}
btkey.p = bt_dup(bt);
if (btkey.v == NULL) {
prof_leave(prof_tdata);
idalloc(ctx.v);
-return (NULL);
+return (true);
}
-ctx.p->bt = btkey.p;
+prof_ctx_init(ctx.p, btkey.p);
ctx.p->lock = prof_ctx_mutex_choose();
/*
* Set nlimbo to 1, in order to avoid a race condition
* with prof_ctx_merge()/prof_ctx_destroy().
*/
ctx.p->nlimbo = 1;
memset(&ctx.p->cnt_merged, 0, sizeof(prof_cnt_t));
ql_new(&ctx.p->cnts_ql);
if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
/* OOM. */
prof_leave(prof_tdata);
idalloc(btkey.v);
idalloc(ctx.v);
-return (NULL);
+return (true);
}
new_ctx = true;
} else {
/*
-* Increment nlimbo, in order to avoid a race condition
-* with prof_ctx_merge()/prof_ctx_destroy().
+* Increment nlimbo, in order to avoid a race condition with
+* prof_ctx_merge()/prof_ctx_destroy().
*/
malloc_mutex_lock(ctx.p->lock);
ctx.p->nlimbo++;
@@ -501,6 +562,39 @@ prof_lookup(prof_bt_t *bt)
}
prof_leave(prof_tdata);
*p_btkey = btkey.v;
*p_ctx = ctx.p;
*p_new_ctx = new_ctx;
return (false);
}
prof_thr_cnt_t *
prof_lookup(prof_bt_t *bt)
{
union {
prof_thr_cnt_t *p;
void *v;
} ret;
prof_tdata_t *prof_tdata;
cassert(config_prof);
prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return (NULL);
if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
void *btkey;
prof_ctx_t *ctx;
bool new_ctx;
/*
* This thread's cache lacks bt. Look for it in the global
* cache.
*/
if (prof_lookup_global(bt, prof_tdata, &btkey, &ctx, &new_ctx))
return (NULL);
/* Link a prof_thd_cnt_t into ctx for this thread. */
if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
assert(ckh_count(&prof_tdata->bt2cnt) > 0);
@@ -512,7 +606,7 @@ prof_lookup(prof_bt_t *bt)
assert(ret.v != NULL);
if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
NULL, NULL))
-assert(false);
+not_reached();
ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
prof_ctx_merge(ret.p->ctx, ret.p);
/* ret can now be re-used. */
@@ -522,27 +616,27 @@ prof_lookup(prof_bt_t *bt)
ret.v = imalloc(sizeof(prof_thr_cnt_t));
if (ret.p == NULL) {
if (new_ctx)
-prof_ctx_destroy(ctx.p);
+prof_ctx_destroy(ctx);
return (NULL);
}
ql_elm_new(ret.p, cnts_link);
ql_elm_new(ret.p, lru_link);
}
/* Finish initializing ret. */
-ret.p->ctx = ctx.p;
+ret.p->ctx = ctx;
ret.p->epoch = 0;
memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
-if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
+if (ckh_insert(&prof_tdata->bt2cnt, btkey, ret.v)) {
if (new_ctx)
-prof_ctx_destroy(ctx.p);
+prof_ctx_destroy(ctx);
idalloc(ret.v);
return (NULL);
}
ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
-malloc_mutex_lock(ctx.p->lock);
-ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
-ctx.p->nlimbo--;
-malloc_mutex_unlock(ctx.p->lock);
+malloc_mutex_lock(ctx->lock);
+ql_tail_insert(&ctx->cnts_ql, ret.p, cnts_link);
+ctx->nlimbo--;
+malloc_mutex_unlock(ctx->lock);
} else {
/* Move ret to the front of the LRU. */
ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
@@ -552,8 +646,52 @@ prof_lookup(prof_bt_t *bt)
return (ret.p);
}
#ifdef JEMALLOC_JET
size_t
prof_bt_count(void)
{
size_t bt_count;
prof_tdata_t *prof_tdata;
prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return (0);
prof_enter(prof_tdata);
bt_count = ckh_count(&bt2ctx);
prof_leave(prof_tdata);
return (bt_count);
}
#endif
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
#endif
static int
prof_dump_open(bool propagate_err, const char *filename)
{
int fd;
fd = creat(filename, 0644);
if (fd == -1 && propagate_err == false) {
malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
filename);
if (opt_abort)
abort();
}
return (fd);
}
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define prof_dump_open JEMALLOC_N(prof_dump_open)
prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
#endif
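(Aside, not part of this diff: the #undef/#define pair plus the prof_dump_open function pointer above is the JEMALLOC_JET testing hook; in a JET build the real implementation keeps its _impl name and tests can swap in their own open routine. A hedged sketch of how a unit test might interpose it; the prof_dump_open_t type and JET build are assumptions taken from this change:)

#include <fcntl.h>
#include <stdbool.h>

/* Divert all profile dumps to /dev/null so the dump code path can be
 * exercised without creating files; signature mirrors prof_dump_open(). */
static int
prof_dump_open_intercept(bool propagate_err, const char *filename)
{
	(void)propagate_err;
	(void)filename;
	return (open("/dev/null", O_WRONLY));
}

/* In test setup (JEMALLOC_JET builds only):
 *	prof_dump_open = prof_dump_open_intercept;
 */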
static bool
-prof_flush(bool propagate_err)
+prof_dump_flush(bool propagate_err)
{
bool ret = false;
ssize_t err;
@@ -576,7 +714,20 @@ prof_flush(bool propagate_err)
}
static bool
-prof_write(bool propagate_err, const char *s)
+prof_dump_close(bool propagate_err)
{
bool ret;
assert(prof_dump_fd != -1);
ret = prof_dump_flush(propagate_err);
close(prof_dump_fd);
prof_dump_fd = -1;
return (ret);
}
static bool
prof_dump_write(bool propagate_err, const char *s)
{
unsigned i, slen, n;
@@ -587,7 +738,7 @@ prof_write(bool propagate_err, const char *s)
while (i < slen) {
/* Flush the buffer if it is full. */
if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
-if (prof_flush(propagate_err) && propagate_err)
+if (prof_dump_flush(propagate_err) && propagate_err)
return (true);
if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
@@ -607,7 +758,7 @@ prof_write(bool propagate_err, const char *s)
JEMALLOC_ATTR(format(printf, 2, 3))
static bool
-prof_printf(bool propagate_err, const char *format, ...)
+prof_dump_printf(bool propagate_err, const char *format, ...)
{
bool ret;
va_list ap;
@@ -616,13 +767,14 @@ prof_printf(bool propagate_err, const char *format, ...)
va_start(ap, format);
malloc_vsnprintf(buf, sizeof(buf), format, ap);
va_end(ap);
-ret = prof_write(propagate_err, buf);
+ret = prof_dump_write(propagate_err, buf);
return (ret);
}
static void
-prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
+prof_dump_ctx_prep(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx,
prof_ctx_list_t *ctx_ql)
{
prof_thr_cnt_t *thr_cnt;
prof_cnt_t tcnt;
@@ -631,6 +783,14 @@ prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
malloc_mutex_lock(ctx->lock);
/*
* Increment nlimbo so that ctx won't go away before dump.
* Additionally, link ctx into the dump list so that it is included in
* prof_dump()'s second pass.
*/
ctx->nlimbo++;
ql_tail_insert(ctx_ql, ctx, dump_link);
memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
volatile unsigned *epoch = &thr_cnt->epoch;
@@ -671,89 +831,52 @@ prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
malloc_mutex_unlock(ctx->lock);
}
-static void
-prof_ctx_destroy(prof_ctx_t *ctx)
+static bool
+prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
{
prof_tdata_t *prof_tdata;
cassert(config_prof);
-/*
-* Check that ctx is still unused by any thread cache before destroying
-* it. prof_lookup() increments ctx->nlimbo in order to avoid a race
-* condition with this function, as does prof_ctx_merge() in order to
-* avoid a race between the main body of prof_ctx_merge() and entry
-* into this function.
-*/
+if (opt_lg_prof_sample == 0) {
+if (prof_dump_printf(propagate_err,
+"heap profile: %"PRId64": %"PRId64
+" [%"PRIu64": %"PRIu64"] @ heapprofile\n",
+cnt_all->curobjs, cnt_all->curbytes,
+cnt_all->accumobjs, cnt_all->accumbytes))
+return (true);
prof_tdata = *prof_tdata_tsd_get();
assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
prof_enter(prof_tdata);
malloc_mutex_lock(ctx->lock);
if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 &&
ctx->nlimbo == 1) {
assert(ctx->cnt_merged.curbytes == 0);
assert(ctx->cnt_merged.accumobjs == 0);
assert(ctx->cnt_merged.accumbytes == 0);
/* Remove ctx from bt2ctx. */
if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
assert(false);
prof_leave(prof_tdata);
/* Destroy ctx. */
malloc_mutex_unlock(ctx->lock);
bt_destroy(ctx->bt);
idalloc(ctx);
} else {
-/*
-* Compensate for increment in prof_ctx_merge() or
-* prof_lookup().
-*/
-ctx->nlimbo--;
-malloc_mutex_unlock(ctx->lock);
-prof_leave(prof_tdata);
+if (prof_dump_printf(propagate_err,
+"heap profile: %"PRId64": %"PRId64
+" [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
+cnt_all->curobjs, cnt_all->curbytes,
+cnt_all->accumobjs, cnt_all->accumbytes,
+((uint64_t)1U << opt_lg_prof_sample)))
+return (true);
}
return (false);
}
static void
-prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
+prof_dump_ctx_cleanup_locked(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
{
-bool destroy;
-cassert(config_prof);
+ctx->nlimbo--;
ql_remove(ctx_ql, ctx, dump_link);
}
static void
prof_dump_ctx_cleanup(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
{
/* Merge cnt stats and detach from ctx. */
malloc_mutex_lock(ctx->lock);
-ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
+prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
ql_remove(&ctx->cnts_ql, cnt, cnts_link);
if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) {
/*
* Increment ctx->nlimbo in order to keep another thread from
* winning the race to destroy ctx while this one has ctx->lock
* dropped. Without this, it would be possible for another
* thread to:
*
* 1) Sample an allocation associated with ctx.
* 2) Deallocate the sampled object.
* 3) Successfully prof_ctx_destroy(ctx).
*
* The result would be that ctx no longer exists by the time
* this thread accesses it in prof_ctx_destroy().
*/
ctx->nlimbo++;
destroy = true;
} else
destroy = false;
malloc_mutex_unlock(ctx->lock);
if (destroy)
prof_ctx_destroy(ctx);
}
static bool
-prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt)
+prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, const prof_bt_t *bt,
prof_ctx_list_t *ctx_ql)
{
bool ret;
unsigned i;
cassert(config_prof);
@@ -765,66 +888,109 @@ prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt)
* filled in. Avoid dumping any ctx that is an artifact of either
* implementation detail.
*/
malloc_mutex_lock(ctx->lock);
if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) ||
(opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) {
assert(ctx->cnt_summed.curobjs == 0);
assert(ctx->cnt_summed.curbytes == 0);
assert(ctx->cnt_summed.accumobjs == 0);
assert(ctx->cnt_summed.accumbytes == 0);
-return (false);
+ret = false;
goto label_return;
}
-if (prof_printf(propagate_err, "%"PRId64": %"PRId64
+if (prof_dump_printf(propagate_err, "%"PRId64": %"PRId64
" [%"PRIu64": %"PRIu64"] @",
ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes,
-ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes))
-return (true);
+ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes)) {
+ret = true;
goto label_return;
}
for (i = 0; i < bt->len; i++) {
-if (prof_printf(propagate_err, " %#"PRIxPTR,
-(uintptr_t)bt->vec[i]))
-return (true);
+if (prof_dump_printf(propagate_err, " %#"PRIxPTR,
+(uintptr_t)bt->vec[i])) {
+ret = true;
goto label_return;
}
}
-if (prof_write(propagate_err, "\n"))
-return (true);
+if (prof_dump_write(propagate_err, "\n")) {
+ret = true;
goto label_return;
}
-return (false);
+ret = false;
label_return:
prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
malloc_mutex_unlock(ctx->lock);
return (ret);
}
static bool
prof_dump_maps(bool propagate_err)
{
bool ret;
int mfd;
char filename[PATH_MAX + 1];
cassert(config_prof);
#ifdef __FreeBSD__
malloc_snprintf(filename, sizeof(filename), "/proc/curproc/map");
#else
malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
(int)getpid());
#endif
mfd = open(filename, O_RDONLY);
if (mfd != -1) {
ssize_t nread;
-if (prof_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
-propagate_err)
-return (true);
+if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
+propagate_err) {
+ret = true;
goto label_return;
}
nread = 0;
do {
prof_dump_buf_end += nread;
if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
/* Make space in prof_dump_buf before read(). */
-if (prof_flush(propagate_err) && propagate_err)
-return (true);
+if (prof_dump_flush(propagate_err) &&
+propagate_err) {
ret = true;
goto label_return;
}
}
nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
PROF_DUMP_BUFSIZE - prof_dump_buf_end);
} while (nread > 0);
} else {
ret = true;
goto label_return;
}
ret = false;
label_return:
if (mfd != -1)
close(mfd);
-} else
-return (true);
-return (false);
+return (ret);
+}
+static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_nctx,
const char *filename)
{
if (cnt_all->curbytes != 0) {
malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
PRId64" object%s, %zu context%s\n",
cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
leak_nctx, (leak_nctx != 1) ? "s" : "");
malloc_printf(
"<jemalloc>: Run pprof on \"%s\" for leak detail\n",
filename);
}
}
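(Aside, not part of this diff: prof_leakcheck() only runs when a dump is taken with leakcheck set, which in practice means an --enable-prof build started with something like MALLOC_CONF="prof:true,prof_leak:true,prof_final:true", so that the atexit() final dump performs the check; the pprof tool it points at ships with gperftools.)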
static bool
@@ -833,99 +999,75 @@ prof_dump(bool propagate_err, const char *filename, bool leakcheck)
prof_tdata_t *prof_tdata;
prof_cnt_t cnt_all;
size_t tabind;
union {
prof_bt_t *p;
void *v;
} bt;
union {
prof_ctx_t *p;
void *v;
} ctx;
size_t leak_nctx;
prof_ctx_list_t ctx_ql;
cassert(config_prof);
-prof_tdata = prof_tdata_get();
+prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return (true);
prof_enter(prof_tdata);
-prof_dump_fd = creat(filename, 0644);
+malloc_mutex_lock(&prof_dump_mtx);
if (prof_dump_fd == -1) {
if (propagate_err == false) {
malloc_printf(
"<jemalloc>: creat(\"%s\"), 0644) failed\n",
filename);
if (opt_abort)
abort();
}
goto label_error;
}
/* Merge per thread profile stats, and sum them in cnt_all. */
memset(&cnt_all, 0, sizeof(prof_cnt_t));
leak_nctx = 0;
ql_new(&ctx_ql);
prof_enter(prof_tdata);
for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;)
-prof_ctx_sum(ctx.p, &cnt_all, &leak_nctx);
+prof_dump_ctx_prep(ctx.p, &cnt_all, &leak_nctx, &ctx_ql);
prof_leave(prof_tdata);
/* Create dump file. */
if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
goto label_open_close_error;
/* Dump profile header. */
-if (opt_lg_prof_sample == 0) {
-if (prof_printf(propagate_err,
+if (prof_dump_header(propagate_err, &cnt_all))
+goto label_write_error;
"heap profile: %"PRId64": %"PRId64
" [%"PRIu64": %"PRIu64"] @ heapprofile\n",
cnt_all.curobjs, cnt_all.curbytes,
cnt_all.accumobjs, cnt_all.accumbytes))
goto label_error;
} else {
if (prof_printf(propagate_err,
"heap profile: %"PRId64": %"PRId64
" [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
cnt_all.curobjs, cnt_all.curbytes,
cnt_all.accumobjs, cnt_all.accumbytes,
((uint64_t)1U << opt_lg_prof_sample)))
goto label_error;
}
/* Dump per ctx profile stats. */
-for (tabind = 0; ckh_iter(&bt2ctx, &tabind, &bt.v, &ctx.v)
-== false;) {
-if (prof_dump_ctx(propagate_err, ctx.p, bt.p))
+while ((ctx.p = ql_first(&ctx_ql)) != NULL) {
+if (prof_dump_ctx(propagate_err, ctx.p, ctx.p->bt, &ctx_ql))
+goto label_write_error;
goto label_error;
}
/* Dump /proc/<pid>/maps if possible. */
if (prof_dump_maps(propagate_err))
-goto label_error;
-if (prof_flush(propagate_err))
-goto label_error;
+goto label_write_error;
+if (prof_dump_close(propagate_err))
+goto label_open_close_error;
close(prof_dump_fd);
prof_leave(prof_tdata);
-if (leakcheck && cnt_all.curbytes != 0) {
-malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
-PRId64" object%s, %zu context%s\n",
-cnt_all.curbytes, (cnt_all.curbytes != 1) ? "s" : "",
+malloc_mutex_unlock(&prof_dump_mtx);
+if (leakcheck)
+prof_leakcheck(&cnt_all, leak_nctx, filename);
cnt_all.curobjs, (cnt_all.curobjs != 1) ? "s" : "",
leak_nctx, (leak_nctx != 1) ? "s" : "");
malloc_printf(
"<jemalloc>: Run pprof on \"%s\" for leak detail\n",
filename);
}
return (false);
-label_error:
-prof_leave(prof_tdata);
+label_write_error:
+prof_dump_close(propagate_err);
label_open_close_error:
while ((ctx.p = ql_first(&ctx_ql)) != NULL)
prof_dump_ctx_cleanup(ctx.p, &ctx_ql);
malloc_mutex_unlock(&prof_dump_mtx);
return (true);
}
#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
static void
prof_dump_filename(char *filename, char v, int64_t vseq)
{
cassert(config_prof);
-if (vseq != UINT64_C(0xffffffffffffffff)) {
+if (vseq != VSEQ_INVALID) {
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"PRIu64".%c%"PRId64".heap",
@@ -951,7 +1093,7 @@ prof_fdump(void)
if (opt_prof_final && opt_prof_prefix[0] != '\0') {
malloc_mutex_lock(&prof_dump_seq_mtx);
-prof_dump_filename(filename, 'f', UINT64_C(0xffffffffffffffff));
+prof_dump_filename(filename, 'f', VSEQ_INVALID);
malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(false, filename, opt_prof_leak);
}
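(Aside, not part of this diff: besides the automatic final dump driven by prof_fdump() above, a dump can be requested at runtime through the prof.dump mallctl. A hedged sketch, unprefixed public names assumed:)

#include <jemalloc/jemalloc.h>

/* Ask jemalloc to write a heap profile now; requires an --enable-prof build
 * running with prof:true. Passing NULL for the filename lets jemalloc derive
 * one from opt.prof_prefix using the naming scheme shown above. */
static int
dump_heap_profile(const char *filename)
{
	if (filename != NULL) {
		return (mallctl("prof.dump", NULL, NULL, &filename,
		    sizeof(const char *)));
	}
	return (mallctl("prof.dump", NULL, NULL, NULL, 0));
}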
@@ -967,11 +1109,7 @@ prof_idump(void)
if (prof_booted == false)
return;
-/*
+prof_tdata = prof_tdata_get(false);
* Don't call prof_tdata_get() here, because it could cause recursive
* allocation.
*/
prof_tdata = *prof_tdata_tsd_get();
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return;
if (prof_tdata->enq) {
@@ -1021,11 +1159,7 @@ prof_gdump(void)
if (prof_booted == false)
return;
-/*
+prof_tdata = prof_tdata_get(false);
* Don't call prof_tdata_get() here, because it could cause recursive
* allocation.
*/
prof_tdata = *prof_tdata_tsd_get();
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return;
if (prof_tdata->enq) {
@@ -1043,34 +1177,13 @@
}
static void
-prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
+prof_bt_hash(const void *key, size_t r_hash[2])
{
size_t ret1, ret2;
uint64_t h;
prof_bt_t *bt = (prof_bt_t *)key;
cassert(config_prof);
assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
assert(hash1 != NULL);
assert(hash2 != NULL);
h = hash(bt->vec, bt->len * sizeof(void *),
UINT64_C(0x94122f335b332aea));
if (minbits <= 32) {
/*
* Avoid doing multiple hashes, since a single hash provides
* enough bits.
*/
ret1 = h & ZU(0xffffffffU);
ret2 = h >> 32;
} else {
ret1 = h;
ret2 = hash(bt->vec, bt->len * sizeof(void *),
UINT64_C(0x8432a476666bbc13));
}
-*hash1 = ret1;
+hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
*hash2 = ret2;
}
static bool
@@ -1086,14 +1199,6 @@ prof_bt_keycomp(const void *k1, const void *k2)
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
static malloc_mutex_t *
prof_ctx_mutex_choose(void)
{
unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
}
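(Aside, not part of this diff: prof_ctx_mutex_choose(), deleted here because it moved earlier in the file, hands out locks round-robin from a fixed array, a lock-striping scheme that bounds the number of mutexes while usually keeping unrelated ctx objects on different locks. A generic sketch with hypothetical names, using a GCC/Clang atomic builtin:)

#include <pthread.h>

#define NSTRIPES 16	/* small fixed pool of locks */

static pthread_mutex_t stripes[NSTRIPES];
static unsigned nassigned;	/* total objects handed a lock so far */

static void
stripes_init(void)
{
	for (unsigned i = 0; i < NSTRIPES; i++)
		pthread_mutex_init(&stripes[i], NULL);
}

/* Round-robin assignment: the n-th object gets lock (n - 1) % NSTRIPES. */
static pthread_mutex_t *
stripe_choose(void)
{
	unsigned n = __atomic_add_fetch(&nassigned, 1, __ATOMIC_RELAXED);

	return (&stripes[(n - 1) % NSTRIPES]);
}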
prof_tdata_t *
prof_tdata_init(void)
{
@@ -1206,13 +1311,11 @@ prof_boot1(void)
*/
opt_prof = true;
opt_prof_gdump = false;
prof_interval = 0;
} else if (opt_prof) {
if (opt_lg_prof_interval >= 0) {
prof_interval = (((uint64_t)1U) <<
opt_lg_prof_interval);
-} else
+}
prof_interval = 0;
}
prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
@@ -1240,6 +1343,8 @@ prof_boot2(void)
if (malloc_mutex_init(&prof_dump_seq_mtx))
return (true);
if (malloc_mutex_init(&prof_dump_mtx))
return (true);
if (atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
@@ -1277,10 +1382,10 @@ prof_prefork(void)
if (opt_prof) {
unsigned i;
-malloc_mutex_lock(&bt2ctx_mtx);
-malloc_mutex_lock(&prof_dump_seq_mtx);
+malloc_mutex_prefork(&bt2ctx_mtx);
+malloc_mutex_prefork(&prof_dump_seq_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
-malloc_mutex_lock(&ctx_locks[i]);
+malloc_mutex_prefork(&ctx_locks[i]);
}
}
#define JEMALLOC_QUARANTINE_C_
#include "jemalloc/internal/jemalloc_internal.h"
/*
@@ -11,39 +12,18 @@
/******************************************************************************/
/* Data. */
-typedef struct quarantine_obj_s quarantine_obj_t;
+malloc_tsd_data(, quarantine, quarantine_t *, NULL)
typedef struct quarantine_s quarantine_t;
struct quarantine_obj_s {
void *ptr;
size_t usize;
};
struct quarantine_s {
size_t curbytes;
size_t curobjs;
size_t first;
#define LG_MAXOBJS_INIT 10
size_t lg_maxobjs;
quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
};
static void quarantine_cleanup(void *arg);
malloc_tsd_data(static, quarantine, quarantine_t *, NULL)
malloc_tsd_funcs(JEMALLOC_INLINE, quarantine, quarantine_t *, NULL,
quarantine_cleanup)
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static quarantine_t *quarantine_init(size_t lg_maxobjs);
static quarantine_t *quarantine_grow(quarantine_t *quarantine);
static void quarantine_drain_one(quarantine_t *quarantine);
static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
/******************************************************************************/
-static quarantine_t *
+quarantine_t *
quarantine_init(size_t lg_maxobjs)
{
quarantine_t *quarantine;
@@ -68,8 +48,10 @@ quarantine_grow(quarantine_t *quarantine)
quarantine_t *ret;
ret = quarantine_init(quarantine->lg_maxobjs + 1);
-if (ret == NULL)
+if (ret == NULL) {
quarantine_drain_one(quarantine);
return (quarantine);
}
ret->curbytes = quarantine->curbytes;
ret->curobjs = quarantine->curobjs;
@@ -89,15 +71,14 @@ quarantine_grow(quarantine_t *quarantine)
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
idalloc(quarantine);
return (ret);
}
static void
-quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
+quarantine_drain_one(quarantine_t *quarantine)
{
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) {
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
assert(obj->usize == isalloc(obj->ptr, config_prof));
idalloc(obj->ptr);
@@ -105,7 +86,14 @@ quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
quarantine->lg_maxobjs) - 1);
}
static void
quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
{
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
quarantine_drain_one(quarantine);
} }
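(Aside, not part of this diff: quarantine_drain_one() retires the oldest entry of a power-of-two ring buffer, which is why the head index advances with a mask instead of a modulo. A generic sketch of that indexing with hypothetical types:)

#include <stddef.h>

#define RING_LG_SLOTS 10	/* 1024 slots; must stay a power of two */
#define RING_NSLOTS ((size_t)1 << RING_LG_SLOTS)

struct ring {
	size_t first;	/* index of the oldest element */
	size_t count;
	void *slot[RING_NSLOTS];
};

/* Pop the oldest element; masking keeps the index inside the ring. */
static void *
ring_pop(struct ring *r)
{
	void *p = r->slot[r->first];

	r->first = (r->first + 1) & (RING_NSLOTS - 1);
	r->count--;
	return (p);
}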
void
@@ -119,17 +107,10 @@ quarantine(void *ptr)
quarantine = *quarantine_tsd_get();
if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
if (quarantine == NULL) {
if ((quarantine = quarantine_init(LG_MAXOBJS_INIT)) ==
NULL) {
idalloc(ptr);
return;
}
} else {
if (quarantine == QUARANTINE_STATE_PURGATORY) {
/*
-* Make a note that quarantine() was called
-* after quarantine_cleanup() was called.
+* Make a note that quarantine() was called after
+* quarantine_cleanup() was called.
*/
quarantine = QUARANTINE_STATE_REINCARNATED;
quarantine_tsd_set(&quarantine);
@@ -137,7 +118,6 @@ quarantine(void *ptr)
idalloc(ptr);
return;
}
}
/*
* Drain one or more objects if the quarantine size limit would be
* exceeded by appending ptr.
@@ -161,15 +141,24 @@ quarantine(void *ptr)
obj->usize = usize;
quarantine->curbytes += usize;
quarantine->curobjs++;
-if (opt_junk)
+if (config_fill && opt_junk) {
/*
* Only do redzone validation if Valgrind isn't in
* operation.
*/
if ((config_valgrind == false || opt_valgrind == false)
&& usize <= SMALL_MAXCLASS)
arena_quarantine_junk_small(ptr, usize);
else
memset(ptr, 0x5a, usize); memset(ptr, 0x5a, usize);
}
} else {
assert(quarantine->curbytes == 0);
idalloc(ptr);
}
}
-static void
+void
quarantine_cleanup(void *arg)
{
quarantine_t *quarantine = *(quarantine_t **)arg;