ruanhaishen / redis · Commits

Commit fceef8e0, authored Jun 20, 2014 by antirez

Jemalloc updated to 3.6.0.

Not a single bug in about 3 months, and our previous version was too old (3.2.0).

Parent: fe596d67
Changes: 143 files
deps/jemalloc/include/jemalloc/jemalloc.h.in (deleted, 100644 → 0)
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <limits.h>
#include <strings.h>
#define JEMALLOC_VERSION "@jemalloc_version@"
#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
#include "jemalloc_defs@install_suffix@.h"
#ifdef JEMALLOC_EXPERIMENTAL
#define ALLOCM_LG_ALIGN(la) (la)
#if LG_SIZEOF_PTR == 2
#define ALLOCM_ALIGN(a) (ffs(a)-1)
#else
#define ALLOCM_ALIGN(a) ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
#endif
#define ALLOCM_ZERO ((int)0x40)
#define ALLOCM_NO_MOVE ((int)0x80)
/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
#define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
#define ALLOCM_SUCCESS 0
#define ALLOCM_ERR_OOM 1
#define ALLOCM_ERR_NOT_MOVED 2
#endif
/*
* The je_ prefix on the following public symbol declarations is an artifact of
* namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see below).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
JEMALLOC_EXPORT size_t je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size,
int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size,
size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
#endif
/*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
#ifndef JEMALLOC_NO_DEMANGLE
#define JEMALLOC_NO_DEMANGLE
#endif
#define malloc_conf je_malloc_conf
#define malloc_message je_malloc_message
#define malloc je_malloc
#define calloc je_calloc
#define posix_memalign je_posix_memalign
#define aligned_alloc je_aligned_alloc
#define realloc je_realloc
#define free je_free
#define malloc_usable_size je_malloc_usable_size
#define malloc_stats_print je_malloc_stats_print
#define mallctl je_mallctl
#define mallctlnametomib je_mallctlnametomib
#define mallctlbymib je_mallctlbymib
#define memalign je_memalign
#define valloc je_valloc
#ifdef JEMALLOC_EXPERIMENTAL
#define allocm je_allocm
#define rallocm je_rallocm
#define sallocm je_sallocm
#define dallocm je_dallocm
#define nallocm je_nallocm
#endif
#endif
/*
* The je_* macros can be used as stable alternative names for the public
* jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily meant
* for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_malloc_usable_size
#undef je_malloc_stats_print
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_memalign
#undef je_valloc
#ifdef JEMALLOC_EXPERIMENTAL
#undef je_allocm
#undef je_rallocm
#undef je_sallocm
#undef je_dallocm
#undef je_nallocm
#endif
#endif
#ifdef __cplusplus
};
#endif
#endif /* JEMALLOC_H_ */
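For reference, a minimal sketch of how an application might call the je_-prefixed API that this header declared (the "stats.allocated" mallctl name is taken from the jemalloc documentation, not from this diff, and is only available in stats-enabled builds):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    void *p = je_malloc(4096);              /* prefixed public symbol */
    size_t allocated, sz = sizeof(allocated);

    /* Query allocator statistics through the mallctl namespace. */
    if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
        printf("allocated: %zu bytes\n", allocated);

    je_free(p);
    return 0;
}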
deps/jemalloc/include/jemalloc/jemalloc.sh (new, 0 → 100755)
#!/bin/sh

objroot=$1

cat <<EOF
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif

EOF

for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
           jemalloc_protos.h jemalloc_mangle.h ; do
  cat "${objroot}include/jemalloc/${hdr}" \
      | grep -v 'Generated from .* by configure\.' \
      | sed -e 's/^#define /#define /g' \
      | sed -e 's/ $//g'
  echo
done

cat <<EOF
#ifdef __cplusplus
};
#endif
#endif /* JEMALLOC_H_ */
EOF
deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
#undef JEMALLOC_PREFIX
#undef JEMALLOC_CPREFIX
/*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_malloc_usable_size
#undef je_malloc_stats_print
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_memalign
#undef je_valloc
#undef je_allocm
#undef je_rallocm
#undef je_sallocm
#undef je_dallocm
#undef je_nallocm
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#undef JEMALLOC_PRIVATE_NAMESPACE
#undef JEMALLOC_N
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#undef CPU_SPINWAIT
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
#undef JEMALLOC_ATOMIC9
/*
* Defined if OSAtomic*() functions are available, as provided by Darwin, and
* documented in the atomic(3) manual page.
*/
#undef JEMALLOC_OSATOMIC
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
#undef JEMALLOC_OSSPIN
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
#undef JEMALLOC_MALLOC_THREAD_CLEANUP
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
#undef JEMALLOC_THREADED_INIT
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
#undef JEMALLOC_MUTEX_INIT_CB
/* Defined if __attribute__((...)) syntax is supported. */
#undef JEMALLOC_HAVE_ATTR
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_NOINLINE __declspec(noinline)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_SECTION(s)
# define JEMALLOC_NOINLINE
#endif
/* Defined if sbrk() is supported. */
#undef JEMALLOC_HAVE_SBRK
/* Non-empty if the tls_model attribute is supported. */
#undef JEMALLOC_TLS_MODEL
/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
#undef JEMALLOC_CC_SILENCE
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
#undef JEMALLOC_DEBUG
/* JEMALLOC_STATS enables statistics calculation. */
#undef JEMALLOC_STATS
/* JEMALLOC_PROF enables allocation profiling. */
#undef JEMALLOC_PROF
/* Use libunwind for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBUNWIND
/* Use libgcc for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBGCC
/* Use gcc intrinsics for profile backtracing if defined. */
#undef JEMALLOC_PROF_GCC
/*
* JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
* This makes it possible to allocate/deallocate objects without any locking
* when the cache is in the steady state.
*/
#undef JEMALLOC_TCACHE
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
* segment (DSS).
*/
#undef JEMALLOC_DSS
/* Support memory filling (junk/zero/quarantine/redzone). */
#undef JEMALLOC_FILL
/* Support the experimental API. */
#undef JEMALLOC_EXPERIMENTAL
/* Support utrace(2)-based tracing. */
#undef JEMALLOC_UTRACE
/* Support Valgrind. */
#undef JEMALLOC_VALGRIND
/* Support optional abort() on OOM. */
#undef JEMALLOC_XMALLOC
/* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK
/* One page is 2^STATIC_PAGE_SHIFT bytes. */
#undef STATIC_PAGE_SHIFT
/*
* If defined, use munmap() to unmap freed chunks, rather than storing them for
* later reuse. This is disabled by default on Linux because common sequences
* of mmap()/munmap() calls will cause virtual memory map holes.
*/
#undef JEMALLOC_MUNMAP
/*
* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is
* disabled by default because it is Linux-specific and it will cause virtual
* memory map holes, much like munmap(2) does.
*/
#undef JEMALLOC_MREMAP
/* TLS is used to map arenas and magazine caches to threads. */
#undef JEMALLOC_TLS
/*
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
* within jemalloc-owned chunks before dereferencing them.
*/
#undef JEMALLOC_IVSALLOC
/*
 * Define overrides for non-standard allocator-related functions if they are
 * present on the system.
*/
#undef JEMALLOC_OVERRIDE_MEMALIGN
#undef JEMALLOC_OVERRIDE_VALLOC
...
@@ -230,33 +20,5 @@
*/
#undef JEMALLOC_USABLE_SIZE_CONST
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
#undef JEMALLOC_ZONE
#undef JEMALLOC_ZONE_VERSION
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
* such that new pages will be demand-zeroed if
* the address region is later touched.
* madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
* unused, such that they will be discarded rather
* than swapped out.
*/
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_FREE
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#undef LG_SIZEOF_PTR
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#undef LG_SIZEOF_INT
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T
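The LG_SIZEOF_* settings record type sizes as base-2 logarithms, so configure only has to emit one small integer per type. A hedged illustration of the invariant (the value 3 is what an LP64 target would be expected to get; the check itself is illustrative, not part of jemalloc):

#include <stddef.h>

#define LG_SIZEOF_PTR 3   /* assumed LP64 configure result */

int lg_sizeof_ptr_is_consistent(void) {
    /* sizeof(void *) == 2^LG_SIZEOF_PTR */
    return sizeof(void *) == ((size_t)1 << LG_SIZEOF_PTR);
}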
deps/jemalloc/include/jemalloc/jemalloc_macros.h.in (new, 0 → 100644)
#include <limits.h>
#include <strings.h>
#define JEMALLOC_VERSION "@jemalloc_version@"
#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
# define MALLOCX_ALIGN(a) \
((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
#ifdef JEMALLOC_EXPERIMENTAL
# define ALLOCM_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define ALLOCM_ALIGN(a) (ffs(a)-1)
# else
# define ALLOCM_ALIGN(a) \
((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
# endif
# define ALLOCM_ZERO ((int)0x40)
# define ALLOCM_NO_MOVE ((int)0x80)
/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
# define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
# define ALLOCM_SUCCESS 0
# define ALLOCM_ERR_OOM 1
# define ALLOCM_ERR_NOT_MOVED 2
#endif
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_NOINLINE __declspec(noinline)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_SECTION(s)
# define JEMALLOC_NOINLINE
#endif
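These macros pack allocation options into the int flags argument of the *allocx functions declared in jemalloc_protos.h.in below: the low bits carry lg(alignment), 0x40 requests zeroed memory, and arena indices are biased by one and shifted into the high bits. A hedged sketch of how a caller might combine them (assuming the non-prefixed mallocx name of a default build):

#include <jemalloc/jemalloc.h>

void *alloc_aligned_zeroed(size_t size) {
    /* 64-byte-aligned, zero-filled allocation: lg(64) == 6. */
    int flags = MALLOCX_LG_ALIGN(6) | MALLOCX_ZERO;
    return mallocx(size, flags);
}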
deps/jemalloc/include/jemalloc/jemalloc_mangle.sh (new, 0 → 100755)
#!/bin/sh

public_symbols_txt=$1
symbol_prefix=$2

cat <<EOF
/*
 * By default application code must explicitly refer to mangled symbol names,
 * so that it is possible to use jemalloc in conjunction with another allocator
 * in the same application.  Define JEMALLOC_MANGLE in order to cause automatic
 * name mangling that matches the API prefixing that happened as a result of
 * --with-mangling and/or --with-jemalloc-prefix configuration settings.
 */
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
#  define JEMALLOC_NO_DEMANGLE
# endif
EOF

for nm in `cat ${public_symbols_txt}` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "# define ${n} ${symbol_prefix}${n}"
done

cat <<EOF
#endif

/*
 * The ${symbol_prefix}* macros can be used as stable alternative names for the
 * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined.  This is primarily
 * meant for use in jemalloc itself, but it can be used by application code to
 * provide isolation from the name mangling specified via --with-mangling
 * and/or --with-jemalloc-prefix.
 */
#ifndef JEMALLOC_NO_DEMANGLE
EOF

for nm in `cat ${public_symbols_txt}` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "# undef ${symbol_prefix}${n}"
done

cat <<EOF
#endif
EOF
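For a hypothetical public_symbols.txt entry such as "malloc:je_malloc" (the real list is produced by configure) and symbol_prefix=je_, the script above would emit output along these lines:

#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
#  define JEMALLOC_NO_DEMANGLE
# endif
# define malloc je_malloc
#endif

#ifndef JEMALLOC_NO_DEMANGLE
# undef je_malloc
#endif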
deps/jemalloc/include/jemalloc/jemalloc_protos.h.in (new, 0 → 100644)
/*
* The @je_@ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h).
*/
extern JEMALLOC_EXPORT const char *@je_@malloc_conf;
extern JEMALLOC_EXPORT void (*@je_@malloc_message)(void *cbopaque,
const char *s);
JEMALLOC_EXPORT void *@je_@malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *@je_@calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int @je_@posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *@je_@aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *@je_@realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void @je_@free(void *ptr);
JEMALLOC_EXPORT void *@je_@mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *@je_@rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t @je_@xallocx(void *ptr, size_t size, size_t extra,
int flags);
JEMALLOC_EXPORT size_t @je_@sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void @je_@dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t @je_@nallocx(size_t size, int flags);
JEMALLOC_EXPORT int @je_@mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int @je_@mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int @je_@mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void @je_@malloc_stats_print(void (*write_cb)(void *,
const char *), void *@je_@cbopaque, const char *opts);
JEMALLOC_EXPORT size_t @je_@malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * @je_@memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * @je_@valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_EXPORT int @je_@allocm(void **ptr, size_t *rsize, size_t size,
int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int @je_@rallocm(void **ptr, size_t *rsize, size_t size,
size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int @je_@sallocm(const void *ptr, size_t *rsize, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int @je_@dallocm(void *ptr, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int @je_@nallocm(size_t *rsize, size_t size, int flags);
#endif
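The mallocx/rallocx/xallocx/sallocx/dallocx/nallocx entry points declared above are the stable counterparts of the experimental allocm family. A hedged sketch of typical usage once the @je_@ placeholder has been expanded (default unprefixed build assumed):

#include <jemalloc/jemalloc.h>

size_t grow_in_place_or_move(void **p, size_t new_size) {
    /* Try to grow without moving; xallocx returns the resulting usable size. */
    size_t usize = xallocx(*p, new_size, 0, 0);
    if (usize < new_size) {
        /* Could not grow in place; rallocx may move the allocation. */
        void *q = rallocx(*p, new_size, 0);
        if (q == NULL)
            return usize;   /* out of memory; original block unchanged */
        *p = q;
        usize = sallocx(*p, 0);
    }
    return usize;
}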
deps/jemalloc/include/jemalloc/jemalloc_rename.sh (new, 0 → 100755)
#!/bin/sh

public_symbols_txt=$1

cat <<EOF
/*
 * Name mangling for public symbols is controlled by --with-mangling and
 * --with-jemalloc-prefix.  With default settings the je_ prefix is stripped by
 * these macro definitions.
 */
#ifndef JEMALLOC_NO_RENAME
EOF

for nm in `cat ${public_symbols_txt}` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
  echo "# define je_${n} ${m}"
done

cat <<EOF
#endif
EOF
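With a hypothetical public_symbols.txt line "malloc:malloc" (an unprefixed default build), the loop above would generate output along these lines:

/*
 * Name mangling for public symbols is controlled by --with-mangling and
 * --with-jemalloc-prefix.  With default settings the je_ prefix is stripped by
 * these macro definitions.
 */
#ifndef JEMALLOC_NO_RENAME
# define je_malloc malloc
#endif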
deps/jemalloc/src/arena.c
...
@@ -38,52 +38,18 @@ const uint8_t small_size2bin[] = {
};

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
    size_t pageind, size_t npages, bool maybe_adjac_pred,
    bool maybe_adjac_succ);
static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
    size_t pageind, size_t npages, bool maybe_adjac_pred,
    bool maybe_adjac_succ);
static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
    bool large, size_t binind, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size,
    bool large, size_t binind, bool zero);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
    size_t binind, bool zero);
static arena_chunk_t *chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
    arena_chunk_t *chunk, void *arg);
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned);
static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize);
static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t *arena_bin_runs_first(arena_bin_t *bin);
static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size);
static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
    size_t min_run_size);
static void bin_info_init(void);

/******************************************************************************/
...
@@ -359,60 +325,73 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
}

static inline void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

    VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
        LG_PAGE)), (npages << LG_PAGE));
    memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
        (npages << LG_PAGE));
}

static inline void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

    VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
        LG_PAGE)), PAGE);
}

static inline void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
    size_t i;
    UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

    arena_run_page_mark_zeroed(chunk, run_ind);
    for (i = 0; i < PAGE / sizeof(size_t); i++)
        assert(p[i] == 0);
}

static void
arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{

    if (config_stats) {
        ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages)
            << LG_PAGE) - CHUNK_CEILING((arena->nactive - sub_pages) <<
            LG_PAGE);
        if (cactive_diff != 0)
            stats_cactive_add(cactive_diff);
    }
}

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t need_pages)
{
    size_t total_pages, rem_pages;

    total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
        LG_PAGE;
    assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
        flag_dirty);
    assert(need_pages <= total_pages);
    rem_pages = total_pages - need_pages;

    arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
    arena_cactive_update(arena, need_pages, 0);
    arena->nactive += need_pages;

    /* Keep track of trailing unused pages for later use. */
    if (rem_pages > 0) {
        if (flag_dirty != 0) {
            arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
                (rem_pages << LG_PAGE), flag_dirty);
            arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
                (rem_pages << LG_PAGE), flag_dirty);
        } else {
            arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
                (rem_pages << LG_PAGE),
...
@@ -426,97 +405,128 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
        arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
            false, true);
    }
}

static void
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
    arena_chunk_t *chunk;
    size_t flag_dirty, run_ind, need_pages, i;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
    flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
    need_pages = (size >> LG_PAGE);
    assert(need_pages > 0);

    if (remove) {
        arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
            need_pages);
    }

    if (zero) {
        if (flag_dirty == 0) {
            /*
             * The run is clean, so some pages may be zeroed (i.e.
             * never before touched).
             */
            for (i = 0; i < need_pages; i++) {
                if (arena_mapbits_unzeroed_get(chunk, run_ind+i) != 0)
                    arena_run_zero(chunk, run_ind+i, 1);
                else if (config_debug) {
                    arena_run_page_validate_zeroed(chunk,
                        run_ind+i);
                } else {
                    arena_run_page_mark_zeroed(chunk,
                        run_ind+i);
                }
            }
        } else {
            /* The run is dirty, so all pages must be zeroed. */
            arena_run_zero(chunk, run_ind, need_pages);
        }
    } else {
        VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
            (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
    }

    /*
     * Set the last element first, in case the run only contains one page
     * (i.e. both statements set the same element).
     */
    arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
    arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
}

static void
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

    arena_run_split_large_helper(arena, run, size, true, zero);
}

static void
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

    arena_run_split_large_helper(arena, run, size, false, zero);
}

static void
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    size_t binind)
{
    arena_chunk_t *chunk;
    size_t flag_dirty, run_ind, need_pages, i;

    assert(binind != BININD_INVALID);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
    flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
    need_pages = (size >> LG_PAGE);
    assert(need_pages > 0);

    arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);

    /*
     * Propagate the dirty and unzeroed flags to the allocated small run,
     * so that arena_dalloc_bin_run() has the ability to conditionally trim
     * clean pages.
     */
    arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
    /*
     * The first page will always be dirtied during small run
     * initialization, so a validation failure here would not actually
     * cause an observable failure.
     */
    if (config_debug && flag_dirty == 0 &&
        arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
        arena_run_page_validate_zeroed(chunk, run_ind);
    for (i = 1; i < need_pages - 1; i++) {
        arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
        if (config_debug && flag_dirty == 0 &&
            arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
            arena_run_page_validate_zeroed(chunk, run_ind+i);
    }
    arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1,
        binind, flag_dirty);
    if (config_debug && flag_dirty == 0 &&
        arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) == 0)
        arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1);
    VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
        (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
    arena_chunk_t *chunk;

    assert(arena->spare != NULL);

    chunk = arena->spare;
    arena->spare = NULL;
...
@@ -524,18 +534,27 @@ arena_chunk_alloc(arena_t *arena)
    assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
    assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
        arena_maxclass);
    assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
        arena_maxclass);
    assert(arena_mapbits_dirty_get(chunk, map_bias) ==
        arena_mapbits_dirty_get(chunk, chunk_npages-1));

    return (chunk);
}

static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
    arena_chunk_t *chunk;
    bool zero;
    size_t unzeroed, i;

    assert(arena->spare == NULL);

    zero = false;
    malloc_mutex_unlock(&arena->lock);
    chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false,
        &zero, arena->dss_prec);
    malloc_mutex_lock(&arena->lock);
    if (chunk == NULL)
        return (NULL);
...
@@ -545,8 +564,7 @@ arena_chunk_alloc(arena_t *arena)
    chunk->arena = arena;

    /*
     * Claim that no pages are in use, since the header is merely overhead.
     */
    chunk->ndirty = 0;
...
@@ -554,28 +572,52 @@ arena_chunk_alloc(arena_t *arena)
    chunk->nruns_adjac = 0;

    /*
     * Initialize the map to contain one maximal free untouched run.  Mark
     * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
     */
    unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
    arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
        unzeroed);
    /*
     * There is no need to initialize the internal page map entries unless
     * the chunk is not zeroed.
     */
    if (zero == false) {
        VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk,
            map_bias+1), (size_t)((uintptr_t)arena_mapp_get(chunk,
            chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
            map_bias+1)));
        for (i = map_bias+1; i < chunk_npages-1; i++)
            arena_mapbits_unzeroed_set(chunk, i, unzeroed);
    } else {
        VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
            map_bias+1), (size_t)((uintptr_t)arena_mapp_get(chunk,
            chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
            map_bias+1)));
        if (config_debug) {
            for (i = map_bias+1; i < chunk_npages-1; i++) {
                assert(arena_mapbits_unzeroed_get(chunk, i) ==
                    unzeroed);
            }
        }
    }
    arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass,
        unzeroed);

    return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
    arena_chunk_t *chunk;

    if (arena->spare != NULL)
        chunk = arena_chunk_init_spare(arena);
    else {
        chunk = arena_chunk_init_hard(arena);
        if (chunk == NULL)
            return (NULL);
    }

    /* Insert the run into the runs_avail tree. */
...
@@ -618,8 +660,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
}

static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
    arena_run_t *run;
    arena_chunk_map_t *mapelm, key;
...
@@ -634,7 +675,7 @@ arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
        run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
            LG_PAGE));
        arena_run_split_large(arena, run, size, zero);
        return (run);
    }
...
@@ -642,19 +683,16 @@ arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
}

static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
    arena_chunk_t *chunk;
    arena_run_t *run;

    assert(size <= arena_maxclass);
    assert((size & PAGE_MASK) == 0);

    /* Search the arena's chunks for the lowest best fit. */
    run = arena_run_alloc_large_helper(arena, size, zero);
    if (run != NULL)
        return (run);
...
@@ -664,7 +702,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
        arena_run_split_large(arena, run, size, zero);
        return (run);
    }
...
@@ -673,7 +711,63 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
     * sufficient memory available while this one dropped arena->lock in
     * arena_chunk_alloc(), so search one more time.
     */
    return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
{
    arena_run_t *run;
    arena_chunk_map_t *mapelm, key;

    key.bits = size | CHUNK_MAP_KEY;
    mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
    if (mapelm != NULL) {
        arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
        size_t pageind = (((uintptr_t)mapelm -
            (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
            + map_bias;

        run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
            LG_PAGE));
        arena_run_split_small(arena, run, size, binind);
        return (run);
    }

    return (NULL);
}

static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
{
    arena_chunk_t *chunk;
    arena_run_t *run;

    assert(size <= arena_maxclass);
    assert((size & PAGE_MASK) == 0);
    assert(binind != BININD_INVALID);

    /* Search the arena's chunks for the lowest best fit. */
    run = arena_run_alloc_small_helper(arena, size, binind);
    if (run != NULL)
        return (run);

    /*
     * No usable runs.  Create a new chunk from which to allocate the run.
     */
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
        arena_run_split_small(arena, run, size, binind);
        return (run);
    }

    /*
     * arena_chunk_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped arena->lock in
     * arena_chunk_alloc(), so search one more time.
     */
    return (arena_run_alloc_small_helper(arena, size, binind));
}

static inline void
...
@@ -699,48 +793,42 @@ arena_maybe_purge(arena_t *arena)
        arena_purge(arena, false);
}

static arena_chunk_t *
chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
{
    size_t *ndirty = (size_t *)arg;

    assert(chunk->ndirty != 0);
    *ndirty += chunk->ndirty;
    return (NULL);
}

static size_t
arena_compute_npurgatory(arena_t *arena, bool all)
{
    size_t npurgatory, npurgeable;

    /*
     * Compute the minimum number of pages that this thread should try to
     * purge.
     */
    npurgeable = arena->ndirty - arena->npurgatory;

    if (all == false) {
        size_t threshold = (arena->nactive >> opt_lg_dirty_mult);

        npurgatory = npurgeable - threshold;
    } else
        npurgatory = npurgeable;

    return (npurgatory);
}

static void
arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all,
    arena_chunk_mapelms_t *mapelms)
{
    size_t pageind, npages;

    /*
     * Temporarily allocate free dirty runs within chunk.  If all is false,
...
@@ -748,7 +836,7 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
     * all dirty runs.
     */
    for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
        arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
        if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
            size_t run_size =
                arena_mapbits_unallocated_size_get(chunk, pageind);
...
@@ -764,11 +852,11 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
                arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
                    (uintptr_t)(pageind << LG_PAGE));
                arena_run_split_large(arena, run, run_size, false);
                /* Append to list for later processing. */
                ql_elm_new(mapelm, u.ql_link);
                ql_tail_insert(mapelms, mapelm, u.ql_link);
            }
        } else {
            /* Skip run. */
...
@@ -792,12 +880,20 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
    assert(pageind == chunk_npages);
    assert(chunk->ndirty == 0 || all == false);
    assert(chunk->nruns_adjac == 0);
}

static size_t
arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk,
    arena_chunk_mapelms_t *mapelms)
{
    size_t npurged, pageind, npages, nmadvise;
    arena_chunk_map_t *mapelm;

    malloc_mutex_unlock(&arena->lock);
    if (config_stats)
        nmadvise = 0;
    npurged = 0;
    ql_foreach(mapelm, mapelms, u.ql_link) {
        bool unzeroed;
        size_t flag_unzeroed, i;
...
@@ -831,30 +927,75 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
    if (config_stats)
        arena->stats.nmadvise += nmadvise;

    return (npurged);
}

static void
arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk,
    arena_chunk_mapelms_t *mapelms)
{
    arena_chunk_map_t *mapelm;
    size_t pageind;

    /* Deallocate runs. */
    for (mapelm = ql_first(mapelms); mapelm != NULL;
        mapelm = ql_first(mapelms)) {
        arena_run_t *run;

        pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
            sizeof(arena_chunk_map_t)) + map_bias;
        run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
            LG_PAGE));
        ql_remove(mapelms, mapelm, u.ql_link);
        arena_run_dalloc(arena, run, false, true);
    }
}

static inline size_t
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
{
    size_t npurged;
    arena_chunk_mapelms_t mapelms;

    ql_new(&mapelms);

    /*
     * If chunk is the spare, temporarily re-allocate it, 1) so that its
     * run is reinserted into runs_avail, and 2) so that it cannot be
     * completely discarded by another thread while arena->lock is dropped
     * by this thread.  Note that the arena_run_dalloc() call will
     * implicitly deallocate the chunk, so no explicit action is required
     * in this function to deallocate the chunk.
     *
     * Note that once a chunk contains dirty pages, it cannot again contain
     * a single run unless 1) it is a dirty run, or 2) this function purges
     * dirty pages and causes the transition to a single clean run.  Thus
     * (chunk == arena->spare) is possible, but it is not possible for
     * this function to be called on the spare unless it contains a dirty
     * run.
     */
    if (chunk == arena->spare) {
        assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
        assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);

        arena_chunk_alloc(arena);
    }

    if (config_stats)
        arena->stats.purged += chunk->ndirty;

    /*
     * Operate on all dirty runs if there is no clean/dirty run
     * fragmentation.
     */
    if (chunk->nruns_adjac == 0)
        all = true;

    arena_chunk_stash_dirty(arena, chunk, all, &mapelms);
    npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms);
    arena_chunk_unstash_purged(arena, chunk, &mapelms);

    return (npurged);
}

static void
...
@@ -877,21 +1018,11 @@ arena_purge(arena_t *arena, bool all)
        arena->stats.npurge++;

    /*
     * Add the minimum number of pages this thread should try to purge to
     * arena->npurgatory.  This will keep multiple threads from racing to
     * reduce ndirty below the threshold.
     */
    npurgatory = arena_compute_npurgatory(arena, all);
    arena->npurgatory += npurgatory;

    while (npurgatory > 0) {
...
@@ -958,61 +1089,12 @@ arena_purge_all(arena_t *arena)
}

static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
    size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
{
    size_t size = *p_size;
    size_t run_ind = *p_run_ind;
    size_t run_pages = *p_run_pages;

    /* Try to coalesce forward. */
    if (run_ind + run_pages < chunk_npages &&
...
@@ -1042,8 +1124,9 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
    }

    /* Try to coalesce backward. */
    if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
        run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
        flag_dirty) {
        size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
            run_ind-1);
        size_t prun_pages = prun_size >> LG_PAGE;
...
@@ -1068,6 +1151,62 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
            size);
    }

    *p_size = size;
    *p_run_ind = run_ind;
    *p_run_pages = run_pages;
}

static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
{
    arena_chunk_t *chunk;
    size_t size, run_ind, run_pages, flag_dirty;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
    assert(run_ind >= map_bias);
    assert(run_ind < chunk_npages);
    if (arena_mapbits_large_get(chunk, run_ind) != 0) {
        size = arena_mapbits_large_size_get(chunk, run_ind);
        assert(size == PAGE || arena_mapbits_large_size_get(chunk,
            run_ind+(size>>LG_PAGE)-1) == 0);
    } else {
        size_t binind = arena_bin_index(arena, run->bin);
        arena_bin_info_t *bin_info = &arena_bin_info[binind];
        size = bin_info->run_size;
    }
    run_pages = (size >> LG_PAGE);
    arena_cactive_update(arena, 0, run_pages);
    arena->nactive -= run_pages;

    /*
     * The run is dirty if the caller claims to have dirtied it, as well as
     * if it was already dirty before being allocated and the caller
     * doesn't claim to have cleaned it.
     */
    assert(arena_mapbits_dirty_get(chunk, run_ind) ==
        arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
    if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
        dirty = true;
    flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;

    /* Mark pages as unallocated in the chunk map. */
    if (dirty) {
        arena_mapbits_unallocated_set(chunk, run_ind, size,
            CHUNK_MAP_DIRTY);
        arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
            CHUNK_MAP_DIRTY);
    } else {
        arena_mapbits_unallocated_set(chunk, run_ind, size,
            arena_mapbits_unzeroed_get(chunk, run_ind));
        arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
            arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
    }

    arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
        flag_dirty);

    /* Insert into runs_avail, now that coalescing is complete. */
    assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
        arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
...
@@ -1235,14 +1374,12 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
    malloc_mutex_unlock(&bin->lock);
    /******************************/
    malloc_mutex_lock(&arena->lock);
    run = arena_run_alloc_small(arena, bin_info->run_size, binind);
    if (run != NULL) {
        bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
            (uintptr_t)bin_info->bitmap_offset);

        /* Initialize run internals. */
        VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset -
            bin_info->redzone_size);
        run->bin = bin;
        run->nextind = 0;
        run->nfree = bin_info->nregs;
...
@@ -1260,7 +1397,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
    }

    /*
     * arena_run_alloc_small() failed, but another thread may have made
     * sufficient memory available while this one dropped bin->lock above,
     * so search one more time.
     */
...
@@ -1295,12 +1432,12 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
        arena_chunk_t *chunk;

        /*
         * arena_run_alloc_small() may have allocated run, or it may
         * have pulled run from the bin's run tree.  Therefore it is
         * unsafe to make any assumptions about how run has previously
         * been used, and arena_bin_lower_run() must be called, as if
         * a region were just deallocated from the run.
         */
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
        if (run->nfree == bin_info->nregs)
...
@@ -1321,21 +1458,6 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
    return (arena_run_reg_alloc(bin->runcur, bin_info));
}

void
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{

    cassert(config_prof);

    if (config_prof && prof_interval != 0) {
        arena->prof_accumbytes += accumbytes;
        if (arena->prof_accumbytes >= prof_interval) {
            prof_idump();
            arena->prof_accumbytes -= prof_interval;
        }
    }
}

void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
    uint64_t prof_accumbytes)
...
@@ -1347,11 +1469,8 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
    assert(tbin->ncached == 0);

    if (config_prof && arena_prof_accum(arena, prof_accumbytes))
        prof_idump();
    bin = &arena->bins[binind];
    malloc_mutex_lock(&bin->lock);
    for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
...
@@ -1396,8 +1515,28 @@ arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
    }
}

#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
#endif
static void
arena_redzone_corruption(void *ptr, size_t usize, bool after, size_t offset,
    uint8_t byte)
{

    malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
        "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
        after ? "after" : "before", ptr, usize, byte);
}
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption =
    JEMALLOC_N(arena_redzone_corruption_impl);
#endif

static void
arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
{
    size_t size = bin_info->reg_size;
    size_t redzone_size = bin_info->redzone_size;
...
@@ -1405,29 +1544,61 @@ arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
	bool error = false;

	for (i = 1; i <= redzone_size; i++) {
-		unsigned byte;
-		if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
+		uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
+		if (*byte != 0xa5) {
			error = true;
-			malloc_printf("<jemalloc>: Corrupt redzone "
-			    "%zu byte%s before %p (size %zu), byte=%#x\n", i,
-			    (i == 1) ? "" : "s", ptr, size, byte);
+			arena_redzone_corruption(ptr, size, false, i, *byte);
+			if (reset)
+				*byte = 0xa5;
		}
	}
	for (i = 0; i < redzone_size; i++) {
-		unsigned byte;
-		if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
+		uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
+		if (*byte != 0xa5) {
			error = true;
-			malloc_printf("<jemalloc>: Corrupt redzone "
-			    "%zu byte%s after end of %p (size %zu), byte=%#x\n",
-			    i, (i == 1) ? "" : "s", ptr, size, byte);
+			arena_redzone_corruption(ptr, size, true, i, *byte);
+			if (reset)
+				*byte = 0xa5;
		}
	}
	if (opt_abort && error)
		abort();
+}
+
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_small
+#define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
+#endif
+void
+arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
+{
+	size_t redzone_size = bin_info->redzone_size;
+
+	arena_redzones_validate(ptr, bin_info, false);
+	memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
+	    bin_info->reg_interval);
+}
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_small
+#define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
+arena_dalloc_junk_small_t *arena_dalloc_junk_small =
+    JEMALLOC_N(arena_dalloc_junk_small_impl);
+#endif
+
+void
+arena_quarantine_junk_small(void *ptr, size_t usize)
+{
+	size_t binind;
+	arena_bin_info_t *bin_info;
+
+	cassert(config_fill);
+	assert(opt_junk);
+	assert(opt_quarantine);
+	assert(usize <= SMALL_MAXCLASS);
+
+	binind = SMALL_SIZE2BIN(usize);
+	bin_info = &arena_bin_info[binind];
+	arena_redzones_validate(ptr, bin_info, true);
+}

void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
...
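Note on the redzone machinery above: each small region is surrounded by guard bytes set to 0xa5, and they are re-checked when the region is junked or quarantined. The following standalone sketch shows the same guard-byte idea in isolation; the names, the 16-byte guard width, and the malloc-based layout are illustrative assumptions, not jemalloc's internals.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define GUARD_BYTE 0xa5
#define GUARD_SIZE 16   /* hypothetical redzone width */

/* Allocate `size` usable bytes with a guard zone on each side. */
static void *guarded_alloc(size_t size)
{
	uint8_t *raw = malloc(size + 2 * GUARD_SIZE);
	if (raw == NULL)
		return NULL;
	memset(raw, GUARD_BYTE, GUARD_SIZE);                      /* leading redzone */
	memset(raw + GUARD_SIZE + size, GUARD_BYTE, GUARD_SIZE);  /* trailing redzone */
	return raw + GUARD_SIZE;
}

/* Check both redzones before freeing; report every corrupt byte found. */
static void guarded_free(void *ptr, size_t size)
{
	uint8_t *raw = (uint8_t *)ptr - GUARD_SIZE;
	for (size_t i = 0; i < GUARD_SIZE; i++) {
		if (raw[i] != GUARD_BYTE)
			fprintf(stderr, "corrupt redzone byte %zu before %p\n",
			    GUARD_SIZE - i, ptr);
		if (raw[GUARD_SIZE + size + i] != GUARD_BYTE)
			fprintf(stderr, "corrupt redzone byte %zu after %p\n",
			    i + 1, ptr);
	}
	free(raw);
}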
@@ -1459,11 +1630,8 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
		bin->stats.nrequests++;
	}
	malloc_mutex_unlock(&bin->lock);
-	if (config_prof && isthreaded == false) {
-		malloc_mutex_lock(&arena->lock);
-		arena_prof_accum(arena, size);
-		malloc_mutex_unlock(&arena->lock);
-	}
+	if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
+		prof_idump();

	if (zero == false) {
		if (config_fill) {
...
@@ -1473,6 +1641,7 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
}
else
if
(
opt_zero
)
memset
(
ret
,
0
,
size
);
}
VALGRIND_MAKE_MEM_UNDEFINED
(
ret
,
size
);
}
else
{
if
(
config_fill
&&
opt_junk
)
{
arena_alloc_junk_small
(
ret
,
&
arena_bin_info
[
binind
],
...
...
@@ -1489,11 +1658,12 @@ void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
	void *ret;
+	UNUSED bool idump;

	/* Large allocation. */
	size = PAGE_CEILING(size);
	malloc_mutex_lock(&arena->lock);
-	ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
+	ret = (void *)arena_run_alloc_large(arena, size, zero);
	if (ret == NULL) {
		malloc_mutex_unlock(&arena->lock);
		return (NULL);
...
@@ -1507,8 +1677,10 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
	}
-	if (config_prof)
-		arena_prof_accum(arena, size);
+	idump = arena_prof_accum_locked(arena, size);
	malloc_mutex_unlock(&arena->lock);
+	if (config_prof && idump)
+		prof_idump();

	if (zero == false) {
		if (config_fill) {
...
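This hunk and the earlier tcache-fill and arena_malloc_small hunks all follow the same pattern: the profiling counter is updated while the arena lock is held, but the expensive prof_idump() call is deferred until after the lock is dropped, based on a flag returned from the locked accumulation. A minimal pthreads sketch of that "record under the lock, act after unlocking" pattern; all names here are illustrative, not jemalloc APIs.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t accum_bytes;
static const uint64_t dump_interval = 1 << 20;

/* Stand-in for an expensive operation that must not run under counter_lock. */
static void expensive_dump(void)
{
	fprintf(stderr, "dump triggered\n");
}

void account_alloc(uint64_t nbytes)
{
	bool do_dump = false;

	pthread_mutex_lock(&counter_lock);
	accum_bytes += nbytes;
	if (accum_bytes >= dump_interval) {
		accum_bytes -= dump_interval;
		do_dump = true;           /* just remember the decision */
	}
	pthread_mutex_unlock(&counter_lock);

	if (do_dump)
		expensive_dump();         /* runs without the counter lock held */
}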
@@ -1537,7 +1709,7 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
	alloc_size = size + alignment - PAGE;

	malloc_mutex_lock(&arena->lock);
-	run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
+	run = arena_run_alloc_large(arena, alloc_size, false);
	if (run == NULL) {
		malloc_mutex_unlock(&arena->lock);
		return (NULL);
...
@@ -1557,6 +1729,7 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
		arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
		    false);
	}
+	arena_run_init_large(arena, (arena_run_t *)ret, size, zero);

	if (config_stats) {
		arena->stats.nmalloc_large++;
...
@@ -1760,21 +1933,38 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
	arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
}

+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_large
+#define	arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
+#endif
+static void
+arena_dalloc_junk_large(void *ptr, size_t usize)
+{
+
+	if (config_fill && opt_junk)
+		memset(ptr, 0x5a, usize);
+}
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_large
+#define	arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
+arena_dalloc_junk_large_t *arena_dalloc_junk_large =
+    JEMALLOC_N(arena_dalloc_junk_large_impl);
+#endif
+
void
arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{

	if (config_fill || config_stats) {
		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-		size_t size = arena_mapbits_large_size_get(chunk, pageind);
+		size_t usize = arena_mapbits_large_size_get(chunk, pageind);

-		if (config_fill && config_stats && opt_junk)
-			memset(ptr, 0x5a, size);
+		arena_dalloc_junk_large(ptr, usize);
		if (config_stats) {
			arena->stats.ndalloc_large++;
-			arena->stats.allocated_large -= size;
-			arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
-			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
+			arena->stats.allocated_large -= usize;
+			arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++;
+			arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--;
		}
	}
...
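The #ifdef JEMALLOC_JET blocks that now wrap arena_dalloc_junk_small, arena_dalloc_junk_large (and, later in this diff, arena_ralloc_junk_large and huge_dalloc_junk) compile the real function under an *_impl alias and expose a function pointer of the public name, so a test build can interpose its own hook while production builds call the function directly. Below is a reduced sketch of that indirection under a hypothetical TEST_HOOKS macro; it is an illustration of the pattern, not jemalloc's JEMALLOC_N machinery.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#ifdef TEST_HOOKS
#define junk_fill junk_fill_impl   /* compile the real code under an alias */
#endif
static void
junk_fill(void *ptr, size_t usize)
{
	memset(ptr, 0x5a, usize);      /* the actual implementation */
}
#ifdef TEST_HOOKS
#undef junk_fill
/* Callers resolve junk_fill through this pointer, which a test may replace. */
void (*junk_fill)(void *, size_t) = junk_fill_impl;
#endif

int main(void)
{
	char buf[8];
	junk_fill(buf, sizeof(buf));   /* same call syntax either way */
	printf("%d\n", buf[0] == 0x5a);
	return 0;
}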
@@ -1845,9 +2035,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t
flag_dirty
;
size_t
splitsize
=
(
oldsize
+
followsize
<=
size
+
extra
)
?
followsize
:
size
+
extra
-
oldsize
;
arena_run_split
(
arena
,
(
arena_run_t
*
)((
uintptr_t
)
chunk
+
((
pageind
+
npages
)
<<
LG_PAGE
)),
splitsize
,
true
,
BININD_INVALID
,
zero
);
arena_run_split_large
(
arena
,
(
arena_run_t
*
)((
uintptr_t
)
chunk
+
((
pageind
+
npages
)
<<
LG_PAGE
)),
splitsize
,
zero
);
size
=
oldsize
+
splitsize
;
npages
=
size
>>
LG_PAGE
;
...
...
@@ -1886,6 +2075,26 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
	return (true);
}

+#ifdef JEMALLOC_JET
+#undef arena_ralloc_junk_large
+#define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
+#endif
+static void
+arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
+{
+
+	if (config_fill && opt_junk) {
+		memset((void *)((uintptr_t)ptr + usize), 0x5a,
+		    old_usize - usize);
+	}
+}
+#ifdef JEMALLOC_JET
+#undef arena_ralloc_junk_large
+#define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
+arena_ralloc_junk_large_t *arena_ralloc_junk_large =
+    JEMALLOC_N(arena_ralloc_junk_large_impl);
+#endif
+
/*
 * Try to resize a large allocation, in order to avoid copying.  This will
 * always fail if growing an object, and the following run is already in use.
...
@@ -1899,10 +2108,6 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
psize
=
PAGE_CEILING
(
size
+
extra
);
if
(
psize
==
oldsize
)
{
/* Same size class. */
if
(
config_fill
&&
opt_junk
&&
size
<
oldsize
)
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
size
),
0x5a
,
oldsize
-
size
);
}
return
(
false
);
}
else
{
arena_chunk_t
*
chunk
;
...
...
@@ -1913,10 +2118,7 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
if
(
psize
<
oldsize
)
{
/* Fill before shrinking in order avoid a race. */
if
(
config_fill
&&
opt_junk
)
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
size
),
0x5a
,
oldsize
-
size
);
}
arena_ralloc_junk_large
(
ptr
,
oldsize
,
psize
);
arena_ralloc_large_shrink
(
arena
,
chunk
,
ptr
,
oldsize
,
psize
);
return
(
false
);
...
...
@@ -1924,17 +2126,23 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
bool
ret
=
arena_ralloc_large_grow
(
arena
,
chunk
,
ptr
,
oldsize
,
PAGE_CEILING
(
size
),
psize
-
PAGE_CEILING
(
size
),
zero
);
if
(
config_fill
&&
ret
==
false
&&
zero
==
false
&&
opt_zero
)
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
oldsize
),
0
,
size
-
oldsize
);
if
(
config_fill
&&
ret
==
false
&&
zero
==
false
)
{
if
(
opt_junk
)
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
oldsize
),
0xa5
,
isalloc
(
ptr
,
config_prof
)
-
oldsize
);
}
else
if
(
opt_zero
)
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
oldsize
),
0
,
isalloc
(
ptr
,
config_prof
)
-
oldsize
);
}
}
return
(
ret
);
}
}
}
void
*
bool
arena_ralloc_no_move
(
void
*
ptr
,
size_t
oldsize
,
size_t
size
,
size_t
extra
,
bool
zero
)
{
...
...
@@ -1949,25 +2157,20 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
if
((
size
+
extra
<=
SMALL_MAXCLASS
&&
SMALL_SIZE2BIN
(
size
+
extra
)
==
SMALL_SIZE2BIN
(
oldsize
))
||
(
size
<=
oldsize
&&
size
+
extra
>=
oldsize
))
{
if
(
config_fill
&&
opt_junk
&&
size
<
oldsize
)
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
size
),
0x5a
,
oldsize
-
size
);
}
return
(
ptr
);
}
size
+
extra
>=
oldsize
))
return
(
false
);
}
else
{
assert
(
size
<=
arena_maxclass
);
if
(
size
+
extra
>
SMALL_MAXCLASS
)
{
if
(
arena_ralloc_large
(
ptr
,
oldsize
,
size
,
extra
,
zero
)
==
false
)
return
(
ptr
);
return
(
false
);
}
}
}
/* Reallocation would require a move. */
return
(
NULL
);
return
(
true
);
}
void
*
...
...
@@ -1979,9 +2182,8 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t
copysize
;
/* Try to avoid moving the allocation. */
ret
=
arena_ralloc_no_move
(
ptr
,
oldsize
,
size
,
extra
,
zero
);
if
(
ret
!=
NULL
)
return
(
ret
);
if
(
arena_ralloc_no_move
(
ptr
,
oldsize
,
size
,
extra
,
zero
)
==
false
)
return
(
ptr
);
/*
* size and oldsize are different enough that we need to move the
...
...
@@ -1992,7 +2194,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t
usize
=
sa2u
(
size
+
extra
,
alignment
);
if
(
usize
==
0
)
return
(
NULL
);
ret
=
ipalloc
x
(
usize
,
alignment
,
zero
,
try_tcache_alloc
,
arena
);
ret
=
ipalloc
t
(
usize
,
alignment
,
zero
,
try_tcache_alloc
,
arena
);
}
else
ret
=
arena_malloc
(
arena
,
size
+
extra
,
zero
,
try_tcache_alloc
);
...
...
@@ -2004,7 +2206,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t
usize
=
sa2u
(
size
,
alignment
);
if
(
usize
==
0
)
return
(
NULL
);
ret
=
ipalloc
x
(
usize
,
alignment
,
zero
,
try_tcache_alloc
,
ret
=
ipalloc
t
(
usize
,
alignment
,
zero
,
try_tcache_alloc
,
arena
);
}
else
ret
=
arena_malloc
(
arena
,
size
,
zero
,
try_tcache_alloc
);
...
...
@@ -2022,7 +2224,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
copysize
=
(
size
<
oldsize
)
?
size
:
oldsize
;
VALGRIND_MAKE_MEM_UNDEFINED
(
ret
,
copysize
);
memcpy
(
ret
,
ptr
,
copysize
);
iqalloc
x
(
ptr
,
try_tcache_dalloc
);
iqalloc
t
(
ptr
,
try_tcache_dalloc
);
return
(
ret
);
}
...
...
@@ -2277,7 +2479,6 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
			    bin_info->reg_interval) - pad_size;
		} while (try_hdr_size > try_redzone0_offset);
	} while (try_run_size <= arena_maxclass &&
-	    try_run_size <= arena_maxclass &&
	    RUN_MAX_OVRHD * (bin_info->reg_interval << 3) > RUN_MAX_OVRHD_RELAX &&
	    (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
...
deps/jemalloc/src/base.c  View file @ fceef8e0
...
@@ -63,6 +63,7 @@ base_alloc(size_t size)
	ret = base_next_addr;
	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
	malloc_mutex_unlock(&base_mtx);
+	VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);

	return (ret);
}
...
@@ -88,6 +89,7 @@ base_node_alloc(void)
		ret = base_nodes;
		base_nodes = *(extent_node_t **)ret;
		malloc_mutex_unlock(&base_mtx);
+		VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
...
@@ -100,6 +102,7 @@ void
base_node_dealloc(extent_node_t *node)
{

+	VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	malloc_mutex_lock(&base_mtx);
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
...
deps/jemalloc/src/bitmap.c  View file @ fceef8e0

deps/jemalloc/src/chunk.c  View file @ fceef8e0
...
@@ -78,6 +78,9 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
	assert(node->size >= leadsize + size);
	trailsize = node->size - leadsize - size;
	ret = (void *)((uintptr_t)node->addr + leadsize);
+	zeroed = node->zeroed;
+	if (zeroed)
+		*zero = true;
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
...
@@ -108,23 +111,26 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
		}
		node->addr = (void *)((uintptr_t)(ret) + size);
		node->size = trailsize;
+		node->zeroed = zeroed;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	malloc_mutex_unlock(&chunks_mtx);

-	zeroed = false;
-	if (node != NULL) {
-		if (node->zeroed) {
-			zeroed = true;
-			*zero = true;
-		}
+	if (node != NULL)
		base_node_dealloc(node);
-	}
-	if (zeroed == false && *zero) {
-		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
-		memset(ret, 0, size);
+	if (*zero) {
+		if (zeroed == false)
+			memset(ret, 0, size);
+		else if (config_debug) {
+			size_t i;
+			size_t *p = (size_t *)(uintptr_t)ret;
+
+			VALGRIND_MAKE_MEM_DEFINED(ret, size);
+			for (i = 0; i < size / sizeof(size_t); i++)
+				assert(p[i] == 0);
+		}
	}
	return (ret);
}
...
@@ -172,20 +178,22 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
	/* All strategies for allocation failed. */
	ret = NULL;
label_return:
-	if (config_ivsalloc && base == false && ret != NULL) {
-		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
+	if (ret != NULL) {
+		if (config_ivsalloc && base == false) {
+			if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
				chunk_dealloc(ret, size, true);
				return (NULL);
			}
		}
-	}
-	if ((config_stats || config_prof) && ret != NULL) {
+		if (config_stats || config_prof) {
			bool gdump;
			malloc_mutex_lock(&chunks_mtx);
			if (config_stats)
				stats_chunks.nchunks += (size / chunksize);
			stats_chunks.curchunks += (size / chunksize);
			if (stats_chunks.curchunks > stats_chunks.highchunks) {
-				stats_chunks.highchunks = stats_chunks.curchunks;
+				stats_chunks.highchunks =
+				    stats_chunks.curchunks;
				if (config_prof)
					gdump = true;
			} else if (config_prof)
...
@@ -194,13 +202,8 @@ label_return:
if
(
config_prof
&&
opt_prof
&&
opt_prof_gdump
&&
gdump
)
prof_gdump
();
}
if
(
config_debug
&&
*
zero
&&
ret
!=
NULL
)
{
size_t
i
;
size_t
*
p
=
(
size_t
*
)(
uintptr_t
)
ret
;
VALGRIND_MAKE_MEM_DEFINED
(
ret
,
size
);
for
(
i
=
0
;
i
<
size
/
sizeof
(
size_t
);
i
++
)
assert
(
p
[
i
]
==
0
);
if
(
config_valgrind
)
VALGRIND_MAKE_MEM_UNDEFINED
(
ret
,
size
);
}
assert
(
CHUNK_ADDR2BASE
(
ret
)
==
ret
);
return
(
ret
);
...
...
@@ -211,9 +214,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
    size_t size)
{
	bool unzeroed;
-	extent_node_t *xnode, *node, *prev, key;
+	extent_node_t *xnode, *node, *prev, *xprev, key;

	unzeroed = pages_purge(chunk, size);
	VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

	/*
	 * Allocate a node before acquiring chunks_mtx even though it might not
...
@@ -222,6 +226,8 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
	 * held.
	 */
	xnode = base_node_alloc();
+	/* Use xprev to implement conditional deferred deallocation of prev. */
+	xprev = NULL;

	malloc_mutex_lock(&chunks_mtx);
	key.addr = (void *)((uintptr_t)chunk + size);
...
@@ -238,8 +244,6 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
		node->size += size;
		node->zeroed = (node->zeroed && (unzeroed == false));
		extent_tree_szad_insert(chunks_szad, node);
-		if (xnode != NULL)
-			base_node_dealloc(xnode);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		if (xnode == NULL) {
...
@@ -249,10 +253,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
			 * already been purged, so this is only a virtual
			 * memory leak.
			 */
			malloc_mutex_unlock(&chunks_mtx);
-			return;
+			goto label_return;
		}
		node = xnode;
		xnode = NULL; /* Prevent deallocation below. */
		node->addr = chunk;
		node->size = size;
		node->zeroed = (unzeroed == false);
...
@@ -278,9 +282,19 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
node
->
zeroed
=
(
node
->
zeroed
&&
prev
->
zeroed
);
extent_tree_szad_insert
(
chunks_szad
,
node
);
base_node_dealloc
(
prev
)
;
xprev
=
prev
;
}
label_return:
malloc_mutex_unlock
(
&
chunks_mtx
);
/*
* Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
* avoid potential deadlock.
*/
if
(
xnode
!=
NULL
)
base_node_dealloc
(
xnode
);
if
(
xprev
!=
NULL
)
base_node_dealloc
(
xprev
);
}
void
...
...
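The chunk_record hunk above is a lock-ordering fix: instead of calling base_node_dealloc() (which takes base_mtx) while chunks_mtx is still held, the nodes to discard are stashed in xnode/xprev and freed only after chunks_mtx is released. A minimal sketch of that "unlink under the lock, deallocate after unlocking" discipline, with illustrative types rather than jemalloc's extent nodes:

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; int key; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Remove the first node with a matching key. free() may itself acquire
 * other locks, so it is called only after list_lock has been released. */
void remove_key(int key)
{
	struct node **pp, *victim = NULL;

	pthread_mutex_lock(&list_lock);
	for (pp = &head; *pp != NULL; pp = &(*pp)->next) {
		if ((*pp)->key == key) {
			victim = *pp;
			*pp = victim->next;   /* unlink now... */
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);

	if (victim != NULL)
		free(victim);                 /* ...deallocate later */
}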
@@ -307,7 +321,7 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
assert
((
size
&
chunksize_mask
)
==
0
);
if
(
config_ivsalloc
)
rtree_set
(
chunks_rtree
,
(
uintptr_t
)
chunk
,
NULL
);
rtree_set
(
chunks_rtree
,
(
uintptr_t
)
chunk
,
0
);
if
(
config_stats
||
config_prof
)
{
malloc_mutex_lock
(
&
chunks_mtx
);
assert
(
stats_chunks
.
curchunks
>=
(
size
/
chunksize
));
...
...
@@ -342,7 +356,7 @@ chunk_boot(void)
extent_tree_ad_new
(
&
chunks_ad_dss
);
if
(
config_ivsalloc
)
{
chunks_rtree
=
rtree_new
((
ZU
(
1
)
<<
(
LG_SIZEOF_PTR
+
3
))
-
opt_lg_chunk
);
opt_lg_chunk
,
base_alloc
,
NULL
);
if
(
chunks_rtree
==
NULL
)
return
(
true
);
}
...
...
@@ -354,7 +368,7 @@ void
chunk_prefork(void)
{

-	malloc_mutex_lock(&chunks_mtx);
+	malloc_mutex_prefork(&chunks_mtx);
	if (config_ivsalloc)
		rtree_prefork(chunks_rtree);
	chunk_dss_prefork();
...
deps/jemalloc/src/chunk_dss.c  View file @ fceef8e0
...
@@ -28,16 +28,17 @@ static void *dss_max;

/******************************************************************************/

-#ifndef JEMALLOC_HAVE_SBRK
static void *
-sbrk(intptr_t increment)
+chunk_dss_sbrk(intptr_t increment)
{

+#ifdef JEMALLOC_HAVE_SBRK
+	return (sbrk(increment));
+#else
	not_implemented();

	return (NULL);
-}
#endif
+}

dss_prec_t
chunk_dss_prec_get(void)
...
@@ -93,7 +94,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
		 */
		do {
			/* Get the current end of the DSS. */
-			dss_max = sbrk(0);
+			dss_max = chunk_dss_sbrk(0);
			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
...
@@ -117,7 +118,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
				return (NULL);
			}
			incr = gap_size + cpad_size + size;
-			dss_prev = sbrk(incr);
+			dss_prev = chunk_dss_sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
...
@@ -163,7 +164,7 @@ chunk_dss_boot(void)
	if (malloc_mutex_init(&dss_mtx))
		return (true);
-	dss_base = sbrk(0);
+	dss_base = chunk_dss_sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;
...
deps/jemalloc/src/chunk_mmap.c  View file @ fceef8e0
...
@@ -43,7 +43,7 @@ pages_map(void *addr, size_t size)
		if (munmap(ret, size) == -1) {
			char buf[BUFERROR_BUF];

-			buferror(buf, sizeof(buf));
+			buferror(get_errno(), buf, sizeof(buf));
			malloc_printf("<jemalloc: Error in munmap(): %s\n", buf);
			if (opt_abort)
...
@@ -69,7 +69,7 @@ pages_unmap(void *addr, size_t size)
	{
		char buf[BUFERROR_BUF];

-		buferror(buf, sizeof(buf));
+		buferror(get_errno(), buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
		    "VirtualFree"
...
deps/jemalloc/src/ckh.c  View file @ fceef8e0
...
@@ -49,7 +49,7 @@ static void	ckh_shrink(ckh_t *ckh);
 * Search bucket for key and return the cell number if found; SIZE_T_MAX
 * otherwise.
 */
-JEMALLOC_INLINE size_t
+JEMALLOC_INLINE_C size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
{
	ckhc_t *cell;
...
@@ -67,28 +67,28 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
/*
 * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
 */
-JEMALLOC_INLINE size_t
+JEMALLOC_INLINE_C size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
-	size_t hash1, hash2, bucket, cell;
+	size_t hashes[2], bucket, cell;

	assert(ckh != NULL);

-	ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
+	ckh->hash(key, hashes);

	/* Search primary bucket. */
-	bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+	bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
	cell = ckh_bucket_search(ckh, bucket, key);
	if (cell != SIZE_T_MAX)
		return (cell);

	/* Search secondary bucket. */
-	bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+	bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
	cell = ckh_bucket_search(ckh, bucket, key);
	return (cell);
}

-JEMALLOC_INLINE bool
+JEMALLOC_INLINE_C bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
    const void *data)
{
...
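The ckh changes switch the hash callback from returning two separate size_t values to filling a two-element array, and every lookup then masks hashes[0] and hashes[1] down to a primary and a secondary bucket. The sketch below shows that two-choice lookup shape on its own; the toy hash function, table layout, and names are assumptions for illustration, not jemalloc's hash() or cuckoo table.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Toy hash that fills r_hash[0] and r_hash[1]; not jemalloc's hash(). */
static void toy_hash(const void *key, size_t len, uint32_t seed, size_t r_hash[2])
{
	const unsigned char *p = key;
	size_t h0 = seed, h1 = ~(size_t)seed;
	for (size_t i = 0; i < len; i++) {
		h0 = h0 * 131 + p[i];
		h1 = h1 * 65599 + p[i];
	}
	r_hash[0] = h0;
	r_hash[1] = h1;
}

/* Two-choice lookup: a key may live in either of its two candidate buckets. */
static bool table_contains(const char **table, size_t lg_buckets, const char *key)
{
	size_t hashes[2], mask = ((size_t)1 << lg_buckets) - 1;

	toy_hash(key, strlen(key), 0x94122f33U, hashes);
	size_t primary = hashes[0] & mask;
	size_t secondary = hashes[1] & mask;
	return (table[primary] != NULL && strcmp(table[primary], key) == 0) ||
	    (table[secondary] != NULL && strcmp(table[secondary], key) == 0);
}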
@@ -120,13 +120,13 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
 * eviction/relocation procedure until either success or detection of an
 * eviction/relocation bucket cycle.
 */
-JEMALLOC_INLINE bool
+JEMALLOC_INLINE_C bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
    void const **argdata)
{
	const void *key, *data, *tkey, *tdata;
	ckhc_t *cell;
-	size_t hash1, hash2, bucket, tbucket;
+	size_t hashes[2], bucket, tbucket;
	unsigned i;

	bucket = argbucket;
...
@@ -155,10 +155,11 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
#endif

		/* Find the alternate bucket for the evicted item. */
-		ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
-		tbucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+		ckh->hash(key, hashes);
+		tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
		if (tbucket == bucket) {
-			tbucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+			tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) -
+			    1);
			/*
			 * It may be that (tbucket == bucket) still, if the
			 * item's hashes both indicate this bucket.  However,
...
@@ -189,22 +190,22 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
	}
}

-JEMALLOC_INLINE bool
+JEMALLOC_INLINE_C bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
{
-	size_t hash1, hash2, bucket;
+	size_t hashes[2], bucket;
	const void *key = *argkey;
	const void *data = *argdata;

-	ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
+	ckh->hash(key, hashes);

	/* Try to insert in primary bucket. */
-	bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+	bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
	if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
		return (false);

	/* Try to insert in secondary bucket. */
-	bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+	bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
	if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
		return (false);
...
@@ -218,7 +219,7 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
 * Try to rebuild the hash table from scratch by inserting all items from the
 * old table into the new.
 */
-JEMALLOC_INLINE bool
+JEMALLOC_INLINE_C bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
{
	size_t count, i, nins;
...
@@ -417,9 +418,8 @@ ckh_delete(ckh_t *ckh)
#endif

	idalloc(ckh->tab);
-#ifdef JEMALLOC_DEBUG
-	memset(ckh, 0x5a, sizeof(ckh_t));
-#endif
+	if (config_debug)
+		memset(ckh, 0x5a, sizeof(ckh_t));
}

size_t
...
@@ -526,31 +526,10 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
}

void
-ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
+ckh_string_hash(const void *key, size_t r_hash[2])
{
-	size_t ret1, ret2;
-	uint64_t h;
-
-	assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
-	assert(hash1 != NULL);
-	assert(hash2 != NULL);
-
-	h = hash(key, strlen((const char *)key), UINT64_C(0x94122f335b332aea));
-	if (minbits <= 32) {
-		/*
-		 * Avoid doing multiple hashes, since a single hash provides
-		 * enough bits.
-		 */
-		ret1 = h & ZU(0xffffffffU);
-		ret2 = h >> 32;
-	} else {
-		ret1 = h;
-		ret2 = hash(key, strlen((const char *)key),
-		    UINT64_C(0x8432a476666bbc13));
-	}
-
-	*hash1 = ret1;
-	*hash2 = ret2;
+
+	hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}

bool
...
@@ -564,41 +543,16 @@ ckh_string_keycomp(const void *k1, const void *k2)
}

void
-ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
-    size_t *hash2)
+ckh_pointer_hash(const void *key, size_t r_hash[2])
{
-	size_t ret1, ret2;
-	uint64_t h;
	union {
		const void	*v;
-		uint64_t	i;
+		size_t		i;
	} u;

-	assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
-	assert(hash1 != NULL);
-	assert(hash2 != NULL);
-
	assert(sizeof(u.v) == sizeof(u.i));
#if (LG_SIZEOF_PTR != LG_SIZEOF_INT)
	u.i = 0;
#endif
	u.v = key;
-	h = hash(&u.i, sizeof(u.i), UINT64_C(0xd983396e68886082));
-	if (minbits <= 32) {
-		/*
-		 * Avoid doing multiple hashes, since a single hash provides
-		 * enough bits.
-		 */
-		ret1 = h & ZU(0xffffffffU);
-		ret2 = h >> 32;
-	} else {
-		assert(SIZEOF_PTR == 8);
-		ret1 = h;
-		ret2 = hash(&u.i, sizeof(u.i), UINT64_C(0x5e2be9aff8709a5d));
-	}
-
-	*hash1 = ret1;
-	*hash2 = ret2;
+	hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
}

bool
...
deps/jemalloc/src/ctl.c  View file @ fceef8e0
...
@@ -546,43 +546,30 @@ ctl_arena_refresh(arena_t *arena, unsigned i)
static bool
ctl_grow(void)
{
-	size_t astats_size;
	ctl_arena_stats_t *astats;
	arena_t **tarenas;

-	/* Extend arena stats and arenas arrays. */
-	astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t);
-	if (ctl_stats.narenas == narenas_auto) {
-		/* ctl_stats.arenas and arenas came from base_alloc(). */
-		astats = (ctl_arena_stats_t *)imalloc(astats_size);
-		if (astats == NULL)
-			return (true);
-		memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
-		    sizeof(ctl_arena_stats_t));
-
-		tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
-		    sizeof(arena_t *));
-		if (tarenas == NULL) {
-			idalloc(astats);
-			return (true);
-		}
-		memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *));
-	} else {
-		astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas,
-		    astats_size, 0, 0, false, false);
-		if (astats == NULL)
-			return (true);
-
-		tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) *
-		    sizeof(arena_t *), 0, 0, false, false);
-		if (tarenas == NULL)
-			return (true);
-	}
-	/* Initialize the new astats and arenas elements. */
+	/* Allocate extended arena stats and arenas arrays. */
+	astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) *
+	    sizeof(ctl_arena_stats_t));
+	if (astats == NULL)
+		return (true);
+	tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
+	    sizeof(arena_t *));
+	if (tarenas == NULL) {
+		idalloc(astats);
+		return (true);
+	}
+
+	/* Initialize the new astats element. */
+	memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
+	    sizeof(ctl_arena_stats_t));
	memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
-	if (ctl_arena_init(&astats[ctl_stats.narenas + 1]))
-		return (true);
-	tarenas[ctl_stats.narenas] = NULL;
+	if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
+		idalloc(tarenas);
+		idalloc(astats);
+		return (true);
+	}
	/* Swap merged stats to their new location. */
	{
		ctl_arena_stats_t tstats;
...
@@ -593,13 +580,34 @@ ctl_grow(void)
		memcpy(&astats[ctl_stats.narenas + 1], &tstats,
		    sizeof(ctl_arena_stats_t));
	}
-	ctl_stats.arenas = astats;
-	ctl_stats.narenas++;
+	/* Initialize the new arenas element. */
+	tarenas[ctl_stats.narenas] = NULL;
+	{
+		arena_t **arenas_old = arenas;
+		/*
+		 * Swap extended arenas array into place.  Although ctl_mtx
+		 * protects this function from other threads extending the
+		 * array, it does not protect from other threads mutating it
+		 * (i.e. initializing arenas and setting array elements to
+		 * point to them).  Therefore, array copying must happen under
+		 * the protection of arenas_lock.
+		 */
+		malloc_mutex_lock(&arenas_lock);
+		arenas = tarenas;
+		memcpy(arenas, arenas_old, ctl_stats.narenas *
+		    sizeof(arena_t *));
+		narenas_total++;
+		arenas_extend(narenas_total - 1);
+		malloc_mutex_unlock(&arenas_lock);
+		/*
+		 * Deallocate arenas_old only if it came from imalloc() (not
+		 * base_alloc()).
+		 */
+		if (ctl_stats.narenas != narenas_auto)
+			idalloc(arenas_old);
+	}
+	ctl_stats.arenas = astats;
+	ctl_stats.narenas++;

	return (false);
}
...
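The reworked ctl_grow allocates the larger arenas array up front, then publishes it and copies the old contents while holding arenas_lock, and only afterwards frees the old array. A condensed sketch of that grow-and-swap sequence follows; the globals and the single-lock model are illustrative assumptions (readers are assumed to access the table only under the same lock), not jemalloc's arenas/ctl locking scheme.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void **table;       /* shared array of slots, accessed under table_lock */
static size_t table_len;   /* current number of slots */

/* Grow the shared array by one slot; returns true on error. */
bool table_grow(void)
{
	/* Allocate the bigger array without holding the lock. */
	void **grown = malloc((table_len + 1) * sizeof(void *));
	if (grown == NULL)
		return true;

	pthread_mutex_lock(&table_lock);
	/* Copy under the lock so slot updates made by other threads are not
	 * lost, then publish the new array and the new length together. */
	memcpy(grown, table, table_len * sizeof(void *));
	grown[table_len] = NULL;
	void **old = table;
	table = grown;
	table_len++;
	pthread_mutex_unlock(&table_lock);

	free(old);   /* safe here because readers only reach the array via table_lock */
	return false;
}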
@@ -921,7 +929,7 @@ void
ctl_prefork(void)
{

-	malloc_mutex_lock(&ctl_mtx);
+	malloc_mutex_prefork(&ctl_mtx);
}

void
...
@@ -960,11 +968,11 @@ ctl_postfork_child(void)
	if (*oldlenp != sizeof(t)) { \
		size_t copylen = (sizeof(t) <= *oldlenp) \
		    ? sizeof(t) : *oldlenp; \
-		memcpy(oldp, (void *)&v, copylen); \
+		memcpy(oldp, (void *)&(v), copylen); \
		ret = EINVAL; \
		goto label_return; \
	} else \
-		*(t *)oldp = v; \
+		*(t *)oldp = (v); \
	} \
} while (0)
...
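The READ/WRITE macro edits here and below simply wrap the v parameter as (v), so that an argument containing an operator is treated as a single unit inside the expansion. A generic two-line illustration of the precedence hazard the parentheses avoid (not the jemalloc macros themselves):

#define BAD_ASSIGN(dst, v)  dst = v + 1
#define GOOD_ASSIGN(dst, v) (dst) = (v) + 1

int main(void)
{
	int a = 0, b = 2, c = 3;
	BAD_ASSIGN(a, b & c);   /* expands to a = b & c + 1  -> 2 & 4 == 0 */
	GOOD_ASSIGN(a, b & c);  /* expands to (a) = (b & c) + 1 -> (2 & 3) + 1 == 3 */
	return a;
}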
@@ -974,7 +982,7 @@ ctl_postfork_child(void)
		ret = EINVAL; \
		goto label_return; \
	} \
-	v = *(t *)newp; \
+	(v) = *(t *)newp; \
	} \
} while (0)
...
@@ -995,7 +1003,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
	if (l) \
		malloc_mutex_lock(&ctl_mtx); \
	READONLY(); \
-	oldval = v; \
+	oldval = (v); \
	READ(oldval, t); \
	\
	ret = 0; \
...
@@ -1017,7 +1025,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
		return (ENOENT); \
	malloc_mutex_lock(&ctl_mtx); \
	READONLY(); \
-	oldval = v; \
+	oldval = (v); \
	READ(oldval, t); \
	\
	ret = 0; \
...
@@ -1036,7 +1044,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
	\
	malloc_mutex_lock(&ctl_mtx); \
	READONLY(); \
-	oldval = v; \
+	oldval = (v); \
	READ(oldval, t); \
	\
	ret = 0; \
...
@@ -1060,7 +1068,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
	if ((c) == false) \
		return (ENOENT); \
	READONLY(); \
-	oldval = v; \
+	oldval = (v); \
	READ(oldval, t); \
	\
	ret = 0; \
...
@@ -1077,7 +1085,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
	t oldval; \
	\
	READONLY(); \
-	oldval = v; \
+	oldval = (v); \
	READ(oldval, t); \
	\
	ret = 0; \
...
@@ -1102,6 +1110,8 @@ label_return: \
	return (ret); \
}

/******************************************************************************/

+CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
+
static int
...
@@ -1109,7 +1119,7 @@ epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
-	uint64_t newval;
+	UNUSED uint64_t newval;

	malloc_mutex_lock(&ctl_mtx);
	WRITE(newval, uint64_t);
...
@@ -1123,49 +1133,52 @@ label_return:
	return (ret);
}

-static int
-thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-	bool oldval;
-
-	if (config_tcache == false)
-		return (ENOENT);
-
-	oldval = tcache_enabled_get();
-	if (newp != NULL) {
-		if (newlen != sizeof(bool)) {
-			ret = EINVAL;
-			goto label_return;
-		}
-		tcache_enabled_set(*(bool *)newp);
-	}
-	READ(oldval, bool);
-
-	ret = 0;
-label_return:
-	return (ret);
-}
-
-static int
-thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
-{
-	int ret;
-
-	if (config_tcache == false)
-		return (ENOENT);
-
-	READONLY();
-	WRITEONLY();
-
-	tcache_flush();
-
-	ret = 0;
-label_return:
-	return (ret);
-}
+/******************************************************************************/
+
+CTL_RO_BOOL_CONFIG_GEN(config_debug)
+CTL_RO_BOOL_CONFIG_GEN(config_dss)
+CTL_RO_BOOL_CONFIG_GEN(config_fill)
+CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
+CTL_RO_BOOL_CONFIG_GEN(config_mremap)
+CTL_RO_BOOL_CONFIG_GEN(config_munmap)
+CTL_RO_BOOL_CONFIG_GEN(config_prof)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
+CTL_RO_BOOL_CONFIG_GEN(config_stats)
+CTL_RO_BOOL_CONFIG_GEN(config_tcache)
+CTL_RO_BOOL_CONFIG_GEN(config_tls)
+CTL_RO_BOOL_CONFIG_GEN(config_utrace)
+CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
+CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
+
+/******************************************************************************/
+
+CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
+CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
+CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
+CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
+CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
+CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
+CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
+CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
+CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
+CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
+CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
+CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
+CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
+CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)

/******************************************************************************/

static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
...
@@ -1227,50 +1240,49 @@ CTL_RO_NL_CGEN(config_stats, thread_deallocated,
CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
    &thread_allocated_tsd_get()->deallocated, uint64_t *)

-CTL_RO_BOOL_CONFIG_GEN(config_debug)
-CTL_RO_BOOL_CONFIG_GEN(config_dss)
-CTL_RO_BOOL_CONFIG_GEN(config_fill)
-CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
-CTL_RO_BOOL_CONFIG_GEN(config_mremap)
-CTL_RO_BOOL_CONFIG_GEN(config_munmap)
-CTL_RO_BOOL_CONFIG_GEN(config_prof)
-CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
-CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
-CTL_RO_BOOL_CONFIG_GEN(config_stats)
-CTL_RO_BOOL_CONFIG_GEN(config_tcache)
-CTL_RO_BOOL_CONFIG_GEN(config_tls)
-CTL_RO_BOOL_CONFIG_GEN(config_utrace)
-CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
-CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
-
-/******************************************************************************/
-
-CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
-CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
-CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
-CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
-CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
-CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
-CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
-CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
-CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
-CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
-CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
-CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
-CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
-CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
-CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
-CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
+/******************************************************************************/
+
+static int
+thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+	int ret;
+	bool oldval;
+
+	if (config_tcache == false)
+		return (ENOENT);
+
+	oldval = tcache_enabled_get();
+	if (newp != NULL) {
+		if (newlen != sizeof(bool)) {
+			ret = EINVAL;
+			goto label_return;
+		}
+		tcache_enabled_set(*(bool *)newp);
+	}
+	READ(oldval, bool);
+
+	ret = 0;
+label_return:
+	return (ret);
+}
+
+static int
+thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+	int ret;
+
+	if (config_tcache == false)
+		return (ENOENT);
+
+	READONLY();
+	WRITEONLY();
+
+	tcache_flush();
+
+	ret = 0;
+label_return:
+	return (ret);
+}

/******************************************************************************/
...
@@ -1382,31 +1394,8 @@ label_return:
return
(
ret
);
}
/******************************************************************************/
CTL_RO_NL_GEN
(
arenas_bin_i_size
,
arena_bin_info
[
mib
[
2
]].
reg_size
,
size_t
)
CTL_RO_NL_GEN
(
arenas_bin_i_nregs
,
arena_bin_info
[
mib
[
2
]].
nregs
,
uint32_t
)
CTL_RO_NL_GEN
(
arenas_bin_i_run_size
,
arena_bin_info
[
mib
[
2
]].
run_size
,
size_t
)
static
const
ctl_named_node_t
*
arenas_bin_i_index
(
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
if
(
i
>
NBINS
)
return
(
NULL
);
return
(
super_arenas_bin_i_node
);
}
CTL_RO_NL_GEN
(
arenas_lrun_i_size
,
((
mib
[
2
]
+
1
)
<<
LG_PAGE
),
size_t
)
static
const
ctl_named_node_t
*
arenas_lrun_i_index
(
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
if
(
i
>
nlclasses
)
return
(
NULL
);
return
(
super_arenas_lrun_i_node
);
}
static
int
arenas_narenas_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
...
...
@@ -1460,7 +1449,28 @@ CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN
(
config_tcache
,
arenas_tcache_max
,
tcache_maxclass
,
size_t
)
CTL_RO_NL_GEN
(
arenas_nbins
,
NBINS
,
unsigned
)
CTL_RO_NL_CGEN
(
config_tcache
,
arenas_nhbins
,
nhbins
,
unsigned
)
CTL_RO_NL_GEN
(
arenas_bin_i_size
,
arena_bin_info
[
mib
[
2
]].
reg_size
,
size_t
)
CTL_RO_NL_GEN
(
arenas_bin_i_nregs
,
arena_bin_info
[
mib
[
2
]].
nregs
,
uint32_t
)
CTL_RO_NL_GEN
(
arenas_bin_i_run_size
,
arena_bin_info
[
mib
[
2
]].
run_size
,
size_t
)
static
const
ctl_named_node_t
*
arenas_bin_i_index
(
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
if
(
i
>
NBINS
)
return
(
NULL
);
return
(
super_arenas_bin_i_node
);
}
CTL_RO_NL_GEN
(
arenas_nlruns
,
nlclasses
,
size_t
)
CTL_RO_NL_GEN
(
arenas_lrun_i_size
,
((
mib
[
2
]
+
1
)
<<
LG_PAGE
),
size_t
)
static
const
ctl_named_node_t
*
arenas_lrun_i_index
(
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
if
(
i
>
nlclasses
)
return
(
NULL
);
return
(
super_arenas_lrun_i_node
);
}
static
int
arenas_purge_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
...
...
@@ -1492,6 +1502,7 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
+	unsigned narenas;

	malloc_mutex_lock(&ctl_mtx);
	READONLY();
...
@@ -1499,7 +1510,8 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
		ret = EAGAIN;
		goto label_return;
	}
-	READ(ctl_stats.narenas - 1, unsigned);
+	narenas = ctl_stats.narenas - 1;
+	READ(narenas, unsigned);

	ret = 0;
label_return:
...
@@ -1565,6 +1577,11 @@ CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)

/******************************************************************************/

+CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
+CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
+CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
+
CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
    size_t)
CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
...
@@ -1572,6 +1589,20 @@ CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
+
+CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
+CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
+CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
+CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
+    ctl_stats.arenas[mib[2]].astats.mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
+    ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
+    ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
+    ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
+
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
    ctl_stats.arenas[mib[2]].allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
...
@@ -1635,19 +1666,6 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
	return (super_stats_arenas_i_lruns_j_node);
}

-CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
-CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
-CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
-CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
-    ctl_stats.arenas[mib[2]].astats.mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
-    ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
-    ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
-    ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
-
static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
...
@@ -1664,8 +1682,3 @@ label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
-
-CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
-CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
-CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
deps/jemalloc/src/huge.c  View file @ fceef8e0
...
@@ -16,14 +16,14 @@ malloc_mutex_t huge_mtx;
static extent_tree_t	huge;

void *
-huge_malloc(size_t size, bool zero)
+huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
{

-	return (huge_palloc(size, chunksize, zero));
+	return (huge_palloc(size, chunksize, zero, dss_prec));
}

void *
-huge_palloc(size_t size, size_t alignment, bool zero)
+huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
{
	void *ret;
	size_t csize;
...
@@ -48,8 +48,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
-	ret = chunk_alloc(csize, alignment, false, &is_zeroed,
-	    chunk_dss_prec_get());
+	ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
...
@@ -78,7 +77,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
	return (ret);
}

-void *
+bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{
...
@@ -89,28 +88,23 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
-		if (config_fill && opt_junk && size < oldsize) {
-			memset((void *)((uintptr_t)ptr + size), 0x5a,
-			    oldsize - size);
-		}
-		return (ptr);
+		return (false);
	}

	/* Reallocation would require a move. */
-	return (NULL);
+	return (true);
}

void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero, bool try_tcache_dalloc)
+    size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
-	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
-	if (ret != NULL)
-		return (ret);
+	if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
+		return (ptr);

	/*
	 * size and oldsize are different enough that we need to use a
...
* space and copying.
*/
if
(
alignment
>
chunksize
)
ret
=
huge_palloc
(
size
+
extra
,
alignment
,
zero
);
ret
=
huge_palloc
(
size
+
extra
,
alignment
,
zero
,
dss_prec
);
else
ret
=
huge_malloc
(
size
+
extra
,
zero
);
ret
=
huge_malloc
(
size
+
extra
,
zero
,
dss_prec
);
if
(
ret
==
NULL
)
{
if
(
extra
==
0
)
return
(
NULL
);
/* Try again, this time without extra. */
if
(
alignment
>
chunksize
)
ret
=
huge_palloc
(
size
,
alignment
,
zero
);
ret
=
huge_palloc
(
size
,
alignment
,
zero
,
dss_prec
);
else
ret
=
huge_malloc
(
size
,
zero
);
ret
=
huge_malloc
(
size
,
zero
,
dss_prec
);
if
(
ret
==
NULL
)
return
(
NULL
);
...
...
@@ -169,23 +163,56 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
			 */
			char buf[BUFERROR_BUF];

-			buferror(buf, sizeof(buf));
+			buferror(get_errno(), buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			chunk_dealloc_mmap(ptr, oldsize);
-		}
+		} else if (config_fill && zero == false && opt_junk && oldsize
+		    < newsize) {
+			/*
+			 * mremap(2) clobbers the original mapping, so
+			 * junk/zero filling is not preserved.  There is no
+			 * need to zero fill here, since any trailing
+			 * uninitialized memory is demand-zeroed by the
+			 * kernel, but junk filling must be redone.
+			 */
+			memset(ret + oldsize, 0xa5, newsize - oldsize);
+		}
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
-		iqallocx(ptr, try_tcache_dalloc);
+		iqalloct(ptr, try_tcache_dalloc);
	}
	return (ret);
}

+#ifdef JEMALLOC_JET
+#undef huge_dalloc_junk
+#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
+#endif
+static void
+huge_dalloc_junk(void *ptr, size_t usize)
+{
+
+	if (config_fill && config_dss && opt_junk) {
+		/*
+		 * Only bother junk filling if the chunk isn't about to be
+		 * unmapped.
+		 */
+		if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
+			memset(ptr, 0x5a, usize);
+	}
+}
+#ifdef JEMALLOC_JET
+#undef huge_dalloc_junk
+#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
+huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
+#endif
+
void
huge_dalloc(void *ptr, bool unmap)
{
...
@@ -208,8 +235,8 @@ huge_dalloc(void *ptr, bool unmap)

	malloc_mutex_unlock(&huge_mtx);

-	if (unmap && config_fill && config_dss && opt_junk)
-		memset(node->addr, 0x5a, node->size);
+	if (unmap)
+		huge_dalloc_junk(node->addr, node->size);

	chunk_dealloc(node->addr, node->size, unmap);
...
@@ -236,6 +263,13 @@ huge_salloc(const void *ptr)
	return (ret);
}

+dss_prec_t
+huge_dss_prec_get(arena_t *arena)
+{
+
+	return (arena_dss_prec_get(choose_arena(arena)));
+}
+
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
...
deps/jemalloc/src/jemalloc.c  View file @ fceef8e0
...
@@ -10,17 +10,20 @@ malloc_tsd_data(, thread_allocated, thread_allocated_t,
/* Runtime configuration options. */
const char	*je_malloc_conf;
+bool	opt_abort =
#ifdef JEMALLOC_DEBUG
-bool	opt_abort = true;
-#  ifdef JEMALLOC_FILL
-bool	opt_junk = true;
-#  else
-bool	opt_junk = false;
-#  endif
+    true
#else
-bool	opt_abort = false;
-bool	opt_junk = false;
+    false
#endif
+    ;
+bool	opt_junk =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+    true
+#else
+    false
+#endif
+    ;
size_t	opt_quarantine = ZU(0);
bool	opt_redzone = false;
bool	opt_utrace = false;
...
@@ -83,11 +86,13 @@ typedef struct {
#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do { \
	if (opt_utrace) { \
+		int utrace_serrno = errno; \
		malloc_utrace_t ut; \
		ut.p = (a); \
		ut.s = (b); \
		ut.r = (c); \
		utrace(&ut, sizeof(ut)); \
+		errno = utrace_serrno; \
	} \
} while (0)
#else
...
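The UTRACE change saves errno before the utrace(2) call and restores it afterwards, so tracing never changes the errno value the allocator's caller observes. The same save/restore wrapper in isolation, with a hypothetical logging hook standing in for utrace:

#include <errno.h>
#include <stdio.h>

/* Logging or tracing done inside an allocator path may itself fail and
 * overwrite errno; stash and restore it so the caller sees the real value. */
static void trace_event(const char *what, void *ptr)
{
	int saved_errno = errno;
	fprintf(stderr, "trace: %s %p\n", what, ptr);  /* may clobber errno */
	errno = saved_errno;
}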
@@ -95,18 +100,12 @@ typedef struct {
#endif

/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void	stats_print_atexit(void);
-static unsigned	malloc_ncpus(void);
-static bool	malloc_conf_next(char const **opts_p, char const **k_p,
-    size_t *klen_p, char const **v_p, size_t *vlen_p);
-static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
-    const char *v, size_t vlen);
-static void	malloc_conf_init(void);
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
static bool	malloc_init_hard(void);
-static int	imemalign(void **memptr, size_t alignment, size_t size,
-    size_t min_alignment);

/******************************************************************************/
/*
...
@@ -247,7 +246,6 @@ stats_print_atexit(void)
static unsigned
malloc_ncpus(void)
{
-	unsigned ret;
	long result;

#ifdef _WIN32
...
@@ -257,14 +255,7 @@ malloc_ncpus(void)
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
-	if (result == -1) {
-		/* Error. */
-		ret = 1;
-	} else {
-		ret = (unsigned)result;
-	}
-
-	return (ret);
+	return ((result == -1) ? 1 : (unsigned)result);
}

void
...
@@ -277,12 +268,30 @@ arenas_cleanup(void *arg)
	malloc_mutex_unlock(&arenas_lock);
}

-static inline bool
+JEMALLOC_ALWAYS_INLINE_C void
+malloc_thread_init(void)
+{
+
+	/*
+	 * TSD initialization can't be safely done as a side effect of
+	 * deallocation, because it is possible for a thread to do nothing but
+	 * deallocate its TLS data via free(), in which case writing to TLS
+	 * would cause write-after-free memory corruption.  The quarantine
+	 * facility *only* gets used as a side effect of deallocation, so make
+	 * a best effort attempt at initializing its TSD by hooking all
+	 * allocation events.
+	 */
+	if (config_fill && opt_quarantine)
+		quarantine_alloc_hook();
+}
+
+JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

-	if (malloc_initialized == false)
-		return (malloc_init_hard());
+	if (malloc_initialized == false && malloc_init_hard())
+		return (true);
+	malloc_thread_init();

	return (false);
}
...
@@ -413,8 +422,9 @@ malloc_conf_init(void)
			}
			break;
		case 1: {
+			int linklen = 0;
#ifndef _WIN32
-			int linklen;
+			int saved_errno = errno;
			const char *linkname =
#  ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
...
@@ -423,21 +433,20 @@ malloc_conf_init(void)
#  endif
			    ;

-			if ((linklen = readlink(linkname, buf,
-			    sizeof(buf) - 1)) != -1) {
-				/*
-				 * Use the contents of the "/etc/malloc.conf"
-				 * symbolic link's name.
-				 */
-				buf[linklen] = '\0';
-				opts = buf;
-			} else
-#endif
-			{
+			/*
+			 * Try to use the contents of the "/etc/malloc.conf"
+			 * symbolic link's name.
+			 */
+			linklen = readlink(linkname, buf, sizeof(buf) - 1);
+			if (linklen == -1) {
				/* No configuration specified. */
-				buf[0] = '\0';
-				opts = buf;
+				linklen = 0;
+				/* restore errno */
+				set_errno(saved_errno);
			}
+#endif
+			buf[linklen] = '\0';
+			opts = buf;
			break;
		}
		case 2: {
			const char *envname =
...
@@ -461,15 +470,14 @@ malloc_conf_init(void)
			}
			break;
		} default:
			/* NOTREACHED */
-			assert(false);
+			not_reached();
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
-#define	CONF_HANDLE_BOOL_HIT(o, n, hit) \
+#define	CONF_HANDLE_BOOL(o, n) \
			if (sizeof(n)-1 == klen && strncmp(n, k, \
			    klen) == 0) { \
				if (strncmp("true", v, vlen) == 0 && \
...
"Invalid conf value", \
k, klen, v, vlen); \
} \
hit = true; \
} else \
hit = false;
#define CONF_HANDLE_BOOL(o, n) { \
bool hit; \
CONF_HANDLE_BOOL_HIT(o, n, hit); \
if (hit) \
continue; \
}
#define CONF_HANDLE_SIZE_T(o, n, min, max)
\
}
#define CONF_HANDLE_SIZE_T(o, n, min, max
, clip
) \
if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
uintmax_t um; \
...
...
@@ -505,12 +506,23 @@ malloc_conf_init(void)
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
} else if (um < min || um > max) { \
} else if (clip) { \
if (min != 0 && um < min) \
o = min; \
else if (um > max) \
o = max; \
else \
o = um; \
} else { \
if ((min != 0 && um < min) || \
um > max) { \
malloc_conf_error( \
"Out-of-range conf value", \
"Out-of-range " \
"conf value", \
k, klen, v, vlen); \
} else \
o = um; \
} \
continue; \
}
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
...
...
@@ -555,7 +567,8 @@ malloc_conf_init(void)
* config_fill.
*/
CONF_HANDLE_SIZE_T
(
opt_lg_chunk
,
"lg_chunk"
,
LG_PAGE
+
(
config_fill
?
2
:
1
),
(
sizeof
(
size_t
)
<<
3
)
-
1
)
(
config_fill
?
2
:
1
),
(
sizeof
(
size_t
)
<<
3
)
-
1
,
true
)
if
(
strncmp
(
"dss"
,
k
,
klen
)
==
0
)
{
int
i
;
bool
match
=
false
;
...
...
@@ -581,14 +594,14 @@ malloc_conf_init(void)
				continue;
			}
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
-			    SIZE_T_MAX)
+			    SIZE_T_MAX, false)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk")
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
-				    0, SIZE_T_MAX)
+				    0, SIZE_T_MAX, false)
				CONF_HANDLE_BOOL(opt_redzone, "redzone")
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
...
@@ -668,17 +681,6 @@ malloc_init_hard(void)
	malloc_conf_init();

-#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
-    && !defined(_WIN32))
-	/* Register fork handlers. */
-	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
-	    jemalloc_postfork_child) != 0) {
-		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
-		if (opt_abort)
-			abort();
-	}
-#endif
-
	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
...
@@ -718,8 +720,10 @@ malloc_init_hard(void)
		return (true);
	}

-	if (malloc_mutex_init(&arenas_lock))
+	if (malloc_mutex_init(&arenas_lock)) {
+		malloc_mutex_unlock(&init_lock);
		return (true);
+	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
...
@@ -765,9 +769,25 @@ malloc_init_hard(void)
		return (true);
	}

-	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
+	/**********************************************************************/
+	/* Recursive allocation may follow. */
+
	ncpus = malloc_ncpus();
+
+#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
+    && !defined(_WIN32))
+	/* LinuxThreads's pthread_atfork() allocates. */
+	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
+	    jemalloc_postfork_child) != 0) {
+		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
+		if (opt_abort)
+			abort();
+	}
+#endif
+
+	/* Done recursively allocating. */
+	/**********************************************************************/
	malloc_mutex_lock(&init_lock);

	if (mutex_boot()) {
...
@@ -814,6 +834,7 @@ malloc_init_hard(void)
	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);

	return (false);
}
...
@@ -825,42 +846,88 @@ malloc_init_hard(void)
 * Begin malloc(3)-compatible functions.
 */

+static void *
+imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
+{
+	void *p;
+
+	if (cnt == NULL)
+		return (NULL);
+	if (prof_promote && usize <= SMALL_MAXCLASS) {
+		p = imalloc(SMALL_MAXCLASS+1);
+		if (p == NULL)
+			return (NULL);
+		arena_prof_promoted(p, usize);
+	} else
+		p = imalloc(usize);
+
+	return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
+{
+	void *p;
+
+	if ((uintptr_t)cnt != (uintptr_t)1U)
+		p = imalloc_prof_sample(usize, cnt);
+	else
+		p = imalloc(usize);
+	if (p == NULL)
+		return (NULL);
+	prof_malloc(p, usize, cnt);
+
+	return (p);
+}
+
+/*
+ * MALLOC_BODY() is a macro rather than a function because its contents are in
+ * the fast path, but inlining would cause reliability issues when determining
+ * how many frames to discard from heap profiling backtraces.
+ */
+#define	MALLOC_BODY(ret, size, usize) do { \
+	if (malloc_init()) \
+		ret = NULL; \
+	else { \
+		if (config_prof && opt_prof) { \
+			prof_thr_cnt_t *cnt; \
+			\
+			usize = s2u(size); \
+			/* \
+			 * Call PROF_ALLOC_PREP() here rather than in \
+			 * imalloc_prof() so that imalloc_prof() can be \
+			 * inlined without introducing uncertainty \
+			 * about the number of backtrace frames to \
+			 * ignore.  imalloc_prof() is in the fast path \
+			 * when heap profiling is enabled, so inlining \
+			 * is critical to performance.  (For \
+			 * consistency all callers of PROF_ALLOC_PREP() \
+			 * are structured similarly, even though e.g. \
+			 * realloc() isn't called enough for inlining \
+			 * to be critical.) \
+			 */ \
+			PROF_ALLOC_PREP(1, usize, cnt); \
+			ret = imalloc_prof(usize, cnt); \
+		} else { \
+			if (config_stats || (config_valgrind && \
+			    opt_valgrind)) \
+				usize = s2u(size); \
+			ret = imalloc(size); \
+		} \
+	} \
+} while (0)
+
void
*
je_malloc
(
size_t
size
)
{
void
*
ret
;
size_t
usize
JEMALLOC_CC_SILENCE_INIT
(
0
);
prof_thr_cnt_t
*
cnt
JEMALLOC_CC_SILENCE_INIT
(
NULL
);
if
(
malloc_init
())
{
ret
=
NULL
;
goto
label_oom
;
}
if
(
size
==
0
)
size
=
1
;
if
(
config_prof
&&
opt_prof
)
{
usize
=
s2u
(
size
);
PROF_ALLOC_PREP
(
1
,
usize
,
cnt
);
if
(
cnt
==
NULL
)
{
ret
=
NULL
;
goto
label_oom
;
}
if
(
prof_promote
&&
(
uintptr_t
)
cnt
!=
(
uintptr_t
)
1U
&&
usize
<=
SMALL_MAXCLASS
)
{
ret
=
imalloc
(
SMALL_MAXCLASS
+
1
);
if
(
ret
!=
NULL
)
arena_prof_promoted
(
ret
,
usize
);
}
else
ret
=
imalloc
(
size
);
}
else
{
if
(
config_stats
||
(
config_valgrind
&&
opt_valgrind
))
usize
=
s2u
(
size
);
ret
=
imalloc
(
size
);
}
MALLOC_BODY
(
ret
,
size
,
usize
);
label_oom:
if
(
ret
==
NULL
)
{
if
(
config_xmalloc
&&
opt_xmalloc
)
{
malloc_write
(
"<jemalloc>: Error in malloc(): "
...
...
@@ -869,8 +936,6 @@ label_oom:
}
set_errno
(
ENOMEM
);
}
if
(
config_prof
&&
opt_prof
&&
ret
!=
NULL
)
prof_malloc
(
ret
,
usize
,
cnt
);
if
(
config_stats
&&
ret
!=
NULL
)
{
assert
(
usize
==
isalloc
(
ret
,
config_prof
));
thread_allocated_tsd_get
()
->
allocated
+=
usize
;
...
...
@@ -880,28 +945,63 @@ label_oom:
	return (ret);
}

static void *
imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (prof_promote && usize <= SMALL_MAXCLASS) {
		assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
		p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment,
		    false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = ipalloc(usize, alignment, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = imemalign_prof_sample(alignment, usize, cnt);
	else
		p = ipalloc(usize, alignment, false);
	if (p == NULL)
		return (NULL);
	prof_malloc(p, usize, cnt);

	return (p);
}
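For context, a hedged sketch of the public wrappers that sit on top of the imemalign() helper defined next (call sites and sizes are illustrative, not from this diff):

/* Illustrative only: both entry points funnel into imemalign(), with
 * different minimum-alignment requirements. */
static void
aligned_alloc_example(void)
{
	void *p = NULL;

	if (je_posix_memalign(&p, 64, 1024) == 0)	/* 64-byte aligned */
		je_free(p);
	p = je_aligned_alloc(4096, 8192);		/* page aligned */
	if (p != NULL)
		je_free(p);
}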
JEMALLOC_ATTR
(
nonnull
(
1
))
#ifdef JEMALLOC_PROF
/*
* Avoid any uncertainty as to how many backtrace frames to ignore in
* PROF_ALLOC_PREP().
*/
JEMALLOC_
ATTR
(
noinline
)
JEMALLOC_
NOINLINE
#endif
static
int
imemalign
(
void
**
memptr
,
size_t
alignment
,
size_t
size
,
size_t
min_alignment
)
imemalign
(
void
**
memptr
,
size_t
alignment
,
size_t
size
,
size_t
min_alignment
)
{
int
ret
;
size_t
usize
;
void
*
result
;
prof_thr_cnt_t
*
cnt
JEMALLOC_CC_SILENCE_INIT
(
NULL
);
assert
(
min_alignment
!=
0
);
if
(
malloc_init
())
if
(
malloc_init
())
{
result
=
NULL
;
else
{
goto
label_oom
;
}
else
{
if
(
size
==
0
)
size
=
1
;
...
...
@@ -921,57 +1021,38 @@ imemalign(void **memptr, size_t alignment, size_t size,
usize
=
sa2u
(
size
,
alignment
);
if
(
usize
==
0
)
{
result
=
NULL
;
ret
=
ENOMEM
;
goto
label_return
;
goto
label_oom
;
}
if
(
config_prof
&&
opt_prof
)
{
prof_thr_cnt_t
*
cnt
;
PROF_ALLOC_PREP
(
2
,
usize
,
cnt
);
if
(
cnt
==
NULL
)
{
result
=
NULL
;
ret
=
EINVAL
;
}
else
{
if
(
prof_promote
&&
(
uintptr_t
)
cnt
!=
(
uintptr_t
)
1U
&&
usize
<=
SMALL_MAXCLASS
)
{
assert
(
sa2u
(
SMALL_MAXCLASS
+
1
,
alignment
)
!=
0
);
result
=
ipalloc
(
sa2u
(
SMALL_MAXCLASS
+
1
,
alignment
),
alignment
,
false
);
if
(
result
!=
NULL
)
{
arena_prof_promoted
(
result
,
usize
);
}
}
else
{
result
=
ipalloc
(
usize
,
alignment
,
false
);
}
}
result
=
imemalign_prof
(
alignment
,
usize
,
cnt
);
}
else
result
=
ipalloc
(
usize
,
alignment
,
false
);
}
if
(
result
==
NULL
)
{
if
(
config_xmalloc
&&
opt_xmalloc
)
{
malloc_write
(
"<jemalloc>: Error allocating aligned "
"memory: out of memory
\n
"
);
abort
();
}
ret
=
ENOMEM
;
goto
label_return
;
if
(
result
==
NULL
)
goto
label_oom
;
}
*
memptr
=
result
;
ret
=
0
;
label_return:
if
(
config_stats
&&
result
!=
NULL
)
{
assert
(
usize
==
isalloc
(
result
,
config_prof
));
thread_allocated_tsd_get
()
->
allocated
+=
usize
;
}
if
(
config_prof
&&
opt_prof
&&
result
!=
NULL
)
prof_malloc
(
result
,
usize
,
cnt
);
UTRACE
(
0
,
size
,
result
);
return
(
ret
);
label_oom:
assert
(
result
==
NULL
);
if
(
config_xmalloc
&&
opt_xmalloc
)
{
malloc_write
(
"<jemalloc>: Error allocating aligned memory: "
"out of memory
\n
"
);
abort
();
}
ret
=
ENOMEM
;
goto
label_return
;
}
int
...
...
@@ -998,13 +1079,46 @@ je_aligned_alloc(size_t alignment, size_t size)
	return (ret);
}

static void *
icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (prof_promote && usize <= SMALL_MAXCLASS) {
		p = icalloc(SMALL_MAXCLASS+1);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = icalloc(usize);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
icalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = icalloc_prof_sample(usize, cnt);
	else
		p = icalloc(usize);
	if (p == NULL)
		return (NULL);
	prof_malloc(p, usize, cnt);

	return (p);
}
void
*
je_calloc
(
size_t
num
,
size_t
size
)
{
void
*
ret
;
size_t
num_size
;
size_t
usize
JEMALLOC_CC_SILENCE_INIT
(
0
);
prof_thr_cnt_t
*
cnt
JEMALLOC_CC_SILENCE_INIT
(
NULL
);
if
(
malloc_init
())
{
num_size
=
0
;
...
...
@@ -1033,19 +1147,11 @@ je_calloc(size_t num, size_t size)
}
if
(
config_prof
&&
opt_prof
)
{
prof_thr_cnt_t
*
cnt
;
usize
=
s2u
(
num_size
);
PROF_ALLOC_PREP
(
1
,
usize
,
cnt
);
if
(
cnt
==
NULL
)
{
ret
=
NULL
;
goto
label_return
;
}
if
(
prof_promote
&&
(
uintptr_t
)
cnt
!=
(
uintptr_t
)
1U
&&
usize
<=
SMALL_MAXCLASS
)
{
ret
=
icalloc
(
SMALL_MAXCLASS
+
1
);
if
(
ret
!=
NULL
)
arena_prof_promoted
(
ret
,
usize
);
}
else
ret
=
icalloc
(
num_size
);
ret
=
icalloc_prof
(
usize
,
cnt
);
}
else
{
if
(
config_stats
||
(
config_valgrind
&&
opt_valgrind
))
usize
=
s2u
(
num_size
);
...
...
@@ -1061,9 +1167,6 @@ label_return:
}
set_errno
(
ENOMEM
);
}
if
(
config_prof
&&
opt_prof
&&
ret
!=
NULL
)
prof_malloc
(
ret
,
usize
,
cnt
);
if
(
config_stats
&&
ret
!=
NULL
)
{
assert
(
usize
==
isalloc
(
ret
,
config_prof
));
thread_allocated_tsd_get
()
->
allocated
+=
usize
;
...
...
@@ -1073,126 +1176,106 @@ label_return:
	return (ret);
}

static void *
irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (prof_promote && usize <= SMALL_MAXCLASS) {
		p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = iralloc(oldptr, usize, 0, 0, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;
	prof_ctx_t *old_ctx;

	old_ctx = prof_ctx_get(oldptr);
	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = irealloc_prof_sample(oldptr, usize, cnt);
	else
		p = iralloc(oldptr, usize, 0, 0, false);
	if (p == NULL)
		return (NULL);
	prof_realloc(p, usize, cnt, old_usize, old_ctx);

	return (p);
}

JEMALLOC_INLINE_C void
ifree(void *ptr)
{
	size_t usize;
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (config_prof && opt_prof) {
		usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	} else if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && opt_valgrind)
		rzsize = p2rz(ptr);
	iqalloc(ptr);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
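The je_realloc() body that follows special-cases its two degenerate inputs (see the "realloc(ptr, 0)" and "realloc(NULL, size)" comments below); a tiny illustrative sketch of the resulting behavior:

/* Illustrative only: behavior implemented by je_realloc() below. */
static void
realloc_semantics_example(void)
{
	void *q = je_realloc(NULL, 64);	/* behaves like je_malloc(64) */

	q = je_realloc(q, 0);	/* behaves like je_free(q); yields NULL */
}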
void
*
je_realloc
(
void
*
ptr
,
size_t
size
)
{
void
*
ret
;
size_t
usize
JEMALLOC_CC_SILENCE_INIT
(
0
);
size_t
old_size
=
0
;
size_t
old_rzsize
JEMALLOC_CC_SILENCE_INIT
(
0
);
prof_thr_cnt_t
*
cnt
JEMALLOC_CC_SILENCE_INIT
(
NULL
);
prof_ctx_t
*
old_ctx
JEMALLOC_CC_SILENCE_INIT
(
NULL
);
size_t
old_usize
=
0
;
UNUSED
size_t
old_rzsize
JEMALLOC_CC_SILENCE_INIT
(
0
);
if
(
size
==
0
)
{
if
(
ptr
!=
NULL
)
{
/* realloc(ptr, 0) is equivalent to free(p). */
if
(
config_prof
)
{
old_size
=
isalloc
(
ptr
,
true
);
if
(
config_valgrind
&&
opt_valgrind
)
old_rzsize
=
p2rz
(
ptr
);
}
else
if
(
config_stats
)
{
old_size
=
isalloc
(
ptr
,
false
);
if
(
config_valgrind
&&
opt_valgrind
)
old_rzsize
=
u2rz
(
old_size
);
}
else
if
(
config_valgrind
&&
opt_valgrind
)
{
old_size
=
isalloc
(
ptr
,
false
);
old_rzsize
=
u2rz
(
old_size
);
}
if
(
config_prof
&&
opt_prof
)
{
old_ctx
=
prof_ctx_get
(
ptr
);
cnt
=
NULL
;
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE
(
ptr
,
0
,
0
);
ifree
(
ptr
);
return
(
NULL
);
}
iqalloc
(
ptr
);
ret
=
NULL
;
goto
label_return
;
}
else
size
=
1
;
}
if
(
ptr
!=
NULL
)
{
assert
(
malloc_initialized
||
IS_INITIALIZER
);
malloc_thread_init
();
if
(
config_prof
)
{
old_size
=
isalloc
(
ptr
,
true
);
if
((
config_prof
&&
opt_prof
)
||
config_stats
||
(
config_valgrind
&&
opt_valgrind
))
old_usize
=
isalloc
(
ptr
,
config_prof
);
if
(
config_valgrind
&&
opt_valgrind
)
old_rzsize
=
p2rz
(
ptr
);
}
else
if
(
config_stats
)
{
old_size
=
isalloc
(
ptr
,
false
);
if
(
config_valgrind
&&
opt_valgrind
)
old_rzsize
=
u2rz
(
old_size
);
}
else
if
(
config_valgrind
&&
opt_valgrind
)
{
old_size
=
isalloc
(
ptr
,
false
);
old_rzsize
=
u2rz
(
old_size
);
}
old_rzsize
=
config_prof
?
p2rz
(
ptr
)
:
u2rz
(
old_usize
);
if
(
config_prof
&&
opt_prof
)
{
usize
=
s2u
(
size
);
old_ctx
=
prof_ctx_get
(
ptr
);
prof_thr_cnt_t
*
cnt
;
usize
=
s2u
(
size
);
PROF_ALLOC_PREP
(
1
,
usize
,
cnt
);
if
(
cnt
==
NULL
)
{
old_ctx
=
NULL
;
ret
=
NULL
;
goto
label_oom
;
}
if
(
prof_promote
&&
(
uintptr_t
)
cnt
!=
(
uintptr_t
)
1U
&&
usize
<=
SMALL_MAXCLASS
)
{
ret
=
iralloc
(
ptr
,
SMALL_MAXCLASS
+
1
,
0
,
0
,
false
,
false
);
if
(
ret
!=
NULL
)
arena_prof_promoted
(
ret
,
usize
);
else
old_ctx
=
NULL
;
}
else
{
ret
=
iralloc
(
ptr
,
size
,
0
,
0
,
false
,
false
);
if
(
ret
==
NULL
)
old_ctx
=
NULL
;
}
ret
=
irealloc_prof
(
ptr
,
old_usize
,
usize
,
cnt
);
}
else
{
if
(
config_stats
||
(
config_valgrind
&&
opt_valgrind
))
usize
=
s2u
(
size
);
ret
=
iralloc
(
ptr
,
size
,
0
,
0
,
false
,
false
);
}
label_oom:
if
(
ret
==
NULL
)
{
if
(
config_xmalloc
&&
opt_xmalloc
)
{
malloc_write
(
"<jemalloc>: Error in realloc(): "
"out of memory
\n
"
);
abort
();
}
set_errno
(
ENOMEM
);
ret
=
iralloc
(
ptr
,
size
,
0
,
0
,
false
);
}
}
else
{
/* realloc(NULL, size) is equivalent to malloc(size). */
if
(
config_prof
&&
opt_prof
)
old_ctx
=
NULL
;
if
(
malloc_init
())
{
if
(
config_prof
&&
opt_prof
)
cnt
=
NULL
;
ret
=
NULL
;
}
else
{
if
(
config_prof
&&
opt_prof
)
{
usize
=
s2u
(
size
);
PROF_ALLOC_PREP
(
1
,
usize
,
cnt
);
if
(
cnt
==
NULL
)
ret
=
NULL
;
else
{
if
(
prof_promote
&&
(
uintptr_t
)
cnt
!=
(
uintptr_t
)
1U
&&
usize
<=
SMALL_MAXCLASS
)
{
ret
=
imalloc
(
SMALL_MAXCLASS
+
1
);
if
(
ret
!=
NULL
)
{
arena_prof_promoted
(
ret
,
usize
);
}
}
else
ret
=
imalloc
(
size
);
}
}
else
{
if
(
config_stats
||
(
config_valgrind
&&
opt_valgrind
))
usize
=
s2u
(
size
);
ret
=
imalloc
(
size
);
}
MALLOC_BODY
(
ret
,
size
,
usize
);
}
if
(
ret
==
NULL
)
{
...
...
@@ -1203,20 +1286,16 @@ label_oom:
}
set_errno
(
ENOMEM
);
}
}
label_return:
if
(
config_prof
&&
opt_prof
)
prof_realloc
(
ret
,
usize
,
cnt
,
old_size
,
old_ctx
);
if
(
config_stats
&&
ret
!=
NULL
)
{
thread_allocated_t
*
ta
;
assert
(
usize
==
isalloc
(
ret
,
config_prof
));
ta
=
thread_allocated_tsd_get
();
ta
->
allocated
+=
usize
;
ta
->
deallocated
+=
old_size
;
ta
->
deallocated
+=
old_
u
size
;
}
UTRACE
(
ptr
,
size
,
ret
);
JEMALLOC_VALGRIND_REALLOC
(
ret
,
usize
,
ptr
,
old_size
,
old_rzsize
,
false
);
JEMALLOC_VALGRIND_REALLOC
(
ret
,
usize
,
ptr
,
old_usize
,
old_rzsize
,
false
);
return
(
ret
);
}
...
...
@@ -1225,24 +1304,8 @@ je_free(void *ptr)
{
UTRACE
(
ptr
,
0
,
0
);
if
(
ptr
!=
NULL
)
{
size_t
usize
;
size_t
rzsize
JEMALLOC_CC_SILENCE_INIT
(
0
);
assert
(
malloc_initialized
||
IS_INITIALIZER
);
if
(
config_prof
&&
opt_prof
)
{
usize
=
isalloc
(
ptr
,
config_prof
);
prof_free
(
ptr
,
usize
);
}
else
if
(
config_stats
||
config_valgrind
)
usize
=
isalloc
(
ptr
,
config_prof
);
if
(
config_stats
)
thread_allocated_tsd_get
()
->
deallocated
+=
usize
;
if
(
config_valgrind
&&
opt_valgrind
)
rzsize
=
p2rz
(
ptr
);
iqalloc
(
ptr
);
JEMALLOC_VALGRIND_FREE
(
ptr
,
rzsize
);
}
if
(
ptr
!=
NULL
)
ifree
(
ptr
);
}
/*
...
...
@@ -1308,99 +1371,75 @@ JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
* Begin non-standard functions.
*/
size_t
je_malloc_usable_size
(
JEMALLOC_USABLE_SIZE_CONST
void
*
ptr
)
JEMALLOC_ALWAYS_INLINE_C
void
*
imallocx
(
size_t
usize
,
size_t
alignment
,
bool
zero
,
bool
try_tcache
,
arena_t
*
arena
)
{
size_t
ret
;
assert
(
malloc_initialized
||
IS_INITIALIZER
);
assert
(
usize
==
((
alignment
==
0
)
?
s2u
(
usize
)
:
sa2u
(
usize
,
alignment
)));
if
(
config_ivsalloc
)
ret
=
ivsalloc
(
ptr
,
config_prof
);
if
(
alignment
!=
0
)
return
(
ipalloct
(
usize
,
alignment
,
zero
,
try_tcache
,
arena
));
else
if
(
zero
)
return
(
icalloct
(
usize
,
try_tcache
,
arena
));
else
ret
=
(
ptr
!=
NULL
)
?
isalloc
(
ptr
,
config_prof
)
:
0
;
return
(
ret
);
}
void
je_malloc_stats_print
(
void
(
*
write_cb
)(
void
*
,
const
char
*
),
void
*
cbopaque
,
const
char
*
opts
)
{
stats_print
(
write_cb
,
cbopaque
,
opts
);
}
int
je_mallctl
(
const
char
*
name
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
if
(
malloc_init
())
return
(
EAGAIN
);
return
(
ctl_byname
(
name
,
oldp
,
oldlenp
,
newp
,
newlen
));
}
int
je_mallctlnametomib
(
const
char
*
name
,
size_t
*
mibp
,
size_t
*
miblenp
)
{
if
(
malloc_init
())
return
(
EAGAIN
);
return
(
ctl_nametomib
(
name
,
mibp
,
miblenp
));
return
(
imalloct
(
usize
,
try_tcache
,
arena
));
}
int
je_
mall
ctlbymib
(
const
size_t
*
mib
,
size_t
mibl
en
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
static
void
*
i
mall
ocx_prof_sample
(
size_t
usize
,
size_t
alignm
en
t
,
bool
zero
,
bool
try_tcache
,
arena_t
*
arena
,
prof_thr_cnt_t
*
cnt
)
{
void
*
p
;
if
(
malloc_init
())
return
(
EAGAIN
);
if
(
cnt
==
NULL
)
return
(
NULL
);
if
(
prof_promote
&&
usize
<=
SMALL_MAXCLASS
)
{
size_t
usize_promoted
=
(
alignment
==
0
)
?
s2u
(
SMALL_MAXCLASS
+
1
)
:
sa2u
(
SMALL_MAXCLASS
+
1
,
alignment
);
assert
(
usize_promoted
!=
0
);
p
=
imallocx
(
usize_promoted
,
alignment
,
zero
,
try_tcache
,
arena
);
if
(
p
==
NULL
)
return
(
NULL
);
arena_prof_promoted
(
p
,
usize
);
}
else
p
=
imallocx
(
usize
,
alignment
,
zero
,
try_tcache
,
arena
);
return
(
ctl_bymib
(
mib
,
miblen
,
oldp
,
oldlenp
,
newp
,
newlen
)
);
return
(
p
);
}
/*
* End non-standard functions.
*/
/******************************************************************************/
/*
* Begin experimental functions.
*/
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_INLINE
void
*
iallocm
(
size_t
usize
,
size_t
alignment
,
bool
zero
,
bool
try_tcache
,
arena_t
*
arena
)
JEMALLOC_ALWAYS_INLINE_C
void
*
imallocx_prof
(
size_t
usize
,
size_t
alignment
,
bool
zero
,
bool
try_tcache
,
arena_t
*
arena
,
prof_thr_cnt_t
*
cnt
)
{
void
*
p
;
assert
(
usize
==
((
alignment
==
0
)
?
s2u
(
usize
)
:
sa2u
(
usize
,
alignment
)));
if
((
uintptr_t
)
cnt
!=
(
uintptr_t
)
1U
)
{
p
=
imallocx_prof_sample
(
usize
,
alignment
,
zero
,
try_tcache
,
arena
,
cnt
);
}
else
p
=
imallocx
(
usize
,
alignment
,
zero
,
try_tcache
,
arena
);
if
(
p
==
NULL
)
return
(
NULL
);
prof_malloc
(
p
,
usize
,
cnt
);
if
(
alignment
!=
0
)
return
(
ipallocx
(
usize
,
alignment
,
zero
,
try_tcache
,
arena
));
else
if
(
zero
)
return
(
icallocx
(
usize
,
try_tcache
,
arena
));
else
return
(
imallocx
(
usize
,
try_tcache
,
arena
));
return
(
p
);
}
int
je_alloc
m
(
void
**
ptr
,
size_t
*
rsize
,
size_t
size
,
int
flags
)
void
*
je_
m
alloc
x
(
size_t
size
,
int
flags
)
{
void
*
p
;
size_t
usize
;
size_t
alignment
=
(
ZU
(
1
)
<<
(
flags
&
ALLOC
M
_LG_ALIGN_MASK
)
size_t
alignment
=
(
ZU
(
1
)
<<
(
flags
&
M
ALLOC
X
_LG_ALIGN_MASK
)
&
(
SIZE_T_MAX
-
1
));
bool
zero
=
flags
&
ALLOC
M
_ZERO
;
bool
zero
=
flags
&
M
ALLOC
X
_ZERO
;
unsigned
arena_ind
=
((
unsigned
)(
flags
>>
8
))
-
1
;
arena_t
*
arena
;
bool
try_tcache
;
assert
(
ptr
!=
NULL
);
assert
(
size
!=
0
);
if
(
malloc_init
())
...
...
@@ -1415,85 +1454,117 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
}
usize
=
(
alignment
==
0
)
?
s2u
(
size
)
:
sa2u
(
size
,
alignment
);
if
(
usize
==
0
)
goto
label_oom
;
assert
(
usize
!=
0
);
if
(
config_prof
&&
opt_prof
)
{
prof_thr_cnt_t
*
cnt
;
PROF_ALLOC_PREP
(
1
,
usize
,
cnt
);
if
(
cnt
==
NULL
)
goto
label_oom
;
if
(
prof_promote
&&
(
uintptr_t
)
cnt
!=
(
uintptr_t
)
1U
&&
usize
<=
SMALL_MAXCLASS
)
{
size_t
usize_promoted
=
(
alignment
==
0
)
?
s2u
(
SMALL_MAXCLASS
+
1
)
:
sa2u
(
SMALL_MAXCLASS
+
1
,
alignment
);
assert
(
usize_promoted
!=
0
);
p
=
iallocm
(
usize_promoted
,
alignment
,
zero
,
try_tcache
,
arena
);
if
(
p
==
NULL
)
goto
label_oom
;
arena_prof_promoted
(
p
,
usize
);
}
else
{
p
=
iallocm
(
usize
,
alignment
,
zero
,
try_tcache
,
arena
);
if
(
p
==
NULL
)
goto
label_oom
;
}
prof_malloc
(
p
,
usize
,
cnt
);
}
else
{
p
=
iallocm
(
usize
,
alignment
,
zero
,
try_tcache
,
arena
);
p
=
imallocx_prof
(
usize
,
alignment
,
zero
,
try_tcache
,
arena
,
cnt
);
}
else
p
=
imallocx
(
usize
,
alignment
,
zero
,
try_tcache
,
arena
);
if
(
p
==
NULL
)
goto
label_oom
;
}
if
(
rsize
!=
NULL
)
*
rsize
=
usize
;
*
ptr
=
p
;
if
(
config_stats
)
{
assert
(
usize
==
isalloc
(
p
,
config_prof
));
thread_allocated_tsd_get
()
->
allocated
+=
usize
;
}
UTRACE
(
0
,
size
,
p
);
JEMALLOC_VALGRIND_MALLOC
(
true
,
p
,
usize
,
zero
);
return
(
ALLOCM_SUCCESS
);
return
(
p
);
label_oom:
if
(
config_xmalloc
&&
opt_xmalloc
)
{
malloc_write
(
"<jemalloc>: Error in allocm(): "
"out of memory
\n
"
);
malloc_write
(
"<jemalloc>: Error in mallocx(): out of memory
\n
"
);
abort
();
}
*
ptr
=
NULL
;
UTRACE
(
0
,
size
,
0
);
return
(
ALLOCM_ERR_OOM
);
return
(
NULL
);
}
int
je_rallocm
(
void
**
ptr
,
size_t
*
rsize
,
size_t
size
,
size_t
extra
,
int
flags
)
static
void
*
irallocx_prof_sample
(
void
*
oldptr
,
size_t
size
,
size_t
alignment
,
size_t
usize
,
bool
zero
,
bool
try_tcache_alloc
,
bool
try_tcache_dalloc
,
arena_t
*
arena
,
prof_thr_cnt_t
*
cnt
)
{
void
*
p
,
*
q
;
size_t
usize
;
size_t
old_size
;
size_t
old_rzsize
JEMALLOC_CC_SILENCE_INIT
(
0
);
size_t
alignment
=
(
ZU
(
1
)
<<
(
flags
&
ALLOCM_LG_ALIGN_MASK
)
void
*
p
;
if
(
cnt
==
NULL
)
return
(
NULL
);
if
(
prof_promote
&&
usize
<=
SMALL_MAXCLASS
)
{
p
=
iralloct
(
oldptr
,
SMALL_MAXCLASS
+
1
,
(
SMALL_MAXCLASS
+
1
>=
size
)
?
0
:
size
-
(
SMALL_MAXCLASS
+
1
),
alignment
,
zero
,
try_tcache_alloc
,
try_tcache_dalloc
,
arena
);
if
(
p
==
NULL
)
return
(
NULL
);
arena_prof_promoted
(
p
,
usize
);
}
else
{
p
=
iralloct
(
oldptr
,
size
,
0
,
alignment
,
zero
,
try_tcache_alloc
,
try_tcache_dalloc
,
arena
);
}
return
(
p
);
}
JEMALLOC_ALWAYS_INLINE_C
void
*
irallocx_prof
(
void
*
oldptr
,
size_t
old_usize
,
size_t
size
,
size_t
alignment
,
size_t
*
usize
,
bool
zero
,
bool
try_tcache_alloc
,
bool
try_tcache_dalloc
,
arena_t
*
arena
,
prof_thr_cnt_t
*
cnt
)
{
void
*
p
;
prof_ctx_t
*
old_ctx
;
old_ctx
=
prof_ctx_get
(
oldptr
);
if
((
uintptr_t
)
cnt
!=
(
uintptr_t
)
1U
)
p
=
irallocx_prof_sample
(
oldptr
,
size
,
alignment
,
*
usize
,
zero
,
try_tcache_alloc
,
try_tcache_dalloc
,
arena
,
cnt
);
else
{
p
=
iralloct
(
oldptr
,
size
,
0
,
alignment
,
zero
,
try_tcache_alloc
,
try_tcache_dalloc
,
arena
);
}
if
(
p
==
NULL
)
return
(
NULL
);
if
(
p
==
oldptr
&&
alignment
!=
0
)
{
/*
* The allocation did not move, so it is possible that the size
* class is smaller than would guarantee the requested
* alignment, and that the alignment constraint was
* serendipitously satisfied. Additionally, old_usize may not
* be the same as the current usize because of in-place large
* reallocation. Therefore, query the actual value of usize.
*/
*
usize
=
isalloc
(
p
,
config_prof
);
}
prof_realloc
(
p
,
*
usize
,
cnt
,
old_usize
,
old_ctx
);
return
(
p
);
}
void
*
je_rallocx
(
void
*
ptr
,
size_t
size
,
int
flags
)
{
void
*
p
;
size_t
usize
,
old_usize
;
UNUSED
size_t
old_rzsize
JEMALLOC_CC_SILENCE_INIT
(
0
);
size_t
alignment
=
(
ZU
(
1
)
<<
(
flags
&
MALLOCX_LG_ALIGN_MASK
)
&
(
SIZE_T_MAX
-
1
));
bool
zero
=
flags
&
ALLOCM_ZERO
;
bool
no_move
=
flags
&
ALLOCM_NO_MOVE
;
bool
zero
=
flags
&
MALLOCX_ZERO
;
unsigned
arena_ind
=
((
unsigned
)(
flags
>>
8
))
-
1
;
bool
try_tcache_alloc
,
try_tcache_dalloc
;
arena_t
*
arena
;
assert
(
ptr
!=
NULL
);
assert
(
*
ptr
!=
NULL
);
assert
(
size
!=
0
);
assert
(
SIZE_T_MAX
-
size
>=
extra
);
assert
(
malloc_initialized
||
IS_INITIALIZER
);
malloc_thread_init
();
if
(
arena_ind
!=
UINT_MAX
)
{
arena_chunk_t
*
chunk
;
try_tcache_alloc
=
tru
e
;
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
*
ptr
);
try_tcache_dalloc
=
(
chunk
==
*
ptr
||
chunk
->
arena
!=
try_tcache_alloc
=
fals
e
;
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
ptr
);
try_tcache_dalloc
=
(
chunk
==
ptr
||
chunk
->
arena
!=
arenas
[
arena_ind
]);
arena
=
arenas
[
arena_ind
];
}
else
{
...
...
@@ -1502,12 +1573,142 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
arena
=
NULL
;
}
p
=
*
ptr
;
if
((
config_prof
&&
opt_prof
)
||
config_stats
||
(
config_valgrind
&&
opt_valgrind
))
old_usize
=
isalloc
(
ptr
,
config_prof
);
if
(
config_valgrind
&&
opt_valgrind
)
old_rzsize
=
u2rz
(
old_usize
);
if
(
config_prof
&&
opt_prof
)
{
prof_thr_cnt_t
*
cnt
;
usize
=
(
alignment
==
0
)
?
s2u
(
size
)
:
sa2u
(
size
,
alignment
);
assert
(
usize
!=
0
);
PROF_ALLOC_PREP
(
1
,
usize
,
cnt
);
p
=
irallocx_prof
(
ptr
,
old_usize
,
size
,
alignment
,
&
usize
,
zero
,
try_tcache_alloc
,
try_tcache_dalloc
,
arena
,
cnt
);
if
(
p
==
NULL
)
goto
label_oom
;
}
else
{
p
=
iralloct
(
ptr
,
size
,
0
,
alignment
,
zero
,
try_tcache_alloc
,
try_tcache_dalloc
,
arena
);
if
(
p
==
NULL
)
goto
label_oom
;
if
(
config_stats
||
(
config_valgrind
&&
opt_valgrind
))
usize
=
isalloc
(
p
,
config_prof
);
}
if
(
config_stats
)
{
thread_allocated_t
*
ta
;
ta
=
thread_allocated_tsd_get
();
ta
->
allocated
+=
usize
;
ta
->
deallocated
+=
old_usize
;
}
UTRACE
(
ptr
,
size
,
p
);
JEMALLOC_VALGRIND_REALLOC
(
p
,
usize
,
ptr
,
old_usize
,
old_rzsize
,
zero
);
return
(
p
);
label_oom:
if
(
config_xmalloc
&&
opt_xmalloc
)
{
malloc_write
(
"<jemalloc>: Error in rallocx(): out of memory
\n
"
);
abort
();
}
UTRACE
(
ptr
,
size
,
0
);
return
(
NULL
);
}
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, bool zero, arena_t *arena)
{
	size_t usize;

	if (ixalloc(ptr, size, extra, alignment, zero))
		return (old_usize);
	usize = isalloc(ptr, config_prof);

	return (usize);
}

static size_t
ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, size_t max_usize, bool zero, arena_t *arena,
    prof_thr_cnt_t *cnt)
{
	size_t usize;

	if (cnt == NULL)
		return (old_usize);
	/* Use minimum usize to determine whether promotion may happen. */
	if (prof_promote && ((alignment == 0) ? s2u(size) : sa2u(size,
	    alignment)) <= SMALL_MAXCLASS) {
		if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
		    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
		    alignment, zero))
			return (old_usize);
		usize = isalloc(ptr, config_prof);
		if (max_usize < PAGE)
			arena_prof_promoted(ptr, usize);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero, arena);
	}

	return (usize);
}

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, size_t max_usize, bool zero, arena_t *arena,
    prof_thr_cnt_t *cnt)
{
	size_t usize;
	prof_ctx_t *old_ctx;

	old_ctx = prof_ctx_get(ptr);
	if ((uintptr_t)cnt != (uintptr_t)1U) {
		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
		    alignment, zero, max_usize, arena, cnt);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero, arena);
	}
	if (usize == old_usize)
		return (usize);
	prof_realloc(ptr, usize, cnt, old_usize, old_ctx);

	return (usize);
}
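je_xallocx(), defined next, resizes strictly in place and returns the resulting usable size; a hedged usage sketch (sizes arbitrary, error handling elided):

/* Sketch only: try to grow in place, fall back to a moving reallocation. */
static void
xallocx_example(void)
{
	void *p = je_mallocx(1000, 0);

	if (je_xallocx(p, 4096, 0, 0) < 4096)
		p = je_rallocx(p, 4096, 0);	/* may move the allocation */
	je_dallocx(p, 0);
}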
size_t
je_xallocx
(
void
*
ptr
,
size_t
size
,
size_t
extra
,
int
flags
)
{
size_t
usize
,
old_usize
;
UNUSED
size_t
old_rzsize
JEMALLOC_CC_SILENCE_INIT
(
0
);
size_t
alignment
=
(
ZU
(
1
)
<<
(
flags
&
MALLOCX_LG_ALIGN_MASK
)
&
(
SIZE_T_MAX
-
1
));
bool
zero
=
flags
&
MALLOCX_ZERO
;
unsigned
arena_ind
=
((
unsigned
)(
flags
>>
8
))
-
1
;
arena_t
*
arena
;
assert
(
ptr
!=
NULL
);
assert
(
size
!=
0
);
assert
(
SIZE_T_MAX
-
size
>=
extra
);
assert
(
malloc_initialized
||
IS_INITIALIZER
);
malloc_thread_init
();
if
(
arena_ind
!=
UINT_MAX
)
arena
=
arenas
[
arena_ind
];
else
arena
=
NULL
;
old_usize
=
isalloc
(
ptr
,
config_prof
);
if
(
config_valgrind
&&
opt_valgrind
)
old_rzsize
=
u2rz
(
old_usize
);
if
(
config_prof
&&
opt_prof
)
{
prof_thr_cnt_t
*
cnt
;
/*
* usize isn't knowable before i
r
alloc() returns when extra is
* usize isn't knowable before i
x
alloc() returns when extra is
* non-zero. Therefore, compute its maximum possible value and
* use that in PROF_ALLOC_PREP() to decide whether to capture a
* backtrace. prof_realloc() will use the actual usize to
...
...
@@ -1515,111 +1716,51 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
*/
size_t
max_usize
=
(
alignment
==
0
)
?
s2u
(
size
+
extra
)
:
sa2u
(
size
+
extra
,
alignment
);
prof_ctx_t
*
old_ctx
=
prof_ctx_get
(
p
);
old_size
=
isalloc
(
p
,
true
);
if
(
config_valgrind
&&
opt_valgrind
)
old_rzsize
=
p2rz
(
p
);
PROF_ALLOC_PREP
(
1
,
max_usize
,
cnt
);
if
(
cnt
==
NULL
)
goto
label_oom
;
/*
* Use minimum usize to determine whether promotion may happen.
*/
if
(
prof_promote
&&
(
uintptr_t
)
cnt
!=
(
uintptr_t
)
1U
&&
((
alignment
==
0
)
?
s2u
(
size
)
:
sa2u
(
size
,
alignment
))
<=
SMALL_MAXCLASS
)
{
q
=
irallocx
(
p
,
SMALL_MAXCLASS
+
1
,
(
SMALL_MAXCLASS
+
1
>=
size
+
extra
)
?
0
:
size
+
extra
-
(
SMALL_MAXCLASS
+
1
),
alignment
,
zero
,
no_move
,
try_tcache_alloc
,
try_tcache_dalloc
,
arena
);
if
(
q
==
NULL
)
goto
label_err
;
if
(
max_usize
<
PAGE
)
{
usize
=
max_usize
;
arena_prof_promoted
(
q
,
usize
);
}
else
usize
=
isalloc
(
q
,
config_prof
);
}
else
{
q
=
irallocx
(
p
,
size
,
extra
,
alignment
,
zero
,
no_move
,
try_tcache_alloc
,
try_tcache_dalloc
,
arena
);
if
(
q
==
NULL
)
goto
label_err
;
usize
=
isalloc
(
q
,
config_prof
);
}
prof_realloc
(
q
,
usize
,
cnt
,
old_size
,
old_ctx
);
if
(
rsize
!=
NULL
)
*
rsize
=
usize
;
usize
=
ixallocx_prof
(
ptr
,
old_usize
,
size
,
extra
,
alignment
,
max_usize
,
zero
,
arena
,
cnt
);
}
else
{
if
(
config_stats
)
{
old_size
=
isalloc
(
p
,
false
);
if
(
config_valgrind
&&
opt_valgrind
)
old_rzsize
=
u2rz
(
old_size
);
}
else
if
(
config_valgrind
&&
opt_valgrind
)
{
old_size
=
isalloc
(
p
,
false
);
old_rzsize
=
u2rz
(
old_size
);
}
q
=
irallocx
(
p
,
size
,
extra
,
alignment
,
zero
,
no_move
,
try_tcache_alloc
,
try_tcache_dalloc
,
arena
);
if
(
q
==
NULL
)
goto
label_err
;
if
(
config_stats
)
usize
=
isalloc
(
q
,
config_prof
);
if
(
rsize
!=
NULL
)
{
if
(
config_stats
==
false
)
usize
=
isalloc
(
q
,
config_prof
);
*
rsize
=
usize
;
}
usize
=
ixallocx_helper
(
ptr
,
old_usize
,
size
,
extra
,
alignment
,
zero
,
arena
);
}
if
(
usize
==
old_usize
)
goto
label_not_resized
;
*
ptr
=
q
;
if
(
config_stats
)
{
thread_allocated_t
*
ta
;
ta
=
thread_allocated_tsd_get
();
ta
->
allocated
+=
usize
;
ta
->
deallocated
+=
old_size
;
ta
->
deallocated
+=
old_
u
size
;
}
UTRACE
(
p
,
size
,
q
);
JEMALLOC_VALGRIND_REALLOC
(
q
,
usize
,
p
,
old_size
,
old_rzsize
,
zero
);
return
(
ALLOCM_SUCCESS
);
label_err:
if
(
no_move
)
{
UTRACE
(
p
,
size
,
q
);
return
(
ALLOCM_ERR_NOT_MOVED
);
}
label_oom:
if
(
config_xmalloc
&&
opt_xmalloc
)
{
malloc_write
(
"<jemalloc>: Error in rallocm(): "
"out of memory
\n
"
);
abort
();
}
UTRACE
(
p
,
size
,
0
);
return
(
ALLOCM_ERR_OOM
);
JEMALLOC_VALGRIND_REALLOC
(
ptr
,
usize
,
ptr
,
old_usize
,
old_rzsize
,
zero
);
label_not_resized:
UTRACE
(
ptr
,
size
,
ptr
);
return
(
usize
);
}
in
t
je_salloc
m
(
const
void
*
ptr
,
size_t
*
rsize
,
int
flags
)
size_
t
je_salloc
x
(
const
void
*
ptr
,
int
flags
)
{
size_t
sz
;
size_t
usize
;
assert
(
malloc_initialized
||
IS_INITIALIZER
);
malloc_thread_init
();
if
(
config_ivsalloc
)
sz
=
ivsalloc
(
ptr
,
config_prof
);
usize
=
ivsalloc
(
ptr
,
config_prof
);
else
{
assert
(
ptr
!=
NULL
);
sz
=
isalloc
(
ptr
,
config_prof
);
usize
=
isalloc
(
ptr
,
config_prof
);
}
assert
(
rsize
!=
NULL
);
*
rsize
=
sz
;
return
(
ALLOCM_SUCCESS
);
return
(
usize
);
}
int
je_dalloc
m
(
void
*
ptr
,
int
flags
)
void
je_dalloc
x
(
void
*
ptr
,
int
flags
)
{
size_t
usize
;
size_t
rzsize
JEMALLOC_CC_SILENCE_INIT
(
0
);
UNUSED
size_t
rzsize
JEMALLOC_CC_SILENCE_INIT
(
0
);
unsigned
arena_ind
=
((
unsigned
)(
flags
>>
8
))
-
1
;
bool
try_tcache
;
...
...
@@ -1645,28 +1786,162 @@ je_dallocm(void *ptr, int flags)
thread_allocated_tsd_get
()
->
deallocated
+=
usize
;
if
(
config_valgrind
&&
opt_valgrind
)
rzsize
=
p2rz
(
ptr
);
iqalloc
x
(
ptr
,
try_tcache
);
iqalloc
t
(
ptr
,
try_tcache
);
JEMALLOC_VALGRIND_FREE
(
ptr
,
rzsize
);
return
(
ALLOCM_SUCCESS
);
}
in
t
je_nalloc
m
(
size_t
*
rsize
,
size_t
size
,
int
flags
)
size_
t
je_nalloc
x
(
size_t
size
,
int
flags
)
{
size_t
usize
;
size_t
alignment
=
(
ZU
(
1
)
<<
(
flags
&
ALLOC
M
_LG_ALIGN_MASK
)
size_t
alignment
=
(
ZU
(
1
)
<<
(
flags
&
M
ALLOC
X
_LG_ALIGN_MASK
)
&
(
SIZE_T_MAX
-
1
));
assert
(
size
!=
0
);
if
(
malloc_init
())
return
(
ALLOCM_ERR_OOM
);
return
(
0
);
usize
=
(
alignment
==
0
)
?
s2u
(
size
)
:
sa2u
(
size
,
alignment
);
if
(
usize
==
0
)
assert
(
usize
!=
0
);
return
(
usize
);
}
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

size_t
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		ret = ivsalloc(ptr, config_prof);
	else
		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

	return (ret);
}
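The mallctl*() entry points above expose jemalloc's control namespace; a hedged usage sketch (the "epoch" and "stats.allocated" keys are assumed from jemalloc's documented namespace, not taken from this diff):

/* Sketch only: refresh the statistics epoch, then read the total number
 * of bytes currently allocated by the application. */
static void
stats_allocated_example(void)
{
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	size_t allocated;

	je_mallctl("epoch", &epoch, &sz, &epoch, sz);	/* refresh stats */
	sz = sizeof(allocated);
	if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
		/* allocated now holds the current total of live bytes. */
	}
}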
/*
* End non-standard functions.
*/
/******************************************************************************/
/*
* Begin experimental functions.
*/
#ifdef JEMALLOC_EXPERIMENTAL
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;

	assert(ptr != NULL);

	p = je_mallocx(size, flags);
	if (p == NULL)
		return (ALLOCM_ERR_OOM);
	if (rsize != NULL)
		*rsize = isalloc(p, config_prof);
	*ptr = p;
	return (ALLOCM_SUCCESS);
}

int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	int ret;
	bool no_move = flags & ALLOCM_NO_MOVE;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);

	if (no_move) {
		size_t usize = je_xallocx(*ptr, size, extra, flags);
		ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
		if (rsize != NULL)
			*rsize = usize;
	} else {
		void *p = je_rallocx(*ptr, size+extra, flags);
		if (p != NULL) {
			*ptr = p;
			ret = ALLOCM_SUCCESS;
		} else
			ret = ALLOCM_ERR_OOM;
		if (rsize != NULL)
			*rsize = isalloc(*ptr, config_prof);
	}
	return (ret);
}

int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{

	assert(rsize != NULL);
	*rsize = je_sallocx(ptr, flags);
	return (ALLOCM_SUCCESS);
}

int
je_dallocm(void *ptr, int flags)
{

	je_dallocx(ptr, flags);
	return (ALLOCM_SUCCESS);
}

int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;

	usize = je_nallocx(size, flags);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);
	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
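These shims make migrating callers mechanical; a hedged before/after sketch of the deprecated experimental API versus the *allocx API introduced by this update (sizes arbitrary):

/* Sketch only: equivalent allocations through the old and new APIs. */
static void
migration_example(void)
{
	void *p;
	size_t usize;

	/* Deprecated experimental API (still served by the shims above). */
	if (je_allocm(&p, &usize, 4096, ALLOCM_ZERO) == ALLOCM_SUCCESS)
		je_dallocm(p, 0);

	/* New non-standard API: sallocx() replaces the rsize out-parameter. */
	p = je_mallocx(4096, MALLOCX_ZERO);
	usize = (p != NULL) ? je_sallocx(p, 0) : 0;
	if (p != NULL)
		je_dallocx(p, 0);
}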
...
...
@@ -1721,12 +1996,12 @@ _malloc_prefork(void)
/* Acquire all mutexes in a safe order. */
 	ctl_prefork();
+	prof_prefork();
 	malloc_mutex_prefork(&arenas_lock);
 	for (i = 0; i < narenas_total; i++) {
 		if (arenas[i] != NULL)
 			arena_prefork(arenas[i]);
 	}
-	prof_prefork();
 	chunk_prefork();
 	base_prefork();
 	huge_prefork();
...
...
@@ -1752,12 +2027,12 @@ _malloc_postfork(void)
 	huge_postfork_parent();
 	base_postfork_parent();
 	chunk_postfork_parent();
-	prof_postfork_parent();
 	for (i = 0; i < narenas_total; i++) {
 		if (arenas[i] != NULL)
 			arena_postfork_parent(arenas[i]);
 	}
 	malloc_mutex_postfork_parent(&arenas_lock);
+	prof_postfork_parent();
 	ctl_postfork_parent();
 }
...
...
@@ -1772,12 +2047,12 @@ jemalloc_postfork_child(void)
 	huge_postfork_child();
 	base_postfork_child();
 	chunk_postfork_child();
-	prof_postfork_child();
 	for (i = 0; i < narenas_total; i++) {
 		if (arenas[i] != NULL)
 			arena_postfork_child(arenas[i]);
 	}
 	malloc_mutex_postfork_child(&arenas_lock);
+	prof_postfork_child();
 	ctl_postfork_child();
 }
...
...
@@ -1801,7 +2076,7 @@ a0alloc(size_t size, bool zero)
 	if (size <= arena_maxclass)
 		return (arena_malloc(arenas[0], size, zero, false));
 	else
-		return (huge_malloc(size, zero));
+		return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));
 }
void
*
...
...
deps/jemalloc/src/mutex.c
deps/jemalloc/src/prof.c
...
...
@@ -24,9 +24,14 @@ bool opt_prof_gdump = false;
 bool	opt_prof_final = true;
 bool	opt_prof_leak = false;
 bool	opt_prof_accum = false;
-char	opt_prof_prefix[PATH_MAX + 1];
+char	opt_prof_prefix[
+    /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+    PATH_MAX +
+#endif
+    1];

-uint64_t	prof_interval;
+uint64_t	prof_interval = 0;
 bool		prof_promote;
/*
...
...
@@ -54,47 +59,23 @@ static uint64_t prof_dump_useq;
 /*
  * This buffer is rather large for stack allocation, so use a single buffer for
- * all profile dumps.  The buffer is implicitly protected by bt2ctx_mtx, since
- * it must be locked anyway during dumping.
+ * all profile dumps.
  */
-static char		prof_dump_buf[PROF_DUMP_BUFSIZE];
+static malloc_mutex_t	prof_dump_mtx;
+static char		prof_dump_buf[
+    /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+    PROF_DUMP_BUFSIZE
+#else
+    1
+#endif
+];
 static unsigned		prof_dump_buf_end;
 static int		prof_dump_fd;

 /* Do not dump any profiles until bootstrapping is complete. */
 static bool		prof_booted = false;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static
prof_bt_t
*
bt_dup
(
prof_bt_t
*
bt
);
static
void
bt_destroy
(
prof_bt_t
*
bt
);
#ifdef JEMALLOC_PROF_LIBGCC
static
_Unwind_Reason_Code
prof_unwind_init_callback
(
struct
_Unwind_Context
*
context
,
void
*
arg
);
static
_Unwind_Reason_Code
prof_unwind_callback
(
struct
_Unwind_Context
*
context
,
void
*
arg
);
#endif
static
bool
prof_flush
(
bool
propagate_err
);
static
bool
prof_write
(
bool
propagate_err
,
const
char
*
s
);
static
bool
prof_printf
(
bool
propagate_err
,
const
char
*
format
,
...)
JEMALLOC_ATTR
(
format
(
printf
,
2
,
3
));
static
void
prof_ctx_sum
(
prof_ctx_t
*
ctx
,
prof_cnt_t
*
cnt_all
,
size_t
*
leak_nctx
);
static
void
prof_ctx_destroy
(
prof_ctx_t
*
ctx
);
static
void
prof_ctx_merge
(
prof_ctx_t
*
ctx
,
prof_thr_cnt_t
*
cnt
);
static
bool
prof_dump_ctx
(
bool
propagate_err
,
prof_ctx_t
*
ctx
,
prof_bt_t
*
bt
);
static
bool
prof_dump_maps
(
bool
propagate_err
);
static
bool
prof_dump
(
bool
propagate_err
,
const
char
*
filename
,
bool
leakcheck
);
static
void
prof_dump_filename
(
char
*
filename
,
char
v
,
int64_t
vseq
);
static
void
prof_fdump
(
void
);
static
void
prof_bt_hash
(
const
void
*
key
,
unsigned
minbits
,
size_t
*
hash1
,
size_t
*
hash2
);
static
bool
prof_bt_keycomp
(
const
void
*
k1
,
const
void
*
k2
);
static
malloc_mutex_t
*
prof_ctx_mutex_choose
(
void
);
/******************************************************************************/
void
...
...
@@ -424,75 +405,155 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore)
{
cassert
(
config_prof
);
assert
(
false
);
not_reached
(
);
}
#endif
prof_thr_cnt_t
*
prof_lookup
(
prof_bt_t
*
bt
)
static
malloc_mutex_t
*
prof_ctx_mutex_choose
(
void
)
{
unsigned
nctxs
=
atomic_add_u
(
&
cum_ctxs
,
1
);
return
(
&
ctx_locks
[(
nctxs
-
1
)
%
PROF_NCTX_LOCKS
]);
}
static
void
prof_ctx_init
(
prof_ctx_t
*
ctx
,
prof_bt_t
*
bt
)
{
ctx
->
bt
=
bt
;
ctx
->
lock
=
prof_ctx_mutex_choose
();
/*
* Set nlimbo to 1, in order to avoid a race condition with
* prof_ctx_merge()/prof_ctx_destroy().
*/
ctx
->
nlimbo
=
1
;
ql_elm_new
(
ctx
,
dump_link
);
memset
(
&
ctx
->
cnt_merged
,
0
,
sizeof
(
prof_cnt_t
));
ql_new
(
&
ctx
->
cnts_ql
);
}
static
void
prof_ctx_destroy
(
prof_ctx_t
*
ctx
)
{
union
{
prof_thr_cnt_t
*
p
;
void
*
v
;
}
ret
;
prof_tdata_t
*
prof_tdata
;
cassert
(
config_prof
);
prof_tdata
=
prof_tdata_get
();
if
((
uintptr_t
)
prof_tdata
<=
(
uintptr_t
)
PROF_TDATA_STATE_MAX
)
return
(
NULL
);
/*
* Check that ctx is still unused by any thread cache before destroying
* it. prof_lookup() increments ctx->nlimbo in order to avoid a race
* condition with this function, as does prof_ctx_merge() in order to
* avoid a race between the main body of prof_ctx_merge() and entry
* into this function.
*/
prof_tdata
=
prof_tdata_get
(
false
);
assert
((
uintptr_t
)
prof_tdata
>
(
uintptr_t
)
PROF_TDATA_STATE_MAX
);
prof_enter
(
prof_tdata
);
malloc_mutex_lock
(
ctx
->
lock
);
if
(
ql_first
(
&
ctx
->
cnts_ql
)
==
NULL
&&
ctx
->
cnt_merged
.
curobjs
==
0
&&
ctx
->
nlimbo
==
1
)
{
assert
(
ctx
->
cnt_merged
.
curbytes
==
0
);
assert
(
ctx
->
cnt_merged
.
accumobjs
==
0
);
assert
(
ctx
->
cnt_merged
.
accumbytes
==
0
);
/* Remove ctx from bt2ctx. */
if
(
ckh_remove
(
&
bt2ctx
,
ctx
->
bt
,
NULL
,
NULL
))
not_reached
();
prof_leave
(
prof_tdata
);
/* Destroy ctx. */
malloc_mutex_unlock
(
ctx
->
lock
);
bt_destroy
(
ctx
->
bt
);
idalloc
(
ctx
);
}
else
{
/*
* Compensate for increment in prof_ctx_merge() or
* prof_lookup().
*/
ctx
->
nlimbo
--
;
malloc_mutex_unlock
(
ctx
->
lock
);
prof_leave
(
prof_tdata
);
}
}
if
(
ckh_search
(
&
prof_tdata
->
bt2cnt
,
bt
,
NULL
,
&
ret
.
v
))
{
union
{
prof_bt_t
*
p
;
void
*
v
;
}
btkey
;
static
void
prof_ctx_merge
(
prof_ctx_t
*
ctx
,
prof_thr_cnt_t
*
cnt
)
{
bool
destroy
;
cassert
(
config_prof
);
/* Merge cnt stats and detach from ctx. */
malloc_mutex_lock
(
ctx
->
lock
);
ctx
->
cnt_merged
.
curobjs
+=
cnt
->
cnts
.
curobjs
;
ctx
->
cnt_merged
.
curbytes
+=
cnt
->
cnts
.
curbytes
;
ctx
->
cnt_merged
.
accumobjs
+=
cnt
->
cnts
.
accumobjs
;
ctx
->
cnt_merged
.
accumbytes
+=
cnt
->
cnts
.
accumbytes
;
ql_remove
(
&
ctx
->
cnts_ql
,
cnt
,
cnts_link
);
if
(
opt_prof_accum
==
false
&&
ql_first
(
&
ctx
->
cnts_ql
)
==
NULL
&&
ctx
->
cnt_merged
.
curobjs
==
0
&&
ctx
->
nlimbo
==
0
)
{
/*
* Increment ctx->nlimbo in order to keep another thread from
* winning the race to destroy ctx while this one has ctx->lock
* dropped. Without this, it would be possible for another
* thread to:
*
* 1) Sample an allocation associated with ctx.
* 2) Deallocate the sampled object.
* 3) Successfully prof_ctx_destroy(ctx).
*
* The result would be that ctx no longer exists by the time
* this thread accesses it in prof_ctx_destroy().
*/
ctx
->
nlimbo
++
;
destroy
=
true
;
}
else
destroy
=
false
;
malloc_mutex_unlock
(
ctx
->
lock
);
if
(
destroy
)
prof_ctx_destroy
(
ctx
);
}
static
bool
prof_lookup_global
(
prof_bt_t
*
bt
,
prof_tdata_t
*
prof_tdata
,
void
**
p_btkey
,
prof_ctx_t
**
p_ctx
,
bool
*
p_new_ctx
)
{
union
{
prof_ctx_t
*
p
;
void
*
v
;
}
ctx
;
union
{
prof_bt_t
*
p
;
void
*
v
;
}
btkey
;
bool
new_ctx
;
/*
* This thread's cache lacks bt. Look for it in the global
* cache.
*/
prof_enter
(
prof_tdata
);
if
(
ckh_search
(
&
bt2ctx
,
bt
,
&
btkey
.
v
,
&
ctx
.
v
))
{
/* bt has never been seen before. Insert it. */
ctx
.
v
=
imalloc
(
sizeof
(
prof_ctx_t
));
if
(
ctx
.
v
==
NULL
)
{
prof_leave
(
prof_tdata
);
return
(
NULL
);
return
(
true
);
}
btkey
.
p
=
bt_dup
(
bt
);
if
(
btkey
.
v
==
NULL
)
{
prof_leave
(
prof_tdata
);
idalloc
(
ctx
.
v
);
return
(
NULL
);
return
(
true
);
}
ctx
.
p
->
bt
=
btkey
.
p
;
ctx
.
p
->
lock
=
prof_ctx_mutex_choose
();
/*
* Set nlimbo to 1, in order to avoid a race condition
* with prof_ctx_merge()/prof_ctx_destroy().
*/
ctx
.
p
->
nlimbo
=
1
;
memset
(
&
ctx
.
p
->
cnt_merged
,
0
,
sizeof
(
prof_cnt_t
));
ql_new
(
&
ctx
.
p
->
cnts_ql
);
prof_ctx_init
(
ctx
.
p
,
btkey
.
p
);
if
(
ckh_insert
(
&
bt2ctx
,
btkey
.
v
,
ctx
.
v
))
{
/* OOM. */
prof_leave
(
prof_tdata
);
idalloc
(
btkey
.
v
);
idalloc
(
ctx
.
v
);
return
(
NULL
);
return
(
true
);
}
new_ctx
=
true
;
}
else
{
/*
* Increment nlimbo, in order to avoid a race condition
*
with
prof_ctx_merge()/prof_ctx_destroy().
* Increment nlimbo, in order to avoid a race condition
with
* prof_ctx_merge()/prof_ctx_destroy().
*/
malloc_mutex_lock
(
ctx
.
p
->
lock
);
ctx
.
p
->
nlimbo
++
;
...
...
@@ -501,6 +562,39 @@ prof_lookup(prof_bt_t *bt)
}
prof_leave
(
prof_tdata
);
*
p_btkey
=
btkey
.
v
;
*
p_ctx
=
ctx
.
p
;
*
p_new_ctx
=
new_ctx
;
return
(
false
);
}
prof_thr_cnt_t
*
prof_lookup
(
prof_bt_t
*
bt
)
{
union
{
prof_thr_cnt_t
*
p
;
void
*
v
;
}
ret
;
prof_tdata_t
*
prof_tdata
;
cassert
(
config_prof
);
prof_tdata
=
prof_tdata_get
(
false
);
if
((
uintptr_t
)
prof_tdata
<=
(
uintptr_t
)
PROF_TDATA_STATE_MAX
)
return
(
NULL
);
if
(
ckh_search
(
&
prof_tdata
->
bt2cnt
,
bt
,
NULL
,
&
ret
.
v
))
{
void
*
btkey
;
prof_ctx_t
*
ctx
;
bool
new_ctx
;
/*
* This thread's cache lacks bt. Look for it in the global
* cache.
*/
if
(
prof_lookup_global
(
bt
,
prof_tdata
,
&
btkey
,
&
ctx
,
&
new_ctx
))
return
(
NULL
);
/* Link a prof_thd_cnt_t into ctx for this thread. */
if
(
ckh_count
(
&
prof_tdata
->
bt2cnt
)
==
PROF_TCMAX
)
{
assert
(
ckh_count
(
&
prof_tdata
->
bt2cnt
)
>
0
);
...
...
@@ -512,7 +606,7 @@ prof_lookup(prof_bt_t *bt)
assert
(
ret
.
v
!=
NULL
);
if
(
ckh_remove
(
&
prof_tdata
->
bt2cnt
,
ret
.
p
->
ctx
->
bt
,
NULL
,
NULL
))
assert
(
false
);
not_reached
(
);
ql_remove
(
&
prof_tdata
->
lru_ql
,
ret
.
p
,
lru_link
);
prof_ctx_merge
(
ret
.
p
->
ctx
,
ret
.
p
);
/* ret can now be re-used. */
...
...
@@ -522,27 +616,27 @@ prof_lookup(prof_bt_t *bt)
ret
.
v
=
imalloc
(
sizeof
(
prof_thr_cnt_t
));
if
(
ret
.
p
==
NULL
)
{
if
(
new_ctx
)
prof_ctx_destroy
(
ctx
.
p
);
prof_ctx_destroy
(
ctx
);
return
(
NULL
);
}
ql_elm_new
(
ret
.
p
,
cnts_link
);
ql_elm_new
(
ret
.
p
,
lru_link
);
}
/* Finish initializing ret. */
ret
.
p
->
ctx
=
ctx
.
p
;
ret
.
p
->
ctx
=
ctx
;
ret
.
p
->
epoch
=
0
;
memset
(
&
ret
.
p
->
cnts
,
0
,
sizeof
(
prof_cnt_t
));
if
(
ckh_insert
(
&
prof_tdata
->
bt2cnt
,
btkey
.
v
,
ret
.
v
))
{
if
(
ckh_insert
(
&
prof_tdata
->
bt2cnt
,
btkey
,
ret
.
v
))
{
if
(
new_ctx
)
prof_ctx_destroy
(
ctx
.
p
);
prof_ctx_destroy
(
ctx
);
idalloc
(
ret
.
v
);
return
(
NULL
);
}
ql_head_insert
(
&
prof_tdata
->
lru_ql
,
ret
.
p
,
lru_link
);
malloc_mutex_lock
(
ctx
.
p
->
lock
);
ql_tail_insert
(
&
ctx
.
p
->
cnts_ql
,
ret
.
p
,
cnts_link
);
ctx
.
p
->
nlimbo
--
;
malloc_mutex_unlock
(
ctx
.
p
->
lock
);
malloc_mutex_lock
(
ctx
->
lock
);
ql_tail_insert
(
&
ctx
->
cnts_ql
,
ret
.
p
,
cnts_link
);
ctx
->
nlimbo
--
;
malloc_mutex_unlock
(
ctx
->
lock
);
}
else
{
/* Move ret to the front of the LRU. */
ql_remove
(
&
prof_tdata
->
lru_ql
,
ret
.
p
,
lru_link
);
...
...
@@ -552,8 +646,52 @@ prof_lookup(prof_bt_t *bt)
return
(
ret
.
p
);
}
#ifdef JEMALLOC_JET
size_t
prof_bt_count(void)
{
	size_t bt_count;
	prof_tdata_t *prof_tdata;

	prof_tdata = prof_tdata_get(false);
	if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
		return (0);

	prof_enter(prof_tdata);
	bt_count = ckh_count(&bt2ctx);
	prof_leave(prof_tdata);

	return (bt_count);
}
#endif
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define	prof_dump_open JEMALLOC_N(prof_dump_open_impl)
#endif
static int
prof_dump_open(bool propagate_err, const char *filename)
{
	int fd;

	fd = creat(filename, 0644);
	if (fd == -1 && propagate_err == false) {
		malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
		    filename);
		if (opt_abort)
			abort();
	}

	return (fd);
}
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define	prof_dump_open JEMALLOC_N(prof_dump_open)
prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
#endif
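The JEMALLOC_JET indirection above turns prof_dump_open into a replaceable function pointer; a hypothetical sketch of how a test might interpose it (the surrounding test harness is assumed, not shown in this diff):

/* Hypothetical: only meaningful in a JEMALLOC_JET build, where
 * prof_dump_open is a prof_dump_open_t * that a test may reassign. */
static int
prof_dump_open_intercept(bool propagate_err, const char *filename)
{

	return (-1);	/* simulate creat() failure */
}
/* In a test: prof_dump_open = prof_dump_open_intercept; */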
static
bool
prof_flush
(
bool
propagate_err
)
prof_
dump_
flush
(
bool
propagate_err
)
{
bool
ret
=
false
;
ssize_t
err
;
...
...
@@ -576,7 +714,20 @@ prof_flush(bool propagate_err)
}
static
bool
prof_write
(
bool
propagate_err
,
const
char
*
s
)
prof_dump_close
(
bool
propagate_err
)
{
bool
ret
;
assert
(
prof_dump_fd
!=
-
1
);
ret
=
prof_dump_flush
(
propagate_err
);
close
(
prof_dump_fd
);
prof_dump_fd
=
-
1
;
return
(
ret
);
}
static
bool
prof_dump_write
(
bool
propagate_err
,
const
char
*
s
)
{
unsigned
i
,
slen
,
n
;
...
...
@@ -587,7 +738,7 @@ prof_write(bool propagate_err, const char *s)
while
(
i
<
slen
)
{
/* Flush the buffer if it is full. */
if
(
prof_dump_buf_end
==
PROF_DUMP_BUFSIZE
)
if
(
prof_flush
(
propagate_err
)
&&
propagate_err
)
if
(
prof_
dump_
flush
(
propagate_err
)
&&
propagate_err
)
return
(
true
);
if
(
prof_dump_buf_end
+
slen
<=
PROF_DUMP_BUFSIZE
)
{
...
...
@@ -607,7 +758,7 @@ prof_write(bool propagate_err, const char *s)
JEMALLOC_ATTR
(
format
(
printf
,
2
,
3
))
static
bool
prof_printf
(
bool
propagate_err
,
const
char
*
format
,
...)
prof_
dump_
printf
(
bool
propagate_err
,
const
char
*
format
,
...)
{
bool
ret
;
va_list
ap
;
...
...
@@ -616,13 +767,14 @@ prof_printf(bool propagate_err, const char *format, ...)
va_start
(
ap
,
format
);
malloc_vsnprintf
(
buf
,
sizeof
(
buf
),
format
,
ap
);
va_end
(
ap
);
ret
=
prof_write
(
propagate_err
,
buf
);
ret
=
prof_
dump_
write
(
propagate_err
,
buf
);
return
(
ret
);
}
static
void
prof_ctx_sum
(
prof_ctx_t
*
ctx
,
prof_cnt_t
*
cnt_all
,
size_t
*
leak_nctx
)
prof_dump_ctx_prep
(
prof_ctx_t
*
ctx
,
prof_cnt_t
*
cnt_all
,
size_t
*
leak_nctx
,
prof_ctx_list_t
*
ctx_ql
)
{
prof_thr_cnt_t
*
thr_cnt
;
prof_cnt_t
tcnt
;
...
...
@@ -631,6 +783,14 @@ prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
malloc_mutex_lock
(
ctx
->
lock
);
/*
* Increment nlimbo so that ctx won't go away before dump.
* Additionally, link ctx into the dump list so that it is included in
* prof_dump()'s second pass.
*/
ctx
->
nlimbo
++
;
ql_tail_insert
(
ctx_ql
,
ctx
,
dump_link
);
memcpy
(
&
ctx
->
cnt_summed
,
&
ctx
->
cnt_merged
,
sizeof
(
prof_cnt_t
));
ql_foreach
(
thr_cnt
,
&
ctx
->
cnts_ql
,
cnts_link
)
{
volatile
unsigned
*
epoch
=
&
thr_cnt
->
epoch
;
...
...
@@ -671,89 +831,52 @@ prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
malloc_mutex_unlock
(
ctx
->
lock
);
}
static
void
prof_
ctx_destroy
(
prof_ct
x
_t
*
c
tx
)
static
bool
prof_
dump_header
(
bool
propagate_err
,
const
prof_c
n
t_t
*
c
nt_all
)
{
prof_tdata_t
*
prof_tdata
;
cassert
(
config_prof
);
/*
* Check that ctx is still unused by any thread cache before destroying
* it. prof_lookup() increments ctx->nlimbo in order to avoid a race
* condition with this function, as does prof_ctx_merge() in order to
* avoid a race between the main body of prof_ctx_merge() and entry
* into this function.
*/
prof_tdata
=
*
prof_tdata_tsd_get
();
assert
((
uintptr_t
)
prof_tdata
>
(
uintptr_t
)
PROF_TDATA_STATE_MAX
);
prof_enter
(
prof_tdata
);
malloc_mutex_lock
(
ctx
->
lock
);
if
(
ql_first
(
&
ctx
->
cnts_ql
)
==
NULL
&&
ctx
->
cnt_merged
.
curobjs
==
0
&&
ctx
->
nlimbo
==
1
)
{
assert
(
ctx
->
cnt_merged
.
curbytes
==
0
);
assert
(
ctx
->
cnt_merged
.
accumobjs
==
0
);
assert
(
ctx
->
cnt_merged
.
accumbytes
==
0
);
/* Remove ctx from bt2ctx. */
if
(
ckh_remove
(
&
bt2ctx
,
ctx
->
bt
,
NULL
,
NULL
))
assert
(
false
);
prof_leave
(
prof_tdata
);
/* Destroy ctx. */
malloc_mutex_unlock
(
ctx
->
lock
);
bt_destroy
(
ctx
->
bt
);
idalloc
(
ctx
);
if
(
opt_lg_prof_sample
==
0
)
{
if
(
prof_dump_printf
(
propagate_err
,
"heap profile: %"
PRId64
": %"
PRId64
" [%"
PRIu64
": %"
PRIu64
"] @ heapprofile
\n
"
,
cnt_all
->
curobjs
,
cnt_all
->
curbytes
,
cnt_all
->
accumobjs
,
cnt_all
->
accumbytes
))
return
(
true
);
}
else
{
/*
* Compensate for increment in prof_ctx_merge() or
* prof_lookup().
*/
ctx
->
nlimbo
--
;
malloc_mutex_unlock
(
ctx
->
lock
);
prof_leave
(
prof_tdata
);
if
(
prof_dump_printf
(
propagate_err
,
"heap profile: %"
PRId64
": %"
PRId64
" [%"
PRIu64
": %"
PRIu64
"] @ heap_v2/%"
PRIu64
"
\n
"
,
cnt_all
->
curobjs
,
cnt_all
->
curbytes
,
cnt_all
->
accumobjs
,
cnt_all
->
accumbytes
,
((
uint64_t
)
1U
<<
opt_lg_prof_sample
)))
return
(
true
);
}
return
(
false
);
}
static
void
prof_
ctx_merge
(
prof_ctx_t
*
ctx
,
prof_
thr_cn
t_t
*
c
n
t
)
prof_
dump_ctx_cleanup_locked
(
prof_ctx_t
*
ctx
,
prof_
ctx_lis
t_t
*
ct
x_ql
)
{
bool
destroy
;
cassert
(
config_prof
);
ctx
->
nlimbo
--
;
ql_remove
(
ctx_ql
,
ctx
,
dump_link
);
}
static
void
prof_dump_ctx_cleanup
(
prof_ctx_t
*
ctx
,
prof_ctx_list_t
*
ctx_ql
)
{
/* Merge cnt stats and detach from ctx. */
malloc_mutex_lock
(
ctx
->
lock
);
ctx
->
cnt_merged
.
curobjs
+=
cnt
->
cnts
.
curobjs
;
ctx
->
cnt_merged
.
curbytes
+=
cnt
->
cnts
.
curbytes
;
ctx
->
cnt_merged
.
accumobjs
+=
cnt
->
cnts
.
accumobjs
;
ctx
->
cnt_merged
.
accumbytes
+=
cnt
->
cnts
.
accumbytes
;
ql_remove
(
&
ctx
->
cnts_ql
,
cnt
,
cnts_link
);
if
(
opt_prof_accum
==
false
&&
ql_first
(
&
ctx
->
cnts_ql
)
==
NULL
&&
ctx
->
cnt_merged
.
curobjs
==
0
&&
ctx
->
nlimbo
==
0
)
{
/*
* Increment ctx->nlimbo in order to keep another thread from
* winning the race to destroy ctx while this one has ctx->lock
* dropped. Without this, it would be possible for another
* thread to:
*
* 1) Sample an allocation associated with ctx.
* 2) Deallocate the sampled object.
* 3) Successfully prof_ctx_destroy(ctx).
*
* The result would be that ctx no longer exists by the time
* this thread accesses it in prof_ctx_destroy().
*/
ctx
->
nlimbo
++
;
destroy
=
true
;
}
else
destroy
=
false
;
prof_dump_ctx_cleanup_locked
(
ctx
,
ctx_ql
);
malloc_mutex_unlock
(
ctx
->
lock
);
if
(
destroy
)
prof_ctx_destroy
(
ctx
);
}
static
bool
prof_dump_ctx
(
bool
propagate_err
,
prof_ctx_t
*
ctx
,
prof_bt_t
*
bt
)
prof_dump_ctx
(
bool
propagate_err
,
prof_ctx_t
*
ctx
,
const
prof_bt_t
*
bt
,
prof_ctx_list_t
*
ctx_ql
)
{
bool
ret
;
unsigned
i
;
cassert
(
config_prof
);
...
...
@@ -765,66 +888,109 @@ prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt)
* filled in. Avoid dumping any ctx that is an artifact of either
* implementation detail.
*/
malloc_mutex_lock
(
ctx
->
lock
);
if
((
opt_prof_accum
==
false
&&
ctx
->
cnt_summed
.
curobjs
==
0
)
||
(
opt_prof_accum
&&
ctx
->
cnt_summed
.
accumobjs
==
0
))
{
assert
(
ctx
->
cnt_summed
.
curobjs
==
0
);
assert
(
ctx
->
cnt_summed
.
curbytes
==
0
);
assert
(
ctx
->
cnt_summed
.
accumobjs
==
0
);
assert
(
ctx
->
cnt_summed
.
accumbytes
==
0
);
return
(
false
);
ret
=
false
;
goto
label_return
;
}
if
(
prof_printf
(
propagate_err
,
"%"
PRId64
": %"
PRId64
if
(
prof_
dump_
printf
(
propagate_err
,
"%"
PRId64
": %"
PRId64
" [%"
PRIu64
": %"
PRIu64
"] @"
,
ctx
->
cnt_summed
.
curobjs
,
ctx
->
cnt_summed
.
curbytes
,
ctx
->
cnt_summed
.
accumobjs
,
ctx
->
cnt_summed
.
accumbytes
))
return
(
true
);
ctx
->
cnt_summed
.
accumobjs
,
ctx
->
cnt_summed
.
accumbytes
))
{
ret
=
true
;
goto
label_return
;
}
for
(
i
=
0
;
i
<
bt
->
len
;
i
++
)
{
if
(
prof_printf
(
propagate_err
,
" %#"
PRIxPTR
,
(
uintptr_t
)
bt
->
vec
[
i
]))
return
(
true
);
if
(
prof_dump_printf
(
propagate_err
,
" %#"
PRIxPTR
,
(
uintptr_t
)
bt
->
vec
[
i
]))
{
ret
=
true
;
goto
label_return
;
}
}
if
(
prof_write
(
propagate_err
,
"
\n
"
))
return
(
true
);
if
(
prof_dump_write
(
propagate_err
,
"
\n
"
))
{
ret
=
true
;
goto
label_return
;
}
return
(
false
);
ret
=
false
;
label_return:
prof_dump_ctx_cleanup_locked
(
ctx
,
ctx_ql
);
malloc_mutex_unlock
(
ctx
->
lock
);
return
(
ret
);
}
static
bool
prof_dump_maps
(
bool
propagate_err
)
{
bool
ret
;
int
mfd
;
char
filename
[
PATH_MAX
+
1
];
cassert
(
config_prof
);
#ifdef __FreeBSD__
malloc_snprintf
(
filename
,
sizeof
(
filename
),
"/proc/curproc/map"
);
#else
malloc_snprintf
(
filename
,
sizeof
(
filename
),
"/proc/%d/maps"
,
(
int
)
getpid
());
#endif
mfd
=
open
(
filename
,
O_RDONLY
);
if
(
mfd
!=
-
1
)
{
ssize_t
nread
;
if
(
prof_write
(
propagate_err
,
"
\n
MAPPED_LIBRARIES:
\n
"
)
&&
propagate_err
)
return
(
true
);
if
(
prof_dump_write
(
propagate_err
,
"
\n
MAPPED_LIBRARIES:
\n
"
)
&&
propagate_err
)
{
ret
=
true
;
goto
label_return
;
}
nread
=
0
;
do
{
prof_dump_buf_end
+=
nread
;
if
(
prof_dump_buf_end
==
PROF_DUMP_BUFSIZE
)
{
/* Make space in prof_dump_buf before read(). */
if
(
prof_flush
(
propagate_err
)
&&
propagate_err
)
return
(
true
);
if
(
prof_dump_flush
(
propagate_err
)
&&
propagate_err
)
{
ret
=
true
;
goto
label_return
;
}
}
nread
=
read
(
mfd
,
&
prof_dump_buf
[
prof_dump_buf_end
],
PROF_DUMP_BUFSIZE
-
prof_dump_buf_end
);
}
while
(
nread
>
0
);
}
else
{
ret
=
true
;
goto
label_return
;
}
ret
=
false
;
label_return:
if
(
mfd
!=
-
1
)
close
(
mfd
);
}
else
return
(
true
);
return
(
ret
);
}
return
(
false
);
static
void
prof_leakcheck
(
const
prof_cnt_t
*
cnt_all
,
size_t
leak_nctx
,
const
char
*
filename
)
{
if
(
cnt_all
->
curbytes
!=
0
)
{
malloc_printf
(
"<jemalloc>: Leak summary: %"
PRId64
" byte%s, %"
PRId64
" object%s, %zu context%s
\n
"
,
cnt_all
->
curbytes
,
(
cnt_all
->
curbytes
!=
1
)
?
"s"
:
""
,
cnt_all
->
curobjs
,
(
cnt_all
->
curobjs
!=
1
)
?
"s"
:
""
,
leak_nctx
,
(
leak_nctx
!=
1
)
?
"s"
:
""
);
malloc_printf
(
"<jemalloc>: Run pprof on
\"
%s
\"
for leak detail
\n
"
,
filename
);
}
}
static
bool
...
...
@@ -833,99 +999,75 @@ prof_dump(bool propagate_err, const char *filename, bool leakcheck)
    prof_tdata_t *prof_tdata;
    prof_cnt_t cnt_all;
    size_t tabind;
    union {
        prof_bt_t   *p;
        void        *v;
    } bt;
    union {
        prof_ctx_t  *p;
        void        *v;
    } ctx;
    size_t leak_nctx;
+   prof_ctx_list_t ctx_ql;

    cassert(config_prof);

-   prof_tdata = prof_tdata_get();
+   prof_tdata = prof_tdata_get(false);
    if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
        return (true);
-   prof_enter(prof_tdata);
-   prof_dump_fd = creat(filename, 0644);
-   if (prof_dump_fd == -1) {
-       if (propagate_err == false) {
-           malloc_printf(
-               "<jemalloc>: creat(\"%s\"), 0644) failed\n",
-               filename);
-           if (opt_abort)
-               abort();
-       }
-       goto label_error;
-   }
+   malloc_mutex_lock(&prof_dump_mtx);

    /* Merge per thread profile stats, and sum them in cnt_all. */
    memset(&cnt_all, 0, sizeof(prof_cnt_t));
    leak_nctx = 0;
+   ql_new(&ctx_ql);
+   prof_enter(prof_tdata);
    for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;)
-       prof_ctx_sum(ctx.p, &cnt_all, &leak_nctx);
+       prof_dump_ctx_prep(ctx.p, &cnt_all, &leak_nctx, &ctx_ql);
+   prof_leave(prof_tdata);

+   /* Create dump file. */
+   if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
+       goto label_open_close_error;

    /* Dump profile header. */
-   if (opt_lg_prof_sample == 0) {
-       if (prof_printf(propagate_err,
-           "heap profile: %"PRId64": %"PRId64
-           " [%"PRIu64": %"PRIu64"] @ heapprofile\n",
-           cnt_all.curobjs, cnt_all.curbytes,
-           cnt_all.accumobjs, cnt_all.accumbytes))
-           goto label_error;
-   } else {
-       if (prof_printf(propagate_err,
-           "heap profile: %"PRId64": %"PRId64
-           " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
-           cnt_all.curobjs, cnt_all.curbytes,
-           cnt_all.accumobjs, cnt_all.accumbytes,
-           ((uint64_t)1U << opt_lg_prof_sample)))
-           goto label_error;
-   }
+   if (prof_dump_header(propagate_err, &cnt_all))
+       goto label_write_error;

    /* Dump per ctx profile stats. */
-   for (tabind = 0; ckh_iter(&bt2ctx, &tabind, &bt.v, &ctx.v) == false;) {
-       if (prof_dump_ctx(propagate_err, ctx.p, bt.p))
-           goto label_error;
-   }
+   while ((ctx.p = ql_first(&ctx_ql)) != NULL) {
+       if (prof_dump_ctx(propagate_err, ctx.p, ctx.p->bt, &ctx_ql))
+           goto label_write_error;
+   }

    /* Dump /proc/<pid>/maps if possible. */
    if (prof_dump_maps(propagate_err))
-       goto label_error;
+       goto label_write_error;

-   if (prof_flush(propagate_err))
-       goto label_error;
-   close(prof_dump_fd);
-   prof_leave(prof_tdata);
-   if (leakcheck && cnt_all.curbytes != 0) {
-       malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
-           PRId64" object%s, %zu context%s\n",
-           cnt_all.curbytes, (cnt_all.curbytes != 1) ? "s" : "",
-           cnt_all.curobjs, (cnt_all.curobjs != 1) ? "s" : "",
-           leak_nctx, (leak_nctx != 1) ? "s" : "");
-       malloc_printf(
-           "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
-           filename);
-   }
+   if (prof_dump_close(propagate_err))
+       goto label_open_close_error;

+   malloc_mutex_unlock(&prof_dump_mtx);

+   if (leakcheck)
+       prof_leakcheck(&cnt_all, leak_nctx, filename);

    return (false);
-label_error:
-   prof_leave(prof_tdata);
+label_write_error:
+   prof_dump_close(propagate_err);
+label_open_close_error:
+   while ((ctx.p = ql_first(&ctx_ql)) != NULL)
+       prof_dump_ctx_cleanup(ctx.p, &ctx_ql);
+   malloc_mutex_unlock(&prof_dump_mtx);
    return (true);
}
#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
+#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)

static void
prof_dump_filename(char *filename, char v, int64_t vseq)
{
    cassert(config_prof);

-   if (vseq != UINT64_C(0xffffffffffffffff)) {
+   if (vseq != VSEQ_INVALID) {
        /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
        malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
            "%s.%d.%"PRIu64".%c%"PRId64".heap",
...
...
@@ -951,7 +1093,7 @@ prof_fdump(void)
    if (opt_prof_final && opt_prof_prefix[0] != '\0') {
        malloc_mutex_lock(&prof_dump_seq_mtx);
-       prof_dump_filename(filename, 'f', UINT64_C(0xffffffffffffffff));
+       prof_dump_filename(filename, 'f', VSEQ_INVALID);
        malloc_mutex_unlock(&prof_dump_seq_mtx);
        prof_dump(false, filename, opt_prof_leak);
    }
...
...
@@ -967,11 +1109,7 @@ prof_idump(void)
    if (prof_booted == false)
        return;
-   /*
-    * Don't call prof_tdata_get() here, because it could cause recursive
-    * allocation.
-    */
-   prof_tdata = *prof_tdata_tsd_get();
+   prof_tdata = prof_tdata_get(false);
    if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
        return;
    if (prof_tdata->enq) {
...
...
@@ -1021,11 +1159,7 @@ prof_gdump(void)
    if (prof_booted == false)
        return;
-   /*
-    * Don't call prof_tdata_get() here, because it could cause recursive
-    * allocation.
-    */
-   prof_tdata = *prof_tdata_tsd_get();
+   prof_tdata = prof_tdata_get(false);
    if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
        return;
    if (prof_tdata->enq) {
...
...
@@ -1043,34 +1177,13 @@ prof_gdump(void)
}

static void
-prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
+prof_bt_hash(const void *key, size_t r_hash[2])
{
-   size_t ret1, ret2;
-   uint64_t h;
    prof_bt_t *bt = (prof_bt_t *)key;

    cassert(config_prof);
-   assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
-   assert(hash1 != NULL);
-   assert(hash2 != NULL);

-   h = hash(bt->vec, bt->len * sizeof(void *),
-       UINT64_C(0x94122f335b332aea));
-   if (minbits <= 32) {
-       /*
-        * Avoid doing multiple hashes, since a single hash provides
-        * enough bits.
-        */
-       ret1 = h & ZU(0xffffffffU);
-       ret2 = h >> 32;
-   } else {
-       ret1 = h;
-       ret2 = hash(bt->vec, bt->len * sizeof(void *),
-           UINT64_C(0x8432a476666bbc13));
-   }
-
-   *hash1 = ret1;
-   *hash2 = ret2;
+   hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
}

static bool
...
...
@@ -1086,14 +1199,6 @@ prof_bt_keycomp(const void *k1, const void *k2)
    return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}

-static malloc_mutex_t *
-prof_ctx_mutex_choose(void)
-{
-   unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
-
-   return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
-}
-
prof_tdata_t *
prof_tdata_init(void)
{
...
...
@@ -1206,13 +1311,11 @@ prof_boot1(void)
     */
        opt_prof = true;
        opt_prof_gdump = false;
        prof_interval = 0;
    } else if (opt_prof) {
        if (opt_lg_prof_interval >= 0) {
            prof_interval = (((uint64_t)1U) <<
                opt_lg_prof_interval);
-       } else
-           prof_interval = 0;
+       }
    }

    prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
...
...
@@ -1240,6 +1343,8 @@ prof_boot2(void)
    if (malloc_mutex_init(&prof_dump_seq_mtx))
        return (true);
+   if (malloc_mutex_init(&prof_dump_mtx))
+       return (true);

    if (atexit(prof_fdump) != 0) {
        malloc_write("<jemalloc>: Error in atexit()\n");
...
...
@@ -1277,10 +1382,10 @@ prof_prefork(void)
    if (opt_prof) {
        unsigned i;

-       malloc_mutex_lock(&bt2ctx_mtx);
-       malloc_mutex_lock(&prof_dump_seq_mtx);
+       malloc_mutex_prefork(&bt2ctx_mtx);
+       malloc_mutex_prefork(&prof_dump_seq_mtx);
        for (i = 0; i < PROF_NCTX_LOCKS; i++)
-           malloc_mutex_lock(&ctx_locks[i]);
+           malloc_mutex_prefork(&ctx_locks[i]);
    }
}
...
...
deps/jemalloc/src/quarantine.c
View file @
fceef8e0
#define JEMALLOC_QUARANTINE_C_
#include "jemalloc/internal/jemalloc_internal.h"
/*
...
...
@@ -11,39 +12,18 @@
/******************************************************************************/
/* Data. */
-typedef struct quarantine_obj_s quarantine_obj_t;
-typedef struct quarantine_s quarantine_t;
-
-struct quarantine_obj_s {
-   void    *ptr;
-   size_t  usize;
-};
-
-struct quarantine_s {
-   size_t  curbytes;
-   size_t  curobjs;
-   size_t  first;
-#define LG_MAXOBJS_INIT 10
-   size_t  lg_maxobjs;
-   quarantine_obj_t    objs[1]; /* Dynamically sized ring buffer. */
-};
-
-static void quarantine_cleanup(void *arg);
-
-malloc_tsd_data(static, quarantine, quarantine_t *, NULL)
-malloc_tsd_funcs(JEMALLOC_INLINE, quarantine, quarantine_t *, NULL,
-    quarantine_cleanup)
+malloc_tsd_data(, quarantine, quarantine_t *, NULL)

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

-static quarantine_t *quarantine_init(size_t lg_maxobjs);
static quarantine_t *quarantine_grow(quarantine_t *quarantine);
+static void quarantine_drain_one(quarantine_t *quarantine);
static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);

/******************************************************************************/

-static quarantine_t *
+quarantine_t *
quarantine_init(size_t lg_maxobjs)
{
    quarantine_t *quarantine;
...
...
@@ -68,8 +48,10 @@ quarantine_grow(quarantine_t *quarantine)
    quarantine_t *ret;

    ret = quarantine_init(quarantine->lg_maxobjs + 1);
-   if (ret == NULL)
+   if (ret == NULL) {
+       quarantine_drain_one(quarantine);
        return (quarantine);
+   }

    ret->curbytes = quarantine->curbytes;
    ret->curobjs = quarantine->curobjs;
...
...
@@ -89,15 +71,14 @@ quarantine_grow(quarantine_t *quarantine)
        memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
            sizeof(quarantine_obj_t));
    }
+   idalloc(quarantine);

    return (ret);
}

static void
-quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
+quarantine_drain_one(quarantine_t *quarantine)
{
-   while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) {
    quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
    assert(obj->usize == isalloc(obj->ptr, config_prof));
    idalloc(obj->ptr);
...
...
@@ -105,7 +86,14 @@ quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
    quarantine->curobjs--;
    quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
        quarantine->lg_maxobjs) - 1);
-   }
}

+static void
+quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
+{
+   while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
+       quarantine_drain_one(quarantine);
+}
void
...
...
@@ -119,17 +107,10 @@ quarantine(void *ptr)
    quarantine = *quarantine_tsd_get();
    if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
-       if (quarantine == NULL) {
-           if ((quarantine = quarantine_init(LG_MAXOBJS_INIT)) ==
-               NULL) {
-               idalloc(ptr);
-               return;
-           }
-       } else {
        if (quarantine == QUARANTINE_STATE_PURGATORY) {
            /*
-            * Make a note that quarantine() was called
-            * after quarantine_cleanup() was called.
+            * Make a note that quarantine() was called after
+            * quarantine_cleanup() was called.
             */
            quarantine = QUARANTINE_STATE_REINCARNATED;
            quarantine_tsd_set(&quarantine);
...
...
@@ -137,7 +118,6 @@ quarantine(void *ptr)
        idalloc(ptr);
        return;
    }
-   }

    /*
     * Drain one or more objects if the quarantine size limit would be
     * exceeded by appending ptr.
...
...
@@ -161,15 +141,24 @@ quarantine(void *ptr)
        obj->usize = usize;
        quarantine->curbytes += usize;
        quarantine->curobjs++;
-       if (opt_junk)
+       if (config_fill && opt_junk) {
+           /*
+            * Only do redzone validation if Valgrind isn't in
+            * operation.
+            */
+           if ((config_valgrind == false || opt_valgrind == false)
+               && usize <= SMALL_MAXCLASS)
+               arena_quarantine_junk_small(ptr, usize);
+           else
                memset(ptr, 0x5a, usize);
+       }
    } else {
        assert(quarantine->curbytes == 0);
        idalloc(ptr);
    }
}

-static void
+void
quarantine_cleanup(void *arg)
{
    quarantine_t *quarantine = *(quarantine_t **)arg;
...
...