ruanhaishen / redis · Commits

Commit 4a884343
Authored Oct 10, 2021 by Yoav Steinberg

    Delete old jemalloc before pulling in subtree.

Parent: 7ff7536e
Changes: 169

Too many changes to show. To preserve performance, only 169 of 169+ files are displayed.
deps/jemalloc/include/jemalloc/internal/extent_types.h  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
#define JEMALLOC_INTERNAL_EXTENT_TYPES_H

typedef struct extent_s extent_t;
typedef struct extents_s extents_t;

#define EXTENT_HOOKS_INITIALIZER NULL

#define EXTENT_GROW_MAX_PIND (NPSIZES - 1)

/*
 * When reuse (and split) an active extent, (1U << opt_lg_extent_max_active_fit)
 * is the max ratio between the size of the active extent and the new extent.
 */
#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6

#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
deps/jemalloc/include/jemalloc/internal/hash.h  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_HASH_H
#define JEMALLOC_INTERNAL_HASH_H

#include "jemalloc/internal/assert.h"

/*
 * The following hash function is based on MurmurHash3, placed into the public
 * domain by Austin Appleby.  See https://github.com/aappleby/smhasher for
 * details.
 */

/******************************************************************************/
/* Internal implementation. */
static inline uint32_t
hash_rotl_32(uint32_t x, int8_t r) {
    return ((x << r) | (x >> (32 - r)));
}

static inline uint64_t
hash_rotl_64(uint64_t x, int8_t r) {
    return ((x << r) | (x >> (64 - r)));
}

static inline uint32_t
hash_get_block_32(const uint32_t *p, int i) {
    /* Handle unaligned read. */
    if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
        uint32_t ret;

        memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
        return ret;
    }

    return p[i];
}

static inline uint64_t
hash_get_block_64(const uint64_t *p, int i) {
    /* Handle unaligned read. */
    if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
        uint64_t ret;

        memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
        return ret;
    }

    return p[i];
}

static inline uint32_t
hash_fmix_32(uint32_t h) {
    h ^= h >> 16;
    h *= 0x85ebca6b;
    h ^= h >> 13;
    h *= 0xc2b2ae35;
    h ^= h >> 16;

    return h;
}

static inline uint64_t
hash_fmix_64(uint64_t k) {
    k ^= k >> 33;
    k *= KQU(0xff51afd7ed558ccd);
    k ^= k >> 33;
    k *= KQU(0xc4ceb9fe1a85ec53);
    k ^= k >> 33;

    return k;
}

static inline uint32_t
hash_x86_32(const void *key, int len, uint32_t seed) {
    const uint8_t *data = (const uint8_t *)key;
    const int nblocks = len / 4;

    uint32_t h1 = seed;

    const uint32_t c1 = 0xcc9e2d51;
    const uint32_t c2 = 0x1b873593;

    /* body */
    {
        const uint32_t *blocks = (const uint32_t *)(data + nblocks*4);
        int i;

        for (i = -nblocks; i; i++) {
            uint32_t k1 = hash_get_block_32(blocks, i);

            k1 *= c1;
            k1 = hash_rotl_32(k1, 15);
            k1 *= c2;

            h1 ^= k1;
            h1 = hash_rotl_32(h1, 13);
            h1 = h1*5 + 0xe6546b64;
        }
    }

    /* tail */
    {
        const uint8_t *tail = (const uint8_t *)(data + nblocks*4);

        uint32_t k1 = 0;

        switch (len & 3) {
        case 3: k1 ^= tail[2] << 16;
        case 2: k1 ^= tail[1] << 8;
        case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
            k1 *= c2; h1 ^= k1;
        }
    }

    /* finalization */
    h1 ^= len;

    h1 = hash_fmix_32(h1);

    return h1;
}

UNUSED static inline void
hash_x86_128(const void *key, const int len, uint32_t seed,
    uint64_t r_out[2]) {
    const uint8_t *data = (const uint8_t *)key;
    const int nblocks = len / 16;

    uint32_t h1 = seed;
    uint32_t h2 = seed;
    uint32_t h3 = seed;
    uint32_t h4 = seed;

    const uint32_t c1 = 0x239b961b;
    const uint32_t c2 = 0xab0e9789;
    const uint32_t c3 = 0x38b34ae5;
    const uint32_t c4 = 0xa1e38b93;

    /* body */
    {
        const uint32_t *blocks = (const uint32_t *)(data + nblocks*16);
        int i;

        for (i = -nblocks; i; i++) {
            uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
            uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
            uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
            uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);

            k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;

            h1 = hash_rotl_32(h1, 19); h1 += h2;
            h1 = h1*5 + 0x561ccd1b;

            k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;

            h2 = hash_rotl_32(h2, 17); h2 += h3;
            h2 = h2*5 + 0x0bcaa747;

            k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;

            h3 = hash_rotl_32(h3, 15); h3 += h4;
            h3 = h3*5 + 0x96cd1c35;

            k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;

            h4 = hash_rotl_32(h4, 13); h4 += h1;
            h4 = h4*5 + 0x32ac3b17;
        }
    }

    /* tail */
    {
        const uint8_t *tail = (const uint8_t *)(data + nblocks*16);
        uint32_t k1 = 0;
        uint32_t k2 = 0;
        uint32_t k3 = 0;
        uint32_t k4 = 0;

        switch (len & 15) {
        case 15: k4 ^= tail[14] << 16;
        case 14: k4 ^= tail[13] << 8;
        case 13: k4 ^= tail[12] << 0;
            k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;

        case 12: k3 ^= tail[11] << 24;
        case 11: k3 ^= tail[10] << 16;
        case 10: k3 ^= tail[9] << 8;
        case  9: k3 ^= tail[8] << 0;
            k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;

        case  8: k2 ^= tail[7] << 24;
        case  7: k2 ^= tail[6] << 16;
        case  6: k2 ^= tail[5] << 8;
        case  5: k2 ^= tail[4] << 0;
            k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;

        case  4: k1 ^= tail[3] << 24;
        case  3: k1 ^= tail[2] << 16;
        case  2: k1 ^= tail[1] << 8;
        case  1: k1 ^= tail[0] << 0;
            k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
        }
    }

    /* finalization */
    h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

    h1 += h2; h1 += h3; h1 += h4;
    h2 += h1; h3 += h1; h4 += h1;

    h1 = hash_fmix_32(h1);
    h2 = hash_fmix_32(h2);
    h3 = hash_fmix_32(h3);
    h4 = hash_fmix_32(h4);

    h1 += h2; h1 += h3; h1 += h4;
    h2 += h1; h3 += h1; h4 += h1;

    r_out[0] = (((uint64_t)h2) << 32) | h1;
    r_out[1] = (((uint64_t)h4) << 32) | h3;
}

UNUSED static inline void
hash_x64_128(const void *key, const int len, const uint32_t seed,
    uint64_t r_out[2]) {
    const uint8_t *data = (const uint8_t *)key;
    const int nblocks = len / 16;

    uint64_t h1 = seed;
    uint64_t h2 = seed;

    const uint64_t c1 = KQU(0x87c37b91114253d5);
    const uint64_t c2 = KQU(0x4cf5ad432745937f);

    /* body */
    {
        const uint64_t *blocks = (const uint64_t *)(data);
        int i;

        for (i = 0; i < nblocks; i++) {
            uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
            uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);

            k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;

            h1 = hash_rotl_64(h1, 27); h1 += h2;
            h1 = h1*5 + 0x52dce729;

            k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;

            h2 = hash_rotl_64(h2, 31); h2 += h1;
            h2 = h2*5 + 0x38495ab5;
        }
    }

    /* tail */
    {
        const uint8_t *tail = (const uint8_t *)(data + nblocks*16);
        uint64_t k1 = 0;
        uint64_t k2 = 0;

        switch (len & 15) {
        case 15: k2 ^= ((uint64_t)(tail[14])) << 48; /* falls through */
        case 14: k2 ^= ((uint64_t)(tail[13])) << 40; /* falls through */
        case 13: k2 ^= ((uint64_t)(tail[12])) << 32; /* falls through */
        case 12: k2 ^= ((uint64_t)(tail[11])) << 24; /* falls through */
        case 11: k2 ^= ((uint64_t)(tail[10])) << 16; /* falls through */
        case 10: k2 ^= ((uint64_t)(tail[9])) << 8; /* falls through */
        case  9: k2 ^= ((uint64_t)(tail[8])) << 0;
            k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
            /* falls through */
        case  8: k1 ^= ((uint64_t)(tail[7])) << 56; /* falls through */
        case  7: k1 ^= ((uint64_t)(tail[6])) << 48; /* falls through */
        case  6: k1 ^= ((uint64_t)(tail[5])) << 40; /* falls through */
        case  5: k1 ^= ((uint64_t)(tail[4])) << 32; /* falls through */
        case  4: k1 ^= ((uint64_t)(tail[3])) << 24; /* falls through */
        case  3: k1 ^= ((uint64_t)(tail[2])) << 16; /* falls through */
        case  2: k1 ^= ((uint64_t)(tail[1])) << 8; /* falls through */
        case  1: k1 ^= ((uint64_t)(tail[0])) << 0;
            k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
        }
    }

    /* finalization */
    h1 ^= len; h2 ^= len;

    h1 += h2;
    h2 += h1;

    h1 = hash_fmix_64(h1);
    h2 = hash_fmix_64(h2);

    h1 += h2;
    h2 += h1;

    r_out[0] = h1;
    r_out[1] = h2;
}

/******************************************************************************/
/* API. */
static inline void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
    assert(len <= INT_MAX); /* Unfortunate implementation limitation. */

#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
    hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
#else
    {
        uint64_t hashes[2];
        hash_x86_128(key, (int)len, seed, hashes);
        r_hash[0] = (size_t)hashes[0];
        r_hash[1] = (size_t)hashes[1];
    }
#endif
}

#endif /* JEMALLOC_INTERNAL_HASH_H */
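The hash() wrapper at the bottom of this header is the only entry point the rest of jemalloc uses; everything above it is MurmurHash3 plumbing. As a quick orientation, here is a minimal usage sketch. It is illustrative only (not part of the commit), assumes it is compiled inside the jemalloc tree with the internal headers already included, and the function name and seed value are made up.

#include <stdio.h>
#include <string.h>

/* Illustrative only: hash a NUL-terminated key into two size_t words. */
static void
example_hash_usage(const char *key) {
    size_t r_hash[2];             /* hash() always fills two output words. */
    const uint32_t seed = 0x5eed; /* arbitrary; fixed once at startup in practice */

    hash(key, strlen(key), seed, r_hash);
    printf("%zx %zx\n", r_hash[0], r_hash[1]);
}

On 64-bit little-endian builds this dispatches to hash_x64_128(); elsewhere it falls back to hash_x86_128() and truncates each 64-bit half into a size_t.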
deps/jemalloc/include/jemalloc/internal/hooks.h  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_HOOKS_H
#define JEMALLOC_INTERNAL_HOOKS_H

extern JEMALLOC_EXPORT void (*hooks_arena_new_hook)();
extern JEMALLOC_EXPORT void (*hooks_libc_hook)();

#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)

#define open JEMALLOC_HOOK(open, hooks_libc_hook)
#define read JEMALLOC_HOOK(read, hooks_libc_hook)
#define write JEMALLOC_HOOK(write, hooks_libc_hook)
#define readlink JEMALLOC_HOOK(readlink, hooks_libc_hook)
#define close JEMALLOC_HOOK(close, hooks_libc_hook)
#define creat JEMALLOC_HOOK(creat, hooks_libc_hook)
#define secure_getenv JEMALLOC_HOOK(secure_getenv, hooks_libc_hook)
/* Note that this is undef'd and re-define'd in src/prof.c. */
#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook)

#endif /* JEMALLOC_INTERNAL_HOOKS_H */
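The JEMALLOC_HOOK macro above relies on the comma operator: it evaluates the hook (if one is installed) for its side effect and then yields the wrapped function, so a call such as open(...) becomes "run the hook, then call open". The following standalone sketch, with made-up names and illustrative only, shows the same pattern outside jemalloc:

#include <stdio.h>

static void (*demo_hook)(void) = NULL;

static void notify(void) { puts("hook fired before call"); }

/* Same shape as JEMALLOC_HOOK: run the hook (if set) via the comma
 * operator, then evaluate to the wrapped function. */
#define DEMO_HOOK(fn, hook) ((void)((hook) != NULL && ((hook)(), 0)), fn)

static int wrapped_call(int x) { return x * 2; }

int main(void) {
    demo_hook = notify;
    int r = DEMO_HOOK(wrapped_call, demo_hook)(21); /* prints, then returns 42 */
    printf("%d\n", r);
    return 0;
}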
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"
# ifdef _WIN64
# if LG_VADDR <= 32
# error Generate the headers using x64 vcargs
# endif
# else
# if LG_VADDR > 32
# undef LG_VADDR
# define LG_VADDR 32
# endif
# endif
#else
# include <sys/param.h>
# include <sys/mman.h>
# if !defined(__pnacl__) && !defined(__native_client__)
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# if defined(SYS_open) && defined(__aarch64__)
/* Android headers may define SYS_open to __NR_open even though
* __NR_open may not exist on AArch64 (superseded by __NR_openat). */
# undef SYS_open
# endif
# include <sys/uio.h>
# endif
# include <pthread.h>
# include <signal.h>
# ifdef JEMALLOC_OS_UNFAIR_LOCK
# include <os/lock.h>
# endif
# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
# include <sched.h>
# endif
# include <errno.h>
# include <sys/time.h>
# include <time.h>
# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
# include <mach/mach_time.h>
# endif
#endif
#include <sys/types.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#ifndef SSIZE_MAX
# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1))
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
# ifdef JEMALLOC_HAS_RESTRICT
# define restrict __restrict
# endif
/* Disable warnings about deprecated system functions. */
# pragma warning(disable: 4996)
#if _MSC_VER < 1800
static int
isblank(int c) {
    return (c == '\t' || c == ' ');
}
#endif
#else
# include <unistd.h>
#endif
#include <fcntl.h>
#endif /* JEMALLOC_INTERNAL_H */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
#undef JEMALLOC_PREFIX
#undef JEMALLOC_CPREFIX
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
#undef JEMALLOC_OVERRIDE___LIBC_CALLOC
#undef JEMALLOC_OVERRIDE___LIBC_FREE
#undef JEMALLOC_OVERRIDE___LIBC_MALLOC
#undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#undef JEMALLOC_OVERRIDE___LIBC_REALLOC
#undef JEMALLOC_OVERRIDE___LIBC_VALLOC
#undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#undef JEMALLOC_PRIVATE_NAMESPACE
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#undef CPU_SPINWAIT
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#undef HAVE_CPU_SPINWAIT
/*
* Number of significant bits in virtual addresses. This may be less than the
* total number of bits in a pointer, e.g. on x64, for which the uppermost 16
* bits are the same as bit 47.
*/
#undef LG_VADDR
/* Defined if C11 atomics are available. */
#undef JEMALLOC_C11_ATOMICS
/* Defined if GCC __atomic atomics are available. */
#undef JEMALLOC_GCC_ATOMIC_ATOMICS
/* Defined if GCC __sync atomics are available. */
#undef JEMALLOC_GCC_SYNC_ATOMICS
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
*/
#undef JEMALLOC_HAVE_BUILTIN_CLZ
/*
* Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
*/
#undef JEMALLOC_OS_UNFAIR_LOCK
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
#undef JEMALLOC_OSSPIN
/* Defined if syscall(2) is usable. */
#undef JEMALLOC_USE_SYSCALL
/*
* Defined if secure_getenv(3) is available.
*/
#undef JEMALLOC_HAVE_SECURE_GETENV
/*
* Defined if issetugid(2) is available.
*/
#undef JEMALLOC_HAVE_ISSETUGID
/* Defined if pthread_atfork(3) is available. */
#undef JEMALLOC_HAVE_PTHREAD_ATFORK
/* Defined if pthread_setname_np(3) is available. */
#undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
/*
* Defined if mach_absolute_time() is available.
*/
#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
#undef JEMALLOC_MALLOC_THREAD_CLEANUP
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
#undef JEMALLOC_THREADED_INIT
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
#undef JEMALLOC_MUTEX_INIT_CB
/* Non-empty if the tls_model attribute is supported. */
#undef JEMALLOC_TLS_MODEL
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
#undef JEMALLOC_DEBUG
/* JEMALLOC_STATS enables statistics calculation. */
#undef JEMALLOC_STATS
/* JEMALLOC_PROF enables allocation profiling. */
#undef JEMALLOC_PROF
/* Use libunwind for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBUNWIND
/* Use libgcc for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBGCC
/* Use gcc intrinsics for profile backtracing if defined. */
#undef JEMALLOC_PROF_GCC
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
* segment (DSS).
*/
#undef JEMALLOC_DSS
/* Support memory filling (junk/zero). */
#undef JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
#undef JEMALLOC_UTRACE
/* Support optional abort() on OOM. */
#undef JEMALLOC_XMALLOC
/* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#undef LG_QUANTUM
/* One page is 2^LG_PAGE bytes. */
#undef LG_PAGE
/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
* explicit huge page support are separately configured.
*/
#undef LG_HUGEPAGE
/*
* If defined, adjacent virtual memory mappings with identical attributes
* automatically coalesce, and they fragment when changes are made to subranges.
* This is the normal order of things for mmap()/munmap(), but on Windows
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
* mappings do *not* coalesce/fragment.
*/
#undef JEMALLOC_MAPS_COALESCE
/*
* If defined, retain memory for later reuse by default rather than using e.g.
* munmap() to unmap freed extents. This is enabled on 64-bit Linux because
* common sequences of mmap()/munmap() calls will cause virtual memory map
* holes.
*/
#undef JEMALLOC_RETAIN
/* TLS is used to map arenas and magazine caches to threads. */
#undef JEMALLOC_TLS
/*
* Used to mark unreachable code to quiet "end of non-void" compiler warnings.
* Don't use this directly; instead use unreachable() from util.h
*/
#undef JEMALLOC_INTERNAL_UNREACHABLE
/*
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
* use ffs_*() from util.h.
*/
#undef JEMALLOC_INTERNAL_FFSLL
#undef JEMALLOC_INTERNAL_FFSL
#undef JEMALLOC_INTERNAL_FFS
/*
* If defined, explicitly attempt to more uniformly distribute large allocation
* pointer alignments across all cache indices.
*/
#undef JEMALLOC_CACHE_OBLIVIOUS
/*
* If defined, enable logging facilities. We make this a configure option to
* avoid taking extra branches everywhere.
*/
#undef JEMALLOC_LOG
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
#undef JEMALLOC_ZONE
/*
* Methods for determining whether the OS overcommits.
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
* /proc/sys/vm.overcommit_memory file.
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
*/
#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/* Defined if madvise(2) is available. */
#undef JEMALLOC_HAVE_MADVISE
/*
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
* arguments to madvise(2).
*/
#undef JEMALLOC_HAVE_MADVISE_HUGE
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
* will be discarded rather than swapped out.
* madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
* defined, this immediately discards pages,
* such that new pages will be demand-zeroed if
* the address region is later touched;
* otherwise this behaves similarly to
* MADV_FREE, though typically with higher
* system overhead.
*/
#undef JEMALLOC_PURGE_MADVISE_FREE
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
#undef JEMALLOC_DEFINE_MADVISE_FREE
/*
* Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
*/
#undef JEMALLOC_MADVISE_DONTDUMP
/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
#undef JEMALLOC_THP
/* Define if operating system has alloca.h header. */
#undef JEMALLOC_HAS_ALLOCA_H
/* C99 restrict keyword supported. */
#undef JEMALLOC_HAS_RESTRICT
/* For use by hash code. */
#undef JEMALLOC_BIG_ENDIAN
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#undef LG_SIZEOF_INT
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#undef LG_SIZEOF_LONG_LONG
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
#undef JEMALLOC_GLIBC_MALLOC_HOOK
/* glibc memalign hook. */
#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
/* pthread support */
#undef JEMALLOC_HAVE_PTHREAD
/* dlsym() support */
#undef JEMALLOC_HAVE_DLSYM
/* Adaptive mutex support in pthreads. */
#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
/* GNU specific sched_getcpu support */
#undef JEMALLOC_HAVE_SCHED_GETCPU
/* GNU specific sched_setaffinity support */
#undef JEMALLOC_HAVE_SCHED_SETAFFINITY
/*
* If defined, all the features necessary for background threads are present.
*/
#undef JEMALLOC_BACKGROUND_THREAD
/*
* If defined, jemalloc symbols are not exported (doesn't work when
* JEMALLOC_PREFIX is not defined).
*/
#undef JEMALLOC_EXPORT
/* config.malloc_conf options string. */
#undef JEMALLOC_CONFIG_MALLOC_CONF
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#undef JEMALLOC_IS_MALLOC
/*
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
*/
#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTERNS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/tsd_types.h"

/* TSD checks this to set thread local slow state accordingly. */
extern bool malloc_slow;

/* Run-time options. */
extern bool opt_abort;
extern bool opt_abort_conf;
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
extern unsigned opt_narenas;

/* Number of CPUs. */
extern unsigned ncpus;

/* Number of arenas used for automatic multiplexing of threads and arenas. */
extern unsigned narenas_auto;

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern atomic_p_t arenas[];

void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
void arena_set(unsigned ind, arena_t *arena);
unsigned narenas_total_get(void);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void arenas_tdata_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
bool malloc_initialized(void);

#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_INCLUDES_H
#define JEMALLOC_INTERNAL_INCLUDES_H
/*
* jemalloc can conceptually be broken into components (arena, tcache, etc.),
* but there are circular dependencies that cannot be broken without
* substantial performance degradation.
*
 * Historically, we dealt with this by splitting each header into four sections
 * (types, structs, externs, and inlines), and included each header file
 * multiple times in this file, picking out the portion we want on each pass
 * using the following #defines:
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
* JEMALLOC_H_STRUCTS : Data structures.
* JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
* JEMALLOC_H_INLINES : Inline functions.
*
* We're moving toward a world in which the dependencies are explicit; each file
* will #include the headers it depends on (rather than relying on them being
* implicitly available via this file including every header file in the
* project).
*
* We're now in an intermediate state: we've broken up the header files to avoid
* having to include each one multiple times, but have not yet moved the
* dependency information into the header files (i.e. we still rely on the
* ordering in this file to ensure all a header's dependencies are available in
* its translation unit). Each component is now broken up into multiple header
* files, corresponding to the sections above (e.g. instead of "foo.h", we now
* have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h").
*
* Those files which have been converted to explicitly include their
* inter-component dependencies are now in the initial HERMETIC HEADERS
* section. All headers may still rely on jemalloc_preamble.h (which, by fiat,
* must be included first in every translation unit) for system headers and
* global jemalloc definitions, however.
*/
/******************************************************************************/
/* TYPES */
/******************************************************************************/
#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/base_types.h"
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/prof_types.h"
/******************************************************************************/
/* STRUCTS */
/******************************************************************************/
#include "jemalloc/internal/arena_structs_a.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/base_structs.h"
#include "jemalloc/internal/prof_structs.h"
#include "jemalloc/internal/arena_structs_b.h"
#include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/background_thread_structs.h"
/******************************************************************************/
/* EXTERNS */
/******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/extent_externs.h"
#include "jemalloc/internal/base_externs.h"
#include "jemalloc/internal/arena_externs.h"
#include "jemalloc/internal/large_externs.h"
#include "jemalloc/internal/tcache_externs.h"
#include "jemalloc/internal/prof_externs.h"
#include "jemalloc/internal/background_thread_externs.h"
/******************************************************************************/
/* INLINES */
/******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
#include "jemalloc/internal/base_inlines.h"
/*
* Include portions of arena code interleaved with tcache code in order to
* resolve circular dependencies.
*/
#include "jemalloc/internal/prof_inlines_a.h"
#include "jemalloc/internal/arena_inlines_a.h"
#include "jemalloc/internal/extent_inlines.h"
#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
#include "jemalloc/internal/tcache_inlines.h"
#include "jemalloc/internal/arena_inlines_b.h"
#include "jemalloc/internal/jemalloc_internal_inlines_c.h"
#include "jemalloc/internal/prof_inlines_b.h"
#include "jemalloc/internal/background_thread_inlines.h"
#endif /* JEMALLOC_INTERNAL_INCLUDES_H */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_INLINES_A_H
#define JEMALLOC_INTERNAL_INLINES_A_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/ticker.h"

JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
malloc_getcpu(void) {
    assert(have_percpu_arena);
#if defined(JEMALLOC_HAVE_SCHED_GETCPU)
    return (malloc_cpuid_t)sched_getcpu();
#else
    not_reached();
    return -1;
#endif
}

/* Return the chosen arena index based on current cpu. */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_choose(void) {
    assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));

    malloc_cpuid_t cpuid = malloc_getcpu();
    assert(cpuid >= 0);

    unsigned arena_ind;
    if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus / 2)) {
        arena_ind = cpuid;
    } else {
        assert(opt_percpu_arena == per_phycpu_arena);
        /* Hyper threads on the same physical CPU share arena. */
        arena_ind = cpuid - ncpus / 2;
    }

    return arena_ind;
}

/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_ind_limit(percpu_arena_mode_t mode) {
    assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
    if (mode == per_phycpu_arena && ncpus > 1) {
        if (ncpus % 2) {
            /* This likely means a misconfig. */
            return ncpus / 2 + 1;
        }
        return ncpus / 2;
    } else {
        return ncpus;
    }
}

static inline arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
    arena_tdata_t *tdata;
    arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);

    if (unlikely(arenas_tdata == NULL)) {
        /* arenas_tdata hasn't been initialized yet. */
        return arena_tdata_get_hard(tsd, ind);
    }
    if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
        /*
         * ind is invalid, cache is old (too small), or tdata to be
         * initialized.
         */
        return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) : NULL);
    }

    tdata = &arenas_tdata[ind];
    if (likely(tdata != NULL) || !refresh_if_missing) {
        return tdata;
    }
    return arena_tdata_get_hard(tsd, ind);
}

static inline arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
    arena_t *ret;

    assert(ind < MALLOCX_ARENA_LIMIT);

    ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
    if (unlikely(ret == NULL)) {
        if (init_if_missing) {
            ret = arena_init(tsdn, ind,
                (extent_hooks_t *)&extent_hooks_default);
        }
    }
    return ret;
}

static inline ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind) {
    arena_tdata_t *tdata;

    tdata = arena_tdata_get(tsd, ind, true);
    if (unlikely(tdata == NULL)) {
        return NULL;
    }
    return &tdata->decay_ticker;
}

JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
    assert(binind < NBINS);
    return &tcache->bins_small[binind];
}

JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
    assert(binind >= NBINS && binind < nhbins);
    return &tcache->bins_large[binind - NBINS];
}

JEMALLOC_ALWAYS_INLINE bool
tcache_available(tsd_t *tsd) {
    /*
     * Thread specific auto tcache might be unavailable if: 1) during tcache
     * initialization, or 2) disabled through thread.tcache.enabled mallctl
     * or config options.  This check covers all cases.
     */
    if (likely(tsd_tcache_enabled_get(tsd))) {
        /* Associated arena == NULL implies tcache init in progress. */
        assert(tsd_tcachep_get(tsd)->arena == NULL ||
            tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail != NULL);
        return true;
    }

    return false;
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd) {
    if (!tcache_available(tsd)) {
        return NULL;
    }

    return tsd_tcachep_get(tsd);
}

static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
    /* arena is the current context.  Reentry from a0 is not allowed. */
    assert(arena != arena_get(tsd_tsdn(tsd), 0, false));

    bool fast = tsd_fast(tsd);
    assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
    ++*tsd_reentrancy_levelp_get(tsd);
    if (fast) {
        /* Prepare slow path for reentrancy. */
        tsd_slow_update(tsd);
        assert(tsd->state == tsd_state_nominal_slow);
    }
}

static inline void
post_reentrancy(tsd_t *tsd) {
    int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
    assert(*reentrancy_level > 0);
    if (--*reentrancy_level == 0) {
        tsd_slow_update(tsd);
    }
}

#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
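percpu_arena_choose() above maps a logical CPU id to an arena index: in plain per-CPU mode every logical CPU gets its own arena, while in per-physical-CPU mode hyperthread siblings (assumed to be cpu i and cpu i + ncpus/2) collapse onto one arena. A standalone sketch of that mapping, illustrative only and with made-up names and an assumed sibling layout:

#include <stdio.h>

static unsigned
map_cpu_to_arena(unsigned cpuid, unsigned ncpus, int per_physical) {
    if (!per_physical || cpuid < ncpus / 2) {
        return cpuid;             /* one arena per logical CPU */
    }
    return cpuid - ncpus / 2;     /* hyperthread siblings share one arena */
}

int main(void) {
    unsigned ncpus = 8;
    for (unsigned c = 0; c < ncpus; c++) {
        printf("cpu %u -> arena %u (percpu), arena %u (phycpu)\n",
            c, map_cpu_to_arena(c, ncpus, 0), map_cpu_to_arena(c, ncpus, 1));
    }
    return 0;
}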
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_INLINES_B_H
#define JEMALLOC_INTERNAL_INLINES_B_H

#include "jemalloc/internal/rtree.h"

/* Choose an arena based on a per-thread value. */
static inline arena_t *
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
    arena_t *ret;

    if (arena != NULL) {
        return arena;
    }

    /* During reentrancy, arena 0 is the safest bet. */
    if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
        return arena_get(tsd_tsdn(tsd), 0, true);
    }

    ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
    if (unlikely(ret == NULL)) {
        ret = arena_choose_hard(tsd, internal);
        assert(ret);
        if (tcache_available(tsd)) {
            tcache_t *tcache = tcache_get(tsd);
            if (tcache->arena != NULL) {
                /* See comments in tcache_data_init().*/
                assert(tcache->arena ==
                    arena_get(tsd_tsdn(tsd), 0, false));
                if (tcache->arena != ret) {
                    tcache_arena_reassociate(tsd_tsdn(tsd), tcache, ret);
                }
            } else {
                tcache_arena_associate(tsd_tsdn(tsd), tcache, ret);
            }
        }
    }

    /*
     * Note that for percpu arena, if the current arena is outside of the
     * auto percpu arena range, (i.e. thread is assigned to a manually
     * managed arena), then percpu arena is skipped.
     */
    if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
        !internal && (arena_ind_get(ret) <
        percpu_arena_ind_limit(opt_percpu_arena)) &&
        (ret->last_thd != tsd_tsdn(tsd))) {
        unsigned ind = percpu_arena_choose();
        if (arena_ind_get(ret) != ind) {
            percpu_arena_update(tsd, ind);
            ret = tsd_arena_get(tsd);
        }
        ret->last_thd = tsd_tsdn(tsd);
    }

    return ret;
}

static inline arena_t *
arena_choose(tsd_t *tsd, arena_t *arena) {
    return arena_choose_impl(tsd, arena, false);
}

static inline arena_t *
arena_ichoose(tsd_t *tsd, arena_t *arena) {
    return arena_choose_impl(tsd, arena, true);
}

static inline bool
arena_is_auto(arena_t *arena) {
    assert(narenas_auto > 0);
    return (arena_ind_get(arena) < narenas_auto);
}

JEMALLOC_ALWAYS_INLINE extent_t *
iealloc(tsdn_t *tsdn, const void *ptr) {
    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

    return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
        (uintptr_t)ptr, true);
}

#endif /* JEMALLOC_INTERNAL_INLINES_B_H */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_INLINES_C_H
#define JEMALLOC_INTERNAL_INLINES_C_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/witness.h"

/*
 * Translating the names of the 'i' functions:
 *   Abbreviations used in the first part of the function name (before
 *   alloc/dalloc) describe what that function accomplishes:
 *     a: arena (query)
 *     s: size (query, or sized deallocation)
 *     e: extent (query)
 *     p: aligned (allocates)
 *     vs: size (query, without knowing that the pointer is into the heap)
 *     r: rallocx implementation
 *     x: xallocx implementation
 *   Abbreviations used in the second part of the function name (after
 *   alloc/dalloc) describe the arguments it takes
 *     z: whether to return zeroed memory
 *     t: accepts a tcache_t * parameter
 *     m: accepts an arena_t * parameter
 */

JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(tsdn_t *tsdn, const void *ptr) {
    assert(ptr != NULL);

    return arena_aalloc(tsdn, ptr);
}

JEMALLOC_ALWAYS_INLINE size_t
isalloc(tsdn_t *tsdn, const void *ptr) {
    assert(ptr != NULL);

    return arena_salloc(tsdn, ptr);
}

JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
    bool is_internal, arena_t *arena, bool slow_path) {
    void *ret;

    assert(size != 0);
    assert(!is_internal || tcache == NULL);
    assert(!is_internal || arena == NULL || arena_is_auto(arena));
    if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
        witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
            WITNESS_RANK_CORE, 0);
    }

    ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
    if (config_stats && is_internal && likely(ret != NULL)) {
        arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
    }
    return ret;
}

JEMALLOC_ALWAYS_INLINE void *
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
    return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
        NULL, slow_path);
}

JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_internal, arena_t *arena) {
    void *ret;

    assert(usize != 0);
    assert(usize == sz_sa2u(usize, alignment));
    assert(!is_internal || tcache == NULL);
    assert(!is_internal || arena == NULL || arena_is_auto(arena));
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
    assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
    if (config_stats && is_internal && likely(ret != NULL)) {
        arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
    }
    return ret;
}

JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena) {
    return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
    return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
        tcache_get(tsd), false, NULL);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(tsdn_t *tsdn, const void *ptr) {
    return arena_vsalloc(tsdn, ptr);
}

JEMALLOC_ALWAYS_INLINE void
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
    bool is_internal, bool slow_path) {
    assert(ptr != NULL);
    assert(!is_internal || tcache == NULL);
    assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);
    if (config_stats && is_internal) {
        arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
    }
    if (!is_internal && !tsdn_null(tsdn) &&
        tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
        assert(tcache == NULL);
    }
    arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr) {
    idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
}

JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);
    arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache,
    arena_t *arena) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);
    void *p;
    size_t usize, copysize;

    usize = sz_sa2u(size + extra, alignment);
    if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
        return NULL;
    }
    p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
    if (p == NULL) {
        if (extra == 0) {
            return NULL;
        }
        /* Try again, without extra this time. */
        usize = sz_sa2u(size, alignment);
        if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
            return NULL;
        }
        p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
        if (p == NULL) {
            return NULL;
        }
    }
    /*
     * Copy at most size bytes (not size+extra), since the caller has no
     * expectation that the extra bytes will be reliably preserved.
     */
    copysize = (size < oldsize) ? size : oldsize;
    memcpy(p, ptr, copysize);
    isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
    return p;
}

JEMALLOC_ALWAYS_INLINE void *
iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) {
    assert(ptr != NULL);
    assert(size != 0);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment - 1)) != 0) {
        /*
         * Existing object alignment is inadequate; allocate new space
         * and copy.
         */
        return iralloct_realign(tsdn, ptr, oldsize, size, 0, alignment,
            zero, tcache, arena);
    }

    return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
        tcache);
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero) {
    return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
        tcache_get(tsd), NULL);
}

JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero) {
    assert(ptr != NULL);
    assert(size != 0);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment - 1)) != 0) {
        /* Existing object alignment is inadequate. */
        return true;
    }

    return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero);
}

JEMALLOC_ALWAYS_INLINE int
iget_defrag_hint(tsdn_t *tsdn, void* ptr) {
    int defrag = 0;
    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
    szind_t szind;
    bool is_slab;
    rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
        true, &szind, &is_slab);
    if (likely(is_slab)) {
        /* Small allocation. */
        extent_t *slab = iealloc(tsdn, ptr);
        arena_t *arena = extent_arena_get(slab);
        szind_t binind = extent_szind_get(slab);
        bin_t *bin = &arena->bins[binind];
        malloc_mutex_lock(tsdn, &bin->lock);
        /* don't bother moving allocations from the slab currently used for new allocations */
        if (slab != bin->slabcur) {
            int free_in_slab = extent_nfree_get(slab);
            if (free_in_slab) {
                const bin_info_t *bin_info = &bin_infos[binind];
                unsigned long curslabs = bin->stats.curslabs;
                size_t curregs = bin->stats.curregs;
                if (bin->slabcur) {
                    /* remove slabcur from the overall utilization */
                    curregs -= bin_info->nregs - extent_nfree_get(bin->slabcur);
                    curslabs -= 1;
                }
                /* Compare the utilization ratio of the slab in question to the total average,
                 * to avoid precision lost and division, we do that by extrapolating the usage
                 * of the slab as if all slabs have the same usage. If this slab is less used
                 * than the average, we'll prefer to evict the data to hopefully more used ones */
                defrag = (bin_info->nregs - free_in_slab) * curslabs <= curregs;
            }
        }
        malloc_mutex_unlock(tsdn, &bin->lock);
    }
    return defrag;
}

#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
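iget_defrag_hint() above (a Redis addition used by activedefrag) decides whether an allocation is worth moving by comparing its slab's utilization to the bin-wide average, using a cross-multiplication instead of a division. A standalone restatement of just that comparison, illustrative only and with made-up names:

#include <stdbool.h>
#include <stdio.h>

/* A slab is a defrag candidate when its used regions, scaled by the number of
 * slabs, do not exceed the total used regions across the bin, i.e.
 * used/nregs <= curregs/(curslabs*nregs) evaluated without division. */
static bool
should_defrag(unsigned nregs, unsigned free_in_slab,
    unsigned long curslabs, size_t curregs) {
    unsigned used_in_slab = nregs - free_in_slab;
    return (size_t)used_in_slab * curslabs <= curregs;
}

int main(void) {
    /* 3 slabs of 128 regions, 288 regions in use overall (avg 96 per slab). */
    printf("%d\n", should_defrag(128, 96, 3, 288)); /* 32 used: below average -> 1 */
    printf("%d\n", should_defrag(128, 16, 3, 288)); /* 112 used: above average -> 0 */
    return 0;
}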
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_MACROS_H
#define JEMALLOC_INTERNAL_MACROS_H
#ifdef JEMALLOC_DEBUG
# define JEMALLOC_ALWAYS_INLINE static inline
#else
# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
#endif
#ifdef _MSC_VER
# define inline _inline
#endif
#define UNUSED JEMALLOC_ATTR(unused)
#define ZU(z) ((size_t)z)
#define ZD(z) ((ssize_t)z)
#define QU(q) ((uint64_t)q)
#define QD(q) ((int64_t)q)
#define KZU(z) ZU(z##ULL)
#define KZD(z) ZD(z##LL)
#define KQU(q) QU(q##ULL)
#define KQD(q) QI(q##LL)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
# define restrict
#endif
/* Various function pointers are static and immutable except during testing. */
#ifdef JEMALLOC_JET
# define JET_MUTABLE
#else
# define JET_MUTABLE const
#endif
#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__
#endif /* JEMALLOC_INTERNAL_MACROS_H */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_TYPES_H
#define JEMALLOC_INTERNAL_TYPES_H
/* Page size index type. */
typedef unsigned pszind_t;

/* Size class index type. */
typedef unsigned szind_t;

/* Processor / core id type. */
typedef int malloc_cpuid_t;
/*
* Flags bits:
*
* a: arena
* t: tcache
* 0: unused
* z: zero
* n: alignment
*
* aaaaaaaa aaaatttt tttttttt 0znnnnnn
*/
#define MALLOCX_ARENA_BITS 12
#define MALLOCX_TCACHE_BITS 12
#define MALLOCX_LG_ALIGN_BITS 6
#define MALLOCX_ARENA_SHIFT 20
#define MALLOCX_TCACHE_SHIFT 8
#define MALLOCX_ARENA_MASK \
(((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
/* NB: Arena index bias decreases the maximum number of arenas by 1. */
#define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1)
#define MALLOCX_TCACHE_MASK \
(((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
(ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags) \
(MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define MALLOCX_ZERO_GET(flags) \
((bool)(flags & MALLOCX_ZERO))
#define MALLOCX_TCACHE_GET(flags) \
(((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
#define MALLOCX_ARENA_GET(flags) \
(((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
/* Smallest size class to support. */
#define TINY_MIN (1U << LG_TINY_MIN)
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
# define LG_QUANTUM 4
# endif
# ifdef __ia64__
# define LG_QUANTUM 4
# endif
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
# define LG_QUANTUM 4
# endif
# ifdef __arm__
# define LG_QUANTUM 3
# endif
# ifdef __aarch64__
# define LG_QUANTUM 4
# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
# ifdef __m68k__
# define LG_QUANTUM 3
# endif
# ifdef __mips__
# define LG_QUANTUM 3
# endif
# ifdef __nios2__
# define LG_QUANTUM 3
# endif
# ifdef __or1k__
# define LG_QUANTUM 3
# endif
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
# if defined(__riscv) || defined(__riscv__)
# define LG_QUANTUM 4
# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
# if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \
defined(__SH4_SINGLE_ONLY__))
# define LG_QUANTUM 4
# endif
# ifdef __tile__
# define LG_QUANTUM 4
# endif
# ifdef __le32__
# define LG_QUANTUM 4
# endif
# ifndef LG_QUANTUM
# error "Unknown minimum alignment for architecture; specify via "
"--with-lg-quantum"
# endif
#endif
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)
/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)
/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
(((a) + LONG_MASK) & ~LONG_MASK)
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)
/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
(((a) + PTR_MASK) & ~PTR_MASK)
/*
* Maximum size of L1 cache line. This is used to avoid cache line aliasing.
* In addition, this controls the spacing of cacheline-spaced size classes.
*
* CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
* only handle raw constants.
*/
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)
/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
((size_t)((uintptr_t)(a) & (alignment - 1)))
/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
(((s) + (alignment - 1)) & ((~(alignment)) + 1))
/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
# ifdef _MSC_VER
# include <malloc.h>
# define alloca _alloca
# else
# ifdef JEMALLOC_HAS_ALLOCA_H
# include <alloca.h>
# else
# include <stdlib.h>
# endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
type *name = alloca(sizeof(type) * (count))
#else
# define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
#endif /* JEMALLOC_INTERNAL_TYPES_H */
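The *_CEILING macros above all rely on the same power-of-two rounding identity: add (alignment - 1) and mask with the two's complement of the alignment, which rounds a size up to the next multiple of the alignment. A standalone sketch with a few worked values, illustrative only and with a made-up macro name:

#include <stddef.h>
#include <stdio.h>

/* Round s up to the next multiple of a power-of-two alignment. */
#define DEMO_ALIGNMENT_CEILING(s, alignment) \
    (((s) + ((alignment) - 1)) & ((~(size_t)(alignment)) + 1))

int main(void) {
    printf("%zu\n", DEMO_ALIGNMENT_CEILING((size_t)13, 16)); /* 16 */
    printf("%zu\n", DEMO_ALIGNMENT_CEILING((size_t)64, 64)); /* 64 */
    printf("%zu\n", DEMO_ALIGNMENT_CEILING((size_t)65, 64)); /* 128 */
    return 0;
}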
deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in  (deleted, 100644 → 0)

#ifndef JEMALLOC_PREAMBLE_H
#define JEMALLOC_PREAMBLE_H
#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# undef JEMALLOC_IS_MALLOC
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
# define JEMALLOC_NO_RENAME
# include "../jemalloc@install_suffix@.h"
# undef JEMALLOC_NO_RENAME
#else
# define JEMALLOC_N(n) @private_namespace@##n
# include "../jemalloc@install_suffix@.h"
#endif
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif
#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#endif
#include "jemalloc/internal/jemalloc_internal_macros.h"
/*
* Note that the ordering matters here; the hook itself is name-mangled. We
* want the inclusion of hooks to happen early, so that we hook as much as
* possible.
*/
#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
# ifndef JEMALLOC_JET
# include "jemalloc/internal/private_namespace.h"
# else
# include "jemalloc/internal/private_namespace_jet.h"
# endif
#endif
#include "jemalloc/internal/hooks.h"
#ifdef JEMALLOC_DEFINE_MADVISE_FREE
# define JEMALLOC_MADV_FREE 8
#endif
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
true
#else
false
#endif
;
static const bool have_dss =
#ifdef JEMALLOC_DSS
true
#else
false
#endif
;
static const bool have_madvise_huge =
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
true
#else
false
#endif
;
static const bool config_fill =
#ifdef JEMALLOC_FILL
true
#else
false
#endif
;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
true
#else
false
#endif
;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
#else
false
#endif
;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
true
#else
false
#endif
;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
true
#else
false
#endif
;
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
true
#else
false
#endif
;
static const bool config_stats =
#ifdef JEMALLOC_STATS
true
#else
false
#endif
;
static const bool config_tls =
#ifdef JEMALLOC_TLS
true
#else
false
#endif
;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
true
#else
false
#endif
;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
true
#else
false
#endif
;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
true
#else
false
#endif
;
/*
* Undocumented, for jemalloc development use only at the moment. See the note
* in jemalloc/internal/log.h.
*/
static const bool config_log =
#ifdef JEMALLOC_LOG
true
#else
false
#endif
;
#ifdef JEMALLOC_HAVE_SCHED_GETCPU
/* Currently percpu_arena depends on sched_getcpu. */
#define JEMALLOC_PERCPU_ARENA
#endif
static const bool have_percpu_arena =
#ifdef JEMALLOC_PERCPU_ARENA
true
#else
false
#endif
;
/*
* Undocumented, and not recommended; the application should take full
* responsibility for tracking provenance.
*/
static const bool force_ivsalloc =
#ifdef JEMALLOC_FORCE_IVSALLOC
true
#else
false
#endif
;
static const bool have_background_thread =
#ifdef JEMALLOC_BACKGROUND_THREAD
true
#else
false
#endif
;
#endif /* JEMALLOC_PREAMBLE_H */
deps/jemalloc/include/jemalloc/internal/large_externs.h  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H
#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H

void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero);
bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t usize_max, bool zero);
void *large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache);

typedef void (large_dalloc_junk_t)(void *, size_t);
extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;

typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;

void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
void large_dalloc(tsdn_t *tsdn, extent_t *extent);
size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);

#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/log.h  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_LOG_H
#define JEMALLOC_INTERNAL_LOG_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
#ifdef JEMALLOC_LOG
# define JEMALLOC_LOG_VAR_BUFSIZE 1000
#else
# define JEMALLOC_LOG_VAR_BUFSIZE 1
#endif
#define JEMALLOC_LOG_BUFSIZE 4096
/*
* The log malloc_conf option is a '|'-delimited list of log_var name segments
* which should be logged. The names are themselves hierarchical, with '.' as
* the delimiter (a "segment" is just a prefix in the log namespace). So, if
* you have:
*
* log("arena", "log msg for arena"); // 1
* log("arena.a", "log msg for arena.a"); // 2
* log("arena.b", "log msg for arena.b"); // 3
* log("arena.a.a", "log msg for arena.a.a"); // 4
* log("extent.a", "log msg for extent.a"); // 5
* log("extent.b", "log msg for extent.b"); // 6
*
* And your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and
* 6 will print at runtime. You can enable logging from all log vars by
* writing "log=.".
*
* None of this should be regarded as a stable API for right now. It's intended
* as a debugging interface, to let us keep around some of our printf-debugging
* statements.
*/
extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
extern atomic_b_t log_init_done;

typedef struct log_var_s log_var_t;
struct log_var_s {
    /*
     * Lowest bit is "inited", second lowest is "enabled".  Putting them in
     * a single word lets us avoid any fences on weak architectures.
     */
    atomic_u_t state;
    const char *name;
};
#define LOG_NOT_INITIALIZED 0U
#define LOG_INITIALIZED_NOT_ENABLED 1U
#define LOG_ENABLED 2U
#define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str}
/*
* Returns the value we should assume for state (which is not necessarily
* accurate; if logging is done before logging has finished initializing, then
* we default to doing the safe thing by logging everything).
*/
unsigned log_var_update_state(log_var_t *log_var);
/* We factor out the metadata management to allow us to test more easily. */
#define log_do_begin(log_var) \
if (config_log) { \
unsigned log_state = atomic_load_u(&(log_var).state, \
ATOMIC_RELAXED); \
if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \
log_state = log_var_update_state(&(log_var)); \
assert(log_state != LOG_NOT_INITIALIZED); \
} \
if (log_state == LOG_ENABLED) { \
{
/* User code executes here. */
#define log_do_end(log_var) \
} \
} \
}
/*
* MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during
* preprocessing. To work around this, we take all potential extra arguments in
* a var-args functions. Since a varargs macro needs at least one argument in
* the "...", we accept the format string there, and require that the first
* argument in this "..." is a const char *.
*/
static inline void
log_impl_varargs(const char *name, ...) {
    char buf[JEMALLOC_LOG_BUFSIZE];
    va_list ap;

    va_start(ap, name);
    const char *format = va_arg(ap, const char *);
    size_t dst_offset = 0;
    dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name);
    dst_offset += malloc_vsnprintf(buf + dst_offset,
        JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap);
    dst_offset += malloc_snprintf(buf + dst_offset,
        JEMALLOC_LOG_BUFSIZE - dst_offset, "\n");
    va_end(ap);

    malloc_write(buf);
}
/* Call as log("log.var.str", "format_string %d", arg_for_format_string); */
#define LOG(log_var_str, ...) \
do { \
static log_var_t log_var = LOG_VAR_INIT(log_var_str); \
log_do_begin(log_var) \
log_impl_varargs((log_var).name, __VA_ARGS__); \
log_do_end(log_var) \
} while (0)
#endif /* JEMALLOC_INTERNAL_LOG_H */
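
As a quick orientation aid, here is a minimal usage sketch of the LOG() macro defined above. The log var name "extent.split", the function name, and the message are invented for illustration; a call like this only prints when the build defines JEMALLOC_LOG and the run sets a matching prefix, e.g. MALLOC_CONF="log=extent" (or "log=." for everything).

/* Hypothetical call site, illustrative only. */
static void
extent_split_log_example(size_t orig_size, size_t left_size) {
    /* Compiles to nothing observable unless log=extent (or a parent prefix) is enabled. */
    LOG("extent.split", "splitting %zu-byte extent, keeping %zu bytes",
        orig_size, left_size);
}

The static log_var_t hidden inside LOG() caches the enabled/disabled decision per call site, so disabled call sites cost only a relaxed atomic load.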
deps/jemalloc/include/jemalloc/internal/malloc_io.h
deleted
100644 → 0
View file @
7ff7536e
#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
#define JEMALLOC_INTERNAL_MALLOC_IO_H
#ifdef _WIN32
# ifdef _WIN64
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX "ll"
# else
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX ""
# endif
# define FMTd32 "d"
# define FMTu32 "u"
# define FMTx32 "x"
# define FMTd64 FMT64_PREFIX "d"
# define FMTu64 FMT64_PREFIX "u"
# define FMTx64 FMT64_PREFIX "x"
# define FMTdPTR FMTPTR_PREFIX "d"
# define FMTuPTR FMTPTR_PREFIX "u"
# define FMTxPTR FMTPTR_PREFIX "x"
#else
# include <inttypes.h>
# define FMTd32 PRId32
# define FMTu32 PRIu32
# define FMTx32 PRIx32
# define FMTd64 PRId64
# define FMTu64 PRIu64
# define FMTx64 PRIx64
# define FMTdPTR PRIdPTR
# define FMTuPTR PRIuPTR
# define FMTxPTR PRIxPTR
#endif
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64
/*
* Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
* large enough for all possible uses within jemalloc.
*/
#define MALLOC_PRINTF_BUFSIZE 4096
int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
    int base);
void malloc_write(const char *s);
/*
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
* point math.
*/
size_t malloc_vsnprintf(char *str, size_t size, const char *format,
    va_list ap);
size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
    JEMALLOC_FORMAT_PRINTF(3, 4);
/*
* The caller can set write_cb and cbopaque to null to choose to print with the
* je_malloc_message hook.
*/
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap);
void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
static inline ssize_t
malloc_write_fd(int fd, const void *buf, size_t count) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
    /*
     * Use syscall(2) rather than write(2) when possible in order to avoid
     * the possibility of memory allocation within libc.  This is necessary
     * on FreeBSD; most operating systems do not have this problem though.
     *
     * syscall() returns long or int, depending on platform, so capture the
     * result in the widest plausible type to avoid compiler warnings.
     */
    long result = syscall(SYS_write, fd, buf, count);
#else
    ssize_t result = (ssize_t)write(fd, buf,
#ifdef _WIN32
        (unsigned int)
#endif
        count);
#endif
    return (ssize_t)result;
}
static inline ssize_t
malloc_read_fd(int fd, void *buf, size_t count) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
    long result = syscall(SYS_read, fd, buf, count);
#else
    ssize_t result = read(fd, buf,
#ifdef _WIN32
        (unsigned int)
#endif
        count);
#endif
    return (ssize_t)result;
}
#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */
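
A rough sketch of how these primitives fit together: format into a stack buffer with malloc_snprintf(), which avoids both heap allocation and floating-point math, then emit the bytes with malloc_write_fd(), which bypasses libc buffering. The function name, message, and use of fd 2 below are illustrative assumptions, not part of the header.

/* Illustrative only: report a value on stderr without allocating. */
static void
report_example(size_t nbytes) {
    char buf[MALLOC_PRINTF_BUFSIZE];
    size_t len = malloc_snprintf(buf, sizeof(buf), "mapped %zu bytes\n",
        nbytes);
    /* len is what snprintf would have written; buf is sized to avoid truncation here. */
    malloc_write_fd(2 /* stderr */, buf, len);
}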
deps/jemalloc/include/jemalloc/internal/mutex.h
deleted
100644 → 0
View file @
7ff7536e
#ifndef JEMALLOC_INTERNAL_MUTEX_H
#define JEMALLOC_INTERNAL_MUTEX_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/witness.h"
typedef enum {
    /* Can only acquire one mutex of a given witness rank at a time. */
    malloc_mutex_rank_exclusive,
    /*
     * Can acquire multiple mutexes of the same witness rank, but in
     * address-ascending order only.
     */
    malloc_mutex_address_ordered
} malloc_mutex_lock_order_t;

typedef struct malloc_mutex_s malloc_mutex_t;

struct malloc_mutex_s {
    union {
        struct {
            /*
             * prof_data is defined first to reduce cacheline bouncing:
             * the data is not touched by the mutex holder during
             * unlocking, while it might be modified by contenders.
             * Having it before the mutex itself could avoid prefetching
             * a modified cacheline (for the unlocking thread).
             */
            mutex_prof_data_t prof_data;
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
            SRWLOCK lock;
#  else
            CRITICAL_SECTION lock;
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
            os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN))
            OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
            pthread_mutex_t lock;
            malloc_mutex_t *postponed_next;
#else
            pthread_mutex_t lock;
#endif
        };
        /*
         * We only touch the witness when configured with debug.  However we
         * keep the field in a union when !debug so that we don't have to
         * pollute the code base with #ifdefs, while avoiding the memory cost.
         */
#if !defined(JEMALLOC_DEBUG)
        witness_t witness;
        malloc_mutex_lock_order_t lock_order;
#endif
    };

#if defined(JEMALLOC_DEBUG)
    witness_t witness;
    malloc_mutex_lock_order_t lock_order;
#endif
};
/*
* Based on benchmark results, a fixed spin with this amount of retries works
* well for our critical sections.
*/
#define MALLOC_MUTEX_MAX_SPIN 250
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
# else
# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_LOCK(m) OSSpinLockLock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) OSSpinLockUnlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock))
#else
# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
#endif
#define LOCK_PROF_DATA_INITIALIZER \
{NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
ATOMIC_INIT(0), 0, NULL, 0}
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, 0}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#endif
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
#  undef isthreaded /* Undo private_namespace.h definition. */
#  define isthreaded true
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_boot(void);
void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);

void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
static inline void
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
    MALLOC_MUTEX_LOCK(mutex);
}

static inline bool
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
    return MALLOC_MUTEX_TRYLOCK(mutex);
}
static inline void
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    if (config_stats) {
        mutex_prof_data_t *data = &mutex->prof_data;
        data->n_lock_ops++;
        if (data->prev_owner != tsdn) {
            data->prev_owner = tsdn;
            data->n_owner_switches++;
        }
    }
}
/* Trylock: return false if the lock is successfully acquired. */
static inline bool
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    if (isthreaded) {
        if (malloc_mutex_trylock_final(mutex)) {
            return true;
        }
        mutex_owner_stats_update(tsdn, mutex);
    }
    witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);

    return false;
}
/* Aggregate lock prof data. */
static inline void
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
    nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
    if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
        nstime_copy(&sum->max_wait_time, &data->max_wait_time);
    }

    sum->n_wait_times += data->n_wait_times;
    sum->n_spin_acquired += data->n_spin_acquired;

    if (sum->max_n_thds < data->max_n_thds) {
        sum->max_n_thds = data->max_n_thds;
    }
    uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
        ATOMIC_RELAXED);
    uint32_t new_n_waiting_thds = cur_n_waiting_thds +
        atomic_load_u32(&data->n_waiting_thds, ATOMIC_RELAXED);
    atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
        ATOMIC_RELAXED);
    sum->n_owner_switches += data->n_owner_switches;
    sum->n_lock_ops += data->n_lock_ops;
}
static inline void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    if (isthreaded) {
        if (malloc_mutex_trylock_final(mutex)) {
            malloc_mutex_lock_slow(mutex);
        }
        mutex_owner_stats_update(tsdn, mutex);
    }
    witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
static inline void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    if (isthreaded) {
        MALLOC_MUTEX_UNLOCK(mutex);
    }
}
static inline void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

static inline void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
/* Copy the prof data from mutex for processing. */
static inline void
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
    malloc_mutex_t *mutex) {
    mutex_prof_data_t *source = &mutex->prof_data;
    /* Can only read holding the mutex. */
    malloc_mutex_assert_owner(tsdn, mutex);

    /*
     * Not *really* allowed (we shouldn't be doing non-atomic loads of
     * atomic data), but the mutex protection makes this safe, and writing
     * a member-for-member copy is tedious for this situation.
     */
    *data = *source;
    /* n_wait_thds is not reported (modified w/o locking). */
    atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
}

#endif /* JEMALLOC_INTERNAL_MUTEX_H */
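
A short usage sketch of the API above; the mutex name, rank choice, and critical-section contents are placeholders, not part of jemalloc. Two conventions worth seeing in action: malloc_mutex_init() returns false on success, and malloc_mutex_trylock() likewise returns false when the lock was acquired.

/* Illustrative only; "example_mtx" is a hypothetical module-local lock. */
static malloc_mutex_t example_mtx;

static bool
example_boot(void) {
    /* false == success, matching jemalloc's usual init convention. */
    return malloc_mutex_init(&example_mtx, "example", WITNESS_RANK_OMIT,
        malloc_mutex_rank_exclusive);
}

static void
example_critical_section(tsdn_t *tsdn) {
    malloc_mutex_lock(tsdn, &example_mtx);
    /* ... touch state guarded by example_mtx ... */
    malloc_mutex_assert_owner(tsdn, &example_mtx);
    malloc_mutex_unlock(tsdn, &example_mtx);
}

static bool
example_try(tsdn_t *tsdn) {
    /* Trylock convention: false means the lock was acquired. */
    if (malloc_mutex_trylock(tsdn, &example_mtx)) {
        return true; /* Contended; caller can fall back or retry. */
    }
    /* ... fast-path work ... */
    malloc_mutex_unlock(tsdn, &example_mtx);
    return false;
}

When JEMALLOC_DEBUG is defined, the witness machinery checks the declared rank ordering on every lock/unlock, which is why each helper threads a tsdn_t through.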
deps/jemalloc/include/jemalloc/internal/mutex_pool.h
deleted
100644 → 0
View file @
7ff7536e
#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H
#define JEMALLOC_INTERNAL_MUTEX_POOL_H
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/witness.h"
/* We do mod reductions by this value, so it should be kept a power of 2. */
#define MUTEX_POOL_SIZE 256
typedef struct mutex_pool_s mutex_pool_t;

struct mutex_pool_s {
    malloc_mutex_t mutexes[MUTEX_POOL_SIZE];
};

bool mutex_pool_init(mutex_pool_t *pool, const char *name,
    witness_rank_t rank);
/* Internal helper - not meant to be called outside this module. */
static inline malloc_mutex_t *
mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) {
    size_t hash_result[2];
    hash(&key, sizeof(key), 0xd50dcc1b, hash_result);
    return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE];
}
static inline void
mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) {
    for (int i = 0; i < MUTEX_POOL_SIZE; i++) {
        malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]);
    }
}
/*
 * Note that a mutex pool doesn't work exactly the way an embedded mutex would.
* You're not allowed to acquire mutexes in the pool one at a time. You have to
* acquire all the mutexes you'll need in a single function call, and then
* release them all in a single function call.
*/
static inline void
mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    mutex_pool_assert_not_held(tsdn, pool);

    malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
    malloc_mutex_lock(tsdn, mutex);
}
static inline void
mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
    malloc_mutex_unlock(tsdn, mutex);

    mutex_pool_assert_not_held(tsdn, pool);
}
static inline void
mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
    mutex_pool_assert_not_held(tsdn, pool);

    malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
    malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
    if ((uintptr_t)mutex1 < (uintptr_t)mutex2) {
        malloc_mutex_lock(tsdn, mutex1);
        malloc_mutex_lock(tsdn, mutex2);
    } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) {
        malloc_mutex_lock(tsdn, mutex1);
    } else {
        malloc_mutex_lock(tsdn, mutex2);
        malloc_mutex_lock(tsdn, mutex1);
    }
}
static inline void
mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
    malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
    malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
    if (mutex1 == mutex2) {
        malloc_mutex_unlock(tsdn, mutex1);
    } else {
        malloc_mutex_unlock(tsdn, mutex1);
        malloc_mutex_unlock(tsdn, mutex2);
    }

    mutex_pool_assert_not_held(tsdn, pool);
}
static inline void
mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key));
}

#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */
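
A usage sketch for the pool API above; the two object pointers and the function name are hypothetical. Keys are just addresses, mutex_pool_lock2() handles the case where both keys hash to the same mutex, and its address-ordered acquisition is what prevents lock-order reversals between concurrent callers locking the same pair in opposite order.

/* Illustrative only: guard a pairwise operation on two objects. */
static void
pairwise_example(tsdn_t *tsdn, mutex_pool_t *pool, void *obj1, void *obj2) {
    mutex_pool_lock2(tsdn, pool, (uintptr_t)obj1, (uintptr_t)obj2);
    /* ... both objects are now covered by their pool mutexes ... */
    mutex_pool_unlock2(tsdn, pool, (uintptr_t)obj1, (uintptr_t)obj2);
}

Per the comment above, all mutexes needed for an operation must be taken in one lock call and released in one unlock call; interleaving single-key locks from the same pool trips mutex_pool_assert_not_held().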
deps/jemalloc/include/jemalloc/internal/mutex_prof.h
deleted
100644 → 0
View file @
7ff7536e
#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
#define JEMALLOC_INTERNAL_MUTEX_PROF_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/tsd_types.h"
#define MUTEX_PROF_GLOBAL_MUTEXES \
OP(background_thread) \
OP(ctl) \
OP(prof)
typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
    MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
    mutex_prof_num_global_mutexes
} mutex_prof_global_ind_t;
#define MUTEX_PROF_ARENA_MUTEXES \
OP(large) \
OP(extent_avail) \
OP(extents_dirty) \
OP(extents_muzzy) \
OP(extents_retained) \
OP(decay_dirty) \
OP(decay_muzzy) \
OP(base) \
OP(tcache_list)
typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,
    MUTEX_PROF_ARENA_MUTEXES
#undef OP
    mutex_prof_num_arena_mutexes
} mutex_prof_arena_ind_t;
#define MUTEX_PROF_UINT64_COUNTERS \
OP(num_ops, uint64_t, "n_lock_ops") \
OP(num_wait, uint64_t, "n_waiting") \
OP(num_spin_acq, uint64_t, "n_spin_acq") \
OP(num_owner_switch, uint64_t, "n_owner_switch") \
OP(total_wait_time, uint64_t, "total_wait_ns") \
OP(max_wait_time, uint64_t, "max_wait_ns")
#define MUTEX_PROF_UINT32_COUNTERS \
OP(max_num_thds, uint32_t, "max_n_thds")
#define MUTEX_PROF_COUNTERS \
MUTEX_PROF_UINT64_COUNTERS \
MUTEX_PROF_UINT32_COUNTERS
#define OP(counter, type, human) mutex_counter_##counter,
#define COUNTER_ENUM(counter_list, t) \
typedef enum { \
counter_list \
mutex_prof_num_##t##_counters \
} mutex_prof_##t##_counter_ind_t;
COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)
#undef COUNTER_ENUM
#undef OP
typedef struct {
    /*
     * Counters touched on the slow path, i.e. when there is lock
     * contention.  We update them once we have the lock.
     */
    /* Total time (in nanoseconds) spent waiting on this mutex. */
    nstime_t tot_wait_time;
    /* Max time (in nanoseconds) spent on a single lock operation. */
    nstime_t max_wait_time;
    /* # of times have to wait for this mutex (after spinning). */
    uint64_t n_wait_times;
    /* # of times acquired the mutex through local spinning. */
    uint64_t n_spin_acquired;
    /* Max # of threads waiting for the mutex at the same time. */
    uint32_t max_n_thds;
    /* Current # of threads waiting on the lock.  Atomic synced. */
    atomic_u32_t n_waiting_thds;

    /*
     * Data touched on the fast path.  These are modified right after we
     * grab the lock, so it's placed closest to the end (i.e. right before
     * the lock) so that we have a higher chance of them being on the same
     * cacheline.
     */
    /* # of times the mutex holder is different than the previous one. */
    uint64_t n_owner_switches;
    /* Previous mutex holder, to facilitate n_owner_switches. */
    tsdn_t *prev_owner;
    /* # of lock() operations in total. */
    uint64_t n_lock_ops;
} mutex_prof_data_t;

#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
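
The OP()/#undef pattern above is a conventional X-macro: each mutex list is defined once and re-expanded wherever an enum, name table, or stats column is needed, with the trailing member doubling as the count. For reference, the global-mutex list expands roughly to the enum below (this is an approximate expansion written out by hand, not code from the header).

/* Approximate expansion of MUTEX_PROF_GLOBAL_MUTEXES, for reference only. */
typedef enum {
    global_prof_mutex_background_thread,
    global_prof_mutex_ctl,
    global_prof_mutex_prof,
    mutex_prof_num_global_mutexes
} mutex_prof_global_ind_t;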
deps/jemalloc/include/jemalloc/internal/nstime.h
deleted
100644 → 0
View file @
7ff7536e
#ifndef JEMALLOC_INTERNAL_NSTIME_H
#define JEMALLOC_INTERNAL_NSTIME_H
/* Maximum supported number of seconds (~584 years). */
#define NSTIME_SEC_MAX KQU(18446744072)
#define NSTIME_ZERO_INITIALIZER {0}
typedef struct {
    uint64_t ns;
} nstime_t;

void nstime_init(nstime_t *time, uint64_t ns);
void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
uint64_t nstime_ns(const nstime_t *time);
uint64_t nstime_sec(const nstime_t *time);
uint64_t nstime_msec(const nstime_t *time);
uint64_t nstime_nsec(const nstime_t *time);
void nstime_copy(nstime_t *time, const nstime_t *source);
int nstime_compare(const nstime_t *a, const nstime_t *b);
void nstime_add(nstime_t *time, const nstime_t *addend);
void nstime_iadd(nstime_t *time, uint64_t addend);
void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
void nstime_imultiply(nstime_t *time, uint64_t multiplier);
void nstime_idivide(nstime_t *time, uint64_t divisor);
uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);

typedef bool (nstime_monotonic_t)(void);
extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;

typedef bool (nstime_update_t)(nstime_t *);
extern nstime_update_t *JET_MUTABLE nstime_update;

#endif /* JEMALLOC_INTERNAL_NSTIME_H */
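
A small sketch of how the nstime API composes; the function name and sample values are invented. An nstime_t is just a nanosecond counter, so accumulating samples and taking a mean is plain add-and-divide arithmetic on the wrapped value.

/* Illustrative only: accumulate two wait times and take their mean. */
static uint64_t
mean_wait_ns_example(void) {
    nstime_t total = NSTIME_ZERO_INITIALIZER;
    nstime_t sample;

    nstime_init2(&sample, 1, 500000000); /* 1.5 s sample. */
    nstime_add(&total, &sample);
    nstime_iadd(&total, 250);            /* 250 ns sample. */

    nstime_idivide(&total, 2);           /* Mean of the two samples. */
    return nstime_ns(&total);
}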