ruanhaishen / redis · Commits · 4a884343

Commit 4a884343, authored Oct 10, 2021 by Yoav Steinberg

    Delete old jemalloc before pulling in subtree.

parent 7ff7536e
Changes: 169
Too many changes to show. To preserve performance, only 169 of 169+ files are displayed.
deps/jemalloc/src/jemalloc_cpp.cpp  deleted  100644 → 0
#include <mutex>
#include <new>

#define JEMALLOC_CPP_CPP_
#ifdef __cplusplus
extern "C" {
#endif

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#ifdef __cplusplus
}
#endif

// All operators in this file are exported.

// Possibly alias hidden versions of malloc and sdallocx to avoid an extra plt
// thunk?
//
// extern __typeof (sdallocx) sdallocx_int
//  __attribute ((alias ("sdallocx"),
//		 visibility ("hidden")));
//
// ... but it needs to work with jemalloc namespaces.

void	*operator new(std::size_t size);
void	*operator new[](std::size_t size);
void	*operator new(std::size_t size, const std::nothrow_t &) noexcept;
void	*operator new[](std::size_t size, const std::nothrow_t &) noexcept;
void	operator delete(void *ptr) noexcept;
void	operator delete[](void *ptr) noexcept;
void	operator delete(void *ptr, const std::nothrow_t &) noexcept;
void	operator delete[](void *ptr, const std::nothrow_t &) noexcept;

#if __cpp_sized_deallocation >= 201309
/* C++14's sized-delete operators. */
void	operator delete(void *ptr, std::size_t size) noexcept;
void	operator delete[](void *ptr, std::size_t size) noexcept;
#endif

JEMALLOC_NOINLINE
static void *
handleOOM(std::size_t size, bool nothrow) {
	void *ptr = nullptr;

	while (ptr == nullptr) {
		std::new_handler handler;
		// GCC-4.8 and clang 4.0 do not have std::get_new_handler.
		{
			static std::mutex mtx;
			std::lock_guard<std::mutex> lock(mtx);

			handler = std::set_new_handler(nullptr);
			std::set_new_handler(handler);
		}
		if (handler == nullptr)
			break;

		try {
			handler();
		} catch (const std::bad_alloc &) {
			break;
		}

		ptr = je_malloc(size);
	}

	if (ptr == nullptr && !nothrow)
		std::__throw_bad_alloc();
	return ptr;
}

template <bool IsNoExcept>
JEMALLOC_ALWAYS_INLINE
void *
newImpl(std::size_t size) noexcept(IsNoExcept) {
	void *ptr = je_malloc(size);
	if (likely(ptr != nullptr))
		return ptr;

	return handleOOM(size, IsNoExcept);
}

void *
operator new(std::size_t size) {
	return newImpl<false>(size);
}

void *
operator new[](std::size_t size) {
	return newImpl<false>(size);
}

void *
operator new(std::size_t size, const std::nothrow_t &) noexcept {
	return newImpl<true>(size);
}

void *
operator new[](std::size_t size, const std::nothrow_t &) noexcept {
	return newImpl<true>(size);
}

void
operator delete(void *ptr) noexcept {
	je_free(ptr);
}

void
operator delete[](void *ptr) noexcept {
	je_free(ptr);
}

void
operator delete(void *ptr, const std::nothrow_t &) noexcept {
	je_free(ptr);
}

void
operator delete[](void *ptr, const std::nothrow_t &) noexcept {
	je_free(ptr);
}

#if __cpp_sized_deallocation >= 201309

void
operator delete(void *ptr, std::size_t size) noexcept {
	if (unlikely(ptr == nullptr)) {
		return;
	}
	je_sdallocx(ptr, size, /*flags=*/0);
}

void
operator delete[](void *ptr, std::size_t size) noexcept {
	if (unlikely(ptr == nullptr)) {
		return;
	}
	je_sdallocx(ptr, size, /*flags=*/0);
}

#endif  // __cpp_sized_deallocation
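
The deleted file above is jemalloc's only C++ translation unit: it replaces the global operator new/delete family so C++ allocations flow through je_malloc(), and sized deletes flow through je_sdallocx(), which avoids a size-class lookup on free. A minimal C sketch of the same allocate/sized-free pairing, assuming a je_-prefixed jemalloc build like the one Redis vendors (alloc_blob/free_blob are illustrative helpers, not jemalloc API):

#include <jemalloc/jemalloc.h>
#include <stddef.h>

/* Allocate as the plain operator new path does (minus the new_handler retry loop). */
static void *alloc_blob(size_t n) {
	return je_malloc(n);
}

/* Free as the C++14 sized operator delete does: passing the size lets jemalloc
 * locate the size class without a metadata lookup; flags = 0 as above. */
static void free_blob(void *p, size_t n) {
	if (p != NULL) {
		je_sdallocx(p, n, 0);
	}
}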
deps/jemalloc/src/large.c  deleted  100644 → 0
#define JEMALLOC_LARGE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/

void *
large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
	assert(usize == sz_s2u(usize));

	return large_palloc(tsdn, arena, usize, CACHELINE, zero);
}

void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero) {
	size_t ausize;
	extent_t *extent;
	bool is_zeroed;
	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

	assert(!tsdn_null(tsdn) || arena != NULL);

	ausize = sz_sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) {
		return NULL;
	}

	if (config_fill && unlikely(opt_zero)) {
		zero = true;
	}
	/*
	 * Copy zero into is_zeroed and pass the copy when allocating the
	 * extent, so that it is possible to make correct junk/zero fill
	 * decisions below, even if is_zeroed ends up true when zero is false.
	 */
	is_zeroed = zero;
	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	}
	if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
	    arena, usize, alignment, &is_zeroed)) == NULL) {
		return NULL;
	}

	/* See comments in arena_bin_slabs_full_insert(). */
	if (!arena_is_auto(arena)) {
		/* Insert extent into large. */
		malloc_mutex_lock(tsdn, &arena->large_mtx);
		extent_list_append(&arena->large, extent);
		malloc_mutex_unlock(tsdn, &arena->large_mtx);
	}
	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
		prof_idump(tsdn);
	}

	if (zero) {
		assert(is_zeroed);
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
		    extent_usize_get(extent));
	}

	arena_decay_tick(tsdn, arena);
	return extent_addr_get(extent);
}

static void
large_dalloc_junk_impl(void *ptr, size_t size) {
	memset(ptr, JEMALLOC_FREE_JUNK, size);
}
large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl;

static void
large_dalloc_maybe_junk_impl(void *ptr, size_t size) {
	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the extent isn't about to be
		 * unmapped.
		 */
		if (opt_retain || (have_dss && extent_in_dss(ptr))) {
			large_dalloc_junk(ptr, size);
		}
	}
}
large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
    large_dalloc_maybe_junk_impl;

static bool
large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t diff = extent_size_get(extent) - (usize + sz_large_pad);

	assert(oldusize > usize);

	if (extent_hooks->split == NULL) {
		return true;
	}

	/* Split excess pages. */
	if (diff != 0) {
		extent_t *trail = extent_split_wrapper(tsdn, arena,
		    &extent_hooks, extent, usize + sz_large_pad,
		    sz_size2index(usize), false, diff, NSIZES, false);
		if (trail == NULL) {
			return true;
		}

		if (config_fill && unlikely(opt_junk_free)) {
			large_dalloc_maybe_junk(extent_addr_get(trail),
			    extent_size_get(trail));
		}

		arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
	}

	arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);

	return false;
}

static bool
large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
    bool zero) {
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t trailsize = usize - oldusize;

	if (extent_hooks->merge == NULL) {
		return true;
	}

	if (config_fill && unlikely(opt_zero)) {
		zero = true;
	}
	/*
	 * Copy zero into is_zeroed_trail and pass the copy when allocating the
	 * extent, so that it is possible to make correct junk/zero fill
	 * decisions below, even if is_zeroed_trail ends up true when zero is
	 * false.
	 */
	bool is_zeroed_trail = zero;
	bool commit = true;
	extent_t *trail;
	bool new_mapping;
	if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, extent_past_get(extent), trailsize, 0,
	    CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL
	    || (trail = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
	    CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL) {
		if (config_stats) {
			new_mapping = false;
		}
	} else {
		if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
		    extent_past_get(extent), trailsize, 0, CACHELINE, false,
		    NSIZES, &is_zeroed_trail, &commit)) == NULL) {
			return true;
		}
		if (config_stats) {
			new_mapping = true;
		}
	}

	if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
		extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
		return true;
	}
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	szind_t szind = sz_size2index(usize);
	extent_szind_set(extent, szind);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_addr_get(extent), szind, false);

	if (config_stats && new_mapping) {
		arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
	}

	if (zero) {
		if (config_cache_oblivious) {
			/*
			 * Zero the trailing bytes of the original allocation's
			 * last page, since they are in an indeterminate state.
			 * There will always be trailing bytes, because ptr's
			 * offset from the beginning of the extent is a multiple
			 * of CACHELINE in [0 .. PAGE).
			 */
			void *zbase = (void *)
			    ((uintptr_t)extent_addr_get(extent) + oldusize);
			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
			    PAGE));
			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
			assert(nzero > 0);
			memset(zbase, 0, nzero);
		}
		assert(is_zeroed_trail);
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
		    JEMALLOC_ALLOC_JUNK, usize - oldusize);
	}

	arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);

	return false;
}

bool
large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t usize_max, bool zero) {
	size_t oldusize = extent_usize_get(extent);

	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(oldusize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS);

	if (usize_max > oldusize) {
		/* Attempt to expand the allocation in-place. */
		if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && usize_min > oldusize &&
		    large_ralloc_no_move_expand(tsdn, extent, usize_min,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
	}

	/*
	 * Avoid moving the allocation if the existing extent size accommodates
	 * the new size.
	 */
	if (oldusize >= usize_min && oldusize <= usize_max) {
		arena_decay_tick(tsdn, extent_arena_get(extent));
		return false;
	}

	/* Attempt to shrink the allocation in-place. */
	if (oldusize > usize_max) {
		if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
	}
	return true;
}

static void *
large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero) {
	if (alignment <= CACHELINE) {
		return large_malloc(tsdn, arena, usize, zero);
	}
	return large_palloc(tsdn, arena, usize, alignment, zero);
}

void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	size_t oldusize = extent_usize_get(extent);

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(oldusize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS);

	/* Try to avoid moving the allocation. */
	if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
		return extent_addr_get(extent);
	}

	/*
	 * usize and old size are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero);
	if (ret == NULL) {
		return NULL;
	}

	size_t copysize = (usize < oldusize) ? usize : oldusize;
	memcpy(ret, extent_addr_get(extent), copysize);
	isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
	return ret;
}

/*
 * junked_locked indicates whether the extent's data have been junk-filled, and
 * whether the arena's large_mtx is currently held.
 */
static void
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    bool junked_locked) {
	if (!junked_locked) {
		/* See comments in arena_bin_slabs_full_insert(). */
		if (!arena_is_auto(arena)) {
			malloc_mutex_lock(tsdn, &arena->large_mtx);
			extent_list_remove(&arena->large, extent);
			malloc_mutex_unlock(tsdn, &arena->large_mtx);
		}
		large_dalloc_maybe_junk(extent_addr_get(extent),
		    extent_usize_get(extent));
	} else {
		malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
		if (!arena_is_auto(arena)) {
			extent_list_remove(&arena->large, extent);
		}
	}
	arena_extent_dalloc_large_prep(tsdn, arena, extent);
}

static void
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
}

void
large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
}

void
large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
}

void
large_dalloc(tsdn_t *tsdn, extent_t *extent) {
	arena_t *arena = extent_arena_get(extent);
	large_dalloc_prep_impl(tsdn, arena, extent, false);
	large_dalloc_finish_impl(tsdn, arena, extent);
	arena_decay_tick(tsdn, arena);
}

size_t
large_salloc(tsdn_t *tsdn, const extent_t *extent) {
	return extent_usize_get(extent);
}

prof_tctx_t *
large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
	return extent_prof_tctx_get(extent);
}

void
large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
	extent_prof_tctx_set(extent, tctx);
}

void
large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
	large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
}
deps/jemalloc/src/log.c  deleted  100644 → 0
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/log.h"
char
log_var_names
[
JEMALLOC_LOG_VAR_BUFSIZE
];
atomic_b_t
log_init_done
=
ATOMIC_INIT
(
false
);
/*
* Returns true if we were able to pick out a segment. Fills in r_segment_end
* with a pointer to the first character after the end of the string.
*/
static
const
char
*
log_var_extract_segment
(
const
char
*
segment_begin
)
{
const
char
*
end
;
for
(
end
=
segment_begin
;
*
end
!=
'\0'
&&
*
end
!=
'|'
;
end
++
)
{
}
return
end
;
}
static
bool
log_var_matches_segment
(
const
char
*
segment_begin
,
const
char
*
segment_end
,
const
char
*
log_var_begin
,
const
char
*
log_var_end
)
{
assert
(
segment_begin
<=
segment_end
);
assert
(
log_var_begin
<
log_var_end
);
ptrdiff_t
segment_len
=
segment_end
-
segment_begin
;
ptrdiff_t
log_var_len
=
log_var_end
-
log_var_begin
;
/* The special '.' segment matches everything. */
if
(
segment_len
==
1
&&
*
segment_begin
==
'.'
)
{
return
true
;
}
if
(
segment_len
==
log_var_len
)
{
return
strncmp
(
segment_begin
,
log_var_begin
,
segment_len
)
==
0
;
}
else
if
(
segment_len
<
log_var_len
)
{
return
strncmp
(
segment_begin
,
log_var_begin
,
segment_len
)
==
0
&&
log_var_begin
[
segment_len
]
==
'.'
;
}
else
{
return
false
;
}
}
unsigned
log_var_update_state
(
log_var_t
*
log_var
)
{
const
char
*
log_var_begin
=
log_var
->
name
;
const
char
*
log_var_end
=
log_var
->
name
+
strlen
(
log_var
->
name
);
/* Pointer to one before the beginning of the current segment. */
const
char
*
segment_begin
=
log_var_names
;
/*
* If log_init done is false, we haven't parsed the malloc conf yet. To
* avoid log-spew, we default to not displaying anything.
*/
if
(
!
atomic_load_b
(
&
log_init_done
,
ATOMIC_ACQUIRE
))
{
return
LOG_INITIALIZED_NOT_ENABLED
;
}
while
(
true
)
{
const
char
*
segment_end
=
log_var_extract_segment
(
segment_begin
);
assert
(
segment_end
<
log_var_names
+
JEMALLOC_LOG_VAR_BUFSIZE
);
if
(
log_var_matches_segment
(
segment_begin
,
segment_end
,
log_var_begin
,
log_var_end
))
{
atomic_store_u
(
&
log_var
->
state
,
LOG_ENABLED
,
ATOMIC_RELAXED
);
return
LOG_ENABLED
;
}
if
(
*
segment_end
==
'\0'
)
{
/* Hit the end of the segment string with no match. */
atomic_store_u
(
&
log_var
->
state
,
LOG_INITIALIZED_NOT_ENABLED
,
ATOMIC_RELAXED
);
return
LOG_INITIALIZED_NOT_ENABLED
;
}
/* Otherwise, skip the delimiter and continue. */
segment_begin
=
segment_end
+
1
;
}
}
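
For reference, the matching rule implemented by log_var_matches_segment() above: the filter held in log_var_names is a '|'-separated list of segments, a lone "." segment enables every log variable, and a segment enables itself plus anything nested under it. A standalone sketch of that rule (illustrative only, not a jemalloc API):

#include <stdbool.h>
#include <string.h>

/* Does one filter segment [seg, seg + seg_len) enable the log variable `name`? */
static bool segment_matches(const char *seg, size_t seg_len, const char *name) {
	size_t name_len = strlen(name);
	if (seg_len == 1 && seg[0] == '.') {
		return true;                              /* wildcard segment */
	}
	if (seg_len == name_len) {
		return strncmp(seg, name, seg_len) == 0;  /* exact match */
	}
	if (seg_len < name_len) {
		return strncmp(seg, name, seg_len) == 0 &&
		    name[seg_len] == '.';                 /* "a.b" matches "a.b.c" */
	}
	return false;
}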
deps/jemalloc/src/malloc_io.c  deleted  100644 → 0
#define JEMALLOC_MALLOC_IO_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/util.h"

#ifdef assert
#  undef assert
#endif
#ifdef not_reached
#  undef not_reached
#endif
#ifdef not_implemented
#  undef not_implemented
#endif
#ifdef assert_not_implemented
#  undef assert_not_implemented
#endif

/*
 * Define simple versions of assertion macros that won't recurse in case
 * of assertion failures in malloc_*printf().
 */
#define assert(e) do {							\
	if (config_debug && !(e)) {					\
		malloc_write("<jemalloc>: Failed assertion\n");		\
		abort();						\
	}								\
} while (0)

#define not_reached() do {						\
	if (config_debug) {						\
		malloc_write("<jemalloc>: Unreachable code reached\n");	\
		abort();						\
	}								\
	unreachable();							\
} while (0)

#define not_implemented() do {						\
	if (config_debug) {						\
		malloc_write("<jemalloc>: Not implemented\n");		\
		abort();						\
	}								\
} while (0)

#define assert_not_implemented(e) do {					\
	if (unlikely(config_debug && !(e))) {				\
		not_implemented();					\
	}								\
} while (0)

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void wrtmessage(void *cbopaque, const char *s);
#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
    size_t *slen_p);
#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
    size_t *slen_p);

/******************************************************************************/

/* malloc_message() setup. */
static void
wrtmessage(void *cbopaque, const char *s) {
	malloc_write_fd(STDERR_FILENO, s, strlen(s));
}

JEMALLOC_EXPORT void	(*je_malloc_message)(void *, const char *s);

/*
 * Wrapper around malloc_message() that avoids the need for
 * je_malloc_message(...) throughout the code.
 */
void
malloc_write(const char *s) {
	if (je_malloc_message != NULL) {
		je_malloc_message(NULL, s);
	} else {
		wrtmessage(NULL, s);
	}
}

/*
 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
 * provide a wrapper.
 */
int
buferror(int err, char *buf, size_t buflen) {
#ifdef _WIN32
	FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
	    (LPSTR)buf, (DWORD)buflen, NULL);
	return 0;
#elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) && defined(_GNU_SOURCE)
	char *b = strerror_r(err, buf, buflen);
	if (b != buf) {
		strncpy(buf, b, buflen);
		buf[buflen-1] = '\0';
	}
	return 0;
#else
	return strerror_r(err, buf, buflen);
#endif
}

uintmax_t
malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
	uintmax_t ret, digit;
	unsigned b;
	bool neg;
	const char *p, *ns;

	p = nptr;
	if (base < 0 || base == 1 || base > 36) {
		ns = p;
		set_errno(EINVAL);
		ret = UINTMAX_MAX;
		goto label_return;
	}
	b = base;

	/* Swallow leading whitespace and get sign, if any. */
	neg = false;
	while (true) {
		switch (*p) {
		case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
			p++;
			break;
		case '-':
			neg = true;
			/* Fall through. */
		case '+':
			p++;
			/* Fall through. */
		default:
			goto label_prefix;
		}
	}

	/* Get prefix, if any. */
	label_prefix:
	/*
	 * Note where the first non-whitespace/sign character is so that it is
	 * possible to tell whether any digits are consumed (e.g., "  0" vs.
	 * "  -x").
	 */
	ns = p;
	if (*p == '0') {
		switch (p[1]) {
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7':
			if (b == 0) {
				b = 8;
			}
			if (b == 8) {
				p++;
			}
			break;
		case 'X': case 'x':
			switch (p[2]) {
			case '0': case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9':
			case 'A': case 'B': case 'C': case 'D': case 'E':
			case 'F':
			case 'a': case 'b': case 'c': case 'd': case 'e':
			case 'f':
				if (b == 0) {
					b = 16;
				}
				if (b == 16) {
					p += 2;
				}
				break;
			default:
				break;
			}
			break;
		default:
			p++;
			ret = 0;
			goto label_return;
		}
	}
	if (b == 0) {
		b = 10;
	}

	/* Convert. */
	ret = 0;
	while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
	    || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
	    || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
		uintmax_t pret = ret;
		ret *= b;
		ret += digit;
		if (ret < pret) {
			/* Overflow. */
			set_errno(ERANGE);
			ret = UINTMAX_MAX;
			goto label_return;
		}
		p++;
	}
	if (neg) {
		ret = (uintmax_t)(-((intmax_t)ret));
	}

	if (p == ns) {
		/* No conversion performed. */
		set_errno(EINVAL);
		ret = UINTMAX_MAX;
		goto label_return;
	}

label_return:
	if (endptr != NULL) {
		if (p == ns) {
			/* No characters were converted. */
			*endptr = (char *)nptr;
		} else {
			*endptr = (char *)p;
		}
	}
	return ret;
}

static char *
u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) {
	unsigned i;

	i = U2S_BUFSIZE - 1;
	s[i] = '\0';
	switch (base) {
	case 10:
		do {
			i--;
			s[i] = "0123456789"[x % (uint64_t)10];
			x /= (uint64_t)10;
		} while (x > 0);
		break;
	case 16: {
		const char *digits = (uppercase)
		    ? "0123456789ABCDEF"
		    : "0123456789abcdef";

		do {
			i--;
			s[i] = digits[x & 0xf];
			x >>= 4;
		} while (x > 0);
		break;
	} default: {
		const char *digits = (uppercase)
		    ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
		    : "0123456789abcdefghijklmnopqrstuvwxyz";

		assert(base >= 2 && base <= 36);
		do {
			i--;
			s[i] = digits[x % (uint64_t)base];
			x /= (uint64_t)base;
		} while (x > 0);
	}}

	*slen_p = U2S_BUFSIZE - 1 - i;
	return &s[i];
}

static char *
d2s(intmax_t x, char sign, char *s, size_t *slen_p) {
	bool neg;

	if ((neg = (x < 0))) {
		x = -x;
	}
	s = u2s(x, 10, false, s, slen_p);
	if (neg) {
		sign = '-';
	}
	switch (sign) {
	case '-':
		if (!neg) {
			break;
		}
		/* Fall through. */
	case ' ':
	case '+':
		s--;
		(*slen_p)++;
		*s = sign;
		break;
	default: not_reached();
	}
	return s;
}

static char *
o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) {
	s = u2s(x, 8, false, s, slen_p);
	if (alt_form && *s != '0') {
		s--;
		(*slen_p)++;
		*s = '0';
	}
	return s;
}

static char *
x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) {
	s = u2s(x, 16, uppercase, s, slen_p);
	if (alt_form) {
		s -= 2;
		(*slen_p) += 2;
		memcpy(s, uppercase ? "0X" : "0x", 2);
	}
	return s;
}

size_t
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
	size_t i;
	const char *f;

#define APPEND_C(c) do {						\
	if (i < size) {							\
		str[i] = (c);						\
	}								\
	i++;								\
} while (0)
#define APPEND_S(s, slen) do {						\
	if (i < size) {							\
		size_t cpylen = (slen <= size - i) ? slen : size - i;	\
		memcpy(&str[i], s, cpylen);				\
	}								\
	i += slen;							\
} while (0)
#define APPEND_PADDED_S(s, slen, width, left_justify) do {		\
	/* Left padding. */						\
	size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ?	\
	    (size_t)width - slen : 0);					\
	if (!left_justify && pad_len != 0) {				\
		size_t j;						\
		for (j = 0; j < pad_len; j++) {				\
			APPEND_C(' ');					\
		}							\
	}								\
	/* Value. */							\
	APPEND_S(s, slen);						\
	/* Right padding. */						\
	if (left_justify && pad_len != 0) {				\
		size_t j;						\
		for (j = 0; j < pad_len; j++) {				\
			APPEND_C(' ');					\
		}							\
	}								\
} while (0)
#define GET_ARG_NUMERIC(val, len) do {					\
	switch ((unsigned char)len) {					\
	case '?':							\
		val = va_arg(ap, int);					\
		break;							\
	case '?' | 0x80:						\
		val = va_arg(ap, unsigned int);				\
		break;							\
	case 'l':							\
		val = va_arg(ap, long);					\
		break;							\
	case 'l' | 0x80:						\
		val = va_arg(ap, unsigned long);			\
		break;							\
	case 'q':							\
		val = va_arg(ap, long long);				\
		break;							\
	case 'q' | 0x80:						\
		val = va_arg(ap, unsigned long long);			\
		break;							\
	case 'j':							\
		val = va_arg(ap, intmax_t);				\
		break;							\
	case 'j' | 0x80:						\
		val = va_arg(ap, uintmax_t);				\
		break;							\
	case 't':							\
		val = va_arg(ap, ptrdiff_t);				\
		break;							\
	case 'z':							\
		val = va_arg(ap, ssize_t);				\
		break;							\
	case 'z' | 0x80:						\
		val = va_arg(ap, size_t);				\
		break;							\
	case 'p': /* Synthetic; used for %p. */				\
		val = va_arg(ap, uintptr_t);				\
		break;							\
	default:							\
		not_reached();						\
		val = 0;						\
	}								\
} while (0)

	i = 0;
	f = format;
	while (true) {
		switch (*f) {
		case '\0': goto label_out;
		case '%': {
			bool alt_form = false;
			bool left_justify = false;
			bool plus_space = false;
			bool plus_plus = false;
			int prec = -1;
			int width = -1;
			unsigned char len = '?';
			char *s;
			size_t slen;

			f++;
			/* Flags. */
			while (true) {
				switch (*f) {
				case '#':
					assert(!alt_form);
					alt_form = true;
					break;
				case '-':
					assert(!left_justify);
					left_justify = true;
					break;
				case ' ':
					assert(!plus_space);
					plus_space = true;
					break;
				case '+':
					assert(!plus_plus);
					plus_plus = true;
					break;
				default: goto label_width;
				}
				f++;
			}
			/* Width. */
			label_width:
			switch (*f) {
			case '*':
				width = va_arg(ap, int);
				f++;
				if (width < 0) {
					left_justify = true;
					width = -width;
				}
				break;
			case '0': case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9': {
				uintmax_t uwidth;
				set_errno(0);
				uwidth = malloc_strtoumax(f, (char **)&f, 10);
				assert(uwidth != UINTMAX_MAX || get_errno() !=
				    ERANGE);
				width = (int)uwidth;
				break;
			} default:
				break;
			}
			/* Width/precision separator. */
			if (*f == '.') {
				f++;
			} else {
				goto label_length;
			}
			/* Precision. */
			switch (*f) {
			case '*':
				prec = va_arg(ap, int);
				f++;
				break;
			case '0': case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9': {
				uintmax_t uprec;
				set_errno(0);
				uprec = malloc_strtoumax(f, (char **)&f, 10);
				assert(uprec != UINTMAX_MAX || get_errno() !=
				    ERANGE);
				prec = (int)uprec;
				break;
			}
			default: break;
			}
			/* Length. */
			label_length:
			switch (*f) {
			case 'l':
				f++;
				if (*f == 'l') {
					len = 'q';
					f++;
				} else {
					len = 'l';
				}
				break;
			case 'q': case 'j': case 't': case 'z':
				len = *f;
				f++;
				break;
			default: break;
			}
			/* Conversion specifier. */
			switch (*f) {
			case '%':
				/* %% */
				APPEND_C(*f);
				f++;
				break;
			case 'd': case 'i': {
				intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
				char buf[D2S_BUFSIZE];

				GET_ARG_NUMERIC(val, len);
				s = d2s(val, (plus_plus ? '+' : (plus_space ?
				    ' ' : '-')), buf, &slen);
				APPEND_PADDED_S(s, slen, width, left_justify);
				f++;
				break;
			} case 'o': {
				uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
				char buf[O2S_BUFSIZE];

				GET_ARG_NUMERIC(val, len | 0x80);
				s = o2s(val, alt_form, buf, &slen);
				APPEND_PADDED_S(s, slen, width, left_justify);
				f++;
				break;
			} case 'u': {
				uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
				char buf[U2S_BUFSIZE];

				GET_ARG_NUMERIC(val, len | 0x80);
				s = u2s(val, 10, false, buf, &slen);
				APPEND_PADDED_S(s, slen, width, left_justify);
				f++;
				break;
			} case 'x': case 'X': {
				uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
				char buf[X2S_BUFSIZE];

				GET_ARG_NUMERIC(val, len | 0x80);
				s = x2s(val, alt_form, *f == 'X', buf, &slen);
				APPEND_PADDED_S(s, slen, width, left_justify);
				f++;
				break;
			} case 'c': {
				unsigned char val;
				char buf[2];

				assert(len == '?' || len == 'l');
				assert_not_implemented(len != 'l');
				val = va_arg(ap, int);
				buf[0] = val;
				buf[1] = '\0';
				APPEND_PADDED_S(buf, 1, width, left_justify);
				f++;
				break;
			} case 's':
				assert(len == '?' || len == 'l');
				assert_not_implemented(len != 'l');
				s = va_arg(ap, char *);
				slen = (prec < 0) ? strlen(s) : (size_t)prec;
				APPEND_PADDED_S(s, slen, width, left_justify);
				f++;
				break;
			case 'p': {
				uintmax_t val;
				char buf[X2S_BUFSIZE];

				GET_ARG_NUMERIC(val, 'p');
				s = x2s(val, true, false, buf, &slen);
				APPEND_PADDED_S(s, slen, width, left_justify);
				f++;
				break;
			} default: not_reached();
			}
			break;
		} default: {
			APPEND_C(*f);
			f++;
			break;
		}}
	}
	label_out:
	if (i < size) {
		str[i] = '\0';
	} else {
		str[size-1] = '\0';
	}

#undef APPEND_C
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
	return i;
}

JEMALLOC_FORMAT_PRINTF(3, 4)
size_t
malloc_snprintf(char *str, size_t size, const char *format, ...) {
	size_t ret;
	va_list ap;

	va_start(ap, format);
	ret = malloc_vsnprintf(str, size, format, ap);
	va_end(ap);

	return ret;
}

void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap) {
	char buf[MALLOC_PRINTF_BUFSIZE];

	if (write_cb == NULL) {
		/*
		 * The caller did not provide an alternate write_cb callback
		 * function, so use the default one.  malloc_write() is an
		 * inline function, so use malloc_message() directly here.
		 */
		write_cb = (je_malloc_message != NULL) ? je_malloc_message :
		    wrtmessage;
		cbopaque = NULL;
	}

	malloc_vsnprintf(buf, sizeof(buf), format, ap);
	write_cb(cbopaque, buf);
}

/*
 * Print to a callback function in such a way as to (hopefully) avoid memory
 * allocation.
 */
JEMALLOC_FORMAT_PRINTF(3, 4)
void
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, ...) {
	va_list ap;

	va_start(ap, format);
	malloc_vcprintf(write_cb, cbopaque, format, ap);
	va_end(ap);
}

/* Print to stderr in such a way as to avoid memory allocation. */
JEMALLOC_FORMAT_PRINTF(1, 2)
void
malloc_printf(const char *format, ...) {
	va_list ap;

	va_start(ap, format);
	malloc_vcprintf(NULL, NULL, format, ap);
	va_end(ap);
}

/*
 * Restore normal assertion macros, in order to make it possible to compile all
 * C files as a single concatenation.
 */
#undef assert
#undef not_reached
#undef not_implemented
#undef assert_not_implemented
#include "jemalloc/internal/assert.h"
deps/jemalloc/src/mutex.c  deleted  100644 → 0
#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/spin.h"

#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool		postpone_init = true;
static malloc_mutex_t	*postponed_mutexes = NULL;
#endif

/******************************************************************************/
/*
 * We intercept pthread_create() calls in order to toggle isthreaded if the
 * process goes multi-threaded.
 */

#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
    const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
    void *__restrict arg) {
	return pthread_create_wrapper(thread, attr, start_routine, arg);
}
#endif

/******************************************************************************/

#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t));
#endif

void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
	mutex_prof_data_t *data = &mutex->prof_data;
	UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER;

	if (ncpus == 1) {
		goto label_spin_done;
	}

	int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
	do {
		spin_cpu_spinwait();
		if (!malloc_mutex_trylock_final(mutex)) {
			data->n_spin_acquired++;
			return;
		}
	} while (cnt++ < max_cnt);

	if (!config_stats) {
		/* Only spin is useful when stats is off. */
		malloc_mutex_lock_final(mutex);
		return;
	}
label_spin_done:
	nstime_update(&before);
	/* Copy before to after to avoid clock skews. */
	nstime_t after;
	nstime_copy(&after, &before);
	uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
	    ATOMIC_RELAXED) + 1;
	/* One last try as above two calls may take quite some cycles. */
	if (!malloc_mutex_trylock_final(mutex)) {
		atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
		data->n_spin_acquired++;
		return;
	}

	/* True slow path. */
	malloc_mutex_lock_final(mutex);
	/* Update more slow-path only counters. */
	atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
	nstime_update(&after);

	nstime_t delta;
	nstime_copy(&delta, &after);
	nstime_subtract(&delta, &before);

	data->n_wait_times++;
	nstime_add(&data->tot_wait_time, &delta);
	if (nstime_compare(&data->max_wait_time, &delta) < 0) {
		nstime_copy(&data->max_wait_time, &delta);
	}
	if (n_thds > data->max_n_thds) {
		data->max_n_thds = n_thds;
	}
}

static void
mutex_prof_data_init(mutex_prof_data_t *data) {
	memset(data, 0, sizeof(mutex_prof_data_t));
	nstime_init(&data->max_wait_time, 0);
	nstime_init(&data->tot_wait_time, 0);
	data->prev_owner = NULL;
}

void
malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_assert_owner(tsdn, mutex);
	mutex_prof_data_init(&mutex->prof_data);
}

static int
mutex_addr_comp(const witness_t *witness1, void *mutex1,
    const witness_t *witness2, void *mutex2) {
	assert(mutex1 != NULL);
	assert(mutex2 != NULL);
	uintptr_t mu1int = (uintptr_t)mutex1;
	uintptr_t mu2int = (uintptr_t)mutex2;
	if (mu1int < mu2int) {
		return -1;
	} else if (mu1int == mu2int) {
		return 0;
	} else {
		return 1;
	}
}

bool
malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
	mutex_prof_data_init(&mutex->prof_data);
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
	InitializeSRWLock(&mutex->lock);
#  else
	if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
	    _CRT_SPINCOUNT)) {
		return true;
	}
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
	mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_OSSPIN))
	mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
	if (postpone_init) {
		mutex->postponed_next = postponed_mutexes;
		postponed_mutexes = mutex;
	} else {
		if (_pthread_mutex_init_calloc_cb(&mutex->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
	}
#else
	pthread_mutexattr_t attr;

	if (pthread_mutexattr_init(&attr) != 0) {
		return true;
	}
	pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
	if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
		pthread_mutexattr_destroy(&attr);
		return true;
	}
	pthread_mutexattr_destroy(&attr);
#endif
	if (config_debug) {
		mutex->lock_order = lock_order;
		if (lock_order == malloc_mutex_address_ordered) {
			witness_init(&mutex->witness, name, rank,
			    mutex_addr_comp, mutex);
		} else {
			witness_init(&mutex->witness, name, rank, NULL, NULL);
		}
	}
	return false;
}

void
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_lock(tsdn, mutex);
}

void
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_unlock(tsdn, mutex);
}

void
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	malloc_mutex_unlock(tsdn, mutex);
#else
	if (malloc_mutex_init(mutex, mutex->witness.name,
	    mutex->witness.rank, mutex->lock_order)) {
		malloc_printf("<jemalloc>: Error re-initializing mutex in "
		    "child\n");
		if (opt_abort) {
			abort();
		}
	}
#endif
}

bool
malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	postpone_init = false;
	while (postponed_mutexes != NULL) {
		if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
		postponed_mutexes = postponed_mutexes->postponed_next;
	}
#endif
	return false;
}
deps/jemalloc/src/mutex_pool.c  deleted  100644 → 0
#define JEMALLOC_MUTEX_POOL_C_

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"

bool
mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) {
	for (int i = 0; i < MUTEX_POOL_SIZE; ++i) {
		if (malloc_mutex_init(&pool->mutexes[i], name, rank,
		    malloc_mutex_address_ordered)) {
			return true;
		}
	}
	return false;
}
deps/jemalloc/src/nstime.c  deleted  100644 → 0
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/assert.h"
#define BILLION UINT64_C(1000000000)
#define MILLION UINT64_C(1000000)
void
nstime_init
(
nstime_t
*
time
,
uint64_t
ns
)
{
time
->
ns
=
ns
;
}
void
nstime_init2
(
nstime_t
*
time
,
uint64_t
sec
,
uint64_t
nsec
)
{
time
->
ns
=
sec
*
BILLION
+
nsec
;
}
uint64_t
nstime_ns
(
const
nstime_t
*
time
)
{
return
time
->
ns
;
}
uint64_t
nstime_msec
(
const
nstime_t
*
time
)
{
return
time
->
ns
/
MILLION
;
}
uint64_t
nstime_sec
(
const
nstime_t
*
time
)
{
return
time
->
ns
/
BILLION
;
}
uint64_t
nstime_nsec
(
const
nstime_t
*
time
)
{
return
time
->
ns
%
BILLION
;
}
void
nstime_copy
(
nstime_t
*
time
,
const
nstime_t
*
source
)
{
*
time
=
*
source
;
}
int
nstime_compare
(
const
nstime_t
*
a
,
const
nstime_t
*
b
)
{
return
(
a
->
ns
>
b
->
ns
)
-
(
a
->
ns
<
b
->
ns
);
}
void
nstime_add
(
nstime_t
*
time
,
const
nstime_t
*
addend
)
{
assert
(
UINT64_MAX
-
time
->
ns
>=
addend
->
ns
);
time
->
ns
+=
addend
->
ns
;
}
void
nstime_iadd
(
nstime_t
*
time
,
uint64_t
addend
)
{
assert
(
UINT64_MAX
-
time
->
ns
>=
addend
);
time
->
ns
+=
addend
;
}
void
nstime_subtract
(
nstime_t
*
time
,
const
nstime_t
*
subtrahend
)
{
assert
(
nstime_compare
(
time
,
subtrahend
)
>=
0
);
time
->
ns
-=
subtrahend
->
ns
;
}
void
nstime_isubtract
(
nstime_t
*
time
,
uint64_t
subtrahend
)
{
assert
(
time
->
ns
>=
subtrahend
);
time
->
ns
-=
subtrahend
;
}
void
nstime_imultiply
(
nstime_t
*
time
,
uint64_t
multiplier
)
{
assert
((((
time
->
ns
|
multiplier
)
&
(
UINT64_MAX
<<
(
sizeof
(
uint64_t
)
<<
2
)))
==
0
)
||
((
time
->
ns
*
multiplier
)
/
multiplier
==
time
->
ns
));
time
->
ns
*=
multiplier
;
}
void
nstime_idivide
(
nstime_t
*
time
,
uint64_t
divisor
)
{
assert
(
divisor
!=
0
);
time
->
ns
/=
divisor
;
}
uint64_t
nstime_divide
(
const
nstime_t
*
time
,
const
nstime_t
*
divisor
)
{
assert
(
divisor
->
ns
!=
0
);
return
time
->
ns
/
divisor
->
ns
;
}
#ifdef _WIN32
# define NSTIME_MONOTONIC true
static
void
nstime_get
(
nstime_t
*
time
)
{
FILETIME
ft
;
uint64_t
ticks_100ns
;
GetSystemTimeAsFileTime
(
&
ft
);
ticks_100ns
=
(((
uint64_t
)
ft
.
dwHighDateTime
)
<<
32
)
|
ft
.
dwLowDateTime
;
nstime_init
(
time
,
ticks_100ns
*
100
);
}
#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE)
# define NSTIME_MONOTONIC true
static
void
nstime_get
(
nstime_t
*
time
)
{
struct
timespec
ts
;
clock_gettime
(
CLOCK_MONOTONIC_COARSE
,
&
ts
);
nstime_init2
(
time
,
ts
.
tv_sec
,
ts
.
tv_nsec
);
}
#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC)
# define NSTIME_MONOTONIC true
static
void
nstime_get
(
nstime_t
*
time
)
{
struct
timespec
ts
;
clock_gettime
(
CLOCK_MONOTONIC
,
&
ts
);
nstime_init2
(
time
,
ts
.
tv_sec
,
ts
.
tv_nsec
);
}
#elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME)
# define NSTIME_MONOTONIC true
static
void
nstime_get
(
nstime_t
*
time
)
{
nstime_init
(
time
,
mach_absolute_time
());
}
#else
# define NSTIME_MONOTONIC false
static
void
nstime_get
(
nstime_t
*
time
)
{
struct
timeval
tv
;
gettimeofday
(
&
tv
,
NULL
);
nstime_init2
(
time
,
tv
.
tv_sec
,
tv
.
tv_usec
*
1000
);
}
#endif
static
bool
nstime_monotonic_impl
(
void
)
{
return
NSTIME_MONOTONIC
;
#undef NSTIME_MONOTONIC
}
nstime_monotonic_t
*
JET_MUTABLE
nstime_monotonic
=
nstime_monotonic_impl
;
static
bool
nstime_update_impl
(
nstime_t
*
time
)
{
nstime_t
old_time
;
nstime_copy
(
&
old_time
,
time
);
nstime_get
(
time
);
/* Handle non-monotonic clocks. */
if
(
unlikely
(
nstime_compare
(
&
old_time
,
time
)
>
0
))
{
nstime_copy
(
time
,
&
old_time
);
return
true
;
}
return
false
;
}
nstime_update_t
*
JET_MUTABLE
nstime_update
=
nstime_update_impl
;
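
Two idioms from the file above, isolated for reference: nstime_compare() uses a branchless three-way comparison, and nstime_update_impl() clamps readings from non-monotonic clocks by restoring the previous value. A standalone sketch of both (illustrative C on raw uint64_t nanosecond counts, not jemalloc API):

#include <stdbool.h>
#include <stdint.h>

/* Branchless three-way compare, as in nstime_compare() above: yields 1, 0, or
 * -1 without the overflow risk of returning (a - b). */
static int cmp_ns(uint64_t a, uint64_t b) {
	return (a > b) - (a < b);
}

/* Clamp a possibly non-monotonic reading the way nstime_update_impl() does:
 * if the new sample went backwards, keep the old value and report it. */
static bool update_ns(uint64_t *cur, uint64_t sample) {
	if (sample < *cur) {
		return true;      /* clock went backwards; *cur unchanged */
	}
	*cur = sample;
	return false;
}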
deps/jemalloc/src/pages.c  deleted  100644 → 0
#define JEMALLOC_PAGES_C_
#include "jemalloc/internal/jemalloc_preamble.h"

#include "jemalloc/internal/pages.h"

#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"

#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#include <sys/sysctl.h>
#ifdef __FreeBSD__
#include <vm/vm_param.h>
#endif
#endif

/******************************************************************************/
/* Data. */

/* Actual operating system page size, detected during bootstrap, <= PAGE. */
static size_t	os_page;

#ifndef _WIN32
#  define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
#  define PAGES_PROT_DECOMMIT (PROT_NONE)
static int	mmap_flags;
#endif
static bool	os_overcommits;

const char *thp_mode_names[] = {
	"default",
	"always",
	"never",
	"not supported"
};
thp_mode_t opt_thp = THP_MODE_DEFAULT;
thp_mode_t init_system_thp_mode;

/* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */
static bool pages_can_purge_lazy_runtime = true;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void os_pages_unmap(void *addr, size_t size);

/******************************************************************************/

static void *
os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
	assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
	assert(ALIGNMENT_CEILING(size, os_page) == size);
	assert(size != 0);

	if (os_overcommits) {
		*commit = true;
	}

	void *ret;
#ifdef _WIN32
	/*
	 * If VirtualAlloc can't allocate at the given address when one is
	 * given, it fails and returns NULL.
	 */
	ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT :
	    0), PAGE_READWRITE);
#else
	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	{
		int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;

		ret = mmap(addr, size, prot, mmap_flags, -1, 0);
	}
	assert(ret != NULL);

	if (ret == MAP_FAILED) {
		ret = NULL;
	} else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		os_pages_unmap(ret, size);
		ret = NULL;
	}
#endif
	assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
	    ret == addr));
	return ret;
}

static void *
os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
    bool *commit) {
	void *ret = (void *)((uintptr_t)addr + leadsize);

	assert(alloc_size >= leadsize + size);
#ifdef _WIN32
	os_pages_unmap(addr, alloc_size);
	void *new_addr = os_pages_map(ret, size, PAGE, commit);
	if (new_addr == ret) {
		return ret;
	}
	if (new_addr != NULL) {
		os_pages_unmap(new_addr, size);
	}
	return NULL;
#else
	size_t trailsize = alloc_size - leadsize - size;

	if (leadsize != 0) {
		os_pages_unmap(addr, leadsize);
	}
	if (trailsize != 0) {
		os_pages_unmap((void *)((uintptr_t)ret + size), trailsize);
	}
	return ret;
#endif
}

static void
os_pages_unmap(void *addr, size_t size) {
	assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
	assert(ALIGNMENT_CEILING(size, os_page) == size);

#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
	if (munmap(addr, size) == -1)
#endif
	{
		char buf[BUFERROR_BUF];

		buferror(get_errno(), buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
		    "VirtualFree"
#else
		    "munmap"
#endif
		    "(): %s\n", buf);
		if (opt_abort) {
			abort();
		}
	}
}

static void *
pages_map_slow(size_t size, size_t alignment, bool *commit) {
	size_t alloc_size = size + alignment - os_page;
	/* Beware size_t wrap-around. */
	if (alloc_size < size) {
		return NULL;
	}

	void *ret;
	do {
		void *pages = os_pages_map(NULL, alloc_size, alignment, commit);
		if (pages == NULL) {
			return NULL;
		}
		size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment)
		    - (uintptr_t)pages;
		ret = os_pages_trim(pages, alloc_size, leadsize, size, commit);
	} while (ret == NULL);

	assert(ret != NULL);
	assert(PAGE_ADDR2BASE(ret) == ret);
	return ret;
}

void *
pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
	assert(alignment >= PAGE);
	assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr);

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in one or two calls to
	 * os_pages_unmap(), and it can leave holes in the process's virtual
	 * memory map if memory grows downward.
	 *
	 * Optimistically try mapping precisely the right amount before falling
	 * back to the slow method, with the expectation that the optimistic
	 * approach works most of the time.
	 */

	void *ret = os_pages_map(addr, size, os_page, commit);
	if (ret == NULL || ret == addr) {
		return ret;
	}
	assert(addr == NULL);
	if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) {
		os_pages_unmap(ret, size);
		return pages_map_slow(size, alignment, commit);
	}

	assert(PAGE_ADDR2BASE(ret) == ret);
	return ret;
}

void
pages_unmap(void *addr, size_t size) {
	assert(PAGE_ADDR2BASE(addr) == addr);
	assert(PAGE_CEILING(size) == size);

	os_pages_unmap(addr, size);
}

static bool
pages_commit_impl(void *addr, size_t size, bool commit) {
	assert(PAGE_ADDR2BASE(addr) == addr);
	assert(PAGE_CEILING(size) == size);

	if (os_overcommits) {
		return true;
	}

#ifdef _WIN32
	return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
	    PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
#else
	{
		int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
		void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
		    -1, 0);
		if (result == MAP_FAILED) {
			return true;
		}
		if (result != addr) {
			/*
			 * We succeeded in mapping memory, but not in the right
			 * place.
			 */
			os_pages_unmap(result, size);
			return true;
		}
		return false;
	}
#endif
}

bool
pages_commit(void *addr, size_t size) {
	return pages_commit_impl(addr, size, true);
}

bool
pages_decommit(void *addr, size_t size) {
	return pages_commit_impl(addr, size, false);
}

bool
pages_purge_lazy(void *addr, size_t size) {
	assert(PAGE_ADDR2BASE(addr) == addr);
	assert(PAGE_CEILING(size) == size);

	if (!pages_can_purge_lazy) {
		return true;
	}
	if (!pages_can_purge_lazy_runtime) {
		/*
		 * Built with lazy purge enabled, but detected it was not
		 * supported on the current system.
		 */
		return true;
	}

#ifdef _WIN32
	VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
	return false;
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
	return (madvise(addr, size,
#  ifdef MADV_FREE
	    MADV_FREE
#  else
	    JEMALLOC_MADV_FREE
#  endif
	    ) != 0);
#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
    !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
	return (madvise(addr, size, MADV_DONTNEED) != 0);
#else
	not_reached();
#endif
}

bool
pages_purge_forced(void *addr, size_t size) {
	assert(PAGE_ADDR2BASE(addr) == addr);
	assert(PAGE_CEILING(size) == size);

	if (!pages_can_purge_forced) {
		return true;
	}

#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
    defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
	return (madvise(addr, size, MADV_DONTNEED) != 0);
#elif defined(JEMALLOC_MAPS_COALESCE)
	/* Try to overlay a new demand-zeroed mapping. */
	return pages_commit(addr, size);
#else
	not_reached();
#endif
}

static bool
pages_huge_impl(void *addr, size_t size, bool aligned) {
	if (aligned) {
		assert(HUGEPAGE_ADDR2BASE(addr) == addr);
		assert(HUGEPAGE_CEILING(size) == size);
	}
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
	return (madvise(addr, size, MADV_HUGEPAGE) != 0);
#else
	return true;
#endif
}

bool
pages_huge(void *addr, size_t size) {
	return pages_huge_impl(addr, size, true);
}

static bool
pages_huge_unaligned(void *addr, size_t size) {
	return pages_huge_impl(addr, size, false);
}

static bool
pages_nohuge_impl(void *addr, size_t size, bool aligned) {
	if (aligned) {
		assert(HUGEPAGE_ADDR2BASE(addr) == addr);
		assert(HUGEPAGE_CEILING(size) == size);
	}

#ifdef JEMALLOC_HAVE_MADVISE_HUGE
	return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
#else
	return false;
#endif
}

bool
pages_nohuge(void *addr, size_t size) {
	return pages_nohuge_impl(addr, size, true);
}

static bool
pages_nohuge_unaligned(void *addr, size_t size) {
	return pages_nohuge_impl(addr, size, false);
}

bool
pages_dontdump(void *addr, size_t size) {
	assert(PAGE_ADDR2BASE(addr) == addr);
	assert(PAGE_CEILING(size) == size);
#ifdef JEMALLOC_MADVISE_DONTDUMP
	return madvise(addr, size, MADV_DONTDUMP) != 0;
#else
	return false;
#endif
}

bool
pages_dodump(void *addr, size_t size) {
	assert(PAGE_ADDR2BASE(addr) == addr);
	assert(PAGE_CEILING(size) == size);
#ifdef JEMALLOC_MADVISE_DONTDUMP
	return madvise(addr, size, MADV_DODUMP) != 0;
#else
	return false;
#endif
}

static size_t
os_page_detect(void) {
#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	return si.dwPageSize;
#elif defined(__FreeBSD__)
	return getpagesize();
#else
	long result = sysconf(_SC_PAGESIZE);
	if (result == -1) {
		return LG_PAGE;
	}
	return (size_t)result;
#endif
}

#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
static bool
os_overcommits_sysctl(void) {
	int vm_overcommit;
	size_t sz;

	sz = sizeof(vm_overcommit);
#if defined(__FreeBSD__) && defined(VM_OVERCOMMIT)
	int mib[2];

	mib[0] = CTL_VM;
	mib[1] = VM_OVERCOMMIT;
	if (sysctl(mib, 2, &vm_overcommit, &sz, NULL, 0) != 0) {
		return false; /* Error. */
	}
#else
	if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
		return false; /* Error. */
	}
#endif

	return ((vm_overcommit & 0x3) == 0);
}
#endif

#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/*
 * Use syscall(2) rather than {open,read,close}(2) when possible to avoid
 * reentry during bootstrapping if another library has interposed system call
 * wrappers.
 */
static bool
os_overcommits_proc(void) {
	int fd;
	char buf[1];

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
	#if defined(O_CLOEXEC)
		fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory",
			O_RDONLY | O_CLOEXEC);
	#else
		fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory",
			O_RDONLY);
		if (fd != -1) {
			fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
		}
	#endif
#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
	#if defined(O_CLOEXEC)
		fd = (int)syscall(SYS_openat, AT_FDCWD,
			"/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
	#else
		fd = (int)syscall(SYS_openat, AT_FDCWD,
			"/proc/sys/vm/overcommit_memory", O_RDONLY);
		if (fd != -1) {
			fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
		}
	#endif
#else
	#if defined(O_CLOEXEC)
		fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY |
			O_CLOEXEC);
	#else
		fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
		if (fd != -1) {
			fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
		}
	#endif
#endif

	if (fd == -1) {
		return false; /* Error. */
	}

	ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
	syscall(SYS_close, fd);
#else
	close(fd);
#endif

	if (nread < 1) {
		return false; /* Error. */
	}
	/*
	 * /proc/sys/vm/overcommit_memory meanings:
	 * 0: Heuristic overcommit.
	 * 1: Always overcommit.
	 * 2: Never overcommit.
	 */
	return (buf[0] == '0' || buf[0] == '1');
}
#endif

void
pages_set_thp_state(void *ptr, size_t size) {
	if (opt_thp == thp_mode_default || opt_thp == init_system_thp_mode) {
		return;
	}
	assert(opt_thp != thp_mode_not_supported &&
	    init_system_thp_mode != thp_mode_not_supported);

	if (opt_thp == thp_mode_always
	    && init_system_thp_mode != thp_mode_never) {
		assert(init_system_thp_mode == thp_mode_default);
		pages_huge_unaligned(ptr, size);
	} else if (opt_thp == thp_mode_never) {
		assert(init_system_thp_mode == thp_mode_default ||
		    init_system_thp_mode == thp_mode_always);
		pages_nohuge_unaligned(ptr, size);
	}
}

static void
init_thp_state(void) {
	if (!have_madvise_huge) {
		if (metadata_thp_enabled() && opt_abort) {
			malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n");
			abort();
		}
		goto label_error;
	}

	static const char sys_state_madvise[] = "always [madvise] never\n";
	static const char sys_state_always[] = "[always] madvise never\n";
	static const char sys_state_never[] = "always madvise [never]\n";
	char buf[sizeof(sys_state_madvise)];

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
	int fd = (int)syscall(SYS_open,
	    "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#else
	int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#endif
	if (fd == -1) {
		goto label_error;
	}

	ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
	syscall(SYS_close, fd);
#else
	close(fd);
#endif

	if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) {
		init_system_thp_mode = thp_mode_default;
	} else if (strncmp(buf, sys_state_always, (size_t)nread) == 0) {
		init_system_thp_mode = thp_mode_always;
	} else if (strncmp(buf, sys_state_never, (size_t)nread) == 0) {
		init_system_thp_mode = thp_mode_never;
	} else {
		goto label_error;
	}
	return;
label_error:
	opt_thp = init_system_thp_mode = thp_mode_not_supported;
}

bool
pages_boot(void) {
	os_page = os_page_detect();
	if (os_page > PAGE) {
		malloc_write("<jemalloc>: Unsupported system page size\n");
		if (opt_abort) {
			abort();
		}
		return true;
	}

#ifndef _WIN32
	mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif

#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
	os_overcommits = os_overcommits_sysctl();
#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
	os_overcommits = os_overcommits_proc();
#  ifdef MAP_NORESERVE
	if (os_overcommits) {
		mmap_flags |= MAP_NORESERVE;
	}
#  endif
#else
	os_overcommits = false;
#endif

	init_thp_state();

	/* Detect lazy purge runtime support. */
	if (pages_can_purge_lazy) {
		bool committed = false;
		void *madv_free_page = os_pages_map(NULL, PAGE, PAGE, &committed);
		if (madv_free_page == NULL) {
			return true;
		}
		assert(pages_can_purge_lazy_runtime);
		if (pages_purge_lazy(madv_free_page, PAGE)) {
			pages_can_purge_lazy_runtime = false;
		}
		os_pages_unmap(madv_free_page, PAGE);
	}

	return false;
}
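
The comment in pages_map() above describes the strategy: optimistically map exactly the requested size, and only fall back to pages_map_slow() (over-allocate by alignment minus a page, then trim the lead and trail) when the optimistic mapping comes back misaligned. A minimal sketch of the alignment arithmetic that slow path relies on (illustrative helper, assumes a power-of-two alignment as jemalloc's ALIGNMENT_CEILING does):

#include <stddef.h>
#include <stdint.h>

/* Round an address up to `alignment` (a power of two); the distance between
 * the raw mapping and this address is the lead that the slow path unmaps. */
static void *align_up(void *addr, size_t alignment) {
	uintptr_t a = (uintptr_t)addr;
	uintptr_t aligned = (a + alignment - 1) & ~(uintptr_t)(alignment - 1);
	return (void *)aligned;
}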
deps/jemalloc/src/prng.c  deleted  100644 → 0
#define JEMALLOC_PRNG_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"