ruanhaishen / redis · Commits

Commit e3b8492e authored Apr 22, 2017 by antirez
Revert "Jemalloc updated to 4.4.0."
This reverts commit
36c1acc2
.
parent
238cebdd
Changes
150
deps/jemalloc/include/jemalloc/internal/private_symbols.txt
...
...
@@ -3,15 +3,12 @@ a0get
a0malloc
arena_aalloc
arena_alloc_junk_small
arena_basic_stats_merge
arena_bin_index
arena_bin_info
arena_bitselm_get_const
arena_bitselm_get_mutable
arena_bitselm_get
arena_boot
arena_choose
arena_choose_hard
arena_choose_impl
arena_chunk_alloc_huge
arena_chunk_cache_maybe_insert
arena_chunk_cache_maybe_remove
...
...
@@ -28,25 +25,18 @@ arena_dalloc_junk_small
arena_dalloc_large
arena_dalloc_large_junked_locked
arena_dalloc_small
arena_decay_tick
arena_decay_ticks
arena_decay_time_default_get
arena_decay_time_default_set
arena_decay_time_get
arena_decay_time_set
arena_dss_prec_get
arena_dss_prec_set
arena_extent_sn_next
arena_get
arena_ichoose
arena_get_hard
arena_init
arena_lg_dirty_mult_default_get
arena_lg_dirty_mult_default_set
arena_lg_dirty_mult_get
arena_lg_dirty_mult_set
arena_malloc
arena_malloc_hard
arena_malloc_large
arena_malloc_small
arena_mapbits_allocated_get
arena_mapbits_binind_get
arena_mapbits_decommitted_get
...
...
@@ -57,6 +47,9 @@ arena_mapbits_large_binind_set
arena_mapbits_large_get
arena_mapbits_large_set
arena_mapbits_large_size_get
arena_mapbitsp_get
arena_mapbitsp_read
arena_mapbitsp_write
arena_mapbits_size_decode
arena_mapbits_size_encode
arena_mapbits_small_runind_get
...
...
@@ -65,33 +58,23 @@ arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
arena_mapbitsp_get_const
arena_mapbitsp_get_mutable
arena_mapbitsp_read
arena_mapbitsp_write
arena_maxrun
arena_maybe_purge
arena_metadata_allocated_add
arena_metadata_allocated_get
arena_metadata_allocated_sub
arena_migrate
arena_miscelm_get_const
arena_miscelm_get_mutable
arena_miscelm_get
arena_miscelm_to_pageind
arena_miscelm_to_rpages
arena_nbound
arena_new
arena_node_alloc
arena_node_dalloc
arena_nthreads_dec
arena_nthreads_get
arena_nthreads_inc
arena_palloc
arena_postfork_child
arena_postfork_parent
arena_prefork0
arena_prefork1
arena_prefork2
arena_prefork3
arena_prefork
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
...
...
@@ -100,25 +83,21 @@ arena_prof_tctx_get
arena_prof_tctx_reset
arena_prof_tctx_set
arena_ptr_small_binind_get
arena_purge
arena_purge_all
arena_quarantine_junk_small
arena_ralloc
arena_ralloc_junk_large
arena_ralloc_no_move
arena_rd_to_miscelm
arena_redzone_corruption
arena_reset
arena_run_regind
arena_run_to_miscelm
arena_salloc
arenas_cache_bypass_cleanup
arenas_cache_cleanup
arena_sdalloc
arena_stats_merge
arena_tcache_fill_small
arena_tdata_get
arena_tdata_get_hard
arenas
arenas_tdata_bypass_cleanup
arenas_tdata_cleanup
atomic_add_p
atomic_add_u
atomic_add_uint32
...
...
@@ -134,11 +113,6 @@ atomic_sub_u
atomic_sub_uint32
atomic_sub_uint64
atomic_sub_z
atomic_write_p
atomic_write_u
atomic_write_uint32
atomic_write_uint64
atomic_write_z
base_alloc
base_boot
base_postfork_child
...
...
@@ -148,6 +122,7 @@ base_stats_get
bitmap_full
bitmap_get
bitmap_info_init
bitmap_info_ngroups
bitmap_init
bitmap_set
bitmap_sfu
...
...
@@ -164,25 +139,32 @@ chunk_alloc_dss
chunk_alloc_mmap
chunk_alloc_wrapper
chunk_boot
chunk_dalloc_arena
chunk_dalloc_cache
chunk_dalloc_mmap
chunk_dalloc_wrapper
chunk_deregister
chunk_dss_boot
chunk_dss_mergeable
chunk_dss_postfork_child
chunk_dss_postfork_parent
chunk_dss_prec_get
chunk_dss_prec_set
chunk_dss_prefork
chunk_hooks_default
chunk_hooks_get
chunk_hooks_set
chunk_in_dss
chunk_lookup
chunk_npages
chunk_postfork_child
chunk_postfork_parent
chunk_prefork
chunk_purge_arena
chunk_purge_wrapper
chunk_register
chunks_rtree
chunksize
chunksize_mask
chunks_rtree
ckh_count
ckh_delete
ckh_insert
...
...
@@ -201,7 +183,6 @@ ctl_nametomib
ctl_postfork_child
ctl_postfork_parent
ctl_prefork
decay_ticker_get
dss_prec_names
extent_node_achunk_get
extent_node_achunk_set
...
...
@@ -209,8 +190,6 @@ extent_node_addr_get
extent_node_addr_set
extent_node_arena_get
extent_node_arena_set
extent_node_committed_get
extent_node_committed_set
extent_node_dirty_insert
extent_node_dirty_linkage_init
extent_node_dirty_remove
...
...
@@ -219,12 +198,8 @@ extent_node_prof_tctx_get
extent_node_prof_tctx_set
extent_node_size_get
extent_node_size_set
extent_node_sn_get
extent_node_sn_set
extent_node_zeroed_get
extent_node_zeroed_set
extent_tree_ad_destroy
extent_tree_ad_destroy_recurse
extent_tree_ad_empty
extent_tree_ad_first
extent_tree_ad_insert
...
...
@@ -242,31 +217,23 @@ extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
extent_tree_szsnad_destroy
extent_tree_szsnad_destroy_recurse
extent_tree_szsnad_empty
extent_tree_szsnad_first
extent_tree_szsnad_insert
extent_tree_szsnad_iter
extent_tree_szsnad_iter_recurse
extent_tree_szsnad_iter_start
extent_tree_szsnad_last
extent_tree_szsnad_new
extent_tree_szsnad_next
extent_tree_szsnad_nsearch
extent_tree_szsnad_prev
extent_tree_szsnad_psearch
extent_tree_szsnad_remove
extent_tree_szsnad_reverse_iter
extent_tree_szsnad_reverse_iter_recurse
extent_tree_szsnad_reverse_iter_start
extent_tree_szsnad_search
ffs_llu
ffs_lu
ffs_u
ffs_u32
ffs_u64
ffs_zu
extent_tree_szad_empty
extent_tree_szad_first
extent_tree_szad_insert
extent_tree_szad_iter
extent_tree_szad_iter_recurse
extent_tree_szad_iter_start
extent_tree_szad_last
extent_tree_szad_new
extent_tree_szad_next
extent_tree_szad_nsearch
extent_tree_szad_prev
extent_tree_szad_psearch
extent_tree_szad_remove
extent_tree_szad_reverse_iter
extent_tree_szad_reverse_iter_recurse
extent_tree_szad_reverse_iter_start
extent_tree_szad_search
get_errno
hash
hash_fmix_32
...
...
@@ -290,16 +257,19 @@ huge_ralloc
huge_ralloc_no_move
huge_salloc
iaalloc
ialloc
iallocztm
iarena_cleanup
icalloc
icalloct
idalloc
idalloct
idalloctm
in_valgrind
imalloc
imalloct
index2size
index2size_compute
index2size_lookup
index2size_tab
in_valgrind
ipalloc
ipalloct
ipallocztm
...
...
@@ -318,11 +288,7 @@ jemalloc_postfork_parent
jemalloc_prefork
large_maxclass
lg_floor
lg_prof_sample
malloc_cprintf
malloc_mutex_assert_not_owner
malloc_mutex_assert_owner
malloc_mutex_boot
malloc_mutex_init
malloc_mutex_lock
malloc_mutex_postfork_child
...
...
@@ -344,29 +310,12 @@ malloc_write
map_bias
map_misc_offset
mb_write
narenas_auto
narenas_tdata_cleanup
mutex_boot
narenas_cache_cleanup
narenas_total_get
ncpus
nhbins
nhclasses
nlclasses
nstime_add
nstime_compare
nstime_copy
nstime_divide
nstime_idivide
nstime_imultiply
nstime_init
nstime_init2
nstime_monotonic
nstime_ns
nstime_nsec
nstime_sec
nstime_subtract
nstime_update
opt_abort
opt_decay_time
opt_dss
opt_junk
opt_junk_alloc
...
...
@@ -385,7 +334,6 @@ opt_prof_gdump
opt_prof_leak
opt_prof_prefix
opt_prof_thread_active_init
opt_purge
opt_quarantine
opt_redzone
opt_stats_print
...
...
@@ -394,32 +342,13 @@ opt_utrace
opt_xmalloc
opt_zero
p2rz
pages_boot
pages_commit
pages_decommit
pages_huge
pages_map
pages_nohuge
pages_purge
pages_trim
pages_unmap
pind2sz
pind2sz_compute
pind2sz_lookup
pind2sz_tab
pow2_ceil_u32
pow2_ceil_u64
pow2_ceil_zu
prng_lg_range_u32
prng_lg_range_u64
prng_lg_range_zu
prng_range_u32
prng_range_u64
prng_range_zu
prng_state_next_u32
prng_state_next_u64
prng_state_next_zu
prof_active
pow2_ceil
prof_active_get
prof_active_get_unlocked
prof_active_set
...
...
@@ -429,7 +358,6 @@ prof_backtrace
prof_boot0
prof_boot1
prof_boot2
prof_bt_count
prof_dump_header
prof_dump_open
prof_free
...
...
@@ -447,8 +375,7 @@ prof_malloc_sample_object
prof_mdump
prof_postfork_child
prof_postfork_parent
prof_prefork0
prof_prefork1
prof_prefork
prof_realloc
prof_reset
prof_sample_accum_update
...
...
@@ -457,7 +384,6 @@ prof_tctx_get
prof_tctx_reset
prof_tctx_set
prof_tdata_cleanup
prof_tdata_count
prof_tdata_get
prof_tdata_init
prof_tdata_reinit
...
...
@@ -467,13 +393,11 @@ prof_thread_active_init_set
prof_thread_active_set
prof_thread_name_get
prof_thread_name_set
psz2ind
psz2u
purge_mode_names
quarantine
quarantine_alloc_hook
quarantine_alloc_hook_work
quarantine_cleanup
register_zone
rtree_child_read
rtree_child_read_hard
rtree_child_tryread
...
...
@@ -489,8 +413,6 @@ rtree_subtree_read_hard
rtree_subtree_tryread
rtree_val_read
rtree_val_write
run_quantize_ceil
run_quantize_floor
s2u
s2u_compute
s2u_lookup
...
...
@@ -500,8 +422,6 @@ size2index
size2index_compute
size2index_lookup
size2index_tab
spin_adaptive
spin_init
stats_cactive
stats_cactive_add
stats_cactive_get
...
...
@@ -511,6 +431,8 @@ tcache_alloc_easy
tcache_alloc_large
tcache_alloc_small
tcache_alloc_small_hard
tcache_arena_associate
tcache_arena_dissociate
tcache_arena_reassociate
tcache_bin_flush_large
tcache_bin_flush_small
...
...
@@ -529,103 +451,49 @@ tcache_flush
tcache_get
tcache_get_hard
tcache_maxclass
tcache_salloc
tcache_stats_merge
tcaches
tcache_salloc
tcaches_create
tcaches_destroy
tcaches_flush
tcaches_get
tcache_stats_merge
thread_allocated_cleanup
thread_deallocated_cleanup
ticker_copy
ticker_init
ticker_read
ticker_tick
ticker_ticks
tsd_arena_get
tsd_arena_set
tsd_arenap_get
tsd_arenas_tdata_bypass_get
tsd_arenas_tdata_bypass_set
tsd_arenas_tdata_bypassp_get
tsd_arenas_tdata_get
tsd_arenas_tdata_set
tsd_arenas_tdatap_get
tsd_boot
tsd_boot0
tsd_boot1
tsd_booted
tsd_booted_get
tsd_cleanup
tsd_cleanup_wrapper
tsd_fetch
tsd_fetch_impl
tsd_get
tsd_get_allocates
tsd_iarena_get
tsd_iarena_set
tsd_iarenap_get
tsd_wrapper_get
tsd_wrapper_set
tsd_initialized
tsd_init_check_recursion
tsd_init_finish
tsd_init_head
tsd_narenas_tdata_get
tsd_narenas_tdata_set
tsd_narenas_tdatap_get
tsd_wrapper_get
tsd_wrapper_set
tsd_nominal
tsd_prof_tdata_get
tsd_prof_tdata_set
tsd_prof_tdatap_get
tsd_quarantine_get
tsd_quarantine_set
tsd_quarantinep_get
tsd_set
tsd_tcache_enabled_get
tsd_tcache_enabled_set
tsd_tcache_enabledp_get
tsd_tcache_get
tsd_tcache_set
tsd_tcachep_get
tsd_tls
tsd_tsd
tsd_prof_tdata_get
tsd_prof_tdata_set
tsd_thread_allocated_get
tsd_thread_allocated_set
tsd_thread_allocatedp_get
tsd_thread_deallocated_get
tsd_thread_deallocated_set
tsd_thread_deallocatedp_get
tsd_tls
tsd_tsd
tsd_tsdn
tsd_witness_fork_get
tsd_witness_fork_set
tsd_witness_forkp_get
tsd_witnesses_get
tsd_witnesses_set
tsd_witnessesp_get
tsdn_fetch
tsdn_null
tsdn_tsd
u2rz
valgrind_freelike_block
valgrind_make_mem_defined
valgrind_make_mem_noaccess
valgrind_make_mem_undefined
witness_assert_lockless
witness_assert_not_owner
witness_assert_owner
witness_fork_cleanup
witness_init
witness_lock
witness_lock_error
witness_lockless_error
witness_not_owner_error
witness_owner
witness_owner_error
witness_postfork_child
witness_postfork_parent
witness_prefork
witness_unlock
witnesses_cleanup
zone_register
deps/jemalloc/include/jemalloc/internal/prng.h
...
...
@@ -18,13 +18,31 @@
* proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*
* Macro parameters:
* uint32_t r : Result.
* unsigned lg_range : (0..32], number of least significant bits to return.
* uint32_t state : Seed value.
* const uint32_t a, c : See above discussion.
*/
#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)
#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)
#define prng32(r, lg_range, state, a, c) do { \
assert((lg_range) > 0); \
assert((lg_range) <= 32); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (32 - (lg_range)); \
} while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \
assert((lg_range) > 0); \
assert((lg_range) <= 64); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (64 - (lg_range)); \
} while (false)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
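For orientation, the restored macros are self-contained enough to exercise directly; a minimal sketch (the seed, the 8-bit draw width, and the loop count are arbitrary choices, not part of this commit):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)
#define prng32(r, lg_range, state, a, c) do { \
    assert((lg_range) > 0); \
    assert((lg_range) <= 32); \
 \
    r = (state * (a)) + (c); \
    state = r; \
    r >>= (32 - (lg_range)); \
} while (false)

int
main(void)
{
    uint32_t state = 42; /* seed */
    uint32_t r;
    int i;

    /* Draw four 8-bit values from the upper (better-distributed) bits. */
    for (i = 0; i < 4; i++) {
        prng32(r, 8, state, PRNG_A_32, PRNG_C_32);
        printf("%u\n", r); /* each r is in [0..256) */
    }
    return (0);
}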
...
...
@@ -38,170 +56,5 @@
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
uint32_t prng_state_next_u32(uint32_t state);
uint64_t prng_state_next_u64(uint64_t state);
size_t prng_state_next_zu(size_t state);
uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic);
uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range);
size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic);
uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic);
uint64_t prng_range_u64(uint64_t *state, uint64_t range);
size_t prng_range_zu(size_t *state, size_t range, bool atomic);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state)
{
    return ((state * PRNG_A_32) + PRNG_C_32);
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state)
{
    return ((state * PRNG_A_64) + PRNG_C_64);
}

JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state)
{
#if LG_SIZEOF_PTR == 2
    return ((state * PRNG_A_32) + PRNG_C_32);
#elif LG_SIZEOF_PTR == 3
    return ((state * PRNG_A_64) + PRNG_C_64);
#else
#error Unsupported pointer size
#endif
}

JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic)
{
    uint32_t ret, state1;

    assert(lg_range > 0);
    assert(lg_range <= 32);

    if (atomic) {
        uint32_t state0;

        do {
            state0 = atomic_read_uint32(state);
            state1 = prng_state_next_u32(state0);
        } while (atomic_cas_uint32(state, state0, state1));
    } else {
        state1 = prng_state_next_u32(*state);
        *state = state1;
    }
    ret = state1 >> (32 - lg_range);

    return (ret);
}

/* 64-bit atomic operations cannot be supported on all relevant platforms. */
JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range)
{
    uint64_t ret, state1;

    assert(lg_range > 0);
    assert(lg_range <= 64);

    state1 = prng_state_next_u64(*state);
    *state = state1;
    ret = state1 >> (64 - lg_range);

    return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic)
{
    size_t ret, state1;

    assert(lg_range > 0);
    assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));

    if (atomic) {
        size_t state0;

        do {
            state0 = atomic_read_z(state);
            state1 = prng_state_next_zu(state0);
        } while (atomic_cas_z(state, state0, state1));
    } else {
        state1 = prng_state_next_zu(*state);
        *state = state1;
    }
    ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);

    return (ret);
}

JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
{
    uint32_t ret;
    unsigned lg_range;

    assert(range > 1);

    /* Compute the ceiling of lg(range). */
    lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;

    /* Generate a result in [0..range) via repeated trial. */
    do {
        ret = prng_lg_range_u32(state, lg_range, atomic);
    } while (ret >= range);

    return (ret);
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range)
{
    uint64_t ret;
    unsigned lg_range;

    assert(range > 1);

    /* Compute the ceiling of lg(range). */
    lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

    /* Generate a result in [0..range) via repeated trial. */
    do {
        ret = prng_lg_range_u64(state, lg_range);
    } while (ret >= range);

    return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
prng_range_zu(size_t *state, size_t range, bool atomic)
{
    size_t ret;
    unsigned lg_range;

    assert(range > 1);

    /* Compute the ceiling of lg(range). */
    lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

    /* Generate a result in [0..range) via repeated trial. */
    do {
        ret = prng_lg_range_zu(state, lg_range, atomic);
    } while (ret >= range);

    return (ret);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
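All of the removed prng_range_*() helpers share one pattern: compute the ceiling of lg(range), then draw that many bits repeatedly until the value lands in [0..range). A minimal sketch of that rejection loop, with rand_bits() a hypothetical stand-in for prng_lg_range_u32() (assumes range <= 2^31 to keep the lg computation simple):

#include <assert.h>
#include <stdint.h>

static uint32_t
range_via_rejection(uint32_t (*rand_bits)(unsigned lg_range), uint32_t range)
{
    unsigned lg_range;
    uint32_t pow2, ret;

    assert(range > 1);

    /* Compute the ceiling of lg(range). */
    for (lg_range = 0, pow2 = 1; pow2 < range; lg_range++, pow2 <<= 1)
        ;

    /* Generate a result in [0..range) via repeated trial. */
    do {
        ret = rand_bits(lg_range);
    } while (ret >= range);

    return (ret);
}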
deps/jemalloc/include/jemalloc/internal/prof.h
...
...
@@ -281,7 +281,7 @@ extern uint64_t prof_interval;
extern size_t lg_prof_sample;

void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_malloc_sample_object(const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
...
...
@@ -293,33 +293,32 @@ size_t prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
void prof_idump(void);
bool prof_mdump(const char *filename);
void prof_gdump(void);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
const char *prof_thread_name_get(void);
bool prof_active_get(void);
bool prof_active_set(bool active);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
bool prof_thread_active_get(void);
bool prof_thread_active_set(bool active);
bool prof_thread_active_init_get(void);
bool prof_thread_active_init_set(bool active_init);
bool prof_gdump_get(void);
bool prof_gdump_set(bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
bool prof_boot2(void);
void prof_prefork(void);
void prof_postfork_parent(void);
void prof_postfork_child(void);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_H_EXTERNS */
...
...
@@ -330,17 +329,17 @@ void prof_sample_threshold_update(prof_tdata_t *tdata);
bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *tctx);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
    prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
    bool update);
void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
prof_tctx_t *prof_tctx_get(const void *ptr);
void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
    prof_tctx_t *tctx);
void prof_malloc_sample_object(const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
    prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
    size_t old_usize, prof_tctx_t *old_tctx);
...
...
@@ -398,34 +397,34 @@ prof_tdata_get(tsd_t *tsd, bool create)
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr)
prof_tctx_get(const void *ptr)
{

    cassert(config_prof);
    assert(ptr != NULL);

    return (arena_prof_tctx_get(tsdn, ptr));
    return (arena_prof_tctx_get(ptr));
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_set(tsdn, ptr, usize, tctx);
    arena_prof_tctx_set(ptr, usize, tctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *old_tctx)
prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
    prof_tctx_t *old_tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx);
    arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
}

JEMALLOC_ALWAYS_INLINE bool
...
...
@@ -437,16 +436,16 @@ prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
    cassert(config_prof);

    tdata = prof_tdata_get(tsd, true);
    if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
    if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
        tdata = NULL;

    if (tdata_out != NULL)
        *tdata_out = tdata;

    if (unlikely(tdata == NULL))
    if (tdata == NULL)
        return (true);

    if (likely(tdata->bytes_until_sample >= usize)) {
    if (tdata->bytes_until_sample >= usize) {
        if (update)
            tdata->bytes_until_sample -= usize;
        return (true);
...
...
@@ -480,17 +479,17 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
}
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);
    assert(usize == isalloc(tsdn, ptr, true));
    assert(usize == isalloc(ptr, true));

    if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
        prof_malloc_sample_object(tsdn, ptr, usize, tctx);
        prof_malloc_sample_object(ptr, usize, tctx);
    else
        prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
        prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}

JEMALLOC_ALWAYS_INLINE void
...
...
@@ -504,7 +503,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

    if (prof_active && !updated && ptr != NULL) {
        assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
        assert(usize == isalloc(ptr, true));
        if (prof_sample_accum_update(tsd, usize, true, NULL)) {
            /*
             * Don't sample.  The usize passed to prof_alloc_prep()
...
...
@@ -513,7 +512,6 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
* though its actual usize was insufficient to cross the
* sample threshold.
*/
            prof_alloc_rollback(tsd, tctx, true);
            tctx = (prof_tctx_t *)(uintptr_t)1U;
        }
    }
...
...
@@ -522,9 +520,9 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);

    if (unlikely(sampled))
        prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
        prof_malloc_sample_object(ptr, usize, tctx);
    else
        prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx);
        prof_tctx_reset(ptr, usize, old_ptr, old_tctx);

    if (unlikely(old_sampled))
        prof_free_sampled_object(tsd, old_usize, old_tctx);
...
...
@@ -533,10 +531,10 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
    prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
    prof_tctx_t *tctx = prof_tctx_get(ptr);

    cassert(config_prof);
    assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
    assert(usize == isalloc(ptr, true));

    if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
        prof_free_sampled_object(tsd, usize, tctx);
...
...
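The prof_sample_accum_update() fast path above is easier to see in isolation: each thread carries a byte budget, allocations consume it, and only an allocation that exhausts the budget is sampled. A minimal sketch of that accounting (the initial threshold and names are hypothetical; jemalloc actually derives the threshold from lg_prof_sample):

#include <stdbool.h>
#include <stddef.h>

static size_t bytes_until_sample = (size_t)1 << 19; /* hypothetical budget */

/* Returns true if this allocation of usize bytes should NOT be sampled. */
static bool
sample_accum_update(size_t usize)
{
    if (bytes_until_sample >= usize) {
        /* Budget not yet exhausted: consume it and skip sampling. */
        bytes_until_sample -= usize;
        return (true);
    }
    /* Threshold crossed: the caller samples and resets the budget. */
    return (false);
}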
deps/jemalloc/include/jemalloc/internal/rb.h
...
...
@@ -42,6 +42,7 @@ struct { \
#define rb_tree(a_type) \
struct { \
a_type *rbt_root; \
a_type rbt_nil; \
}
/* Left accessors. */
...
...
@@ -78,15 +79,6 @@ struct { \
(a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
} while (0)
/* Node initializer. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
	/* Bookkeeping bit cannot be used by node pointer. */ \
	assert(((uintptr_t)(a_node) & 0x1) == 0); \
	rbtn_left_set(a_type, a_field, (a_node), NULL); \
	rbtn_right_set(a_type, a_field, (a_node), NULL); \
	rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
#else
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \
...
...
@@ -107,26 +99,28 @@ struct { \
#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = false; \
} while (0)
#endif
/* Node initializer. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
	rbtn_left_set(a_type, a_field, (a_node), NULL); \
	rbtn_right_set(a_type, a_field, (a_node), NULL); \
	rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
	rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
	rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
#endif

/* Tree initializer. */
#define rb_new(a_type, a_field, a_rbt) do { \
	(a_rbt)->rbt_root = NULL; \
	(a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \
	rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \
	rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \
} while (0)

/* Internal utility macros. */
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
	(r_node) = (a_root); \
	if ((r_node) != NULL) { \
	if ((r_node) != &(a_rbt)->rbt_nil) { \
		for (; \
		    rbtn_left_get(a_type, a_field, (r_node)) != NULL; \
		    rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil; \
		    (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
		} \
	} \
...
...
@@ -134,9 +128,10 @@ struct { \
#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != NULL) { \
for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
(r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \
if ((r_node) != &(a_rbt)->rbt_nil) { \
for (; rbtn_right_get(a_type, a_field, (r_node)) != \
&(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \
(r_node))) { \
} \
} \
} while (0)
...
...
@@ -174,11 +169,11 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, const a_type *key); \
a_prefix##search(a_rbt_type *rbtree, a_type *key); \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \
a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \
a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \
\
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
a_attr void \
...
...
@@ -188,10 +183,7 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
a_rbt_type *, a_type *, void *), void *arg); \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
a_attr void \
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
void *arg);
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);
/*
* The rb_gen() macro generates a type-specific red-black tree implementation,
...
...
@@ -262,7 +254,7 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* last/first.
*
* static ex_node_t *
 * ex_search(ex_t *tree, const ex_node_t *key);
 * ex_search(ex_t *tree, ex_node_t *key);
* Description: Search for node that matches key.
* Args:
* tree: Pointer to an initialized red-black tree object.
...
...
@@ -270,9 +262,9 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* Ret: Node in tree that matches key, or NULL if no match.
*
* static ex_node_t *
 * ex_nsearch(ex_t *tree, const ex_node_t *key);
 * ex_nsearch(ex_t *tree, ex_node_t *key);
 * static ex_node_t *
 * ex_psearch(ex_t *tree, const ex_node_t *key);
 * ex_psearch(ex_t *tree, ex_node_t *key);
* Description: Search for node that matches key. If no match is found,
* return what would be key's successor/predecessor, were
* key in tree.
...
...
@@ -320,20 +312,6 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* arg : Opaque pointer passed to cb().
* Ret: NULL if iteration completed, or the non-NULL callback return value
* that caused termination of the iteration.
*
* static void
* ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
* Description: Iterate over the tree with post-order traversal, remove
* each node, and run the callback if non-null. This is
* used for destroying a tree without paying the cost to
* rebalance it. The tree must not be otherwise altered
* during traversal.
* Args:
* tree: Pointer to an initialized red-black tree object.
* cb : Callback function, which, if non-null, is called for each node
* during iteration. There is no way to stop iteration once it
* has begun.
* arg : Opaque pointer passed to cb().
*/
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
a_attr void \
...
...
@@ -342,30 +320,36 @@ a_prefix##new(a_rbt_type *rbtree) { \
} \
a_attr bool \
a_prefix##empty(a_rbt_type *rbtree) { \
	return (rbtree->rbt_root == NULL); \
	return (rbtree->rbt_root == &rbtree->rbt_nil); \
} \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
	a_type *ret; \
	rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
	if (ret == &rbtree->rbt_nil) { \
		ret = NULL; \
	} \
	return (ret); \
} \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree) { \
	a_type *ret; \
	rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
	if (ret == &rbtree->rbt_nil) { \
		ret = NULL; \
	} \
	return (ret); \
} \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
	a_type *ret; \
	if (rbtn_right_get(a_type, a_field, node) != NULL) { \
	if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
		rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
		    a_field, node), ret); \
	} else { \
		a_type *tnode = rbtree->rbt_root; \
		assert(tnode != NULL); \
		ret = NULL; \
		assert(tnode != &rbtree->rbt_nil); \
		ret = &rbtree->rbt_nil; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
...
...
@@ -376,21 +360,24 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
} else { \
break; \
} \
			assert(tnode != NULL); \
			assert(tnode != &rbtree->rbt_nil); \
		} \
	} \
	if (ret == &rbtree->rbt_nil) { \
		ret = (NULL); \
	} \
	return (ret); \
} \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
	a_type *ret; \
	if (rbtn_left_get(a_type, a_field, node) != NULL) { \
	if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
		rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
		    a_field, node), ret); \
	} else { \
		a_type *tnode = rbtree->rbt_root; \
		assert(tnode != NULL); \
		ret = NULL; \
		assert(tnode != &rbtree->rbt_nil); \
		ret = &rbtree->rbt_nil; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
...
...
@@ -401,17 +388,20 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
} else { \
break; \
} \
			assert(tnode != NULL); \
			assert(tnode != &rbtree->rbt_nil); \
		} \
	} \
	if (ret == &rbtree->rbt_nil) { \
		ret = (NULL); \
	} \
	return (ret); \
} \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
	a_type *ret; \
	int cmp; \
	ret = rbtree->rbt_root; \
	while (ret != NULL \
	while (ret != &rbtree->rbt_nil \
&& (cmp = (a_cmp)(key, ret)) != 0) { \
if (cmp < 0) { \
ret = rbtn_left_get(a_type, a_field, ret); \
...
...
@@ -419,14 +409,17 @@ a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
ret = rbtn_right_get(a_type, a_field, ret); \
} \
} \
	if (ret == &rbtree->rbt_nil) { \
		ret = (NULL); \
	} \
	return (ret); \
} \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
	a_type *ret; \
	a_type *tnode = rbtree->rbt_root; \
	ret = NULL; \
	while (tnode != NULL) { \
	ret = &rbtree->rbt_nil; \
	while (tnode != &rbtree->rbt_nil) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
ret = tnode; \
...
...
@@ -438,14 +431,17 @@ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
break; \
} \
} \
	if (ret == &rbtree->rbt_nil) { \
		ret = (NULL); \
	} \
	return (ret); \
} \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \
	a_type *ret; \
	a_type *tnode = rbtree->rbt_root; \
	ret = NULL; \
	while (tnode != NULL) { \
	ret = &rbtree->rbt_nil; \
	while (tnode != &rbtree->rbt_nil) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
tnode = rbtn_left_get(a_type, a_field, tnode); \
...
...
@@ -457,6 +453,9 @@ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
break; \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
} \
a_attr void \
...
...
@@ -468,7 +467,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbt_node_new(a_type, a_field, rbtree, node); \
	/* Wind. */ \
	path->node = rbtree->rbt_root; \
	for (pathp = path; pathp->node != NULL; pathp++) { \
	for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
assert(cmp != 0); \
if (cmp < 0) { \
...
...
@@ -488,8 +487,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbtn_left_set(a_type, a_field, cnode, left); \
if (rbtn_red_get(a_type, a_field, left)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
leftleft)) { \
if (rbtn_red_get(a_type, a_field, leftleft)) { \
/* Fix up 4-node. */
\
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
...
...
@@ -504,8 +502,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, cnode, right); \
if (rbtn_red_get(a_type, a_field, right)) { \
a_type *left = rbtn_left_get(a_type, a_field, cnode); \
if (left != NULL && rbtn_red_get(a_type, a_field, \
left)) { \
if (rbtn_red_get(a_type, a_field, left)) { \
/* Split 4-node. */
\
rbtn_black_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, right); \
...
...
@@ -538,7 +535,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* Wind. */
\
	nodep = NULL; /* Silence compiler warning. */ \
	path->node = rbtree->rbt_root; \
	for (pathp = path; pathp->node != NULL; pathp++) { \
	for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
if (cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
...
...
@@ -550,7 +547,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* Find node's successor, in preparation for swap. */
\
pathp->cmp = 1; \
nodep = pathp; \
		for (pathp++; pathp->node != NULL; \
		for (pathp++; pathp->node != &rbtree->rbt_nil; \
pathp++) { \
pathp->cmp = -1; \
pathp[1].node = rbtn_left_get(a_type, a_field, \
...
...
@@ -593,7 +590,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
} \
} else { \
a_type *left = rbtn_left_get(a_type, a_field, node); \
		if (left != NULL) { \
		if (left != &rbtree->rbt_nil) { \
/* node has no successor, but it has a left child. */
\
/* Splice node out, without losing the left child. */
\
assert(!rbtn_red_get(a_type, a_field, node)); \
...
...
@@ -613,32 +610,33 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
return; \
} else if (pathp == path) { \
/* The tree only contained one node. */
\
		rbtree->rbt_root = NULL; \
		rbtree->rbt_root = &rbtree->rbt_nil; \
		return; \
	    } \
	} \
	if (rbtn_red_get(a_type, a_field, pathp->node)) { \
		/* Prune red node, which requires no fixup. */ \
		assert(pathp[-1].cmp < 0); \
		rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
		rbtn_left_set(a_type, a_field, pathp[-1].node, \
		    &rbtree->rbt_nil); \
		return; \
	} \
	/* The node to be pruned is black, so unwind until balance is */ \
	/* restored. */ \
	pathp->node = NULL; \
	pathp->node = &rbtree->rbt_nil; \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
assert(pathp->cmp != 0); \
if (pathp->cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp->node, \
pathp[1].node); \
assert(!rbtn_red_get(a_type, a_field, pathp[1].node)); \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *right = rbtn_right_get(a_type, a_field, \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
a_type *tnode; \
if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
rightleft)) { \
if (rbtn_red_get(a_type, a_field, rightleft)) { \
/* In the following diagrams, ||, //, and \\ */
\
/* indicate the path to the removed node. */
\
/* */
\
...
...
@@ -681,8 +679,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
rightleft)) { \
if (rbtn_red_get(a_type, a_field, rightleft)) { \
/* || */
\
/* pathp(b) */
\
/* // \ */
\
...
...
@@ -736,8 +733,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
left); \
a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
leftright); \
if (leftrightleft != NULL && rbtn_red_get(a_type, \
a_field, leftrightleft)) { \
if (rbtn_red_get(a_type, a_field, leftrightleft)) { \
/* || */
\
/* pathp(b) */
\
/* / \\ */
\
...
...
@@ -763,7 +759,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* (b) */
\
/* / */
\
/* (b) */
\
				assert(leftright != NULL); \
				assert(leftright != &rbtree->rbt_nil); \
rbtn_red_set(a_type, a_field, leftright); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
...
...
@@ -786,8 +782,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
return; \
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
leftleft)) { \
if (rbtn_red_get(a_type, a_field, leftleft)) { \
/* || */
\
/* pathp(r) */
\
/* / \\ */
\
...
...
@@ -825,8 +820,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
} \
} else { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
leftleft)) { \
if (rbtn_red_get(a_type, a_field, leftleft)) { \
/* || */
\
/* pathp(b) */
\
/* / \\ */
\
...
...
@@ -872,13 +866,13 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
	if (node == NULL) { \
		return (NULL); \
	if (node == &rbtree->rbt_nil) { \
		return (&rbtree->rbt_nil); \
	} else { \
		a_type *ret; \
		if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
		    a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
		    arg)) != NULL) { \
		    a_field, node), cb, arg)) != &rbtree->rbt_nil \
		    || (ret = cb(rbtree, node, arg)) != NULL) { \
			return (ret); \
return (ret); \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
...
...
@@ -892,8 +886,8 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
if (cmp < 0) { \
a_type *ret; \
		if ((ret = a_prefix##iter_start(rbtree, start, \
		    rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
		    (ret = cb(rbtree, node, arg)) != NULL) { \
		    rbtn_left_get(a_type, a_field, node), cb, arg)) != \
		    &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
...
...
@@ -920,18 +914,21 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
} else { \
ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
} \
if (ret == &rbtree->rbt_nil) { \
ret = NULL; \
} \
return (ret); \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
	if (node == NULL) { \
		return (NULL); \
	if (node == &rbtree->rbt_nil) { \
		return (&rbtree->rbt_nil); \
	} else { \
		a_type *ret; \
		if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
		    rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
		    (ret = cb(rbtree, node, arg)) != NULL) { \
		    rbtn_right_get(a_type, a_field, node), cb, arg)) != \
		    &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
			return (ret); \
return (ret); \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
...
...
@@ -946,8 +943,8 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
if (cmp > 0) { \
a_type *ret; \
		if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
		    rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
		    (ret = cb(rbtree, node, arg)) != NULL) { \
		    rbtn_right_get(a_type, a_field, node), cb, arg)) != \
		    &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
...
...
@@ -975,29 +972,10 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
cb, arg); \
} \
	return (ret); \
} \
a_attr void \
a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
    a_type *, void *), void *arg) { \
	if (node == NULL) { \
		return; \
	} \
	a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
	    node), cb, arg); \
	rbtn_left_set(a_type, a_field, (node), NULL); \
	a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
	    node), cb, arg); \
	rbtn_right_set(a_type, a_field, (node), NULL); \
	if (cb) { \
		cb(node, arg); \
	} \
} \
a_attr void \
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
    void *arg) { \
	a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
	rbtree->rbt_root = NULL; \
	if (ret == &rbtree->rbt_nil) { \
		ret = NULL; \
	} \
	return (ret); \
}
#endif
/* RB_H_ */
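For context, the ex_* functions documented in the header's comment come from instantiations like the following sketch; ex_node_t and ex_cmp are the hypothetical example types the comment itself uses, and the include path is an assumption:

#include <stddef.h>
#include "rb.h"

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
    int key;
    rb_node(ex_node_t) link; /* embedded left/right linkage */
};
typedef rb_tree(ex_node_t) ex_t;

static int
ex_cmp(const ex_node_t *a, const ex_node_t *b)
{
    /* rb_gen() requires a trichotomous comparator: <0, 0, or >0. */
    return ((a->key > b->key) - (a->key < b->key));
}

/* Emits static ex_new(), ex_insert(), ex_remove(), ex_search(), ex_iter(). */
rb_gen(static, ex_, ex_t, ex_node_t, link, ex_cmp)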
deps/jemalloc/include/jemalloc/internal/rtree.h
...
...
@@ -15,10 +15,9 @@ typedef struct rtree_s rtree_t;
* machine address width.
*/
#define LG_RTREE_BITS_PER_LEVEL 4
#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
/* Maximum rtree height. */
#define RTREE_BITS_PER_LEVEL (ZU(1) << LG_RTREE_BITS_PER_LEVEL)
#define RTREE_HEIGHT_MAX \
	((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
	((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
/* Used for two-stage lock-free node initialization. */
#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
...
...
@@ -112,25 +111,22 @@ unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
bool rtree_node_valid(rtree_node_elm_t *node);
rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm, bool dependent);
rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
    unsigned level, bool dependent);
    unsigned level);
extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
    bool dependent);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
    const extent_node_t *val);
rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
    bool dependent);
rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
    bool dependent);
rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level);
rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level);
extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
JEMALLOC_ALWAYS_INLINE unsigned
JEMALLOC_INLINE unsigned
rtree_start_level(rtree_t *rtree, uintptr_t key)
{
    unsigned start_level;
...
...
@@ -144,7 +140,7 @@ rtree_start_level(rtree_t *rtree, uintptr_t key)
    return (start_level);
}

JEMALLOC_ALWAYS_INLINE uintptr_t
JEMALLOC_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{
...
@@ -153,40 +149,37 @@ rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
    rtree->levels[level].bits) - 1));
}

JEMALLOC_ALWAYS_INLINE bool
JEMALLOC_INLINE bool
rtree_node_valid(rtree_node_elm_t *node)
{
    return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
}

JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_child_tryread(rtree_node_elm_t *elm, bool dependent)
JEMALLOC_INLINE rtree_node_elm_t *
rtree_child_tryread(rtree_node_elm_t *elm)
{
    rtree_node_elm_t *child;

    /* Double-checked read (first read may be stale. */
    child = elm->child;
    if (!dependent && !rtree_node_valid(child))
    if (!rtree_node_valid(child))
        child = atomic_read_p(&elm->pun);
    assert(!dependent || child != NULL);
    return (child);
}

JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
    bool dependent)
JEMALLOC_INLINE rtree_node_elm_t *
rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{
    rtree_node_elm_t *child;

    child = rtree_child_tryread(elm, dependent);
    if (!dependent && unlikely(!rtree_node_valid(child)))
    child = rtree_child_tryread(elm);
    if (unlikely(!rtree_node_valid(child)))
        child = rtree_child_read_hard(rtree, elm, level);
    assert(!dependent || child != NULL);
    return (child);
}

JEMALLOC_ALWAYS_INLINE extent_node_t *
JEMALLOC_INLINE extent_node_t *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
{
...
...
@@ -215,119 +208,54 @@ rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
    atomic_write_p(&elm->pun, val);
}

JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
JEMALLOC_INLINE rtree_node_elm_t *
rtree_subtree_tryread(rtree_t *rtree, unsigned level)
{
    rtree_node_elm_t *subtree;

    /* Double-checked read (first read may be stale. */
    subtree = rtree->levels[level].subtree;
    if (!dependent && unlikely(!rtree_node_valid(subtree)))
    if (!rtree_node_valid(subtree))
        subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
    assert(!dependent || subtree != NULL);
    return (subtree);
}

JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
JEMALLOC_INLINE rtree_node_elm_t *
rtree_subtree_read(rtree_t *rtree, unsigned level)
{
    rtree_node_elm_t *subtree;

    subtree = rtree_subtree_tryread(rtree, level, dependent);
    if (!dependent && unlikely(!rtree_node_valid(subtree)))
    subtree = rtree_subtree_tryread(rtree, level);
    if (unlikely(!rtree_node_valid(subtree)))
        subtree = rtree_subtree_read_hard(rtree, level);
    assert(!dependent || subtree != NULL);
    return (subtree);
}

JEMALLOC_ALWAYS_INLINE extent_node_t *
JEMALLOC_INLINE extent_node_t *
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
    uintptr_t subkey;
    unsigned start_level;
    rtree_node_elm_t *node;
    unsigned i, start_level;
    rtree_node_elm_t *node, *child;

    start_level = rtree_start_level(rtree, key);

    node = rtree_subtree_tryread(rtree, start_level, dependent);
#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
    switch (start_level + RTREE_GET_BIAS) {
#define RTREE_GET_SUBTREE(level) \
case level: \
assert(level < (RTREE_HEIGHT_MAX-1)); \
if (!dependent && unlikely(!rtree_node_valid(node))) \
return (NULL); \
subkey = rtree_subkey(rtree, key, level - \
RTREE_GET_BIAS); \
node = rtree_child_tryread(&node[subkey], dependent); \
/* Fall through. */
#define RTREE_GET_LEAF(level) \
case level: \
assert(level == (RTREE_HEIGHT_MAX-1)); \
if (!dependent && unlikely(!rtree_node_valid(node))) \
return (NULL); \
subkey = rtree_subkey(rtree, key, level - \
RTREE_GET_BIAS); \
/* \
* node is a leaf, so it contains values rather than \
* child pointers. \
*/
\
return (rtree_val_read(rtree, &node[subkey], \
dependent));
#if RTREE_HEIGHT_MAX > 1
	RTREE_GET_SUBTREE(0)
#endif
#if RTREE_HEIGHT_MAX > 2
	RTREE_GET_SUBTREE(1)
#endif
#if RTREE_HEIGHT_MAX > 3
	RTREE_GET_SUBTREE(2)
#endif
#if RTREE_HEIGHT_MAX > 4
	RTREE_GET_SUBTREE(3)
#endif
#if RTREE_HEIGHT_MAX > 5
	RTREE_GET_SUBTREE(4)
#endif
#if RTREE_HEIGHT_MAX > 6
	RTREE_GET_SUBTREE(5)
#endif
#if RTREE_HEIGHT_MAX > 7
	RTREE_GET_SUBTREE(6)
#endif
#if RTREE_HEIGHT_MAX > 8
	RTREE_GET_SUBTREE(7)
#endif
#if RTREE_HEIGHT_MAX > 9
	RTREE_GET_SUBTREE(8)
#endif
#if RTREE_HEIGHT_MAX > 10
	RTREE_GET_SUBTREE(9)
#endif
#if RTREE_HEIGHT_MAX > 11
	RTREE_GET_SUBTREE(10)
#endif
#if RTREE_HEIGHT_MAX > 12
	RTREE_GET_SUBTREE(11)
#endif
#if RTREE_HEIGHT_MAX > 13
	RTREE_GET_SUBTREE(12)
#endif
#if RTREE_HEIGHT_MAX > 14
	RTREE_GET_SUBTREE(13)
#endif
#if RTREE_HEIGHT_MAX > 15
	RTREE_GET_SUBTREE(14)
#endif
#if RTREE_HEIGHT_MAX > 16
#  error Unsupported RTREE_HEIGHT_MAX
#endif
	RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1)
#undef RTREE_GET_SUBTREE
#undef RTREE_GET_LEAF
	default: not_reached();
	}
	for (i = start_level, node = rtree_subtree_tryread(rtree,
	    start_level); /**/; i++, node = child) {
		if (!dependent && unlikely(!rtree_node_valid(node)))
			return (NULL);
		subkey = rtree_subkey(rtree, key, i);
		if (i == rtree->height - 1) {
			/*
			 * node is a leaf, so it contains values rather than
			 * child pointers.
			 */
			return (rtree_val_read(rtree, &node[subkey],
			    dependent));
		}
		assert(i < rtree->height - 1);
		child = rtree_child_tryread(&node[subkey]);
	}
#undef RTREE_GET_BIAS
	not_reached();
}
...
...
@@ -340,7 +268,7 @@ rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
    start_level = rtree_start_level(rtree, key);

    node = rtree_subtree_read(rtree, start_level, false);
    node = rtree_subtree_read(rtree, start_level);
    if (node == NULL)
        return (true);
    for (i = start_level; /**/; i++, node = child) {
...
...
@@ -354,7 +282,7 @@ rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
            return (false);
        }
        assert(i + 1 < rtree->height);
        child = rtree_child_read(rtree, &node[subkey], i, false);
        child = rtree_child_read(rtree, &node[subkey], i);
        if (child == NULL)
            return (true);
    }
...
...
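The RTREE_BITS_PER_LEVEL / RTREE_HEIGHT_MAX arithmetic above checks out standalone; a minimal sketch assuming 64-bit pointers (LG_SIZEOF_PTR == 3), where each level consumes 16 key bits and the tree therefore never exceeds 4 levels:

#include <stdio.h>

#define LG_SIZEOF_PTR 3 /* assumption: 64-bit pointers */
#define LG_RTREE_BITS_PER_LEVEL 4
#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
#define RTREE_HEIGHT_MAX \
    ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)

int
main(void)
{
    /* Prints: bits/level = 16, height max = 4 */
    printf("bits/level = %u, height max = %u\n",
        RTREE_BITS_PER_LEVEL, RTREE_HEIGHT_MAX);
    return (0);
}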
deps/jemalloc/include/jemalloc/internal/size_classes.sh
...
...
@@ -48,21 +48,6 @@ size_class() {
  lg_p=$5
  lg_kmax=$6

  if [ ${lg_delta} -ge ${lg_p} ] ; then
    psz="yes"
  else
    pow2 ${lg_p}; p=${pow2_result}
    pow2 ${lg_grp}; grp=${pow2_result}
    pow2 ${lg_delta}; delta=${pow2_result}
    sz=$((${grp} + ${delta} * ${ndelta}))
    npgs=$((${sz} / ${p}))
    if [ ${sz} -eq $((${npgs} * ${p})) ] ; then
      psz="yes"
    else
      psz="no"
    fi
  fi

  lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta}
  if [ ${pow2_result} -lt ${ndelta} ] ; then
    rem="yes"
...
...
@@ -89,15 +74,14 @@ size_class() {
  else
    lg_delta_lookup="no"
  fi
  printf '    SC(%3d, %6d, %8d, %6d, %3s, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${lg_delta_lookup}
  printf '    SC(%3d, %6d, %8d, %6d, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${bin} ${lg_delta_lookup}
  # Defined upon return:
  # - psz ("yes" or "no")
  # - bin ("yes" or "no")
  # - lg_delta_lookup (${lg_delta} or "no")
  # - bin ("yes" or "no")
}

sep_line() {
  echo " \\"
  echo " \\"
}

size_classes() {
...
...
@@ -111,13 +95,12 @@ size_classes() {
  pow2 ${lg_g}; g=${pow2_result}

  echo "#define SIZE_CLASSES \\"
  echo "  /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \\"
  echo "  /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \\"

  ntbins=0
  nlbins=0
  lg_tiny_maxclass='"NA"'
  nbins=0
  npsizes=0

  # Tiny size classes.
  ndelta=0
...
...
@@ -129,9 +112,6 @@ size_classes() {
    if [ ${lg_delta_lookup} != "no" ] ; then
      nlbins=$((${index} + 1))
    fi
    if [ ${psz} = "yes" ] ; then
      npsizes=$((${npsizes} + 1))
    fi
    if [ ${bin} != "no" ] ; then
      nbins=$((${index} + 1))
    fi
...
...
@@ -153,25 +133,19 @@ size_classes() {
    index=$((${index} + 1))
    lg_grp=$((${lg_grp} + 1))
    lg_delta=$((${lg_delta} + 1))
    if [ ${psz} = "yes" ] ; then
      npsizes=$((${npsizes} + 1))
    fi
  fi
  while [ ${ndelta} -lt ${g} ] ; do
    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
    index=$((${index} + 1))
    ndelta=$((${ndelta} + 1))
    if [ ${psz} = "yes" ] ; then
      npsizes=$((${npsizes} + 1))
    fi
  done

  # All remaining groups.
  lg_grp=$((${lg_grp} + ${lg_g}))
  while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do
  while [ ${lg_grp} -lt ${ptr_bits} ] ; do
    sep_line
    ndelta=1
    if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then
    if [ ${lg_grp} -eq $((${ptr_bits} - 1)) ] ; then
      ndelta_limit=$((${g} - 1))
    else
      ndelta_limit=${g}
...
...
@@ -183,9 +157,6 @@ size_classes() {
      # Final written value is correct:
      lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
    fi
    if [ ${psz} = "yes" ] ; then
      npsizes=$((${npsizes} + 1))
    fi
    if [ ${bin} != "no" ] ; then
      nbins=$((${index} + 1))
      # Final written value is correct:
...
...
@@ -212,7 +183,6 @@ size_classes() {
# - nlbins
# - nbins
# - nsizes
# - npsizes
# - lg_tiny_maxclass
# - lookup_maxclass
# - small_maxclass
...
...
@@ -230,13 +200,13 @@ cat <<EOF
* be defined prior to inclusion, and it in turn defines:
*
* LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
* SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz,
* bin, lg_delta_lookup) tuples.
* SIZE_CLASSES: Complete table of
* SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)
* tuples.
* index: Size class index.
* lg_grp: Lg group base size (no deltas added).
* lg_delta: Lg delta to previous size class.
* ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta
* psz: 'yes' if a multiple of the page size, 'no' otherwise.
* bin: 'yes' if a small bin size class, 'no' otherwise.
* lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
* otherwise.
...
...
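Each SC() tuple encodes size == (1 << lg_grp) + (ndelta << lg_delta), per the table comment above; a minimal check of that formula for one representative tuple (lg_grp = 7, lg_delta = 5, ndelta = 3, i.e. a 224-byte class -- the choice is illustrative):

#include <assert.h>
#include <stddef.h>

static size_t
sc_size(unsigned lg_grp, unsigned lg_delta, unsigned ndelta)
{
    return (((size_t)1 << lg_grp) + ((size_t)ndelta << lg_delta));
}

int
main(void)
{
    assert(sc_size(7, 5, 3) == 224); /* 128 + 3*32 */
    return (0);
}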
@@ -244,7 +214,6 @@ cat <<EOF
* NLBINS: Number of bins supported by the lookup table.
* NBINS: Number of small size class bins.
* NSIZES: Number of size classes.
* NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE).
* LG_TINY_MAXCLASS: Lg of maximum tiny size class.
* LOOKUP_MAXCLASS: Maximum size class included in lookup table.
* SMALL_MAXCLASS: Maximum small size class.
...
...
@@ -269,7 +238,6 @@ for lg_z in ${lg_zarr} ; do
  echo "#define NLBINS ${nlbins}"
  echo "#define NBINS ${nbins}"
  echo "#define NSIZES ${nsizes}"
  echo "#define NPSIZES ${npsizes}"
  echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}"
  echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
  echo "#define SMALL_MAXCLASS ${small_maxclass}"
...
...
deps/jemalloc/include/jemalloc/internal/smoothstep.h (deleted, 100644 → 0)
/*
* This file was generated by the following command:
* sh smoothstep.sh smoother 200 24 3 15
*/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* This header defines a precomputed table based on the smoothstep family of
* sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
* to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
* that floating point math can be avoided.
*
 *   smoothstep(x) = -2x^3 + 3x^2
 *
 *   smootherstep(x) = 6x^5 - 15x^4 + 10x^3
 *
 *   smootheststep(x) = -20x^7 + 70x^6 - 84x^5 + 35x^4
*/
#define SMOOTHSTEP_VARIANT "smoother"
#define SMOOTHSTEP_NSTEPS 200
#define SMOOTHSTEP_BFP 24
#define SMOOTHSTEP \
	/* STEP(step, h, x, y) */ \
STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
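For context on how a table in this shape is consumed: the STEP() rows are expanded through a caller-defined STEP macro, and the fixed-point h column divided by 2^SMOOTHSTEP_BFP recovers y. A minimal sketch with a three-row stand-in table (the real one has 200 rows; everything outside the header above is illustrative):

#include <stdint.h>
#include <stdio.h>

#define SMOOTHSTEP_BFP 24
/* Tiny stand-in for the 200-entry SMOOTHSTEP table above. */
#define SMOOTHSTEP \
    STEP(  1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
    STEP(100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
    STEP(200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000)

/* Expand only the fixed-point column into an array. */
static const uint64_t h_steps[] = {
#define STEP(step, h, x, y) h,
	SMOOTHSTEP
#undef STEP
};

int main(void)
{
	/* Midpoint entry: 0x800000 / 2^24 == 0.5. */
	double y = (double)h_steps[1] / (double)(1ULL << SMOOTHSTEP_BFP);
	printf("%f\n", y);	/* prints 0.500000 */
	return 0;
}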
deps/jemalloc/include/jemalloc/internal/smoothstep.sh
deleted 100755 → 0
#!/bin/sh
#
# Generate a discrete lookup table for a sigmoid function in the smoothstep
# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table
# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode
# the entries using a binary fixed point representation.
#
# Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec>
#
# <variant> is in {smooth, smoother, smoothest}.
# <nsteps> must be greater than zero.
# <bfp> must be in [0..62]; reasonable values are roughly [10..30].
# <xprec> is x decimal precision.
# <yprec> is y decimal precision.
#set -x
cmd="sh smoothstep.sh $*"
variant=$1
nsteps=$2
bfp=$3
xprec=$4
yprec=$5

case "${variant}" in
  smooth)
    ;;
  smoother)
    ;;
  smoothest)
    ;;
  *)
    echo "Unsupported variant"
    exit 1
    ;;
esac
smooth() {
  step=$1
  y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}'`
}

smoother() {
  step=$1
  y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}'`
}

smoothest() {
  step=$1
  y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}'`
}
cat <<EOF
/*
 * This file was generated by the following command:
 *   $cmd
 */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* This header defines a precomputed table based on the smoothstep family of
* sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
* to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
* that floating point math can be avoided.
*
* 3 2
* smoothstep(x) = -2x + 3x
*
* 5 4 3
* smootherstep(x) = 6x - 15x + 10x
*
* 7 6 5 4
* smootheststep(x) = -20x + 70x - 84x + 35x
*/
#define	SMOOTHSTEP_VARIANT	"${variant}"
#define	SMOOTHSTEP_NSTEPS	${nsteps}
#define	SMOOTHSTEP_BFP		${bfp}
#define	SMOOTHSTEP \\
	/* STEP(step, h, x, y) */ \\
EOF
s=1
while [ $s -le $nsteps ] ; do
  $variant ${s}
  x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
  printf '  STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n' ${s} ${h} ${x} ${y}

  s=$((s+1))
done
echo
cat <<EOF
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
EOF
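Per the banner in the generated header above, the deleted smoothstep.h came from running this script with the "smoother" variant; regenerating it amounts to a single invocation (the output redirection target is illustrative):

  sh smoothstep.sh smoother 200 24 3 15 > smoothstep.h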
deps/jemalloc/include/jemalloc/internal/spin.h
deleted 100644 → 0
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct spin_s spin_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct spin_s {
	unsigned	iteration;
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void	spin_init(spin_t *spin);
void	spin_adaptive(spin_t *spin);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_))
JEMALLOC_INLINE void
spin_init(spin_t *spin)
{

	spin->iteration = 0;
}

JEMALLOC_INLINE void
spin_adaptive(spin_t *spin)
{
	volatile uint64_t i;

	for (i = 0; i < (KQU(1) << spin->iteration); i++)
		CPU_SPINWAIT;

	if (spin->iteration < 63)
		spin->iteration++;
}

#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
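A sketch of how this adaptive spin is typically driven around a try-lock loop; C11 atomic_flag stands in for jemalloc's mutex internals, and spin_adaptive() is restated (CPU_SPINWAIT omitted) so the example is self-contained:

#include <stdatomic.h>
#include <stdint.h>

typedef struct { unsigned iteration; } spin_t;

/* Wait 2^iteration iterations, doubling the backoff after each call. */
static void
spin_adaptive(spin_t *spin)
{
	volatile uint64_t i;

	for (i = 0; i < (UINT64_C(1) << spin->iteration); i++)
		;
	if (spin->iteration < 63)
		spin->iteration++;
}

static atomic_flag lock = ATOMIC_FLAG_INIT;

static void
lock_acquire(void)
{
	spin_t spin = {0};	/* spin_init() equivalent */

	/* Back off exponentially between failed acquisition attempts. */
	while (atomic_flag_test_and_set_explicit(&lock, memory_order_acquire))
		spin_adaptive(&spin);
}

int main(void)
{
	lock_acquire();
	atomic_flag_clear_explicit(&lock, memory_order_release);
	return 0;
}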
deps/jemalloc/include/jemalloc/internal/stats.h
...
...
@@ -102,14 +102,6 @@ struct arena_stats_s {
 	/* Number of bytes currently mapped. */
 	size_t		mapped;

-	/*
-	 * Number of bytes currently retained as a side effect of munmap() being
-	 * disabled/bypassed.  Retained bytes are technically mapped (though
-	 * always decommitted or purged), but they are excluded from the mapped
-	 * statistic (above).
-	 */
-	size_t		retained;
-
 	/*
 	 * Total number of purge sweeps, total number of madvise calls made,
 	 * and total pages purged in order to keep dirty unused memory under
...
@@ -176,9 +168,6 @@ JEMALLOC_INLINE void
 stats_cactive_add(size_t size)
 {

-	assert(size > 0);
-	assert((size & chunksize_mask) == 0);
-
 	atomic_add_z(&stats_cactive, size);
 }
...
@@ -186,9 +175,6 @@ JEMALLOC_INLINE void
 stats_cactive_sub(size_t size)
 {

-	assert(size > 0);
-	assert((size & chunksize_mask) == 0);
-
 	atomic_sub_z(&stats_cactive, size);
 }
 #endif
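The two inlines above reduce to an atomic add/sub on a global byte counter. A self-contained C11 equivalent of the pattern (atomic_add_z/atomic_sub_z are jemalloc wrappers; this is a sketch, not the library's code):

#include <stdatomic.h>
#include <stddef.h>

static _Atomic size_t stats_cactive;

static void
stats_cactive_add(size_t size)
{
	atomic_fetch_add_explicit(&stats_cactive, size, memory_order_relaxed);
}

static void
stats_cactive_sub(size_t size)
{
	atomic_fetch_sub_explicit(&stats_cactive, size, memory_order_relaxed);
}

int main(void)
{
	stats_cactive_add(4096);
	stats_cactive_sub(4096);
	return (atomic_load(&stats_cactive) != 0);	/* 0 on success */
}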
...
...
deps/jemalloc/include/jemalloc/internal/tcache.h
...
...
@@ -70,20 +70,13 @@ struct tcache_bin_s {
 	int		low_water;	/* Min # cached since last GC. */
 	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
 	unsigned	ncached;	/* # of cached objects. */
-	/*
-	 * To make use of adjacent cacheline prefetch, the items in the avail
-	 * stack goes to higher address for newer allocations.  avail points
-	 * just above the available space, which means that
-	 * avail[-ncached, ... -1] are available items and the lowest item will
-	 * be allocated first.
-	 */
 	void		**avail;	/* Stack of available objects. */
 };

 struct tcache_s {
 	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
 	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum(). */
-	ticker_t	gc_ticker;	/* Drives incremental GC. */
+	unsigned	ev_cnt;		/* Event count since incremental GC. */
 	szind_t		next_gc_bin;	/* Next bin to GC. */
 	tcache_bin_t	tbins[1];	/* Dynamically sized. */
 	/*
...
@@ -115,7 +108,7 @@ extern tcache_bin_info_t *tcache_bin_info;
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
-extern unsigned	nhbins;
+extern size_t	nhbins;

 /* Maximum cached size class. */
 extern size_t	tcache_maxclass;
...
@@ -130,25 +123,27 @@ extern size_t tcache_maxclass;
 */
 extern tcaches_t	*tcaches;

-size_t	tcache_salloc(tsdn_t *tsdn, const void *ptr);
+size_t	tcache_salloc(const void *ptr);
 void	tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
-void	*tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
-    tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
+void	*tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    tcache_bin_t *tbin, szind_t binind);
 void	tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
     szind_t binind, unsigned rem);
 void	tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
     unsigned rem, tcache_t *tcache);
-void	tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
-    arena_t *oldarena, arena_t *newarena);
+void	tcache_arena_associate(tcache_t *tcache, arena_t *arena);
+void	tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
+    arena_t *newarena);
 void	tcache_arena_dissociate(tcache_t *tcache, arena_t *arena);
 tcache_t *tcache_get_hard(tsd_t *tsd);
-tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena);
+tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
 void	tcache_cleanup(tsd_t *tsd);
 void	tcache_enabled_cleanup(tsd_t *tsd);
-void	tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
+void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
 bool	tcaches_create(tsd_t *tsd, unsigned *r_ind);
 void	tcaches_flush(tsd_t *tsd, unsigned ind);
 void	tcaches_destroy(tsd_t *tsd, unsigned ind);
-bool	tcache_boot(tsdn_t *tsdn);
+bool	tcache_boot(void);

 #endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
...
@@ -160,15 +155,15 @@ void	tcache_flush(void);
 bool	tcache_enabled_get(void);
 tcache_t *tcache_get(tsd_t *tsd, bool create);
 void	tcache_enabled_set(bool enabled);
-void	*tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
+void	*tcache_alloc_easy(tcache_bin_t *tbin);
 void	*tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
-    size_t size, szind_t ind, bool zero, bool slow_path);
+    size_t size, bool zero);
 void	*tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
-    size_t size, szind_t ind, bool zero, bool slow_path);
+    size_t size, bool zero);
 void	tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
-    szind_t binind, bool slow_path);
+    szind_t binind);
 void	tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
-    size_t size, bool slow_path);
+    size_t size);
 tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
 #endif
...
@@ -245,74 +240,51 @@ tcache_event(tsd_t *tsd, tcache_t *tcache)
 	if (TCACHE_GC_INCR == 0)
 		return;

-	if (unlikely(ticker_tick(&tcache->gc_ticker)))
+	tcache->ev_cnt++;
+	assert(tcache->ev_cnt <= TCACHE_GC_INCR);
+	if (unlikely(tcache->ev_cnt == TCACHE_GC_INCR))
 		tcache_event_hard(tsd, tcache);
 }

 JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
+tcache_alloc_easy(tcache_bin_t *tbin)
 {
 	void *ret;

 	if (unlikely(tbin->ncached == 0)) {
 		tbin->low_water = -1;
-		*tcache_success = false;
 		return (NULL);
 	}
-	/*
-	 * tcache_success (instead of ret) should be checked upon the return of
-	 * this function.  We avoid checking (ret == NULL) because there is
-	 * never a null stored on the avail stack (which is unknown to the
-	 * compiler), and eagerly checking ret would cause pipeline stall
-	 * (waiting for the cacheline).
-	 */
-	*tcache_success = true;
-	ret = *(tbin->avail - tbin->ncached);
 	tbin->ncached--;
 	if (unlikely((int)tbin->ncached < tbin->low_water))
 		tbin->low_water = tbin->ncached;
+	ret = tbin->avail[tbin->ncached];

 	return (ret);
 }

 JEMALLOC_ALWAYS_INLINE void *
 tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
-    szind_t binind, bool zero, bool slow_path)
+    bool zero)
 {
 	void *ret;
+	szind_t binind;
+	size_t usize;
 	tcache_bin_t *tbin;
-	bool tcache_success;
-	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

+	binind = size2index(size);
 	assert(binind < NBINS);
 	tbin = &tcache->tbins[binind];
-	ret = tcache_alloc_easy(tbin, &tcache_success);
-	assert(tcache_success == (ret != NULL));
-	if (unlikely(!tcache_success)) {
-		bool tcache_hard_success;
-		arena = arena_choose(tsd, arena);
-		if (unlikely(arena == NULL))
-			return (NULL);
-
-		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
-		    tbin, binind, &tcache_hard_success);
-		if (tcache_hard_success == false)
+	usize = index2size(binind);
+	ret = tcache_alloc_easy(tbin);
+	if (unlikely(ret == NULL)) {
+		ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin,
+		    binind);
+		if (ret == NULL)
 			return (NULL);
 	}
-	assert(ret);
-	/*
-	 * Only compute usize if required.  The checks in the following if
-	 * statement are all static.
-	 */
-	if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
-		usize = index2size(binind);
-		assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
-	}
+	assert(tcache_salloc(ret) == usize);

 	if (likely(!zero)) {
-		if (slow_path && config_fill) {
+		if (config_fill) {
 			if (unlikely(opt_junk_alloc)) {
 				arena_alloc_junk_small(ret,
 				    &arena_bin_info[binind], false);
...
@@ -320,7 +292,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 				memset(ret, 0, usize);
 			}
 	} else {
-		if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
+		if (config_fill && unlikely(opt_junk_alloc)) {
 			arena_alloc_junk_small(ret, &arena_bin_info[binind],
 			    true);
 		}
...
@@ -337,38 +309,28 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 JEMALLOC_ALWAYS_INLINE void *
 tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
-    szind_t binind, bool zero, bool slow_path)
+    bool zero)
 {
 	void *ret;
+	szind_t binind;
+	size_t usize;
 	tcache_bin_t *tbin;
-	bool tcache_success;

+	binind = size2index(size);
+	usize = index2size(binind);
+	assert(usize <= tcache_maxclass);
 	assert(binind < nhbins);
 	tbin = &tcache->tbins[binind];
-	ret = tcache_alloc_easy(tbin, &tcache_success);
-	assert(tcache_success == (ret != NULL));
-	if (unlikely(!tcache_success)) {
+	ret = tcache_alloc_easy(tbin);
+	if (unlikely(ret == NULL)) {
 		/*
 		 * Only allocate one large object at a time, because it's quite
 		 * expensive to create one and not use it.
 		 */
-		arena = arena_choose(tsd, arena);
-		if (unlikely(arena == NULL))
-			return (NULL);
-
-		ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero);
+		ret = arena_malloc_large(arena, usize, zero);
 		if (ret == NULL)
 			return (NULL);
 	} else {
-		size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-
-		/* Only compute usize on demand */
-		if (config_prof || (slow_path && config_fill) ||
-		    unlikely(zero)) {
-			usize = index2size(binind);
-			assert(usize <= tcache_maxclass);
-		}
-
 		if (config_prof && usize == LARGE_MINCLASS) {
 			arena_chunk_t *chunk =
 			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
...
@@ -378,11 +340,10 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 			    BININD_INVALID);
 		}
 		if (likely(!zero)) {
-			if (slow_path && config_fill) {
-				if (unlikely(opt_junk_alloc)) {
-					memset(ret, JEMALLOC_ALLOC_JUNK,
-					    usize);
-				} else if (unlikely(opt_zero))
+			if (config_fill) {
+				if (unlikely(opt_junk_alloc))
+					memset(ret, 0xa5, usize);
+				else if (unlikely(opt_zero))
 					memset(ret, 0, usize);
 			}
 		} else
...
@@ -399,15 +360,14 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 }

 JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
-    bool slow_path)
+tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind)
 {
 	tcache_bin_t *tbin;
 	tcache_bin_info_t *tbin_info;

-	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
+	assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

-	if (slow_path && config_fill && unlikely(opt_junk_free))
+	if (config_fill && unlikely(opt_junk_free))
 		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

 	tbin = &tcache->tbins[binind];
...
@@ -417,27 +377,26 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
 		    (tbin_info->ncached_max >> 1));
 	}
 	assert(tbin->ncached < tbin_info->ncached_max);
+	tbin->avail[tbin->ncached] = ptr;
 	tbin->ncached++;
-	*(tbin->avail - tbin->ncached) = ptr;

 	tcache_event(tsd, tcache);
 }

 JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
-    bool slow_path)
+tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size)
 {
 	szind_t binind;
 	tcache_bin_t *tbin;
 	tcache_bin_info_t *tbin_info;

 	assert((size & PAGE_MASK) == 0);
-	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
-	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
+	assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
+	assert(tcache_salloc(ptr) <= tcache_maxclass);

 	binind = size2index(size);

-	if (slow_path && config_fill && unlikely(opt_junk_free))
+	if (config_fill && unlikely(opt_junk_free))
 		arena_dalloc_junk_large(ptr, size);

 	tbin = &tcache->tbins[binind];
...
@@ -447,8 +406,8 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
 		    (tbin_info->ncached_max >> 1), tcache);
 	}
 	assert(tbin->ncached < tbin_info->ncached_max);
+	tbin->avail[tbin->ncached] = ptr;
 	tbin->ncached++;
-	*(tbin->avail - tbin->ncached) = ptr;

 	tcache_event(tsd, tcache);
 }
...
@@ -457,10 +416,8 @@ JEMALLOC_ALWAYS_INLINE tcache_t *
 tcaches_get(tsd_t *tsd, unsigned ind)
 {
 	tcaches_t *elm = &tcaches[ind];
-	if (unlikely(elm->tcache == NULL)) {
-		elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
-		    NULL));
-	}
+	if (unlikely(elm->tcache == NULL))
+		elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
 	return (elm->tcache);
 }
 #endif
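The hunks above are the heart of this file's revert: jemalloc 4.4.0 kept cached objects at negative offsets below an avail pointer that sits one past the storage (for adjacent-cacheline prefetch), while the reverted code indexes a plain array from its base. A standalone model of both push/pop conventions (array size and values are illustrative):

#include <assert.h>
#include <stdio.h>

#define NCACHED_MAX 8

int main(void)
{
	int a, b;
	void *storage[NCACHED_MAX];
	unsigned ncached = 0;

	/* 4.4.0 layout: avail points one past the storage; cached items
	 * occupy avail[-ncached .. -1], newest at the lowest address. */
	void **avail = storage + NCACHED_MAX;
	ncached++; *(avail - ncached) = &a;	/* push (tcache_dalloc_*) */
	ncached++; *(avail - ncached) = &b;
	void *ret = *(avail - ncached);		/* pop (tcache_alloc_easy) */
	ncached--;
	assert(ret == &b);

	/* Reverted layout: a plain stack indexed from the base. */
	ncached = 0;
	storage[ncached++] = &a;
	storage[ncached++] = &b;
	ret = storage[--ncached];
	assert(ret == &b);

	puts("both layouts pop LIFO");
	return 0;
}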
...
...
deps/jemalloc/include/jemalloc/internal/ticker.h
deleted 100644 → 0
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct ticker_s ticker_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct ticker_s {
	int32_t	tick;
	int32_t	nticks;
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void	ticker_init(ticker_t *ticker, int32_t nticks);
void	ticker_copy(ticker_t *ticker, const ticker_t *other);
int32_t	ticker_read(const ticker_t *ticker);
bool	ticker_ticks(ticker_t *ticker, int32_t nticks);
bool	ticker_tick(ticker_t *ticker);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_))
JEMALLOC_INLINE void
ticker_init(ticker_t *ticker, int32_t nticks)
{

	ticker->tick = nticks;
	ticker->nticks = nticks;
}

JEMALLOC_INLINE void
ticker_copy(ticker_t *ticker, const ticker_t *other)
{

	*ticker = *other;
}

JEMALLOC_INLINE int32_t
ticker_read(const ticker_t *ticker)
{

	return (ticker->tick);
}

JEMALLOC_INLINE bool
ticker_ticks(ticker_t *ticker, int32_t nticks)
{

	if (unlikely(ticker->tick < nticks)) {
		ticker->tick = ticker->nticks;
		return (true);
	}
	ticker->tick -= nticks;
	return (false);
}

JEMALLOC_INLINE bool
ticker_tick(ticker_t *ticker)
{

	return (ticker_ticks(ticker, 1));
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
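ticker_t exists to run an expensive step once every nticks cheap events, which is exactly how tcache_event() drove incremental GC before this revert. A self-contained restatement of the countdown plus a usage sketch (values illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t tick; int32_t nticks; } ticker_t;

/* Same countdown as ticker_ticks(ticker, 1) in the header above. */
static bool
ticker_tick(ticker_t *t)
{
	if (t->tick < 1) {
		t->tick = t->nticks;	/* rearm */
		return (true);
	}
	t->tick--;
	return (false);
}

int main(void)
{
	ticker_t gc = {1000, 1000};	/* ticker_init(&gc, 1000) */
	int fired = 0;

	for (int i = 0; i < 5000; i++) {
		if (ticker_tick(&gc))
			fired++;	/* the expensive step runs here */
	}
	printf("fired %d times\n", fired);	/* once per ~1001 events */
	return 0;
}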
deps/jemalloc/include/jemalloc/internal/tsd.h
...
...
@@ -13,9 +13,6 @@ typedef struct tsd_init_head_s tsd_init_head_t;
 #endif

 typedef struct tsd_s tsd_t;
-typedef struct tsdn_s tsdn_t;
-
-#define	TSDN_NULL	((tsdn_t *)0)

 typedef enum {
 	tsd_state_uninitialized,
...
@@ -47,8 +44,7 @@ typedef enum {
 * The result is a set of generated functions, e.g.:
 *
 *   bool example_tsd_boot(void) {...}
- *   bool example_tsd_booted_get(void) {...}
- *   example_t *example_tsd_get(bool init) {...}
+ *   example_t *example_tsd_get() {...}
 *   void example_tsd_set(example_t *val) {...}
 *
 * Note that all of the functions deal in terms of (a_type *) rather than
...
@@ -102,10 +98,8 @@ a_attr void \
 a_name##tsd_boot1(void);					\
 a_attr bool							\
 a_name##tsd_boot(void);						\
-a_attr bool							\
-a_name##tsd_booted_get(void);					\
 a_attr a_type *							\
-a_name##tsd_get(bool init);					\
+a_name##tsd_get(void);						\
 a_attr void							\
 a_name##tsd_set(a_type *val);
...
@@ -207,21 +201,9 @@ a_name##tsd_boot(void) \
 								\
 	return (a_name##tsd_boot0());				\
 }								\
-a_attr bool							\
-a_name##tsd_booted_get(void)					\
-{								\
-								\
-	return (a_name##tsd_booted);				\
-}								\
-a_attr bool							\
-a_name##tsd_get_allocates(void)					\
-{								\
-								\
-	return (false);						\
-}								\
 /* Get/set. */							\
 a_attr a_type *							\
-a_name##tsd_get(bool init)					\
+a_name##tsd_get(void)						\
 {								\
 								\
 	assert(a_name##tsd_booted);				\
...
@@ -264,21 +246,9 @@ a_name##tsd_boot(void) \
 								\
 	return (a_name##tsd_boot0());				\
 }								\
-a_attr bool							\
-a_name##tsd_booted_get(void)					\
-{								\
-								\
-	return (a_name##tsd_booted);				\
-}								\
-a_attr bool							\
-a_name##tsd_get_allocates(void)					\
-{								\
-								\
-	return (false);						\
-}								\
 /* Get/set. */							\
 a_attr a_type *							\
-a_name##tsd_get(bool init)					\
+a_name##tsd_get(void)						\
 {								\
 								\
 	assert(a_name##tsd_booted);				\
...
@@ -337,14 +307,14 @@ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
 	}							\
 }								\
 a_attr a_name##tsd_wrapper_t *					\
-a_name##tsd_wrapper_get(bool init)				\
+a_name##tsd_wrapper_get(void)					\
 {								\
 	DWORD error = GetLastError();				\
 	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
 	    TlsGetValue(a_name##tsd_tsd);			\
 	SetLastError(error);					\
 								\
-	if (init && unlikely(wrapper == NULL)) {		\
+	if (unlikely(wrapper == NULL)) {			\
 		wrapper = (a_name##tsd_wrapper_t *)		\
 		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
 		if (wrapper == NULL) {				\
...
@@ -398,28 +368,14 @@ a_name##tsd_boot(void) \
 	a_name##tsd_boot1();					\
 	return (false);						\
 }								\
-a_attr bool							\
-a_name##tsd_booted_get(void)					\
-{								\
-								\
-	return (a_name##tsd_booted);				\
-}								\
-a_attr bool							\
-a_name##tsd_get_allocates(void)					\
-{								\
-								\
-	return (true);						\
-}								\
 /* Get/set. */							\
 a_attr a_type *							\
-a_name##tsd_get(bool init)					\
+a_name##tsd_get(void)						\
 {								\
 	a_name##tsd_wrapper_t *wrapper;				\
 								\
 	assert(a_name##tsd_booted);				\
-	wrapper = a_name##tsd_wrapper_get(init);		\
-	if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
-		return (NULL);					\
+	wrapper = a_name##tsd_wrapper_get();			\
 	return (&wrapper->val);					\
 }								\
 a_attr void							\
...
@@ -428,7 +384,7 @@ a_name##tsd_set(a_type *val) \
 	a_name##tsd_wrapper_t *wrapper;				\
 								\
 	assert(a_name##tsd_booted);				\
-	wrapper = a_name##tsd_wrapper_get(true);		\
+	wrapper = a_name##tsd_wrapper_get();			\
 	wrapper->val = *(val);					\
 	if (a_cleanup != malloc_tsd_no_cleanup)			\
 		wrapper->initialized = true;			\
...
@@ -472,12 +428,12 @@ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
 	}							\
 }								\
 a_attr a_name##tsd_wrapper_t *					\
-a_name##tsd_wrapper_get(bool init)				\
+a_name##tsd_wrapper_get(void)					\
 {								\
 	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
 	    pthread_getspecific(a_name##tsd_tsd);		\
 								\
-	if (init && unlikely(wrapper == NULL)) {		\
+	if (unlikely(wrapper == NULL)) {			\
 		tsd_init_block_t block;				\
 		wrapper = tsd_init_check_recursion(		\
 		    &a_name##tsd_init_head, &block);		\
...
@@ -534,28 +490,14 @@ a_name##tsd_boot(void) \
 	a_name##tsd_boot1();					\
 	return (false);						\
 }								\
-a_attr bool							\
-a_name##tsd_booted_get(void)					\
-{								\
-								\
-	return (a_name##tsd_booted);				\
-}								\
-a_attr bool							\
-a_name##tsd_get_allocates(void)					\
-{								\
-								\
-	return (true);						\
-}								\
 /* Get/set. */							\
 a_attr a_type *							\
-a_name##tsd_get(bool init)					\
+a_name##tsd_get(void)						\
 {								\
 	a_name##tsd_wrapper_t *wrapper;				\
 								\
 	assert(a_name##tsd_booted);				\
-	wrapper = a_name##tsd_wrapper_get(init);		\
-	if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
-		return (NULL);					\
+	wrapper = a_name##tsd_wrapper_get();			\
 	return (&wrapper->val);					\
 }								\
 a_attr void							\
...
@@ -564,7 +506,7 @@ a_name##tsd_set(a_type *val) \
 	a_name##tsd_wrapper_t *wrapper;				\
 								\
 	assert(a_name##tsd_booted);				\
-	wrapper = a_name##tsd_wrapper_get(true);		\
+	wrapper = a_name##tsd_wrapper_get();			\
 	wrapper->val = *(val);					\
 	if (a_cleanup != malloc_tsd_no_cleanup)			\
 		wrapper->initialized = true;			\
...
@@ -594,15 +536,12 @@ struct tsd_init_head_s {
 	O(thread_allocated, uint64_t)				\
 	O(thread_deallocated, uint64_t)				\
 	O(prof_tdata, prof_tdata_t *)				\
-	O(iarena, arena_t *)					\
 	O(arena, arena_t *)					\
-	O(arenas_tdata, arena_tdata_t *)			\
-	O(narenas_tdata, unsigned)				\
-	O(arenas_tdata_bypass, bool)				\
+	O(arenas_cache, arena_t **)				\
+	O(narenas_cache, unsigned)				\
+	O(arenas_cache_bypass, bool)				\
 	O(tcache_enabled, tcache_enabled_t)			\
 	O(quarantine, quarantine_t *)				\
-	O(witnesses, witness_list_t)				\
-	O(witness_fork, bool)					\

 #define	TSD_INITIALIZER {					\
 	tsd_state_uninitialized,				\
...
@@ -612,13 +551,10 @@ struct tsd_init_head_s {
 	NULL,							\
 	NULL,							\
 	NULL,							\
-	NULL,							\
 	0,							\
 	false,							\
 	tcache_enabled_default,					\
-	NULL,							\
-	ql_head_initializer(witnesses),				\
-	false							\
+	NULL							\
 }

 struct tsd_s {
...
@@ -629,15 +565,6 @@ MALLOC_TSD
 #undef O
 };

-/*
- * Wrapper around tsd_t that makes it possible to avoid implicit conversion
- * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
- * explicitly converted to tsd_t, which is non-nullable.
- */
-struct tsdn_s {
-	tsd_t	tsd;
-};
-
 static const tsd_t tsd_initializer = TSD_INITIALIZER;

 malloc_tsd_types(, tsd_t)
...
@@ -650,7 +577,7 @@ void	*malloc_tsd_malloc(size_t size);
 void	malloc_tsd_dalloc(void *wrapper);
 void	malloc_tsd_no_cleanup(void *arg);
 void	malloc_tsd_cleanup_register(bool (*f)(void));
-tsd_t	*malloc_tsd_boot0(void);
+bool	malloc_tsd_boot0(void);
 void	malloc_tsd_boot1(void);
 #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
     !defined(_WIN32))
...
@@ -667,9 +594,7 @@ void	tsd_cleanup(void *arg);
 #ifndef JEMALLOC_ENABLE_INLINE
 malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)

-tsd_t	*tsd_fetch_impl(bool init);
 tsd_t	*tsd_fetch(void);
-tsdn_t	*tsd_tsdn(tsd_t *tsd);
 bool	tsd_nominal(tsd_t *tsd);
 #define	O(n, t)							\
 t	*tsd_##n##p_get(tsd_t *tsd);				\
...
@@ -677,9 +602,6 @@ t	tsd_##n##_get(tsd_t *tsd); \
 void	tsd_##n##_set(tsd_t *tsd, t n);
 MALLOC_TSD
 #undef O
-tsdn_t	*tsdn_fetch(void);
-bool	tsdn_null(const tsdn_t *tsdn);
-tsd_t	*tsdn_tsd(tsdn_t *tsdn);
 #endif

 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
...
@@ -687,13 +609,9 @@ malloc_tsd_externs(, tsd_t)
 malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)

 JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch_impl(bool init)
+tsd_fetch(void)
 {
-	tsd_t *tsd = tsd_get(init);
+	tsd_t *tsd = tsd_get();

-	if (!init && tsd_get_allocates() && tsd == NULL)
-		return (NULL);
-	assert(tsd != NULL);
-
 	if (unlikely(tsd->state != tsd_state_nominal)) {
 		if (tsd->state == tsd_state_uninitialized) {
...
@@ -710,20 +628,6 @@ tsd_fetch_impl(bool init)
 	return (tsd);
 }

-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch(void)
-{
-
-	return (tsd_fetch_impl(true));
-}
-
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsd_tsdn(tsd_t *tsd)
-{
-
-	return ((tsdn_t *)tsd);
-}
-
 JEMALLOC_INLINE bool
 tsd_nominal(tsd_t *tsd)
 {
...
@@ -755,32 +659,6 @@ tsd_##n##_set(tsd_t *tsd, t n) \
 }
 MALLOC_TSD
 #undef O
-
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsdn_fetch(void)
-{
-
-	if (!tsd_booted_get())
-		return (NULL);
-
-	return (tsd_tsdn(tsd_fetch_impl(false)));
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsdn_null(const tsdn_t *tsdn)
-{
-
-	return (tsdn == NULL);
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsdn_tsd(tsdn_t *tsdn)
-{
-
-	assert(!tsdn_null(tsdn));
-
-	return (&tsdn->tsd);
-}
 #endif

 #endif /* JEMALLOC_H_INLINES */
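The pthread variant of the generated a_name##tsd_wrapper_get() boils down to a lazily allocated per-thread slot. A minimal hand-written analogue (error handling and the recursion guard omitted; assumes the key is created with pthread_key_create() at startup, as shown in main):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int val; } example_t;

static pthread_key_t tsd_key;	/* created once at boot */

/* Lazily allocate this thread's slot on first access. */
static example_t *
example_tsd_get(void)
{
	example_t *w = pthread_getspecific(tsd_key);

	if (w == NULL) {
		w = calloc(1, sizeof(*w));	/* NULL check omitted */
		pthread_setspecific(tsd_key, w);
	}
	return (w);
}

int main(void)
{
	pthread_key_create(&tsd_key, free);
	example_tsd_get()->val = 42;
	printf("%d\n", example_tsd_get()->val);
	return 0;
}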
...
...
deps/jemalloc/include/jemalloc/internal/util.h
...
...
@@ -40,14 +40,6 @@
 */
 #define	MALLOC_PRINTF_BUFSIZE	4096

-/* Junk fill patterns. */
-#ifndef JEMALLOC_ALLOC_JUNK
-#  define JEMALLOC_ALLOC_JUNK	((uint8_t)0xa5)
-#endif
-#ifndef JEMALLOC_FREE_JUNK
-#  define JEMALLOC_FREE_JUNK	((uint8_t)0x5a)
-#endif
-
 /*
 * Wrap a cpp argument that contains commas such that it isn't broken up into
 * multiple arguments.
...
@@ -65,21 +57,73 @@
 #  define JEMALLOC_CC_SILENCE_INIT(v)
 #endif

+#define	JEMALLOC_GNUC_PREREQ(major, minor)				\
+    (!defined(__clang__) &&						\
+    (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))))
+#ifndef __has_builtin
+#  define __has_builtin(builtin) (0)
+#endif
+#define	JEMALLOC_CLANG_HAS_BUILTIN(builtin)				\
+    (defined(__clang__) && __has_builtin(builtin))
+
 #ifdef __GNUC__
 #  define likely(x)   __builtin_expect(!!(x), 1)
 #  define unlikely(x) __builtin_expect(!!(x), 0)
+#  if JEMALLOC_GNUC_PREREQ(4, 6) ||					\
+    JEMALLOC_CLANG_HAS_BUILTIN(__builtin_unreachable)
+#    define unreachable() __builtin_unreachable()
+#  else
+#    define unreachable()
+#  endif
 #else
 #  define likely(x)   !!(x)
 #  define unlikely(x) !!(x)
+#  define unreachable()
 #endif

-#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
-#  error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
-#endif
-#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
+/*
+ * Define a custom assert() in order to reduce the chances of deadlock during
+ * assertion failure.
+ */
+#ifndef assert
+#define	assert(e) do {							\
+	if (unlikely(config_debug && !(e))) {				\
+		malloc_printf(						\
+		    "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n",	\
+		    __FILE__, __LINE__, #e);				\
+		abort();						\
+	}								\
+} while (0)
+#endif

-#include "jemalloc/internal/assert.h"
+#ifndef not_reached
+#define	not_reached() do {						\
+	if (config_debug) {						\
+		malloc_printf(						\
+		    "<jemalloc>: %s:%d: Unreachable code reached\n",	\
+		    __FILE__, __LINE__);				\
+		abort();						\
+	}								\
+	unreachable();							\
+} while (0)
+#endif
+
+#ifndef not_implemented
+#define	not_implemented() do {						\
+	if (config_debug) {						\
+		malloc_printf("<jemalloc>: %s:%d: Not implemented\n",	\
+		    __FILE__, __LINE__);				\
+		abort();						\
+	}								\
+} while (0)
+#endif
+
+#ifndef assert_not_implemented
+#define	assert_not_implemented(e) do {					\
+	if (unlikely(config_debug && !(e)))				\
+		not_implemented();					\
+} while (0)
+#endif

 /* Use to assert a particular configuration, e.g., cassert(config_debug). */
 #define	cassert(c) do {							\
...
...
@@ -104,9 +148,9 @@ void	malloc_write(const char *s);
 * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
 * point math.
 */
-size_t	malloc_vsnprintf(char *str, size_t size, const char *format,
+int	malloc_vsnprintf(char *str, size_t size, const char *format,
     va_list ap);
-size_t	malloc_snprintf(char *str, size_t size, const char *format, ...)
+int	malloc_snprintf(char *str, size_t size, const char *format, ...)
     JEMALLOC_FORMAT_PRINTF(3, 4);
 void	malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
     const char *format, va_list ap);
...
...
@@ -119,16 +163,10 @@ void	malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
 #ifdef JEMALLOC_H_INLINES

 #ifndef JEMALLOC_ENABLE_INLINE
-unsigned	ffs_llu(unsigned long long bitmap);
-unsigned	ffs_lu(unsigned long bitmap);
-unsigned	ffs_u(unsigned bitmap);
-unsigned	ffs_zu(size_t bitmap);
-unsigned	ffs_u64(uint64_t bitmap);
-unsigned	ffs_u32(uint32_t bitmap);
-uint64_t	pow2_ceil_u64(uint64_t x);
-uint32_t	pow2_ceil_u32(uint32_t x);
-size_t	pow2_ceil_zu(size_t x);
-unsigned	lg_floor(size_t x);
+int	jemalloc_ffsl(long bitmap);
+int	jemalloc_ffs(int bitmap);
+size_t	pow2_ceil(size_t x);
+size_t	lg_floor(size_t x);
 void	set_errno(int errnum);
 int	get_errno(void);
 #endif
...
...
@@ -136,74 +174,27 @@ int get_errno(void);
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))

 /* Sanity check. */
-#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
-    || !defined(JEMALLOC_INTERNAL_FFS)
-#  error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
+#if !defined(JEMALLOC_INTERNAL_FFSL) || !defined(JEMALLOC_INTERNAL_FFS)
+#  error Both JEMALLOC_INTERNAL_FFSL && JEMALLOC_INTERNAL_FFS should have been defined by configure
 #endif

-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_llu(unsigned long long bitmap)
-{
-
-	return (JEMALLOC_INTERNAL_FFSLL(bitmap));
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_lu(unsigned long bitmap)
+JEMALLOC_ALWAYS_INLINE int
+jemalloc_ffsl(long bitmap)
 {

 	return (JEMALLOC_INTERNAL_FFSL(bitmap));
 }

-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u(unsigned bitmap)
+JEMALLOC_ALWAYS_INLINE int
+jemalloc_ffs(int bitmap)
 {

 	return (JEMALLOC_INTERNAL_FFS(bitmap));
 }

-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_zu(size_t bitmap)
-{
-
-#if LG_SIZEOF_PTR == LG_SIZEOF_INT
-	return (ffs_u(bitmap));
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
-	return (ffs_lu(bitmap));
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
-	return (ffs_llu(bitmap));
-#else
-#error No implementation for size_t ffs()
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u64(uint64_t bitmap)
-{
-
-#if LG_SIZEOF_LONG == 3
-	return (ffs_lu(bitmap));
-#elif LG_SIZEOF_LONG_LONG == 3
-	return (ffs_llu(bitmap));
-#else
-#error No implementation for 64-bit ffs()
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u32(uint32_t bitmap)
-{
-
-#if LG_SIZEOF_INT == 2
-	return (ffs_u(bitmap));
-#else
-#error No implementation for 32-bit ffs()
-#endif
-	return (ffs_u(bitmap));
-}
-
-JEMALLOC_INLINE uint64_t
-pow2_ceil_u64(uint64_t x)
+/* Compute the smallest power of 2 that is >= x. */
+JEMALLOC_INLINE size_t
+pow2_ceil(size_t x)
 {

 	x--;
...
...
@@ -212,39 +203,15 @@ pow2_ceil_u64(uint64_t x)
 	x |= x >> 4;
 	x |= x >> 8;
 	x |= x >> 16;
+#if (LG_SIZEOF_PTR == 3)
 	x |= x >> 32;
+#endif
 	x++;
 	return (x);
 }
-
-JEMALLOC_INLINE uint32_t
-pow2_ceil_u32(uint32_t x)
-{
-
-	x--;
-	x |= x >> 1;
-	x |= x >> 2;
-	x |= x >> 4;
-	x |= x >> 8;
-	x |= x >> 16;
-	x++;
-	return (x);
-}
-
-/* Compute the smallest power of 2 that is >= x. */
-JEMALLOC_INLINE size_t
-pow2_ceil_zu(size_t x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
-	return (pow2_ceil_u64(x));
-#else
-	return (pow2_ceil_u32(x));
-#endif
-}

 #if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
-JEMALLOC_INLINE unsigned
+JEMALLOC_INLINE size_t
 lg_floor(size_t x)
 {
 	size_t ret;
...
...
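pow2_ceil() above uses the classic decrement/smear/increment trick: subtract 1, OR in right-shifted copies until every bit below the leading one is set, then add 1. A worked check (sketch, not jemalloc's code):

#include <assert.h>
#include <stdint.h>

/* Smallest power of 2 that is >= x, for uint32_t. */
static uint32_t
pow2_ceil_u32(uint32_t x)
{
	x--;
	x |= x >> 1; x |= x >> 2; x |= x >> 4;
	x |= x >> 8; x |= x >> 16;
	return (x + 1);
}

int main(void)
{
	/* 37 - 1 = 36 = 0b100100; smearing gives 0b111111 = 63; +1 = 64. */
	assert(pow2_ceil_u32(37) == 64);
	assert(pow2_ceil_u32(64) == 64);	/* powers of 2 map to themselves */
	return 0;
}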
@@ -255,11 +222,10 @@ lg_floor(size_t x)
 	    : "=r"(ret) // Outputs.
 	    : "r"(x)    // Inputs.
 	    );
-	assert(ret < UINT_MAX);
-	return ((unsigned)ret);
+	return (ret);
 }
 #elif (defined(_MSC_VER))
-JEMALLOC_INLINE unsigned
+JEMALLOC_INLINE size_t
 lg_floor(size_t x)
 {
 	unsigned long ret;
...
...
@@ -271,13 +237,12 @@ lg_floor(size_t x)
 #elif (LG_SIZEOF_PTR == 2)
 	_BitScanReverse(&ret, x);
 #else
-#  error "Unsupported type size for lg_floor()"
+#  error "Unsupported type sizes for lg_floor()"
 #endif
-	assert(ret < UINT_MAX);
-	return ((unsigned)ret);
+	return (ret);
 }
 #elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
-JEMALLOC_INLINE unsigned
+JEMALLOC_INLINE size_t
 lg_floor(size_t x)
 {
...
...
@@ -288,11 +253,11 @@ lg_floor(size_t x)
 #elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
 	return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
 #else
-#  error "Unsupported type size for lg_floor()"
+#  error "Unsupported type sizes for lg_floor()"
 #endif
 }
 #else
-JEMALLOC_INLINE unsigned
+JEMALLOC_INLINE size_t
 lg_floor(size_t x)
 {
...
...
@@ -303,13 +268,20 @@ lg_floor(size_t x)
 	x |= (x >> 4);
 	x |= (x >> 8);
 	x |= (x >> 16);
-#if (LG_SIZEOF_PTR == 3)
+#if (LG_SIZEOF_PTR == 3 && LG_SIZEOF_PTR == LG_SIZEOF_LONG)
 	x |= (x >> 32);
-#endif
-	if (x == SIZE_T_MAX)
-		return ((8 << LG_SIZEOF_PTR) - 1);
+	if (x == KZU(0xffffffffffffffff))
+		return (63);
 	x++;
-	return (ffs_zu(x) - 2);
+	return (jemalloc_ffsl(x) - 2);
+#elif (LG_SIZEOF_PTR == 2)
+	if (x == KZU(0xffffffff))
+		return (31);
+	x++;
+	return (jemalloc_ffs(x) - 2);
+#else
+#  error "Unsupported type sizes for lg_floor()"
+#endif
 }
 #endif
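For comparison with the branch-free variants above, a naive shift-and-count lg_floor makes the intended result explicit (illustrative sketch):

#include <assert.h>
#include <stdint.h>

/* Index of the highest set bit, i.e. floor(log2(x)) for x > 0. */
static unsigned
lg_floor_u32(uint32_t x)
{
	unsigned r = 0;

	while (x >>= 1)
		r++;
	return (r);
}

int main(void)
{
	assert(lg_floor_u32(1) == 0);
	assert(lg_floor_u32(37) == 5);	/* 2^5 = 32 <= 37 < 64 */
	return 0;
}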
...
...
deps/jemalloc/include/jemalloc/internal/valgrind.h
...
...
@@ -30,31 +30,17 @@
 * calls must be embedded in macros rather than in functions so that when
 * Valgrind reports errors, there are no extra stack frames in the backtraces.
 */
-#define	JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {	\
-	if (unlikely(in_valgrind && cond)) {				\
-		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr),	\
-		    zero);						\
-	}								\
+#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
+	if (unlikely(in_valgrind && cond))				\
+		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero);	\
 } while (0)
-#define	JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr)		\
-	(false)
-#define	JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr)		\
-	((ptr) != (old_ptr))
-#define	JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr)			\
-	(false)
-#define	JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr)			\
-	(ptr == NULL)
-#define	JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr)		\
-	(false)
-#define	JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr)		\
-	(old_ptr == NULL)
-#define	JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null,	\
-    old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do {		\
+#define	JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize,		\
+    ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null,	\
+    zero) do {								\
 	if (unlikely(in_valgrind)) {					\
-		size_t rzsize = p2rz(tsdn, ptr);			\
+		size_t rzsize = p2rz(ptr);				\
 									\
-		if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr,	\
-		    old_ptr)) {						\
+		if (!maybe_moved || ptr == old_ptr) {			\
 			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
 			    usize, rzsize);				\
 			if (zero && old_usize < usize) {		\
...
@@ -63,13 +49,11 @@
 				    old_usize), usize - old_usize);	\
 			}						\
 		} else {						\
-			if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_##	\
-			    old_ptr_null(old_ptr)) {			\
+			if (!old_ptr_maybe_null || old_ptr != NULL) {	\
 				valgrind_freelike_block(old_ptr,	\
 				    old_rzsize);			\
 			}						\
-			if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_##	\
-			    ptr_null(ptr)) {				\
+			if (!ptr_maybe_null || ptr != NULL) {		\
 				size_t copy_size = (old_usize < usize)	\
 				    ? old_usize : usize;		\
 				size_t tail_size = usize - copy_size;	\
...
@@ -97,8 +81,8 @@
 #define	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
 #define	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
 #define	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
-#define	JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0)
-#define	JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize,	\
+#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
+#define	JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize,		\
     ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null,	\
     zero) do {} while (0)
 #define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
...
...
deps/jemalloc/include/jemalloc/internal/witness.h
deleted 100644 → 0
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef
struct
witness_s
witness_t
;
typedef
unsigned
witness_rank_t
;
typedef
ql_head
(
witness_t
)
witness_list_t
;
typedef
int
witness_comp_t
(
const
witness_t
*
,
const
witness_t
*
);
/*
* Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by
* the witness machinery.
*/
#define WITNESS_RANK_OMIT 0U
#define WITNESS_RANK_INIT 1U
#define WITNESS_RANK_CTL 1U
#define WITNESS_RANK_ARENAS 2U
#define WITNESS_RANK_PROF_DUMP 3U
#define WITNESS_RANK_PROF_BT2GCTX 4U
#define WITNESS_RANK_PROF_TDATAS 5U
#define WITNESS_RANK_PROF_TDATA 6U
#define WITNESS_RANK_PROF_GCTX 7U
#define WITNESS_RANK_ARENA 8U
#define WITNESS_RANK_ARENA_CHUNKS 9U
#define WITNESS_RANK_ARENA_NODE_CACHE 10
#define WITNESS_RANK_BASE 11U
#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF
#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}}
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct
witness_s
{
/* Name, used for printing lock order reversal messages. */
const
char
*
name
;
/*
* Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
* must be acquired in order of increasing rank.
*/
witness_rank_t
rank
;
/*
* If two witnesses are of equal rank and they have the samp comp
* function pointer, it is called as a last attempt to differentiate
* between witnesses of equal rank.
*/
witness_comp_t
*
comp
;
/* Linkage for thread's currently owned locks. */
ql_elm
(
witness_t
)
link
;
};
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void
witness_init
(
witness_t
*
witness
,
const
char
*
name
,
witness_rank_t
rank
,
witness_comp_t
*
comp
);
#ifdef JEMALLOC_JET
typedef
void
(
witness_lock_error_t
)(
const
witness_list_t
*
,
const
witness_t
*
);
extern
witness_lock_error_t
*
witness_lock_error
;
#else
void
witness_lock_error
(
const
witness_list_t
*
witnesses
,
const
witness_t
*
witness
);
#endif
#ifdef JEMALLOC_JET
typedef
void
(
witness_owner_error_t
)(
const
witness_t
*
);
extern
witness_owner_error_t
*
witness_owner_error
;
#else
void
witness_owner_error
(
const
witness_t
*
witness
);
#endif
#ifdef JEMALLOC_JET
typedef
void
(
witness_not_owner_error_t
)(
const
witness_t
*
);
extern
witness_not_owner_error_t
*
witness_not_owner_error
;
#else
void
witness_not_owner_error
(
const
witness_t
*
witness
);
#endif
#ifdef JEMALLOC_JET
typedef
void
(
witness_lockless_error_t
)(
const
witness_list_t
*
);
extern
witness_lockless_error_t
*
witness_lockless_error
;
#else
void
witness_lockless_error
(
const
witness_list_t
*
witnesses
);
#endif
void
witnesses_cleanup
(
tsd_t
*
tsd
);
void
witness_fork_cleanup
(
tsd_t
*
tsd
);
void
witness_prefork
(
tsd_t
*
tsd
);
void
witness_postfork_parent
(
tsd_t
*
tsd
);
void
witness_postfork_child
(
tsd_t
*
tsd
);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool
witness_owner
(
tsd_t
*
tsd
,
const
witness_t
*
witness
);
void
witness_assert_owner
(
tsdn_t
*
tsdn
,
const
witness_t
*
witness
);
void
witness_assert_not_owner
(
tsdn_t
*
tsdn
,
const
witness_t
*
witness
);
void
witness_assert_lockless
(
tsdn_t
*
tsdn
);
void
witness_lock
(
tsdn_t
*
tsdn
,
witness_t
*
witness
);
void
witness_unlock
(
tsdn_t
*
tsdn
,
witness_t
*
witness
);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE
bool
witness_owner
(
tsd_t
*
tsd
,
const
witness_t
*
witness
)
{
witness_list_t
*
witnesses
;
witness_t
*
w
;
witnesses
=
tsd_witnessesp_get
(
tsd
);
ql_foreach
(
w
,
witnesses
,
link
)
{
if
(
w
==
witness
)
return
(
true
);
}
return
(
false
);
}
JEMALLOC_INLINE
void
witness_assert_owner
(
tsdn_t
*
tsdn
,
const
witness_t
*
witness
)
{
tsd_t
*
tsd
;
if
(
!
config_debug
)
return
;
if
(
tsdn_null
(
tsdn
))
return
;
tsd
=
tsdn_tsd
(
tsdn
);
if
(
witness
->
rank
==
WITNESS_RANK_OMIT
)
return
;
if
(
witness_owner
(
tsd
,
witness
))
return
;
witness_owner_error
(
witness
);
}
JEMALLOC_INLINE
void
witness_assert_not_owner
(
tsdn_t
*
tsdn
,
const
witness_t
*
witness
)
{
tsd_t
*
tsd
;
witness_list_t
*
witnesses
;
witness_t
*
w
;
if
(
!
config_debug
)
return
;
if
(
tsdn_null
(
tsdn
))
return
;
tsd
=
tsdn_tsd
(
tsdn
);
if
(
witness
->
rank
==
WITNESS_RANK_OMIT
)
return
;
witnesses
=
tsd_witnessesp_get
(
tsd
);
ql_foreach
(
w
,
witnesses
,
link
)
{
if
(
w
==
witness
)
witness_not_owner_error
(
witness
);
}
}
JEMALLOC_INLINE void
witness_assert_lockless(tsdn_t *tsdn)
{
	tsd_t *tsd;
	witness_list_t *witnesses;
	witness_t *w;

	if (!config_debug)
		return;

	if (tsdn_null(tsdn))
		return;
	tsd = tsdn_tsd(tsdn);

	witnesses = tsd_witnessesp_get(tsd);
	w = ql_last(witnesses, link);
	if (w != NULL)
		witness_lockless_error(witnesses);
}
JEMALLOC_INLINE void
witness_lock(tsdn_t *tsdn, witness_t *witness)
{
	tsd_t *tsd;
	witness_list_t *witnesses;
	witness_t *w;

	if (!config_debug)
		return;

	if (tsdn_null(tsdn))
		return;
	tsd = tsdn_tsd(tsdn);

	if (witness->rank == WITNESS_RANK_OMIT)
		return;

	witness_assert_not_owner(tsdn, witness);

	witnesses = tsd_witnessesp_get(tsd);
	w = ql_last(witnesses, link);
	if (w == NULL) {
		/* No other locks; do nothing. */
	} else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) {
		/* Forking, and relaxed ranking satisfied. */
	} else if (w->rank > witness->rank) {
		/* Not forking, rank order reversal. */
		witness_lock_error(witnesses, witness);
	} else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
	    witness->comp || w->comp(w, witness) > 0)) {
		/*
		 * Missing/incompatible comparison function, or comparison
		 * function indicates rank order reversal.
		 */
		witness_lock_error(witnesses, witness);
	}

	ql_elm_new(witness, link);
	ql_tail_insert(witnesses, witness, link);
}
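witness_lock() thus enforces that locks are acquired in non-decreasing rank order, with the comp callback as a tiebreak at equal rank. A usage sketch (illustrative only: the rank values 10/20 and the tsdn handle are assumptions, not from the source):

	witness_t w_a, w_b;

	witness_init(&w_a, "a_mtx", 10U, NULL);
	witness_init(&w_b, "b_mtx", 20U, NULL);

	witness_lock(tsdn, &w_a);	/* Owned list empty: OK. */
	witness_lock(tsdn, &w_b);	/* 20 >= 10: OK; the reverse order
					 * would call witness_lock_error()
					 * in a debug build. */
	witness_unlock(tsdn, &w_b);
	witness_unlock(tsdn, &w_a);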
JEMALLOC_INLINE void
witness_unlock(tsdn_t *tsdn, witness_t *witness)
{
	tsd_t *tsd;
	witness_list_t *witnesses;

	if (!config_debug)
		return;

	if (tsdn_null(tsdn))
		return;
	tsd = tsdn_tsd(tsdn);

	if (witness->rank == WITNESS_RANK_OMIT)
		return;

	/*
	 * Check whether owner before removal, rather than relying on
	 * witness_assert_owner() to abort, so that unit tests can test this
	 * function's failure mode without causing undefined behavior.
	 */
	if (witness_owner(tsd, witness)) {
		witnesses = tsd_witnessesp_get(tsd);
		ql_remove(witnesses, witness, link);
	} else
		witness_assert_owner(tsdn, witness);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
View file @
e3b8492e
...
...
@@ -33,13 +33,5 @@
*/
#undef JEMALLOC_USE_CXX_THROW
#ifdef _MSC_VER
#  ifdef _WIN64
#    define LG_SIZEOF_PTR_WIN 3
#  else
#    define LG_SIZEOF_PTR_WIN 2
#  endif
#endif
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#undef LG_SIZEOF_PTR
deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
View file @
e3b8492e
...
...
@@ -11,13 +11,12 @@
#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
#  define MALLOCX_LG_ALIGN(la)	((int)(la))
#  define MALLOCX_LG_ALIGN(la)	(la)
#  if LG_SIZEOF_PTR == 2
#    define MALLOCX_ALIGN(a)	((int)(ffs((int)(a))-1))
#    define MALLOCX_ALIGN(a)	(ffs(a)-1)
#  else
#    define MALLOCX_ALIGN(a)						\
	((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 :	\
	ffs((int)(((size_t)(a))>>32))+31))
#    define MALLOCX_ALIGN(a)						\
	((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
#  endif
# define MALLOCX_ZERO ((int)0x40)
/*
...
...
@@ -29,7 +28,7 @@
/*
* Bias arena index bits so that 0 encodes "use an automatically chosen arena".
*/
#  define MALLOCX_ARENA(a)	((((int)(a))+1) << 20)
#  define MALLOCX_ARENA(a)	((int)(((a)+1) << 20))
#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
# define JEMALLOC_CXX_THROW throw()
...
...
@@ -37,7 +36,32 @@
# define JEMALLOC_CXX_THROW
#endif
#if _MSC_VER
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
# else
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# endif
# ifndef JEMALLOC_EXPORT
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# endif
# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
# else
# define JEMALLOC_FORMAT_PRINTF(s, i)
# endif
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_ALLOC_SIZE(s)
...
...
@@ -63,31 +87,6 @@
# else
# define JEMALLOC_ALLOCATOR
# endif
#elif defined(JEMALLOC_HAVE_ATTR)
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
# else
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# endif
# ifndef JEMALLOC_EXPORT
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# endif
# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
# else
# define JEMALLOC_FORMAT_PRINTF(s, i)
# endif
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s)
...
...
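To see what this file's hunks change in practice, here is a small sketch of how the flag macros compose in a mallocx() call (mallocx/dallocx are jemalloc's non-standard API; the behavior shown assumes the pre-4.4.0 definitions this revert restores):

#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* 4 KiB, 64-byte aligned (ffs(64)-1 == 6 == lg(64)), zeroed, arena 0. */
	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO |
	    MALLOCX_ARENA(0));

	if (p != NULL)
		dallocx(p, 0);
	return (0);
}

The (int) casts removed by the revert matter because mallocx() takes an int flags argument; the older macros rely on ffs() already returning int.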
deps/jemalloc/include/msvc_compat/strings.h
View file @
e3b8492e
...
...
@@ -21,37 +21,7 @@ static __forceinline int ffs(int x)
	return (ffsl(x));
}

#  ifdef _M_X64
#    pragma intrinsic(_BitScanForward64)
#  endif

static __forceinline int ffsll(unsigned __int64 x)
{
	unsigned long i;
#ifdef _M_X64
	if (_BitScanForward64(&i, x))
		return (i + 1);
	return (0);
#else
	// Fallback for 32-bit build where 64-bit version not available
	// assuming little endian
	union {
		unsigned __int64 ll;
		unsigned long l[2];
	} s;
	s.ll = x;
	if (_BitScanForward(&i, s.l[0]))
		return (i + 1);
	else if (_BitScanForward(&i, s.l[1]))
		return (i + 33);
	return (0);
#endif
}

#else
# define ffsll(x) __builtin_ffsll(x)
# define ffsl(x) __builtin_ffsl(x)
# define ffs(x) __builtin_ffs(x)
#endif
...
...
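A couple of sanity checks for the 1-based ffs convention implemented above (illustrative only; compiles against either the MSVC shim or the __builtin_ffsll branch):

#include <assert.h>

int
main(void)
{
	assert(ffsll(0) == 0);				/* No bit set. */
	assert(ffsll(1) == 1);				/* Bit 0 -> 1. */
	assert(ffsll(0x8000000000000000ULL) == 64);	/* Bit 63; the 32-bit
							 * fallback returns
							 * 31 + 33. */
	return (0);
}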
deps/jemalloc/include/msvc_compat/windows_extra.h
View file @
e3b8492e
#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
#define MSVC_COMPAT_WINDOWS_EXTRA_H
#include <errno.h>
#ifndef ENOENT
# define ENOENT ERROR_PATH_NOT_FOUND
#endif
#ifndef EINVAL
# define EINVAL ERROR_BAD_ARGUMENTS
#endif
#ifndef EAGAIN
# define EAGAIN ERROR_OUTOFMEMORY
#endif
#ifndef EPERM
# define EPERM ERROR_WRITE_FAULT
#endif
#ifndef EFAULT
# define EFAULT ERROR_INVALID_ADDRESS
#endif
#ifndef ENOMEM
# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#endif
#ifndef ERANGE
# define ERANGE ERROR_INVALID_DATA
#endif
#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
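These fallbacks let portable code assign POSIX errno values on MSVC toolchains whose CRT omits some of them. A sketch (the helper is hypothetical, not from the source):

#include <errno.h>
#include <stdlib.h>

static void *
alloc_or_set_errno(size_t size)
{
	void *p = malloc(size);

	/* ENOMEM resolves via the shim above when the CRT lacks it. */
	if (p == NULL)
		errno = ENOMEM;
	return (p);
}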