ruanhaishen / redis · Commits

Commit f63e81c2 (Unverified)
Authored Aug 25, 2018 by Chris Lamb; committed by GitHub on Aug 25, 2018.

    Merge branch 'unstable' into config-set-maxmemory-grammar

Parents: eaeba1b2, 39c70e72
Changes: 209 (too many changes to show; to preserve performance only 209 of 209+ files are displayed).
deps/jemalloc/src/atomic.c (deleted, 100644 → 0)
#define JEMALLOC_ATOMIC_C_
#include "jemalloc/internal/jemalloc_internal.h"
deps/jemalloc/src/background_thread.c (new file, 0 → 100644)
#define JEMALLOC_BACKGROUND_THREAD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"

/******************************************************************************/
/* Data. */

/* This option should be opt-in only. */
#define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT;

/* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock;
/* Indicates global state.  Atomic because decay reads this w/o locking. */
atomic_b_t background_thread_enabled_state;
size_t n_background_threads;
size_t max_background_threads;
/* Thread info per-index. */
background_thread_info_t *background_thread_info;

/* False if no necessary runtime support. */
bool can_enable_background_thread;

/******************************************************************************/

#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
#include <dlfcn.h>

static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
    void *(*)(void *), void *__restrict);

static void
pthread_create_wrapper_init(void) {
#ifdef JEMALLOC_LAZY_LOCK
	if (!isthreaded) {
		isthreaded = true;
	}
#endif
}

int
pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *__restrict arg) {
	pthread_create_wrapper_init();

	return pthread_create_fptr(thread, attr, start_routine, arg);
}
#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */

#ifndef JEMALLOC_BACKGROUND_THREAD
#define NOT_REACHED { not_reached(); }
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
bool background_thread_stats_read(tsdn_t *tsdn,
    background_thread_stats_t *stats) NOT_REACHED
void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
#undef NOT_REACHED
#else

static bool background_thread_enabled_at_fork;

static void
background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
	background_thread_wakeup_time_set(tsdn, info, 0);
	info->npages_to_purge_new = 0;
	if (config_stats) {
		info->tot_n_runs = 0;
		nstime_init(&info->tot_sleep_time, 0);
	}
}

static inline bool
set_current_thread_affinity(UNUSED int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
	cpu_set_t cpuset;
	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);

	return (ret != 0);
#else
	return false;
#endif
}

/* Threshold for determining when to wake up the background thread. */
#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
#define BILLION UINT64_C(1000000000)
/* Minimal sleep interval 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)

static inline size_t
decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
	size_t i;
	uint64_t sum = 0;
	for (i = 0; i < interval; i++) {
		sum += decay->backlog[i] * h_steps[i];
	}
	for (; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
	}

	return (size_t)(sum >> SMOOTHSTEP_BFP);
}

static uint64_t
arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
    extents_t *extents) {
	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* Use minimal interval if decay is contended. */
		return BACKGROUND_THREAD_MIN_INTERVAL_NS;
	}

	uint64_t interval;
	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
	if (decay_time <= 0) {
		/* Purging is eagerly done or disabled currently. */
		interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
		goto label_done;
	}

	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
	assert(decay_interval_ns > 0);
	size_t npages = extents_npages_get(extents);
	if (npages == 0) {
		unsigned i;
		for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
			if (decay->backlog[i] > 0) {
				break;
			}
		}
		if (i == SMOOTHSTEP_NSTEPS) {
			/* No dirty pages recorded.  Sleep indefinitely. */
			interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
			goto label_done;
		}
	}
	if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		/* Use max interval. */
		interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
		goto label_done;
	}

	size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
	size_t ub = SMOOTHSTEP_NSTEPS;
	/* Minimal 2 intervals to ensure reaching next epoch deadline. */
	lb = (lb < 2) ? 2 : lb;
	if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
	    (lb + 2 > ub)) {
		interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
		goto label_done;
	}

	assert(lb + 2 <= ub);
	size_t npurge_lb, npurge_ub;
	npurge_lb = decay_npurge_after_interval(decay, lb);
	if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		interval = decay_interval_ns * lb;
		goto label_done;
	}
	npurge_ub = decay_npurge_after_interval(decay, ub);
	if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		interval = decay_interval_ns * ub;
		goto label_done;
	}

	unsigned n_search = 0;
	size_t target, npurge;
	while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
	    && (lb + 2 < ub)) {
		target = (lb + ub) / 2;
		npurge = decay_npurge_after_interval(decay, target);
		if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
			ub = target;
			npurge_ub = npurge;
		} else {
			lb = target;
			npurge_lb = npurge;
		}
		assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
	}
	interval = decay_interval_ns * (ub + lb) / 2;
label_done:
	interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
	    BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
	malloc_mutex_unlock(tsdn, &decay->mtx);

	return interval;
}

/* Compute purge interval for background threads. */
static uint64_t
arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
	uint64_t i1, i2;
	i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
	    &arena->extents_dirty);
	if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
		return i1;
	}
	i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
	    &arena->extents_muzzy);

	return i1 < i2 ? i1 : i2;
}

static void
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
    uint64_t interval) {
	if (config_stats) {
		info->tot_n_runs++;
	}
	info->npages_to_purge_new = 0;

	struct timeval tv;
	/* Specific clock required by timedwait. */
	gettimeofday(&tv, NULL);
	nstime_t before_sleep;
	nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);

	int ret;
	if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
		assert(background_thread_indefinite_sleep(info));
		ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
		assert(ret == 0);
	} else {
		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
		    interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
		/* We need malloc clock (can be different from tv). */
		nstime_t next_wakeup;
		nstime_init(&next_wakeup, 0);
		nstime_update(&next_wakeup);
		nstime_iadd(&next_wakeup, interval);
		assert(nstime_ns(&next_wakeup) <
		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
		background_thread_wakeup_time_set(tsdn, info,
		    nstime_ns(&next_wakeup));

		nstime_t ts_wakeup;
		nstime_copy(&ts_wakeup, &before_sleep);
		nstime_iadd(&ts_wakeup, interval);
		struct timespec ts;
		ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
		ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);

		assert(!background_thread_indefinite_sleep(info));
		ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
		assert(ret == ETIMEDOUT || ret == 0);
		background_thread_wakeup_time_set(tsdn, info,
		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
	}
	if (config_stats) {
		gettimeofday(&tv, NULL);
		nstime_t after_sleep;
		nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
		if (nstime_compare(&after_sleep, &before_sleep) > 0) {
			nstime_subtract(&after_sleep, &before_sleep);
			nstime_add(&info->tot_sleep_time, &after_sleep);
		}
	}
}

static bool
background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
	if (unlikely(info->state == background_thread_paused)) {
		malloc_mutex_unlock(tsdn, &info->mtx);
		/* Wait on global lock to update status. */
		malloc_mutex_lock(tsdn, &background_thread_lock);
		malloc_mutex_unlock(tsdn, &background_thread_lock);
		malloc_mutex_lock(tsdn, &info->mtx);
		return true;
	}

	return false;
}

static inline void
background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
    unsigned ind) {
	uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
	unsigned narenas = narenas_total_get();

	for (unsigned i = ind; i < narenas; i += max_background_threads) {
		arena_t *arena = arena_get(tsdn, i, false);
		if (!arena) {
			continue;
		}
		arena_decay(tsdn, arena, true, false);
		if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
			/* Min interval will be used. */
			continue;
		}
		uint64_t interval = arena_decay_compute_purge_interval(tsdn,
		    arena);
		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
		if (min_interval > interval) {
			min_interval = interval;
		}
	}
	background_thread_sleep(tsdn, info, min_interval);
}

static bool
background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
	if (info == &background_thread_info[0]) {
		malloc_mutex_assert_owner(tsd_tsdn(tsd),
		    &background_thread_lock);
	} else {
		malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
		    &background_thread_lock);
	}

	pre_reentrancy(tsd, NULL);
	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	bool has_thread;
	assert(info->state != background_thread_paused);
	if (info->state == background_thread_started) {
		has_thread = true;
		info->state = background_thread_stopped;
		pthread_cond_signal(&info->cond);
	} else {
		has_thread = false;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

	if (!has_thread) {
		post_reentrancy(tsd);
		return false;
	}
	void *ret;
	if (pthread_join(info->thread, &ret)) {
		post_reentrancy(tsd);
		return true;
	}
	assert(ret == NULL);
	n_background_threads--;
	post_reentrancy(tsd);

	return false;
}

static void *background_thread_entry(void *ind_arg);

static int
background_thread_create_signals_masked(pthread_t *thread,
    const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
	/*
	 * Mask signals during thread creation so that the thread inherits
	 * an empty signal set.
	 */
	sigset_t set;
	sigfillset(&set);
	sigset_t oldset;
	int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
	if (mask_err != 0) {
		return mask_err;
	}
	int create_err = pthread_create_wrapper(thread, attr, start_routine,
	    arg);
	/*
	 * Restore the signal mask.  Failure to restore the signal mask here
	 * changes program behavior.
	 */
	int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
	if (restore_err != 0) {
		malloc_printf("<jemalloc>: background thread creation "
		    "failed (%d), and signal mask restoration failed "
		    "(%d)\n", create_err, restore_err);
		if (opt_abort) {
			abort();
		}
	}
	return create_err;
}

static bool
check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
    bool *created_threads) {
	bool ret = false;
	if (likely(*n_created == n_background_threads)) {
		return ret;
	}

	tsdn_t *tsdn = tsd_tsdn(tsd);
	malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);
	for (unsigned i = 1; i < max_background_threads; i++) {
		if (created_threads[i]) {
			continue;
		}
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		/*
		 * In case of the background_thread_paused state because of
		 * arena reset, delay the creation.
		 */
		bool create = (info->state == background_thread_started);
		malloc_mutex_unlock(tsdn, &info->mtx);
		if (!create) {
			continue;
		}

		pre_reentrancy(tsd, NULL);
		int err = background_thread_create_signals_masked(&info->thread,
		    NULL, background_thread_entry, (void *)(uintptr_t)i);
		post_reentrancy(tsd);

		if (err == 0) {
			(*n_created)++;
			created_threads[i] = true;
		} else {
			malloc_printf("<jemalloc>: background thread "
			    "creation failed (%d)\n", err);
			if (opt_abort) {
				abort();
			}
		}
		/* Return to restart the loop since we unlocked. */
		ret = true;
		break;
	}
	malloc_mutex_lock(tsdn, &background_thread_info[0].mtx);

	return ret;
}

static void
background_thread0_work(tsd_t *tsd) {
	/* Thread0 is also responsible for launching / terminating threads. */
	VARIABLE_ARRAY(bool, created_threads, max_background_threads);
	unsigned i;
	for (i = 1; i < max_background_threads; i++) {
		created_threads[i] = false;
	}
	/* Start working, and create more threads when asked. */
	unsigned n_created = 1;
	while (background_thread_info[0].state != background_thread_stopped) {
		if (background_thread_pause_check(tsd_tsdn(tsd),
		    &background_thread_info[0])) {
			continue;
		}
		if (check_background_thread_creation(tsd, &n_created,
		    (bool *)&created_threads)) {
			continue;
		}
		background_work_sleep_once(tsd_tsdn(tsd),
		    &background_thread_info[0], 0);
	}

	/*
	 * Shut down other threads at exit.  Note that the ctl thread is holding
	 * the global background_thread mutex (and is waiting) for us.
	 */
	assert(!background_thread_enabled());
	for (i = 1; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		assert(info->state != background_thread_paused);
		if (created_threads[i]) {
			background_threads_disable_single(tsd, info);
		} else {
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			if (info->state != background_thread_stopped) {
				/* The thread was not created. */
				assert(info->state ==
				    background_thread_started);
				n_background_threads--;
				info->state = background_thread_stopped;
			}
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
	}
	background_thread_info[0].state = background_thread_stopped;
	assert(n_background_threads == 1);
}

static void
background_work(tsd_t *tsd, unsigned ind) {
	background_thread_info_t *info = &background_thread_info[ind];

	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
	    BACKGROUND_THREAD_INDEFINITE_SLEEP);
	if (ind == 0) {
		background_thread0_work(tsd);
	} else {
		while (info->state != background_thread_stopped) {
			if (background_thread_pause_check(tsd_tsdn(tsd),
			    info)) {
				continue;
			}
			background_work_sleep_once(tsd_tsdn(tsd), info, ind);
		}
	}
	assert(info->state == background_thread_stopped);
	background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}

static void *
background_thread_entry(void *ind_arg) {
	unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
	assert(thread_ind < max_background_threads);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
	pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#endif
	if (opt_percpu_arena != percpu_arena_disabled) {
		set_current_thread_affinity((int)thread_ind);
	}
	/*
	 * Start periodic background work.  We use internal tsd which avoids
	 * side effects, for example triggering new arena creation (which in
	 * turn triggers another background thread creation).
	 */
	background_work(tsd_internal_fetch(), thread_ind);
	assert(pthread_equal(pthread_self(),
	    background_thread_info[thread_ind].thread));

	return NULL;
}

static void
background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
	info->state = background_thread_started;
	background_thread_info_init(tsd_tsdn(tsd), info);
	n_background_threads++;
}

/* Create a new background thread if needed. */
bool
background_thread_create(tsd_t *tsd, unsigned arena_ind) {
	assert(have_background_thread);
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	/* We create at most NCPUs threads. */
	size_t thread_ind = arena_ind % max_background_threads;
	background_thread_info_t *info = &background_thread_info[thread_ind];

	bool need_new_thread;
	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	need_new_thread = background_thread_enabled() &&
	    (info->state == background_thread_stopped);
	if (need_new_thread) {
		background_thread_init(tsd, info);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
	if (!need_new_thread) {
		return false;
	}
	if (arena_ind != 0) {
		/* Threads are created asynchronously by Thread 0. */
		background_thread_info_t *t0 = &background_thread_info[0];
		malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
		assert(t0->state == background_thread_started);
		pthread_cond_signal(&t0->cond);
		malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);

		return false;
	}

	pre_reentrancy(tsd, NULL);
	/*
	 * To avoid complications (besides reentrancy), create internal
	 * background threads with the underlying pthread_create.
	 */
	int err = background_thread_create_signals_masked(&info->thread, NULL,
	    background_thread_entry, (void *)thread_ind);
	post_reentrancy(tsd);

	if (err != 0) {
		malloc_printf("<jemalloc>: arena 0 background thread creation "
		    "failed (%d)\n", err);
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		info->state = background_thread_stopped;
		n_background_threads--;
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

		return true;
	}

	return false;
}

bool
background_threads_enable(tsd_t *tsd) {
	assert(n_background_threads == 0);
	assert(background_thread_enabled());
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	VARIABLE_ARRAY(bool, marked, max_background_threads);
	unsigned i, nmarked;
	for (i = 0; i < max_background_threads; i++) {
		marked[i] = false;
	}
	nmarked = 0;
	/* Thread 0 is required and created at the end. */
	marked[0] = true;
	/* Mark the threads we need to create for thread 0. */
	unsigned n = narenas_total_get();
	for (i = 1; i < n; i++) {
		if (marked[i % max_background_threads] ||
		    arena_get(tsd_tsdn(tsd), i, false) == NULL) {
			continue;
		}
		background_thread_info_t *info = &background_thread_info[
		    i % max_background_threads];
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		assert(info->state == background_thread_stopped);
		background_thread_init(tsd, info);
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		marked[i % max_background_threads] = true;
		if (++nmarked == max_background_threads) {
			break;
		}
	}

	return background_thread_create(tsd, 0);
}

bool
background_threads_disable(tsd_t *tsd) {
	assert(!background_thread_enabled());
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	/* Thread 0 will be responsible for terminating other threads. */
	if (background_threads_disable_single(tsd,
	    &background_thread_info[0])) {
		return true;
	}
	assert(n_background_threads == 0);

	return false;
}

/* Check if we need to signal the background thread early. */
void
background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) {
	background_thread_info_t *info = arena_background_thread_info_get(
	    arena);
	if (malloc_mutex_trylock(tsdn, &info->mtx)) {
		/*
		 * Background thread may hold the mutex for a long period of
		 * time.  We'd like to avoid the variance on application
		 * threads.  So keep this non-blocking, and leave the work to a
		 * future epoch.
		 */
		return;
	}

	if (info->state != background_thread_started) {
		goto label_done;
	}
	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		goto label_done;
	}

	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
	if (decay_time <= 0) {
		/* Purging is eagerly done or disabled currently. */
		goto label_done_unlock2;
	}
	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
	assert(decay_interval_ns > 0);

	nstime_t diff;
	nstime_init(&diff, background_thread_wakeup_time_get(info));
	if (nstime_compare(&diff, &decay->epoch) <= 0) {
		goto label_done_unlock2;
	}
	nstime_subtract(&diff, &decay->epoch);
	if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
		goto label_done_unlock2;
	}

	if (npages_new > 0) {
		size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
		/*
		 * Compute how many new pages we would need to purge by the next
		 * wakeup, which is used to determine if we should signal the
		 * background thread.
		 */
		uint64_t npurge_new;
		if (n_epoch >= SMOOTHSTEP_NSTEPS) {
			npurge_new = npages_new;
		} else {
			uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
			assert(h_steps_max >=
			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
			npurge_new = npages_new * (h_steps_max -
			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
			npurge_new >>= SMOOTHSTEP_BFP;
		}
		info->npages_to_purge_new += npurge_new;
	}

	bool should_signal;
	if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		should_signal = true;
	} else if (unlikely(background_thread_indefinite_sleep(info)) &&
	    (extents_npages_get(&arena->extents_dirty) > 0 ||
	    extents_npages_get(&arena->extents_muzzy) > 0 ||
	    info->npages_to_purge_new > 0)) {
		should_signal = true;
	} else {
		should_signal = false;
	}

	if (should_signal) {
		info->npages_to_purge_new = 0;
		pthread_cond_signal(&info->cond);
	}
label_done_unlock2:
	malloc_mutex_unlock(tsdn, &decay->mtx);
label_done:
	malloc_mutex_unlock(tsdn, &info->mtx);
}

void
background_thread_prefork0(tsdn_t *tsdn) {
	malloc_mutex_prefork(tsdn, &background_thread_lock);
	background_thread_enabled_at_fork = background_thread_enabled();
}

void
background_thread_prefork1(tsdn_t *tsdn) {
	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
	}
}

void
background_thread_postfork_parent(tsdn_t *tsdn) {
	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_postfork_parent(tsdn,
		    &background_thread_info[i].mtx);
	}
	malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
}

void
background_thread_postfork_child(tsdn_t *tsdn) {
	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_postfork_child(tsdn,
		    &background_thread_info[i].mtx);
	}
	malloc_mutex_postfork_child(tsdn, &background_thread_lock);
	if (!background_thread_enabled_at_fork) {
		return;
	}

	/* Clear background_thread state (reset to disabled for child). */
	malloc_mutex_lock(tsdn, &background_thread_lock);
	n_background_threads = 0;
	background_thread_enabled_set(tsdn, false);
	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		info->state = background_thread_stopped;
		int ret = pthread_cond_init(&info->cond, NULL);
		assert(ret == 0);
		background_thread_info_init(tsdn, info);
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
	malloc_mutex_unlock(tsdn, &background_thread_lock);
}

bool
background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
	assert(config_stats);
	malloc_mutex_lock(tsdn, &background_thread_lock);
	if (!background_thread_enabled()) {
		malloc_mutex_unlock(tsdn, &background_thread_lock);
		return true;
	}

	stats->num_threads = n_background_threads;
	uint64_t num_runs = 0;
	nstime_init(&stats->run_interval, 0);
	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		if (info->state != background_thread_stopped) {
			num_runs += info->tot_n_runs;
			nstime_add(&stats->run_interval, &info->tot_sleep_time);
		}
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
	stats->num_runs = num_runs;
	if (num_runs > 0) {
		nstime_idivide(&stats->run_interval, num_runs);
	}
	malloc_mutex_unlock(tsdn, &background_thread_lock);

	return false;
}

#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
#undef BILLION
#undef BACKGROUND_THREAD_MIN_INTERVAL_NS

static bool
pthread_create_fptr_init(void) {
	if (pthread_create_fptr != NULL) {
		return false;
	}
	pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
	if (pthread_create_fptr == NULL) {
		can_enable_background_thread = false;
		if (config_lazy_lock || opt_background_thread) {
			malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
			    "\"pthread_create\")\n");
			abort();
		}
	} else {
		can_enable_background_thread = true;
	}

	return false;
}

/*
 * When lazy lock is enabled, we need to make sure setting isthreaded before
 * taking any background_thread locks.  This is called early in ctl (instead of
 * wait for the pthread_create calls to trigger) because the mutex is required
 * before creating background threads.
 */
void
background_thread_ctl_init(tsdn_t *tsdn) {
	malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
	pthread_create_fptr_init();
	pthread_create_wrapper_init();
#endif
}

#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */

bool
background_thread_boot0(void) {
	if (!have_background_thread && opt_background_thread) {
		malloc_printf("<jemalloc>: option background_thread currently "
		    "supports pthread only\n");
		return true;
	}
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
	if ((config_lazy_lock || opt_background_thread) &&
	    pthread_create_fptr_init()) {
		return true;
	}
#endif
	return false;
}

bool
background_thread_boot1(tsdn_t *tsdn) {
#ifdef JEMALLOC_BACKGROUND_THREAD
	assert(have_background_thread);
	assert(narenas_total_get() > 0);

	if (opt_max_background_threads == MAX_BACKGROUND_THREAD_LIMIT &&
	    ncpus < MAX_BACKGROUND_THREAD_LIMIT) {
		opt_max_background_threads = ncpus;
	}
	max_background_threads = opt_max_background_threads;

	background_thread_enabled_set(tsdn, opt_background_thread);
	if (malloc_mutex_init(&background_thread_lock,
	    "background_thread_global",
	    WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
	    b0get(), opt_max_background_threads *
	    sizeof(background_thread_info_t), CACHELINE);
	if (background_thread_info == NULL) {
		return true;
	}

	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		/* Thread mutex is rank_inclusive because of thread0. */
		if (malloc_mutex_init(&info->mtx, "background_thread",
		    WITNESS_RANK_BACKGROUND_THREAD,
		    malloc_mutex_address_ordered)) {
			return true;
		}
		if (pthread_cond_init(&info->cond, NULL)) {
			return true;
		}
		malloc_mutex_lock(tsdn, &info->mtx);
		info->state = background_thread_stopped;
		background_thread_info_init(tsdn, info);
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
#endif

	return false;
}
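Note on background_thread_create_signals_masked() above: it blocks every signal around the underlying pthread_create call so the new worker thread starts with a full signal mask and never steals signals meant for application threads, then restores the caller's mask. A minimal standalone sketch of the same POSIX pattern, independent of jemalloc's wrapper and error reporting (create_thread_signals_masked is a hypothetical helper name, not jemalloc API):

#include <pthread.h>
#include <signal.h>

/* Sketch: create a thread that inherits a completely filled signal mask. */
static int
create_thread_signals_masked(pthread_t *thread,
    void *(*start_routine)(void *), void *arg) {
	sigset_t set, oldset;
	sigfillset(&set);		/* block everything during creation... */
	int err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
	if (err != 0) {
		return err;
	}
	int create_err = pthread_create(thread, NULL, start_routine, arg);
	/* ...then restore the caller's original mask, success or not. */
	pthread_sigmask(SIG_SETMASK, &oldset, NULL);
	return create_err;
}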
deps/jemalloc/src/base.c
#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"
#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"

/******************************************************************************/
/* Data. */

static malloc_mutex_t	base_mtx;
static base_t	*b0;

/*
 * Current pages that are being used for internal memory allocations.  These
 * pages are carved up in cacheline-size quanta, so that there is no chance of
 * false cache line sharing.
 */
static void		*base_pages;
static void		*base_next_addr;
static void		*base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t	*base_nodes;

metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;

const char *metadata_thp_mode_names[] = {
	"disabled",
	"auto",
	"always"
};

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static bool	base_pages_alloc(size_t minsize);

static inline bool
metadata_thp_madvise(void) {
	return (metadata_thp_enabled() &&
	    (init_system_thp_mode == thp_mode_default));
}

/******************************************************************************/

static void *
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
	void *addr;
	bool zero = true;
	bool commit = true;

static bool
base_pages_alloc(size_t minsize) {
	size_t csize;
	bool zero;

	/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
	assert(size == HUGEPAGE_CEILING(size));
	size_t alignment = HUGEPAGE;
	if (extent_hooks == &extent_hooks_default) {
		addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
	} else {
		/* No arena context as we are creating new arenas. */
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
		    &zero, &commit, ind);
		post_reentrancy(tsd);
	}

	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	zero = false;
	base_pages = chunk_alloc(csize, chunksize, true, &zero,
	    chunk_dss_prec_get());
	if (base_pages == NULL)
		return (true);
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);
	return addr;
}
	return (false);

static void
base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
    size_t size) {
	/*
	 * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
	 * stopping at first success.  This cascade is performed for consistency
	 * with the cascade in extent_dalloc_wrapper() because an application's
	 * custom hooks may not support e.g. dalloc.  This function is only ever
	 * called as a side effect of arena destruction, so although it might
	 * seem pointless to do anything besides dalloc here, the application
	 * may in fact want the end state of all associated virtual memory to be
	 * in some consistent-but-allocated state.
	 */
	if (extent_hooks == &extent_hooks_default) {
		if (!extent_dalloc_mmap(addr, size)) {
			goto label_done;
		}
		if (!pages_decommit(addr, size)) {
			goto label_done;
		}
		if (!pages_purge_forced(addr, size)) {
			goto label_done;
		}
		if (!pages_purge_lazy(addr, size)) {
			goto label_done;
		}
		/* Nothing worked.  This should never happen. */
		not_reached();
	} else {
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		if (extent_hooks->dalloc != NULL &&
		    !extent_hooks->dalloc(extent_hooks, addr, size, true,
		    ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->decommit != NULL &&
		    !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->purge_forced != NULL &&
		    !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
		    size, ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->purge_lazy != NULL &&
		    !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_post_reentrancy;
		}
		/* Nothing worked.  That's the application's problem. */
	label_post_reentrancy:
		post_reentrancy(tsd);
	}
label_done:
	if (metadata_thp_madvise()) {
		/* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
		    (size & HUGEPAGE_MASK) == 0);
		pages_nohuge(addr, size);
	}
}

void *
base_alloc(size_t size)
{

static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
    size_t size) {
	size_t sn;

	sn = *extent_sn_next;
	(*extent_sn_next)++;

	extent_binit(extent, addr, size, sn);
}

static size_t
base_get_num_blocks(base_t *base, bool with_new_block) {
	base_block_t *b = base->blocks;
	assert(b != NULL);

	size_t n_blocks = with_new_block ? 2 : 1;
	while (b->next != NULL) {
		n_blocks++;
		b = b->next;
	}

	return n_blocks;
}

static void
base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
	assert(opt_metadata_thp == metadata_thp_auto);
	malloc_mutex_assert_owner(tsdn, &base->mtx);
	if (base->auto_thp_switched) {
		return;
	}
	/* Called when adding a new block. */
	bool should_switch;
	if (base_ind_get(base) != 0) {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD);
	} else {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD_A0);
	}
	if (!should_switch) {
		return;
	}

	base->auto_thp_switched = true;
	assert(!config_stats || base->n_thp == 0);
	/* Make the initial blocks THP lazily. */
	base_block_t *block = base->blocks;
	while (block != NULL) {
		assert((block->size & HUGEPAGE_MASK) == 0);
		pages_huge(block, block->size);
		if (config_stats) {
			base->n_thp += HUGEPAGE_CEILING(block->size -
			    extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
		}
		block = block->next;
		assert(block == NULL || (base_ind_get(base) == 0));
	}
}

static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
    size_t alignment) {
	void *ret;

	assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
	assert(size == ALIGNMENT_CEILING(size, alignment));

	*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
	    alignment) - (uintptr_t)extent_addr_get(extent);
	ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
	assert(extent_bsize_get(extent) >= *gap_size + size);
	extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
	    *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
	    extent_sn_get(extent));
	return ret;
}

static void
base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
    void *addr, size_t size) {
	if (extent_bsize_get(extent) > 0) {
		/*
		 * Compute the index for the largest size class that does not
		 * exceed extent's size.
		 */
		szind_t index_floor =
		    sz_size2index(extent_bsize_get(extent) + 1) - 1;
		extent_heap_insert(&base->avail[index_floor], extent);
	}

	if (config_stats) {
		base->allocated += size;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation.  Adjust n_thp similarly when
		 * metadata_thp is enabled.
		 */
		base->resident += PAGE_CEILING((uintptr_t)addr + size) -
		    PAGE_CEILING((uintptr_t)addr - gap_size);
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		if (metadata_thp_madvise() && (opt_metadata_thp ==
		    metadata_thp_always || base->auto_thp_switched)) {
			base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
			    - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
			    LG_HUGEPAGE;
			assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
		}
	}
}

static void *
base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
    size_t alignment) {
	void *ret;
	size_t csize;
	size_t gap_size;

	/* Round size up to nearest multiple of the cacheline size. */
	csize = CACHELINE_CEILING(size);
	ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
	base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
	return ret;
}

/*
 * Allocate a block of virtual memory that is large enough to start with a
 * base_block_t header, followed by an object of specified size and alignment.
 * On success a pointer to the initialized base_block_t header is returned.
 */
static base_block_t *
base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
    unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
    size_t alignment) {
	alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t header_size = sizeof(base_block_t);
	size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
	    header_size;
	/*
	 * Create increasingly larger blocks in order to limit the total number
	 * of disjoint virtual memory ranges.  Choose the next size in the page
	 * size class series (skipping size classes that are not a multiple of
	 * HUGEPAGE), or a size large enough to satisfy the requested size and
	 * alignment, whichever is larger.
	 */
	size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
	    + usize));
	pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
	    *pind_last;
	size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
	size_t block_size = (min_block_size > next_block_size) ? min_block_size
	    : next_block_size;
	base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
	    block_size);
	if (block == NULL) {
		return NULL;
	}
	malloc_mutex_lock(&base_mtx);
	/* Make sure there's enough space for the allocation. */
	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
		if (base_pages_alloc(csize)) {
			malloc_mutex_unlock(&base_mtx);
			return (NULL);

	if (metadata_thp_madvise()) {
		void *addr = (void *)block;
		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
		    (block_size & HUGEPAGE_MASK) == 0);
		if (opt_metadata_thp == metadata_thp_always) {
			pages_huge(addr, block_size);
		} else if (opt_metadata_thp == metadata_thp_auto &&
		    base != NULL) {
			/* base != NULL indicates this is not a new base. */
			malloc_mutex_lock(tsdn, &base->mtx);
			base_auto_thp_switch(tsdn, base);
			if (base->auto_thp_switched) {
				pages_huge(addr, block_size);
			}
			malloc_mutex_unlock(tsdn, &base->mtx);
		}
	}
	/* Allocate. */
	ret = base_next_addr;
	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
	malloc_mutex_unlock(&base_mtx);
	VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
	return (ret);

	*pind_last = sz_psz2ind(block_size);
	block->size = block_size;
	block->next = NULL;
	assert(block_size >= header_size);
	base_extent_init(extent_sn_next, &block->extent,
	    (void *)((uintptr_t)block + header_size), block_size - header_size);
	return block;
}

void *
base_calloc(size_t number, size_t size)
{
	void *ret = base_alloc(number * size);

/*
 * Allocate an extent that is at least as large as specified size, with
 * specified alignment.
 */
static extent_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &base->mtx);

	if (ret != NULL)
		memset(ret, 0, number * size);

	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	/*
	 * Drop mutex during base_block_alloc(), because an extent hook will be
	 * called.
	 */
	malloc_mutex_unlock(tsdn, &base->mtx);
	base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
	    base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
	    alignment);
	malloc_mutex_lock(tsdn, &base->mtx);
	if (block == NULL) {
		return NULL;
	}
	block->next = base->blocks;
	base->blocks = block;
	if (config_stats) {
		base->allocated += sizeof(base_block_t);
		base->resident += PAGE_CEILING(sizeof(base_block_t));
		base->mapped += block->size;
		if (metadata_thp_madvise() && !(opt_metadata_thp ==
		    metadata_thp_auto && !base->auto_thp_switched)) {
			assert(base->n_thp > 0);
			base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
			    LG_HUGEPAGE;
		}
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	return &block->extent;
}
	return (ret);

base_t *
b0get(void) {
	return b0;
}

extent_node_t *
base_node_alloc(void)
{
	extent_node_t *ret;

base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	pszind_t pind_last = 0;
	size_t extent_sn_next = 0;
	base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,
	    &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
	if (block == NULL) {
		return NULL;
	}

	malloc_mutex_lock(&base_mtx);
	if (base_nodes != NULL) {
		ret = base_nodes;
		base_nodes = *(extent_node_t **)ret;
		malloc_mutex_unlock(&base_mtx);
		VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));

	size_t gap_size;
	size_t base_alignment = CACHELINE;
	size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
	    &gap_size, base_size, base_alignment);
	base->ind = ind;
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
	    malloc_mutex_rank_exclusive)) {
		base_unmap(tsdn, extent_hooks, ind, block, block->size);
		return NULL;
	}
	base->pind_last = pind_last;
	base->extent_sn_next = extent_sn_next;
	base->blocks = block;
	base->auto_thp_switched = false;
	for (szind_t i = 0; i < NSIZES; i++) {
		extent_heap_new(&base->avail[i]);
	}
	if (config_stats) {
		base->allocated = sizeof(base_block_t);
		base->resident = PAGE_CEILING(sizeof(base_block_t));
		base->mapped = block->size;
		base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
		    metadata_thp_madvise() ? HUGEPAGE_CEILING(
		    sizeof(base_block_t)) >> LG_HUGEPAGE : 0;
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
	    base_size);

	return (ret);
	return base;
}

void
base_node_dealloc(extent_node_t *node)
{
base_delete(tsdn_t *tsdn, base_t *base) {
	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	base_block_t *next = base->blocks;
	do {
		base_block_t *block = next;
		next = block->next;
		base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
		    block->size);
	} while (next != NULL);
}

	VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	malloc_mutex_lock(&base_mtx);
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
	malloc_mutex_unlock(&base_mtx);

extent_hooks_t *
base_extent_hooks_get(base_t *base) {
	return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
	    ATOMIC_ACQUIRE);
}

bool
base_boot(void)
{

extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
	extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
	return old_extent_hooks;
}

static void *
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
    size_t *esn) {
	alignment = QUANTUM_CEILING(alignment);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t asize = usize + alignment - QUANTUM;

	extent_t *extent = NULL;
	malloc_mutex_lock(tsdn, &base->mtx);
	for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
		extent = extent_heap_remove_first(&base->avail[i]);
		if (extent != NULL) {
			/* Use existing space. */
			break;
		}
	}
	if (extent == NULL) {
		/* Try to allocate more space. */
		extent = base_extent_alloc(tsdn, base, usize, alignment);
	}
	void *ret;
	if (extent == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = base_extent_bump_alloc(base, extent, usize, alignment);
	if (esn != NULL) {
		*esn = extent_sn_get(extent);
	}
label_return:
	malloc_mutex_unlock(tsdn, &base->mtx);
	return ret;
}

	base_nodes = NULL;
	if (malloc_mutex_init(&base_mtx))
		return (true);

/*
 * base_alloc() returns zeroed memory, which is always demand-zeroed for the
 * auto arenas, in order to make multi-page sparse data structures such as radix
 * tree nodes efficient with respect to physical memory usage.  Upon success a
 * pointer to at least size bytes with specified alignment is returned.  Note
 * that size is rounded up to the nearest multiple of alignment to avoid false
 * sharing.
 */
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	return base_alloc_impl(tsdn, base, size, alignment, NULL);
}

	return (false);

extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
	size_t esn;
	extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
	    CACHELINE, &esn);
	if (extent == NULL) {
		return NULL;
	}
	extent_esn_set(extent, esn);
	return extent;
}

void
base_prefork(void)
{
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
    size_t *mapped, size_t *n_thp) {
	cassert(config_stats);

	malloc_mutex_prefork(&base_mtx);
	malloc_mutex_lock(tsdn, &base->mtx);
	assert(base->allocated <= base->resident);
	assert(base->resident <= base->mapped);
	*allocated = base->allocated;
	*resident = base->resident;
	*mapped = base->mapped;
	*n_thp = base->n_thp;
	malloc_mutex_unlock(tsdn, &base->mtx);
}

void
base_postfork_parent(void)
{
base_prefork(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_prefork(tsdn, &base->mtx);
}
	malloc_mutex_postfork_parent(&base_mtx);

void
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_parent(tsdn, &base->mtx);
}

void
base_postfork_child(void)
{
base_postfork_child(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_child(tsdn, &base->mtx);
}
	malloc_mutex_postfork_child(&base_mtx);

bool
base_boot(tsdn_t *tsdn) {
	b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);

	return (b0 == NULL);
}
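Note on base_extent_bump_alloc_helper() above: base carves metadata allocations out of already-mapped blocks by bumping a cursor up to the next aligned address and remembering the alignment gap. A minimal sketch of that bump arithmetic over a plain address range, with hypothetical names (align_up, bump_alloc) that are not jemalloc API:

#include <stdint.h>
#include <stddef.h>

/* Round addr up to a multiple of alignment (alignment must be a power of 2). */
static uintptr_t
align_up(uintptr_t addr, size_t alignment) {
	return (addr + (alignment - 1)) & ~((uintptr_t)alignment - 1);
}

/*
 * Bump-allocate size bytes with the given alignment out of [*cursor, end).
 * Returns NULL when the remaining space cannot hold the aligned request.
 */
static void *
bump_alloc(uintptr_t *cursor, uintptr_t end, size_t size, size_t alignment) {
	uintptr_t ret = align_up(*cursor, alignment);
	size_t gap = ret - *cursor;	/* bytes skipped to reach alignment */
	(void)gap;			/* base tracks this as gap_size for stats */
	if (ret + size > end || ret + size < ret) {
		return NULL;
	}
	*cursor = ret + size;		/* advance past the allocation */
	return (void *)ret;
}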
deps/jemalloc/src/bin.c (new file, 0 → 100644)
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/witness.h"
const
bin_info_t
bin_infos
[
NBINS
]
=
{
#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \
{reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
lg_delta_lookup) \
BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \
(pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \
(ndelta<<lg_delta)))
SIZE_CLASSES
#undef BIN_INFO_bin_yes
#undef BIN_INFO_bin_no
#undef SC
};
bool
bin_init
(
bin_t
*
bin
)
{
if
(
malloc_mutex_init
(
&
bin
->
lock
,
"bin"
,
WITNESS_RANK_BIN
,
malloc_mutex_rank_exclusive
))
{
return
true
;
}
bin
->
slabcur
=
NULL
;
extent_heap_new
(
&
bin
->
slabs_nonfull
);
extent_list_init
(
&
bin
->
slabs_full
);
if
(
config_stats
)
{
memset
(
&
bin
->
stats
,
0
,
sizeof
(
bin_stats_t
));
}
return
false
;
}
void
bin_prefork
(
tsdn_t
*
tsdn
,
bin_t
*
bin
)
{
malloc_mutex_prefork
(
tsdn
,
&
bin
->
lock
);
}
void
bin_postfork_parent
(
tsdn_t
*
tsdn
,
bin_t
*
bin
)
{
malloc_mutex_postfork_parent
(
tsdn
,
&
bin
->
lock
);
}
void
bin_postfork_child
(
tsdn_t
*
tsdn
,
bin_t
*
bin
)
{
malloc_mutex_postfork_child
(
tsdn
,
&
bin
->
lock
);
}
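Note on bin_infos[] above: the table is generated at compile time by expanding the SIZE_CLASSES X-macro with the per-bin BIN_INFO_bin_yes / BIN_INFO_bin_no helpers. The same table-generation trick, reduced to a self-contained sketch with a hypothetical COLOR_LIST macro (nothing here is jemalloc API):

#include <stdio.h>

/* X-macro listing: each entry supplies (name, red, green, blue). */
#define COLOR_LIST(X) \
	X(black, 0, 0, 0) \
	X(red, 255, 0, 0) \
	X(white, 255, 255, 255)

typedef struct {
	const char *name;
	unsigned r, g, b;
} color_info_t;

/* Expand the list once into a const table... */
#define COLOR_ROW(name, r, g, b) {#name, r, g, b},
static const color_info_t color_infos[] = {
	COLOR_LIST(COLOR_ROW)
};
#undef COLOR_ROW

/* ...and again into an enum, keeping the two in sync automatically. */
#define COLOR_ENUM(name, r, g, b) color_##name,
enum { COLOR_LIST(COLOR_ENUM) color_count };
#undef COLOR_ENUM

int
main(void) {
	printf("%s -> #%02x%02x%02x\n", color_infos[color_red].name,
	    color_infos[color_red].r, color_infos[color_red].g,
	    color_infos[color_red].b);
	return 0;
}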
deps/jemalloc/src/bitmap.c
#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static size_t	bits2groups(size_t nbits);

#include "jemalloc/internal/assert.h"

/******************************************************************************/

static size_t
bits2groups(size_t nbits) {
	return ((nbits >> LG_BITMAP_GROUP_NBITS) +
	    !!(nbits & BITMAP_GROUP_NBITS_MASK));
}

#ifdef BITMAP_USE_TREE

void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
	unsigned i;
	size_t group_count;

@@ -31,60 +22,100 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
	 * that requires only one group.
	 */
	binfo->levels[0].group_offset = 0;
	group_count = bits2groups(nbits);
	group_count = BITMAP_BITS2GROUPS(nbits);
	for (i = 1; group_count > 1; i++) {
		assert(i < BITMAP_MAX_LEVELS);
		binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
		    + group_count;
		group_count = bits2groups(group_count);
		group_count = BITMAP_BITS2GROUPS(group_count);
	}
	binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
	    + group_count;
	assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
	binfo->nlevels = i;
	binfo->nbits = nbits;
}

size_t
bitmap_info_ngroups(const bitmap_info_t *binfo)
{
	return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
}

size_t
bitmap_size(size_t nbits)
{
	bitmap_info_t binfo;

	bitmap_info_init(&binfo, nbits);
	return (bitmap_info_ngroups(&binfo));
static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
	return binfo->levels[binfo->nlevels].group_offset;
}

void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
	size_t extra;
	unsigned i;

	/*
	 * Bits are actually inverted with regard to the external bitmap
	 * interface, so the bitmap starts out with all 1 bits, except for
	 * trailing unused bits (if any).  Note that each group uses bit 0 to
	 * correspond to the first logical bit in the group, so extra bits
	 * are the most significant bits of the last group.
	 * interface.
	 */
	if (fill) {
		/* The "filled" bitmap starts out with all 0 bits. */
		memset(bitmap, 0, bitmap_size(binfo));
		return;
	}
	/*
	 * The "empty" bitmap starts out with all 1 bits, except for trailing
	 * unused bits (if any).  Note that each group uses bit 0 to correspond
	 * to the first logical bit in the group, so extra bits are the most
	 * significant bits of the last group.
	 */
	memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
	    LG_SIZEOF_BITMAP);
	memset(bitmap, 0xffU, bitmap_size(binfo));
	extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
	    & BITMAP_GROUP_NBITS_MASK;
	if (extra != 0)
	if (extra != 0) {
		bitmap[binfo->levels[1].group_offset - 1] >>= extra;
	}
	for (i = 1; i < binfo->nlevels; i++) {
		size_t group_count = binfo->levels[i].group_offset -
		    binfo->levels[i-1].group_offset;
		extra = (BITMAP_GROUP_NBITS - (group_count &
		    BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
		if (extra != 0)
		if (extra != 0) {
			bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
		}
	}
}

#else /* BITMAP_USE_TREE */

void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
	assert(nbits > 0);
	assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));

	binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
	binfo->nbits = nbits;
}

static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
	return binfo->ngroups;
}

void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
	size_t extra;

	if (fill) {
		memset(bitmap, 0, bitmap_size(binfo));
		return;
	}
	memset(bitmap, 0xffU, bitmap_size(binfo));
	extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
	    & BITMAP_GROUP_NBITS_MASK;
	if (extra != 0) {
		bitmap[binfo->ngroups - 1] >>= extra;
	}
}

#endif /* BITMAP_USE_TREE */

size_t
bitmap_size(const bitmap_info_t *binfo) {
	return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
}
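Note on the flat (non-tree) bitmap above: nbits are stored across fixed-width groups, BITMAP_BITS2GROUPS rounds the bit count up to whole groups, and bitmap_init shifts the last group right so the unused high bits start out cleared. A small self-contained sketch of that layout using 64-bit groups (the constants and names here are illustrative, not jemalloc's):

#include <stdint.h>
#include <string.h>

#define GROUP_NBITS		64
#define BITS2GROUPS(nbits)	(((nbits) + GROUP_NBITS - 1) / GROUP_NBITS)

/* Initialize an "empty" bitmap: every usable bit set, trailing bits cleared. */
static void
flat_bitmap_init(uint64_t *groups, size_t nbits) {
	size_t ngroups = BITS2GROUPS(nbits);
	memset(groups, 0xff, ngroups * sizeof(uint64_t));
	size_t extra = (GROUP_NBITS - (nbits % GROUP_NBITS)) % GROUP_NBITS;
	if (extra != 0) {
		/* Shift the last group so its top `extra` bits become 0. */
		groups[ngroups - 1] >>= extra;
	}
}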
deps/jemalloc/src/chunk.c (deleted, 100644 → 0)
#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;

malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering).  These are used when allocating chunks, in an attempt to re-use
 * address space.  Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t chunks_szad_mmap;
static extent_tree_t chunks_ad_mmap;
static extent_tree_t chunks_szad_dss;
static extent_tree_t chunks_ad_dss;

rtree_t *chunks_rtree;

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
size_t map_bias;
size_t arena_maxclass; /* Max size class for arenas. */

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void *chunk_recycle(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
    bool *zero);
static void chunk_record(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, void *chunk, size_t size);

/******************************************************************************/

static void *
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
    size_t alignment, bool base, bool *zero)
{
    void *ret;
    extent_node_t *node;
    extent_node_t key;
    size_t alloc_size, leadsize, trailsize;
    bool zeroed;

    if (base) {
        /*
         * This function may need to call base_node_{,de}alloc(), but
         * the current chunk allocation request is on behalf of the
         * base allocator.  Avoid deadlock (and if that weren't an
         * issue, potential for infinite recursion) by returning NULL.
         */
        return (NULL);
    }

    alloc_size = size + alignment - chunksize;
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    key.addr = NULL;
    key.size = alloc_size;
    malloc_mutex_lock(&chunks_mtx);
    node = extent_tree_szad_nsearch(chunks_szad, &key);
    if (node == NULL) {
        malloc_mutex_unlock(&chunks_mtx);
        return (NULL);
    }
    leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
        (uintptr_t)node->addr;
    assert(node->size >= leadsize + size);
    trailsize = node->size - leadsize - size;
    ret = (void *)((uintptr_t)node->addr + leadsize);
    zeroed = node->zeroed;
    if (zeroed)
        *zero = true;
    /* Remove node from the tree. */
    extent_tree_szad_remove(chunks_szad, node);
    extent_tree_ad_remove(chunks_ad, node);
    if (leadsize != 0) {
        /* Insert the leading space as a smaller chunk. */
        node->size = leadsize;
        extent_tree_szad_insert(chunks_szad, node);
        extent_tree_ad_insert(chunks_ad, node);
        node = NULL;
    }
    if (trailsize != 0) {
        /* Insert the trailing space as a smaller chunk. */
        if (node == NULL) {
            /*
             * An additional node is required, but
             * base_node_alloc() can cause a new base chunk to be
             * allocated.  Drop chunks_mtx in order to avoid
             * deadlock, and if node allocation fails, deallocate
             * the result before returning an error.
             */
            malloc_mutex_unlock(&chunks_mtx);
            node = base_node_alloc();
            if (node == NULL) {
                chunk_dealloc(ret, size, true);
                return (NULL);
            }
            malloc_mutex_lock(&chunks_mtx);
        }
        node->addr = (void *)((uintptr_t)(ret) + size);
        node->size = trailsize;
        node->zeroed = zeroed;
        extent_tree_szad_insert(chunks_szad, node);
        extent_tree_ad_insert(chunks_ad, node);
        node = NULL;
    }
    malloc_mutex_unlock(&chunks_mtx);

    if (node != NULL)
        base_node_dealloc(node);
    if (*zero) {
        if (zeroed == false)
            memset(ret, 0, size);
        else if (config_debug) {
            size_t i;
            size_t *p = (size_t *)(uintptr_t)ret;

            VALGRIND_MAKE_MEM_DEFINED(ret, size);
            for (i = 0; i < size / sizeof(size_t); i++)
                assert(p[i] == 0);
        }
    }
    return (ret);
}
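The interesting part of chunk_recycle() is the split arithmetic: a recycled extent is carved into an optional leading remainder, the aligned allocation itself, and an optional trailing remainder. The following standalone sketch (not jemalloc code; the address, sizes, and alignment are made-up 64-bit example values) mirrors that computation with plain integer arithmetic:

    /* Standalone sketch of chunk_recycle()'s lead/trail split arithmetic. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Equivalent of jemalloc's ALIGNMENT_CEILING() for power-of-two alignments. */
    static uintptr_t
    align_ceiling(uintptr_t addr, uintptr_t alignment) {
        return (addr + alignment - 1) & ~(alignment - 1);
    }

    int
    main(void) {
        uintptr_t node_addr = 0x7f0000600000; /* assumed example extent address */
        size_t node_size = (size_t)3 << 22;   /* 12 MiB extent found by nsearch */
        size_t size = (size_t)1 << 22;        /* request: one 4 MiB chunk... */
        size_t alignment = (size_t)1 << 23;   /* ...aligned to 8 MiB */

        size_t leadsize = align_ceiling(node_addr, alignment) - node_addr;
        assert(node_size >= leadsize + size);
        size_t trailsize = node_size - leadsize - size;
        uintptr_t ret = node_addr + leadsize;

        /* The lead and trail remainders are reinserted into the szad/ad trees
         * as smaller extents, exactly as chunk_recycle() does above. */
        printf("ret=%#lx lead=%zu trail=%zu\n", (unsigned long)ret, leadsize,
            trailsize);
        return 0;
    }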
/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, but taking
 * advantage of them if they are returned.
 */
void *
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
    dss_prec_t dss_prec)
{
    void *ret;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    /* "primary" dss. */
    if (config_dss && dss_prec == dss_prec_primary) {
        if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
            alignment, base, zero)) != NULL)
            goto label_return;
        if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
            goto label_return;
    }
    /* mmap. */
    if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
        alignment, base, zero)) != NULL)
        goto label_return;
    if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
        goto label_return;
    /* "secondary" dss. */
    if (config_dss && dss_prec == dss_prec_secondary) {
        if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
            alignment, base, zero)) != NULL)
            goto label_return;
        if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
            goto label_return;
    }

    /* All strategies for allocation failed. */
    ret = NULL;
label_return:
    if (ret != NULL) {
        if (config_ivsalloc && base == false) {
            if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
                chunk_dealloc(ret, size, true);
                return (NULL);
            }
        }
        if (config_stats || config_prof) {
            bool gdump;
            malloc_mutex_lock(&chunks_mtx);
            if (config_stats)
                stats_chunks.nchunks += (size / chunksize);
            stats_chunks.curchunks += (size / chunksize);
            if (stats_chunks.curchunks > stats_chunks.highchunks) {
                stats_chunks.highchunks = stats_chunks.curchunks;
                if (config_prof)
                    gdump = true;
            } else if (config_prof)
                gdump = false;
            malloc_mutex_unlock(&chunks_mtx);
            if (config_prof && opt_prof && opt_prof_gdump && gdump)
                prof_gdump();
        }
        if (config_valgrind)
            VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
    }
    assert(CHUNK_ADDR2BASE(ret) == ret);
    return (ret);
}
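The comment above chunk_alloc() describes a useful contract: a caller that does not require zeroed memory may still be told, via *zero, that the memory happens to be zeroed, and can then skip its own memset(). A minimal standalone sketch of the same pattern (a toy calloc-backed allocator, not the jemalloc API) is:

    /* Standalone sketch of the *zero contract described above. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void *
    toy_chunk_alloc(size_t size, bool *zero) {
        /* calloc-backed path: memory is known-zeroed, so report it. */
        void *ret = calloc(1, size);
        if (ret != NULL) {
            *zero = true; /* toggled even though the caller passed false */
        }
        return ret;
    }

    int
    main(void) {
        bool zero = false; /* caller does not *require* zeroed memory */
        void *chunk = toy_chunk_alloc((size_t)1 << 20, &zero);
        if (chunk == NULL) {
            return 1;
        }
        if (!zero) {
            /* Only pay for memset() when zeroing was not guaranteed; this is
             * the trick arena_chunk_alloc() relies on. */
            memset(chunk, 0, (size_t)1 << 20);
        }
        printf("zeroed by allocator: %s\n", zero ? "yes" : "no");
        free(chunk);
        return 0;
    }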
static void
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
    size_t size)
{
    bool unzeroed;
    extent_node_t *xnode, *node, *prev, *xprev, key;

    unzeroed = pages_purge(chunk, size);
    VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

    /*
     * Allocate a node before acquiring chunks_mtx even though it might not
     * be needed, because base_node_alloc() may cause a new base chunk to
     * be allocated, which could cause deadlock if chunks_mtx were already
     * held.
     */
    xnode = base_node_alloc();
    /* Use xprev to implement conditional deferred deallocation of prev. */
    xprev = NULL;

    malloc_mutex_lock(&chunks_mtx);
    key.addr = (void *)((uintptr_t)chunk + size);
    node = extent_tree_ad_nsearch(chunks_ad, &key);
    /* Try to coalesce forward. */
    if (node != NULL && node->addr == key.addr) {
        /*
         * Coalesce chunk with the following address range.  This does
         * not change the position within chunks_ad, so only
         * remove/insert from/into chunks_szad.
         */
        extent_tree_szad_remove(chunks_szad, node);
        node->addr = chunk;
        node->size += size;
        node->zeroed = (node->zeroed && (unzeroed == false));
        extent_tree_szad_insert(chunks_szad, node);
    } else {
        /* Coalescing forward failed, so insert a new node. */
        if (xnode == NULL) {
            /*
             * base_node_alloc() failed, which is an exceedingly
             * unlikely failure.  Leak chunk; its pages have
             * already been purged, so this is only a virtual
             * memory leak.
             */
            goto label_return;
        }
        node = xnode;
        xnode = NULL; /* Prevent deallocation below. */
        node->addr = chunk;
        node->size = size;
        node->zeroed = (unzeroed == false);
        extent_tree_ad_insert(chunks_ad, node);
        extent_tree_szad_insert(chunks_szad, node);
    }

    /* Try to coalesce backward. */
    prev = extent_tree_ad_prev(chunks_ad, node);
    if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
        chunk) {
        /*
         * Coalesce chunk with the previous address range.  This does
         * not change the position within chunks_ad, so only
         * remove/insert node from/into chunks_szad.
         */
        extent_tree_szad_remove(chunks_szad, prev);
        extent_tree_ad_remove(chunks_ad, prev);

        extent_tree_szad_remove(chunks_szad, node);
        node->addr = prev->addr;
        node->size += prev->size;
        node->zeroed = (node->zeroed && prev->zeroed);
        extent_tree_szad_insert(chunks_szad, node);

        xprev = prev;
    }

label_return:
    malloc_mutex_unlock(&chunks_mtx);
    /*
     * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
     * avoid potential deadlock.
     */
    if (xnode != NULL)
        base_node_dealloc(xnode);
    if (xprev != NULL)
        base_node_dealloc(xprev);
}

void
chunk_unmap(void *chunk, size_t size)
{
    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    if (config_dss && chunk_in_dss(chunk))
        chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
    else if (chunk_dealloc_mmap(chunk, size))
        chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}

void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{
    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    if (config_ivsalloc)
        rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
    if (config_stats || config_prof) {
        malloc_mutex_lock(&chunks_mtx);
        assert(stats_chunks.curchunks >= (size / chunksize));
        stats_chunks.curchunks -= (size / chunksize);
        malloc_mutex_unlock(&chunks_mtx);
    }

    if (unmap)
        chunk_unmap(chunk, size);
}

bool
chunk_boot(void)
{
    /* Set variables according to the value of opt_lg_chunk. */
    chunksize = (ZU(1) << opt_lg_chunk);
    assert(chunksize >= PAGE);
    chunksize_mask = chunksize - 1;
    chunk_npages = (chunksize >> LG_PAGE);

    if (config_stats || config_prof) {
        if (malloc_mutex_init(&chunks_mtx))
            return (true);
        memset(&stats_chunks, 0, sizeof(chunk_stats_t));
    }
    if (config_dss && chunk_dss_boot())
        return (true);
    extent_tree_szad_new(&chunks_szad_mmap);
    extent_tree_ad_new(&chunks_ad_mmap);
    extent_tree_szad_new(&chunks_szad_dss);
    extent_tree_ad_new(&chunks_ad_dss);
    if (config_ivsalloc) {
        chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR + 3)) -
            opt_lg_chunk, base_alloc, NULL);
        if (chunks_rtree == NULL)
            return (true);
    }

    return (false);
}
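chunk_boot() derives the chunk geometry from opt_lg_chunk. A small standalone sketch of that derivation, assuming the common defaults opt_lg_chunk = 22 (4 MiB chunks) and LG_PAGE = 12 (4 KiB pages), both of which are configuration-dependent:

    #include <stddef.h>
    #include <stdio.h>

    int
    main(void) {
        unsigned opt_lg_chunk = 22, lg_page = 12; /* assumed defaults */

        size_t chunksize = (size_t)1 << opt_lg_chunk; /* 4194304 bytes */
        size_t chunksize_mask = chunksize - 1;        /* 0x3fffff */
        size_t chunk_npages = chunksize >> lg_page;   /* 1024 pages */

        /* chunksize_mask is what the (size & chunksize_mask) == 0 asserts in
         * chunk_alloc()/chunk_dealloc() test: a size is valid only if it is a
         * multiple of chunksize. */
        printf("chunksize=%zu mask=%#zx npages=%zu\n", chunksize,
            chunksize_mask, chunk_npages);
        return 0;
    }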
void
chunk_prefork(void)
{
    malloc_mutex_prefork(&chunks_mtx);
    if (config_ivsalloc)
        rtree_prefork(chunks_rtree);
    chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{
    chunk_dss_postfork_parent();
    if (config_ivsalloc)
        rtree_postfork_parent(chunks_rtree);
    malloc_mutex_postfork_parent(&chunks_mtx);
}

void
chunk_postfork_child(void)
{
    chunk_dss_postfork_child();
    if (config_ivsalloc)
        rtree_postfork_child(chunks_rtree);
    malloc_mutex_postfork_child(&chunks_mtx);
}
deps/jemalloc/src/chunk_dss.c
deleted 100644 → 0
View file @ eaeba1b2

#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char *dss_prec_names[] = {
    "disabled",
    "primary",
    "secondary",
    "N/A"
};

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t dss_mtx;

/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;

/******************************************************************************/

static void *
chunk_dss_sbrk(intptr_t increment)
{
#ifdef JEMALLOC_HAVE_SBRK
    return (sbrk(increment));
#else
    not_implemented();
    return (NULL);
#endif
}

dss_prec_t
chunk_dss_prec_get(void)
{
    dss_prec_t ret;

    if (config_dss == false)
        return (dss_prec_disabled);
    malloc_mutex_lock(&dss_mtx);
    ret = dss_prec_default;
    malloc_mutex_unlock(&dss_mtx);
    return (ret);
}

bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{
    if (config_dss == false)
        return (true);
    malloc_mutex_lock(&dss_mtx);
    dss_prec_default = dss_prec;
    malloc_mutex_unlock(&dss_mtx);
    return (false);
}

void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
    void *ret;

    cassert(config_dss);
    assert(size > 0 && (size & chunksize_mask) == 0);
    assert(alignment > 0 && (alignment & chunksize_mask) == 0);

    /*
     * sbrk() uses a signed increment argument, so take care not to
     * interpret a huge allocation request as a negative increment.
     */
    if ((intptr_t)size < 0)
        return (NULL);

    malloc_mutex_lock(&dss_mtx);
    if (dss_prev != (void *)-1) {
        size_t gap_size, cpad_size;
        void *cpad, *dss_next;
        intptr_t incr;

        /*
         * The loop is necessary to recover from races with other
         * threads that are using the DSS for something other than
         * malloc.
         */
        do {
            /* Get the current end of the DSS. */
            dss_max = chunk_dss_sbrk(0);
            /*
             * Calculate how much padding is necessary to
             * chunk-align the end of the DSS.
             */
            gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
                chunksize_mask;
            /*
             * Compute how much chunk-aligned pad space (if any) is
             * necessary to satisfy alignment.  This space can be
             * recycled for later use.
             */
            cpad = (void *)((uintptr_t)dss_max + gap_size);
            ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
                alignment);
            cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
            dss_next = (void *)((uintptr_t)ret + size);
            if ((uintptr_t)ret < (uintptr_t)dss_max ||
                (uintptr_t)dss_next < (uintptr_t)dss_max) {
                /* Wrap-around. */
                malloc_mutex_unlock(&dss_mtx);
                return (NULL);
            }
            incr = gap_size + cpad_size + size;
            dss_prev = chunk_dss_sbrk(incr);
            if (dss_prev == dss_max) {
                /* Success. */
                dss_max = dss_next;
                malloc_mutex_unlock(&dss_mtx);
                if (cpad_size != 0)
                    chunk_unmap(cpad, cpad_size);
                if (*zero) {
                    VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
                    memset(ret, 0, size);
                }
                return (ret);
            }
        } while (dss_prev != (void *)-1);
    }
    malloc_mutex_unlock(&dss_mtx);

    return (NULL);
}
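The gap/cpad arithmetic in chunk_alloc_dss() can be followed with concrete numbers. This standalone sketch (the break address, chunk size, and alignment are made-up example values, not anything the allocator guarantees) mirrors the computation of the sbrk() increment:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void) {
        uintptr_t dss_max = 0x01234567; /* assumed current break */
        size_t chunksize = (size_t)1 << 22, chunksize_mask = chunksize - 1;
        size_t size = (size_t)1 << 22, alignment = (size_t)1 << 23;

        /* gap_size chunk-aligns the current break. */
        size_t gap_size = (chunksize - (dss_max & chunksize_mask)) &
            chunksize_mask;
        uintptr_t cpad = dss_max + gap_size;
        /* ret is the break rounded up to the requested alignment. */
        uintptr_t ret = (dss_max + alignment - 1) &
            ~(uintptr_t)(alignment - 1);
        size_t cpad_size = ret - cpad;
        intptr_t incr = gap_size + cpad_size + size;

        /* cpad..cpad+cpad_size is chunk-aligned leftover space, which
         * chunk_alloc_dss() hands to chunk_unmap() for recycling. */
        printf("gap=%zu cpad=%zu incr=%ld\n", gap_size, cpad_size, (long)incr);
        return 0;
    }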
bool
chunk_in_dss(void *chunk)
{
    bool ret;

    cassert(config_dss);

    malloc_mutex_lock(&dss_mtx);
    if ((uintptr_t)chunk >= (uintptr_t)dss_base
        && (uintptr_t)chunk < (uintptr_t)dss_max)
        ret = true;
    else
        ret = false;
    malloc_mutex_unlock(&dss_mtx);

    return (ret);
}

bool
chunk_dss_boot(void)
{
    cassert(config_dss);

    if (malloc_mutex_init(&dss_mtx))
        return (true);
    dss_base = chunk_dss_sbrk(0);
    dss_prev = dss_base;
    dss_max = dss_base;

    return (false);
}

void
chunk_dss_prefork(void)
{
    if (config_dss)
        malloc_mutex_prefork(&dss_mtx);
}

void
chunk_dss_postfork_parent(void)
{
    if (config_dss)
        malloc_mutex_postfork_parent(&dss_mtx);
}

void
chunk_dss_postfork_child(void)
{
    if (config_dss)
        malloc_mutex_postfork_child(&dss_mtx);
}

/******************************************************************************/
deps/jemalloc/src/chunk_mmap.c
deleted 100644 → 0
View file @ eaeba1b2

#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void *pages_map(void *addr, size_t size);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero);

/******************************************************************************/

static void *
pages_map(void *addr, size_t size)
{
    void *ret;

    assert(size != 0);

#ifdef _WIN32
    /*
     * If VirtualAlloc can't allocate at the given address when one is
     * given, it fails and returns NULL.
     */
    ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
        PAGE_READWRITE);
#else
    /*
     * We don't use MAP_FIXED here, because it can cause the *replacement*
     * of existing mappings, and we only want to create new mappings.
     */
    ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
        -1, 0);
    assert(ret != NULL);

    if (ret == MAP_FAILED)
        ret = NULL;
    else if (addr != NULL && ret != addr) {
        /*
         * We succeeded in mapping memory, but not in the right place.
         */
        if (munmap(ret, size) == -1) {
            char buf[BUFERROR_BUF];

            buferror(get_errno(), buf, sizeof(buf));
            malloc_printf("<jemalloc: Error in munmap(): %s\n", buf);
            if (opt_abort)
                abort();
        }
        ret = NULL;
    }
#endif
    assert(ret == NULL || (addr == NULL && ret != addr)
        || (addr != NULL && ret == addr));
    return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
    if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
    if (munmap(addr, size) == -1)
#endif
    {
        char buf[BUFERROR_BUF];

        buferror(get_errno(), buf, sizeof(buf));
        malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
            "VirtualFree"
#else
            "munmap"
#endif
            "(): %s\n", buf);
        if (opt_abort)
            abort();
    }
}

static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
    void *ret = (void *)((uintptr_t)addr + leadsize);

    assert(alloc_size >= leadsize + size);
#ifdef _WIN32
    {
        void *new_addr;

        pages_unmap(addr, alloc_size);
        new_addr = pages_map(ret, size);
        if (new_addr == ret)
            return (ret);
        if (new_addr)
            pages_unmap(new_addr, size);
        return (NULL);
    }
#else
    {
        size_t trailsize = alloc_size - leadsize - size;

        if (leadsize != 0)
            pages_unmap(addr, leadsize);
        if (trailsize != 0)
            pages_unmap((void *)((uintptr_t)ret + size), trailsize);
        return (ret);
    }
#endif
}

bool
pages_purge(void *addr, size_t length)
{
    bool unzeroed;

#ifdef _WIN32
    VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
    unzeroed = true;
#else
#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
#    define JEMALLOC_MADV_ZEROS true
#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
#    define JEMALLOC_MADV_PURGE MADV_FREE
#    define JEMALLOC_MADV_ZEROS false
#  else
#    error "No method defined for purging unused dirty pages."
#  endif
    int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
    unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
#  undef JEMALLOC_MADV_PURGE
#  undef JEMALLOC_MADV_ZEROS
#endif
    return (unzeroed);
}
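pages_purge() reports whether the purged pages may still hold old data; chunk_record() in chunk.c uses that result to set node->zeroed. A Linux-only standalone sketch of the MADV_DONTNEED case (which is why JEMALLOC_MADV_ZEROS is true for that variant, while MADV_FREE leaves contents undefined):

    /* Standalone sketch, assumes a Linux private anonymous mapping. */
    #include <assert.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int
    main(void) {
        size_t len = (size_t)1 << 20;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return 1;
        }
        memset(p, 0xa5, len);            /* dirty the pages */
        int err = madvise(p, len, MADV_DONTNEED);
        int unzeroed = (err != 0);       /* mirrors pages_purge()'s result */
        if (!unzeroed) {
            /* Discarded anonymous pages read back as zero on next touch. */
            assert(p[0] == 0 && p[len - 1] == 0);
        }
        printf("unzeroed=%d\n", unzeroed);
        munmap(p, len);
        return 0;
    }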
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
{
    void *ret, *pages;
    size_t alloc_size, leadsize;

    alloc_size = size + alignment - PAGE;
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    do {
        pages = pages_map(NULL, alloc_size);
        if (pages == NULL)
            return (NULL);
        leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
            (uintptr_t)pages;
        ret = pages_trim(pages, alloc_size, leadsize, size);
    } while (ret == NULL);

    assert(ret != NULL);
    *zero = true;
    return (ret);
}

void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
    void *ret;
    size_t offset;

    /*
     * Ideally, there would be a way to specify alignment to mmap() (like
     * NetBSD has), but in the absence of such a feature, we have to work
     * hard to efficiently create aligned mappings.  The reliable, but
     * slow method is to create a mapping that is over-sized, then trim the
     * excess.  However, that always results in one or two calls to
     * pages_unmap().
     *
     * Optimistically try mapping precisely the right amount before falling
     * back to the slow method, with the expectation that the optimistic
     * approach works most of the time.
     */

    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    ret = pages_map(NULL, size);
    if (ret == NULL)
        return (NULL);
    offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
    if (offset != 0) {
        pages_unmap(ret, size);
        return (chunk_alloc_mmap_slow(size, alignment, zero));
    }

    assert(ret != NULL);
    *zero = true;
    return (ret);
}
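The slow path described in the comment above (over-map by alignment minus a page, then unmap the unaligned head and tail) can be sketched with plain POSIX calls. This is a standalone illustration, not jemalloc's implementation; pages_trim() additionally handles the Windows case, and the sizes in main() are arbitrary example values:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void *
    map_aligned(size_t size, size_t alignment) {
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        size_t alloc_size = size + alignment - page;
        if (alloc_size < size) {    /* beware size_t wrap-around */
            return NULL;
        }
        char *pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (pages == MAP_FAILED) {
            return NULL;
        }
        uintptr_t addr = (uintptr_t)pages;
        size_t leadsize = ((addr + alignment - 1) & ~(alignment - 1)) - addr;
        size_t trailsize = alloc_size - leadsize - size;
        if (leadsize != 0) {        /* trim the unaligned head */
            munmap(pages, leadsize);
        }
        if (trailsize != 0) {       /* trim the excess tail */
            munmap(pages + leadsize + size, trailsize);
        }
        return pages + leadsize;
    }

    int
    main(void) {
        void *p = map_aligned((size_t)1 << 22, (size_t)1 << 22);
        printf("%p\n", p);
        return p == NULL;
    }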
bool
chunk_dealloc_mmap(void *chunk, size_t size)
{

    if (config_munmap)
        pages_unmap(chunk, size);

    return (config_munmap == false);
}
deps/jemalloc/src/ckh.c
View file @ f63e81c2

@@ -34,14 +34,24 @@
  * respectively.
  *
  ******************************************************************************/
-#define JEMALLOC_CKH_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_CKH_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/util.h"

 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */

-static bool ckh_grow(ckh_t *ckh);
-static void ckh_shrink(ckh_t *ckh);
+static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
+static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);

 /******************************************************************************/

@@ -49,27 +59,26 @@ static void ckh_shrink(ckh_t *ckh);
  * Search bucket for key and return the cell number if found; SIZE_T_MAX
  * otherwise.
  */
-JEMALLOC_INLINE_C size_t
-ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
-{
+static size_t
+ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
     ckhc_t *cell;
     unsigned i;

     for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
         cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
-        if (cell->key != NULL && ckh->keycomp(key, cell->key))
-            return ((bucket << LG_CKH_BUCKET_CELLS) + i);
+        if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
+            return (bucket << LG_CKH_BUCKET_CELLS) + i;
+        }
     }

-    return (SIZE_T_MAX);
+    return SIZE_T_MAX;
 }

 /*
  * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
  */
-JEMALLOC_INLINE_C size_t
-ckh_isearch(ckh_t *ckh, const void *key)
-{
+static size_t
+ckh_isearch(ckh_t *ckh, const void *key) {
     size_t hashes[2], bucket, cell;

     assert(ckh != NULL);

@@ -79,19 +88,19 @@ ckh_isearch(ckh_t *ckh, const void *key)
     /* Search primary bucket. */
     bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
     cell = ckh_bucket_search(ckh, bucket, key);
-    if (cell != SIZE_T_MAX)
-        return (cell);
+    if (cell != SIZE_T_MAX) {
+        return cell;
+    }

     /* Search secondary bucket. */
     bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
     cell = ckh_bucket_search(ckh, bucket, key);
-    return (cell);
+    return cell;
 }

-JEMALLOC_INLINE_C bool
+static bool
 ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
-    const void *data)
-{
+    const void *data) {
     ckhc_t *cell;
     unsigned offset, i;

@@ -99,7 +108,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
      * Cycle through the cells in the bucket, starting at a random position.
      * The randomness avoids worst-case search overhead as buckets fill up.
      */
-    prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
+    offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
+        LG_CKH_BUCKET_CELLS);
     for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
         cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
             ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];

@@ -107,11 +117,11 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
             cell->key = key;
             cell->data = data;
             ckh->count++;
-            return (false);
+            return false;
         }
     }

-    return (true);
+    return true;
 }

 /*

@@ -120,10 +130,9 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
  * eviction/relocation procedure until either success or detection of an
  * eviction/relocation bucket cycle.
  */
-JEMALLOC_INLINE_C bool
+static bool
 ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
-    void const **argdata)
-{
+    void const **argdata) {
     const void *key, *data, *tkey, *tdata;
     ckhc_t *cell;
     size_t hashes[2], bucket, tbucket;

@@ -141,7 +150,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
          * were an item for which both hashes indicated the same
          * bucket.
          */
-        prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
+        i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
+            LG_CKH_BUCKET_CELLS);
         cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
         assert(cell->key != NULL);

@@ -181,18 +191,18 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
         if (tbucket == argbucket) {
             *argkey = key;
             *argdata = data;
-            return (true);
+            return true;
         }

         bucket = tbucket;
-        if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
-            return (false);
+        if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+            return false;
+        }
     }
 }

-JEMALLOC_INLINE_C bool
-ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
-{
+static bool
+ckh_try_insert(ckh_t *ckh, void const **argkey, void const **argdata) {
     size_t hashes[2], bucket;
     const void *key = *argkey;
     const void *data = *argdata;

@@ -201,27 +211,28 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
     /* Try to insert in primary bucket. */
     bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
-    if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
-        return (false);
+    if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+        return false;
+    }

     /* Try to insert in secondary bucket. */
     bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
-    if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
-        return (false);
+    if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+        return false;
+    }

     /*
      * Try to find a place for this item via iterative eviction/relocation.
      */
-    return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
+    return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata);
 }

 /*
  * Try to rebuild the hash table from scratch by inserting all items from the
  * old table into the new.
  */
-JEMALLOC_INLINE_C bool
-ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
-{
+static bool
+ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
     size_t count, i, nins;
     const void *key, *data;

@@ -233,22 +244,20 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
             data = aTab[i].data;
             if (ckh_try_insert(ckh, &key, &data)) {
                 ckh->count = count;
-                return (true);
+                return true;
             }
             nins++;
         }
     }

-    return (false);
+    return false;
 }

 static bool
-ckh_grow(ckh_t *ckh)
-{
+ckh_grow(tsd_t *tsd, ckh_t *ckh) {
     bool ret;
     ckhc_t *tab, *ttab;
-    size_t lg_curcells;
-    unsigned lg_prevbuckets;
+    unsigned lg_prevbuckets, lg_curcells;

 #ifdef CKH_COUNT
     ckh->ngrows++;

@@ -265,12 +274,13 @@ ckh_grow(ckh_t *ckh)
         size_t usize;

         lg_curcells++;
-        usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-        if (usize == 0) {
+        usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+        if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
             ret = true;
             goto label_return;
         }
-        tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+        tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
+            true, NULL, true, arena_ichoose(tsd, NULL));
         if (tab == NULL) {
             ret = true;
             goto label_return;

@@ -281,28 +291,27 @@ ckh_grow(ckh_t *ckh)
         tab = ttab;
         ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

-        if (ckh_rebuild(ckh, tab) == false) {
-            idalloc(tab);
+        if (!ckh_rebuild(ckh, tab)) {
+            idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
             break;
         }

         /* Rebuilding failed, so back out partially rebuilt table. */
-        idalloc(ckh->tab);
+        idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
         ckh->tab = tab;
         ckh->lg_curbuckets = lg_prevbuckets;
     }

     ret = false;
 label_return:
-    return (ret);
+    return ret;
 }

 static void
-ckh_shrink(ckh_t *ckh)
-{
+ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
     ckhc_t *tab, *ttab;
-    size_t lg_curcells, usize;
-    unsigned lg_prevbuckets;
+    size_t usize;
+    unsigned lg_prevbuckets, lg_curcells;

     /*
      * It is possible (though unlikely, given well behaved hashes) that the

@@ -310,10 +319,12 @@ ckh_shrink(ckh_t *ckh)
      */
     lg_prevbuckets = ckh->lg_curbuckets;
     lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
-    usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-    if (usize == 0)
+    usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+    if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
         return;
-    tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+    }
+    tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
+        true, arena_ichoose(tsd, NULL));
     if (tab == NULL) {
         /*
          * An OOM error isn't worth propagating, since it doesn't

@@ -327,8 +338,8 @@ ckh_shrink(ckh_t *ckh)
     tab = ttab;
     ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

-    if (ckh_rebuild(ckh, tab) == false) {
-        idalloc(tab);
+    if (!ckh_rebuild(ckh, tab)) {
+        idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
 #ifdef CKH_COUNT
         ckh->nshrinks++;
 #endif

@@ -336,7 +347,7 @@ ckh_shrink(ckh_t *ckh)
     }

     /* Rebuilding failed, so back out partially rebuilt table. */
-    idalloc(ckh->tab);
+    idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
     ckh->tab = tab;
     ckh->lg_curbuckets = lg_prevbuckets;
 #ifdef CKH_COUNT

@@ -345,8 +356,8 @@ ckh_shrink(ckh_t *ckh)
 }

 bool
-ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
-{
+ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+    ckh_keycomp_t *keycomp) {
     bool ret;
     size_t mincells, usize;
     unsigned lg_mincells;

@@ -366,29 +377,31 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
     ckh->count = 0;

     /*
-     * Find the minimum power of 2 that is large enough to fit aBaseCount
+     * Find the minimum power of 2 that is large enough to fit minitems
      * entries.  We are using (2+,2) cuckoo hashing, which has an expected
      * maximum load factor of at least ~0.86, so 0.75 is a conservative load
-     * factor that will typically allow 2^aLgMinItems to fit without ever
+     * factor that will typically allow mincells items to fit without ever
      * growing the table.
      */
     assert(LG_CKH_BUCKET_CELLS > 0);
     mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
     for (lg_mincells = LG_CKH_BUCKET_CELLS;
         (ZU(1) << lg_mincells) < mincells;
-        lg_mincells++)
-        ; /* Do nothing. */
+        lg_mincells++) {
+        /* Do nothing. */
+    }
     ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
     ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
     ckh->hash = hash;
     ckh->keycomp = keycomp;

-    usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
-    if (usize == 0) {
+    usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
+    if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
         ret = true;
         goto label_return;
     }
-    ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+    ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
+        NULL, true, arena_ichoose(tsd, NULL));
     if (ckh->tab == NULL) {
         ret = true;
         goto label_return;

@@ -396,20 +409,18 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
     ret = false;
 label_return:
-    return (ret);
+    return ret;
 }

 void
-ckh_delete(ckh_t *ckh)
-{
+ckh_delete(tsd_t *tsd, ckh_t *ckh) {
     assert(ckh != NULL);

 #ifdef CKH_VERBOSE
     malloc_printf(
-        "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
-        " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
-        " nrelocs: %"PRIu64"\n", __func__, ckh,
+        "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
+        " nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
+        " nrelocs: %"FMTu64"\n", __func__, ckh,
         (unsigned long long)ckh->ngrows,
         (unsigned long long)ckh->nshrinks,
         (unsigned long long)ckh->nshrinkfails,

@@ -417,43 +428,42 @@ ckh_delete(ckh_t *ckh)
         (unsigned long long)ckh->nrelocs);
 #endif

-    idalloc(ckh->tab);
-    if (config_debug)
-        memset(ckh, 0x5a, sizeof(ckh_t));
+    idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
+    if (config_debug) {
+        memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
+    }
 }

 size_t
-ckh_count(ckh_t *ckh)
-{
+ckh_count(ckh_t *ckh) {
     assert(ckh != NULL);

-    return (ckh->count);
+    return ckh->count;
 }

 bool
-ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
-{
+ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
     size_t i, ncells;

     for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
         LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
         if (ckh->tab[i].key != NULL) {
-            if (key != NULL)
+            if (key != NULL) {
                 *key = (void *)ckh->tab[i].key;
-            if (data != NULL)
+            }
+            if (data != NULL) {
                 *data = (void *)ckh->tab[i].data;
+            }
             *tabind = i + 1;
-            return (false);
+            return false;
         }
     }

-    return (true);
+    return true;
 }

 bool
-ckh_insert(ckh_t *ckh, const void *key, const void *data)
-{
+ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
     bool ret;

     assert(ckh != NULL);

@@ -464,7 +474,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
 #endif

     while (ckh_try_insert(ckh, &key, &data)) {
-        if (ckh_grow(ckh)) {
+        if (ckh_grow(tsd, ckh)) {
             ret = true;
             goto label_return;
         }

@@ -472,22 +482,24 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
     ret = false;
 label_return:
-    return (ret);
+    return ret;
 }

 bool
-ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
-{
+ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+    void **data) {
     size_t cell;

     assert(ckh != NULL);

     cell = ckh_isearch(ckh, searchkey);
     if (cell != SIZE_T_MAX) {
-        if (key != NULL)
+        if (key != NULL) {
             *key = (void *)ckh->tab[cell].key;
-        if (data != NULL)
+        }
+        if (data != NULL) {
             *data = (void *)ckh->tab[cell].data;
+        }
         ckh->tab[cell].key = NULL;
         ckh->tab[cell].data = NULL; /* Not necessary. */

@@ -497,54 +509,50 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
         + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
         > ckh->lg_minbuckets) {
             /* Ignore error due to OOM. */
-            ckh_shrink(ckh);
+            ckh_shrink(tsd, ckh);
         }

-        return (false);
+        return false;
     }

-    return (true);
+    return true;
 }

 bool
-ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
-{
+ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
     size_t cell;

     assert(ckh != NULL);

     cell = ckh_isearch(ckh, searchkey);
     if (cell != SIZE_T_MAX) {
-        if (key != NULL)
+        if (key != NULL) {
             *key = (void *)ckh->tab[cell].key;
-        if (data != NULL)
+        }
+        if (data != NULL) {
             *data = (void *)ckh->tab[cell].data;
-        return (false);
+        }
+        return false;
     }

-    return (true);
+    return true;
 }

 void
-ckh_string_hash(const void *key, size_t r_hash[2])
-{
+ckh_string_hash(const void *key, size_t r_hash[2]) {
     hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
 }

 bool
-ckh_string_keycomp(const void *k1, const void *k2)
-{
-    assert(k1 != NULL);
-    assert(k2 != NULL);
+ckh_string_keycomp(const void *k1, const void *k2) {
+    assert(k1 != NULL);
+    assert(k2 != NULL);

-    return (strcmp((char *)k1, (char *)k2) ? false : true);
+    return !strcmp((char *)k1, (char *)k2);
 }

 void
-ckh_pointer_hash(const void *key, size_t r_hash[2])
-{
+ckh_pointer_hash(const void *key, size_t r_hash[2]) {
     union {
         const void *v;
         size_t i;

@@ -556,8 +564,6 @@ ckh_pointer_hash(const void *key, size_t r_hash[2])
 }

 bool
-ckh_pointer_keycomp(const void *k1, const void *k2)
-{
-    return ((k1 == k2) ? true : false);
+ckh_pointer_keycomp(const void *k1, const void *k2) {
     return (k1 == k2);
 }
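The ckh_new() hunk above keeps the table-sizing formula unchanged: minitems is rounded up with a 0.75 load factor, then the smallest power-of-two cell count that holds it is chosen. A standalone sketch of that arithmetic, assuming LG_CKH_BUCKET_CELLS is 2 (4 cells per bucket, the usual value for 64-byte cache lines and 8-byte pointers) and a made-up minitems:

    #include <stddef.h>
    #include <stdio.h>

    int
    main(void) {
        size_t minitems = 37;          /* assumed example */
        unsigned lg_bucket_cells = 2;  /* assumed LG_CKH_BUCKET_CELLS */

        size_t mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
        unsigned lg_mincells;
        for (lg_mincells = lg_bucket_cells;
            ((size_t)1 << lg_mincells) < mincells; lg_mincells++) {
            /* Do nothing. */
        }
        /* minitems=37 -> mincells=52 -> lg_mincells=6: 64 cells, 16 buckets */
        printf("mincells=%zu lg_mincells=%u buckets=%zu\n", mincells,
            lg_mincells, (size_t)1 << (lg_mincells - lg_bucket_cells));
        return 0;
    }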