Unverified commit 07d54dc5, authored by Oran Agra, committed by GitHub

Merge pull request #12115 from oranagra/update_jemalloc_5_3_0

Upgrade to jemalloc 5.3.0, hoping to resolve a potential deadlock in a fork child;
see https://github.com/jemalloc/jemalloc/issues/2402

Steps:
* Upgrade the subtree according to the instructions in deps/README.
* Update iget_defrag_hint to follow the changes made to arena_dalloc_no_tcache (a hedged sketch of the new lookup pattern follows below).
parents 42dd98ec 0897c8af
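
For the second step, the relevant change is that in 5.3.0 arena_dalloc_no_tcache resolves a pointer's metadata through the global emap (arena_emap_global, visible in the arena_externs.h hunk below) instead of the old rtree/extent_t path, and slabs are now typed as edata_t. The following is a minimal, hedged sketch of the lookup shape the updated iget_defrag_hint has to follow, not the verbatim Redis patch; the real hint also weighs slab utilization under the bin lock, elided here.

JEMALLOC_ALWAYS_INLINE int
iget_defrag_hint(tsdn_t *tsdn, void *ptr) {
	int defrag = 0;
	emap_alloc_ctx_t alloc_ctx;
	/* Same metadata lookup arena_dalloc_no_tcache performs in 5.3. */
	emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
	if (likely(alloc_ctx.slab)) {
		/* Small allocation: the owning slab is now an edata_t. */
		edata_t *slab = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
		/* Placeholder heuristic only; the real patch compares this
		 * slab's utilization against the bin-wide average. */
		defrag = edata_nfree_get(slab) > 0;
	}
	return defrag;
}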
@@ -2,59 +2,67 @@
#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/stats.h"
/*
* When the amount of pages to be purged exceeds this amount, deferred purge
* should happen.
*/
#define ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD UINT64_C(1024)
extern ssize_t opt_dirty_decay_ms;
extern ssize_t opt_muzzy_decay_ms;
extern percpu_arena_mode_t opt_percpu_arena;
extern const char *percpu_arena_mode_names[];
extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
extern div_info_t arena_binind_div_info[SC_NBINS];
extern malloc_mutex_t arenas_lock;
extern emap_t arena_emap_global;
extern size_t opt_oversize_threshold;
extern size_t oversize_threshold;
/*
* arena_bin_offsets[binind] is the offset of the first bin shard for size class
* binind.
*/
extern uint32_t arena_bin_offsets[SC_NBINS];
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
bin_stats_t *bstats, arena_stats_large_t *lstats,
arena_stats_extents_t *estats);
void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
#ifdef JEMALLOC_JET
size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
#endif
extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
size_t usize, size_t alignment, bool *zero);
bin_stats_data_t *bstats, arena_stats_large_t *lstats,
pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats);
void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
size_t usize, size_t alignment, bool zero);
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
extent_t *extent);
edata_t *edata);
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
edata_t *edata, size_t oldsize);
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
ssize_t arena_dirty_decay_ms_get(arena_t *arena);
bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_get(arena_t *arena);
bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
edata_t *edata, size_t oldsize);
bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
ssize_t decay_ms);
ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all);
uint64_t arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena);
void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info,
bool zero);
typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *);
extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;
void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
const unsigned nfill);
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
szind_t ind, bool zero);
@@ -63,8 +71,12 @@ void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
bool slow_path);
void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, extent_t *extent, void *ptr);
void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab);
void arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
edata_t *slab, bin_t *bin);
void arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
edata_t *slab, bin_t *bin);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero, size_t *newsize);
@@ -72,6 +84,9 @@ void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args);
dss_prec_t arena_dss_prec_get(arena_t *arena);
ehooks_t *arena_get_ehooks(arena_t *arena);
extent_hooks_t *arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
extent_hooks_t *extent_hooks);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_dirty_decay_ms_default_get(void);
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
@@ -82,14 +97,15 @@ bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
size_t arena_extent_sn_next(arena_t *arena);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
bool arena_init_huge(void);
bool arena_is_huge(unsigned arena_ind);
arena_t *arena_choose_huge(tsd_t *tsd);
bin_t *arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
unsigned *binshard);
void arena_boot(sc_data_t *sc_data);
size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
void **ptrs, size_t nfill, bool zero);
bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
@@ -98,6 +114,7 @@ void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
void arena_prefork8(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
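The arena_externs.h hunks above capture the two mechanical renames that ripple through the rest of the diff: extent_t becomes edata_t, and arena_extent_alloc_large now takes zero by value instead of as an in/out pointer. An illustrative before/after for a caller (tsdn, arena, usize and alignment assumed in scope; this is not code from the patch):

/* 5.2.x: extent_t handle, zero as an in/out parameter. */
bool zero = false;
extent_t *extent = arena_extent_alloc_large(tsdn, arena, usize,
    alignment, &zero);

/* 5.3.0: edata_t handle, zero passed by value. */
edata_t *edata = arena_extent_alloc_large(tsdn, arena, usize,
    alignment, false);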
@@ -3,7 +3,7 @@
static inline unsigned
arena_ind_get(const arena_t *arena) {
return base_ind_get(arena->base);
return arena->ind;
}
static inline void
@@ -21,37 +21,4 @@ arena_internal_get(arena_t *arena) {
return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
}
static inline bool
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
cassert(config_prof);
if (likely(prof_interval == 0 || !prof_active_get_unlocked())) {
return false;
}
return prof_accum_add(tsdn, &arena->prof_accum, accumbytes);
}
static inline void
percpu_arena_update(tsd_t *tsd, unsigned cpu) {
assert(have_percpu_arena);
arena_t *oldarena = tsd_arena_get(tsd);
assert(oldarena != NULL);
unsigned oldind = arena_ind_get(oldarena);
if (oldind != cpu) {
unsigned newind = cpu;
arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
assert(newarena != NULL);
/* Set new arena/tcache associations. */
arena_migrate(tsd, oldind, newind);
tcache_t *tcache = tcache_get(tsd);
if (tcache != NULL) {
tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
newarena);
}
}
}
#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
#include "jemalloc/internal/bitmap.h"
struct arena_slab_data_s {
/* Per region allocated/deallocated bitmap. */
bitmap_t bitmap[BITMAP_GROUPS_MAX];
};
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */
@@ -3,21 +3,14 @@
#include "jemalloc/internal/sc.h"
/* Maximum number of regions in one slab. */
#define LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
/* Default decay times in milliseconds. */
#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
#define MUZZY_DECAY_MS_DEFAULT (0)
/* Number of event ticks between time checks. */
#define DECAY_NTICKS_PER_UPDATE 1000
#define ARENA_DECAY_NTICKS_PER_UPDATE 1000
typedef struct arena_slab_data_s arena_slab_data_t;
typedef struct arena_decay_s arena_decay_t;
typedef struct arena_s arena_t;
typedef struct arena_tdata_s arena_tdata_t;
typedef struct alloc_ctx_s alloc_ctx_t;
typedef enum {
percpu_arena_mode_names_base = 0, /* Used for options processing. */
@@ -48,4 +41,18 @@ typedef enum {
*/
#define OVERSIZE_THRESHOLD_DEFAULT (8 << 20)
struct arena_config_s {
/* extent hooks to be used for the arena */
extent_hooks_t *extent_hooks;
/*
* Use extent hooks for metadata (base) allocations when true.
*/
bool metadata_use_hooks;
};
typedef struct arena_config_s arena_config_t;
extern const arena_config_t arena_config_default;
#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
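
With arena_config_t in place, arena_new (see the arena_externs.h hunk earlier) takes a const arena_config_t * instead of a bare extent_hooks_t *. A hedged sketch of the new construction path; my_hooks is a hypothetical user-supplied hook table, and tsdn/ind are assumed in scope:

arena_config_t config = arena_config_default; /* start from the defaults */
config.extent_hooks = &my_hooks;              /* hypothetical custom hooks */
config.metadata_use_hooks = false; /* keep base allocations on default hooks */
arena_t *arena = arena_new(tsdn, ind, &config);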
@@ -12,8 +12,9 @@ extern background_thread_info_t *background_thread_info;
bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
bool background_threads_enable(tsd_t *tsd);
bool background_threads_disable(tsd_t *tsd);
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, size_t npages_new);
bool background_thread_is_started(background_thread_info_t* info);
void background_thread_wakeup_early(background_thread_info_t *info,
nstime_t *remaining_sleep);
void background_thread_prefork0(tsdn_t *tsdn);
void background_thread_prefork1(tsdn_t *tsdn);
void background_thread_postfork_parent(tsdn_t *tsdn);
@@ -27,6 +28,6 @@ extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
#endif
bool background_thread_boot0(void);
bool background_thread_boot1(tsdn_t *tsdn);
bool background_thread_boot1(tsdn_t *tsdn, base_t *base);
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */
[two collapsed file diffs omitted]
#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
#define JEMALLOC_INTERNAL_BASE_INLINES_H
static inline unsigned
base_ind_get(const base_t *base) {
return base->ind;
}
static inline bool
metadata_thp_enabled(void) {
return (opt_metadata_thp != metadata_thp_disabled);
}
#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */
[three collapsed file diffs omitted]