Commit 6d23d3ac authored by Oran Agra

Squashed 'deps/jemalloc/' changes from ea6b3e973..54eaed1d8

54eaed1d8 Merge branch 'dev'
304c91982 Update ChangeLog for 5.3.0.
8cb814629 Make the default option of zero realloc match the system allocator.
66c889500 Make test/unit/background_thread_enable more conservative.
a7d73dd4c Update TUNING.md to include the new tcache_max option.
254b01191 Small doc tweak of opt.trust_madvise.
f5e840bbf Minor typo fix in doc.
ceca07d2c Correct the name of stats.mutexes.prof_thds_data in doc.
391bad4b9 Avoid abort() in test/integration/cpp/infallible_new_true.
9a242f16d fix some typos
0e29ad4ef Rename zero_realloc option "strict" to "alloc".
5841b6dbe Update FreeBSD image to 12.3 for cirrus ci.
ed5fc14b2 Use volatile to workaround buffer overflow false positives.
25517b852 Reoreder TravisCI jobs to optimize CI time
8a49b62e7 Enable TravisCI for Windows
fdb6c1016 Add FreeBSD to TravisCI
a93931537 Do not disable SEC by default for 64k pages platforms
eaaa368ba Add comments and use meaningful vars in sz_psz2ind.
5bf03f8ce Implement PAGE_FLOOR macro
52631c90f Fix size class calculation for sec
7ae0f15c5 Add a default page size when cross-compile for Apple M1.
eb65d1b07 Fix FreeBSD system jemalloc TSD cleanup
78b58379c Fix possible "nmalloc >= ndalloc" assertion.
ca709c313 Fix failed assertion due to racy memory access
063d134ae Properly detect background thread support on Darwin.
a4e81221c Document 'make uninstall'
20f9802e4 Avoid overflow warnings in test/unit/safety_check.
8c59c44ff Add a dependency checking step at the end of malloc_conf_init.
efc539c04 Initialize prof_leak during prof init.
002f0e939 Disable TravisCI jobs generation for Windows
01a293fc0 Add Windows to TravisCI
b798fabdf Add prof_leak_error option
eafd2ac39 Forbid spaces in prefix and exec_prefix
36a09ba2c Forbid spaces in install suffix
640c3c72e Add support for 'make uninstall'
f15d8f3b4 Echo installed files via verbose 'install' command
eb196815d Avoid calculating size of size class twice & delete sc_data_global.
011449f17 Fix doc build with install-suffix.
8b49eb132 Fix the HELP_STRING of --enable-doc.
ddb170b1d Simplify arena_migrate() to take arena_t* instead of indices.
648b3b9f7 Lower the num_threads in the stress test of test/unit/prof_recent
d66162e03 Fix the extent state checking on the merge error path.
c9946fa7e FreeBSD also needs the OS-X "don't declare system functions as nothrow" fix since it also has jemalloc in the base system
89fe8ee6b Use the isb instruction instead of yield for spin locks on arm
6230cc88b Add background thread sleep retry in test/unit/hpa_background_thread
61978bbe6 Purge all if the last thread migrated away from an arena.
c91e62dd3 #include <features.h> as requested
18510020e Fix symbol conflict with musl libc
f509703af Fix two conversion warnings in tcache.
067c2da07 Fix unnecessary returns in san_(un)guard_pages_two_sided.
d660683d3 Fix test config of lg_san_uaf_align.
eabe88916 Rename full_position to low_bound in cache_bin.h.
dfdd7562f Rename san_enabled() to san_guard_enabled().
01d61a3c6 Fix a conversion warning.
8b34a788b Fix an used-uninitialized warning (false positive).
e491cef9a Add stats for stashed bytes in tcache.
b75822bc6 Implement use-after-free detection using junk and stash.
06aac61c4 Split the core logic of tcache flush into a separate function.
d038160f3 Fix shadowed variable usage.
bd70d8fc0 Add the profiling settings for tests explicit.
e491df1d2 Fix warnings when using autoheader.
60b9637cc Only invoke malloc_cpu_count_is_deterministic() when necessary.
837b37c4c Fix the time-since computation in HPA.
310af725b Add nstime_ns_since which obtains the duration since the input time.
cafe9a315 Disable percpu arena in case of non deterministic CPU count
bb5052ce9 Fix base_ehooks_get_for_metadata
9015e129b Update visual studio projects
d90655390 San: Create a function for committing and zeroing
800ce49c1 San: Bump alloc frequently reused guarded allocations
f56f5b993 Pass 'frequent_reuse' hint to PAI
2c70e8d35 Rename 'arena_decay' to 'arena_util'
0f6da1257 San: Implement bump alloc
34b00f896 San: Avoid running san tests with prof enabled
62f9c54d2 San: Rename 'guard' to 'san'
d9bbf539f CI: Refactor gen_travis.py
7dcf77809 Mark slab as true on sized dealloc fast path.
af6ee27c0 Enforce abort_conf:true when malloc_conf is not fully recognized.
113e8e68e freebsd 14 build fix proposal.
3b3257a70 Correct opt.prof_leak documentation
cdabe908d Track the initialized state of nstime_t on debug build.
400c59895 Fix uninitialized nstime reading / updating on the stack in hpa.
8b81d3f21 Fix the initialization of last_event in thread event init.
6bdb4f5ab Check prof_active in addtion to opt_prof during batch_alloc().
37342a4d3 Add ctl interface for experimental_infallible_new.
6cb585b13 San: Unguard guarded slabs during arena destruction
b6a7a535b Optimize away a branch on the free fastpath.
4d56aaeca Optimize away the tsd_fast() check on free fastpath.
26f5257b8 Remove declaration of an undefined function
215961541 Add new architecture loongarch.
8daac7958 Redefine functions with test hooks only for tests
c9ebff0fd Initialize deferred_work_generated
912324a1a Add debug check outside of the loop in hpa_alloc_batch.
cf9724531 Darwin malloc_size override support proposal.
ab0f1604b Delay the atexit call to prof_log_start().
11b6db744 CPU affinity on BSD platforms support.
83f329402 Small refactors around 7bb05e0.
3c4b717ff Remove unused header base_structs.h.
deb8e62a8 Implement guard pages.
7bb05e04b add experimental.arenas_create_ext mallctl
a9031a097 Allow setting a dump hook
f7d46b811 Allow setting custom backtrace hook
523cfa55c Guard prof related mallctl with opt_prof.
6e848a005 Remove opt_background_thread_hpa_interval_max_ms
8229cc77c Wake up background threads on demand
97da57c13 HPA: Add min_purge_interval_ms option
b8b8027f1 Allow PAI to calculate time until deferred work
26140dd24 Reject --enable-prof-libunwind without --enable-prof
e5062e9fb Makefile.in: make sure doc generated before install
8b24cb8fd Don't assume initialized arena in the default alloc hook.
c01a885e9 HPA: Correctly calculate retained pages
2c625d5cd Fix warnings when compiled with clang
9d02bdc88 Port gen_run_tests.py to python3
5884a076f Rename prof.dump_prefix to prof.prefix
6a0160071 Add Cirrus CI testing matrix
f58064b93 Verify that HPA is used before calling its functions
27f71242b Mutex: Tweak internal spin count.
6f41ba55e Mutex: Make spin count configurable.
dae24589b PH: Insert-below-min fast-path.
40d53e007 ph: Add aux-list counting and pre-merging.
dcb7b83fa Eset: Cache summary information for heap edatas.
252e0942d Eset: Pull per-pszind data into structs.
dc0a4b8b2 Edata: Pull out comparison fields into a summary.
0170dd198 Edata: Fix a couple typos.
08a4cc096 Pairing heap: inline functions instead of macros.
92a1e38f5 edata_cache: Allow unbounded fast caching.
d93eef2f4 HPA: Introduce a redesigned hpa_central_t.
e09eac1d4 Remove hpa_central.
c88fe355e Add unit tests for decay
aaea4fd1e Add more documentation to decay.c
4b633b9a8 Clean up background thread sleep computation
6630c5989 HPA: Hugification hysteresis.
113938b6f HPA: Pull out a hooks type.
1d4a7666d HPA: Do deferred operations on background threads.
583284f2d Add HPA deferral functionality.
ace329d11 HPA batch dalloc: Just do one deferred work check.
47d8a7e6b psset: Purge empty slabs first.
41fd56605 HPA: Purge across retained extents.
347523517 PAI: Fix a typo.
9c42ed2d1 Travis: Don't test "clang" on OS X.
d202218e8 HPA: Fix typos with big performance implications.
de033f56c mpsc_queue: Add module.
4452a4812 Add opt.experimental_infallible_new.
0689448b1 Travis: Unbreak the builds.
4fb93a18e extent_can_acquire_neighbor typo fix
2381efab5 ARC: add Minimum allocation alignment
2c0f4c2ac Fix typo in configure.ac: experimetal -> experimental
36c6bfb96 SEC: Allow arbitrarily many shards, cached sizes.
11beab38b Added --debug-syms-by-id option
08089589f Fix an interaction between the oversize_threshold test and bgthds.
541793821 Red-black tree: add summarize/filter.
b2c08ef2e RB unit tests: don't test reentrantly.
aea91b8c3 Clean up some minor data structure inconsistencies
1f688490e Stats: Fix a printing bug when hpa_dirty_mult = -1
4f7cb3a41 Sized deallocation: fix a typo.
12cd13cd4 Fix thread.name/prof_sys_thread_name interaction
304cdbb13 Fix a prof_recent/prof_sys_thread_name interaction
9b523c6c1 Refactor the locking in extent_recycle().
ce68f326b Avoid the release & re-acquire of the ecache locks around the merge hook.
7dc77527b Delete the mutex_pool module.
03d95cba8 Remove the unnecessary arena_ind_set in base_alloc_edata().
3093d9455 Move the edata mergeability related functions to extent.h.
7c964b035 Add rtree_write_range(): writing the same content to multiple leaf elements.
add636596 Stop checking head state in the merge hook.
49b7d7f0a Passing down the original edata on the expand path.
178493968 Use rtree tracked states to protect edata outside of ecache locks.
9ea235f8f Add witness_assert_positive_depth_to_rank().
4d8c22f9a Store edata->state in rtree leaf and make edata_t 128B aligned.
70d1541c5 Track extent is_head state in rtree leaf.
862219e46 Add quiescence sync before deleting base during arena_destroy.
a137a6825 Remove redundant declaration, pac_retain_grow_limit_get_set was declared twice in pac.h
2ae1ef7db Fix doc large size 54 KiB error
61afb6a40 Fix locking on arena_i_destroy_ctl().
9193ea224 Cirrus: fix build.
391307714 Mark head state during dss alloc.
11127240c Remove redundant enable-debug definition in configure.
22be724af Set is_head in extent_alloc_wrapper w/ retain.
73ca4b8ef HPA: Use dirtiest-first purging.
0f6c420f8 HPA: Make purging/hugifying more principled.
6bddb92ad psset: Rename "bitmap" to "pageslab_bitmap".
154aa5fcc Use the flat bitmap for eset and psset bitmaps.
271a676dc hpdata: early bailout for longest free range.
d21d5b46b Edata: Move sn into its own field.
fb327368d SEC: Expand option configurability.
ce9386370 HPA: Implement batch allocation.
cdae6706a SEC: Use batch fills.
480f3b11c Add a batch allocation interface to the PAI.
bf448d7a5 SEC: Reduce lock hold times.
1944ebbe7 HPA: Implement batch deallocation.
f47b4c2cd PAI/SEC: Add a dalloc_batch function.
4b8870c7d SEC: Fix a comment typo.
cde7097ec Update INSTALL.md to mention 'autoconf'
a11be5033 Implement opt.cache_oblivious.
8c5e5f50a Fix stats for "tcache_max" (was "lg_tcache_max")
041145c27 Report the correct and wrong sizes on sized dealloc bug detection.
f3b2668b3 Report the offending pointer on sized dealloc bug detection.
edbfe6912 Inline malloc fastpath into operator new.
79f81a373 HPA: Make dirty_mult configurable.
32dd15379 HPA: Make dehugification threshold configurable.
4790db15e HPA: make the hugification threshold configurable.
b3df80bc7 Pull HPA options into a containing struct.
bdb7307ff fxp: Add FXP_INIT_PERCENT
caef4c286 FXP: add fxp_mul_frac.
56e85c0e4 HPA: Use a whole-shard purging heuristic.
dc886e560 hpdata: Return the number of pages to be purged.
9fd9c876b psset: keep aggregate stats.
da63f23e6 HPA: Track pending purges/hugifies in the psset.
0ea3d6307 CTL, Stats: report HPA empty slab stats.
bf64557ed Move empty slab tracking to the psset.
99fc0717e psset: Reconceptualize insertion/removal.
061cabb71 HPA stats: report retained instead of inactive.
d3e5ea03c HPA: Track dirty stats.
68a1666e9 hpdata: Rename "dirty" to "touched".
be0d7a53f HPA: Don't track inactive pages.
55e0f60ca psset stats: Simplify handling.
94cd9444c HPA: Some minor reformattings.
b25ee5d88 HPA: Add purge stats.
746ea3de6 HPA stats: Allow some derived stats.
30b9e8162 HPA: Generalize purging.
70692cfb1 hpdata: Add state changing helpers.
9b75808be flat bitmap: Add a bitwise and/or/not.
2ae966222 hpdata: track per-page dirty state.
ff4086aa6 hpdata: count active pages instead of free ones.
3624dd42f hpdata: Add a comment for hpdata_consistent.
20140629b Bin: Move stats closer to the mutex.
c259323ab Use ticker_geom_t for arena tcache decay.
8edfc5b17 Add ticker_geom_t.
396732981 Arena: share bin offsets in a global.
2fcbd1811 Cache bin: Don't reverse flush order.
4c46e1136 Cache an arena's index in the arena.
229994a20 Tcache flush: keep common path state in registers.
31a629c3d Tcache flush: prefetch edata contents.
9f9247a62 Tcache fluhing: increase cache miss parallelism.
181ba7fd4 Tcache flush: Add an emap "batch lookup" path.
c007c537f Tcache flush: Unify edata lookup path.
35a855260 Mac OS: Tag mapped pages.
f6699803e Fix duration in prof log
a943172b7 Add runtime detection for MADV_DONTNEED zeroes pages (mostly for qemu)
2e3104ba0 Update config.{sub,guess} to support support-aarch64-apple-darwin as a target
a011c4c22 cache_bin: Separate out local and remote accesses.
14d689c0f Add prof stats mutex stats
9f71b5779 Output prof stats in stats print
1f1a0231e Split macros for initializing stats headers
4352cbc21 Add alignment tests for prof stats
54f3351f1 Add mallctl for prof stats fetching
40fa4d29d Track per size class internal fragmentation
afa489c3c Record request size in prof info
f9bb8dede Un-force-inline do_rallocx.
a9fa2defd Add JEMALLOC_COLD, and mark some functions cold.
5d8e70ab2 prof_recent: cassert(config_prof) more often.
83cad746a prof_log: cassert(config_prof) in public functions
526180b76 Extent.c: Avoid an rtree NULL-check.
b35ac00d5 Do not bump to large size for page aligned request
8a56d6b63 Add last-N mutex stats
22d62d8cb Handle ending gap properly for HPA stats
6c5a3a24d Omit bin stats rows with no data
ea013d8fa Enforce realloc sizing stability
74bd63b20 Optimize stats print using partial name-to-mib
4557c0a67 Enable ctl on partial mib and partial name
006dd0414 Add partial name-to-mib functionality
f2e1a5be7 Do not fail on partial ctl path for ctl_nametomib()
6ab181d2b Extract node lookup given mib input
3a627b967 No need to record all nodes in ctl_lookup()
91e006c4c Enable ctl_lookup() to start from arbitrary node
063a767ff Define JEMALLOC_HAS_ALLOCA_H for QNX
4e3fe218e Use posix_madvise to purge pages when available
26c1dc5a3 Support AutoConf for posix_madvise and POSIX_MADV_DONTNEED
96a59c3bb Fix recursive malloc during bootstrap on QNX
986cbe488 Disable JEMALLOC_TLS for QNX
1e3b8636f HPA: Remove unused malloc_conf options.
e82771807 Cache mallctl mib for batch allocation stress test
0dfdd31e0 Add tiny batch size to batch allocation stress test
9522ae41d Move n_search outside of assert as reported by static analyzer
a559caf74 hpdata: Strengthen assertions.
f51948d9e psset unit test: fix a bug.
54c94c167 flat bitmap: add scount / ucount functions.
e6c057ad3 fb: implement assign in terms of a visitor.
734e72ce8 bit_util: Guarantee popcount's presence.
d9f7e6c66 hpdata: Add a test.
3ed0b4e8a HPA: Add an nevictions counter.
fffcefed3 malloc_conf: Clarify HPA options.
f7cf23aa4 psset: Relegate alloc/dalloc to test code.
f9299ca57 HPA: Use psset fit/insert/remove.
0971e1e4e hpdata: Use addr/size instead of begin/npages.
5228d869e psset: Use fit/insert/remove as basis functions.
089f8fa44 Move hpdata bitmap logic out of the psset.
ca30b5db2 Introduce hpdata_t.
4a15008cf HPA unit test: skip if unsupported.
43af63fff HPA: Manage whole hugepages at a time.
63677dde6 Pages: Statically detect if pages_huge may succeed
c1b2a7793 psset: Move in stats.
d0a991d47 psset: Add insert/remove functions.
d438296b1 narenas_ratio: Accept fractional values.
ecd39418a Add fxp: A fixed-point math library.
99c2d6c23 Backport jeprof --collapse for flamegraph generation
520b75fa2 utrace support with label based signature.
92e189be8 Add some comments to the batch allocation logic flow
d96e4525a Route batch allocation of small batch size to tcache
ac480136d Split out locality checking in batch allocation tests
be5e49f4f Add a batch mode for cache_bin_alloc()
4a65f3493 Fix a cache bin test
566c4a859 Slight changes to cache bin internal functions
9545c2cd3 Add sample interval to prof last-N dump
cf2549a14 Add a per-arena oversize_threshold.
4ca3d91e9 Rename geom_grow -> exp_grow.
b4c37a6e8 Rename edata_tree_t -> edata_avail_t.
95f0a77fd Detect pthread_getname_np explicitly.
b3c5690b7 Update config.{guess,sub} to 2020-11-07@77632d9
589638182 Use the edata_cache_small_t in the HPA.
03a604711 Edata cache small: rewrite.
c9757d9e3 HPA: Don't disable shards that were never started.
1b3ee7566 Add experimental.thread.activity_callback.
27ef02ca9 Android build fix proposal.
d2d941017 MADV_DO[NOT]DUMP support equivalence on FreeBSD.
180b84315 Appveyor: fix 404 errors.
ef6d51ed4 DragonFlyBSD build support.
bf72188f8 Allow opt.tcache_max to accept small size classes.
ea32060f9 SEC: Implement thread affinity.
d16849c91 psset: Do first-fit based on slab age.
634ec6f50 Edata: add an "age" field.
6599651ae PA: Use an SEC in fron of the HPA shard.
ea51e97bb Add SEC module: a small extent cache.
1964b0839 HPA: Add stats for the hpa_shard.
534504d4a HPA: add size-exclusion functionality.
484f04733 HPA: Add central mutex contention stats.
bf025d2ec HPA: Make slab sizes and maxes configurable.
1c7da3331 HPA: Tie components into a PAI implementation.
c8209150f Switch from opt.lg_tcache_max to opt.tcache_max
5ba861715 Add thread name in prof last-N records
4ef5b8b4d Add a logo to doc_internal.
5e41ff9b7 Add a hard limit on tcache max size class.
3de19ba40 Eagerly detect double free and sized dealloc bugs for large sizes.
be9548f2b Tcaches: Fix a subtle race condition.
a9aa6f6d0 Fix the alloc_ctx check in free_fastpath.
b971f7c4d Add "default" option to slab sizes.
21b70cb54 Add hpa_central module
1ed7ec369 Emap: Add emap_assert_not_mapped.
2a6ba121b PRNG test: cleanups.
9e6aa77ab PRNG: Remove atomic functionality.
051304717 PRNG: Allow a a range argument of 1.
bdb60a805 Appveyor: don't update msys2 keyring.
025d8c37c Add a script to check for clang-formattedness.
f6bbfc1e9 Add a .clang-format file.
259c5e3e8 psset: Add stats
018b162d6 Add psset: a set of pageslabs.
ed99d300b Flat bitmap: Add longest-range computation.
e03450069 Edata: rename "ranged" bit to "pai".
7ad2f7866 Avoid a -Wundef warning on LG_SLAB_MAXREGS.
40cf71a06 Remove --with-slab-maxregs options from INSTALL.md
36ebb5abe CI support for PPC64LE architecture
1541ffc76 configure: add --with-lg-slab-maxregs configure option.
d243b4ec4 Add PROFILING_INTERNALS.md
09eda2c9b Add unit tests for usize in prof recent records
b549389e4 Correct usize in prof last-N record
202f01d4f Fix szind computation in profiling
866231fc6 Do not repeat reentrancy test in profiling
20f2479ed Do not create size class tables for non-prof builds
8efcdc3f9 Move unbias data to prof_data
5e90fd006 Geom_grow: Don't keep the mutex internal.
c57494879 Geom_grow: Don't take tsdn at init.
ffe552223 Geom_grow: Move in advancing logic.
131b1b533 Rename ecache_grow -> geom_grow.
b399463fb flat_bitmap unit test: Silence a warning.
b0ffa39ca Mallctl stress test: fix a type.
753bbf184 Benchmarks: Also print ns / iter.
7b187360e IO: Support 0-padding for unsigned numbers.
32d467322 Add a mallctl speed stress test.
38867c5c1 Makefile: alphabetize stress/analyze utilities.
ab274a23b Add narenas_ratio.
9e18ae639 Config: safety checks don't imply size checks.
8f9e958e1 Add alignment stress test for rallocx
743021b63 Fix size miscalculation bug in reallocation
eaed1e39b Add sized-delete size-checking functionality.
53084cc5c Safety check: Don't directly abort.
60993697d Prof: Add prof_unbias.
81c2f841e Add a simple utility to detect profiling bias.
e032a1a1d Add a stress test for batch allocation
f6cf5eb38 Add mallctl for batch allocation API
978f830ee Add batch allocation API
c6f59e9bb Add surplus reading API for thread event lookahead
f80546895 Add zero option to arena batch allocation
49e5c2fe7 Add batch allocation from fresh slabs
2bb8060d5 Add empty test and concat for typed list
f28cc2bc8 Extract bin shard selection out of bin locking
ddb8dc4ad FB: Add range iteration support.
ceee82351 Add flat_bitmap.
7fde6ac49 Nbits: Add a couple more interesting sizes.
efeab1f49 bitset test: Pull NBITS_TAB into its own file.
22da83609 bit_util: Add fls_ functions; "find last set".
1ed0288d9 bit_util: Change ffs functions indexing.
786a27b9e CI: Update keyring.
fb347dc61 Verify output space before doing heavy work in mallctl
f5fb4e5a9 Modify mallctl output length when needed
425840204 Corrections for prof_log_start()
e6cb7a1c9 Shorten wait time for peak events
6107857b7 PA->PAC: Move in PAI implementation.
6041aaba9 PA -> PAC: Move in destruction functions.
cbf096b05 Arena: remove redundant bg inactivity check.
471eb5913 PAC: Move in decay rate setting.
6a2774719 PA->PAC: Move in decay functions.
4ee75be3a PA -> PAC: Move in decay_purge enum.
72435b0ab PA->PAC: Make extent.c forget about PA.
dee5d1c42 PA->PAC: Move in extent_sn.
739138234 PA->PAC: Move in stats.
db211eefb PAC: Move in decay.
c81e38999 PAC: Move in ecache_grow.
65803171a PAC: move in emap
7efcb946c PAC: Add an init function.
722652222 PAC: Move in edata_cache accesses.
777b0ba96 Add PAC: Page allocator classic.
1b5f632e0 Introduce PAI: Page allocator interface
3cf19c6e5 atomic: add atomic_load_sub_store
f1f4ec315 Tcache: Tweak nslots_max tuning parameter.
ae541d3fa Edata: Reserve some space for hugepages.
392f645f4 Edata: split up different list linkage uses.
129b72705 Add typed-list module.
00f06c9be enabling mpss on solaris/illumos.
c2e7a0639 No need to intercept prof_dump_header() in tests
f58ebdff7 Generalize prof_cnt_all() for testing
80d18c18c Pass prof dump parameters explicitly in prof_sys
d4259ea53 Simplify signatures for prof dump functions
5d823f3a9 Consolidate struct definitions for prof dump parameters
1f5fe3a3e Pass write callback explicitly in prof_data
4556d3c0c Define structures for prof dump parameters
1c6742e6a Migrate prof dumping to use buffered writer
dad821bb2 Move unwind to prof_sys
d128efcb6 Relocate a few prof utilities to the right modules
4736fb4fc Move file handling logic in prof_data to prof_sys
767a2e179 Move file handling logic in prof to prof_sys
03ae509f3 Create prof_sys module for reading system thread name
adfd9d7b1 Change tsdn to tsd for thread name allocation
841af2b42 Move thread name handling to prof_data module
8118056c0 Expose prof_data testing internals only in prof tests
f43ac8543 Correct prof header macro namings
c8683bee8 Unify printing for prof counts object
5d292b566 Push error handling logic out of core dumping logic
f541871f5 Reduce prof dump buffer size in debug build
354183b10 Define prof dump buffer size centrally
7455813e5 Make dump file writing replaceable in test
21e44c45d Make maps file opening replaceable in test
4bb4037db Extract utility function for opening maps file
f307b2580 Only replace the dump file opening function in test
d8cea8756 Move size inspections to test/analyze
537a4bedb Add a tool to examine random number distributions
d460333ef Improve naming for prof system thread name option
25e43c602 Witness: Make ranks an enum.
092fcac0b Remove unnecessary source files
a795b1932 Remove beginning define in source files
24bbf376c Unify arena flag reading and selection
e128b170a Do not fallback to auto arena when manual arena is requested
95a59d2f7 Unify tcache flag reading and selection
4b0c00848 Unify zero flag reading and setting
2a84f9b8f Unify alignment flag reading and computation
b7858abfc Expose prof testing internal functions
40fa6674a Fix prof timestamp conf reading
7e09a57b3 stress/sizes: Fix an off-by-one issue.
dcfa6fd50 stress/sizes: Add a couple more types.
40672b0b7 Remove duplicate logging in malloc.
4aea74327 High Resolution Timestamps for Profiling
d82a164d0 Add thread.peak.[read|reset] mallctls.
fe7108305 Add peak_t, for tracking allocator net max.
17a64fe91 Add a small program to print data structure sizes.
3e19ebd2e Add lock to protect prof last-N dumping
a835d9cf8 Make prof last-N dumping non-blocking
fc8bc4b5c Increase dump buffer for prof last-N list
264d89d64 Extract restore and async cleanup functions for prof last-N list
857ebd3da Make edata pointer on prof recent record an atomic fence
b8bdea6b2 Fix: prof_recent_alloc_max_ctl_read() does not take tsd
730658f72 Extract alloc/dalloc utility for last-N nodes
035be4486 Separate out dumping for each prof recent record
8da0896b7 Tcache: Make an integer conversion explicit.
cd28e6033 Don't warn on uniform initialization.
6cdac3c57 Tcache: Make flush fractions configurable.
7503b5b33 Stats, CTL: Expose new tcache settings.
ee72bf1cf Tcache: Add tcache gc delay option.
d338dd45d Tcache: Make incremental gc bytes configurable.
ec0b57956 Tcache: Privatize opt_lg_tcache_max default.
10b96f635 Tcache: Remove some unused gc constants.
181093173 Tcache: make slot sizing configurable.
b58dea8d1 Cache bin: expose ncached_max publicly.
634afc412 Tcache: Make size computation configurable.
97b7a9cf7 Add a fill/flush microbenchmark.
33372cbd4 cpu instruction spin wait for arm32/64
27f29e424 LQ_QUANTUM should be 4 on mips64 hardware.
eda9c2858 Edata: zero stack edatas before initializing.
5dead37a9 Allow narenas:default.
dcea2c0f8 Get rid of TSD -> thread event dependency
75dae934a Always initialize TE counters in TSD init
b06dfb9cc Push event handlers to constituent modules
381c97caa Treat postponed prof sample event as new event
abd467493 Extract out per event postponed wait time fetching
f72014d09 Only compute thread event threshold once per trigger
7324c4f85 Break down event init and handler functions
6de77799d Move thread event wait time update to local
733ae918f Extract out per event new wait time fetching
1e2524e15 Do not reset sample wait time when re-initing tdata
855d20f6f Remove outdated comments in thread event
fc052ff72 Migrate counter to use locked int
b543c20a9 Minor update to locked int
f533ab6da Add forking handling for stats
508303077 Add forking handling for prof idump counter
4d970f8bf Add forking handling for counter module
2097e1945 Unify write callback signature
fef9abdcc Cleanup tcache allocation logic
e6cb6919c Consolidate prof inline function headers
d454af90f Remove unused prof_accum field from arena
8be558449 Initialize prof idump counter once rather than once per arena
e10e5059e Make prof_idump_accum() non-inline
039bfd4e3 Do not rollback prof idump counter in arena_prof_promote()
0295aa38a Deduplicate entries in witness error message
f1f8a7549 Let opt.zero propagate to core allocation.
2c09d4349 Add a benchmark of large allocations.
46471ea32 SC: Name the max lookup constant.
79dd0c04e SC: Simplify SC_NPSIZES computation.
fb6cfffd3 Configure: Get rid of LG_QUANTA.
4f8efba82 TSD: Make rtree_ctx a slow-path field.
cd29ebefd Tcache: treat small and large cache bins uniformly
a13fbad37 Tcache: split up fast and slow path data.
7099c6620 Arena: fill in terms of cache_bins.
40e7aed59 TSD: Move in some of the tcache fields.
58a00df23 TSD: Put all fast-path data together.
3589571bf SC: use SC_LG_NGROUP instead of its value.
877af247a QL, QR: Add documentation.
79ae7f921 Rtree: Remove the per-field accessors.
26e9a3103 PA: Simple decay test.
bb6a41852 Emap: Drop szind/slab splitting parameters.
50289750b Extent: Remove szind/slab knowledge.
dc26b3009 Rtree: Clean up compact/non-compact split.
93b99dd14 Extent: Stop passing an edata_cache everywhere.
a4759a191 Ehooks: avoid touching arena_emap_global in tests.
11c47cb13 Extent: Take "bool zero" over "bool *zero".
1a1124462 PA: Take zero as a bool rather than as a bool *.
294b276fc PA: Parameterize emap.  Move emap_global to arena.
f73057727 Eset: Parameterize last globals accesses.
7bb6e2dc0 Eset: take opt_lg_max_active_fit as a parameter.
883ab327c Emap: Move out last edata state touching.
0c96a2f03 Emap: Move out remaining edata modifications.
dfef0df71 Emap: Move edata modification out of emap_remap.
12eb888e5 Edata: Add a ranged bit.
bd4fdf295 Rtree: Pull leaf contents into their own struct.
faec7219b PA: Move in decay initialization.
45671e4a2 PA: Move in retain growth limit setting.
daefde88f PA: Move in mutex stats reading.
07675840a PA: Move in some more internals accesses.
238f3c743 PA: Move in full stats merging.
81c602759 Arena stats: Give it its own "mapped".
506d907e4 PA: Move in basic stats merging.
f29f6090f PA: Add pa_extra.c and put PA forking there.
8164fad40 Stats: Fix edata_cache size merging.
565045ef7 Arena: Make more derived stats non-atomic/locked.
d0c43217b Arena stats: Move retained to PA, use plain ints.
e2cf3fb1a PA: Move in all modifications of mapped.
436789ad9 PA: Make mapped stat atomic.
3c28aa6f1 PA: Move edata_avail stat in, make it non-atomic.
f6bfa3dcc Move extent stats to the PA module.
527dd4cdb PA: Move in nactive counter.
c075fd0bc PA: Minor cleanups and comment fixes.
46a9d7fc0 PA: Move in rest of purging.
2d6eec7b5 PA: Move in decay-all pathway.
65698b7f2 PA: Remove public visibility of some internals.
f012c43be PA: Move in decay_to_limit
103f5feda Move bg thread activity check out of purging core.
3034f4a50 PA: Move in decay_stashed.
aef28b2f8 PA: Move in stash_decayed.
655a09634 Move bg inactivity check out of purge inner loop.
71fc0dc96 PA: Move in remaining page allocation functions.
74958567a PA: have expand take sizes instead of new usize.
5bcc2c2ab PA: Have expand take szind and slab.
0880c2ab9 PA: Have large expands use it.
7be3dea82 PA: Have slab allocations use it.
9f93625c1 PA: Move in arena large allocation functionality.
7624043a4 PA: Add ehook-getting support.
eba35e2e4 Remove extent knowledge of arena.
e77f47a85 Move arena decay getters to PA.
48a2cd6d7 Decay: Add a (mostly stub) test case.
f77cec311 Decay: Take current time as an argument.
bf55e58e6 Rename test/unit/decay -> test/unit/arena_decay.
d1d7e1076 Decay: move in some background_thread accesses.
cdb916ed3 Decay: Add comments for the public API.
8f2193dc8 Decay: Move in arena decay functions.
4d090d23f Decay: Introduce a stub .c file.
7b6288547 Introduce decay module and put decay objects in PA
497836dbc Arena stats: mark edata_avail as derived.
3192d6b77 Extents: Have extent_dalloc_gap take ehooks.
22a0a7b93 Move arena_decay_extent to extent module.
70d12ffa0 PA: Move mapped into pa stats.
6ca918d0c PA: Add a stats comment.
ce8c0d6c0 PA: Move in arena extent_sn counter.
1ada4aef8 PA: Get rid of arena_ind_get calls.
1ad368c8b PA: Move in decay stats.
356aaa7dc Introduce lockedint module.
acd0bf6a2 PA: move in ecache_grow.
32cb7c2f0 PA: Add a stats type.
688fb3eb8 PA: Move in the arena edata_cache.
8433ad84e PA: move in shard initialization.
a24faed56 PA: Move in the ecache_t objects.
585f92505 Move cache index randomization out of extent.
12be9f572 Add a stub PA module -- a page allocator.
c4e9ea8cc Get rid of locks in prof recent test
2deabac07 Get rid of custom iterator for last-N records
a5ddfa7d9 Use ql for prof last-N list
8da6676a0 Don't do reentrant testing in junk tests.
ce17af422 Better structure ql module
4b66297ea Add move constructor to ql module
a62b7ed92 Add emptiness checking to ql module
1dd24ca6d Add rotate functionality to ql module
0dc95a882 Add concat and split functionality to ql module
1ad06aa53 deduplicate insert and delete logic in qr module
c9d56cddf Optimize meld in qr module
0d6d9e858 configure.ac: Put public symbols on one line.
f9aad7a49 Add piping API to buffered writer
09cd79495 Encapsulate buffer allocation failure in buffered writer
a166c2081 Make prof_tctx_t pointer a true prof atomic fence
d936b46d3 Add malloc_conf_2_conf_harder
3b4a03b92 Mac: don't declare system functions as nothrow.
2256ef896 Add option to fetch system thread name on each prof sample
ccdc70a5c Fix: assertion could abort on past failures
b30a5c2f9 Reorganize cpp APIs and suppress unused function warnings
2e5899c12 Stats: Fix tcache_bytes reporting.
a5780598b Remove thread_event_rollback()
ba783b3a0 Remove prof -> thread_event dependency
441d88d1c Rewrite profiling thread event
0dcd57660 Edata cache: atomic fetch-add -> load-store.
99b1291d1 Edata cache: add edata_cache_small_t.
734109d9c Edata cache: add a unit test.
e732344ef Inspect test: Reduce checks when profiling is on.
92485032b Cache bin: improve comments.
d701a085c Fast path: allow low-water mark changes.
397da0386 Cache bin: rewrite to track more state.
fef0b1ffe Cache bin: Remove last internals accesses.
0a2fcfac0 Tcache: Hold cache bin allocation explicitly.
d498a4bb0 Cache bin: Add an emptiness assertion.
6a7aa46ef Cache bin: Add a debug method for init checking.
370c1ea00 Cache bin: Write the unit test in terms of the API
7f5ebd211 Cache bin: set low-water internally.
60113dfe3 Cache bin: Move in initialization code.
44529da85 Cache-bin: Make flush modifications internal
ff6acc6ed Cache bin: simplify names and argument ordering.
e1dcc557d Cache bin: Only take the relevant cache_bin_info_t
1b00d808d cache_bin: Don't let arena see empty position.
d303f3079 cache_bin nflush -> n.
74d36d78e Cache bin: Make ncached_max a query on the info_t.
b66c0973c cache_bin: Don't allow direct internals access.
da68f7329 Move percpu_arena_update.
909c501b0 Cache_bin: Shouldn't know about tcache.
79f1ee2fc Move junking out of arena/tcache code.
b428dceea Config: Warn on void * pointer arithmetic.
22657a5e6 Extents: Silence the "potentially unused" warning.
4a78c6d81 Correct thread event unit test
305b1f6d9 Correction on geometric sampling
6c3491ad3 Tcache: Unify bin flush logic.
9f4fc2738 Ehooks: Fix a build warning.
bc31041ed Cirrus-CI: test on new freebsd releases.
51bd14742 Make use of assert_* in test/unit/thread_event.c
9d2cc3b0f Make use of assert_* in test/unit/prof_recent.c
a88d22ea1 Make use of assert_* in test/unit/inspect.c
0ceb31184 Make use of assert_* in test/unit/buf_writer.c
fa6157938 Add assert_* functionality to tests
21dfa4300 Change assert_* to expect_* in tests
162c2bcf3 Background thread: take base as a parameter.
29436fa05 Break prof and tcache knowledge of b0.
a0c1f4ac5 Rtree: take the base allocator as a parameter.
7013716aa Emap: Take (and propagate) a zeroed parameter.
182192f83 Base: Pull into a single header.
34b7165fd Put szind_t, pszind_t in sz.h.
7e6c8a728 Emap: Standardize naming.
ac50c1e44 Emap: Remove direct access to emap internals.
06e42090f Make jemalloc.c use the emap interface.
f7d9c6c42 Emap: Move in alloc_ctx lookup functionality.
65a54d771 Emap: Move in szind and slab modifications.
9b5d105fc Emap: Move in iealloc.
1d449bd9a Emap: Internal rtree context setting.
08eb1e6c3 Emap: Comments and cleanup
231d1477e Rename emap_split_prepare_t -> emap_prepare_t.
0586a56f3 Emap: Move in merge functionality.
040eac77c Tell edatas their creation arena immediately.
7c7b70206 Emap: Move over metadata splitting logic.
44f5f5360 Emap: Move over deregistration functions.
6513d9d92 Emap: Move over deregistration boundary functions.
9b5ca0b09 Emap: Move in slab interior registration.
d05b61db4 Emap: Move extent boundary registration in.
ca21ce407 Emap: Move in write_acquired from extent.
01f255161 Add emap, for tracking extent locking.
0f686e82a Avoid variable length array with length 0.
68e8ddcaf Add mallctl for dumping last-N profiling records
bc05ecebf Add const qualifier in assert_cmp()
ba0e35411 Rework the bin locking around tcache refill / flush.
7fd22f7b2 Fix Undefined Behavior in hash.h
ca1f08225 Disallow merge across mmap regions to preserve SN / first-fit.
7014f81e1 Add ASSURED_WRITE in mallctl
247688919 Add inspect.c to MSVC filters
9cac3fa8f Encapsulate buffer allocation in buffered writer
bdc08b515 Better naming buffered writer
c6bfe5585 Update the tsd description.
e89652261 Abbreviate thread-event to te.
5e500523a Remove thread_event_boot().
97dd79db6 Implement deallocation events.
536ea6858 NetBSD specific changes: - NetBSD overcommits - When mapping pages, use the maximum of the alignment requested and the   compiled-in PAGE constant which might be greater than the current kernel   pagesize, since we compile binaries with the maximum page size supported   by the architecture (so that they work with all kernels).
974222c62 Add safety check on sdallocx slow / sampled path.
88d9eca84 Enforce page alignment for sampled allocations.
0f552ed67 Don't purge huge extents when decay is off.
38a48e574 Set reentrancy to 1 for tsd_state_purgatory.
88b0e03a4 Implement opt.stats_interval and the _opts options.
d71a145ec Chagne prof_accum_t to counter_accum_t for general purpose.
ea351a7b5 Fix syntax errors in doc for thread.idle.
d92f0175c Introduce NEITHER_READ_NOR_WRITE in ctl.
6a622867c Add "thread.idle" mallctl.
f81341a48 Fallback to unbuffered printing if OOM
cd6e90824 Add stress test for last-N profiling mode
84b28c6a1 Properly handle tdata deletion race
d33120856 Get rid of redundant logic in prof
a72ea0db6 Restructure and correct sleep utility for testing
7b67ed0b5 Get rid of lock overlap in prof_recent_alloc_reset
bd3be8e0b Remove commit parameter to ecache functions.
b8df719d5 No tdata creation for backtracing on dying thread
dab81bd31 Rework and fix the assertions on malloc fastpath.
ad3f3fc56 Fetch time after tctx and only for samples
a5d3dd405 Fix an assertion on extent head state with dss.
2b604a301 Record request size in prof recent entries
40a391408 Define constructor for buffered writer argument
6d8e61690 Make buffered writer an independent module
6b6b4709b Unify buffered writer naming
9a60cf54e Last-N profiling mode
7a27a0594 Delete tdata states used for cleanup
e98ddf798 Fix unlikely condition in arena_prof_info_get()
3fa142cf3 Remove _externs from prof internal header names
112dc36dd Handle log_mtx during forking
ea42174d0 Refactor profiling headers
6342da097 Ehooks: Further optimize default merge case.
f2f2084e7 Ehooks: Assert alloc isn't NULL
e210ccc57 Move extent2 -> extent.
2f4fa8041 Rename extents -> ecache.
56cc56b69 Break extent split dependence on arena.
0aa9769fb Break commit functions' arena dependence
48ec5d435 Break extent_coalesce arena dependence
282a38232 Extent: Break [de]activation's arena dependence.
576d7047a Ecache: Should know its arena_ind.
372042a08 Remove merge dependence on the arena.
439219be7 Remove extent_can_coalesce arena dependency.
9cad5639f Ehooks: remove arena_ind parameter.
57fe99d4b Move relevant index into the ehooks_t itself.
c792f3e4a edata_cache: Remember the associated base_t.
ae23e5f42 Unify extent_alloc_wrapper with the other wrappers.
d8b0b66c6 Put extent_state_t into ecache as well as eset.
98eb40e56 Move delay_coalesce from the eset to the ecache.
bb70df8e5 Extent refactor: Introduce ecache module.
070451624 Ehooks: Add head tracking.
09475bf8a extent_may_dalloc -> ehooks_dalloc_will_fail
785918417 Pull out edata_t caching into its own module.
a7862df61 Rename extent_t to edata_t.
865debda2 Rename extent.h -> edata.h.
a738a66b5 Ehooks: Add some debug zero and addr checks.
4b2e5ee8b Ehooks: Add a "zero" ehook.
d0f187ad3 Arena: Loosen arena_may_have_muzzy restrictions.
ebbb97327 Base: Remove some unnecessary reentrancy guards.
403f2d166 Extents: Split out introspection functionality.
92a511d38 Make extent module hermetic.
e08c581cf Extent: Get rid of extent-specific pre/post reentrancy calls.
39fdc690a Ehooks comments and cleanup.
c8dae890c Extent -> Ehooks: Move over default hooks.
2fe510826 Extent -> Ehooks: Move merge hook.
1fff4d2ee Extent -> Ehooks: Move split hook.
a5b42a1a1 Extent -> Ehooks: Move purge_forced hook.
368baa42e Extent -> Ehooks: Move purge_lazy hook.
f83fdf533 Extent: Clean up a comma
d78fe241a Extent -> Ehooks: Move commit and decommit hooks.
5459ec9da Extent -> Ehooks: Move destroy hook.
bac8e2e5a Extent -> Ehooks: Move dalloc hook.
dc8b4e6e1 Extent -> Ehooks: Move alloc hook.
703fbc0ff Introduce unsafe reentrancy guards.
ae0d8e859 Move extent ehook calls into ehooks
ba8b9ecbc Add ehooks module
837119a94 base_structs.h: Remove some mid-line tabs.
9f6eb0958 Extents: Eagerly initialize extent hooks.
4278f8460 Move extent hook getters/setters to arena.c
9226e1f0d fix opt.thp:never still use THP with base_new
d5031ea82 Allow dallocx and sdallocx after tsd destruction.
4afd709d1 Restructure setters for profiling info
1d01e4c77 Initialization utilities for nstime
dd649c948 Optimize away the tsd_fast() check on fastpath.
1decf958d Fix incorrect usage of cassert.
45836d7fd Pass nstime_t pointer for profiling
7d2bac5a3 Refactor destroy code path for prof_tctx
055478cca Threshold is no longer updated before prof_realloc()
7e3671911 Get rid of old indentation style for prof
dfdd46f6c Refactor prof_tctx_t creation
aa1d71fb7 Rename prof_tctx to alloc_tctx in prof_info_t
5e0b09099 No need to pass usize to prof_tctx_set()
1b1e76acf Disable some spuriously-triggering warnings
a70909b13 Test on all supported release of FreeBSD
5c47a3022 Guard C++ aligned APIs
694537177 Change tsdn to tsd for profiling code path
b55419f9b Restructure profiling
8b2c2a596 Support C++17 over-aligned allocation
9a3c73800 Refactor arena_bin_malloc_hard().
9a7ae3c97 Reduce footprint of bin_t.
cb1a1f4ad Remove the unnecessary alloc_ctx on free_fastpath.
716061710 Add branch hints to free_fastpath.
a787d2f5b Prefer getaffinity() to detect number of CPUs.
04cb7d4d6 Bail out early for muzzy decay.
73510dfd1 Revert "Fix bug in prof_realloc"
3b5eecf10 Fix bug in prof_realloc
e4c36a6f3 Emphasize no modification through thread.allocatedp allowed.
c462753cc Use __forceinline for JEMALLOC_ALWAYS_INLINE on msvc
836d7a7e6 Check for large size first in the uncommon case of malloc.
9c59abe42 Fix a typo in Makefile.
da50d8ce8 Refactor and optimize prof sampling initialization.
bc774a351 Rename tsd->offset_state to tsd->prng_state.
19a51abf3 Avoid arena->offset_state when tsd not available for prng.
d01b425e5 Add -Wimplicit-fallthrough checks if supported
a8b578d53 Remove mallctl test for zero_realloc
43f0ce92d Define general purpose tsd_thread_event_init()
97f93fa0f Pull tcache GC events into thread event handler
198f02e79 Pull prof_accumbytes into thread event handler
152c0ef95 Build a general purpose thread event handler
6924f83cb use SYS_openat when available
de81a4ead Add stats counters for number of zero reallocs
9cfa80594 Realloc: Make behavior of realloc(ptr, 0) configurable.
ee961c231 Merge realloc and rallocx pathways.
bd6e28d6a Guard slabcur fetching in extent_util
4786099a3 Increase column width for global malloc/free rate
05681e387 Optimize cache_bin_alloc_easy for malloc fast path
4fe50bc7d Fix amd64 MSVC warning
4fbbc817c Simplify time setting and getting for prof log
4094b7c03 Limit # of iters of test_bitmap_xfu.
66e07f986 Suppress tdata creation in reentrancy
beb7c16e9 Guard prof_active reset by opt_prof
1df9dd351 Fix je_ prefix issue in test
3d84bd57f Arena: Add helper function arena_get_from_extent.
c97d25575 Eset: Remove temporary declaration.
ce5b128f1 Remove the undefined extent_size_quantize declarations.
821dd53a1 Extent -> Eset: Rename arena members.
e144b21e4 Extent -> Eset: Move fork handling.
77bbb35a9 Extent -> Eset: Move extent fit functions.
1210af9a4 Extent -> Eset: Move insertion and removal.
a42861540 Extents -> Eset: Convert some stats getters.
820f070c6 Move page quantization to sz module.
63d1b7a7a Extents -> Eset: move extents_state_get.
b416b96a3 Extents -> Eset: rename/move extents_init.
e6180fe1b Eset: Add a source file.
4e5e43f22 Rename extents_t -> eset_t.
723ccc6c2 Extents: Split out extent struct.
41187bdfb Extents: Break extent-struct/arena interactions
529cfe2ab Arena: rename arena_structs_b.h -> arena_structs.h
e7cf84a8d Rearrange slab data and constants
d1be488cd Add --with-lg-page=16 to CI.
ac5185f73 Fix tcache bin stack alignment.
b7c7df24b Add max_per_bg_thd stats for per background thread mutexes.
4b76c684b Add "prof.dump_prefix" to override filename prefixes for dumps.
242af439b Rename "prof_dump_seq_mtx" to "prof_dump_filename_mtx".
e06658cb2 check GNU make exists in path
22bc75ee3 Workaround the stringop-overflow check false positives.
93d615180 Pass tsd down to prof_backtrace()
671f120e2 Fix prof_backtrace() reentrancy level
785b84e60 Make cache_bin_sz_t unsigned.
23dc7a7fb Fix index type for cache_bin_alloc_easy.
2abb02ecd Fix MSVC 2015 build, as proposed by @christianaguilera-foundry.
719583f14 Fix large.nflushes in the merged stats.
adce29c88 Optimize for prof_active off
49e6fbce7 Always adjust thread_(de)allocated
57b81c078 Pull thread_(de)allocated out of config_stats
9e031c1d1 Bug fix for prof_active switch
0043e68d4 Track low_water == -1 case explicitly.
937ca1db9 Store ncached_max * ptr_size in tcache_bin_info.
7599c82d4 Redesign the cache bin metadata for fast path.
d2dddfb82 Add hint in the bogus version string.
d6b7995c1 Update INSTALL.md about the default doc build.
e2c758436 Simplify / refactor tcache_dalloc_large.
9c5c2a2c8 Unify the signature of tcache_flush small and large.
28ed9b9a5 Buffer stats printing
eb70fef8c Make compact json format as default
a219cfcda Clear tcache prof_accumbytes in tcache_flush_cache
ad3f7dbfa Buffer prof_log_stop
593484661 Fix large bin index accessed through cache bin descriptor.
22746d3c9 Properly dalloc prof nodes with idalloctm.
8c8466fa6 Add compact json option for emitter
7fc6b1b25 Add buffered writer
39343555d Report stats for tdatas_mtx and prof_dump_mtx
87e2400cb Fix tcaches mutex pre- / post-fork handling.
07ce2434b Refactor profiling
56126d0d2 Refactor prof log
56c8ecffc Correct tsd layout graph

git-subtree-dir: deps/jemalloc
git-subtree-split: 54eaed1d8b56b1aa528be3bdd1877e59c56fa90c
parent 220a0f08
deps/jemalloc/src/ckh.c
@@ -34,7 +34,6 @@
  * respectively.
  *
  ******************************************************************************/
-#define JEMALLOC_CKH_C_
 #include "jemalloc/internal/jemalloc_preamble.h"
 #include "jemalloc/internal/ckh.h"
@@ -357,14 +356,14 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
 }
 
 bool
-ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *ckh_hash,
     ckh_keycomp_t *keycomp) {
     bool ret;
     size_t mincells, usize;
     unsigned lg_mincells;
 
     assert(minitems > 0);
-    assert(hash != NULL);
+    assert(ckh_hash != NULL);
     assert(keycomp != NULL);
 
 #ifdef CKH_COUNT
@@ -393,7 +392,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
     }
     ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
     ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
-    ckh->hash = hash;
+    ckh->hash = ckh_hash;
     ckh->keycomp = keycomp;
 
     usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
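The ckh.c hunks above rename the hash parameter of ckh_new() to ckh_hash and update its uses; this appears to correspond to the "Fix symbol conflict with musl libc" entry in the log. The following is a minimal standalone sketch of the same pattern; the typedef and struct names are hypothetical, not jemalloc code.

/*
 * Illustrative sketch, not jemalloc code.  A parameter that reuses an
 * identifier declared elsewhere (for example by a platform header) can
 * shadow or conflict with that declaration, so the parameter is renamed
 * while the struct field keeps its original name.
 */
#include <stddef.h>

typedef void (table_hash_t)(const void *key, size_t hashes[2]);

typedef struct table_s {
    table_hash_t *hash;    /* field name is unaffected by the rename */
} table_t;

void
table_init(table_t *t, table_hash_t *table_hash /* was: hash */) {
    t->hash = table_hash;
}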
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/counter.h"
bool
counter_accum_init(counter_accum_t *counter, uint64_t interval) {
if (LOCKEDINT_MTX_INIT(counter->mtx, "counter_accum",
WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) {
return true;
}
locked_init_u64_unsynchronized(&counter->accumbytes, 0);
counter->interval = interval;
return false;
}
void
counter_prefork(tsdn_t *tsdn, counter_accum_t *counter) {
LOCKEDINT_MTX_PREFORK(tsdn, counter->mtx);
}
void
counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter) {
LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, counter->mtx);
}
void
counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter) {
LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, counter->mtx);
}
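src/counter.c is new in this change set; per the log entries above ("Chagne prof_accum_t to counter_accum_t for general purpose", "Implement opt.stats_interval and the _opts options", "Add forking handling for counter module"), it generalizes the old profiling byte accumulator into a reusable interval counter. Below is a rough standalone sketch of the accumulate-and-trigger idea behind it, using a plain pthread mutex instead of jemalloc's locked-int and witness machinery; the names are illustrative and not jemalloc's API.

/*
 * Standalone sketch of an interval byte counter: callers report byte
 * counts, and the counter signals each time another `interval` bytes
 * have accumulated.  Illustrative only; not jemalloc's internal API.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
    pthread_mutex_t mtx;
    uint64_t accumbytes;
    uint64_t interval;
} byte_counter_t;

/* Returns true on failure, mirroring the convention in the diff above. */
bool
byte_counter_init(byte_counter_t *c, uint64_t interval) {
    if (pthread_mutex_init(&c->mtx, NULL) != 0) {
        return true;
    }
    c->accumbytes = 0;
    c->interval = interval;
    return false;
}

/* Returns true each time the accumulated total crosses the interval. */
bool
byte_counter_accum(byte_counter_t *c, uint64_t bytes) {
    bool trigger = false;
    pthread_mutex_lock(&c->mtx);
    c->accumbytes += bytes;
    if (c->accumbytes >= c->interval) {
        c->accumbytes -= c->interval;
        trigger = true;
    }
    pthread_mutex_unlock(&c->mtx);
    return trigger;
}

The counter_prefork/counter_postfork_* functions in the diff follow jemalloc's usual fork-safety pattern: the underlying mutex is acquired before fork() and then released or reinitialized in the parent and child.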
deps/jemalloc/src/ctl.c
-#define JEMALLOC_CTL_C_
 #include "jemalloc/internal/jemalloc_preamble.h"
 #include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -6,8 +5,16 @@
 #include "jemalloc/internal/ctl.h"
 #include "jemalloc/internal/extent_dss.h"
 #include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/inspect.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/peak_event.h"
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_log.h"
+#include "jemalloc/internal/prof_recent.h"
+#include "jemalloc/internal/prof_stats.h"
+#include "jemalloc/internal/prof_sys.h"
+#include "jemalloc/internal/safety_check.h"
 #include "jemalloc/internal/sc.h"
 #include "jemalloc/internal/util.h"
@@ -60,6 +67,8 @@ CTL_PROTO(background_thread)
 CTL_PROTO(max_background_threads)
 CTL_PROTO(thread_tcache_enabled)
 CTL_PROTO(thread_tcache_flush)
+CTL_PROTO(thread_peak_read)
+CTL_PROTO(thread_peak_reset)
 CTL_PROTO(thread_prof_name)
 CTL_PROTO(thread_prof_active)
 CTL_PROTO(thread_arena)
@@ -67,6 +76,7 @@ CTL_PROTO(thread_allocated)
 CTL_PROTO(thread_allocatedp)
 CTL_PROTO(thread_deallocated)
 CTL_PROTO(thread_deallocatedp)
+CTL_PROTO(thread_idle)
 CTL_PROTO(config_cache_oblivious)
 CTL_PROTO(config_debug)
 CTL_PROTO(config_fill)
@@ -81,7 +91,20 @@ CTL_PROTO(config_utrace)
 CTL_PROTO(config_xmalloc)
 CTL_PROTO(opt_abort)
 CTL_PROTO(opt_abort_conf)
+CTL_PROTO(opt_cache_oblivious)
+CTL_PROTO(opt_trust_madvise)
 CTL_PROTO(opt_confirm_conf)
+CTL_PROTO(opt_hpa)
+CTL_PROTO(opt_hpa_slab_max_alloc)
+CTL_PROTO(opt_hpa_hugification_threshold)
+CTL_PROTO(opt_hpa_hugify_delay_ms)
+CTL_PROTO(opt_hpa_min_purge_interval_ms)
+CTL_PROTO(opt_hpa_dirty_mult)
+CTL_PROTO(opt_hpa_sec_nshards)
+CTL_PROTO(opt_hpa_sec_max_alloc)
+CTL_PROTO(opt_hpa_sec_max_bytes)
+CTL_PROTO(opt_hpa_sec_bytes_after_flush)
+CTL_PROTO(opt_hpa_sec_batch_fill_extra)
 CTL_PROTO(opt_metadata_thp)
 CTL_PROTO(opt_retain)
 CTL_PROTO(opt_dss)
@@ -89,19 +112,31 @@ CTL_PROTO(opt_narenas)
 CTL_PROTO(opt_percpu_arena)
 CTL_PROTO(opt_oversize_threshold)
 CTL_PROTO(opt_background_thread)
+CTL_PROTO(opt_mutex_max_spin)
 CTL_PROTO(opt_max_background_threads)
 CTL_PROTO(opt_dirty_decay_ms)
 CTL_PROTO(opt_muzzy_decay_ms)
 CTL_PROTO(opt_stats_print)
 CTL_PROTO(opt_stats_print_opts)
+CTL_PROTO(opt_stats_interval)
+CTL_PROTO(opt_stats_interval_opts)
 CTL_PROTO(opt_junk)
 CTL_PROTO(opt_zero)
 CTL_PROTO(opt_utrace)
 CTL_PROTO(opt_xmalloc)
+CTL_PROTO(opt_experimental_infallible_new)
 CTL_PROTO(opt_tcache)
+CTL_PROTO(opt_tcache_max)
+CTL_PROTO(opt_tcache_nslots_small_min)
+CTL_PROTO(opt_tcache_nslots_small_max)
+CTL_PROTO(opt_tcache_nslots_large)
+CTL_PROTO(opt_lg_tcache_nslots_mul)
+CTL_PROTO(opt_tcache_gc_incr_bytes)
+CTL_PROTO(opt_tcache_gc_delay_bytes)
+CTL_PROTO(opt_lg_tcache_flush_small_div)
+CTL_PROTO(opt_lg_tcache_flush_large_div)
 CTL_PROTO(opt_thp)
 CTL_PROTO(opt_lg_extent_max_active_fit)
-CTL_PROTO(opt_lg_tcache_max)
 CTL_PROTO(opt_prof)
 CTL_PROTO(opt_prof_prefix)
 CTL_PROTO(opt_prof_active)
@@ -111,7 +146,14 @@ CTL_PROTO(opt_lg_prof_interval)
 CTL_PROTO(opt_prof_gdump)
 CTL_PROTO(opt_prof_final)
 CTL_PROTO(opt_prof_leak)
+CTL_PROTO(opt_prof_leak_error)
 CTL_PROTO(opt_prof_accum)
+CTL_PROTO(opt_prof_recent_alloc_max)
+CTL_PROTO(opt_prof_stats)
+CTL_PROTO(opt_prof_sys_thread_name)
+CTL_PROTO(opt_prof_time_res)
+CTL_PROTO(opt_lg_san_uaf_align)
+CTL_PROTO(opt_zero_realloc)
 CTL_PROTO(tcache_create)
 CTL_PROTO(tcache_flush)
 CTL_PROTO(tcache_destroy)
@@ -121,6 +163,7 @@ CTL_PROTO(arena_i_purge)
 CTL_PROTO(arena_i_reset)
 CTL_PROTO(arena_i_destroy)
 CTL_PROTO(arena_i_dss)
+CTL_PROTO(arena_i_oversize_threshold)
 CTL_PROTO(arena_i_dirty_decay_ms)
 CTL_PROTO(arena_i_muzzy_decay_ms)
 CTL_PROTO(arena_i_extent_hooks)
@@ -148,11 +191,18 @@ CTL_PROTO(prof_thread_active_init)
 CTL_PROTO(prof_active)
 CTL_PROTO(prof_dump)
 CTL_PROTO(prof_gdump)
+CTL_PROTO(prof_prefix)
 CTL_PROTO(prof_reset)
 CTL_PROTO(prof_interval)
 CTL_PROTO(lg_prof_sample)
 CTL_PROTO(prof_log_start)
 CTL_PROTO(prof_log_stop)
+CTL_PROTO(prof_stats_bins_i_live)
+CTL_PROTO(prof_stats_bins_i_accum)
+INDEX_PROTO(prof_stats_bins_i)
+CTL_PROTO(prof_stats_lextents_i_live)
+CTL_PROTO(prof_stats_lextents_i_accum)
+INDEX_PROTO(prof_stats_lextents_i)
 CTL_PROTO(stats_arenas_i_small_allocated)
 CTL_PROTO(stats_arenas_i_small_nmalloc)
 CTL_PROTO(stats_arenas_i_small_ndalloc)
@@ -188,6 +238,39 @@ CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
 CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
 CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
 INDEX_PROTO(stats_arenas_i_extents_j)
+CTL_PROTO(stats_arenas_i_hpa_shard_npurge_passes)
+CTL_PROTO(stats_arenas_i_hpa_shard_npurges)
+CTL_PROTO(stats_arenas_i_hpa_shard_nhugifies)
+CTL_PROTO(stats_arenas_i_hpa_shard_ndehugifies)
+
+/* We have a set of stats for full slabs. */
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)
+
+/* A parallel set for the empty slabs. */
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)
+
+/*
+ * And one for the slabs that are neither empty nor full, but indexed by how
+ * full they are.
+ */
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)
+INDEX_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j)
 CTL_PROTO(stats_arenas_i_nthreads)
 CTL_PROTO(stats_arenas_i_uptime)
 CTL_PROTO(stats_arenas_i_dss)
...@@ -209,8 +292,10 @@ CTL_PROTO(stats_arenas_i_base) ...@@ -209,8 +292,10 @@ CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal) CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_metadata_thp) CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes) CTL_PROTO(stats_arenas_i_tcache_bytes)
CTL_PROTO(stats_arenas_i_tcache_stashed_bytes)
CTL_PROTO(stats_arenas_i_resident)
CTL_PROTO(stats_arenas_i_abandoned_vm)
CTL_PROTO(stats_arenas_i_hpa_sec_bytes)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
@@ -222,12 +307,21 @@ CTL_PROTO(stats_metadata_thp)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
CTL_PROTO(stats_zero_reallocs)
CTL_PROTO(experimental_hooks_install)
CTL_PROTO(experimental_hooks_remove)
CTL_PROTO(experimental_hooks_prof_backtrace)
CTL_PROTO(experimental_hooks_prof_dump)
CTL_PROTO(experimental_hooks_safety_check_abort)
CTL_PROTO(experimental_thread_activity_callback)
CTL_PROTO(experimental_utilization_query)
CTL_PROTO(experimental_utilization_batch_query)
CTL_PROTO(experimental_arenas_i_pactivep)
INDEX_PROTO(experimental_arenas_i)
CTL_PROTO(experimental_prof_recent_alloc_max)
CTL_PROTO(experimental_prof_recent_alloc_dump)
CTL_PROTO(experimental_batch_alloc)
CTL_PROTO(experimental_arenas_create_ext)
#define MUTEX_STATS_CTL_PROTO_GEN(n) \
CTL_PROTO(stats_##n##_num_ops) \
@@ -275,6 +369,11 @@ static const ctl_named_node_t thread_tcache_node[] = {
{NAME("flush"), CTL(thread_tcache_flush)}
};
static const ctl_named_node_t thread_peak_node[] = {
{NAME("read"), CTL(thread_peak_read)},
{NAME("reset"), CTL(thread_peak_reset)},
};
static const ctl_named_node_t thread_prof_node[] = {
{NAME("name"), CTL(thread_prof_name)},
{NAME("active"), CTL(thread_prof_active)}
@@ -287,7 +386,9 @@ static const ctl_named_node_t thread_node[] = {
{NAME("deallocated"), CTL(thread_deallocated)},
{NAME("deallocatedp"), CTL(thread_deallocatedp)},
{NAME("tcache"), CHILD(named, thread_tcache)},
{NAME("peak"), CHILD(named, thread_peak)},
{NAME("prof"), CHILD(named, thread_prof)},
{NAME("idle"), CTL(thread_idle)}
};
static const ctl_named_node_t config_node[] = {
@@ -308,27 +409,60 @@ static const ctl_named_node_t config_node[] = {
static const ctl_named_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)},
{NAME("abort_conf"), CTL(opt_abort_conf)},
{NAME("cache_oblivious"), CTL(opt_cache_oblivious)},
{NAME("trust_madvise"), CTL(opt_trust_madvise)},
{NAME("confirm_conf"), CTL(opt_confirm_conf)}, {NAME("confirm_conf"), CTL(opt_confirm_conf)},
{NAME("hpa"), CTL(opt_hpa)},
{NAME("hpa_slab_max_alloc"), CTL(opt_hpa_slab_max_alloc)},
{NAME("hpa_hugification_threshold"),
CTL(opt_hpa_hugification_threshold)},
{NAME("hpa_hugify_delay_ms"), CTL(opt_hpa_hugify_delay_ms)},
{NAME("hpa_min_purge_interval_ms"), CTL(opt_hpa_min_purge_interval_ms)},
{NAME("hpa_dirty_mult"), CTL(opt_hpa_dirty_mult)},
{NAME("hpa_sec_nshards"), CTL(opt_hpa_sec_nshards)},
{NAME("hpa_sec_max_alloc"), CTL(opt_hpa_sec_max_alloc)},
{NAME("hpa_sec_max_bytes"), CTL(opt_hpa_sec_max_bytes)},
{NAME("hpa_sec_bytes_after_flush"),
CTL(opt_hpa_sec_bytes_after_flush)},
{NAME("hpa_sec_batch_fill_extra"),
CTL(opt_hpa_sec_batch_fill_extra)},
{NAME("metadata_thp"), CTL(opt_metadata_thp)}, {NAME("metadata_thp"), CTL(opt_metadata_thp)},
{NAME("retain"), CTL(opt_retain)}, {NAME("retain"), CTL(opt_retain)},
{NAME("dss"), CTL(opt_dss)}, {NAME("dss"), CTL(opt_dss)},
{NAME("narenas"), CTL(opt_narenas)}, {NAME("narenas"), CTL(opt_narenas)},
{NAME("percpu_arena"), CTL(opt_percpu_arena)}, {NAME("percpu_arena"), CTL(opt_percpu_arena)},
{NAME("oversize_threshold"), CTL(opt_oversize_threshold)}, {NAME("oversize_threshold"), CTL(opt_oversize_threshold)},
{NAME("mutex_max_spin"), CTL(opt_mutex_max_spin)},
{NAME("background_thread"), CTL(opt_background_thread)}, {NAME("background_thread"), CTL(opt_background_thread)},
{NAME("max_background_threads"), CTL(opt_max_background_threads)}, {NAME("max_background_threads"), CTL(opt_max_background_threads)},
{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)}, {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)}, {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
{NAME("stats_print"), CTL(opt_stats_print)}, {NAME("stats_print"), CTL(opt_stats_print)},
{NAME("stats_print_opts"), CTL(opt_stats_print_opts)}, {NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
{NAME("stats_interval"), CTL(opt_stats_interval)},
{NAME("stats_interval_opts"), CTL(opt_stats_interval_opts)},
{NAME("junk"), CTL(opt_junk)}, {NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)}, {NAME("zero"), CTL(opt_zero)},
{NAME("utrace"), CTL(opt_utrace)}, {NAME("utrace"), CTL(opt_utrace)},
{NAME("xmalloc"), CTL(opt_xmalloc)}, {NAME("xmalloc"), CTL(opt_xmalloc)},
{NAME("experimental_infallible_new"),
CTL(opt_experimental_infallible_new)},
{NAME("tcache"), CTL(opt_tcache)}, {NAME("tcache"), CTL(opt_tcache)},
{NAME("tcache_max"), CTL(opt_tcache_max)},
{NAME("tcache_nslots_small_min"),
CTL(opt_tcache_nslots_small_min)},
{NAME("tcache_nslots_small_max"),
CTL(opt_tcache_nslots_small_max)},
{NAME("tcache_nslots_large"), CTL(opt_tcache_nslots_large)},
{NAME("lg_tcache_nslots_mul"), CTL(opt_lg_tcache_nslots_mul)},
{NAME("tcache_gc_incr_bytes"), CTL(opt_tcache_gc_incr_bytes)},
{NAME("tcache_gc_delay_bytes"), CTL(opt_tcache_gc_delay_bytes)},
{NAME("lg_tcache_flush_small_div"),
CTL(opt_lg_tcache_flush_small_div)},
{NAME("lg_tcache_flush_large_div"),
CTL(opt_lg_tcache_flush_large_div)},
{NAME("thp"), CTL(opt_thp)}, {NAME("thp"), CTL(opt_thp)},
{NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)}, {NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
{NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
{NAME("prof"), CTL(opt_prof)}, {NAME("prof"), CTL(opt_prof)},
{NAME("prof_prefix"), CTL(opt_prof_prefix)}, {NAME("prof_prefix"), CTL(opt_prof_prefix)},
{NAME("prof_active"), CTL(opt_prof_active)}, {NAME("prof_active"), CTL(opt_prof_active)},
...@@ -338,7 +472,14 @@ static const ctl_named_node_t opt_node[] = { ...@@ -338,7 +472,14 @@ static const ctl_named_node_t opt_node[] = {
{NAME("prof_gdump"), CTL(opt_prof_gdump)}, {NAME("prof_gdump"), CTL(opt_prof_gdump)},
{NAME("prof_final"), CTL(opt_prof_final)}, {NAME("prof_final"), CTL(opt_prof_final)},
{NAME("prof_leak"), CTL(opt_prof_leak)}, {NAME("prof_leak"), CTL(opt_prof_leak)},
{NAME("prof_accum"), CTL(opt_prof_accum)} {NAME("prof_leak_error"), CTL(opt_prof_leak_error)},
{NAME("prof_accum"), CTL(opt_prof_accum)},
{NAME("prof_recent_alloc_max"), CTL(opt_prof_recent_alloc_max)},
{NAME("prof_stats"), CTL(opt_prof_stats)},
{NAME("prof_sys_thread_name"), CTL(opt_prof_sys_thread_name)},
{NAME("prof_time_resolution"), CTL(opt_prof_time_res)},
{NAME("lg_san_uaf_align"), CTL(opt_lg_san_uaf_align)},
{NAME("zero_realloc"), CTL(opt_zero_realloc)}
};
static const ctl_named_node_t tcache_node[] = {
@@ -354,6 +495,11 @@ static const ctl_named_node_t arena_i_node[] = {
{NAME("reset"), CTL(arena_i_reset)},
{NAME("destroy"), CTL(arena_i_destroy)},
{NAME("dss"), CTL(arena_i_dss)},
/*
* Undocumented for now, since we anticipate an arena API in flux after
* we cut the last 5-series release.
*/
{NAME("oversize_threshold"), CTL(arena_i_oversize_threshold)},
{NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)}, {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
{NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)}, {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
{NAME("extent_hooks"), CTL(arena_i_extent_hooks)}, {NAME("extent_hooks"), CTL(arena_i_extent_hooks)},
...@@ -408,17 +554,51 @@ static const ctl_named_node_t arenas_node[] = { ...@@ -408,17 +554,51 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("lookup"), CTL(arenas_lookup)} {NAME("lookup"), CTL(arenas_lookup)}
}; };
static const ctl_named_node_t prof_stats_bins_i_node[] = {
{NAME("live"), CTL(prof_stats_bins_i_live)},
{NAME("accum"), CTL(prof_stats_bins_i_accum)}
};
static const ctl_named_node_t super_prof_stats_bins_i_node[] = {
{NAME(""), CHILD(named, prof_stats_bins_i)}
};
static const ctl_indexed_node_t prof_stats_bins_node[] = {
{INDEX(prof_stats_bins_i)}
};
static const ctl_named_node_t prof_stats_lextents_i_node[] = {
{NAME("live"), CTL(prof_stats_lextents_i_live)},
{NAME("accum"), CTL(prof_stats_lextents_i_accum)}
};
static const ctl_named_node_t super_prof_stats_lextents_i_node[] = {
{NAME(""), CHILD(named, prof_stats_lextents_i)}
};
static const ctl_indexed_node_t prof_stats_lextents_node[] = {
{INDEX(prof_stats_lextents_i)}
};
static const ctl_named_node_t prof_stats_node[] = {
{NAME("bins"), CHILD(indexed, prof_stats_bins)},
{NAME("lextents"), CHILD(indexed, prof_stats_lextents)},
};
static const ctl_named_node_t prof_node[] = {
{NAME("thread_active_init"), CTL(prof_thread_active_init)},
{NAME("active"), CTL(prof_active)},
{NAME("dump"), CTL(prof_dump)},
{NAME("gdump"), CTL(prof_gdump)},
{NAME("prefix"), CTL(prof_prefix)},
{NAME("reset"), CTL(prof_reset)}, {NAME("reset"), CTL(prof_reset)},
{NAME("interval"), CTL(prof_interval)}, {NAME("interval"), CTL(prof_interval)},
{NAME("lg_sample"), CTL(lg_prof_sample)}, {NAME("lg_sample"), CTL(lg_prof_sample)},
{NAME("log_start"), CTL(prof_log_start)}, {NAME("log_start"), CTL(prof_log_start)},
{NAME("log_stop"), CTL(prof_log_stop)} {NAME("log_stop"), CTL(prof_log_stop)},
{NAME("stats"), CHILD(named, prof_stats)}
};
static const ctl_named_node_t stats_arenas_i_small_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
@@ -521,6 +701,75 @@ MUTEX_PROF_ARENA_MUTEXES
#undef OP
};
static const ctl_named_node_t stats_arenas_i_hpa_shard_full_slabs_node[] = {
{NAME("npageslabs_nonhuge"),
CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)},
{NAME("npageslabs_huge"),
CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)},
{NAME("nactive_nonhuge"),
CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)},
{NAME("nactive_huge"),
CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)},
{NAME("ndirty_nonhuge"),
CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)},
{NAME("ndirty_huge"),
CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)}
};
static const ctl_named_node_t stats_arenas_i_hpa_shard_empty_slabs_node[] = {
{NAME("npageslabs_nonhuge"),
CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)},
{NAME("npageslabs_huge"),
CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)},
{NAME("nactive_nonhuge"),
CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)},
{NAME("nactive_huge"),
CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)},
{NAME("ndirty_nonhuge"),
CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)},
{NAME("ndirty_huge"),
CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)}
};
static const ctl_named_node_t stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
{NAME("npageslabs_nonhuge"),
CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)},
{NAME("npageslabs_huge"),
CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)},
{NAME("nactive_nonhuge"),
CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)},
{NAME("nactive_huge"),
CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)},
{NAME("ndirty_nonhuge"),
CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)},
{NAME("ndirty_huge"),
CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)}
};
static const ctl_named_node_t super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
{NAME(""),
CHILD(named, stats_arenas_i_hpa_shard_nonfull_slabs_j)}
};
static const ctl_indexed_node_t stats_arenas_i_hpa_shard_nonfull_slabs_node[] =
{
{INDEX(stats_arenas_i_hpa_shard_nonfull_slabs_j)}
};
static const ctl_named_node_t stats_arenas_i_hpa_shard_node[] = {
{NAME("full_slabs"), CHILD(named,
stats_arenas_i_hpa_shard_full_slabs)},
{NAME("empty_slabs"), CHILD(named,
stats_arenas_i_hpa_shard_empty_slabs)},
{NAME("nonfull_slabs"), CHILD(indexed,
stats_arenas_i_hpa_shard_nonfull_slabs)},
{NAME("npurge_passes"), CTL(stats_arenas_i_hpa_shard_npurge_passes)},
{NAME("npurges"), CTL(stats_arenas_i_hpa_shard_npurges)},
{NAME("nhugifies"), CTL(stats_arenas_i_hpa_shard_nhugifies)},
{NAME("ndehugifies"), CTL(stats_arenas_i_hpa_shard_ndehugifies)}
};
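The node table above publishes the HPA shard counters as stats.arenas.<i>.hpa_shard.*. A minimal read-side sketch, assuming the standard mallctl() entry point, that these counters are exported as uint64_t, that opt.hpa is enabled, and using arena index 0 purely for illustration:

#include <stdio.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Advance the epoch so the stats snapshot is current. */
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	mallctl("epoch", &epoch, &esz, &epoch, esz);

	uint64_t npurges = 0, nhugifies = 0;
	size_t sz = sizeof(uint64_t);
	if (mallctl("stats.arenas.0.hpa_shard.npurges", &npurges, &sz,
	    NULL, 0) == 0 &&
	    mallctl("stats.arenas.0.hpa_shard.nhugifies", &nhugifies, &sz,
	    NULL, 0) == 0) {
		printf("hpa purges: %llu, hugifies: %llu\n",
		    (unsigned long long)npurges,
		    (unsigned long long)nhugifies);
	}
	return 0;
}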
static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("uptime"), CTL(stats_arenas_i_uptime)},
@@ -543,14 +792,18 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("internal"), CTL(stats_arenas_i_internal)},
{NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
{NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
{NAME("tcache_stashed_bytes"),
CTL(stats_arenas_i_tcache_stashed_bytes)},
{NAME("resident"), CTL(stats_arenas_i_resident)}, {NAME("resident"), CTL(stats_arenas_i_resident)},
{NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)}, {NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)},
{NAME("hpa_sec_bytes"), CTL(stats_arenas_i_hpa_sec_bytes)},
{NAME("small"), CHILD(named, stats_arenas_i_small)}, {NAME("small"), CHILD(named, stats_arenas_i_small)},
{NAME("large"), CHILD(named, stats_arenas_i_large)}, {NAME("large"), CHILD(named, stats_arenas_i_large)},
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
{NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)}, {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
{NAME("extents"), CHILD(indexed, stats_arenas_i_extents)}, {NAME("extents"), CHILD(indexed, stats_arenas_i_extents)},
{NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)} {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)},
{NAME("hpa_shard"), CHILD(named, stats_arenas_i_hpa_shard)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
{NAME(""), CHILD(named, stats_arenas_i)}
@@ -589,12 +842,21 @@ static const ctl_named_node_t stats_node[] = {
{NAME("background_thread"),
CHILD(named, stats_background_thread)},
{NAME("mutexes"), CHILD(named, stats_mutexes)},
{NAME("arenas"), CHILD(indexed, stats_arenas)},
{NAME("zero_reallocs"), CTL(stats_zero_reallocs)},
};
static const ctl_named_node_t experimental_hooks_node[] = {
{NAME("install"), CTL(experimental_hooks_install)},
{NAME("remove"), CTL(experimental_hooks_remove)},
{NAME("prof_backtrace"), CTL(experimental_hooks_prof_backtrace)},
{NAME("prof_dump"), CTL(experimental_hooks_prof_dump)},
{NAME("safety_check_abort"), CTL(experimental_hooks_safety_check_abort)},
};
static const ctl_named_node_t experimental_thread_node[] = {
{NAME("activity_callback"),
CTL(experimental_thread_activity_callback)}
};
static const ctl_named_node_t experimental_utilization_node[] = {
@@ -613,10 +875,19 @@ static const ctl_indexed_node_t experimental_arenas_node[] = {
{INDEX(experimental_arenas_i)}
};
static const ctl_named_node_t experimental_prof_recent_node[] = {
{NAME("alloc_max"), CTL(experimental_prof_recent_alloc_max)},
{NAME("alloc_dump"), CTL(experimental_prof_recent_alloc_dump)},
};
static const ctl_named_node_t experimental_node[] = {
{NAME("hooks"), CHILD(named, experimental_hooks)},
{NAME("utilization"), CHILD(named, experimental_utilization)},
{NAME("arenas"), CHILD(indexed, experimental_arenas)},
{NAME("arenas_create_ext"), CTL(experimental_arenas_create_ext)},
{NAME("prof_recent"), CHILD(named, experimental_prof_recent)},
{NAME("batch_alloc"), CTL(experimental_batch_alloc)},
{NAME("thread"), CHILD(named, experimental_thread)}
};
static const ctl_named_node_t root_node[] = {
@@ -650,28 +921,13 @@ static const ctl_named_node_t super_root_node[] = {
* synchronized by the ctl mutex.
*/
static void static void
ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) { ctl_accum_locked_u64(locked_u64_t *dst, locked_u64_t *src) {
#ifdef JEMALLOC_ATOMIC_U64 locked_inc_u64_unsynchronized(dst,
uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED); locked_read_u64_unsynchronized(src));
uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
#else
*dst += *src;
#endif
}
/* Likewise: with ctl mutex synchronization, reading is simple. */
static uint64_t
ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_u64(p, ATOMIC_RELAXED);
#else
return *p;
#endif
} }
static void static void
accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) { ctl_accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED); size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED); atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
...@@ -783,11 +1039,15 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) { ...@@ -783,11 +1039,15 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) {
ctl_arena->astats->nfills_small = 0; ctl_arena->astats->nfills_small = 0;
ctl_arena->astats->nflushes_small = 0; ctl_arena->astats->nflushes_small = 0;
memset(ctl_arena->astats->bstats, 0, SC_NBINS * memset(ctl_arena->astats->bstats, 0, SC_NBINS *
sizeof(bin_stats_t)); sizeof(bin_stats_data_t));
memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) * memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
sizeof(arena_stats_large_t)); sizeof(arena_stats_large_t));
memset(ctl_arena->astats->estats, 0, SC_NPSIZES * memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
sizeof(arena_stats_extents_t)); sizeof(pac_estats_t));
memset(&ctl_arena->astats->hpastats, 0,
sizeof(hpa_shard_stats_t));
memset(&ctl_arena->astats->secstats, 0,
sizeof(sec_stats_t));
}
}
...@@ -801,22 +1061,19 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { ...@@ -801,22 +1061,19 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
&ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
&ctl_arena->pdirty, &ctl_arena->pmuzzy, &ctl_arena->pdirty, &ctl_arena->pmuzzy,
&ctl_arena->astats->astats, ctl_arena->astats->bstats, &ctl_arena->astats->astats, ctl_arena->astats->bstats,
ctl_arena->astats->lstats, ctl_arena->astats->estats); ctl_arena->astats->lstats, ctl_arena->astats->estats,
&ctl_arena->astats->hpastats, &ctl_arena->astats->secstats);
for (i = 0; i < SC_NBINS; i++) { for (i = 0; i < SC_NBINS; i++) {
ctl_arena->astats->allocated_small += bin_stats_t *bstats =
ctl_arena->astats->bstats[i].curregs * &ctl_arena->astats->bstats[i].stats_data;
ctl_arena->astats->allocated_small += bstats->curregs *
sz_index2size(i); sz_index2size(i);
ctl_arena->astats->nmalloc_small += ctl_arena->astats->nmalloc_small += bstats->nmalloc;
ctl_arena->astats->bstats[i].nmalloc; ctl_arena->astats->ndalloc_small += bstats->ndalloc;
ctl_arena->astats->ndalloc_small += ctl_arena->astats->nrequests_small += bstats->nrequests;
ctl_arena->astats->bstats[i].ndalloc; ctl_arena->astats->nfills_small += bstats->nfills;
ctl_arena->astats->nrequests_small += ctl_arena->astats->nflushes_small += bstats->nflushes;
ctl_arena->astats->bstats[i].nrequests;
ctl_arena->astats->nfills_small +=
ctl_arena->astats->bstats[i].nfills;
ctl_arena->astats->nflushes_small +=
ctl_arena->astats->bstats[i].nflushes;
} }
} else { } else {
arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads, arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
...@@ -848,27 +1105,32 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, ...@@ -848,27 +1105,32 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
ctl_arena_stats_t *astats = ctl_arena->astats; ctl_arena_stats_t *astats = ctl_arena->astats;
if (!destroyed) { if (!destroyed) {
accum_atomic_zu(&sdstats->astats.mapped, sdstats->astats.mapped += astats->astats.mapped;
&astats->astats.mapped); sdstats->astats.pa_shard_stats.pac_stats.retained
accum_atomic_zu(&sdstats->astats.retained, += astats->astats.pa_shard_stats.pac_stats.retained;
&astats->astats.retained); sdstats->astats.pa_shard_stats.edata_avail
accum_atomic_zu(&sdstats->astats.extent_avail, += astats->astats.pa_shard_stats.edata_avail;
&astats->astats.extent_avail); }
}
ctl_accum_locked_u64(
ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge, &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge,
&astats->astats.decay_dirty.npurge); &astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge);
ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise, ctl_accum_locked_u64(
&astats->astats.decay_dirty.nmadvise); &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise,
ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged, &astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise);
&astats->astats.decay_dirty.purged); ctl_accum_locked_u64(
&sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.purged,
ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge, &astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged);
&astats->astats.decay_muzzy.npurge);
ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise, ctl_accum_locked_u64(
&astats->astats.decay_muzzy.nmadvise); &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge,
ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged, &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge);
&astats->astats.decay_muzzy.purged); ctl_accum_locked_u64(
&sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise,
&astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise);
ctl_accum_locked_u64(
&sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged,
&astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged);
#define OP(mtx) malloc_mutex_prof_merge( \ #define OP(mtx) malloc_mutex_prof_merge( \
&(sdstats->astats.mutex_prof_data[ \ &(sdstats->astats.mutex_prof_data[ \
...@@ -878,14 +1140,11 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, ...@@ -878,14 +1140,11 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
MUTEX_PROF_ARENA_MUTEXES MUTEX_PROF_ARENA_MUTEXES
#undef OP #undef OP
if (!destroyed) { if (!destroyed) {
accum_atomic_zu(&sdstats->astats.base, sdstats->astats.base += astats->astats.base;
&astats->astats.base); sdstats->astats.resident += astats->astats.resident;
accum_atomic_zu(&sdstats->astats.internal, sdstats->astats.metadata_thp += astats->astats.metadata_thp;
ctl_accum_atomic_zu(&sdstats->astats.internal,
&astats->astats.internal); &astats->astats.internal);
accum_atomic_zu(&sdstats->astats.resident,
&astats->astats.resident);
accum_atomic_zu(&sdstats->astats.metadata_thp,
&astats->astats.metadata_thp);
} else { } else {
assert(atomic_load_zu( assert(atomic_load_zu(
&astats->astats.internal, ATOMIC_RELAXED) == 0); &astats->astats.internal, ATOMIC_RELAXED) == 0);
...@@ -903,23 +1162,23 @@ MUTEX_PROF_ARENA_MUTEXES ...@@ -903,23 +1162,23 @@ MUTEX_PROF_ARENA_MUTEXES
sdstats->nflushes_small += astats->nflushes_small; sdstats->nflushes_small += astats->nflushes_small;
if (!destroyed) { if (!destroyed) {
accum_atomic_zu(&sdstats->astats.allocated_large, sdstats->astats.allocated_large +=
&astats->astats.allocated_large); astats->astats.allocated_large;
} else { } else {
assert(atomic_load_zu(&astats->astats.allocated_large, assert(astats->astats.allocated_large == 0);
ATOMIC_RELAXED) == 0);
} }
ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large, sdstats->astats.nmalloc_large += astats->astats.nmalloc_large;
&astats->astats.nmalloc_large); sdstats->astats.ndalloc_large += astats->astats.ndalloc_large;
ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large, sdstats->astats.nrequests_large
&astats->astats.ndalloc_large); += astats->astats.nrequests_large;
ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large, sdstats->astats.nflushes_large += astats->astats.nflushes_large;
&astats->astats.nrequests_large); ctl_accum_atomic_zu(
accum_atomic_zu(&sdstats->astats.abandoned_vm, &sdstats->astats.pa_shard_stats.pac_stats.abandoned_vm,
&astats->astats.abandoned_vm); &astats->astats.pa_shard_stats.pac_stats.abandoned_vm);
accum_atomic_zu(&sdstats->astats.tcache_bytes, sdstats->astats.tcache_bytes += astats->astats.tcache_bytes;
&astats->astats.tcache_bytes); sdstats->astats.tcache_stashed_bytes +=
astats->astats.tcache_stashed_bytes;
if (ctl_arena->arena_ind == 0) { if (ctl_arena->arena_ind == 0) {
sdstats->astats.uptime = astats->astats.uptime; sdstats->astats.uptime = astats->astats.uptime;
...@@ -927,29 +1186,26 @@ MUTEX_PROF_ARENA_MUTEXES ...@@ -927,29 +1186,26 @@ MUTEX_PROF_ARENA_MUTEXES
/* Merge bin stats. */ /* Merge bin stats. */
for (i = 0; i < SC_NBINS; i++) { for (i = 0; i < SC_NBINS; i++) {
sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; bin_stats_t *bstats = &astats->bstats[i].stats_data;
sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; bin_stats_t *merged = &sdstats->bstats[i].stats_data;
sdstats->bstats[i].nrequests += merged->nmalloc += bstats->nmalloc;
astats->bstats[i].nrequests; merged->ndalloc += bstats->ndalloc;
merged->nrequests += bstats->nrequests;
if (!destroyed) { if (!destroyed) {
sdstats->bstats[i].curregs += merged->curregs += bstats->curregs;
astats->bstats[i].curregs;
} else { } else {
assert(astats->bstats[i].curregs == 0); assert(bstats->curregs == 0);
} }
sdstats->bstats[i].nfills += astats->bstats[i].nfills; merged->nfills += bstats->nfills;
sdstats->bstats[i].nflushes += merged->nflushes += bstats->nflushes;
astats->bstats[i].nflushes; merged->nslabs += bstats->nslabs;
sdstats->bstats[i].nslabs += astats->bstats[i].nslabs; merged->reslabs += bstats->reslabs;
sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
if (!destroyed) { if (!destroyed) {
sdstats->bstats[i].curslabs += merged->curslabs += bstats->curslabs;
astats->bstats[i].curslabs; merged->nonfull_slabs += bstats->nonfull_slabs;
sdstats->bstats[i].nonfull_slabs +=
astats->bstats[i].nonfull_slabs;
} else { } else {
assert(astats->bstats[i].curslabs == 0); assert(bstats->curslabs == 0);
assert(astats->bstats[i].nonfull_slabs == 0); assert(bstats->nonfull_slabs == 0);
} }
malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data, malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
&astats->bstats[i].mutex_data); &astats->bstats[i].mutex_data);
...@@ -957,11 +1213,11 @@ MUTEX_PROF_ARENA_MUTEXES ...@@ -957,11 +1213,11 @@ MUTEX_PROF_ARENA_MUTEXES
/* Merge stats for large allocations. */ /* Merge stats for large allocations. */
for (i = 0; i < SC_NSIZES - SC_NBINS; i++) { for (i = 0; i < SC_NSIZES - SC_NBINS; i++) {
ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc, ctl_accum_locked_u64(&sdstats->lstats[i].nmalloc,
&astats->lstats[i].nmalloc); &astats->lstats[i].nmalloc);
ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc, ctl_accum_locked_u64(&sdstats->lstats[i].ndalloc,
&astats->lstats[i].ndalloc); &astats->lstats[i].ndalloc);
ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests, ctl_accum_locked_u64(&sdstats->lstats[i].nrequests,
&astats->lstats[i].nrequests); &astats->lstats[i].nrequests);
if (!destroyed) { if (!destroyed) {
sdstats->lstats[i].curlextents += sdstats->lstats[i].curlextents +=
...@@ -973,19 +1229,21 @@ MUTEX_PROF_ARENA_MUTEXES ...@@ -973,19 +1229,21 @@ MUTEX_PROF_ARENA_MUTEXES
/* Merge extents stats. */ /* Merge extents stats. */
for (i = 0; i < SC_NPSIZES; i++) { for (i = 0; i < SC_NPSIZES; i++) {
accum_atomic_zu(&sdstats->estats[i].ndirty, sdstats->estats[i].ndirty += astats->estats[i].ndirty;
&astats->estats[i].ndirty); sdstats->estats[i].nmuzzy += astats->estats[i].nmuzzy;
accum_atomic_zu(&sdstats->estats[i].nmuzzy, sdstats->estats[i].nretained
&astats->estats[i].nmuzzy); += astats->estats[i].nretained;
accum_atomic_zu(&sdstats->estats[i].nretained, sdstats->estats[i].dirty_bytes
&astats->estats[i].nretained); += astats->estats[i].dirty_bytes;
accum_atomic_zu(&sdstats->estats[i].dirty_bytes, sdstats->estats[i].muzzy_bytes
&astats->estats[i].dirty_bytes); += astats->estats[i].muzzy_bytes;
accum_atomic_zu(&sdstats->estats[i].muzzy_bytes, sdstats->estats[i].retained_bytes
&astats->estats[i].muzzy_bytes); += astats->estats[i].retained_bytes;
accum_atomic_zu(&sdstats->estats[i].retained_bytes,
&astats->estats[i].retained_bytes);
} }
/* Merge HPA stats. */
hpa_shard_stats_accum(&sdstats->hpastats, &astats->hpastats);
sec_stats_accum(&sdstats->secstats, &astats->secstats);
} }
} }
@@ -1001,7 +1259,7 @@ ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
}
static unsigned
ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) { ctl_arena_init(tsd_t *tsd, const arena_config_t *config) {
unsigned arena_ind;
ctl_arena_t *ctl_arena;
@@ -1019,7 +1277,7 @@ ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
}
/* Initialize new arena. */
if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) { if (arena_init(tsd_tsdn(tsd), arena_ind, config) == NULL) {
return UINT_MAX;
}
@@ -1036,8 +1294,11 @@ ctl_background_thread_stats_read(tsdn_t *tsdn) {
if (!have_background_thread ||
background_thread_stats_read(tsdn, stats)) {
memset(stats, 0, sizeof(background_thread_stats_t));
nstime_init(&stats->run_interval, 0); nstime_init_zero(&stats->run_interval);
}
malloc_mutex_prof_copy(
&ctl_stats->mutex_prof_data[global_prof_mutex_max_per_bg_thd],
&stats->max_counter_per_bg_thd);
} }
static void
@@ -1069,21 +1330,17 @@ ctl_refresh(tsdn_t *tsdn) {
if (config_stats) { if (config_stats) {
ctl_stats->allocated = ctl_sarena->astats->allocated_small + ctl_stats->allocated = ctl_sarena->astats->allocated_small +
atomic_load_zu(&ctl_sarena->astats->astats.allocated_large, ctl_sarena->astats->astats.allocated_large;
ATOMIC_RELAXED);
ctl_stats->active = (ctl_sarena->pactive << LG_PAGE); ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
ctl_stats->metadata = atomic_load_zu( ctl_stats->metadata = ctl_sarena->astats->astats.base +
&ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
atomic_load_zu(&ctl_sarena->astats->astats.internal, atomic_load_zu(&ctl_sarena->astats->astats.internal,
ATOMIC_RELAXED); ATOMIC_RELAXED);
ctl_stats->metadata_thp = atomic_load_zu( ctl_stats->resident = ctl_sarena->astats->astats.resident;
&ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED); ctl_stats->metadata_thp =
ctl_stats->resident = atomic_load_zu( ctl_sarena->astats->astats.metadata_thp;
&ctl_sarena->astats->astats.resident, ATOMIC_RELAXED); ctl_stats->mapped = ctl_sarena->astats->astats.mapped;
ctl_stats->mapped = atomic_load_zu( ctl_stats->retained = ctl_sarena->astats->astats
&ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED); .pa_shard_stats.pac_stats.retained;
ctl_stats->retained = atomic_load_zu(
&ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);
ctl_background_thread_stats_read(tsdn); ctl_background_thread_stats_read(tsdn);
...@@ -1093,8 +1350,20 @@ ctl_refresh(tsdn_t *tsdn) { ...@@ -1093,8 +1350,20 @@ ctl_refresh(tsdn_t *tsdn) {
malloc_mutex_unlock(tsdn, &mtx); malloc_mutex_unlock(tsdn, &mtx);
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof, READ_GLOBAL_MUTEX_PROF_DATA(
bt2gctx_mtx); global_prof_mutex_prof, bt2gctx_mtx);
READ_GLOBAL_MUTEX_PROF_DATA(
global_prof_mutex_prof_thds_data, tdatas_mtx);
READ_GLOBAL_MUTEX_PROF_DATA(
global_prof_mutex_prof_dump, prof_dump_mtx);
READ_GLOBAL_MUTEX_PROF_DATA(
global_prof_mutex_prof_recent_alloc,
prof_recent_alloc_mtx);
READ_GLOBAL_MUTEX_PROF_DATA(
global_prof_mutex_prof_recent_dump,
prof_recent_dump_mtx);
READ_GLOBAL_MUTEX_PROF_DATA(
global_prof_mutex_prof_stats, prof_stats_mtx);
} }
if (have_background_thread) { if (have_background_thread) {
READ_GLOBAL_MUTEX_PROF_DATA( READ_GLOBAL_MUTEX_PROF_DATA(
@@ -1191,8 +1460,9 @@ label_return:
}
static int
ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, ctl_lookup(tsdn_t *tsdn, const ctl_named_node_t *starting_node,
size_t *mibp, size_t *depthp) { const char *name, const ctl_named_node_t **ending_nodep, size_t *mibp,
size_t *depthp) {
int ret; int ret;
const char *elm, *tdot, *dot; const char *elm, *tdot, *dot;
size_t elen, i, j; size_t elen, i, j;
...@@ -1206,7 +1476,7 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, ...@@ -1206,7 +1476,7 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
ret = ENOENT; ret = ENOENT;
goto label_return; goto label_return;
} }
node = super_root_node; node = starting_node;
for (i = 0; i < *depthp; i++) { for (i = 0; i < *depthp; i++) {
assert(node); assert(node);
assert(node->nchildren > 0); assert(node->nchildren > 0);
...@@ -1220,10 +1490,6 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, ...@@ -1220,10 +1490,6 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
if (strlen(child->name) == elen && if (strlen(child->name) == elen &&
strncmp(elm, child->name, elen) == 0) { strncmp(elm, child->name, elen) == 0) {
node = child; node = child;
if (nodesp != NULL) {
nodesp[i] =
(const ctl_node_t *)node;
}
mibp[i] = j; mibp[i] = j;
break; break;
} }
...@@ -1250,13 +1516,11 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, ...@@ -1250,13 +1516,11 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
goto label_return; goto label_return;
} }
if (nodesp != NULL) {
nodesp[i] = (const ctl_node_t *)node;
}
mibp[i] = (size_t)index; mibp[i] = (size_t)index;
} }
if (node->ctl != NULL) { /* Reached the end? */
if (node->ctl != NULL || *dot == '\0') {
/* Terminal node. */ /* Terminal node. */
if (*dot != '\0') { if (*dot != '\0') {
/* /*
...@@ -1272,16 +1536,14 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, ...@@ -1272,16 +1536,14 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
} }
/* Update elm. */ /* Update elm. */
if (*dot == '\0') {
/* No more elements. */
ret = ENOENT;
goto label_return;
}
elm = &dot[1]; elm = &dot[1];
dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
strchr(elm, '\0'); strchr(elm, '\0');
elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
} }
if (ending_nodep != NULL) {
*ending_nodep = node;
}
ret = 0; ret = 0;
label_return: label_return:
...@@ -1293,7 +1555,6 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, ...@@ -1293,7 +1555,6 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen) { void *newp, size_t newlen) {
int ret; int ret;
size_t depth; size_t depth;
ctl_node_t const *nodes[CTL_MAX_DEPTH];
size_t mib[CTL_MAX_DEPTH]; size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node; const ctl_named_node_t *node;
...@@ -1303,12 +1564,12 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, ...@@ -1303,12 +1564,12 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
} }
depth = CTL_MAX_DEPTH; depth = CTL_MAX_DEPTH;
ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, &node, mib,
&depth);
if (ret != 0) { if (ret != 0) {
goto label_return; goto label_return;
} }
node = ctl_named_node(nodes[depth-1]);
if (node != NULL && node->ctl) { if (node != NULL && node->ctl) {
ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen); ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
} else { } else {
...@@ -1329,26 +1590,19 @@ ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) { ...@@ -1329,26 +1590,19 @@ ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
goto label_return; goto label_return;
} }
ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp); ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, NULL, mibp,
miblenp);
label_return: label_return:
return(ret); return(ret);
} }
int static int
ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ctl_lookupbymib(tsdn_t *tsdn, const ctl_named_node_t **ending_nodep,
size_t *oldlenp, void *newp, size_t newlen) { const size_t *mib, size_t miblen) {
int ret; int ret;
const ctl_named_node_t *node;
size_t i;
if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN;
goto label_return;
}
/* Iterate down the tree. */ const ctl_named_node_t *node = super_root_node;
node = super_root_node; for (size_t i = 0; i < miblen; i++) {
for (i = 0; i < miblen; i++) {
assert(node); assert(node);
assert(node->nchildren > 0); assert(node->nchildren > 0);
if (ctl_named_node(node->children) != NULL) { if (ctl_named_node(node->children) != NULL) {
...@@ -1363,13 +1617,36 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -1363,13 +1617,36 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Indexed element. */ /* Indexed element. */
inode = ctl_indexed_node(node->children); inode = ctl_indexed_node(node->children);
node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]); node = inode->index(tsdn, mib, miblen, mib[i]);
if (node == NULL) { if (node == NULL) {
ret = ENOENT; ret = ENOENT;
goto label_return; goto label_return;
} }
} }
} }
assert(ending_nodep != NULL);
*ending_nodep = node;
ret = 0;
label_return:
return(ret);
}
int
ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const ctl_named_node_t *node;
if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN;
goto label_return;
}
ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
if (ret != 0) {
goto label_return;
}
/* Call the ctl function. */ /* Call the ctl function. */
if (node && node->ctl) { if (node && node->ctl) {
...@@ -1383,6 +1660,81 @@ label_return: ...@@ -1383,6 +1660,81 @@ label_return:
return(ret); return(ret);
} }
int
ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
size_t *miblenp) {
int ret;
const ctl_named_node_t *node;
if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN;
goto label_return;
}
ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
if (ret != 0) {
goto label_return;
}
if (node == NULL || node->ctl != NULL) {
ret = ENOENT;
goto label_return;
}
assert(miblenp != NULL);
assert(*miblenp >= miblen);
*miblenp -= miblen;
ret = ctl_lookup(tsd_tsdn(tsd), node, name, NULL, mib + miblen,
miblenp);
*miblenp += miblen;
label_return:
return(ret);
}
int
ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const ctl_named_node_t *node;
if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN;
goto label_return;
}
ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
if (ret != 0) {
goto label_return;
}
if (node == NULL || node->ctl != NULL) {
ret = ENOENT;
goto label_return;
}
assert(miblenp != NULL);
assert(*miblenp >= miblen);
*miblenp -= miblen;
/*
* The same node supplies the starting node and stores the ending node.
*/
ret = ctl_lookup(tsd_tsdn(tsd), node, name, &node, mib + miblen,
miblenp);
*miblenp += miblen;
if (ret != 0) {
goto label_return;
}
if (node != NULL && node->ctl) {
ret = node->ctl(tsd, mib, *miblenp, oldp, oldlenp, newp,
newlen);
} else {
/* The name refers to a partial path through the ctl tree. */
ret = ENOENT;
}
label_return:
return(ret);
}
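ctl_mibnametomib() and ctl_bymibname() let a caller translate a name suffix relative to a MIB prefix that was resolved earlier. The long-standing two-step pattern they build on looks like this from the public side (a sketch assuming the documented mallctlnametomib()/mallctlbymib() interface; the arena indices are illustrative):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Translate the name once. */
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	if (mallctlnametomib("stats.arenas.0.nthreads", mib, &miblen) != 0) {
		return 1;
	}
	/* Reuse the MIB, patching the arena index component in place. */
	for (size_t i = 0; i < 2; i++) {
		mib[2] = i;
		unsigned nthreads;
		size_t sz = sizeof(nthreads);
		if (mallctlbymib(mib, miblen, &nthreads, &sz, NULL, 0) == 0) {
			printf("arena %zu: %u threads\n", i, nthreads);
		}
	}
	return 0;
}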
bool bool
ctl_boot(void) { ctl_boot(void) {
if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL, if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
...@@ -1410,6 +1762,11 @@ ctl_postfork_child(tsdn_t *tsdn) { ...@@ -1410,6 +1762,11 @@ ctl_postfork_child(tsdn_t *tsdn) {
malloc_mutex_postfork_child(tsdn, &ctl_mtx); malloc_mutex_postfork_child(tsdn, &ctl_mtx);
} }
void
ctl_mtx_assert_held(tsdn_t *tsdn) {
malloc_mutex_assert_owner(tsdn, &ctl_mtx);
}
/******************************************************************************/
/* *_ctl() functions. */
@@ -1427,6 +1784,7 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
/* Can read or write, but not both. */
#define READ_XOR_WRITE() do { \
if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
newlen != 0)) { \
@@ -1435,12 +1793,31 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
/* Can neither read nor write. */
#define NEITHER_READ_NOR_WRITE() do { \
if (oldp != NULL || oldlenp != NULL || newp != NULL || \
newlen != 0) { \
ret = EPERM; \
goto label_return; \
} \
} while (0)
/* Verify that the space provided is enough. */
#define VERIFY_READ(t) do { \
if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(t)) { \
*oldlenp = 0; \
ret = EINVAL; \
goto label_return; \
} \
} while (0)
#define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \
? sizeof(t) : *oldlenp; \
memcpy(oldp, (void *)&(v), copylen); \
*oldlenp = copylen; \
ret = EINVAL; \
goto label_return; \
} \
@@ -1458,6 +1835,14 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
#define ASSURED_WRITE(v, t) do { \
if (newp == NULL || newlen != sizeof(t)) { \
ret = EINVAL; \
goto label_return; \
} \
(v) = *(t *)newp; \
} while (0)
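From the caller's side, VERIFY_READ and ASSURED_WRITE make the length checks strict for endpoints such as tcaches.create and tcaches.flush further down in this file: the old buffer must be exactly sizeof(unsigned), and the new buffer must supply exactly sizeof(unsigned). A caller-side sketch under those assumptions:

#include <jemalloc/jemalloc.h>

int
create_and_flush_explicit_tcache(void) {
	/* tcaches.create reads back an index; *oldlenp must match exactly. */
	unsigned ind;
	size_t sz = sizeof(ind);
	if (mallctl("tcaches.create", &ind, &sz, NULL, 0) != 0) {
		return -1;
	}
	/* tcaches.flush is write-only; newlen must be sizeof(unsigned). */
	if (mallctl("tcaches.flush", NULL, NULL, &ind, sizeof(ind)) != 0) {
		return -1;
	}
	return 0;
}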
#define MIB_UNSIGNED(v, i) do { \
if (mib[i] > UINT_MAX) { \
ret = EFAULT; \
@@ -1497,8 +1882,8 @@ label_return: \
#define CTL_RO_CGEN(c, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1540,8 +1925,8 @@ label_return: \
*/
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1559,8 +1944,8 @@ label_return: \
#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1573,29 +1958,10 @@ label_return: \
return ret; \
}
#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
if (!(c)) { \
return ENOENT; \
} \
READONLY(); \
oldval = (m(tsd)); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
return ret; \
}
#define CTL_RO_CONFIG_GEN(n, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1761,7 +2127,34 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool)
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
CTL_RO_NL_GEN(opt_cache_oblivious, opt_cache_oblivious, bool)
CTL_RO_NL_GEN(opt_trust_madvise, opt_trust_madvise, bool)
CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)
/* HPA options. */
CTL_RO_NL_GEN(opt_hpa, opt_hpa, bool)
CTL_RO_NL_GEN(opt_hpa_hugification_threshold,
opt_hpa_opts.hugification_threshold, size_t)
CTL_RO_NL_GEN(opt_hpa_hugify_delay_ms, opt_hpa_opts.hugify_delay_ms, uint64_t)
CTL_RO_NL_GEN(opt_hpa_min_purge_interval_ms, opt_hpa_opts.min_purge_interval_ms,
uint64_t)
/*
* This will have to change before we publicly document this option; fxp_t and
* its representation are internal implementation details.
*/
CTL_RO_NL_GEN(opt_hpa_dirty_mult, opt_hpa_opts.dirty_mult, fxp_t)
CTL_RO_NL_GEN(opt_hpa_slab_max_alloc, opt_hpa_opts.slab_max_alloc, size_t)
/* HPA SEC options */
CTL_RO_NL_GEN(opt_hpa_sec_nshards, opt_hpa_sec_opts.nshards, size_t)
CTL_RO_NL_GEN(opt_hpa_sec_max_alloc, opt_hpa_sec_opts.max_alloc, size_t)
CTL_RO_NL_GEN(opt_hpa_sec_max_bytes, opt_hpa_sec_opts.max_bytes, size_t)
CTL_RO_NL_GEN(opt_hpa_sec_bytes_after_flush, opt_hpa_sec_opts.bytes_after_flush,
size_t)
CTL_RO_NL_GEN(opt_hpa_sec_batch_fill_extra, opt_hpa_sec_opts.batch_fill_extra,
size_t)
CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
const char *)
CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
@@ -1769,6 +2162,7 @@ CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
const char *)
CTL_RO_NL_GEN(opt_mutex_max_spin, opt_mutex_max_spin, int64_t)
CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t)
CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
@@ -1776,15 +2170,31 @@ CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
CTL_RO_NL_GEN(opt_stats_interval, opt_stats_interval, int64_t)
CTL_RO_NL_GEN(opt_stats_interval_opts, opt_stats_interval_opts, const char *)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_enable_cxx, opt_experimental_infallible_new,
opt_experimental_infallible_new, bool)
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
CTL_RO_NL_GEN(opt_tcache_max, opt_tcache_max, size_t)
CTL_RO_NL_GEN(opt_tcache_nslots_small_min, opt_tcache_nslots_small_min,
unsigned)
CTL_RO_NL_GEN(opt_tcache_nslots_small_max, opt_tcache_nslots_small_max,
unsigned)
CTL_RO_NL_GEN(opt_tcache_nslots_large, opt_tcache_nslots_large, unsigned)
CTL_RO_NL_GEN(opt_lg_tcache_nslots_mul, opt_lg_tcache_nslots_mul, ssize_t)
CTL_RO_NL_GEN(opt_tcache_gc_incr_bytes, opt_tcache_gc_incr_bytes, size_t)
CTL_RO_NL_GEN(opt_tcache_gc_delay_bytes, opt_tcache_gc_delay_bytes, size_t)
CTL_RO_NL_GEN(opt_lg_tcache_flush_small_div, opt_lg_tcache_flush_small_div,
unsigned)
CTL_RO_NL_GEN(opt_lg_tcache_flush_large_div, opt_lg_tcache_flush_large_div,
unsigned)
CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
size_t)
CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
@@ -1796,6 +2206,18 @@ CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak_error, opt_prof_leak_error, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_recent_alloc_max,
opt_prof_recent_alloc_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_stats, opt_prof_stats, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_sys_thread_name, opt_prof_sys_thread_name,
bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_time_res,
prof_time_res_mode_names[opt_prof_time_res], const char *)
CTL_RO_NL_CGEN(config_uaf_detection, opt_lg_san_uaf_align,
opt_lg_san_uaf_align, ssize_t)
CTL_RO_NL_GEN(opt_zero_realloc,
zero_realloc_mode_names[opt_zero_realloc_action], const char *)
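opt.zero_realloc surfaces the configured mode as a string. Reading a string-valued option follows the usual pattern of receiving a const char * (a sketch assuming the standard mallctl() call):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	const char *mode;
	size_t sz = sizeof(mode);
	if (mallctl("opt.zero_realloc", &mode, &sz, NULL, 0) == 0) {
		printf("zero_realloc mode: %s\n", mode);
	}
	return 0;
}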
/******************************************************************************/
@@ -1843,10 +2265,11 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
goto label_return;
}
/* Set new arena/tcache associations. */
arena_migrate(tsd, oldind, newind); arena_migrate(tsd, oldarena, newarena);
if (tcache_available(tsd)) {
tcache_arena_reassociate(tsd_tsdn(tsd),
tsd_tcachep_get(tsd), newarena); tsd_tcache_slowp_get(tsd), tsd_tcachep_get(tsd),
newarena);
}
}
@@ -1855,14 +2278,10 @@ label_return:
return ret;
}
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, CTL_RO_NL_GEN(thread_allocated, tsd_thread_allocated_get(tsd), uint64_t)
uint64_t) CTL_RO_NL_GEN(thread_allocatedp, tsd_thread_allocatedp_get(tsd), uint64_t *)
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get, CTL_RO_NL_GEN(thread_deallocated, tsd_thread_deallocated_get(tsd), uint64_t)
uint64_t *) CTL_RO_NL_GEN(thread_deallocatedp, tsd_thread_deallocatedp_get(tsd), uint64_t *)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
tsd_thread_deallocatedp_get, uint64_t *)
static int
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib,
@@ -1897,8 +2316,7 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
goto label_return;
}
READONLY(); NEITHER_READ_NOR_WRITE();
WRITEONLY();
tcache_flush(tsd); tcache_flush(tsd);
...@@ -1907,13 +2325,45 @@ label_return: ...@@ -1907,13 +2325,45 @@ label_return:
return ret; return ret;
} }
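/*
* "thread.peak.read": report the peak value reached by this thread's
* allocated-minus-deallocated byte counter since it was last reset.
*/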
static int
thread_peak_read_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
if (!config_stats) {
return ENOENT;
}
READONLY();
peak_event_update(tsd);
uint64_t result = peak_event_max(tsd);
READ(result, uint64_t);
ret = 0;
label_return:
return ret;
}
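/* "thread.peak.reset": zero the recorded peak so tracking starts over. */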
static int
thread_peak_reset_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
if (!config_stats) {
return ENOENT;
}
NEITHER_READ_NOR_WRITE();
peak_event_zero(tsd);
ret = 0;
label_return:
return ret;
}
static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -1950,8 +2400,12 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib,
return ENOENT;
}
oldval = opt_prof ? prof_thread_active_get(tsd) : false;
if (newp != NULL) {
if (!opt_prof) {
ret = ENOENT;
goto label_return;
}
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
@@ -1968,6 +2422,39 @@ label_return:
return ret;
}
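/*
* "thread.idle": hint that the calling thread will be idle for a while. Flush
* its tcache and, when arenas heavily outnumber CPUs, run decay on the
* thread's arena so dirty pages can be returned sooner.
*/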
static int
thread_idle_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
NEITHER_READ_NOR_WRITE();
if (tcache_available(tsd)) {
tcache_flush(tsd);
}
/*
* This heuristic is perhaps not the most well-considered. But it
* matches the only idling policy we have experience with in the status
* quo. Over time we should investigate more principled approaches.
*/
if (opt_narenas > ncpus * 2) {
arena_t *arena = arena_choose(tsd, NULL);
if (arena != NULL) {
arena_decay(tsd_tsdn(tsd), arena, false, true);
}
/*
* The missing arena case is not actually an error; a thread
* might be idle before it associates itself to one. This is
* unusual, but not wrong.
*/
}
ret = 0;
label_return:
return ret;
}
/******************************************************************************/
static int
@@ -1977,7 +2464,8 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned tcache_ind;
READONLY();
VERIFY_READ(unsigned);
if (tcaches_create(tsd, b0get(), &tcache_ind)) {
ret = EFAULT;
goto label_return;
}
@@ -1995,12 +2483,7 @@ tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned tcache_ind;
WRITEONLY();
ASSURED_WRITE(tcache_ind, unsigned);
tcaches_flush(tsd, tcache_ind);
ret = 0;
@@ -2015,12 +2498,7 @@ tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned tcache_ind;
WRITEONLY();
ASSURED_WRITE(tcache_ind, unsigned);
tcaches_destroy(tsd, tcache_ind);
ret = 0;
@@ -2105,8 +2583,7 @@ arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
int ret;
unsigned arena_ind;
NEITHER_READ_NOR_WRITE();
MIB_UNSIGNED(arena_ind, 1);
arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
@@ -2121,8 +2598,7 @@ arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
int ret;
unsigned arena_ind;
NEITHER_READ_NOR_WRITE();
MIB_UNSIGNED(arena_ind, 1);
arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
@@ -2137,8 +2613,7 @@ arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
arena_t **arena) {
int ret;
NEITHER_READ_NOR_WRITE();
MIB_UNSIGNED(*arena_ind, 1);
*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
@@ -2211,6 +2686,8 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
arena_t *arena;
ctl_arena_t *ctl_darena, *ctl_arena;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
newp, newlen, &arena_ind, &arena);
if (ret != 0) {
@@ -2241,6 +2718,8 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
assert(ret == 0);
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
}
@@ -2306,8 +2785,40 @@ label_return:
}
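/*
* "arena.<i>.oversize_threshold": read or write the arena's oversize_threshold
* (stored in its pac), using relaxed atomics and no ctl mutex.
*/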
static int
arena_i_oversize_threshold_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned arena_ind;
MIB_UNSIGNED(arena_ind, 1);
arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = atomic_load_zu(
&arena->pa_shard.pac.oversize_threshold, ATOMIC_RELAXED);
READ(oldval, size_t);
}
if (newp != NULL) {
if (newlen != sizeof(size_t)) {
ret = EINVAL;
goto label_return;
}
atomic_store_zu(&arena->pa_shard.pac.oversize_threshold,
*(size_t *)newp, ATOMIC_RELAXED);
}
ret = 0;
label_return:
return ret;
}
static int
arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
int ret;
unsigned arena_ind;
arena_t *arena;
@@ -2318,10 +2829,10 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EFAULT;
goto label_return;
}
extent_state_t state = dirty ? extent_state_dirty : extent_state_muzzy;
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_decay_ms_get(arena, state);
READ(oldval, ssize_t);
}
if (newp != NULL) {
@@ -2340,9 +2851,9 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
goto label_return;
}
}
if (arena_decay_ms_set(tsd_tsdn(tsd), arena, state,
*(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
@@ -2385,15 +2896,18 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
goto label_return;
}
old_extent_hooks =
(extent_hooks_t *)&ehooks_default_extent_hooks;
READ(old_extent_hooks, extent_hooks_t *);
if (newp != NULL) {
/* Initialize a new arena as a side effect. */
extent_hooks_t *new_extent_hooks
JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_extent_hooks, extent_hooks_t *);
arena_config_t config = arena_config_default;
config.extent_hooks = new_extent_hooks;
arena = arena_init(tsd_tsdn(tsd), arena_ind,
&config);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
@@ -2404,11 +2918,13 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
extent_hooks_t *new_extent_hooks
JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_extent_hooks, extent_hooks_t *);
old_extent_hooks = arena_set_extent_hooks(tsd,
arena, new_extent_hooks);
READ(old_extent_hooks, extent_hooks_t *);
} else {
old_extent_hooks =
ehooks_get_extent_hooks_ptr(
arena_get_ehooks(arena));
READ(old_extent_hooks, extent_hooks_t *);
}
}
@@ -2493,10 +3009,6 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
narenas = ctl_arenas->narenas;
READ(narenas, unsigned);
@@ -2582,14 +3094,14 @@ static int
arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned arena_ind;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
VERIFY_READ(unsigned);
arena_config_t config = arena_config_default;
WRITE(config.extent_hooks, extent_hooks_t *);
if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
ret = EAGAIN;
goto label_return;
}
@@ -2601,6 +3113,30 @@ label_return:
return ret;
}
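/*
* "experimental.arenas_create_ext": like "arenas.create", but the new value is
* a full arena_config_t supplied by the caller instead of just extent hooks.
*/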
static int
experimental_arenas_create_ext_ctl(tsd_t *tsd,
const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned arena_ind;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
arena_config_t config = arena_config_default;
VERIFY_READ(unsigned);
WRITE(config, arena_config_t);
if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
ret = EAGAIN;
goto label_return;
}
READ(arena_ind, unsigned);
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
}
static int
arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
@@ -2608,20 +3144,22 @@ arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
int ret;
unsigned arena_ind;
void *ptr;
edata_t *edata;
arena_t *arena;
ptr = NULL;
ret = EINVAL;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(ptr, void *);
edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr);
if (edata == NULL) {
goto label_return;
}
arena = arena_get_from_edata(edata);
if (arena == NULL) {
goto label_return;
}
arena_ind = arena_ind_get(arena);
READ(arena_ind, unsigned);
@@ -2646,6 +3184,10 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
}
if (newp != NULL) {
if (!opt_prof) {
ret = ENOENT;
goto label_return;
}
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
@@ -2653,7 +3195,8 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
*(bool *)newp);
} else {
oldval = opt_prof ? prof_thread_active_init_get(tsd_tsdn(tsd)) :
false;
}
READ(oldval, bool);
@@ -2669,7 +3212,8 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
bool oldval;
if (!config_prof) {
ret = ENOENT;
goto label_return;
}
if (newp != NULL) {
@@ -2677,9 +3221,20 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EINVAL;
goto label_return;
}
bool val = *(bool *)newp;
if (!opt_prof) {
if (val) {
ret = ENOENT;
goto label_return;
} else {
/* No change needed (already off). */
oldval = false;
}
} else {
oldval = prof_active_set(tsd_tsdn(tsd), val);
}
} else {
oldval = opt_prof ? prof_active_get(tsd_tsdn(tsd)) : false;
}
READ(oldval, bool);
@@ -2694,7 +3249,7 @@ prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
int ret;
const char *filename = NULL;
if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2722,13 +3277,17 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
}
if (newp != NULL) {
if (!opt_prof) {
ret = ENOENT;
goto label_return;
}
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
} else {
oldval = opt_prof ? prof_gdump_get(tsd_tsdn(tsd)) : false;
}
READ(oldval, bool);
@@ -2737,13 +3296,33 @@ label_return:
return ret;
}
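/*
* "prof.prefix": write-only; replaces the filename prefix used for subsequent
* profile dumps, serialized under the ctl mutex.
*/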
static int
prof_prefix_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const char *prefix = NULL;
if (!config_prof || !opt_prof) {
return ENOENT;
}
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITEONLY();
WRITE(prefix, const char *);
ret = prof_prefix_set(tsd_tsdn(tsd), prefix) ? EFAULT : 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
}
static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
size_t lg_sample = lg_prof_sample;
if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2770,7 +3349,7 @@ prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
const char *filename = NULL;
if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2790,7 +3369,7 @@ label_return:
static int
prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2801,6 +3380,87 @@ prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
return 0;
}
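/*
* "experimental.hooks.prof_backtrace" and "experimental.hooks.prof_dump": read
* or replace the hooks jemalloc invokes to collect backtraces and to emit
* profile dumps; writing requires profiling to be enabled.
*/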
static int
experimental_hooks_prof_backtrace_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
if (oldp == NULL && newp == NULL) {
ret = EINVAL;
goto label_return;
}
if (oldp != NULL) {
prof_backtrace_hook_t old_hook =
prof_backtrace_hook_get();
READ(old_hook, prof_backtrace_hook_t);
}
if (newp != NULL) {
if (!opt_prof) {
ret = ENOENT;
goto label_return;
}
prof_backtrace_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_hook, prof_backtrace_hook_t);
if (new_hook == NULL) {
ret = EINVAL;
goto label_return;
}
prof_backtrace_hook_set(new_hook);
}
ret = 0;
label_return:
return ret;
}
static int
experimental_hooks_prof_dump_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
if (oldp == NULL && newp == NULL) {
ret = EINVAL;
goto label_return;
}
if (oldp != NULL) {
prof_dump_hook_t old_hook =
prof_dump_hook_get();
READ(old_hook, prof_dump_hook_t);
}
if (newp != NULL) {
if (!opt_prof) {
ret = ENOENT;
goto label_return;
}
prof_dump_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_hook, prof_dump_hook_t);
prof_dump_hook_set(new_hook);
}
ret = 0;
label_return:
return ret;
}
/* For integration test purpose only. No plan to move out of experimental. */
static int
experimental_hooks_safety_check_abort_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
WRITEONLY();
if (newp != NULL) {
if (newlen != sizeof(safety_check_abort_hook_t)) {
ret = EINVAL;
goto label_return;
}
safety_check_abort_hook_t hook JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(hook, safety_check_abort_hook_t);
safety_check_set_abort(hook);
}
ret = 0;
label_return:
return ret;
}
/******************************************************************************/
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
@@ -2818,6 +3478,9 @@ CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
CTL_RO_CGEN(config_stats, stats_zero_reallocs,
atomic_load_zu(&zero_realloc_count, ATOMIC_RELAXED), size_t)
CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
ssize_t)
@@ -2830,55 +3493,61 @@ CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
arenas_i(mib[2])->astats->astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.retained, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
arenas_i(mib[2])->astats->astats.pa_shard_stats.edata_avail, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_base,
arenas_i(mib[2])->astats->astats.base,
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
arenas_i(mib[2])->astats->astats.metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
arenas_i(mib[2])->astats->astats.tcache_bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_stashed_bytes,
arenas_i(mib[2])->astats->astats.tcache_stashed_bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
arenas_i(mib[2])->astats->astats.resident,
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
atomic_load_zu(
&arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.abandoned_vm,
ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_sec_bytes,
arenas_i(mib[2])->astats->secstats.bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
@@ -2892,27 +3561,21 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills,
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes,
arenas_i(mib[2])->astats->nflushes_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
arenas_i(mib[2])->astats->astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
arenas_i(mib[2])->astats->astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
arenas_i(mib[2])->astats->astats.nrequests_large, uint64_t)
/*
* Note: "nmalloc_large" here instead of "nfills" in the read. This is
* intentional (large has no batch fill).
*/
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills,
arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes,
arenas_i(mib[2])->astats->astats.nflushes_large, uint64_t)
/* Lock profiling related APIs below. */
#define RO_MUTEX_CTL_GEN(n, l) \
@@ -2972,9 +3635,13 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
}
if (config_prof && opt_prof) {
MUTEX_PROF_RESET(bt2gctx_mtx);
MUTEX_PROF_RESET(tdatas_mtx);
MUTEX_PROF_RESET(prof_dump_mtx);
MUTEX_PROF_RESET(prof_recent_alloc_mtx);
MUTEX_PROF_RESET(prof_recent_dump_mtx);
MUTEX_PROF_RESET(prof_stats_mtx);
}
/* Per arena mutexes. */
unsigned n = narenas_total_get();
@@ -2984,18 +3651,18 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
continue;
}
MUTEX_PROF_RESET(arena->large_mtx);
MUTEX_PROF_RESET(arena->pa_shard.edata_cache.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_dirty.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_muzzy.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_retained.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.decay_dirty.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.decay_muzzy.mtx);
MUTEX_PROF_RESET(arena->tcache_ql_mtx);
MUTEX_PROF_RESET(arena->base->mtx);
for (szind_t j = 0; j < SC_NBINS; j++) {
for (unsigned k = 0; k < bin_infos[j].n_shards; k++) {
bin_t *bin = arena_get_bin(arena, j, k);
MUTEX_PROF_RESET(bin->lock);
}
}
@@ -3005,25 +3672,25 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curregs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nfills, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curslabs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs,
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nonfull_slabs, size_t)
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
@@ -3035,13 +3702,13 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
@@ -3056,29 +3723,17 @@ stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty,
arenas_i(mib[2])->astats->estats[mib[4]].ndirty, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy,
arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained,
arenas_i(mib[2])->astats->estats[mib[4]].nretained, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes,
arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes,
arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes,
arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes, size_t);
static const ctl_named_node_t *
stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
@@ -3089,6 +3744,82 @@ stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
return super_stats_arenas_i_extents_j_node;
}
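/*
* HPA (hugepage allocator) shard stats. The trailing [0]/[1] index selects the
* nonhuge vs. huge variant of each pageslab counter.
*/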
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurge_passes,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurge_passes, uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurges,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurges, uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nhugifies,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.nhugifies, uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_ndehugifies,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.ndehugifies, uint64_t);
/* Full, nonhuge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].npageslabs,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].ndirty, size_t);
/* Full, huge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].npageslabs,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].ndirty, size_t);
/* Empty, nonhuge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].npageslabs,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].ndirty, size_t);
/* Empty, huge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].npageslabs,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].ndirty, size_t);
/* Nonfull, nonhuge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].npageslabs,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].nactive,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].ndirty,
size_t);
/* Nonfull, huge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].npageslabs,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].nactive,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].ndirty,
size_t);
static const ctl_named_node_t *
stats_arenas_i_hpa_shard_nonfull_slabs_j_index(tsdn_t *tsdn, const size_t *mib,
size_t miblen, size_t j) {
if (j >= PSSET_NPSIZES) {
return NULL;
}
return super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node;
}
static bool
ctl_arenas_i_verify(size_t i) {
size_t a = arenas_i2a_impl(i, true, true);
@@ -3161,6 +3892,32 @@ label_return:
return ret;
}
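/*
* "experimental.thread.activity_callback": read or replace the calling
* thread's activity callback thunk (callback plus opaque pointer) kept in TSD.
*/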
static int
experimental_thread_activity_callback_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
if (!config_stats) {
return ENOENT;
}
activity_callback_thunk_t t_old = tsd_activity_callback_thunk_get(tsd);
READ(t_old, activity_callback_thunk_t);
if (newp != NULL) {
/*
* This initialization is unnecessary. If it's omitted, though,
* clang gets confused and warns on the subsequent use of t_new.
*/
activity_callback_thunk_t t_new = {NULL, NULL};
WRITE(t_new, activity_callback_thunk_t);
tsd_activity_callback_thunk_set(tsd, t_new);
}
ret = 0;
label_return:
return ret;
}
/*
* Output six memory utilization entries for an input pointer, the first one of
* type (void *) and the remaining five of type size_t, describing the following
@@ -3178,7 +3935,8 @@ label_return:
* otherwise their values are undefined.
*
* This API is mainly intended for small class allocations, where extents are
* used as slab. Note that if the bin the extent belongs to is completely
* full, "(a)" will be NULL.
*
* In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)"
* will be zero (if stats are enabled; otherwise undefined). The other three
@@ -3232,11 +3990,11 @@ experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
assert(sizeof(inspect_extent_util_stats_verbose_t)
== sizeof(void *) + sizeof(size_t) * 5);
if (oldp == NULL || oldlenp == NULL
|| *oldlenp != sizeof(inspect_extent_util_stats_verbose_t)
|| newp == NULL) {
ret = EINVAL;
goto label_return;
@@ -3244,9 +4002,9 @@ experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
void *ptr = NULL;
WRITE(ptr, void *);
inspect_extent_util_stats_verbose_t *util_stats
= (inspect_extent_util_stats_verbose_t *)oldp;
inspect_extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
&util_stats->nfree, &util_stats->nregs, &util_stats->size,
&util_stats->bin_nfree, &util_stats->bin_nregs,
&util_stats->slabcur_addr);
@@ -3357,21 +4115,22 @@ experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
assert(sizeof(inspect_extent_util_stats_t) == sizeof(size_t) * 3);
const size_t len = newlen / sizeof(const void *);
if (oldp == NULL || oldlenp == NULL || newp == NULL || newlen == 0
|| newlen != len * sizeof(const void *)
|| *oldlenp != len * sizeof(inspect_extent_util_stats_t)) {
ret = EINVAL;
goto label_return;
}
void **ptrs = (void **)newp;
inspect_extent_util_stats_t *util_stats =
(inspect_extent_util_stats_t *)oldp;
size_t i;
for (i = 0; i < len; ++i) {
inspect_extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
&util_stats[i].nfree, &util_stats[i].nregs,
&util_stats[i].size);
}
@@ -3420,7 +4179,7 @@ experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib,
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || \
defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER)
/* Expose the underlying counter for fast read. */
pactivep = (size_t *)&(arena->pa_shard.nactive.repr);
READ(pactivep, size_t *);
ret = 0;
#else
@@ -3433,3 +4192,223 @@ label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
}
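/*
* "experimental.prof_recent.alloc_max": read, and optionally update, the limit
* on how many recent sampled allocations are retained for dumping (-1 means
* unlimited).
*/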
static int
experimental_prof_recent_alloc_max_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
if (!(config_prof && opt_prof)) {
ret = ENOENT;
goto label_return;
}
ssize_t old_max;
if (newp != NULL) {
if (newlen != sizeof(ssize_t)) {
ret = EINVAL;
goto label_return;
}
ssize_t max = *(ssize_t *)newp;
if (max < -1) {
ret = EINVAL;
goto label_return;
}
old_max = prof_recent_alloc_max_ctl_write(tsd, max);
} else {
old_max = prof_recent_alloc_max_ctl_read();
}
READ(old_max, ssize_t);
ret = 0;
label_return:
return ret;
}
typedef struct write_cb_packet_s write_cb_packet_t;
struct write_cb_packet_s {
write_cb_t *write_cb;
void *cbopaque;
};
static int
experimental_prof_recent_alloc_dump_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
if (!(config_prof && opt_prof)) {
ret = ENOENT;
goto label_return;
}
assert(sizeof(write_cb_packet_t) == sizeof(void *) * 2);
WRITEONLY();
write_cb_packet_t write_cb_packet;
ASSURED_WRITE(write_cb_packet, write_cb_packet_t);
prof_recent_alloc_dump(tsd, write_cb_packet.write_cb,
write_cb_packet.cbopaque);
ret = 0;
label_return:
return ret;
}
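/*
* "experimental.batch_alloc": the new value is a batch_alloc_packet_t naming
* the output array, the number of allocations requested, their size, and the
* allocation flags; the old value reports how many slots were actually filled.
*/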
typedef struct batch_alloc_packet_s batch_alloc_packet_t;
struct batch_alloc_packet_s {
void **ptrs;
size_t num;
size_t size;
int flags;
};
static int
experimental_batch_alloc_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
VERIFY_READ(size_t);
batch_alloc_packet_t batch_alloc_packet;
ASSURED_WRITE(batch_alloc_packet, batch_alloc_packet_t);
size_t filled = batch_alloc(batch_alloc_packet.ptrs,
batch_alloc_packet.num, batch_alloc_packet.size,
batch_alloc_packet.flags);
READ(filled, size_t);
ret = 0;
label_return:
return ret;
}
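/*
* "prof.stats.bins.<i>.live" / ".accum": per-size-class profiling counters;
* only available when profiling and opt.prof_stats are enabled.
*/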
static int
prof_stats_bins_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned binind;
prof_stats_t stats;
if (!(config_prof && opt_prof && opt_prof_stats)) {
ret = ENOENT;
goto label_return;
}
READONLY();
MIB_UNSIGNED(binind, 3);
if (binind >= SC_NBINS) {
ret = EINVAL;
goto label_return;
}
prof_stats_get_live(tsd, (szind_t)binind, &stats);
READ(stats, prof_stats_t);
ret = 0;
label_return:
return ret;
}
static int
prof_stats_bins_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned binind;
prof_stats_t stats;
if (!(config_prof && opt_prof && opt_prof_stats)) {
ret = ENOENT;
goto label_return;
}
READONLY();
MIB_UNSIGNED(binind, 3);
if (binind >= SC_NBINS) {
ret = EINVAL;
goto label_return;
}
prof_stats_get_accum(tsd, (szind_t)binind, &stats);
READ(stats, prof_stats_t);
ret = 0;
label_return:
return ret;
}
static const ctl_named_node_t *
prof_stats_bins_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t i) {
if (!(config_prof && opt_prof && opt_prof_stats)) {
return NULL;
}
if (i >= SC_NBINS) {
return NULL;
}
return super_prof_stats_bins_i_node;
}
static int
prof_stats_lextents_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned lextent_ind;
prof_stats_t stats;
if (!(config_prof && opt_prof && opt_prof_stats)) {
ret = ENOENT;
goto label_return;
}
READONLY();
MIB_UNSIGNED(lextent_ind, 3);
if (lextent_ind >= SC_NSIZES - SC_NBINS) {
ret = EINVAL;
goto label_return;
}
prof_stats_get_live(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
READ(stats, prof_stats_t);
ret = 0;
label_return:
return ret;
}
static int
prof_stats_lextents_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned lextent_ind;
prof_stats_t stats;
if (!(config_prof && opt_prof && opt_prof_stats)) {
ret = ENOENT;
goto label_return;
}
READONLY();
MIB_UNSIGNED(lextent_ind, 3);
if (lextent_ind >= SC_NSIZES - SC_NBINS) {
ret = EINVAL;
goto label_return;
}
prof_stats_get_accum(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
READ(stats, prof_stats_t);
ret = 0;
label_return:
return ret;
}
static const ctl_named_node_t *
prof_stats_lextents_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t i) {
if (!(config_prof && opt_prof && opt_prof_stats)) {
return NULL;
}
if (i >= SC_NSIZES - SC_NBINS) {
return NULL;
}
return super_prof_stats_lextents_i_node;
}
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/decay.h"
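/*
* Precomputed smoothstep curve values in fixed point (scaled by
* 2^SMOOTHSTEP_BFP), one entry per epoch of the decay window.
*/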
static const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y) \
h,
SMOOTHSTEP
#undef STEP
};
/*
* Generate a new deadline that is uniformly random within the next epoch after
* the current one.
*/
void
decay_deadline_init(decay_t *decay) {
nstime_copy(&decay->deadline, &decay->epoch);
nstime_add(&decay->deadline, &decay->interval);
if (decay_ms_read(decay) > 0) {
nstime_t jitter;
nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
nstime_ns(&decay->interval)));
nstime_add(&decay->deadline, &jitter);
}
}
void
decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) {
atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
if (decay_ms > 0) {
nstime_init(&decay->interval, (uint64_t)decay_ms *
KQU(1000000));
nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
}
nstime_copy(&decay->epoch, cur_time);
decay->jitter_state = (uint64_t)(uintptr_t)decay;
decay_deadline_init(decay);
decay->nunpurged = 0;
memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}
bool
decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) {
if (config_debug) {
for (size_t i = 0; i < sizeof(decay_t); i++) {
assert(((char *)decay)[i] == 0);
}
decay->ceil_npages = 0;
}
if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
malloc_mutex_rank_exclusive)) {
return true;
}
decay->purging = false;
decay_reinit(decay, cur_time, decay_ms);
return false;
}
bool
decay_ms_valid(ssize_t decay_ms) {
if (decay_ms < -1) {
return false;
}
if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
KQU(1000)) {
return true;
}
return false;
}
static void
decay_maybe_update_time(decay_t *decay, nstime_t *new_time) {
if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch,
new_time) > 0)) {
/*
* Time went backwards. Move the epoch back in time and
* generate a new deadline, with the expectation that time
* typically flows forward for long enough periods of time that
* epochs complete. Unfortunately, this strategy is susceptible
* to clock jitter triggering premature epoch advances, but
* clock jitter estimation and compensation isn't feasible here
* because calls into this code are event-driven.
*/
nstime_copy(&decay->epoch, new_time);
decay_deadline_init(decay);
} else {
/* Verify that time does not go backwards. */
assert(nstime_compare(&decay->epoch, new_time) <= 0);
}
}
static size_t
decay_backlog_npages_limit(const decay_t *decay) {
/*
* For each element of decay_backlog, multiply by the corresponding
* fixed-point smoothstep decay factor. Sum the products, then divide
* to round down to the nearest whole number of pages.
*/
uint64_t sum = 0;
for (unsigned i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
sum += decay->backlog[i] * h_steps[i];
}
size_t npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
return npages_limit_backlog;
}
/*
* Update backlog, assuming that 'nadvance_u64' time intervals have passed.
* Trailing 'nadvance_u64' records should be erased and 'current_npages' is
* placed as the newest record.
*/
static void
decay_backlog_update(decay_t *decay, uint64_t nadvance_u64,
size_t current_npages) {
if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
sizeof(size_t));
} else {
size_t nadvance_z = (size_t)nadvance_u64;
assert((uint64_t)nadvance_z == nadvance_u64);
memmove(decay->backlog, &decay->backlog[nadvance_z],
(SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
if (nadvance_z > 1) {
memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
}
}
size_t npages_delta = (current_npages > decay->nunpurged) ?
current_npages - decay->nunpurged : 0;
decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
if (config_debug) {
if (current_npages > decay->ceil_npages) {
decay->ceil_npages = current_npages;
}
size_t npages_limit = decay_backlog_npages_limit(decay);
assert(decay->ceil_npages >= npages_limit);
if (decay->ceil_npages > npages_limit) {
decay->ceil_npages = npages_limit;
}
}
}
static inline bool
decay_deadline_reached(const decay_t *decay, const nstime_t *time) {
return (nstime_compare(&decay->deadline, time) <= 0);
}
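/*
* Estimate how many of 'npages_new' pages will have been purged once 'time'
* has elapsed, by weighting them with the portion of the smoothstep curve
* consumed by then.
*/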
uint64_t
decay_npages_purge_in(decay_t *decay, nstime_t *time, size_t npages_new) {
uint64_t decay_interval_ns = decay_epoch_duration_ns(decay);
size_t n_epoch = (size_t)(nstime_ns(time) / decay_interval_ns);
uint64_t npages_purge;
if (n_epoch >= SMOOTHSTEP_NSTEPS) {
npages_purge = npages_new;
} else {
uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
assert(h_steps_max >=
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npages_purge = npages_new * (h_steps_max -
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npages_purge >>= SMOOTHSTEP_BFP;
}
return npages_purge;
}
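/*
* Advance the decay epoch if its deadline has passed: fold the elapsed
* intervals into the backlog, recompute npages_limit, and report whether an
* advance happened.
*/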
bool
decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
size_t npages_current) {
/* Handle possible non-monotonicity of time. */
decay_maybe_update_time(decay, new_time);
if (!decay_deadline_reached(decay, new_time)) {
return false;
}
nstime_t delta;
nstime_copy(&delta, new_time);
nstime_subtract(&delta, &decay->epoch);
uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
assert(nadvance_u64 > 0);
/* Add nadvance_u64 decay intervals to epoch. */
nstime_copy(&delta, &decay->interval);
nstime_imultiply(&delta, nadvance_u64);
nstime_add(&decay->epoch, &delta);
/* Set a new deadline. */
decay_deadline_init(decay);
/* Update the backlog. */
decay_backlog_update(decay, nadvance_u64, npages_current);
decay->npages_limit = decay_backlog_npages_limit(decay);
decay->nunpurged = (decay->npages_limit > npages_current) ?
decay->npages_limit : npages_current;
return true;
}
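
/*
 * Illustrative sketch (not part of the original source): the caller-side
 * pattern around decay_maybe_advance_epoch().  decay->mtx protects the epoch
 * state, so it is taken around the call; purge_excess() is a hypothetical
 * stand-in for the arena-level purging machinery.
 */
static void
example_decay_tick(tsdn_t *tsdn, decay_t *decay, nstime_t *now,
    size_t npages_current) {
	malloc_mutex_lock(tsdn, &decay->mtx);
	if (decay_maybe_advance_epoch(decay, now, npages_current)) {
		/*
		 * At least one epoch boundary passed; the backlog and
		 * npages_limit were refreshed, so anything above
		 * decay->npages_limit is now eligible for purging.
		 */
		/* purge_excess(tsdn, decay->npages_limit); */
	}
	malloc_mutex_unlock(tsdn, &decay->mtx);
}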
/*
* Calculate how many pages should be purged after 'interval'.
*
* First, calculate how many pages should remain at the moment, then subtract
* the number of pages that should remain after 'interval'. The difference is
* how many pages should be purged until then.
*
* The number of pages that should remain at a specific moment is calculated
* like this: pages(now) = sum(backlog[i] * h_steps[i]). After 'interval'
* passes, backlog would shift 'interval' positions to the left and sigmoid
* curve would be applied starting with backlog[interval].
*
* The implementation doesn't directly map to the description, but it's
* essentially the same calculation, optimized to avoid iterating over
* [interval..SMOOTHSTEP_NSTEPS) twice.
*/
static inline size_t
decay_npurge_after_interval(decay_t *decay, size_t interval) {
size_t i;
uint64_t sum = 0;
for (i = 0; i < interval; i++) {
sum += decay->backlog[i] * h_steps[i];
}
for (; i < SMOOTHSTEP_NSTEPS; i++) {
sum += decay->backlog[i] *
(h_steps[i] - h_steps[i - interval]);
}
return (size_t)(sum >> SMOOTHSTEP_BFP);
}
uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current,
uint64_t npages_threshold) {
if (!decay_gradually(decay)) {
return DECAY_UNBOUNDED_TIME_TO_PURGE;
}
uint64_t decay_interval_ns = decay_epoch_duration_ns(decay);
assert(decay_interval_ns > 0);
if (npages_current == 0) {
unsigned i;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
if (decay->backlog[i] > 0) {
break;
}
}
if (i == SMOOTHSTEP_NSTEPS) {
/* No dirty pages recorded. Sleep indefinitely. */
return DECAY_UNBOUNDED_TIME_TO_PURGE;
}
}
if (npages_current <= npages_threshold) {
/* Use max interval. */
return decay_interval_ns * SMOOTHSTEP_NSTEPS;
}
/* Minimal 2 intervals to ensure reaching next epoch deadline. */
size_t lb = 2;
size_t ub = SMOOTHSTEP_NSTEPS;
size_t npurge_lb, npurge_ub;
npurge_lb = decay_npurge_after_interval(decay, lb);
if (npurge_lb > npages_threshold) {
return decay_interval_ns * lb;
}
npurge_ub = decay_npurge_after_interval(decay, ub);
if (npurge_ub < npages_threshold) {
return decay_interval_ns * ub;
}
unsigned n_search = 0;
size_t target, npurge;
while ((npurge_lb + npages_threshold < npurge_ub) && (lb + 2 < ub)) {
target = (lb + ub) / 2;
npurge = decay_npurge_after_interval(decay, target);
if (npurge > npages_threshold) {
ub = target;
npurge_ub = npurge;
} else {
lb = target;
npurge_lb = npurge;
}
assert(n_search < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
++n_search;
}
return decay_interval_ns * (ub + lb) / 2;
}
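
/*
 * Illustrative sketch (not part of the original source): turning
 * decay_ns_until_purge() into a sleep interval for a background worker.  The
 * 1024-page threshold and the function name are arbitrary/hypothetical.
 */
static uint64_t
example_sleep_ns(decay_t *decay, size_t npages_current) {
	uint64_t ns = decay_ns_until_purge(decay, npages_current,
	    /* npages_threshold */ 1024);
	if (ns == DECAY_UNBOUNDED_TIME_TO_PURGE) {
		/*
		 * Gradual decay is off or nothing is pending; sleep until
		 * explicitly woken.
		 */
		return UINT64_MAX;
	}
	return ns;
}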
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/san.h"
bool
ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, unsigned ind,
bool delay_coalesce) {
if (malloc_mutex_init(&ecache->mtx, "extents", WITNESS_RANK_EXTENTS,
malloc_mutex_rank_exclusive)) {
return true;
}
ecache->state = state;
ecache->ind = ind;
ecache->delay_coalesce = delay_coalesce;
eset_init(&ecache->eset, state);
eset_init(&ecache->guarded_eset, state);
return false;
}
void
ecache_prefork(tsdn_t *tsdn, ecache_t *ecache) {
malloc_mutex_prefork(tsdn, &ecache->mtx);
}
void
ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache) {
malloc_mutex_postfork_parent(tsdn, &ecache->mtx);
}
void
ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache) {
malloc_mutex_postfork_child(tsdn, &ecache->mtx);
}
#define JEMALLOC_HASH_C_
#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/jemalloc_internal_includes.h"
ph_gen(, edata_avail, edata_t, avail_link,
edata_esnead_comp)
ph_gen(, edata_heap, edata_t, heap_link, edata_snad_comp)
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
bool
edata_cache_init(edata_cache_t *edata_cache, base_t *base) {
edata_avail_new(&edata_cache->avail);
/*
* This is not strictly necessary, since the edata_cache_t is only
* created inside an arena, which is zeroed on creation. But this is
* handy as a safety measure.
*/
atomic_store_zu(&edata_cache->count, 0, ATOMIC_RELAXED);
if (malloc_mutex_init(&edata_cache->mtx, "edata_cache",
WITNESS_RANK_EDATA_CACHE, malloc_mutex_rank_exclusive)) {
return true;
}
edata_cache->base = base;
return false;
}
edata_t *
edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache) {
malloc_mutex_lock(tsdn, &edata_cache->mtx);
edata_t *edata = edata_avail_first(&edata_cache->avail);
if (edata == NULL) {
malloc_mutex_unlock(tsdn, &edata_cache->mtx);
return base_alloc_edata(tsdn, edata_cache->base);
}
edata_avail_remove(&edata_cache->avail, edata);
atomic_load_sub_store_zu(&edata_cache->count, 1);
malloc_mutex_unlock(tsdn, &edata_cache->mtx);
return edata;
}
void
edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata) {
malloc_mutex_lock(tsdn, &edata_cache->mtx);
edata_avail_insert(&edata_cache->avail, edata);
atomic_load_add_store_zu(&edata_cache->count, 1);
malloc_mutex_unlock(tsdn, &edata_cache->mtx);
}
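
/*
 * Illustrative sketch (not part of the original source): a typical
 * borrow/return cycle.  edata_cache_get() falls back to allocating fresh
 * metadata from the base allocator when the cache is empty, so NULL means a
 * genuine out-of-memory condition.  The function name is hypothetical.
 */
static bool
example_edata_cache_roundtrip(tsdn_t *tsdn, edata_cache_t *edata_cache) {
	edata_t *edata = edata_cache_get(tsdn, edata_cache);
	if (edata == NULL) {
		return true;	/* Base allocation failed. */
	}
	/* ... initialize and use the edata_t for a new extent ... */
	edata_cache_put(tsdn, edata_cache, edata);
	return false;
}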
void
edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache) {
malloc_mutex_prefork(tsdn, &edata_cache->mtx);
}
void
edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache) {
malloc_mutex_postfork_parent(tsdn, &edata_cache->mtx);
}
void
edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache) {
malloc_mutex_postfork_child(tsdn, &edata_cache->mtx);
}
void
edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback) {
edata_list_inactive_init(&ecs->list);
ecs->fallback = fallback;
ecs->disabled = false;
}
static void
edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn,
edata_cache_fast_t *ecs) {
edata_t *edata;
malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
edata = edata_avail_remove_first(&ecs->fallback->avail);
if (edata == NULL) {
break;
}
edata_list_inactive_append(&ecs->list, edata);
atomic_load_sub_store_zu(&ecs->fallback->count, 1);
}
malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
}
edata_t *
edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_EDATA_CACHE, 0);
if (ecs->disabled) {
assert(edata_list_inactive_first(&ecs->list) == NULL);
return edata_cache_get(tsdn, ecs->fallback);
}
edata_t *edata = edata_list_inactive_first(&ecs->list);
if (edata != NULL) {
edata_list_inactive_remove(&ecs->list, edata);
return edata;
}
/* Slow path; requires synchronization. */
edata_cache_fast_try_fill_from_fallback(tsdn, ecs);
edata = edata_list_inactive_first(&ecs->list);
if (edata != NULL) {
edata_list_inactive_remove(&ecs->list, edata);
} else {
/*
* Slowest path (fallback was also empty); allocate something
* new.
*/
edata = base_alloc_edata(tsdn, ecs->fallback->base);
}
return edata;
}
static void
edata_cache_fast_flush_all(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
/*
* You could imagine smarter cache management policies (like
* only flushing down to some threshold in anticipation of
* future get requests). But just flushing everything provides
* a good opportunity to defrag too, and lets us share code between the
* flush and disable pathways.
*/
edata_t *edata;
size_t nflushed = 0;
malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
while ((edata = edata_list_inactive_first(&ecs->list)) != NULL) {
edata_list_inactive_remove(&ecs->list, edata);
edata_avail_insert(&ecs->fallback->avail, edata);
nflushed++;
}
atomic_load_add_store_zu(&ecs->fallback->count, nflushed);
malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
}
void
edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, edata_t *edata) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_EDATA_CACHE, 0);
if (ecs->disabled) {
assert(edata_list_inactive_first(&ecs->list) == NULL);
edata_cache_put(tsdn, ecs->fallback, edata);
return;
}
/*
* Prepend rather than append, to do LIFO ordering in the hopes of some
* cache locality.
*/
edata_list_inactive_prepend(&ecs->list, edata);
}
void
edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
edata_cache_fast_flush_all(tsdn, ecs);
ecs->disabled = true;
}
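
/*
 * Illustrative sketch (not part of the original source): the lifecycle of the
 * fast cache.  Gets and puts operate on the local inactive list without
 * taking the fallback mutex; disabling flushes everything back and makes
 * later calls go straight to the shared edata_cache_t.  The function name is
 * hypothetical.
 */
static void
example_edata_cache_fast_lifecycle(tsdn_t *tsdn, edata_cache_t *fallback) {
	edata_cache_fast_t ecs;
	edata_cache_fast_init(&ecs, fallback);
	edata_t *edata = edata_cache_fast_get(tsdn, &ecs);
	if (edata != NULL) {
		/* ... use the metadata ... */
		edata_cache_fast_put(tsdn, &ecs, edata);
	}
	/* Tear down: flush cached entries and bypass the front end. */
	edata_cache_fast_disable(tsdn, &ecs);
}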
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/extent_mmap.h"
void
ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind) {
/* All other hooks are optional; this one is not. */
assert(extent_hooks->alloc != NULL);
ehooks->ind = ind;
ehooks_set_extent_hooks_ptr(ehooks, extent_hooks);
}
/*
* If the caller specifies (!*zero), it is still possible to receive zeroed
* memory, in which case *zero is toggled to true. arena_extent_alloc() takes
* advantage of this to avoid demanding zeroed extents, but taking advantage of
* them if they are returned.
*/
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
void *ret;
assert(size != 0);
assert(alignment != 0);
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
return ret;
}
/* mmap. */
if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
!= NULL) {
return ret;
}
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
return ret;
}
/* All strategies for allocation failed. */
return NULL;
}
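
/*
 * Illustrative sketch (not part of the original source): the *zero contract
 * described above.  The caller does not demand zeroed memory, but records
 * whether the OS happened to return it so a later memset can be skipped.  The
 * function name is hypothetical.
 */
static void *
example_alloc_maybe_zeroed(tsdn_t *tsdn, arena_t *arena, size_t size,
    bool *was_zeroed) {
	bool zero = false;
	bool commit = true;
	void *ptr = extent_alloc_core(tsdn, arena, /* new_addr */ NULL, size,
	    PAGE, &zero, &commit, dss_prec_disabled);
	*was_zeroed = zero;	/* May have been toggled to true. */
	return ptr;
}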
void *
ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
arena_t *arena = arena_get(tsdn, arena_ind, false);
/* NULL arena indicates arena_create. */
assert(arena != NULL || alignment == HUGEPAGE);
dss_prec_t dss = (arena == NULL) ? dss_prec_disabled :
(dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED);
void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment,
zero, commit, dss);
if (have_madvise_huge && ret) {
pages_set_thp_state(ret, size);
}
return ret;
}
static void *
ehooks_default_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
return ehooks_default_alloc_impl(tsdn_fetch(), new_addr, size,
ALIGNMENT_CEILING(alignment, PAGE), zero, commit, arena_ind);
}
bool
ehooks_default_dalloc_impl(void *addr, size_t size) {
if (!have_dss || !extent_in_dss(addr)) {
return extent_dalloc_mmap(addr, size);
}
return true;
}
static bool
ehooks_default_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool committed, unsigned arena_ind) {
return ehooks_default_dalloc_impl(addr, size);
}
void
ehooks_default_destroy_impl(void *addr, size_t size) {
if (!have_dss || !extent_in_dss(addr)) {
pages_unmap(addr, size);
}
}
static void
ehooks_default_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool committed, unsigned arena_ind) {
ehooks_default_destroy_impl(addr, size);
}
bool
ehooks_default_commit_impl(void *addr, size_t offset, size_t length) {
return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
static bool
ehooks_default_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
return ehooks_default_commit_impl(addr, offset, length);
}
bool
ehooks_default_decommit_impl(void *addr, size_t offset, size_t length) {
return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
static bool
ehooks_default_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
return ehooks_default_decommit_impl(addr, offset, length);
}
#ifdef PAGES_CAN_PURGE_LAZY
bool
ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length) {
return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
static bool
ehooks_default_purge_lazy(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
assert(addr != NULL);
assert((offset & PAGE_MASK) == 0);
assert(length != 0);
assert((length & PAGE_MASK) == 0);
return ehooks_default_purge_lazy_impl(addr, offset, length);
}
#endif
#ifdef PAGES_CAN_PURGE_FORCED
bool
ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length) {
return pages_purge_forced((void *)((uintptr_t)addr +
(uintptr_t)offset), length);
}
static bool
ehooks_default_purge_forced(extent_hooks_t *extent_hooks, void *addr,
size_t size, size_t offset, size_t length, unsigned arena_ind) {
assert(addr != NULL);
assert((offset & PAGE_MASK) == 0);
assert(length != 0);
assert((length & PAGE_MASK) == 0);
return ehooks_default_purge_forced_impl(addr, offset, length);
}
#endif
bool
ehooks_default_split_impl() {
if (!maps_coalesce) {
/*
* Without retain, only whole regions can be purged (required by
* MEM_RELEASE on Windows) -- therefore disallow splitting. See
* comments in extent_head_no_merge().
*/
return !opt_retain;
}
return false;
}
static bool
ehooks_default_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
return ehooks_default_split_impl();
}
bool
ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b) {
assert(addr_a < addr_b);
/*
* For non-DSS cases --
* a) W/o maps_coalesce, merge is not always allowed (Windows):
* 1) w/o retain, never merge (first branch below).
* 2) with retain, only merge extents from the same VirtualAlloc
* region (in which case MEM_DECOMMIT is utilized for purging).
*
* b) With maps_coalesce, it's always possible to merge.
* 1) w/o retain, always allow merge (only about dirty / muzzy).
* 2) with retain, to preserve the SN / first-fit, merge is still
* disallowed if b is a head extent, i.e. no merging across
* different mmap regions.
*
* a2) and b2) are implemented in emap_try_acquire_edata_neighbor, and
* sanity checked in the second branch below.
*/
if (!maps_coalesce && !opt_retain) {
return true;
}
if (config_debug) {
edata_t *a = emap_edata_lookup(tsdn, &arena_emap_global,
addr_a);
bool head_a = edata_is_head_get(a);
edata_t *b = emap_edata_lookup(tsdn, &arena_emap_global,
addr_b);
bool head_b = edata_is_head_get(b);
emap_assert_mapped(tsdn, &arena_emap_global, a);
emap_assert_mapped(tsdn, &arena_emap_global, b);
assert(extent_neighbor_head_state_mergeable(head_a, head_b,
/* forward */ true));
}
if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
return true;
}
return false;
}
bool
ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
tsdn_t *tsdn = tsdn_fetch();
return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
}
void
ehooks_default_zero_impl(void *addr, size_t size) {
/*
* By default, we try to zero out memory using OS-provided demand-zeroed
* pages. If the user has specifically requested hugepages, though, we
* don't want to purge in the middle of a hugepage (which would break it
* up), so we act conservatively and use memset.
*/
bool needs_memset = true;
if (opt_thp != thp_mode_always) {
needs_memset = pages_purge_forced(addr, size);
}
if (needs_memset) {
memset(addr, 0, size);
}
}
void
ehooks_default_guard_impl(void *guard1, void *guard2) {
pages_mark_guards(guard1, guard2);
}
void
ehooks_default_unguard_impl(void *guard1, void *guard2) {
pages_unmark_guards(guard1, guard2);
}
const extent_hooks_t ehooks_default_extent_hooks = {
ehooks_default_alloc,
ehooks_default_dalloc,
ehooks_default_destroy,
ehooks_default_commit,
ehooks_default_decommit,
#ifdef PAGES_CAN_PURGE_LAZY
ehooks_default_purge_lazy,
#else
NULL,
#endif
#ifdef PAGES_CAN_PURGE_FORCED
ehooks_default_purge_forced,
#else
NULL,
#endif
ehooks_default_split,
ehooks_default_merge
};
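
/*
 * Illustrative sketch (not part of the original source): the public
 * extent_hooks_t mirrors the table above, so an application can wrap the
 * default hooks, e.g. to log allocations, and install the wrapper through the
 * documented "arena.<i>.extent_hooks" mallctl.  The names old_hooks and
 * my_alloc are hypothetical.
 */
static extent_hooks_t *old_hooks;	/* Saved when installing the wrapper. */

static void *
my_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	/* ... record the request ... */
	return old_hooks->alloc(old_hooks, new_addr, size, alignment, zero,
	    commit, arena_ind);
}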
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/emap.h"
enum emap_lock_result_e {
emap_lock_result_success,
emap_lock_result_failure,
emap_lock_result_no_extent
};
typedef enum emap_lock_result_e emap_lock_result_t;
bool
emap_init(emap_t *emap, base_t *base, bool zeroed) {
return rtree_new(&emap->rtree, base, zeroed);
}
void
emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_state_t state) {
witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE);
edata_state_set(edata, state);
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm1 = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
/* init_missing */ false);
assert(elm1 != NULL);
rtree_leaf_elm_t *elm2 = edata_size_get(edata) == PAGE ? NULL :
rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_last_get(edata), /* dependent */ true,
/* init_missing */ false);
rtree_leaf_elm_state_update(tsdn, &emap->rtree, elm1, elm2, state);
emap_assert_mapped(tsdn, emap, edata);
}
static inline edata_t *
emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_pai_t pai, extent_state_t expected_state, bool forward,
bool expanding) {
witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE);
assert(!edata_guarded_get(edata));
assert(!expanding || forward);
assert(!edata_state_in_transition(expected_state));
assert(expected_state == extent_state_dirty ||
expected_state == extent_state_muzzy ||
expected_state == extent_state_retained);
void *neighbor_addr = forward ? edata_past_get(edata) :
edata_before_get(edata);
/*
* This is subtle; the rtree code asserts that its input pointer is
* non-NULL, and this is a useful thing to check. But it's possible
* that edata corresponds to an address of (void *)PAGE (in practice,
* this has only been observed on FreeBSD when address-space
* randomization is on, but it could in principle happen anywhere). In
* this case, edata_before_get(edata) is NULL, triggering the assert.
*/
if (neighbor_addr == NULL) {
return NULL;
}
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
rtree_ctx, (uintptr_t)neighbor_addr, /* dependent*/ false,
/* init_missing */ false);
if (elm == NULL) {
return NULL;
}
rtree_contents_t neighbor_contents = rtree_leaf_elm_read(tsdn,
&emap->rtree, elm, /* dependent */ true);
if (!extent_can_acquire_neighbor(edata, neighbor_contents, pai,
expected_state, forward, expanding)) {
return NULL;
}
/* From this point, the neighbor edata can be safely acquired. */
edata_t *neighbor = neighbor_contents.edata;
assert(edata_state_get(neighbor) == expected_state);
emap_update_edata_state(tsdn, emap, neighbor, extent_state_merging);
if (expanding) {
extent_assert_can_expand(edata, neighbor);
} else {
extent_assert_can_coalesce(edata, neighbor);
}
return neighbor;
}
edata_t *
emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_pai_t pai, extent_state_t expected_state, bool forward) {
return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai,
expected_state, forward, /* expand */ false);
}
edata_t *
emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
edata_t *edata, extent_pai_t pai, extent_state_t expected_state) {
/* Try expanding forward. */
return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai,
expected_state, /* forward */ true, /* expand */ true);
}
void
emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_state_t new_state) {
assert(emap_edata_in_transition(tsdn, emap, edata));
assert(emap_edata_is_acquired(tsdn, emap, edata));
emap_update_edata_state(tsdn, emap, edata, new_state);
}
static bool
emap_rtree_leaf_elms_lookup(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
const edata_t *edata, bool dependent, bool init_missing,
rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
*r_elm_a = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata), dependent, init_missing);
if (!dependent && *r_elm_a == NULL) {
return true;
}
assert(*r_elm_a != NULL);
*r_elm_b = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_last_get(edata), dependent, init_missing);
if (!dependent && *r_elm_b == NULL) {
return true;
}
assert(*r_elm_b != NULL);
return false;
}
static void
emap_rtree_write_acquired(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm_a,
rtree_leaf_elm_t *elm_b, edata_t *edata, szind_t szind, bool slab) {
rtree_contents_t contents;
contents.edata = edata;
contents.metadata.szind = szind;
contents.metadata.slab = slab;
contents.metadata.is_head = (edata == NULL) ? false :
edata_is_head_get(edata);
contents.metadata.state = (edata == NULL) ? 0 : edata_state_get(edata);
rtree_leaf_elm_write(tsdn, &emap->rtree, elm_a, contents);
if (elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &emap->rtree, elm_b, contents);
}
}
bool
emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
szind_t szind, bool slab) {
assert(edata_state_get(edata) == extent_state_active);
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm_a, *elm_b;
bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
false, true, &elm_a, &elm_b);
if (err) {
return true;
}
assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a,
/* dependent */ false).edata == NULL);
assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b,
/* dependent */ false).edata == NULL);
emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, edata, szind, slab);
return false;
}
/* Invoked *after* emap_register_boundary. */
void
emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
szind_t szind) {
EMAP_DECLARE_RTREE_CTX;
assert(edata_slab_get(edata));
assert(edata_state_get(edata) == extent_state_active);
if (config_debug) {
/* Making sure the boundary is registered already. */
rtree_leaf_elm_t *elm_a, *elm_b;
bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx,
edata, /* dependent */ true, /* init_missing */ false,
&elm_a, &elm_b);
assert(!err);
rtree_contents_t contents_a, contents_b;
contents_a = rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a,
/* dependent */ true);
contents_b = rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b,
/* dependent */ true);
assert(contents_a.edata == edata && contents_b.edata == edata);
assert(contents_a.metadata.slab && contents_b.metadata.slab);
}
rtree_contents_t contents;
contents.edata = edata;
contents.metadata.szind = szind;
contents.metadata.slab = true;
contents.metadata.state = extent_state_active;
contents.metadata.is_head = false; /* Not allowed to access. */
assert(edata_size_get(edata) > (2 << LG_PAGE));
rtree_write_range(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata) + PAGE,
(uintptr_t)edata_last_get(edata) - PAGE, contents);
}
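
/*
 * Illustrative sketch (not part of the original source): the required call
 * order for a slab extent.  Boundary registration maps the first and last
 * page (and can fail if rtree nodes cannot be allocated); interior
 * registration then fills in the pages in between so that any interior
 * pointer resolves to the same edata_t.  The function name is hypothetical.
 */
static bool
example_emap_register_slab(tsdn_t *tsdn, emap_t *emap, edata_t *slab,
    szind_t szind) {
	if (emap_register_boundary(tsdn, emap, slab, szind, /* slab */ true)) {
		return true;	/* rtree node allocation failed. */
	}
	emap_register_interior(tsdn, emap, slab, szind);
	return false;
}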
void
emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
/*
* The edata must be either in an acquired state, or protected by state
* based locks.
*/
if (!emap_edata_is_acquired(tsdn, emap, edata)) {
witness_assert_positive_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
}
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm_a, *elm_b;
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
true, false, &elm_a, &elm_b);
emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, NULL, SC_NSIZES,
false);
}
void
emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
EMAP_DECLARE_RTREE_CTX;
assert(edata_slab_get(edata));
if (edata_size_get(edata) > (2 << LG_PAGE)) {
rtree_clear_range(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata) + PAGE,
(uintptr_t)edata_last_get(edata) - PAGE);
}
}
void
emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
bool slab) {
EMAP_DECLARE_RTREE_CTX;
if (szind != SC_NSIZES) {
rtree_contents_t contents;
contents.edata = edata;
contents.metadata.szind = szind;
contents.metadata.slab = slab;
contents.metadata.is_head = edata_is_head_get(edata);
contents.metadata.state = edata_state_get(edata);
rtree_write(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_addr_get(edata), contents);
/*
* Recall that this is called only for active->inactive and
* inactive->active transitions (since only active extents have
* meaningful values for szind and slab). Active, non-slab
* extents only need to handle lookups at their head (on
* deallocation), so we don't bother filling in the end
* boundary.
*
* For slab extents, we do the end-mapping change. This still
* leaves the interior unmodified; an emap_register_interior
* call is coming in those cases, though.
*/
if (slab && edata_size_get(edata) > PAGE) {
uintptr_t key = (uintptr_t)edata_past_get(edata)
- (uintptr_t)PAGE;
rtree_write(tsdn, &emap->rtree, rtree_ctx, key,
contents);
}
}
}
bool
emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *edata, size_t size_a, edata_t *trail, size_t size_b) {
EMAP_DECLARE_RTREE_CTX;
/*
* We use placeholder constants for things like the arena ind, the
* zero/ranged/commit state, and the head status.  This is a fake
* edata_t, used only to
* facilitate a lookup.
*/
edata_t lead = {0};
edata_init(&lead, 0U, edata_addr_get(edata), size_a, false, 0, 0,
extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, &lead, false, true,
&prepare->lead_elm_a, &prepare->lead_elm_b);
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, trail, false, true,
&prepare->trail_elm_a, &prepare->trail_elm_b);
if (prepare->lead_elm_a == NULL || prepare->lead_elm_b == NULL
|| prepare->trail_elm_a == NULL || prepare->trail_elm_b == NULL) {
return true;
}
return false;
}
void
emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *lead, size_t size_a, edata_t *trail, size_t size_b) {
/*
* We should think about not writing to the lead leaf element. We can
* get into situations where a racing realloc-like call can disagree
* with a size lookup request. I think it's fine to declare that these
* situations are race bugs, but there's an argument to be made that for
* things like xallocx, a size lookup call should return either the old
* size or the new size, but not anything else.
*/
emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a,
prepare->lead_elm_b, lead, SC_NSIZES, /* slab */ false);
emap_rtree_write_acquired(tsdn, emap, prepare->trail_elm_a,
prepare->trail_elm_b, trail, SC_NSIZES, /* slab */ false);
}
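
/*
 * Illustrative sketch (not part of the original source): the two-phase split
 * protocol.  emap_split_prepare() performs the lookups that can fail (rtree
 * leaf allocation) without modifying the map, so the caller can back out
 * cleanly; emap_split_commit() then only writes.  The function name is
 * hypothetical.
 */
static bool
example_emap_split(tsdn_t *tsdn, emap_t *emap, edata_t *edata, size_t size_a,
    edata_t *trail, size_t size_b) {
	emap_prepare_t prepare;
	if (emap_split_prepare(tsdn, emap, &prepare, edata, size_a, trail,
	    size_b)) {
		return true;	/* Nothing has been written yet. */
	}
	/* ... shrink edata to size_a and set up trail for size_b ... */
	emap_split_commit(tsdn, emap, &prepare, edata, size_a, trail, size_b);
	return false;
}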
void
emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *lead, edata_t *trail) {
EMAP_DECLARE_RTREE_CTX;
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, lead, true, false,
&prepare->lead_elm_a, &prepare->lead_elm_b);
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, trail, true, false,
&prepare->trail_elm_a, &prepare->trail_elm_b);
}
void
emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *lead, edata_t *trail) {
rtree_contents_t clear_contents;
clear_contents.edata = NULL;
clear_contents.metadata.szind = SC_NSIZES;
clear_contents.metadata.slab = false;
clear_contents.metadata.is_head = false;
clear_contents.metadata.state = (extent_state_t)0;
if (prepare->lead_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &emap->rtree,
prepare->lead_elm_b, clear_contents);
}
rtree_leaf_elm_t *merged_b;
if (prepare->trail_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &emap->rtree,
prepare->trail_elm_a, clear_contents);
merged_b = prepare->trail_elm_b;
} else {
merged_b = prepare->trail_elm_a;
}
emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a, merged_b,
lead, SC_NSIZES, false);
}
void
emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
EMAP_DECLARE_RTREE_CTX;
rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata));
assert(contents.edata == edata);
assert(contents.metadata.is_head == edata_is_head_get(edata));
assert(contents.metadata.state == edata_state_get(edata));
}
void
emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
emap_full_alloc_ctx_t context1 = {0};
emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_base_get(edata),
&context1);
assert(context1.edata == NULL);
emap_full_alloc_ctx_t context2 = {0};
emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_last_get(edata),
&context2);
assert(context2.edata == NULL);
}
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/eset.h"
#define ESET_NPSIZES (SC_NPSIZES + 1)
static void
eset_bin_init(eset_bin_t *bin) {
edata_heap_new(&bin->heap);
/*
* heap_min doesn't need initialization; it gets filled in when the bin
* goes from non-empty to empty.
*/
}
static void
eset_bin_stats_init(eset_bin_stats_t *bin_stats) {
atomic_store_zu(&bin_stats->nextents, 0, ATOMIC_RELAXED);
atomic_store_zu(&bin_stats->nbytes, 0, ATOMIC_RELAXED);
}
void
eset_init(eset_t *eset, extent_state_t state) {
for (unsigned i = 0; i < ESET_NPSIZES; i++) {
eset_bin_init(&eset->bins[i]);
eset_bin_stats_init(&eset->bin_stats[i]);
}
fb_init(eset->bitmap, ESET_NPSIZES);
edata_list_inactive_init(&eset->lru);
eset->state = state;
}
size_t
eset_npages_get(eset_t *eset) {
return atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
}
size_t
eset_nextents_get(eset_t *eset, pszind_t pind) {
return atomic_load_zu(&eset->bin_stats[pind].nextents, ATOMIC_RELAXED);
}
size_t
eset_nbytes_get(eset_t *eset, pszind_t pind) {
return atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
}
static void
eset_stats_add(eset_t *eset, pszind_t pind, size_t sz) {
size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents,
ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nextents, cur + 1,
ATOMIC_RELAXED);
cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nbytes, cur + sz,
ATOMIC_RELAXED);
}
static void
eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) {
size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents,
ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nextents, cur - 1,
ATOMIC_RELAXED);
cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nbytes, cur - sz,
ATOMIC_RELAXED);
}
void
eset_insert(eset_t *eset, edata_t *edata) {
assert(edata_state_get(edata) == eset->state);
size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata);
if (edata_heap_empty(&eset->bins[pind].heap)) {
fb_set(eset->bitmap, ESET_NPSIZES, (size_t)pind);
/* Only element is automatically the min element. */
eset->bins[pind].heap_min = edata_cmp_summary;
} else {
/*
* There's already a min element; update the summary if we're
* about to insert a lower one.
*/
if (edata_cmp_summary_comp(edata_cmp_summary,
eset->bins[pind].heap_min) < 0) {
eset->bins[pind].heap_min = edata_cmp_summary;
}
}
edata_heap_insert(&eset->bins[pind].heap, edata);
if (config_stats) {
eset_stats_add(eset, pind, size);
}
edata_list_inactive_append(&eset->lru, edata);
size_t npages = size >> LG_PAGE;
/*
* All modifications to npages hold the mutex (as asserted above), so we
* don't need an atomic fetch-add; we can get by with a load followed by
* a store.
*/
size_t cur_eset_npages =
atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
atomic_store_zu(&eset->npages, cur_eset_npages + npages,
ATOMIC_RELAXED);
}
void
eset_remove(eset_t *eset, edata_t *edata) {
assert(edata_state_get(edata) == eset->state ||
edata_state_in_transition(edata_state_get(edata)));
size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
if (config_stats) {
eset_stats_sub(eset, pind, size);
}
edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata);
edata_heap_remove(&eset->bins[pind].heap, edata);
if (edata_heap_empty(&eset->bins[pind].heap)) {
fb_unset(eset->bitmap, ESET_NPSIZES, (size_t)pind);
} else {
/*
* This is a little weird; we compare if the summaries are
* equal, rather than if the edata we removed was the heap
* minimum. The reason why is that getting the heap minimum
* can cause a pairing heap merge operation. We can avoid this
* if we only update the min if it's changed, in which case the
* summaries of the removed element and the min element should
* compare equal.
*/
if (edata_cmp_summary_comp(edata_cmp_summary,
eset->bins[pind].heap_min) == 0) {
eset->bins[pind].heap_min = edata_cmp_summary_get(
edata_heap_first(&eset->bins[pind].heap));
}
}
edata_list_inactive_remove(&eset->lru, edata);
size_t npages = size >> LG_PAGE;
/*
* As in eset_insert, we hold eset->mtx and so don't need atomic
* operations for updating eset->npages.
*/
size_t cur_extents_npages =
atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
assert(cur_extents_npages >= npages);
atomic_store_zu(&eset->npages,
cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}
/*
* Find an extent with size [min_size, max_size) to satisfy the alignment
* requirement. For each size, try only the first extent in the heap.
*/
static edata_t *
eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
size_t alignment) {
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size));
pszind_t pind_max = sz_psz2ind(sz_psz_quantize_ceil(max_size));
for (pszind_t i =
(pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
i < pind_max;
i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
assert(i < SC_NPSIZES);
assert(!edata_heap_empty(&eset->bins[i].heap));
edata_t *edata = edata_heap_first(&eset->bins[i].heap);
uintptr_t base = (uintptr_t)edata_base_get(edata);
size_t candidate_size = edata_size_get(edata);
assert(candidate_size >= min_size);
uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
PAGE_CEILING(alignment));
if (base > next_align || base + candidate_size <= next_align) {
/* Overflow, or the extent ends before the next aligned address. */
continue;
}
size_t leadsize = next_align - base;
if (candidate_size - leadsize >= min_size) {
return edata;
}
}
return NULL;
}
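
/*
 * Illustrative sketch (not part of the original source): the alignment
 * arithmetic used above.  With 4 KiB pages, an extent based at 0x5000 and a
 * 16 KiB alignment request, the next aligned address is 0x8000, so 0x3000
 * bytes of lead are unusable and the extent only fits if its size covers
 * leadsize + min_size.  The function name is hypothetical.
 */
static bool
example_fits_aligned(uintptr_t base, size_t size, size_t min_size,
    size_t alignment) {
	uintptr_t next_align = ALIGNMENT_CEILING(base, PAGE_CEILING(alignment));
	if (base > next_align || base + size <= next_align) {
		return false;	/* Overflow, or no aligned address inside. */
	}
	size_t leadsize = next_align - base;
	return size - leadsize >= min_size;
}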
/*
* Do first-fit extent selection, i.e. select the oldest/lowest extent that is
* large enough.
*
* lg_max_fit is the (log of the) maximum ratio between the requested size and
* the returned size that we'll allow. This can reduce fragmentation by
* avoiding reusing and splitting large extents for smaller sizes. In practice,
* it's set to opt_lg_extent_max_active_fit for the dirty eset and SC_PTR_BITS
* for others.
*/
static edata_t *
eset_first_fit(eset_t *eset, size_t size, bool exact_only,
unsigned lg_max_fit) {
edata_t *ret = NULL;
edata_cmp_summary_t ret_summ JEMALLOC_CC_SILENCE_INIT({0});
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));
if (exact_only) {
return edata_heap_empty(&eset->bins[pind].heap) ? NULL :
edata_heap_first(&eset->bins[pind].heap);
}
for (pszind_t i =
(pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
i < ESET_NPSIZES;
i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
assert(!edata_heap_empty(&eset->bins[i].heap));
if (lg_max_fit == SC_PTR_BITS) {
/*
* We'll shift by this below, and shifting out all the
* bits is undefined. Decreasing is safe, since the
* page size is larger than 1 byte.
*/
lg_max_fit = SC_PTR_BITS - 1;
}
if ((sz_pind2sz(i) >> lg_max_fit) > size) {
break;
}
if (ret == NULL || edata_cmp_summary_comp(
eset->bins[i].heap_min, ret_summ) < 0) {
/*
* We grab the edata as early as possible, even though
* we might change it later. Practically, a large
* portion of eset_fit calls succeed at the first valid
* index, so this doesn't cost much, and we get the
* effect of prefetching the edata as early as possible.
*/
edata_t *edata = edata_heap_first(&eset->bins[i].heap);
assert(edata_size_get(edata) >= size);
assert(ret == NULL || edata_snad_comp(edata, ret) < 0);
assert(ret == NULL || edata_cmp_summary_comp(
eset->bins[i].heap_min,
edata_cmp_summary_get(edata)) == 0);
ret = edata;
ret_summ = eset->bins[i].heap_min;
}
if (i == SC_NPSIZES) {
break;
}
assert(i < SC_NPSIZES);
}
return ret;
}
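
/*
 * Illustrative sketch (not part of the original source): the lg_max_fit check
 * above, in numbers.  With a 16 KiB request and lg_max_fit == 6, a 2 MiB
 * candidate bin is rejected because (2 MiB >> 6) == 32 KiB > 16 KiB, i.e. the
 * bin is more than 2^6 times larger than what was asked for.  The function
 * name is hypothetical.
 */
static bool
example_within_max_fit(size_t bin_size, size_t request_size,
    unsigned lg_max_fit) {
	return (bin_size >> lg_max_fit) <= request_size;
}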
edata_t *
eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only,
unsigned lg_max_fit) {
size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (max_size < esize) {
return NULL;
}
edata_t *edata = eset_first_fit(eset, max_size, exact_only, lg_max_fit);
if (alignment > PAGE && edata == NULL) {
/*
* max_size guarantees the alignment requirement but is rather
* pessimistic. Next we try to satisfy the aligned allocation
* with sizes in [esize, max_size).
*/
edata = eset_fit_alignment(eset, esize, max_size, alignment);
}
return edata;
}
#define JEMALLOC_PRNG_C_
#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/jemalloc_internal_includes.h"
void
exp_grow_init(exp_grow_t *exp_grow) {
exp_grow->next = sz_psz2ind(HUGEPAGE);
exp_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS);
}
#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h" #include "jemalloc/internal/assert.h"
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h" #include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
/******************************************************************************/ /******************************************************************************/
/* Data. */ /* Data. */
rtree_t extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t extent_mutex_pool;
size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
static bool extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    size_t offset, size_t length, bool growing_retained);
static bool extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks,
    edata_t *edata, size_t offset, size_t length, bool growing_retained);
static bool extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks,
    edata_t *edata, size_t offset, size_t length, bool growing_retained);
static edata_t *extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks);
static bool extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *a, edata_t *b, bool holding_core_locks);

static const bitmap_info_t extents_bitmap_info =
    BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);

static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind);
static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, size_t offset, size_t length, unsigned arena_ind);
static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained);
static bool extent_decommit_default(extent_hooks_t *extent_hooks,
void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#ifdef PAGES_CAN_PURGE_LAZY
static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained);
#ifdef PAGES_CAN_PURGE_FORCED
static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained);
static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, size_t size_a, size_t size_b, bool committed,
unsigned arena_ind);
static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
bool growing_retained);
static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
size_t size_a, void *addr_b, size_t size_b, bool committed,
unsigned arena_ind);
static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
bool growing_retained);
const extent_hooks_t extent_hooks_default = {
extent_alloc_default,
extent_dalloc_default,
extent_destroy_default,
extent_commit_default,
extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
,
extent_purge_lazy_default
#else
,
NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
,
extent_purge_forced_default
#else
,
NULL
#endif
,
extent_split_default,
extent_merge_default
};
/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
static atomic_zu_t highpages;
 * definition.
 */
static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata);
static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, edata_t *expand_edata, size_t usize, size_t alignment,
    bool zero, bool *commit, bool growing_retained, bool guarded);
static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, edata_t *edata, bool *coalesced);
static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac,
    ehooks_t *ehooks, edata_t *expand_edata, size_t size, size_t alignment,
    bool zero, bool *commit, bool guarded);

static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
    size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit, bool growing_retained);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
    bool growing_retained);
/******************************************************************************/
#define ATTR_NONE /* does nothing */
ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
extent_esnead_comp)
#undef ATTR_NONE
typedef enum {
lock_result_success,
lock_result_failure,
lock_result_no_extent
} lock_result_t;
static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
extent_t **result, bool inactive_only) {
extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
elm, true);
/* Slab implies active extents and should be skipped. */
if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
&extents_rtree, elm, true))) {
return lock_result_no_extent;
}
/*
* It's possible that the extent changed out from under us, and with it
* the leaf->extent mapping. We have to recheck while holding the lock.
*/
extent_lock(tsdn, extent1);
extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
&extents_rtree, elm, true);
if (extent1 == extent2) {
*result = extent1;
return lock_result_success;
} else {
extent_unlock(tsdn, extent1);
return lock_result_failure;
}
}
/*
* Returns a pool-locked extent_t * if there's one associated with the given
* address, and NULL otherwise.
*/
static extent_t *
extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
bool inactive_only) {
extent_t *ret = NULL;
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)addr, false, false);
if (elm == NULL) {
return NULL;
}
lock_result_t lock_result;
do {
lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
inactive_only);
} while (lock_result == lock_result_failure);
return ret;
}
extent_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
extent_t *extent = extent_avail_first(&arena->extent_avail);
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
return base_alloc_extent(tsdn, arena->base);
}
extent_avail_remove(&arena->extent_avail, extent);
atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
return extent;
}
void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
extent_avail_insert(&arena->extent_avail, extent);
atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
}
extent_hooks_t *
extent_hooks_get(arena_t *arena) {
return base_extent_hooks_get(arena->base);
}
extent_hooks_t *
extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
background_thread_info_t *info;
if (have_background_thread) {
info = arena_background_thread_info_get(arena);
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
}
extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
if (have_background_thread) {
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
return ret;
}
static void
extent_hooks_assure_initialized(arena_t *arena,
extent_hooks_t **r_extent_hooks) {
if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
*r_extent_hooks = extent_hooks_get(arena);
}
}
#ifndef JEMALLOC_JET
static
#endif
size_t size_t
extent_size_quantize_floor(size_t size) { extent_sn_next(pac_t *pac) {
size_t ret; return atomic_fetch_add_zu(&pac->extent_sn_next, 1, ATOMIC_RELAXED);
pszind_t pind;
assert(size > 0);
assert((size & PAGE_MASK) == 0);
pind = sz_psz2ind(size - sz_large_pad + 1);
if (pind == 0) {
/*
* Avoid underflow. This short-circuit would also do the right
* thing for all sizes in the range for which there are
* PAGE-spaced size classes, but it's simplest to just handle
* the one case that would cause erroneous results.
*/
return size;
}
ret = sz_pind2sz(pind - 1) + sz_large_pad;
assert(ret <= size);
return ret;
} }
#ifndef JEMALLOC_JET static inline bool
static extent_may_force_decay(pac_t *pac) {
#endif return !(pac_decay_ms_get(pac, extent_state_dirty) == -1
size_t || pac_decay_ms_get(pac, extent_state_muzzy) == -1);
extent_size_quantize_ceil(size_t size) {
size_t ret;
assert(size > 0);
assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
ret = extent_size_quantize_floor(size);
if (ret < size) {
/*
* Skip a quantization that may have an adequately large extent,
* because under-sized extents may be mixed in. This only
* happens when an unusual size is requested, i.e. for aligned
* allocation, and is just one of several places where linear
* search would potentially find sufficiently aligned available
* memory somewhere lower.
*/
ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
sz_large_pad;
}
return ret;
} }
/* Generate pairing heap functions. */ static bool
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp) extent_try_delayed_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *edata) {
emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
bool bool coalesced;
extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, edata = extent_try_coalesce(tsdn, pac, ehooks, ecache,
bool delay_coalesce) { edata, &coalesced);
if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS, emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
malloc_mutex_rank_exclusive)) {
if (!coalesced) {
return true; return true;
} }
for (unsigned i = 0; i < SC_NPSIZES + 1; i++) { eset_insert(&ecache->eset, edata);
extent_heap_new(&extents->heaps[i]);
}
bitmap_init(extents->bitmap, &extents_bitmap_info, true);
extent_list_init(&extents->lru);
atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
extents->state = state;
extents->delay_coalesce = delay_coalesce;
return false; return false;
} }
extent_state_t edata_t *
extents_state_get(const extents_t *extents) { ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
return extents->state; edata_t *expand_edata, size_t size, size_t alignment, bool zero,
} bool guarded) {
assert(size != 0);
size_t assert(alignment != 0);
extents_npages_get(extents_t *extents) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
return atomic_load_zu(&extents->npages, ATOMIC_RELAXED); WITNESS_RANK_CORE, 0);
}
size_t
extents_nextents_get(extents_t *extents, pszind_t pind) {
return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
}
size_t
extents_nbytes_get(extents_t *extents, pszind_t pind) {
return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
}
static void
extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) {
size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED);
cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED);
}
static void
extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) {
size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED);
cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED);
}
static void
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
malloc_mutex_assert_owner(tsdn, &extents->mtx);
assert(extent_state_get(extent) == extents->state);
size_t size = extent_size_get(extent);
size_t psz = extent_size_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
if (extent_heap_empty(&extents->heaps[pind])) {
bitmap_unset(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
}
extent_heap_insert(&extents->heaps[pind], extent);
if (config_stats) {
extents_stats_add(extents, pind, size);
}
extent_list_append(&extents->lru, extent);
size_t npages = size >> LG_PAGE;
/*
* All modifications to npages hold the mutex (as asserted above), so we
* don't need an atomic fetch-add; we can get by with a load followed by
* a store.
*/
size_t cur_extents_npages =
atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
atomic_store_zu(&extents->npages, cur_extents_npages + npages,
ATOMIC_RELAXED);
}
static void
extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
malloc_mutex_assert_owner(tsdn, &extents->mtx);
assert(extent_state_get(extent) == extents->state);
size_t size = extent_size_get(extent);
size_t psz = extent_size_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
extent_heap_remove(&extents->heaps[pind], extent);
if (config_stats) {
extents_stats_sub(extents, pind, size);
}
if (extent_heap_empty(&extents->heaps[pind])) {
bitmap_set(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
}
extent_list_remove(&extents->lru, extent);
size_t npages = size >> LG_PAGE;
/*
* As in extents_insert_locked, we hold extents->mtx and so don't need
* atomic operations for updating extents->npages.
*/
size_t cur_extents_npages =
atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
assert(cur_extents_npages >= npages);
atomic_store_zu(&extents->npages,
cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}
/*
* Find an extent with size [min_size, max_size) to satisfy the alignment
* requirement. For each size, try only the first extent in the heap.
*/
static extent_t *
extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
size_t alignment) {
pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
&extents_bitmap_info, (size_t)pind); i < pind_max; i =
(pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)i+1)) {
assert(i < SC_NPSIZES);
assert(!extent_heap_empty(&extents->heaps[i]));
extent_t *extent = extent_heap_first(&extents->heaps[i]);
uintptr_t base = (uintptr_t)extent_base_get(extent);
size_t candidate_size = extent_size_get(extent);
assert(candidate_size >= min_size);
uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
PAGE_CEILING(alignment));
if (base > next_align || base + candidate_size <= next_align) {
/* Overflow or not crossing the next alignment. */
continue;
}
size_t leadsize = next_align - base;
if (candidate_size - leadsize >= min_size) {
return extent;
}
}
return NULL; bool commit = true;
edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata,
size, alignment, zero, &commit, false, guarded);
assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
assert(edata == NULL || edata_guarded_get(edata) == guarded);
return edata;
} }
/* edata_t *
* Do first-fit extent selection, i.e. select the oldest/lowest extent that is ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
* large enough. edata_t *expand_edata, size_t size, size_t alignment, bool zero,
*/ bool guarded) {
static extent_t * assert(size != 0);
extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, assert(alignment != 0);
size_t size) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
extent_t *ret = NULL; WITNESS_RANK_CORE, 0);
pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
if (!maps_coalesce && !opt_retain) {
/*
* No split / merge allowed (Windows w/o retain). Try exact fit
* only.
*/
return extent_heap_empty(&extents->heaps[pind]) ? NULL :
extent_heap_first(&extents->heaps[pind]);
}
for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, bool commit = true;
&extents_bitmap_info, (size_t)pind); edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, expand_edata,
i < SC_NPSIZES + 1; size, alignment, zero, &commit, guarded);
i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, if (edata == NULL) {
(size_t)i+1)) { if (opt_retain && expand_edata != NULL) {
assert(!extent_heap_empty(&extents->heaps[i])); /*
extent_t *extent = extent_heap_first(&extents->heaps[i]); * When retain is enabled and trying to expand, we do
assert(extent_size_get(extent) >= size); * not attempt extent_alloc_wrapper which does mmap that
/* * is very unlikely to succeed (unless it happens to be
* In order to reduce fragmentation, avoid reusing and splitting * at the end).
* large extents for much smaller sizes. */
* return NULL;
* Only do check for dirty extents (delay_coalesce).
*/
if (extents->delay_coalesce &&
(sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
break;
}
if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
ret = extent;
} }
if (i == SC_NPSIZES) { if (guarded) {
break; /*
* Means no cached guarded extents available (and no
* grow_retained was attempted). The pac_alloc flow
* will alloc regular extents to make new guarded ones.
*/
return NULL;
} }
assert(i < SC_NPSIZES); void *new_addr = (expand_edata == NULL) ? NULL :
} edata_past_get(expand_edata);
edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr,
return ret; size, alignment, zero, &commit,
} /* growing_retained */ false);
/*
* Do first-fit extent selection, where the selection policy choice is
* based on extents->delay_coalesce.
*/
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
size_t esize, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &extents->mtx);
size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (max_size < esize) {
return NULL;
}
extent_t *extent =
extents_first_fit_locked(tsdn, arena, extents, max_size);
if (alignment > PAGE && extent == NULL) {
/*
* max_size guarantees the alignment requirement but is rather
* pessimistic. Next we try to satisfy the aligned allocation
* with sizes in [esize, max_size).
*/
extent = extents_fit_alignment(extents, esize, max_size,
alignment);
}
return extent;
}
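/*
 * Illustrative sketch, not part of this diff: why padding a request up to
 * size + PAGE_CEILING(alignment) - PAGE guarantees that any extent of that
 * padded size contains an alignment-aligned sub-range of at least `size`
 * bytes.  SKETCH_PAGE and ALIGN_CEIL are local stand-ins for jemalloc's
 * PAGE and ALIGNMENT_CEILING (4 KiB pages assumed).
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE ((uintptr_t)4096)
#define ALIGN_CEIL(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int
main(void) {
	uintptr_t size = 5 * SKETCH_PAGE;
	uintptr_t alignment = 16 * SKETCH_PAGE;
	uintptr_t max_size = size + ALIGN_CEIL(alignment, SKETCH_PAGE) -
	    SKETCH_PAGE;

	/* For every page-aligned base, an aligned cut of `size` still fits. */
	for (uintptr_t base = SKETCH_PAGE; base <= 64 * SKETCH_PAGE;
	    base += SKETCH_PAGE) {
		uintptr_t leadsize = ALIGN_CEIL(base, alignment) - base;
		assert(max_size - leadsize >= size);
	}
	printf("max_size of %zu pages always leaves an aligned %zu-page cut\n",
	    (size_t)(max_size / SKETCH_PAGE), (size_t)(size / SKETCH_PAGE));
	return 0;
}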
static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
extent_t *extent) {
extent_state_set(extent, extent_state_active);
bool coalesced;
extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
extents, extent, &coalesced, false);
extent_state_set(extent, extents_state_get(extents));
if (!coalesced) {
return true;
} }
extents_insert_locked(tsdn, extents, extent);
return false;
}
extent_t * assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, return edata;
extents_t *extents, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
assert(size + pad != 0);
assert(alignment != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
new_addr, size, pad, alignment, slab, szind, zero, commit, false);
assert(extent == NULL || extent_dumpable_get(extent));
return extent;
} }
void void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
extents_t *extents, extent_t *extent) { edata_t *edata) {
assert(extent_base_get(extent) != NULL); assert(edata_base_get(edata) != NULL);
assert(extent_size_get(extent) != 0); assert(edata_size_get(edata) != 0);
assert(extent_dumpable_get(extent)); assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0); WITNESS_RANK_CORE, 0);
extent_addr_set(extent, extent_base_get(extent)); edata_addr_set(edata, edata_base_get(edata));
extent_zeroed_set(extent, false); edata_zeroed_set(edata, false);
extent_record(tsdn, arena, r_extent_hooks, extents, extent, false); extent_record(tsdn, pac, ehooks, ecache, edata);
} }
extent_t * edata_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
extents_t *extents, size_t npages_min) { ecache_t *ecache, size_t npages_min) {
rtree_ctx_t rtree_ctx_fallback; malloc_mutex_lock(tsdn, &ecache->mtx);
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
malloc_mutex_lock(tsdn, &extents->mtx);
/* /*
* Get the LRU coalesced extent, if any. If coalescing was delayed, * Get the LRU coalesced extent, if any. If coalescing was delayed,
* the loop will iterate until the LRU extent is fully coalesced. * the loop will iterate until the LRU extent is fully coalesced.
*/ */
extent_t *extent; edata_t *edata;
while (true) { while (true) {
/* Get the LRU extent, if any. */ /* Get the LRU extent, if any. */
extent = extent_list_first(&extents->lru); eset_t *eset = &ecache->eset;
if (extent == NULL) { edata = edata_list_inactive_first(&eset->lru);
goto label_return; if (edata == NULL) {
/*
* Next check if there are guarded extents. They are
* more expensive to purge (since they are not
			 * mergeable), so we favor caching them longer.
*/
eset = &ecache->guarded_eset;
edata = edata_list_inactive_first(&eset->lru);
if (edata == NULL) {
goto label_return;
}
} }
/* Check the eviction limit. */ /* Check the eviction limit. */
size_t extents_npages = atomic_load_zu(&extents->npages, size_t extents_npages = ecache_npages_get(ecache);
ATOMIC_RELAXED);
if (extents_npages <= npages_min) { if (extents_npages <= npages_min) {
extent = NULL; edata = NULL;
goto label_return; goto label_return;
} }
extents_remove_locked(tsdn, extents, extent); eset_remove(eset, edata);
if (!extents->delay_coalesce) { if (!ecache->delay_coalesce || edata_guarded_get(edata)) {
break; break;
} }
/* Try to coalesce. */ /* Try to coalesce. */
if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks, if (extent_try_delayed_coalesce(tsdn, pac, ehooks, ecache,
rtree_ctx, extents, extent)) { edata)) {
break; break;
} }
/* /*
@@ -608,23 +198,24 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
* Either mark the extent active or deregister it to protect against * Either mark the extent active or deregister it to protect against
* concurrent operations. * concurrent operations.
*/ */
switch (extents_state_get(extents)) { switch (ecache->state) {
case extent_state_active: case extent_state_active:
not_reached(); not_reached();
case extent_state_dirty: case extent_state_dirty:
case extent_state_muzzy: case extent_state_muzzy:
extent_state_set(extent, extent_state_active); emap_update_edata_state(tsdn, pac->emap, edata,
extent_state_active);
break; break;
case extent_state_retained: case extent_state_retained:
extent_deregister(tsdn, extent); extent_deregister(tsdn, pac, edata);
break; break;
default: default:
not_reached(); not_reached();
} }
label_return: label_return:
malloc_mutex_unlock(tsdn, &extents->mtx); malloc_mutex_unlock(tsdn, &ecache->mtx);
return extent; return edata;
} }
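/*
 * Illustrative sketch, not part of this diff: the eviction floor used above.
 * Extents are popped from the LRU end only while the cache still holds more
 * than npages_min pages; all page counts here are hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

int
main(void) {
	size_t lru_pages[] = {8, 4, 16, 2};	/* oldest extents first */
	size_t cached_pages = 30;
	size_t npages_min = 10;

	for (size_t i = 0; i < sizeof(lru_pages) / sizeof(lru_pages[0]); i++) {
		if (cached_pages <= npages_min) {
			break;			/* eviction limit reached */
		}
		cached_pages -= lru_pages[i];
		printf("evicted %zu pages, %zu pages still cached\n",
		    lru_pages[i], cached_pages);
	}
	return 0;
}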
/* /*
@@ -632,123 +223,73 @@ label_return:
* indicates OOM), e.g. when trying to split an existing extent. * indicates OOM), e.g. when trying to split an existing extent.
*/ */
static void static void
extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_abandon_vm(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
extents_t *extents, extent_t *extent, bool growing_retained) { edata_t *edata, bool growing_retained) {
size_t sz = extent_size_get(extent); size_t sz = edata_size_get(edata);
if (config_stats) { if (config_stats) {
arena_stats_accum_zu(&arena->stats.abandoned_vm, sz); atomic_fetch_add_zu(&pac->stats->abandoned_vm, sz,
ATOMIC_RELAXED);
} }
/* /*
* Leak extent after making sure its pages have already been purged, so * Leak extent after making sure its pages have already been purged, so
* that this is only a virtual memory leak. * that this is only a virtual memory leak.
*/ */
if (extents_state_get(extents) == extent_state_dirty) { if (ecache->state == extent_state_dirty) {
if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, if (extent_purge_lazy_impl(tsdn, ehooks, edata, 0, sz,
extent, 0, sz, growing_retained)) { growing_retained)) {
extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent_purge_forced_impl(tsdn, ehooks, edata, 0,
extent, 0, extent_size_get(extent), edata_size_get(edata), growing_retained);
growing_retained);
} }
} }
extent_dalloc(tsdn, arena, extent); edata_cache_put(tsdn, pac->edata_cache, edata);
}
void
extents_prefork(tsdn_t *tsdn, extents_t *extents) {
malloc_mutex_prefork(tsdn, &extents->mtx);
}
void
extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
malloc_mutex_postfork_parent(tsdn, &extents->mtx);
}
void
extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
malloc_mutex_postfork_child(tsdn, &extents->mtx);
} }
static void static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, extent_deactivate_locked_impl(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
extent_t *extent) { edata_t *edata) {
assert(extent_arena_get(extent) == arena); malloc_mutex_assert_owner(tsdn, &ecache->mtx);
assert(extent_state_get(extent) == extent_state_active); assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
extent_state_set(extent, extents_state_get(extents)); emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
extents_insert_locked(tsdn, extents, extent); eset_t *eset = edata_guarded_get(edata) ? &ecache->guarded_eset :
&ecache->eset;
eset_insert(eset, edata);
} }
static void static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents, extent_deactivate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
extent_t *extent) { edata_t *edata) {
malloc_mutex_lock(tsdn, &extents->mtx); assert(edata_state_get(edata) == extent_state_active);
extent_deactivate_locked(tsdn, arena, extents, extent); extent_deactivate_locked_impl(tsdn, pac, ecache, edata);
malloc_mutex_unlock(tsdn, &extents->mtx);
} }
static void static void
extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, extent_deactivate_check_state_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
extent_t *extent) { edata_t *edata, extent_state_t expected_state) {
assert(extent_arena_get(extent) == arena); assert(edata_state_get(edata) == expected_state);
assert(extent_state_get(extent) == extents_state_get(extents)); extent_deactivate_locked_impl(tsdn, pac, ecache, edata);
extents_remove_locked(tsdn, extents, extent);
extent_state_set(extent, extent_state_active);
}
static bool
extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
const extent_t *extent, bool dependent, bool init_missing,
rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_base_get(extent), dependent, init_missing);
if (!dependent && *r_elm_a == NULL) {
return true;
}
assert(*r_elm_a != NULL);
*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_last_get(extent), dependent, init_missing);
if (!dependent && *r_elm_b == NULL) {
return true;
}
assert(*r_elm_b != NULL);
return false;
} }
static void static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a, extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset,
rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) { edata_t *edata) {
rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab); assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
if (elm_b != NULL) { assert(edata_state_get(edata) == ecache->state ||
rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind, edata_state_get(edata) == extent_state_merging);
slab);
}
}
static void eset_remove(eset, edata);
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent, emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
szind_t szind) {
assert(extent_slab_get(extent));
/* Register interior. */
for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
rtree_write(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
LG_PAGE), extent, szind, true);
}
} }
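/*
 * Illustrative sketch, not part of this diff: which pages of a slab the
 * interior-registration loop above touches.  The boundary pages (first and
 * last) are written separately; the loop covers indices 1 .. npages-2.
 * The slab size below is hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

int
main(void) {
	size_t npages = 6;
	printf("boundary: page 0 and page %zu\n", npages - 1);
	for (size_t i = 1; i < npages - 1; i++) {
		printf("interior: page %zu\n", i);
	}
	return 0;
}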
static void void
extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) { extent_gdump_add(tsdn_t *tsdn, const edata_t *edata) {
cassert(config_prof); cassert(config_prof);
/* prof_gdump() requirement. */ /* prof_gdump() requirement. */
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0); WITNESS_RANK_CORE, 0);
if (opt_prof && extent_state_get(extent) == extent_state_active) { if (opt_prof && edata_state_get(edata) == extent_state_active) {
size_t nadd = extent_size_get(extent) >> LG_PAGE; size_t nadd = edata_size_get(edata) >> LG_PAGE;
size_t cur = atomic_fetch_add_zu(&curpages, nadd, size_t cur = atomic_fetch_add_zu(&curpages, nadd,
ATOMIC_RELAXED) + nadd; ATOMIC_RELAXED) + nadd;
size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED); size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
@@ -767,232 +308,184 @@ extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
} }
static void static void
extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) { extent_gdump_sub(tsdn_t *tsdn, const edata_t *edata) {
cassert(config_prof); cassert(config_prof);
if (opt_prof && extent_state_get(extent) == extent_state_active) { if (opt_prof && edata_state_get(edata) == extent_state_active) {
size_t nsub = extent_size_get(extent) >> LG_PAGE; size_t nsub = edata_size_get(edata) >> LG_PAGE;
assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub); assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED); atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
} }
} }
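/*
 * Illustrative sketch, not part of this diff: the curpages/highpages
 * bookkeeping that extent_gdump_add()/extent_gdump_sub() perform, reduced to
 * plain C11 atomics.  The gdump trigger on a new high-water mark is omitted,
 * and the page counts are hypothetical.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static atomic_size_t curpages;
static atomic_size_t highpages;

static void
pages_add(size_t nadd) {
	size_t cur = atomic_fetch_add_explicit(&curpages, nadd,
	    memory_order_relaxed) + nadd;
	size_t high = atomic_load_explicit(&highpages, memory_order_relaxed);
	while (cur > high && !atomic_compare_exchange_weak_explicit(&highpages,
	    &high, cur, memory_order_relaxed, memory_order_relaxed)) {
		/* The failed CAS reloaded `high`; retry until it settles. */
	}
}

int
main(void) {
	pages_add(8);
	pages_add(4);
	atomic_fetch_sub_explicit(&curpages, 4, memory_order_relaxed);
	pages_add(2);
	printf("cur=%zu high=%zu\n",
	    atomic_load_explicit(&curpages, memory_order_relaxed),
	    atomic_load_explicit(&highpages, memory_order_relaxed));
	return 0;
}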
static bool static bool
extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) { extent_register_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata, bool gdump_add) {
rtree_ctx_t rtree_ctx_fallback; assert(edata_state_get(edata) == extent_state_active);
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_leaf_elm_t *elm_a, *elm_b;
/* /*
* We need to hold the lock to protect against a concurrent coalesce * No locking needed, as the edata must be in active state, which
* operation that sees us in a partial state. * prevents other threads from accessing the edata.
*/ */
extent_lock(tsdn, extent); if (emap_register_boundary(tsdn, pac->emap, edata, SC_NSIZES,
/* slab */ false)) {
if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
&elm_a, &elm_b)) {
extent_unlock(tsdn, extent);
return true; return true;
} }
szind_t szind = extent_szind_get_maybe_invalid(extent);
bool slab = extent_slab_get(extent);
extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
if (slab) {
extent_interior_register(tsdn, rtree_ctx, extent, szind);
}
extent_unlock(tsdn, extent);
if (config_prof && gdump_add) { if (config_prof && gdump_add) {
extent_gdump_add(tsdn, extent); extent_gdump_add(tsdn, edata);
} }
return false; return false;
} }
static bool static bool
extent_register(tsdn_t *tsdn, extent_t *extent) { extent_register(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
return extent_register_impl(tsdn, extent, true); return extent_register_impl(tsdn, pac, edata, true);
} }
static bool static bool
extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) { extent_register_no_gdump_add(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
return extent_register_impl(tsdn, extent, false); return extent_register_impl(tsdn, pac, edata, false);
} }
static void static void
extent_reregister(tsdn_t *tsdn, extent_t *extent) { extent_reregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
bool err = extent_register(tsdn, extent); bool err = extent_register(tsdn, pac, edata);
assert(!err); assert(!err);
} }
/*
* Removes all pointers to the given extent from the global rtree indices for
* its interior. This is relevant for slab extents, for which we need to do
* metadata lookups at places other than the head of the extent. We deregister
* on the interior, then, when an extent moves from being an active slab to an
* inactive state.
*/
static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
extent_t *extent) {
size_t i;
assert(extent_slab_get(extent));
for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
rtree_clear(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
LG_PAGE));
}
}
/* /*
* Removes all pointers to the given extent from the global rtree. * Removes all pointers to the given extent from the global rtree.
*/ */
static void static void
extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) { extent_deregister_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata,
rtree_ctx_t rtree_ctx_fallback; bool gdump) {
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); emap_deregister_boundary(tsdn, pac->emap, edata);
rtree_leaf_elm_t *elm_a, *elm_b;
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
&elm_a, &elm_b);
extent_lock(tsdn, extent);
extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
if (extent_slab_get(extent)) {
extent_interior_deregister(tsdn, rtree_ctx, extent);
extent_slab_set(extent, false);
}
extent_unlock(tsdn, extent);
if (config_prof && gdump) { if (config_prof && gdump) {
extent_gdump_sub(tsdn, extent); extent_gdump_sub(tsdn, edata);
} }
} }
static void static void
extent_deregister(tsdn_t *tsdn, extent_t *extent) { extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
extent_deregister_impl(tsdn, extent, true); extent_deregister_impl(tsdn, pac, edata, true);
} }
static void static void
extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) { extent_deregister_no_gdump_sub(tsdn_t *tsdn, pac_t *pac,
extent_deregister_impl(tsdn, extent, false); edata_t *edata) {
extent_deregister_impl(tsdn, pac, edata, false);
} }
/* /*
* Tries to find and remove an extent from extents that can be used for the * Tries to find and remove an extent from ecache that can be used for the
* given allocation request. * given allocation request.
*/ */
static extent_t * static edata_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, bool guarded) {
bool growing_retained) { malloc_mutex_assert_owner(tsdn, &ecache->mtx);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
assert(alignment > 0); assert(alignment > 0);
if (config_debug && new_addr != NULL) { if (config_debug && expand_edata != NULL) {
/* /*
* Non-NULL new_addr has two use cases: * Non-NULL expand_edata indicates in-place expanding realloc.
* * new_addr must either refer to a non-existing extent, or to
* 1) Recycle a known-extant extent, e.g. during purging. * the base of an extant extent, since only active slabs support
* 2) Perform in-place expanding reallocation. * interior lookups (which of course cannot be recycled).
*
* Regardless of use case, new_addr must either refer to a
* non-existing extent, or to the base of an extant extent,
* since only active slabs support interior lookups (which of
* course cannot be recycled).
*/ */
void *new_addr = edata_past_get(expand_edata);
assert(PAGE_ADDR2BASE(new_addr) == new_addr); assert(PAGE_ADDR2BASE(new_addr) == new_addr);
assert(pad == 0);
assert(alignment <= PAGE); assert(alignment <= PAGE);
} }
size_t esize = size + pad; edata_t *edata;
malloc_mutex_lock(tsdn, &extents->mtx); eset_t *eset = guarded ? &ecache->guarded_eset : &ecache->eset;
extent_hooks_assure_initialized(arena, r_extent_hooks); if (expand_edata != NULL) {
extent_t *extent; edata = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap,
if (new_addr != NULL) { expand_edata, EXTENT_PAI_PAC, ecache->state);
extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr, if (edata != NULL) {
false); extent_assert_can_expand(expand_edata, edata);
if (extent != NULL) { if (edata_size_get(edata) < size) {
/* emap_release_edata(tsdn, pac->emap, edata,
* We might null-out extent to report an error, but we ecache->state);
* still need to unlock the associated mutex after. edata = NULL;
*/
extent_t *unlock_extent = extent;
assert(extent_base_get(extent) == new_addr);
if (extent_arena_get(extent) != arena ||
extent_size_get(extent) < esize ||
extent_state_get(extent) !=
extents_state_get(extents)) {
extent = NULL;
} }
extent_unlock(tsdn, unlock_extent);
} }
} else { } else {
extent = extents_fit_locked(tsdn, arena, extents, esize, /*
alignment); * A large extent might be broken up from its original size to
* some small size to satisfy a small request. When that small
* request is freed, though, it won't merge back with the larger
* extent if delayed coalescing is on. The large extent can
		 * then no longer satisfy a request for its original size. To
* limit this effect, when delayed coalescing is enabled, we
* put a cap on how big an extent we can split for a request.
*/
unsigned lg_max_fit = ecache->delay_coalesce
? (unsigned)opt_lg_extent_max_active_fit : SC_PTR_BITS;
/*
* If split and merge are not allowed (Windows w/o retain), try
* exact fit only.
*
		 * For simplicity, splitting guarded extents is not
* supported. Hence, we do only exact fit for guarded
* allocations.
*/
bool exact_only = (!maps_coalesce && !opt_retain) || guarded;
edata = eset_fit(eset, size, alignment, exact_only,
lg_max_fit);
} }
if (extent == NULL) { if (edata == NULL) {
malloc_mutex_unlock(tsdn, &extents->mtx);
return NULL; return NULL;
} }
assert(!guarded || edata_guarded_get(edata));
extent_activate_locked(tsdn, pac, ecache, eset, edata);
extent_activate_locked(tsdn, arena, extents, extent); return edata;
malloc_mutex_unlock(tsdn, &extents->mtx);
return extent;
} }
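/*
 * Illustrative sketch, not part of this diff: the delayed-coalescing cap
 * described in the comment above.  A cached extent may only be split for a
 * request if the request is at least candidate_size >> lg_max_fit, so large
 * dirty extents are not shattered for tiny allocations.  The cap value and
 * sizes below are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool
fit_allowed(size_t candidate_size, size_t request, unsigned lg_max_fit) {
	return (candidate_size >> lg_max_fit) <= request;
}

int
main(void) {
	unsigned lg_max_fit = 6;	/* allow candidates up to request << 6 */
	printf("%d\n", fit_allowed(1 << 20, 4096, lg_max_fit));	/* 0: too big */
	printf("%d\n", fit_allowed(1 << 17, 4096, lg_max_fit));	/* 1: reused */
	return 0;
}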
/* /*
* Given an allocation request and an extent guaranteed to be able to satisfy * Given an allocation request and an extent guaranteed to be able to satisfy
* it, this splits off lead and trail extents, leaving extent pointing to an * it, this splits off lead and trail extents, leaving edata pointing to an
* extent satisfying the allocation. * extent satisfying the allocation.
* This function doesn't put lead or trail into any extents_t; it's the caller's * This function doesn't put lead or trail into any ecache; it's the caller's
* job to ensure that they can be reused. * job to ensure that they can be reused.
*/ */
typedef enum { typedef enum {
/* /*
* Split successfully. lead, extent, and trail, are modified to extents * Split successfully. lead, edata, and trail, are modified to extents
* describing the ranges before, in, and after the given allocation. * describing the ranges before, in, and after the given allocation.
*/ */
extent_split_interior_ok, extent_split_interior_ok,
/* /*
* The extent can't satisfy the given allocation request. None of the * The extent can't satisfy the given allocation request. None of the
* input extent_t *s are touched. * input edata_t *s are touched.
*/ */
extent_split_interior_cant_alloc, extent_split_interior_cant_alloc,
/* /*
* In a potentially invalid state. Must leak (if *to_leak is non-NULL), * In a potentially invalid state. Must leak (if *to_leak is non-NULL),
* and salvage what's still salvageable (if *to_salvage is non-NULL). * and salvage what's still salvageable (if *to_salvage is non-NULL).
* None of lead, extent, or trail are valid. * None of lead, edata, or trail are valid.
*/ */
extent_split_interior_error extent_split_interior_error
} extent_split_interior_result_t; } extent_split_interior_result_t;
static extent_split_interior_result_t static extent_split_interior_result_t
extent_split_interior(tsdn_t *tsdn, arena_t *arena, extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
/* The result of splitting, in case of success. */ /* The result of splitting, in case of success. */
extent_t **extent, extent_t **lead, extent_t **trail, edata_t **edata, edata_t **lead, edata_t **trail,
/* The mess to clean up, in case of error. */ /* The mess to clean up, in case of error. */
extent_t **to_leak, extent_t **to_salvage, edata_t **to_leak, edata_t **to_salvage,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, edata_t *expand_edata, size_t size, size_t alignment) {
szind_t szind, bool growing_retained) { size_t leadsize = ALIGNMENT_CEILING((uintptr_t)edata_base_get(*edata),
size_t esize = size + pad; PAGE_CEILING(alignment)) - (uintptr_t)edata_base_get(*edata);
size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent), assert(expand_edata == NULL || leadsize == 0);
PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent); if (edata_size_get(*edata) < leadsize + size) {
assert(new_addr == NULL || leadsize == 0);
if (extent_size_get(*extent) < leadsize + esize) {
return extent_split_interior_cant_alloc; return extent_split_interior_cant_alloc;
} }
size_t trailsize = extent_size_get(*extent) - leadsize - esize; size_t trailsize = edata_size_get(*edata) - leadsize - size;
*lead = NULL; *lead = NULL;
*trail = NULL; *trail = NULL;
@@ -1001,11 +494,11 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
/* Split the lead. */ /* Split the lead. */
if (leadsize != 0) { if (leadsize != 0) {
*lead = *extent; assert(!edata_guarded_get(*edata));
*extent = extent_split_impl(tsdn, arena, r_extent_hooks, *lead = *edata;
*lead, leadsize, SC_NSIZES, false, esize + trailsize, szind, *edata = extent_split_impl(tsdn, pac, ehooks, *lead, leadsize,
slab, growing_retained); size + trailsize, /* holding_core_locks*/ true);
if (*extent == NULL) { if (*edata == NULL) {
*to_leak = *lead; *to_leak = *lead;
*lead = NULL; *lead = NULL;
return extent_split_interior_error; return extent_split_interior_error;
@@ -1014,36 +507,18 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
/* Split the trail. */ /* Split the trail. */
if (trailsize != 0) { if (trailsize != 0) {
*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent, assert(!edata_guarded_get(*edata));
esize, szind, slab, trailsize, SC_NSIZES, false, *trail = extent_split_impl(tsdn, pac, ehooks, *edata, size,
growing_retained); trailsize, /* holding_core_locks */ true);
if (*trail == NULL) { if (*trail == NULL) {
*to_leak = *extent; *to_leak = *edata;
*to_salvage = *lead; *to_salvage = *lead;
*lead = NULL; *lead = NULL;
*extent = NULL; *edata = NULL;
return extent_split_interior_error; return extent_split_interior_error;
} }
} }
if (leadsize == 0 && trailsize == 0) {
/*
* Splitting causes szind to be set as a side effect, but no
* splitting occurred.
*/
extent_szind_set(*extent, szind);
if (szind != SC_NSIZES) {
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_addr_get(*extent), szind, slab);
if (slab && extent_size_get(*extent) > PAGE) {
rtree_szind_slab_update(tsdn, &extents_rtree,
rtree_ctx,
(uintptr_t)extent_past_get(*extent) -
(uintptr_t)PAGE, szind, slab);
}
}
}
return extent_split_interior_ok; return extent_split_interior_ok;
} }
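/*
 * Illustrative sketch, not part of this diff: how extent_split_interior()
 * carves a recycled extent into lead / allocation / trail.  The base address,
 * sizes, and ALIGN_CEIL macro are hypothetical stand-ins.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN_CEIL(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int
main(void) {
	uintptr_t base = 0x10003000;		/* page-aligned extent base */
	uintptr_t extent_size = 9 * 4096;
	uintptr_t size = 4 * 4096;		/* requested size */
	uintptr_t alignment = 4 * 4096;		/* requested alignment */

	uintptr_t leadsize = ALIGN_CEIL(base, alignment) - base;
	assert(extent_size >= leadsize + size);
	uintptr_t trailsize = extent_size - leadsize - size;

	printf("lead=%zu alloc=%zu trail=%zu (pages)\n",
	    (size_t)(leadsize / 4096), (size_t)(size / 4096),
	    (size_t)(trailsize / 4096));
	return 0;
}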
@@ -1051,42 +526,43 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
* This fulfills the indicated allocation request out of the given extent (which * This fulfills the indicated allocation request out of the given extent (which
* the caller should have ensured was big enough). If there's any unused space * the caller should have ensured was big enough). If there's any unused space
* before or after the resulting allocation, that space is given its own extent * before or after the resulting allocation, that space is given its own extent
* and put back into extents. * and put back into ecache.
*/ */
static extent_t * static edata_t *
extent_recycle_split(tsdn_t *tsdn, arena_t *arena, extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, edata_t *edata, bool growing_retained) {
szind_t szind, extent_t *extent, bool growing_retained) { assert(!edata_guarded_get(edata) || size == edata_size_get(edata));
extent_t *lead; malloc_mutex_assert_owner(tsdn, &ecache->mtx);
extent_t *trail;
extent_t *to_leak; edata_t *lead;
extent_t *to_salvage; edata_t *trail;
edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);
extent_split_interior_result_t result = extent_split_interior( extent_split_interior_result_t result = extent_split_interior(
tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, tsdn, pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage,
&to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind, expand_edata, size, alignment);
growing_retained);
if (!maps_coalesce && result != extent_split_interior_ok if (!maps_coalesce && result != extent_split_interior_ok
&& !opt_retain) { && !opt_retain) {
/* /*
* Split isn't supported (implies Windows w/o retain). Avoid * Split isn't supported (implies Windows w/o retain). Avoid
* leaking the extents. * leaking the extent.
*/ */
assert(to_leak != NULL && lead == NULL && trail == NULL); assert(to_leak != NULL && lead == NULL && trail == NULL);
extent_deactivate(tsdn, arena, extents, to_leak); extent_deactivate_locked(tsdn, pac, ecache, to_leak);
return NULL; return NULL;
} }
if (result == extent_split_interior_ok) { if (result == extent_split_interior_ok) {
if (lead != NULL) { if (lead != NULL) {
extent_deactivate(tsdn, arena, extents, lead); extent_deactivate_locked(tsdn, pac, ecache, lead);
} }
if (trail != NULL) { if (trail != NULL) {
extent_deactivate(tsdn, arena, extents, trail); extent_deactivate_locked(tsdn, pac, ecache, trail);
} }
return extent; return edata;
} else { } else {
/* /*
* We should have picked an extent that was large enough to * We should have picked an extent that was large enough to
@@ -1094,294 +570,144 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
*/ */
assert(result == extent_split_interior_error); assert(result == extent_split_interior_error);
if (to_salvage != NULL) { if (to_salvage != NULL) {
extent_deregister(tsdn, to_salvage); extent_deregister(tsdn, pac, to_salvage);
} }
if (to_leak != NULL) { if (to_leak != NULL) {
void *leak = extent_base_get(to_leak); extent_deregister_no_gdump_sub(tsdn, pac, to_leak);
extent_deregister_no_gdump_sub(tsdn, to_leak); /*
extents_abandon_vm(tsdn, arena, r_extent_hooks, extents, * May go down the purge path (which assume no ecache
to_leak, growing_retained); * locks). Only happens with OOM caused split failures.
assert(extent_lock_from_addr(tsdn, rtree_ctx, leak, */
false) == NULL); malloc_mutex_unlock(tsdn, &ecache->mtx);
extents_abandon_vm(tsdn, pac, ehooks, ecache, to_leak,
growing_retained);
malloc_mutex_lock(tsdn, &ecache->mtx);
} }
return NULL; return NULL;
} }
unreachable(); unreachable();
} }
static bool
extent_need_manual_zero(arena_t *arena) {
/*
* Need to manually zero the extent on repopulating if either; 1) non
* default extent hooks installed (in which case the purge semantics may
* change); or 2) transparent huge pages enabled.
*/
return (!arena_has_default_hooks(arena) ||
(opt_thp == thp_mode_always));
}
/* /*
* Tries to satisfy the given allocation request by reusing one of the extents * Tries to satisfy the given allocation request by reusing one of the extents
* in the given extents_t. * in the given ecache_t.
*/ */
static extent_t * static edata_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
extents_t *extents, void *new_addr, size_t size, size_t pad, edata_t *expand_edata, size_t size, size_t alignment, bool zero,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit, bool *commit, bool growing_retained, bool guarded) {
bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0); WITNESS_RANK_CORE, growing_retained ? 1 : 0);
assert(new_addr == NULL || !slab); assert(!guarded || expand_edata == NULL);
assert(pad == 0 || !slab); assert(!guarded || alignment <= PAGE);
assert(!*zero || !slab);
rtree_ctx_t rtree_ctx_fallback; malloc_mutex_lock(tsdn, &ecache->mtx);
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks, edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache,
rtree_ctx, extents, new_addr, size, pad, alignment, slab, expand_edata, size, alignment, guarded);
growing_retained); if (edata == NULL) {
if (extent == NULL) { malloc_mutex_unlock(tsdn, &ecache->mtx);
return NULL; return NULL;
} }
extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx, edata = extent_recycle_split(tsdn, pac, ehooks, ecache, expand_edata,
extents, new_addr, size, pad, alignment, slab, szind, extent, size, alignment, edata, growing_retained);
growing_retained); malloc_mutex_unlock(tsdn, &ecache->mtx);
if (extent == NULL) { if (edata == NULL) {
return NULL; return NULL;
} }
if (*commit && !extent_committed_get(extent)) { assert(edata_state_get(edata) == extent_state_active);
if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, if (extent_commit_zero(tsdn, ehooks, edata, *commit, zero,
0, extent_size_get(extent), growing_retained)) { growing_retained)) {
extent_record(tsdn, arena, r_extent_hooks, extents, extent_record(tsdn, pac, ehooks, ecache, edata);
extent, growing_retained); return NULL;
return NULL;
}
if (!extent_need_manual_zero(arena)) {
extent_zeroed_set(extent, true);
}
} }
if (edata_committed_get(edata)) {
if (extent_committed_get(extent)) { /*
		 * This reverses the purpose of this variable: previously it
		 * was treated as an input parameter; now it turns into an
		 * output parameter, reporting whether the edata has actually
		 * been committed.
*/
*commit = true; *commit = true;
} }
if (extent_zeroed_get(extent)) { return edata;
*zero = true;
}
if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
}
assert(extent_state_get(extent) == extent_state_active);
if (slab) {
extent_slab_set(extent, slab);
extent_interior_register(tsdn, rtree_ctx, extent, szind);
}
if (*zero) {
void *addr = extent_base_get(extent);
if (!extent_zeroed_get(extent)) {
size_t size = extent_size_get(extent);
if (extent_need_manual_zero(arena) ||
pages_purge_forced(addr, size)) {
memset(addr, 0, size);
}
} else if (config_debug) {
size_t *p = (size_t *)(uintptr_t)addr;
/* Check the first page only. */
for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
assert(p[i] == 0);
}
}
}
return extent;
} }
/* /*
* If the caller specifies (!*zero), it is still possible to receive zeroed * If virtual memory is retained, create increasingly larger extents from which
* memory, in which case *zero is toggled to true. arena_extent_alloc() takes * to split requested extents in order to limit the total number of disjoint
* advantage of this to avoid demanding zeroed extents, but taking advantage of * virtual memory ranges retained by each shard.
* them if they are returned.
*/ */
static void * static edata_t *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { size_t size, size_t alignment, bool zero, bool *commit) {
void *ret; malloc_mutex_assert_owner(tsdn, &pac->grow_mtx);
assert(size != 0); size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
assert(alignment != 0);
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
return ret;
}
/* mmap. */
if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
!= NULL) {
return ret;
}
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
return ret;
}
/* All strategies for allocation failed. */
return NULL;
}
static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit) {
void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
ATOMIC_RELAXED));
if (have_madvise_huge && ret) {
pages_set_thp_state(ret, size);
}
return ret;
}
static void *
extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
tsdn_t *tsdn;
arena_t *arena;
tsdn = tsdn_fetch();
arena = arena_get(tsdn, arena_ind, false);
/*
* The arena we're allocating on behalf of must have been initialized
* already.
*/
assert(arena != NULL);
return extent_alloc_default_impl(tsdn, arena, new_addr, size,
ALIGNMENT_CEILING(alignment, PAGE), zero, commit);
}
static void
extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
/*
* The only legitimate case of customized extent hooks for a0 is
* hooks with no allocation activities. One such example is to
* place metadata on pre-allocated resources such as huge pages.
* In that case, rely on reentrancy_level checks to catch
* infinite recursions.
*/
pre_reentrancy(tsd, NULL);
} else {
pre_reentrancy(tsd, arena);
}
}
static void
extent_hook_post_reentrancy(tsdn_t *tsdn) {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
post_reentrancy(tsd);
}
/*
* If virtual memory is retained, create increasingly larger extents from which
* to split requested extents in order to limit the total number of disjoint
* virtual memory ranges retained by each arena.
*/
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
bool slab, szind_t szind, bool *zero, bool *commit) {
malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
assert(pad == 0 || !slab);
assert(!*zero || !slab);
size_t esize = size + pad;
size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */ /* Beware size_t wrap-around. */
if (alloc_size_min < esize) { if (alloc_size_min < size) {
goto label_err; goto label_err;
} }
/* /*
* Find the next extent size in the series that would be large enough to * Find the next extent size in the series that would be large enough to
* satisfy this request. * satisfy this request.
*/ */
pszind_t egn_skip = 0; size_t alloc_size;
size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); pszind_t exp_grow_skip;
while (alloc_size < alloc_size_min) { bool err = exp_grow_size_prepare(&pac->exp_grow, alloc_size_min,
egn_skip++; &alloc_size, &exp_grow_skip);
if (arena->extent_grow_next + egn_skip >= if (err) {
sz_psz2ind(SC_LARGE_MAXCLASS)) { goto label_err;
/* Outside legal range. */
goto label_err;
}
alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
} }
extent_t *extent = extent_alloc(tsdn, arena); edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
if (extent == NULL) { if (edata == NULL) {
goto label_err; goto label_err;
} }
bool zeroed = false; bool zeroed = false;
bool committed = false; bool committed = false;
void *ptr; void *ptr = ehooks_alloc(tsdn, ehooks, NULL, alloc_size, PAGE, &zeroed,
if (*r_extent_hooks == &extent_hooks_default) { &committed);
ptr = extent_alloc_default_impl(tsdn, arena, NULL,
alloc_size, PAGE, &zeroed, &committed);
} else {
extent_hook_pre_reentrancy(tsdn, arena);
ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
alloc_size, PAGE, &zeroed, &committed,
arena_ind_get(arena));
extent_hook_post_reentrancy(tsdn);
}
extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
arena_extent_sn_next(arena), extent_state_active, zeroed,
committed, true, EXTENT_IS_HEAD);
if (ptr == NULL) { if (ptr == NULL) {
extent_dalloc(tsdn, arena, extent); edata_cache_put(tsdn, pac->edata_cache, edata);
goto label_err; goto label_err;
} }
if (extent_register_no_gdump_add(tsdn, extent)) { edata_init(edata, ecache_ind_get(&pac->ecache_retained), ptr,
extent_dalloc(tsdn, arena, extent); alloc_size, false, SC_NSIZES, extent_sn_next(pac),
extent_state_active, zeroed, committed, EXTENT_PAI_PAC,
EXTENT_IS_HEAD);
if (extent_register_no_gdump_add(tsdn, pac, edata)) {
edata_cache_put(tsdn, pac->edata_cache, edata);
goto label_err; goto label_err;
} }
if (extent_zeroed_get(extent) && extent_committed_get(extent)) { if (edata_committed_get(edata)) {
*zero = true;
}
if (extent_committed_get(extent)) {
*commit = true; *commit = true;
} }
rtree_ctx_t rtree_ctx_fallback; edata_t *lead;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); edata_t *trail;
edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);
extent_t *lead; extent_split_interior_result_t result = extent_split_interior(tsdn,
extent_t *trail; pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, NULL,
extent_t *to_leak; size, alignment);
extent_t *to_salvage;
extent_split_interior_result_t result = extent_split_interior(
tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
&to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
true);
if (result == extent_split_interior_ok) { if (result == extent_split_interior_ok) {
if (lead != NULL) { if (lead != NULL) {
extent_record(tsdn, arena, r_extent_hooks, extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
&arena->extents_retained, lead, true); lead);
} }
if (trail != NULL) { if (trail != NULL) {
extent_record(tsdn, arena, r_extent_hooks, extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
&arena->extents_retained, trail, true); trail);
} }
} else { } else {
/* /*
@@ -1393,26 +719,32 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
if (config_prof) { if (config_prof) {
extent_gdump_add(tsdn, to_salvage); extent_gdump_add(tsdn, to_salvage);
} }
extent_record(tsdn, arena, r_extent_hooks, extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
&arena->extents_retained, to_salvage, true); to_salvage);
} }
if (to_leak != NULL) { if (to_leak != NULL) {
extent_deregister_no_gdump_sub(tsdn, to_leak); extent_deregister_no_gdump_sub(tsdn, pac, to_leak);
extents_abandon_vm(tsdn, arena, r_extent_hooks, extents_abandon_vm(tsdn, pac, ehooks,
&arena->extents_retained, to_leak, true); &pac->ecache_retained, to_leak, true);
} }
goto label_err; goto label_err;
} }
if (*commit && !extent_committed_get(extent)) { if (*commit && !edata_committed_get(edata)) {
if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0, if (extent_commit_impl(tsdn, ehooks, edata, 0,
extent_size_get(extent), true)) { edata_size_get(edata), true)) {
extent_record(tsdn, arena, r_extent_hooks, extent_record(tsdn, pac, ehooks,
&arena->extents_retained, extent, true); &pac->ecache_retained, edata);
goto label_err; goto label_err;
} }
if (!extent_need_manual_zero(arena)) { /* A successful commit should return zeroed memory. */
extent_zeroed_set(extent, true); if (config_debug) {
void *addr = edata_addr_get(edata);
size_t *p = (size_t *)(uintptr_t)addr;
/* Check the first page only. */
for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
assert(p[i] == 0);
}
} }
} }
@@ -1420,187 +752,74 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
* Increment extent_grow_next if doing so wouldn't exceed the allowed * Increment extent_grow_next if doing so wouldn't exceed the allowed
* range. * range.
*/ */
if (arena->extent_grow_next + egn_skip + 1 <=
arena->retain_grow_limit) {
arena->extent_grow_next += egn_skip + 1;
} else {
arena->extent_grow_next = arena->retain_grow_limit;
}
/* All opportunities for failure are past. */ /* All opportunities for failure are past. */
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); exp_grow_size_commit(&pac->exp_grow, exp_grow_skip);
malloc_mutex_unlock(tsdn, &pac->grow_mtx);
if (config_prof) { if (config_prof) {
/* Adjust gdump stats now that extent is final size. */ /* Adjust gdump stats now that extent is final size. */
extent_gdump_add(tsdn, extent); extent_gdump_add(tsdn, edata);
} }
if (pad != 0) { if (zero && !edata_zeroed_get(edata)) {
extent_addr_randomize(tsdn, extent, alignment); ehooks_zero(tsdn, ehooks, edata_base_get(edata),
} edata_size_get(edata));
if (slab) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
&rtree_ctx_fallback);
extent_slab_set(extent, true);
extent_interior_register(tsdn, rtree_ctx, extent, szind);
} }
if (*zero && !extent_zeroed_get(extent)) { return edata;
void *addr = extent_base_get(extent);
size_t size = extent_size_get(extent);
if (extent_need_manual_zero(arena) ||
pages_purge_forced(addr, size)) {
memset(addr, 0, size);
}
}
return extent;
label_err: label_err:
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); malloc_mutex_unlock(tsdn, &pac->grow_mtx);
return NULL; return NULL;
} }
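/*
 * Illustrative sketch, not part of this diff: the "grow retained" strategy.
 * Whenever the retained cache cannot satisfy a request, the shard maps a
 * region taken from a growing series and splits it, keeping the number of
 * disjoint retained mappings small.  The plain doubling series below is a
 * simplification of jemalloc's psz size-class series, and the sizes are
 * hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

int
main(void) {
	size_t grow_next = 2 * 1024 * 1024;	/* first mapping to try */
	size_t requests[] = {65536, 3 << 20, 5 << 20, 65536};

	for (size_t i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
		size_t alloc_size = grow_next;
		while (alloc_size < requests[i]) {
			alloc_size *= 2;	/* skip ahead in the series */
		}
		printf("request %zu -> map %zu bytes\n", requests[i],
		    alloc_size);
		grow_next = alloc_size * 2;	/* next mapping is larger */
	}
	return 0;
}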
static extent_t * static edata_t *
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, edata_t *expand_edata, size_t size, size_t alignment, bool zero,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { bool *commit, bool guarded) {
assert(size != 0); assert(size != 0);
assert(alignment != 0); assert(alignment != 0);
malloc_mutex_lock(tsdn, &arena->extent_grow_mtx); malloc_mutex_lock(tsdn, &pac->grow_mtx);
extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, edata_t *edata = extent_recycle(tsdn, pac, ehooks,
&arena->extents_retained, new_addr, size, pad, alignment, slab, &pac->ecache_retained, expand_edata, size, alignment, zero, commit,
szind, zero, commit, true); /* growing_retained */ true, guarded);
if (extent != NULL) { if (edata != NULL) {
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); malloc_mutex_unlock(tsdn, &pac->grow_mtx);
if (config_prof) { if (config_prof) {
extent_gdump_add(tsdn, extent); extent_gdump_add(tsdn, edata);
} }
} else if (opt_retain && new_addr == NULL) { } else if (opt_retain && expand_edata == NULL && !guarded) {
extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size, edata = extent_grow_retained(tsdn, pac, ehooks, size,
pad, alignment, slab, szind, zero, commit); alignment, zero, commit);
/* extent_grow_retained() always releases extent_grow_mtx. */ /* extent_grow_retained() always releases pac->grow_mtx. */
} else {
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
}
malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
return extent;
}
static extent_t *
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
size_t esize = size + pad;
extent_t *extent = extent_alloc(tsdn, arena);
if (extent == NULL) {
return NULL;
}
void *addr;
size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
if (*r_extent_hooks == &extent_hooks_default) {
/* Call directly to propagate tsdn. */
addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
palignment, zero, commit);
} else { } else {
extent_hook_pre_reentrancy(tsdn, arena); malloc_mutex_unlock(tsdn, &pac->grow_mtx);
addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
esize, palignment, zero, commit, arena_ind_get(arena));
extent_hook_post_reentrancy(tsdn);
}
if (addr == NULL) {
extent_dalloc(tsdn, arena, extent);
return NULL;
}
extent_init(extent, arena, addr, esize, slab, szind,
arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
true, EXTENT_NOT_HEAD);
if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
}
if (extent_register(tsdn, extent)) {
extent_dalloc(tsdn, arena, extent);
return NULL;
}
return extent;
}
extent_t *
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
extent_hooks_assure_initialized(arena, r_extent_hooks);
extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
new_addr, size, pad, alignment, slab, szind, zero, commit);
if (extent == NULL) {
if (opt_retain && new_addr != NULL) {
/*
* When retain is enabled and new_addr is set, we do not
* attempt extent_alloc_wrapper_hard which does mmap
* that is very unlikely to succeed (unless it happens
* to be at the end).
*/
return NULL;
}
extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
new_addr, size, pad, alignment, slab, szind, zero, commit);
} }
malloc_mutex_assert_not_owner(tsdn, &pac->grow_mtx);
assert(extent == NULL || extent_dumpable_get(extent)); return edata;
return extent;
} }
static bool static bool
extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner, extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
const extent_t *outer) { edata_t *inner, edata_t *outer, bool forward) {
assert(extent_arena_get(inner) == arena); extent_assert_can_coalesce(inner, outer);
if (extent_arena_get(outer) != arena) { eset_remove(&ecache->eset, outer);
return false;
} bool err = extent_merge_impl(tsdn, pac, ehooks,
forward ? inner : outer, forward ? outer : inner,
assert(extent_state_get(inner) == extent_state_active); /* holding_core_locks */ true);
if (extent_state_get(outer) != extents->state) {
return false;
}
if (extent_committed_get(inner) != extent_committed_get(outer)) {
return false;
}
return true;
}
static bool
extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
bool growing_retained) {
assert(extent_can_coalesce(arena, extents, inner, outer));
extent_activate_locked(tsdn, arena, extents, outer);
malloc_mutex_unlock(tsdn, &extents->mtx);
bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
forward ? inner : outer, forward ? outer : inner, growing_retained);
malloc_mutex_lock(tsdn, &extents->mtx);
if (err) { if (err) {
extent_deactivate_locked(tsdn, arena, extents, outer); extent_deactivate_check_state_locked(tsdn, pac, ecache, outer,
extent_state_merging);
} }
return err; return err;
} }
static extent_t * static edata_t *
extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena, extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, ecache_t *ecache, edata_t *edata, bool *coalesced) {
extent_t *extent, bool *coalesced, bool growing_retained, assert(!edata_guarded_get(edata));
bool inactive_only) {
/* /*
* We avoid checking / locking inactive neighbors for large size * We avoid checking / locking inactive neighbors for large size
* classes, since they are eagerly coalesced on deallocation which can * classes, since they are eagerly coalesced on deallocation which can
@@ -1615,467 +834,333 @@ extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
again = false; again = false;
/* Try to coalesce forward. */ /* Try to coalesce forward. */
extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx, edata_t *next = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
extent_past_get(extent), inactive_only); edata, EXTENT_PAI_PAC, ecache->state, /* forward */ true);
if (next != NULL) { if (next != NULL) {
/* if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
* extents->mtx only protects against races for next, true)) {
* like-state extents, so call extent_can_coalesce() if (ecache->delay_coalesce) {
* before releasing next's pool lock.
*/
bool can_coalesce = extent_can_coalesce(arena, extents,
extent, next);
extent_unlock(tsdn, next);
if (can_coalesce && !extent_coalesce(tsdn, arena,
r_extent_hooks, extents, extent, next, true,
growing_retained)) {
if (extents->delay_coalesce) {
/* Do minimal coalescing. */ /* Do minimal coalescing. */
*coalesced = true; *coalesced = true;
return extent; return edata;
} }
again = true; again = true;
} }
} }
/* Try to coalesce backward. */ /* Try to coalesce backward. */
extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx, edata_t *prev = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
extent_before_get(extent), inactive_only); edata, EXTENT_PAI_PAC, ecache->state, /* forward */ false);
if (prev != NULL) { if (prev != NULL) {
bool can_coalesce = extent_can_coalesce(arena, extents, if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
extent, prev); prev, false)) {
extent_unlock(tsdn, prev); edata = prev;
if (ecache->delay_coalesce) {
if (can_coalesce && !extent_coalesce(tsdn, arena,
r_extent_hooks, extents, extent, prev, false,
growing_retained)) {
extent = prev;
if (extents->delay_coalesce) {
/* Do minimal coalescing. */ /* Do minimal coalescing. */
*coalesced = true; *coalesced = true;
return extent; return edata;
} }
again = true; again = true;
} }
} }
} while (again); } while (again);
if (extents->delay_coalesce) { if (ecache->delay_coalesce) {
*coalesced = false; *coalesced = false;
} }
return extent; return edata;
} }
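/*
 * Illustrative sketch, not part of this diff: coalescing a freed range with
 * an adjacent neighbor.  The real code additionally requires matching arena,
 * state, and commit status; only address adjacency is modeled here, and the
 * ranges are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uintptr_t base;
	size_t size;
} range_t;

/* Merge b into a if b starts exactly where a ends (forward coalesce). */
static bool
coalesce_forward(range_t *a, const range_t *b) {
	if (a->base + a->size != b->base) {
		return false;
	}
	a->size += b->size;
	return true;
}

int
main(void) {
	range_t freed = {0x10000, 0x3000};
	range_t next = {0x13000, 0x2000};	/* adjacent: merges */
	range_t far = {0x20000, 0x1000};	/* not adjacent: does not */

	printf("next: merged=%d size=0x%zx\n",
	    coalesce_forward(&freed, &next), freed.size);
	printf("far:  merged=%d size=0x%zx\n",
	    coalesce_forward(&freed, &far), freed.size);
	return 0;
}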
static extent_t * static edata_t *
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, ecache_t *ecache, edata_t *edata, bool *coalesced) {
extent_t *extent, bool *coalesced, bool growing_retained) { return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx, coalesced);
extents, extent, coalesced, growing_retained, false);
} }
static extent_t * static edata_t *
extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena, extent_try_coalesce_large(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, ecache_t *ecache, edata_t *edata, bool *coalesced) {
extent_t *extent, bool *coalesced, bool growing_retained) { return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx, coalesced);
extents, extent, coalesced, growing_retained, true); }
/* Purge a single extent to retained / unmapped directly. */
static void
extent_maximally_purge(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata) {
size_t extent_size = edata_size_get(edata);
extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
if (config_stats) {
/* Update stats accordingly. */
LOCKEDINT_MTX_LOCK(tsdn, *pac->stats_mtx);
locked_inc_u64(tsdn,
LOCKEDINT_MTX(*pac->stats_mtx),
&pac->stats->decay_dirty.nmadvise, 1);
locked_inc_u64(tsdn,
LOCKEDINT_MTX(*pac->stats_mtx),
&pac->stats->decay_dirty.purged,
extent_size >> LG_PAGE);
LOCKEDINT_MTX_UNLOCK(tsdn, *pac->stats_mtx);
atomic_fetch_sub_zu(&pac->stats->pac_mapped, extent_size,
ATOMIC_RELAXED);
}
}

/*
 * Does the metadata management portions of putting an unused extent into the
 * given ecache_t (coalesces and inserts into the eset).
 */
void
extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
    edata_t *edata) {
    assert((ecache->state != extent_state_dirty &&
        ecache->state != extent_state_muzzy) ||
        !edata_zeroed_get(edata));

    malloc_mutex_lock(tsdn, &ecache->mtx);

    emap_assert_mapped(tsdn, pac->emap, edata);

    if (edata_guarded_get(edata)) {
        goto label_skip_coalesce;
    }
    if (!ecache->delay_coalesce) {
        edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata,
            NULL);
    } else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
        assert(ecache == &pac->ecache_dirty);
        /* Always coalesce large extents eagerly. */
        bool coalesced;
        do {
            assert(edata_state_get(edata) == extent_state_active);
            edata = extent_try_coalesce_large(tsdn, pac, ehooks,
                ecache, edata, &coalesced);
        } while (coalesced);
        if (edata_size_get(edata) >=
            atomic_load_zu(&pac->oversize_threshold, ATOMIC_RELAXED)
            && extent_may_force_decay(pac)) {
            /* Shortcut to purge the oversize extent eagerly. */
            malloc_mutex_unlock(tsdn, &ecache->mtx);
            extent_maximally_purge(tsdn, pac, ehooks, edata);
            return;
        }
    }
label_skip_coalesce:
    extent_deactivate_locked(tsdn, pac, ecache, edata);

    malloc_mutex_unlock(tsdn, &ecache->mtx);
}

void
extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *edata) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    if (extent_register(tsdn, pac, edata)) {
        edata_cache_put(tsdn, pac->edata_cache, edata);
        return;
    }
    extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
}

static bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *edata) {
    bool err;

    assert(edata_base_get(edata) != NULL);
    assert(edata_size_get(edata) != 0);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    edata_addr_set(edata, edata_base_get(edata));

    /* Try to deallocate. */
    err = ehooks_dalloc(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), edata_committed_get(edata));

    if (!err) {
        edata_cache_put(tsdn, pac->edata_cache, edata);
    }

    return err;
}
edata_t *
extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
if (edata == NULL) {
return NULL;
}
size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment,
&zero, commit);
if (addr == NULL) {
edata_cache_put(tsdn, pac->edata_cache, edata);
return NULL;
}
edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr,
size, /* slab */ false, SC_NSIZES, extent_sn_next(pac),
extent_state_active, zero, *commit, EXTENT_PAI_PAC,
opt_retain ? EXTENT_IS_HEAD : EXTENT_NOT_HEAD);
/*
* Retained memory is not counted towards gdump. Only if an extent is
* allocated as a separate mapping, i.e. growing_retained is false, then
* gdump should be updated.
*/
bool gdump_add = !growing_retained;
if (extent_register_impl(tsdn, pac, edata, gdump_add)) {
edata_cache_put(tsdn, pac->edata_cache, edata);
return NULL;
}
return edata;
}
void
extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *edata) {
    assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    /* Avoid calling the default extent_dalloc unless have to. */
    if (!ehooks_dalloc_will_fail(ehooks)) {
        /* Remove guard pages for dalloc / unmap. */
        if (edata_guarded_get(edata)) {
            assert(ehooks_are_default(ehooks));
            san_unguard_pages_two_sided(tsdn, ehooks, edata,
                pac->emap);
        }
        /*
         * Deregister first to avoid a race with other allocating
         * threads, and reregister if deallocation fails.
         */
        extent_deregister(tsdn, pac, edata);
        if (!extent_dalloc_wrapper_try(tsdn, pac, ehooks, edata)) {
            return;
        }
        extent_reregister(tsdn, pac, edata);
    }

    /* Try to decommit; purge if that fails. */
    bool zeroed;
    if (!edata_committed_get(edata)) {
        zeroed = true;
    } else if (!extent_decommit_wrapper(tsdn, ehooks, edata, 0,
        edata_size_get(edata))) {
        zeroed = true;
    } else if (!ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), 0, edata_size_get(edata))) {
        zeroed = true;
    } else if (edata_state_get(edata) == extent_state_muzzy ||
        !ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), 0, edata_size_get(edata))) {
        zeroed = false;
    } else {
        zeroed = false;
    }
    edata_zeroed_set(edata, zeroed);

    if (config_prof) {
        extent_gdump_sub(tsdn, edata);
    }

    extent_record(tsdn, pac, ehooks, &pac->ecache_retained, edata);
}

void
extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *edata) {
    assert(edata_base_get(edata) != NULL);
    assert(edata_size_get(edata) != 0);
    extent_state_t state = edata_state_get(edata);
    assert(state == extent_state_retained || state == extent_state_active);
    assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    if (edata_guarded_get(edata)) {
        assert(opt_retain);
        san_unguard_pages_pre_destroy(tsdn, ehooks, edata, pac->emap);
    }
    edata_addr_set(edata, edata_base_get(edata));

    /* Try to destroy; silently fail otherwise. */
    ehooks_destroy(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), edata_committed_get(edata));

    edata_cache_put(tsdn, pac->edata_cache, edata);
}

static bool
extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    size_t offset, size_t length, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);
    bool err = ehooks_commit(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), offset, length);
    edata_committed_set(edata, edata_committed_get(edata) || !err);
    return err;
}

bool
extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    size_t offset, size_t length) {
    return extent_commit_impl(tsdn, ehooks, edata, offset, length,
        /* growing_retained */ false);
}

bool
extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    size_t offset, size_t length) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);
    bool err = ehooks_decommit(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), offset, length);
    edata_committed_set(edata, edata_committed_get(edata) && err);
    return err;
}

static bool
extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    size_t offset, size_t length, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);
    bool err = ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), offset, length);
    return err;
}

bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    size_t offset, size_t length) {
    return extent_purge_lazy_impl(tsdn, ehooks, edata, offset,
        length, false);
}

static bool
extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    size_t offset, size_t length, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);
    bool err = ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), offset, length);
    return err;
}

bool
extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    size_t offset, size_t length) {
    return extent_purge_forced_impl(tsdn, ehooks, edata, offset, length,
        false);
}
/*
@@ -2085,183 +1170,95 @@ extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
 * with the trail (the higher addressed portion). This makes 'extent' the lead,
 * and returns the trail (except in case of error).
 */
static edata_t *
extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks) {
    assert(edata_size_get(edata) == size_a + size_b);
    /* Only the shrink path may split w/o holding core locks. */
    if (holding_core_locks) {
        witness_assert_positive_depth_to_rank(
            tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
    } else {
        witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
            WITNESS_RANK_CORE, 0);
    }

    if (ehooks_split_will_fail(ehooks)) {
        return NULL;
    }

    edata_t *trail = edata_cache_get(tsdn, pac->edata_cache);
    if (trail == NULL) {
        goto label_error_a;
    }

    edata_init(trail, edata_arena_ind_get(edata),
        (void *)((uintptr_t)edata_base_get(edata) + size_a), size_b,
        /* slab */ false, SC_NSIZES, edata_sn_get(edata),
        edata_state_get(edata), edata_zeroed_get(edata),
        edata_committed_get(edata), EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
    emap_prepare_t prepare;
    bool err = emap_split_prepare(tsdn, pac->emap, &prepare, edata,
        size_a, trail, size_b);
    if (err) {
        goto label_error_b;
    }

    /*
     * No need to acquire trail or edata, because: 1) trail was new (just
     * allocated); and 2) edata is either an active allocation (the shrink
     * path), or in an acquired state (extracted from the ecache on the
     * extent_recycle_split path).
     */
    assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
    assert(emap_edata_is_acquired(tsdn, pac->emap, trail));

    err = ehooks_split(tsdn, ehooks, edata_base_get(edata), size_a + size_b,
        size_a, size_b, edata_committed_get(edata));

    if (err) {
        goto label_error_b;
    }

    edata_size_set(edata, size_a);
    emap_split_commit(tsdn, pac->emap, &prepare, edata, size_a, trail,
        size_b);

    return trail;
label_error_b:
    edata_cache_put(tsdn, pac->edata_cache, trail);
label_error_a:
    return NULL;
}

edata_t *
extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata,
    size_t size_a, size_t size_b, bool holding_core_locks) {
    return extent_split_impl(tsdn, pac, ehooks, edata, size_a, size_b,
        holding_core_locks);
}
static bool
extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a,
    edata_t *b, bool holding_core_locks) {
    /* Only the expanding path may merge w/o holding ecache locks. */
    if (holding_core_locks) {
        witness_assert_positive_depth_to_rank(
            tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
    } else {
        witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
            WITNESS_RANK_CORE, 0);
    }

    assert(edata_base_get(a) < edata_base_get(b));
    assert(edata_arena_ind_get(a) == edata_arena_ind_get(b));
    assert(edata_arena_ind_get(a) == ehooks_ind_get(ehooks));
    emap_assert_mapped(tsdn, pac->emap, a);
    emap_assert_mapped(tsdn, pac->emap, b);

    bool err = ehooks_merge(tsdn, ehooks, edata_base_get(a),
        edata_size_get(a), edata_base_get(b), edata_size_get(b),
        edata_committed_get(a));

    if (err) {
        return true;
@@ -2272,132 +1269,58 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
     * owned, so the following code uses decomposed helper functions rather
     * than extent_{,de}register() to do things in the right order.
     */
    emap_prepare_t prepare;
    emap_merge_prepare(tsdn, pac->emap, &prepare, a, b);

    assert(edata_state_get(a) == extent_state_active ||
        edata_state_get(a) == extent_state_merging);
    edata_state_set(a, extent_state_active);
    edata_size_set(a, edata_size_get(a) + edata_size_get(b));
    edata_sn_set(a, (edata_sn_get(a) < edata_sn_get(b)) ?
        edata_sn_get(a) : edata_sn_get(b));
    edata_zeroed_set(a, edata_zeroed_get(a) && edata_zeroed_get(b));

    emap_merge_commit(tsdn, pac->emap, &prepare, a, b);

    edata_cache_put(tsdn, pac->edata_cache, b);

    return false;
}

bool
extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *a, edata_t *b) {
    return extent_merge_impl(tsdn, pac, ehooks, a, b,
        /* holding_core_locks */ false);
}

bool
extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    bool commit, bool zero, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);

    if (commit && !edata_committed_get(edata)) {
        if (extent_commit_impl(tsdn, ehooks, edata, 0,
            edata_size_get(edata), growing_retained)) {
            return true;
        }
    }
    if (zero && !edata_zeroed_get(edata)) {
        void *addr = edata_base_get(edata);
        size_t size = edata_size_get(edata);
        ehooks_zero(tsdn, ehooks, addr, size);
    }
    return false;
}
bool
extent_boot(void) {
    assert(sizeof(slab_data_t) >= sizeof(e_prof_info_t));

    if (have_dss) {
        extent_dss_boot();
    }

    return false;
}
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -109,7 +108,7 @@ extent_dss_max_update(void *new_addr) {
void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit) {
    edata_t *gap;

    cassert(have_dss);
    assert(size > 0);
@@ -123,7 +122,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
        return NULL;
    }

    gap = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
    if (gap == NULL) {
        return NULL;
    }
@@ -141,6 +140,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
            goto label_oom;
        }

        bool head_state = opt_retain ? EXTENT_IS_HEAD :
            EXTENT_NOT_HEAD;
        /*
         * Compute how much page-aligned gap space (if any) is
         * necessary to satisfy alignment. This space can be
@@ -153,11 +154,12 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
        size_t gap_size_page = (uintptr_t)ret -
            (uintptr_t)gap_addr_page;
        if (gap_size_page != 0) {
            edata_init(gap, arena_ind_get(arena),
                gap_addr_page, gap_size_page, false,
                SC_NSIZES, extent_sn_next(
                &arena->pa_shard.pac),
                extent_state_active, false, true,
                EXTENT_PAI_PAC, head_state);
        }
        /*
         * Compute the address just past the end of the desired
@@ -186,25 +188,29 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
                extent_dss_extending_finish();

                if (gap_size_page != 0) {
                    ehooks_t *ehooks = arena_get_ehooks(
                        arena);
                    extent_dalloc_gap(tsdn,
                        &arena->pa_shard.pac, ehooks, gap);
                } else {
                    edata_cache_put(tsdn,
                        &arena->pa_shard.edata_cache, gap);
                }
                if (!*commit) {
                    *commit = pages_decommit(ret, size);
                }
                if (*zero && *commit) {
                    edata_t edata = {0};
                    ehooks_t *ehooks = arena_get_ehooks(
                        arena);
                    edata_init(&edata,
                        arena_ind_get(arena), ret, size,
                        size, false, SC_NSIZES,
                        extent_state_active, false, true,
                        EXTENT_PAI_PAC, head_state);
                    if (extent_purge_forced_wrapper(tsdn,
                        ehooks, &edata, 0, size)) {
                        memset(ret, 0, size);
                    }
                }
@@ -224,7 +230,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    }
label_oom:
    extent_dss_extending_finish();
    edata_cache_put(tsdn, &arena->pa_shard.edata_cache, gap);
    return NULL;
}
...
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
...
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/fxp.h"
static bool
fxp_isdigit(char c) {
return '0' <= c && c <= '9';
}
bool
fxp_parse(fxp_t *result, const char *str, char **end) {
/*
* Using malloc_strtoumax in this method isn't as handy as you might
* expect (I tried). In the fractional part, significant leading zeros
* mean that you still need to do your own parsing, now with trickier
* math. In the integer part, the casting (uintmax_t to uint32_t)
* forces more reasoning about bounds than just checking for overflow as
* we parse.
*/
uint32_t integer_part = 0;
const char *cur = str;
/* The string must start with a digit or a decimal point. */
if (*cur != '.' && !fxp_isdigit(*cur)) {
return true;
}
while ('0' <= *cur && *cur <= '9') {
integer_part *= 10;
integer_part += *cur - '0';
if (integer_part >= (1U << 16)) {
return true;
}
cur++;
}
/*
* We've parsed all digits at the beginning of the string, without
* overflow. Either we're done, or there's a fractional part.
*/
if (*cur != '.') {
*result = (integer_part << 16);
if (end != NULL) {
*end = (char *)cur;
}
return false;
}
/* There's a fractional part. */
cur++;
if (!fxp_isdigit(*cur)) {
/* Shouldn't end on the decimal point. */
return true;
}
/*
* We use a lot of precision for the fractional part, even though we'll
* discard most of it; this lets us get exact values for the important
* special case where the denominator is a small power of 2 (for
* instance, 1/512 == 0.001953125 is exactly representable even with
* only 16 bits of fractional precision). We need to left-shift by 16
* before dividing so we pick the number of digits to be
* floor(log10(2**48)) = 14.
*/
uint64_t fractional_part = 0;
uint64_t frac_div = 1;
for (int i = 0; i < FXP_FRACTIONAL_PART_DIGITS; i++) {
fractional_part *= 10;
frac_div *= 10;
if (fxp_isdigit(*cur)) {
fractional_part += *cur - '0';
cur++;
}
}
/*
* We only parse the first maxdigits characters, but we can still ignore
* any digits after that.
*/
while (fxp_isdigit(*cur)) {
cur++;
}
assert(fractional_part < frac_div);
uint32_t fractional_repr = (uint32_t)(
(fractional_part << 16) / frac_div);
/* Success! */
*result = (integer_part << 16) + fractional_repr;
if (end != NULL) {
*end = (char *)cur;
}
return false;
}
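/*
 * A minimal illustrative sketch of the parsing above (the helper name below is
 * hypothetical and not part of this file): it shows how decimal strings map
 * onto the 16.16 fixed-point representation, including the exactly
 * representable 1/512 case mentioned in the comment.
 */
static inline void
fxp_parse_usage_example(void) {
	fxp_t val;
	char *end;

	/* "3.25" parses to 3 * 2^16 + 0.25 * 2^16 == 212992 (0x34000). */
	bool err = fxp_parse(&val, "3.25", &end);
	assert(!err && val == (3U << 16) + (1U << 14));

	/* 1/512 == 0.001953125 is exact: 2^16 / 512 == 128. */
	err = fxp_parse(&val, "0.001953125", &end);
	assert(!err && val == 128);

	/* Strings that start with neither a digit nor '.' are rejected. */
	err = fxp_parse(&val, "x", &end);
	assert(err);
}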
void
fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]) {
uint32_t integer_part = fxp_round_down(a);
uint32_t fractional_part = (a & ((1U << 16) - 1));
int leading_fraction_zeros = 0;
uint64_t fraction_digits = fractional_part;
for (int i = 0; i < FXP_FRACTIONAL_PART_DIGITS; i++) {
if (fraction_digits < (1U << 16)
&& fraction_digits * 10 >= (1U << 16)) {
leading_fraction_zeros = i;
}
fraction_digits *= 10;
}
fraction_digits >>= 16;
while (fraction_digits > 0 && fraction_digits % 10 == 0) {
fraction_digits /= 10;
}
size_t printed = malloc_snprintf(buf, FXP_BUF_SIZE, "%"FMTu32".",
integer_part);
for (int i = 0; i < leading_fraction_zeros; i++) {
buf[printed] = '0';
printed++;
}
malloc_snprintf(&buf[printed], FXP_BUF_SIZE - printed, "%"FMTu64,
fraction_digits);
}
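/*
 * A minimal illustrative round-trip sketch (the helper below is hypothetical,
 * added only for demonstration): fxp_print() reproduces leading fractional
 * zeros, so a value parsed from "3.0625" (0.0625 == 4096/65536 exactly)
 * prints back as the same string.
 */
static inline void
fxp_print_roundtrip_example(void) {
	fxp_t val;
	char buf[FXP_BUF_SIZE];

	bool err = fxp_parse(&val, "3.0625", NULL);
	assert(!err && val == (3U << 16) + 4096);
	fxp_print(val, buf);
	assert(strcmp(buf, "3.0625") == 0);
}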
@@ -130,9 +130,9 @@ hook_reentrantp() {
 */
	static bool in_hook_global = true;
	tsdn_t *tsdn = tsdn_fetch();
	bool *in_hook = tsdn_in_hookp_get(tsdn);
	if (in_hook != NULL) {
		return in_hook;
	}
	return &in_hook_global;
}
...
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/hpa.h"
#include "jemalloc/internal/fb.h"
#include "jemalloc/internal/witness.h"
#define HPA_EDEN_SIZE (128 * HUGEPAGE)
static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
size_t alignment, bool zero, bool guarded, bool frequent_reuse,
bool *deferred_work_generated);
static size_t hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
static bool hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool *deferred_work_generated);
static void hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
bool *deferred_work_generated);
static void hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self,
edata_list_active_t *list, bool *deferred_work_generated);
static uint64_t hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
bool
hpa_supported() {
#ifdef _WIN32
/*
* At least until the API and implementation are somewhat settled, we
* don't want to try to debug the VM subsystem on the hardest-to-test
* platform.
*/
return false;
#endif
if (!pages_can_hugify) {
return false;
}
/*
* We fundamentally rely on an address-space-hungry growth strategy for
* hugepages.
*/
if (LG_SIZEOF_PTR != 3) {
return false;
}
/*
* If we couldn't detect the value of HUGEPAGE, HUGEPAGE_PAGES becomes
* this sentinel value -- see the comment in pages.h.
*/
if (HUGEPAGE_PAGES == 1) {
return false;
}
return true;
}
static void
hpa_do_consistency_checks(hpa_shard_t *shard) {
assert(shard->base != NULL);
}
bool
hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks) {
/* malloc_conf processing should have filtered out these cases. */
assert(hpa_supported());
bool err;
err = malloc_mutex_init(&central->grow_mtx, "hpa_central_grow",
WITNESS_RANK_HPA_CENTRAL_GROW, malloc_mutex_rank_exclusive);
if (err) {
return true;
}
err = malloc_mutex_init(&central->mtx, "hpa_central",
WITNESS_RANK_HPA_CENTRAL, malloc_mutex_rank_exclusive);
if (err) {
return true;
}
central->base = base;
central->eden = NULL;
central->eden_len = 0;
central->age_counter = 0;
central->hooks = *hooks;
return false;
}
static hpdata_t *
hpa_alloc_ps(tsdn_t *tsdn, hpa_central_t *central) {
return (hpdata_t *)base_alloc(tsdn, central->base, sizeof(hpdata_t),
CACHELINE);
}
hpdata_t *
hpa_central_extract(tsdn_t *tsdn, hpa_central_t *central, size_t size,
bool *oom) {
/* Don't yet support big allocations; these should get filtered out. */
assert(size <= HUGEPAGE);
/*
* Should only try to extract from the central allocator if the local
* shard is exhausted. We should hold the grow_mtx on that shard.
*/
witness_assert_positive_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_HPA_SHARD_GROW);
malloc_mutex_lock(tsdn, &central->grow_mtx);
*oom = false;
hpdata_t *ps = NULL;
/* Is eden a perfect fit? */
if (central->eden != NULL && central->eden_len == HUGEPAGE) {
ps = hpa_alloc_ps(tsdn, central);
if (ps == NULL) {
*oom = true;
malloc_mutex_unlock(tsdn, &central->grow_mtx);
return NULL;
}
hpdata_init(ps, central->eden, central->age_counter++);
central->eden = NULL;
central->eden_len = 0;
malloc_mutex_unlock(tsdn, &central->grow_mtx);
return ps;
}
/*
* We're about to try to allocate from eden by splitting. If eden is
* NULL, we have to allocate it too. Otherwise, we just have to
* allocate an edata_t for the new psset.
*/
if (central->eden == NULL) {
/*
* During development, we're primarily concerned with systems
* with overcommit. Eventually, we should be more careful here.
*/
bool commit = true;
/* Allocate address space, bailing if we fail. */
void *new_eden = pages_map(NULL, HPA_EDEN_SIZE, HUGEPAGE,
&commit);
if (new_eden == NULL) {
*oom = true;
malloc_mutex_unlock(tsdn, &central->grow_mtx);
return NULL;
}
ps = hpa_alloc_ps(tsdn, central);
if (ps == NULL) {
pages_unmap(new_eden, HPA_EDEN_SIZE);
*oom = true;
malloc_mutex_unlock(tsdn, &central->grow_mtx);
return NULL;
}
central->eden = new_eden;
central->eden_len = HPA_EDEN_SIZE;
} else {
/* Eden is already nonempty; only need an edata for ps. */
ps = hpa_alloc_ps(tsdn, central);
if (ps == NULL) {
*oom = true;
malloc_mutex_unlock(tsdn, &central->grow_mtx);
return NULL;
}
}
assert(ps != NULL);
assert(central->eden != NULL);
assert(central->eden_len > HUGEPAGE);
assert(central->eden_len % HUGEPAGE == 0);
assert(HUGEPAGE_ADDR2BASE(central->eden) == central->eden);
hpdata_init(ps, central->eden, central->age_counter++);
char *eden_char = (char *)central->eden;
eden_char += HUGEPAGE;
central->eden = (void *)eden_char;
central->eden_len -= HUGEPAGE;
malloc_mutex_unlock(tsdn, &central->grow_mtx);
return ps;
}
bool
hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
base_t *base, edata_cache_t *edata_cache, unsigned ind,
const hpa_shard_opts_t *opts) {
/* malloc_conf processing should have filtered out these cases. */
assert(hpa_supported());
bool err;
err = malloc_mutex_init(&shard->grow_mtx, "hpa_shard_grow",
WITNESS_RANK_HPA_SHARD_GROW, malloc_mutex_rank_exclusive);
if (err) {
return true;
}
err = malloc_mutex_init(&shard->mtx, "hpa_shard",
WITNESS_RANK_HPA_SHARD, malloc_mutex_rank_exclusive);
if (err) {
return true;
}
assert(edata_cache != NULL);
shard->central = central;
shard->base = base;
edata_cache_fast_init(&shard->ecf, edata_cache);
psset_init(&shard->psset);
shard->age_counter = 0;
shard->ind = ind;
shard->emap = emap;
shard->opts = *opts;
shard->npending_purge = 0;
nstime_init_zero(&shard->last_purge);
shard->stats.npurge_passes = 0;
shard->stats.npurges = 0;
shard->stats.nhugifies = 0;
shard->stats.ndehugifies = 0;
/*
* Fill these in last, so that if an hpa_shard gets used despite
* initialization failing, we'll at least crash instead of just
* operating on corrupted data.
*/
shard->pai.alloc = &hpa_alloc;
shard->pai.alloc_batch = &hpa_alloc_batch;
shard->pai.expand = &hpa_expand;
shard->pai.shrink = &hpa_shrink;
shard->pai.dalloc = &hpa_dalloc;
shard->pai.dalloc_batch = &hpa_dalloc_batch;
shard->pai.time_until_deferred_work = &hpa_time_until_deferred_work;
hpa_do_consistency_checks(shard);
return false;
}
/*
* Note that the stats functions here follow the usual stats naming conventions;
* "merge" obtains the stats from some live object of instance, while "accum"
* only combines the stats from one stats objet to another. Hence the lack of
* locking here.
*/
static void
hpa_shard_nonderived_stats_accum(hpa_shard_nonderived_stats_t *dst,
hpa_shard_nonderived_stats_t *src) {
dst->npurge_passes += src->npurge_passes;
dst->npurges += src->npurges;
dst->nhugifies += src->nhugifies;
dst->ndehugifies += src->ndehugifies;
}
void
hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src) {
psset_stats_accum(&dst->psset_stats, &src->psset_stats);
hpa_shard_nonderived_stats_accum(&dst->nonderived_stats,
&src->nonderived_stats);
}
void
hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
hpa_shard_stats_t *dst) {
hpa_do_consistency_checks(shard);
malloc_mutex_lock(tsdn, &shard->grow_mtx);
malloc_mutex_lock(tsdn, &shard->mtx);
psset_stats_accum(&dst->psset_stats, &shard->psset.stats);
hpa_shard_nonderived_stats_accum(&dst->nonderived_stats, &shard->stats);
malloc_mutex_unlock(tsdn, &shard->mtx);
malloc_mutex_unlock(tsdn, &shard->grow_mtx);
}
static bool
hpa_good_hugification_candidate(hpa_shard_t *shard, hpdata_t *ps) {
/*
* Note that this needs to be >= rather than just >, because of the
* important special case in which the hugification threshold is exactly
* HUGEPAGE.
*/
return hpdata_nactive_get(ps) * PAGE
>= shard->opts.hugification_threshold;
}
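/*
 * Worked example (illustrative, not from the original source): with 4 KiB
 * pages and a 2 MiB hugepage, a fully backed pageslab has nactive == 512, so
 * nactive * PAGE == HUGEPAGE exactly. If shard->opts.hugification_threshold
 * is set to exactly HUGEPAGE, only ">=" lets that fully active slab qualify;
 * with ">" it never would, which is the special case noted above.
 */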
static size_t
hpa_adjusted_ndirty(tsdn_t *tsdn, hpa_shard_t *shard) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
return psset_ndirty(&shard->psset) - shard->npending_purge;
}
static size_t
hpa_ndirty_max(tsdn_t *tsdn, hpa_shard_t *shard) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
if (shard->opts.dirty_mult == (fxp_t)-1) {
return (size_t)-1;
}
return fxp_mul_frac(psset_nactive(&shard->psset),
shard->opts.dirty_mult);
}
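/*
 * A minimal illustrative sketch (the helper below is hypothetical and the
 * numbers are chosen only for demonstration): it shows the dirty-page budget
 * implied by hpa_ndirty_max() for a concrete dirty_mult. With
 * dirty_mult == 0.25 and 1000 active pages, at most 250 dirty pages are
 * tolerated before purging kicks in.
 */
static inline void
hpa_ndirty_max_example(void) {
	fxp_t dirty_mult;
	bool err = fxp_parse(&dirty_mult, "0.25", NULL);
	assert(!err);
	assert(fxp_mul_frac(1000, dirty_mult) == 250);
}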
static bool
hpa_hugify_blocked_by_ndirty(tsdn_t *tsdn, hpa_shard_t *shard) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
if (to_hugify == NULL) {
return false;
}
return hpa_adjusted_ndirty(tsdn, shard)
+ hpdata_nretained_get(to_hugify) > hpa_ndirty_max(tsdn, shard);
}
static bool
hpa_should_purge(tsdn_t *tsdn, hpa_shard_t *shard) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
if (hpa_adjusted_ndirty(tsdn, shard) > hpa_ndirty_max(tsdn, shard)) {
return true;
}
if (hpa_hugify_blocked_by_ndirty(tsdn, shard)) {
return true;
}
return false;
}
static void
hpa_update_purge_hugify_eligibility(tsdn_t *tsdn, hpa_shard_t *shard,
hpdata_t *ps) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
if (hpdata_changing_state_get(ps)) {
hpdata_purge_allowed_set(ps, false);
hpdata_disallow_hugify(ps);
return;
}
/*
* Hugepages are distinctly costly to purge, so try to avoid it unless
* they're *particularly* full of dirty pages. Eventually, we should
* use a smarter / more dynamic heuristic for situations where we have
* to manually hugify.
*
* In situations where we don't manually hugify, this problem is
* reduced. The "bad" situation we're trying to avoid is one's that's
* common in some Linux configurations (where both enabled and defrag
* are set to madvise) that can lead to long latency spikes on the first
* access after a hugification. The ideal policy in such configurations
* is probably time-based for both purging and hugifying; only hugify a
* hugepage if it's met the criteria for some extended period of time,
* and only dehugify it if it's failed to meet the criteria for an
* extended period of time. When background threads are on, we should
* try to take this hit on one of them, as well.
*
* I think the ideal setting is THP always enabled, and defrag set to
* deferred; in that case we don't need any explicit calls on the
* allocator's end at all; we just try to pack allocations in a
* hugepage-friendly manner and let the OS hugify in the background.
*/
hpdata_purge_allowed_set(ps, hpdata_ndirty_get(ps) > 0);
if (hpa_good_hugification_candidate(shard, ps)
&& !hpdata_huge_get(ps)) {
nstime_t now;
shard->central->hooks.curtime(&now, /* first_reading */ true);
hpdata_allow_hugify(ps, now);
}
/*
* Once a hugepage has become eligible for hugification, we don't mark
* it as ineligible just because it stops meeting the criteria (this
* could lead to situations where a hugepage that spends most of its
* time meeting the criteria never quite gets hugified if there are
* intervening deallocations). The idea is that the hugification delay
* will allow them to get purged, resetting their "hugify-allowed" bit.
* If they don't get purged, then the hugification isn't hurting and
* might help. As an exception, we don't hugify hugepages that are now
* empty; it definitely doesn't help there until the hugepage gets
* reused, which is likely not for a while.
*/
if (hpdata_nactive_get(ps) == 0) {
hpdata_disallow_hugify(ps);
}
}
static bool
hpa_shard_has_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
return to_hugify != NULL || hpa_should_purge(tsdn, shard);
}
/* Returns whether or not we purged anything. */
static bool
hpa_try_purge(tsdn_t *tsdn, hpa_shard_t *shard) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
hpdata_t *to_purge = psset_pick_purge(&shard->psset);
if (to_purge == NULL) {
return false;
}
assert(hpdata_purge_allowed_get(to_purge));
assert(!hpdata_changing_state_get(to_purge));
/*
* Don't let anyone else purge or hugify this page while
* we're purging it (allocations and deallocations are
* OK).
*/
psset_update_begin(&shard->psset, to_purge);
assert(hpdata_alloc_allowed_get(to_purge));
hpdata_mid_purge_set(to_purge, true);
hpdata_purge_allowed_set(to_purge, false);
hpdata_disallow_hugify(to_purge);
/*
* Unlike with hugification (where concurrent
* allocations are allowed), concurrent allocation out
* of a hugepage being purged is unsafe; we might hand
* out an extent for an allocation and then purge it
* (clearing out user data).
*/
hpdata_alloc_allowed_set(to_purge, false);
psset_update_end(&shard->psset, to_purge);
/* Gather all the metadata we'll need during the purge. */
bool dehugify = hpdata_huge_get(to_purge);
hpdata_purge_state_t purge_state;
size_t num_to_purge = hpdata_purge_begin(to_purge, &purge_state);
shard->npending_purge += num_to_purge;
malloc_mutex_unlock(tsdn, &shard->mtx);
/* Actually do the purging, now that the lock is dropped. */
if (dehugify) {
shard->central->hooks.dehugify(hpdata_addr_get(to_purge),
HUGEPAGE);
}
size_t total_purged = 0;
uint64_t purges_this_pass = 0;
void *purge_addr;
size_t purge_size;
while (hpdata_purge_next(to_purge, &purge_state, &purge_addr,
&purge_size)) {
total_purged += purge_size;
assert(total_purged <= HUGEPAGE);
purges_this_pass++;
shard->central->hooks.purge(purge_addr, purge_size);
}
malloc_mutex_lock(tsdn, &shard->mtx);
/* The shard updates */
shard->npending_purge -= num_to_purge;
shard->stats.npurge_passes++;
shard->stats.npurges += purges_this_pass;
shard->central->hooks.curtime(&shard->last_purge,
/* first_reading */ false);
if (dehugify) {
shard->stats.ndehugifies++;
}
/* The hpdata updates. */
psset_update_begin(&shard->psset, to_purge);
if (dehugify) {
hpdata_dehugify(to_purge);
}
hpdata_purge_end(to_purge, &purge_state);
hpdata_mid_purge_set(to_purge, false);
hpdata_alloc_allowed_set(to_purge, true);
hpa_update_purge_hugify_eligibility(tsdn, shard, to_purge);
psset_update_end(&shard->psset, to_purge);
return true;
}
/* Returns whether or not we hugified anything. */
static bool
hpa_try_hugify(tsdn_t *tsdn, hpa_shard_t *shard) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
if (hpa_hugify_blocked_by_ndirty(tsdn, shard)) {
return false;
}
hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
if (to_hugify == NULL) {
return false;
}
assert(hpdata_hugify_allowed_get(to_hugify));
assert(!hpdata_changing_state_get(to_hugify));
/* Make sure that it's been hugifiable for long enough. */
nstime_t time_hugify_allowed = hpdata_time_hugify_allowed(to_hugify);
uint64_t millis = shard->central->hooks.ms_since(&time_hugify_allowed);
if (millis < shard->opts.hugify_delay_ms) {
return false;
}
/*
* Don't let anyone else purge or hugify this page while
* we're hugifying it (allocations and deallocations are
* OK).
*/
psset_update_begin(&shard->psset, to_hugify);
hpdata_mid_hugify_set(to_hugify, true);
hpdata_purge_allowed_set(to_hugify, false);
hpdata_disallow_hugify(to_hugify);
assert(hpdata_alloc_allowed_get(to_hugify));
psset_update_end(&shard->psset, to_hugify);
malloc_mutex_unlock(tsdn, &shard->mtx);
shard->central->hooks.hugify(hpdata_addr_get(to_hugify), HUGEPAGE);
malloc_mutex_lock(tsdn, &shard->mtx);
shard->stats.nhugifies++;
psset_update_begin(&shard->psset, to_hugify);
hpdata_hugify(to_hugify);
hpdata_mid_hugify_set(to_hugify, false);
hpa_update_purge_hugify_eligibility(tsdn, shard, to_hugify);
psset_update_end(&shard->psset, to_hugify);
return true;
}
/*
* Execution of deferred work is forced if it's triggered by an explicit
* hpa_shard_do_deferred_work() call.
*/
static void
hpa_shard_maybe_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard,
bool forced) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
if (!forced && shard->opts.deferral_allowed) {
return;
}
/*
* If we're on a background thread, do work so long as there's work to
* be done. Otherwise, bound latency to not be *too* bad by doing at
* most a small fixed number of operations.
*/
bool hugified = false;
bool purged = false;
size_t max_ops = (forced ? (size_t)-1 : 16);
size_t nops = 0;
do {
/*
* Always purge before hugifying, to make sure we get some
* ability to hit our quiescence targets.
*/
purged = false;
while (hpa_should_purge(tsdn, shard) && nops < max_ops) {
purged = hpa_try_purge(tsdn, shard);
if (purged) {
nops++;
}
}
hugified = hpa_try_hugify(tsdn, shard);
if (hugified) {
nops++;
}
malloc_mutex_assert_owner(tsdn, &shard->mtx);
} while ((hugified || purged) && nops < max_ops);
}
static edata_t *
hpa_try_alloc_one_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
bool *oom) {
bool err;
edata_t *edata = edata_cache_fast_get(tsdn, &shard->ecf);
if (edata == NULL) {
*oom = true;
return NULL;
}
hpdata_t *ps = psset_pick_alloc(&shard->psset, size);
if (ps == NULL) {
edata_cache_fast_put(tsdn, &shard->ecf, edata);
return NULL;
}
psset_update_begin(&shard->psset, ps);
if (hpdata_empty(ps)) {
/*
* If the pageslab used to be empty, treat it as though it's
* brand new for fragmentation-avoidance purposes; what we're
* trying to approximate is the age of the allocations *in* that
* pageslab, and the allocations in the new pageslab are
* definitionally the youngest in this hpa shard.
*/
hpdata_age_set(ps, shard->age_counter++);
}
void *addr = hpdata_reserve_alloc(ps, size);
edata_init(edata, shard->ind, addr, size, /* slab */ false,
SC_NSIZES, /* sn */ hpdata_age_get(ps), extent_state_active,
/* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
EXTENT_NOT_HEAD);
edata_ps_set(edata, ps);
/*
* This could theoretically be moved outside of the critical section,
* but that introduces the potential for a race. Without the lock, the
* (initially nonempty, since this is the reuse pathway) pageslab we
* allocated out of could become otherwise empty while the lock is
* dropped. This would force us to deal with a pageslab eviction down
* the error pathway, which is a pain.
*/
err = emap_register_boundary(tsdn, shard->emap, edata,
SC_NSIZES, /* slab */ false);
if (err) {
hpdata_unreserve(ps, edata_addr_get(edata),
edata_size_get(edata));
/*
* We should arguably reset dirty state here, but this would
* require some sort of prepare + commit functionality that's a
* little much to deal with for now.
*
* We don't have a do_deferred_work down this pathway, on the
* principle that we didn't *really* affect shard state (we
* tweaked the stats, but our tweaks weren't really accurate).
*/
psset_update_end(&shard->psset, ps);
edata_cache_fast_put(tsdn, &shard->ecf, edata);
*oom = true;
return NULL;
}
hpa_update_purge_hugify_eligibility(tsdn, shard, ps);
psset_update_end(&shard->psset, ps);
return edata;
}
static size_t
hpa_try_alloc_batch_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
bool *oom, size_t nallocs, edata_list_active_t *results,
bool *deferred_work_generated) {
malloc_mutex_lock(tsdn, &shard->mtx);
size_t nsuccess = 0;
for (; nsuccess < nallocs; nsuccess++) {
edata_t *edata = hpa_try_alloc_one_no_grow(tsdn, shard, size,
oom);
if (edata == NULL) {
break;
}
edata_list_active_append(results, edata);
}
hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ false);
*deferred_work_generated = hpa_shard_has_deferred_work(tsdn, shard);
malloc_mutex_unlock(tsdn, &shard->mtx);
return nsuccess;
}
static size_t
hpa_alloc_batch_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
size_t nallocs, edata_list_active_t *results,
bool *deferred_work_generated) {
assert(size <= shard->opts.slab_max_alloc);
bool oom = false;
size_t nsuccess = hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom,
nallocs, results, deferred_work_generated);
if (nsuccess == nallocs || oom) {
return nsuccess;
}
/*
* We didn't OOM, but weren't able to fill everything requested of us;
* try to grow.
*/
malloc_mutex_lock(tsdn, &shard->grow_mtx);
/*
* Check for grow races; maybe some earlier thread expanded the psset
* in between when we dropped the main mutex and grabbed the grow mutex.
*/
nsuccess += hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom,
nallocs - nsuccess, results, deferred_work_generated);
if (nsuccess == nallocs || oom) {
malloc_mutex_unlock(tsdn, &shard->grow_mtx);
return nsuccess;
}
/*
* Note that we don't hold shard->mtx here (while growing);
* deallocations (and allocations of smaller sizes) may still succeed
* while we're doing this potentially expensive system call.
*/
hpdata_t *ps = hpa_central_extract(tsdn, shard->central, size, &oom);
if (ps == NULL) {
malloc_mutex_unlock(tsdn, &shard->grow_mtx);
return nsuccess;
}
/*
* We got the pageslab; allocate from it. This does an unlock followed
* by a lock on the same mutex, and holds the grow mutex while doing
* deferred work, but this is an uncommon path; the simplicity is worth
* it.
*/
malloc_mutex_lock(tsdn, &shard->mtx);
psset_insert(&shard->psset, ps);
malloc_mutex_unlock(tsdn, &shard->mtx);
nsuccess += hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom,
nallocs - nsuccess, results, deferred_work_generated);
/*
* Drop grow_mtx before doing deferred work; other threads blocked on it
* should be allowed to proceed while we're working.
*/
malloc_mutex_unlock(tsdn, &shard->grow_mtx);
return nsuccess;
}
static hpa_shard_t *
hpa_from_pai(pai_t *self) {
	assert(self->alloc == &hpa_alloc);
	assert(self->expand == &hpa_expand);
	assert(self->shrink == &hpa_shrink);
	assert(self->dalloc == &hpa_dalloc);
return (hpa_shard_t *)self;
}
static size_t
hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
edata_list_active_t *results, bool *deferred_work_generated) {
assert(nallocs > 0);
assert((size & PAGE_MASK) == 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
hpa_shard_t *shard = hpa_from_pai(self);
if (size > shard->opts.slab_max_alloc) {
return 0;
}
size_t nsuccess = hpa_alloc_batch_psset(tsdn, shard, size, nallocs,
results, deferred_work_generated);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
/*
* Guard the sanity checks with config_debug because the loop cannot be
* proven non-circular by the compiler, even if everything within the
* loop is optimized away.
*/
if (config_debug) {
edata_t *edata;
ql_foreach(edata, &results->head, ql_link_active) {
emap_assert_mapped(tsdn, shard->emap, edata);
assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
assert(edata_state_get(edata) == extent_state_active);
assert(edata_arena_ind_get(edata) == shard->ind);
assert(edata_szind_get_maybe_invalid(edata) ==
SC_NSIZES);
assert(!edata_slab_get(edata));
assert(edata_committed_get(edata));
assert(edata_base_get(edata) == edata_addr_get(edata));
assert(edata_base_get(edata) != NULL);
}
}
return nsuccess;
}
static edata_t *
hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
assert((size & PAGE_MASK) == 0);
assert(!guarded);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
/* We don't handle alignment or zeroing for now. */
if (alignment > PAGE || zero) {
return NULL;
}
/*
* An alloc with alignment == PAGE and zero == false is equivalent to a
* batch alloc of 1. Just do that, so we can share code.
*/
edata_list_active_t results;
edata_list_active_init(&results);
size_t nallocs = hpa_alloc_batch(tsdn, self, size, /* nallocs */ 1,
&results, deferred_work_generated);
assert(nallocs == 0 || nallocs == 1);
edata_t *edata = edata_list_active_first(&results);
return edata;
}
static bool
hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
size_t new_size, bool zero, bool *deferred_work_generated) {
/* Expand not yet supported. */
return true;
}
static bool
hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool *deferred_work_generated) {
/* Shrink not yet supported. */
return true;
}
static void
hpa_dalloc_prepare_unlocked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) {
malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
assert(edata_state_get(edata) == extent_state_active);
assert(edata_arena_ind_get(edata) == shard->ind);
assert(edata_szind_get_maybe_invalid(edata) == SC_NSIZES);
assert(edata_committed_get(edata));
assert(edata_base_get(edata) != NULL);
/*
* Another thread shouldn't be trying to touch the metadata of an
* allocation being freed. The one exception is a merge attempt from a
* lower-addressed PAC extent; in this case we have a nominal race on
* the edata metadata bits, but in practice the fact that the PAI bits
* are different will prevent any further access. The race is bad, but
* benign in practice, and the long term plan is to track enough state
* in the rtree to prevent these merge attempts in the first place.
*/
edata_addr_set(edata, edata_base_get(edata));
edata_zeroed_set(edata, false);
emap_deregister_boundary(tsdn, shard->emap, edata);
}
static void
hpa_dalloc_locked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
/*
* Release the metadata early, to avoid having to remember to do it
* while we're also doing tricky purging logic. First, we need to grab
* a few bits of metadata from it.
*
* Note that the shard mutex protects ps's metadata too; it wouldn't be
* correct to try to read most information out of it without the lock.
*/
hpdata_t *ps = edata_ps_get(edata);
/* Currently, all edatas come from pageslabs. */
assert(ps != NULL);
void *unreserve_addr = edata_addr_get(edata);
size_t unreserve_size = edata_size_get(edata);
edata_cache_fast_put(tsdn, &shard->ecf, edata);
psset_update_begin(&shard->psset, ps);
hpdata_unreserve(ps, unreserve_addr, unreserve_size);
hpa_update_purge_hugify_eligibility(tsdn, shard, ps);
psset_update_end(&shard->psset, ps);
}
static void
hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
bool *deferred_work_generated) {
hpa_shard_t *shard = hpa_from_pai(self);
edata_t *edata;
ql_foreach(edata, &list->head, ql_link_active) {
hpa_dalloc_prepare_unlocked(tsdn, shard, edata);
}
malloc_mutex_lock(tsdn, &shard->mtx);
/* Now, remove from the list. */
while ((edata = edata_list_active_first(list)) != NULL) {
edata_list_active_remove(list, edata);
hpa_dalloc_locked(tsdn, shard, edata);
}
hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ false);
*deferred_work_generated =
hpa_shard_has_deferred_work(tsdn, shard);
malloc_mutex_unlock(tsdn, &shard->mtx);
}
static void
hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
bool *deferred_work_generated) {
assert(!edata_guarded_get(edata));
/* Just a dalloc_batch of size 1; this lets us share logic. */
edata_list_active_t dalloc_list;
edata_list_active_init(&dalloc_list);
edata_list_active_append(&dalloc_list, edata);
hpa_dalloc_batch(tsdn, self, &dalloc_list, deferred_work_generated);
}
/*
* Calculate time until either purging or hugification ought to happen.
* Called by background threads.
*/
static uint64_t
hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
hpa_shard_t *shard = hpa_from_pai(self);
uint64_t time_ns = BACKGROUND_THREAD_DEFERRED_MAX;
malloc_mutex_lock(tsdn, &shard->mtx);
hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
if (to_hugify != NULL) {
nstime_t time_hugify_allowed =
hpdata_time_hugify_allowed(to_hugify);
uint64_t since_hugify_allowed_ms =
shard->central->hooks.ms_since(&time_hugify_allowed);
/*
* If not enough time has passed since hugification was allowed,
* sleep for the rest.
*/
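	/*
	 * For example (hypothetical numbers): with hugify_delay_ms == 10000
	 * and since_hugify_allowed_ms == 4000, we report
	 * (10000 - 4000) * 1000 * 1000 == 6 * 10^9 ns until the next wakeup.
	 */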
if (since_hugify_allowed_ms < shard->opts.hugify_delay_ms) {
time_ns = shard->opts.hugify_delay_ms -
since_hugify_allowed_ms;
time_ns *= 1000 * 1000;
} else {
malloc_mutex_unlock(tsdn, &shard->mtx);
return BACKGROUND_THREAD_DEFERRED_MIN;
}
}
if (hpa_should_purge(tsdn, shard)) {
/*
* If we haven't purged before, no need to check interval
* between purges. Simply purge as soon as possible.
*/
if (shard->stats.npurge_passes == 0) {
malloc_mutex_unlock(tsdn, &shard->mtx);
return BACKGROUND_THREAD_DEFERRED_MIN;
}
uint64_t since_last_purge_ms = shard->central->hooks.ms_since(
&shard->last_purge);
if (since_last_purge_ms < shard->opts.min_purge_interval_ms) {
uint64_t until_purge_ns;
until_purge_ns = shard->opts.min_purge_interval_ms -
since_last_purge_ms;
until_purge_ns *= 1000 * 1000;
if (until_purge_ns < time_ns) {
time_ns = until_purge_ns;
}
} else {
time_ns = BACKGROUND_THREAD_DEFERRED_MIN;
}
}
malloc_mutex_unlock(tsdn, &shard->mtx);
return time_ns;
}
void
hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard) {
hpa_do_consistency_checks(shard);
malloc_mutex_lock(tsdn, &shard->mtx);
edata_cache_fast_disable(tsdn, &shard->ecf);
malloc_mutex_unlock(tsdn, &shard->mtx);
}
static void
hpa_shard_assert_stats_empty(psset_bin_stats_t *bin_stats) {
assert(bin_stats->npageslabs == 0);
assert(bin_stats->nactive == 0);
}
static void
hpa_assert_empty(tsdn_t *tsdn, hpa_shard_t *shard, psset_t *psset) {
malloc_mutex_assert_owner(tsdn, &shard->mtx);
for (int huge = 0; huge <= 1; huge++) {
hpa_shard_assert_stats_empty(&psset->stats.full_slabs[huge]);
for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
hpa_shard_assert_stats_empty(
&psset->stats.nonfull_slabs[i][huge]);
}
}
}
void
hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard) {
hpa_do_consistency_checks(shard);
/*
* By the time we're here, the arena code should have dalloc'd all the
* active extents, which means we should have eventually evicted
* everything from the psset, so it shouldn't be able to serve even a
* 1-page allocation.
*/
if (config_debug) {
malloc_mutex_lock(tsdn, &shard->mtx);
hpa_assert_empty(tsdn, shard, &shard->psset);
malloc_mutex_unlock(tsdn, &shard->mtx);
}
hpdata_t *ps;
while ((ps = psset_pick_alloc(&shard->psset, PAGE)) != NULL) {
/* There should be no allocations anywhere. */
assert(hpdata_empty(ps));
psset_remove(&shard->psset, ps);
shard->central->hooks.unmap(hpdata_addr_get(ps), HUGEPAGE);
}
}
void
hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard,
bool deferral_allowed) {
hpa_do_consistency_checks(shard);
malloc_mutex_lock(tsdn, &shard->mtx);
bool deferral_previously_allowed = shard->opts.deferral_allowed;
shard->opts.deferral_allowed = deferral_allowed;
if (deferral_previously_allowed && !deferral_allowed) {
hpa_shard_maybe_do_deferred_work(tsdn, shard,
/* forced */ true);
}
malloc_mutex_unlock(tsdn, &shard->mtx);
}
void
hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard) {
hpa_do_consistency_checks(shard);
malloc_mutex_lock(tsdn, &shard->mtx);
hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ true);
malloc_mutex_unlock(tsdn, &shard->mtx);
}
void
hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard) {
hpa_do_consistency_checks(shard);
malloc_mutex_prefork(tsdn, &shard->grow_mtx);
}
void
hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard) {
hpa_do_consistency_checks(shard);
malloc_mutex_prefork(tsdn, &shard->mtx);
}
void
hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard) {
hpa_do_consistency_checks(shard);
malloc_mutex_postfork_parent(tsdn, &shard->grow_mtx);
malloc_mutex_postfork_parent(tsdn, &shard->mtx);
}
void
hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard) {
hpa_do_consistency_checks(shard);
malloc_mutex_postfork_child(tsdn, &shard->grow_mtx);
malloc_mutex_postfork_child(tsdn, &shard->mtx);
}
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/hpa_hooks.h"
static void *hpa_hooks_map(size_t size);
static void hpa_hooks_unmap(void *ptr, size_t size);
static void hpa_hooks_purge(void *ptr, size_t size);
static void hpa_hooks_hugify(void *ptr, size_t size);
static void hpa_hooks_dehugify(void *ptr, size_t size);
static void hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading);
static uint64_t hpa_hooks_ms_since(nstime_t *past_nstime);
hpa_hooks_t hpa_hooks_default = {
&hpa_hooks_map,
&hpa_hooks_unmap,
&hpa_hooks_purge,
&hpa_hooks_hugify,
&hpa_hooks_dehugify,
&hpa_hooks_curtime,
&hpa_hooks_ms_since
};
static void *
hpa_hooks_map(size_t size) {
bool commit = true;
return pages_map(NULL, size, HUGEPAGE, &commit);
}
static void
hpa_hooks_unmap(void *ptr, size_t size) {
pages_unmap(ptr, size);
}
static void
hpa_hooks_purge(void *ptr, size_t size) {
pages_purge_forced(ptr, size);
}
static void
hpa_hooks_hugify(void *ptr, size_t size) {
bool err = pages_huge(ptr, size);
(void)err;
}
static void
hpa_hooks_dehugify(void *ptr, size_t size) {
bool err = pages_nohuge(ptr, size);
(void)err;
}
static void
hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading) {
if (first_reading) {
nstime_init_zero(r_nstime);
}
nstime_update(r_nstime);
}
static uint64_t
hpa_hooks_ms_since(nstime_t *past_nstime) {
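	/* Convert the elapsed time from nanoseconds to milliseconds. */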
return nstime_ns_since(past_nstime) / 1000 / 1000;
}
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/hpdata.h"
static int
hpdata_age_comp(const hpdata_t *a, const hpdata_t *b) {
uint64_t a_age = hpdata_age_get(a);
uint64_t b_age = hpdata_age_get(b);
/*
* hpdata ages are operation counts in the psset; no two should be the
* same.
*/
assert(a_age != b_age);
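	/*
	 * Branchless three-way comparison: yields -1, 0, or 1 without the
	 * truncation risk of returning the difference of two uint64_ts.
	 */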
return (a_age > b_age) - (a_age < b_age);
}
ph_gen(, hpdata_age_heap, hpdata_t, age_link, hpdata_age_comp)
void
hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age) {
hpdata_addr_set(hpdata, addr);
hpdata_age_set(hpdata, age);
hpdata->h_huge = false;
hpdata->h_alloc_allowed = true;
hpdata->h_in_psset_alloc_container = false;
hpdata->h_purge_allowed = false;
hpdata->h_hugify_allowed = false;
hpdata->h_in_psset_hugify_container = false;
hpdata->h_mid_purge = false;
hpdata->h_mid_hugify = false;
hpdata->h_updating = false;
hpdata->h_in_psset = false;
hpdata_longest_free_range_set(hpdata, HUGEPAGE_PAGES);
hpdata->h_nactive = 0;
fb_init(hpdata->active_pages, HUGEPAGE_PAGES);
hpdata->h_ntouched = 0;
fb_init(hpdata->touched_pages, HUGEPAGE_PAGES);
hpdata_assert_consistent(hpdata);
}
void *
hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz) {
hpdata_assert_consistent(hpdata);
/*
* This is a metadata change; the hpdata should therefore either not be
* in the psset, or should have explicitly marked itself as being
* mid-update.
*/
assert(!hpdata->h_in_psset || hpdata->h_updating);
assert(hpdata->h_alloc_allowed);
assert((sz & PAGE_MASK) == 0);
size_t npages = sz >> LG_PAGE;
assert(npages <= hpdata_longest_free_range_get(hpdata));
size_t result;
size_t start = 0;
/*
* These are dead stores, but the compiler will issue warnings on them
* since it can't tell statically that found is always true below.
*/
size_t begin = 0;
size_t len = 0;
size_t largest_unchosen_range = 0;
while (true) {
bool found = fb_urange_iter(hpdata->active_pages,
HUGEPAGE_PAGES, start, &begin, &len);
/*
* A precondition to this function is that hpdata must be able
* to serve the allocation.
*/
assert(found);
assert(len <= hpdata_longest_free_range_get(hpdata));
if (len >= npages) {
/*
* We use first-fit within the page slabs; this gives
* bounded worst-case fragmentation within a slab. It's
* not necessarily right; we could experiment with
* various other options.
*/
break;
}
if (len > largest_unchosen_range) {
largest_unchosen_range = len;
}
start = begin + len;
}
/* We found a range; remember it. */
result = begin;
fb_set_range(hpdata->active_pages, HUGEPAGE_PAGES, begin, npages);
hpdata->h_nactive += npages;
/*
* We might be about to dirty some memory for the first time; update our
* count if so.
*/
size_t new_dirty = fb_ucount(hpdata->touched_pages, HUGEPAGE_PAGES,
result, npages);
fb_set_range(hpdata->touched_pages, HUGEPAGE_PAGES, result, npages);
hpdata->h_ntouched += new_dirty;
/*
* If we allocated out of a range that was the longest in the hpdata, it
* might be the only one of that size and we'll have to adjust the
* metadata.
*/
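	/*
	 * Rescan from the end of the allocation; if another free range of the
	 * old maximum length turns up, we can stop early. Otherwise the
	 * largest free range seen on either side of the allocation (including
	 * the remainder of the chosen range) becomes the new longest free
	 * range.
	 */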
if (len == hpdata_longest_free_range_get(hpdata)) {
start = begin + npages;
while (start < HUGEPAGE_PAGES) {
bool found = fb_urange_iter(hpdata->active_pages,
HUGEPAGE_PAGES, start, &begin, &len);
if (!found) {
break;
}
assert(len <= hpdata_longest_free_range_get(hpdata));
if (len == hpdata_longest_free_range_get(hpdata)) {
largest_unchosen_range = len;
break;
}
if (len > largest_unchosen_range) {
largest_unchosen_range = len;
}
start = begin + len;
}
hpdata_longest_free_range_set(hpdata, largest_unchosen_range);
}
hpdata_assert_consistent(hpdata);
return (void *)(
(uintptr_t)hpdata_addr_get(hpdata) + (result << LG_PAGE));
}
void
hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz) {
hpdata_assert_consistent(hpdata);
/* See the comment in reserve. */
assert(!hpdata->h_in_psset || hpdata->h_updating);
assert(((uintptr_t)addr & PAGE_MASK) == 0);
assert((sz & PAGE_MASK) == 0);
size_t begin = ((uintptr_t)addr - (uintptr_t)hpdata_addr_get(hpdata))
>> LG_PAGE;
assert(begin < HUGEPAGE_PAGES);
size_t npages = sz >> LG_PAGE;
size_t old_longest_range = hpdata_longest_free_range_get(hpdata);
fb_unset_range(hpdata->active_pages, HUGEPAGE_PAGES, begin, npages);
/* We might have just created a new, larger range. */
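	/*
	 * new_begin is one past the closest active page below the freed range
	 * (fb_fls returns -1 if there is none, making new_begin 0); new_end is
	 * the first active page after it (HUGEPAGE_PAGES if there is none), so
	 * [new_begin, new_end) is the full free run containing the just-freed
	 * pages.
	 */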
size_t new_begin = (fb_fls(hpdata->active_pages, HUGEPAGE_PAGES,
begin) + 1);
size_t new_end = fb_ffs(hpdata->active_pages, HUGEPAGE_PAGES,
begin + npages - 1);
size_t new_range_len = new_end - new_begin;
if (new_range_len > old_longest_range) {
hpdata_longest_free_range_set(hpdata, new_range_len);
}
hpdata->h_nactive -= npages;
hpdata_assert_consistent(hpdata);
}
size_t
hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state) {
hpdata_assert_consistent(hpdata);
/*
* See the comment below; we might purge any inactive extent, so it's
* unsafe for any other thread to turn any inactive extent active while
* we're operating on it.
*/
assert(!hpdata_alloc_allowed_get(hpdata));
purge_state->npurged = 0;
purge_state->next_purge_search_begin = 0;
/*
* Initialize to_purge.
*
* It's possible to end up in situations where two dirty extents are
* separated by a retained extent:
* - 1 page allocated.
* - 1 page allocated.
	 * - 1 page allocated.
*
* If the middle page is freed and purged, and then the first and third
* pages are freed, and then another purge pass happens, the hpdata
* looks like this:
* - 1 page dirty.
* - 1 page retained.
* - 1 page dirty.
*
* But it's safe to do a single 3-page purge.
*
* We do this by first computing the dirty pages, and then filling in
	 * any gaps by extending each range in the dirty bitmap until
* the next active page. This purges more pages, but the expensive part
* of purging is the TLB shootdowns, rather than the kernel state
* tracking; doing a little bit more of the latter is fine if it saves
* us from doing some of the former.
*/
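	/*
	 * Restricted to the three pages in that example (1 == bit set):
	 *   active:   0 0 0
	 *   touched:  1 0 1
	 *   dirty:    1 0 1  (touched & ~active)
	 *   to_purge: 1 1 1  (after extending to the next active page)
	 */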
/*
* The dirty pages are those that are touched but not active. Note that
* in a normal-ish case, HUGEPAGE_PAGES is something like 512 and the
* fb_group_t is 64 bits, so this is 64 bytes, spread across 8
* fb_group_ts.
*/
fb_group_t dirty_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
fb_init(dirty_pages, HUGEPAGE_PAGES);
fb_bit_not(dirty_pages, hpdata->active_pages, HUGEPAGE_PAGES);
fb_bit_and(dirty_pages, dirty_pages, hpdata->touched_pages,
HUGEPAGE_PAGES);
fb_init(purge_state->to_purge, HUGEPAGE_PAGES);
size_t next_bit = 0;
while (next_bit < HUGEPAGE_PAGES) {
size_t next_dirty = fb_ffs(dirty_pages, HUGEPAGE_PAGES,
next_bit);
/* Recall that fb_ffs returns nbits if no set bit is found. */
if (next_dirty == HUGEPAGE_PAGES) {
break;
}
size_t next_active = fb_ffs(hpdata->active_pages,
HUGEPAGE_PAGES, next_dirty);
/*
* Don't purge past the end of the dirty extent, into retained
* pages. This helps the kernel a tiny bit, but honestly it's
* mostly helpful for testing (where we tend to write test cases
* that think in terms of the dirty ranges).
*/
ssize_t last_dirty = fb_fls(dirty_pages, HUGEPAGE_PAGES,
next_active - 1);
assert(last_dirty >= 0);
assert((size_t)last_dirty >= next_dirty);
assert((size_t)last_dirty - next_dirty + 1 <= HUGEPAGE_PAGES);
fb_set_range(purge_state->to_purge, HUGEPAGE_PAGES, next_dirty,
last_dirty - next_dirty + 1);
next_bit = next_active + 1;
}
/* We should purge, at least, everything dirty. */
size_t ndirty = hpdata->h_ntouched - hpdata->h_nactive;
purge_state->ndirty_to_purge = ndirty;
assert(ndirty <= fb_scount(
purge_state->to_purge, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES));
assert(ndirty == fb_scount(dirty_pages, HUGEPAGE_PAGES, 0,
HUGEPAGE_PAGES));
hpdata_assert_consistent(hpdata);
return ndirty;
}
bool
hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
void **r_purge_addr, size_t *r_purge_size) {
/*
* Note that we don't have a consistency check here; we're accessing
* hpdata without synchronization, and therefore have no right to expect
* a consistent state.
*/
assert(!hpdata_alloc_allowed_get(hpdata));
if (purge_state->next_purge_search_begin == HUGEPAGE_PAGES) {
return false;
}
size_t purge_begin;
size_t purge_len;
bool found_range = fb_srange_iter(purge_state->to_purge, HUGEPAGE_PAGES,
purge_state->next_purge_search_begin, &purge_begin, &purge_len);
if (!found_range) {
return false;
}
*r_purge_addr = (void *)(
(uintptr_t)hpdata_addr_get(hpdata) + purge_begin * PAGE);
*r_purge_size = purge_len * PAGE;
purge_state->next_purge_search_begin = purge_begin + purge_len;
purge_state->npurged += purge_len;
assert(purge_state->npurged <= HUGEPAGE_PAGES);
return true;
}
void
hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state) {
assert(!hpdata_alloc_allowed_get(hpdata));
hpdata_assert_consistent(hpdata);
/* See the comment in reserve. */
assert(!hpdata->h_in_psset || hpdata->h_updating);
assert(purge_state->npurged == fb_scount(purge_state->to_purge,
HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES));
assert(purge_state->npurged >= purge_state->ndirty_to_purge);
fb_bit_not(purge_state->to_purge, purge_state->to_purge,
HUGEPAGE_PAGES);
fb_bit_and(hpdata->touched_pages, hpdata->touched_pages,
purge_state->to_purge, HUGEPAGE_PAGES);
assert(hpdata->h_ntouched >= purge_state->ndirty_to_purge);
hpdata->h_ntouched -= purge_state->ndirty_to_purge;
hpdata_assert_consistent(hpdata);
}
void
hpdata_hugify(hpdata_t *hpdata) {
hpdata_assert_consistent(hpdata);
hpdata->h_huge = true;
fb_set_range(hpdata->touched_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES);
hpdata->h_ntouched = HUGEPAGE_PAGES;
hpdata_assert_consistent(hpdata);
}
void
hpdata_dehugify(hpdata_t *hpdata) {
hpdata_assert_consistent(hpdata);
hpdata->h_huge = false;
hpdata_assert_consistent(hpdata);
}
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
void
inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, size_t *nfree,
size_t *nregs, size_t *size) {
assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
if (unlikely(edata == NULL)) {
*nfree = *nregs = *size = 0;
return;
}
*size = edata_size_get(edata);
if (!edata_slab_get(edata)) {
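		/* A non-slab (large) extent holds a single region, in use. */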
*nfree = 0;
*nregs = 1;
} else {
*nfree = edata_nfree_get(edata);
*nregs = bin_infos[edata_szind_get(edata)].nregs;
assert(*nfree <= *nregs);
assert(*nfree * edata_usize_get(edata) <= *size);
}
}
void
inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
size_t *nfree, size_t *nregs, size_t *size, size_t *bin_nfree,
size_t *bin_nregs, void **slabcur_addr) {
assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
&& bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
if (unlikely(edata == NULL)) {
*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
*slabcur_addr = NULL;
return;
}
*size = edata_size_get(edata);
if (!edata_slab_get(edata)) {
*nfree = *bin_nfree = *bin_nregs = 0;
*nregs = 1;
*slabcur_addr = NULL;
return;
}
*nfree = edata_nfree_get(edata);
const szind_t szind = edata_szind_get(edata);
*nregs = bin_infos[szind].nregs;
assert(*nfree <= *nregs);
assert(*nfree * edata_usize_get(edata) <= *size);
arena_t *arena = (arena_t *)atomic_load_p(
&arenas[edata_arena_ind_get(edata)], ATOMIC_RELAXED);
assert(arena != NULL);
const unsigned binshard = edata_binshard_get(edata);
bin_t *bin = arena_get_bin(arena, szind, binshard);
malloc_mutex_lock(tsdn, &bin->lock);
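	/*
	 * With stats enabled, bin_nregs is the region capacity across all of
	 * this bin shard's current slabs; subtracting the allocated regions
	 * (curregs) yields bin_nfree.
	 */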
if (config_stats) {
*bin_nregs = *nregs * bin->stats.curslabs;
assert(*bin_nregs >= bin->stats.curregs);
*bin_nfree = *bin_nregs - bin->stats.curregs;
} else {
*bin_nfree = *bin_nregs = 0;
}
edata_t *slab;
if (bin->slabcur != NULL) {
slab = bin->slabcur;
} else {
slab = edata_heap_first(&bin->slabs_nonfull);
}
*slabcur_addr = slab != NULL ? edata_addr_get(slab) : NULL;
malloc_mutex_unlock(tsdn, &bin->lock);
}