Commit 6d23d3ac authored by Oran Agra

Squashed 'deps/jemalloc/' changes from ea6b3e973..54eaed1d8

54eaed1d8 Merge branch 'dev'
304c91982 Update ChangeLog for 5.3.0.
8cb814629 Make the default option of zero realloc match the system allocator.
66c889500 Make test/unit/background_thread_enable more conservative.
a7d73dd4c Update TUNING.md to include the new tcache_max option.
254b01191 Small doc tweak of opt.trust_madvise.
f5e840bbf Minor typo fix in doc.
ceca07d2c Correct the name of stats.mutexes.prof_thds_data in doc.
391bad4b9 Avoid abort() in test/integration/cpp/infallible_new_true.
9a242f16d fix some typos
0e29ad4ef Rename zero_realloc option "strict" to "alloc".
5841b6dbe Update FreeBSD image to 12.3 for cirrus ci.
ed5fc14b2 Use volatile to workaround buffer overflow false positives.
25517b852 Reoreder TravisCI jobs to optimize CI time
8a49b62e7 Enable TravisCI for Windows
fdb6c1016 Add FreeBSD to TravisCI
a93931537 Do not disable SEC by default for 64k pages platforms
eaaa368ba Add comments and use meaningful vars in sz_psz2ind.
5bf03f8ce Implement PAGE_FLOOR macro
52631c90f Fix size class calculation for sec
7ae0f15c5 Add a default page size when cross-compile for Apple M1.
eb65d1b07 Fix FreeBSD system jemalloc TSD cleanup
78b58379c Fix possible "nmalloc >= ndalloc" assertion.
ca709c313 Fix failed assertion due to racy memory access
063d134ae Properly detect background thread support on Darwin.
a4e81221c Document 'make uninstall'
20f9802e4 Avoid overflow warnings in test/unit/safety_check.
8c59c44ff Add a dependency checking step at the end of malloc_conf_init.
efc539c04 Initialize prof_leak during prof init.
002f0e939 Disable TravisCI jobs generation for Windows
01a293fc0 Add Windows to TravisCI
b798fabdf Add prof_leak_error option
eafd2ac39 Forbid spaces in prefix and exec_prefix
36a09ba2c Forbid spaces in install suffix
640c3c72e Add support for 'make uninstall'
f15d8f3b4 Echo installed files via verbose 'install' command
eb196815d Avoid calculating size of size class twice & delete sc_data_global.
011449f17 Fix doc build with install-suffix.
8b49eb132 Fix the HELP_STRING of --enable-doc.
ddb170b1d Simplify arena_migrate() to take arena_t* instead of indices.
648b3b9f7 Lower the num_threads in the stress test of test/unit/prof_recent
d66162e03 Fix the extent state checking on the merge error path.
c9946fa7e FreeBSD also needs the OS-X "don't declare system functions as nothrow" fix since it also has jemalloc in the base system
89fe8ee6b Use the isb instruction instead of yield for spin locks on arm
6230cc88b Add background thread sleep retry in test/unit/hpa_background_thread
61978bbe6 Purge all if the last thread migrated away from an arena.
c91e62dd3 #include <features.h> as requested
18510020e Fix symbol conflict with musl libc
f509703af Fix two conversion warnings in tcache.
067c2da07 Fix unnecessary returns in san_(un)guard_pages_two_sided.
d660683d3 Fix test config of lg_san_uaf_align.
eabe88916 Rename full_position to low_bound in cache_bin.h.
dfdd7562f Rename san_enabled() to san_guard_enabled().
01d61a3c6 Fix a conversion warning.
8b34a788b Fix an used-uninitialized warning (false positive).
e491cef9a Add stats for stashed bytes in tcache.
b75822bc6 Implement use-after-free detection using junk and stash.
06aac61c4 Split the core logic of tcache flush into a separate function.
d038160f3 Fix shadowed variable usage.
bd70d8fc0 Add the profiling settings for tests explicit.
e491df1d2 Fix warnings when using autoheader.
60b9637cc Only invoke malloc_cpu_count_is_deterministic() when necessary.
837b37c4c Fix the time-since computation in HPA.
310af725b Add nstime_ns_since which obtains the duration since the input time.
cafe9a315 Disable percpu arena in case of non deterministic CPU count
bb5052ce9 Fix base_ehooks_get_for_metadata
9015e129b Update visual studio projects
d90655390 San: Create a function for committing and zeroing
800ce49c1 San: Bump alloc frequently reused guarded allocations
f56f5b993 Pass 'frequent_reuse' hint to PAI
2c70e8d35 Rename 'arena_decay' to 'arena_util'
0f6da1257 San: Implement bump alloc
34b00f896 San: Avoid running san tests with prof enabled
62f9c54d2 San: Rename 'guard' to 'san'
d9bbf539f CI: Refactor gen_travis.py
7dcf77809 Mark slab as true on sized dealloc fast path.
af6ee27c0 Enforce abort_conf:true when malloc_conf is not fully recognized.
113e8e68e freebsd 14 build fix proposal.
3b3257a70 Correct opt.prof_leak documentation
cdabe908d Track the initialized state of nstime_t on debug build.
400c59895 Fix uninitialized nstime reading / updating on the stack in hpa.
8b81d3f21 Fix the initialization of last_event in thread event init.
6bdb4f5ab Check prof_active in addtion to opt_prof during batch_alloc().
37342a4d3 Add ctl interface for experimental_infallible_new.
6cb585b13 San: Unguard guarded slabs during arena destruction
b6a7a535b Optimize away a branch on the free fastpath.
4d56aaeca Optimize away the tsd_fast() check on free fastpath.
26f5257b8 Remove declaration of an undefined function
215961541 Add new architecture loongarch.
8daac7958 Redefine functions with test hooks only for tests
c9ebff0fd Initialize deferred_work_generated
912324a1a Add debug check outside of the loop in hpa_alloc_batch.
cf9724531 Darwin malloc_size override support proposal.
ab0f1604b Delay the atexit call to prof_log_start().
11b6db744 CPU affinity on BSD platforms support.
83f329402 Small refactors around 7bb05e0.
3c4b717ff Remove unused header base_structs.h.
deb8e62a8 Implement guard pages.
7bb05e04b add experimental.arenas_create_ext mallctl
a9031a097 Allow setting a dump hook
f7d46b811 Allow setting custom backtrace hook
523cfa55c Guard prof related mallctl with opt_prof.
6e848a005 Remove opt_background_thread_hpa_interval_max_ms
8229cc77c Wake up background threads on demand
97da57c13 HPA: Add min_purge_interval_ms option
b8b8027f1 Allow PAI to calculate time until deferred work
26140dd24 Reject --enable-prof-libunwind without --enable-prof
e5062e9fb Makefile.in: make sure doc generated before install
8b24cb8fd Don't assume initialized arena in the default alloc hook.
c01a885e9 HPA: Correctly calculate retained pages
2c625d5cd Fix warnings when compiled with clang
9d02bdc88 Port gen_run_tests.py to python3
5884a076f Rename prof.dump_prefix to prof.prefix
6a0160071 Add Cirrus CI testing matrix
f58064b93 Verify that HPA is used before calling its functions
27f71242b Mutex: Tweak internal spin count.
6f41ba55e Mutex: Make spin count configurable.
dae24589b PH: Insert-below-min fast-path.
40d53e007 ph: Add aux-list counting and pre-merging.
dcb7b83fa Eset: Cache summary information for heap edatas.
252e0942d Eset: Pull per-pszind data into structs.
dc0a4b8b2 Edata: Pull out comparison fields into a summary.
0170dd198 Edata: Fix a couple typos.
08a4cc096 Pairing heap: inline functions instead of macros.
92a1e38f5 edata_cache: Allow unbounded fast caching.
d93eef2f4 HPA: Introduce a redesigned hpa_central_t.
e09eac1d4 Remove hpa_central.
c88fe355e Add unit tests for decay
aaea4fd1e Add more documentation to decay.c
4b633b9a8 Clean up background thread sleep computation
6630c5989 HPA: Hugification hysteresis.
113938b6f HPA: Pull out a hooks type.
1d4a7666d HPA: Do deferred operations on background threads.
583284f2d Add HPA deferral functionality.
ace329d11 HPA batch dalloc: Just do one deferred work check.
47d8a7e6b psset: Purge empty slabs first.
41fd56605 HPA: Purge across retained extents.
347523517 PAI: Fix a typo.
9c42ed2d1 Travis: Don't test "clang" on OS X.
d202218e8 HPA: Fix typos with big performance implications.
de033f56c mpsc_queue: Add module.
4452a4812 Add opt.experimental_infallible_new.
0689448b1 Travis: Unbreak the builds.
4fb93a18e extent_can_acquire_neighbor typo fix
2381efab5 ARC: add Minimum allocation alignment
2c0f4c2ac Fix typo in configure.ac: experimetal -> experimental
36c6bfb96 SEC: Allow arbitrarily many shards, cached sizes.
11beab38b Added --debug-syms-by-id option
08089589f Fix an interaction between the oversize_threshold test and bgthds.
541793821 Red-black tree: add summarize/filter.
b2c08ef2e RB unit tests: don't test reentrantly.
aea91b8c3 Clean up some minor data structure inconsistencies
1f688490e Stats: Fix a printing bug when hpa_dirty_mult = -1
4f7cb3a41 Sized deallocation: fix a typo.
12cd13cd4 Fix thread.name/prof_sys_thread_name interaction
304cdbb13 Fix a prof_recent/prof_sys_thread_name interaction
9b523c6c1 Refactor the locking in extent_recycle().
ce68f326b Avoid the release & re-acquire of the ecache locks around the merge hook.
7dc77527b Delete the mutex_pool module.
03d95cba8 Remove the unnecessary arena_ind_set in base_alloc_edata().
3093d9455 Move the edata mergeability related functions to extent.h.
7c964b035 Add rtree_write_range(): writing the same content to multiple leaf elements.
add636596 Stop checking head state in the merge hook.
49b7d7f0a Passing down the original edata on the expand path.
178493968 Use rtree tracked states to protect edata outside of ecache locks.
9ea235f8f Add witness_assert_positive_depth_to_rank().
4d8c22f9a Store edata->state in rtree leaf and make edata_t 128B aligned.
70d1541c5 Track extent is_head state in rtree leaf.
862219e46 Add quiescence sync before deleting base during arena_destroy.
a137a6825 Remove redundant declaration, pac_retain_grow_limit_get_set was declared twice in pac.h
2ae1ef7db Fix doc large size 54 KiB error
61afb6a40 Fix locking on arena_i_destroy_ctl().
9193ea224 Cirrus: fix build.
391307714 Mark head state during dss alloc.
11127240c Remove redundant enable-debug definition in configure.
22be724af Set is_head in extent_alloc_wrapper w/ retain.
73ca4b8ef HPA: Use dirtiest-first purging.
0f6c420f8 HPA: Make purging/hugifying more principled.
6bddb92ad psset: Rename "bitmap" to "pageslab_bitmap".
154aa5fcc Use the flat bitmap for eset and psset bitmaps.
271a676dc hpdata: early bailout for longest free range.
d21d5b46b Edata: Move sn into its own field.
fb327368d SEC: Expand option configurability.
ce9386370 HPA: Implement batch allocation.
cdae6706a SEC: Use batch fills.
480f3b11c Add a batch allocation interface to the PAI.
bf448d7a5 SEC: Reduce lock hold times.
1944ebbe7 HPA: Implement batch deallocation.
f47b4c2cd PAI/SEC: Add a dalloc_batch function.
4b8870c7d SEC: Fix a comment typo.
cde7097ec Update INSTALL.md to mention 'autoconf'
a11be5033 Implement opt.cache_oblivious.
8c5e5f50a Fix stats for "tcache_max" (was "lg_tcache_max")
041145c27 Report the correct and wrong sizes on sized dealloc bug detection.
f3b2668b3 Report the offending pointer on sized dealloc bug detection.
edbfe6912 Inline malloc fastpath into operator new.
79f81a373 HPA: Make dirty_mult configurable.
32dd15379 HPA: Make dehugification threshold configurable.
4790db15e HPA: make the hugification threshold configurable.
b3df80bc7 Pull HPA options into a containing struct.
bdb7307ff fxp: Add FXP_INIT_PERCENT
caef4c286 FXP: add fxp_mul_frac.
56e85c0e4 HPA: Use a whole-shard purging heuristic.
dc886e560 hpdata: Return the number of pages to be purged.
9fd9c876b psset: keep aggregate stats.
da63f23e6 HPA: Track pending purges/hugifies in the psset.
0ea3d6307 CTL, Stats: report HPA empty slab stats.
bf64557ed Move empty slab tracking to the psset.
99fc0717e psset: Reconceptualize insertion/removal.
061cabb71 HPA stats: report retained instead of inactive.
d3e5ea03c HPA: Track dirty stats.
68a1666e9 hpdata: Rename "dirty" to "touched".
be0d7a53f HPA: Don't track inactive pages.
55e0f60ca psset stats: Simplify handling.
94cd9444c HPA: Some minor reformattings.
b25ee5d88 HPA: Add purge stats.
746ea3de6 HPA stats: Allow some derived stats.
30b9e8162 HPA: Generalize purging.
70692cfb1 hpdata: Add state changing helpers.
9b75808be flat bitmap: Add a bitwise and/or/not.
2ae966222 hpdata: track per-page dirty state.
ff4086aa6 hpdata: count active pages instead of free ones.
3624dd42f hpdata: Add a comment for hpdata_consistent.
20140629b Bin: Move stats closer to the mutex.
c259323ab Use ticker_geom_t for arena tcache decay.
8edfc5b17 Add ticker_geom_t.
396732981 Arena: share bin offsets in a global.
2fcbd1811 Cache bin: Don't reverse flush order.
4c46e1136 Cache an arena's index in the arena.
229994a20 Tcache flush: keep common path state in registers.
31a629c3d Tcache flush: prefetch edata contents.
9f9247a62 Tcache fluhing: increase cache miss parallelism.
181ba7fd4 Tcache flush: Add an emap "batch lookup" path.
c007c537f Tcache flush: Unify edata lookup path.
35a855260 Mac OS: Tag mapped pages.
f6699803e Fix duration in prof log
a943172b7 Add runtime detection for MADV_DONTNEED zeroes pages (mostly for qemu)
2e3104ba0 Update config.{sub,guess} to support support-aarch64-apple-darwin as a target
a011c4c22 cache_bin: Separate out local and remote accesses.
14d689c0f Add prof stats mutex stats
9f71b5779 Output prof stats in stats print
1f1a0231e Split macros for initializing stats headers
4352cbc21 Add alignment tests for prof stats
54f3351f1 Add mallctl for prof stats fetching
40fa4d29d Track per size class internal fragmentation
afa489c3c Record request size in prof info
f9bb8dede Un-force-inline do_rallocx.
a9fa2defd Add JEMALLOC_COLD, and mark some functions cold.
5d8e70ab2 prof_recent: cassert(config_prof) more often.
83cad746a prof_log: cassert(config_prof) in public functions
526180b76 Extent.c: Avoid an rtree NULL-check.
b35ac00d5 Do not bump to large size for page aligned request
8a56d6b63 Add last-N mutex stats
22d62d8cb Handle ending gap properly for HPA stats
6c5a3a24d Omit bin stats rows with no data
ea013d8fa Enforce realloc sizing stability
74bd63b20 Optimize stats print using partial name-to-mib
4557c0a67 Enable ctl on partial mib and partial name
006dd0414 Add partial name-to-mib functionality
f2e1a5be7 Do not fail on partial ctl path for ctl_nametomib()
6ab181d2b Extract node lookup given mib input
3a627b967 No need to record all nodes in ctl_lookup()
91e006c4c Enable ctl_lookup() to start from arbitrary node
063a767ff Define JEMALLOC_HAS_ALLOCA_H for QNX
4e3fe218e Use posix_madvise to purge pages when available
26c1dc5a3 Support AutoConf for posix_madvise and POSIX_MADV_DONTNEED
96a59c3bb Fix recursive malloc during bootstrap on QNX
986cbe488 Disable JEMALLOC_TLS for QNX
1e3b8636f HPA: Remove unused malloc_conf options.
e82771807 Cache mallctl mib for batch allocation stress test
0dfdd31e0 Add tiny batch size to batch allocation stress test
9522ae41d Move n_search outside of assert as reported by static analyzer
a559caf74 hpdata: Strengthen assertions.
f51948d9e psset unit test: fix a bug.
54c94c167 flat bitmap: add scount / ucount functions.
e6c057ad3 fb: implement assign in terms of a visitor.
734e72ce8 bit_util: Guarantee popcount's presence.
d9f7e6c66 hpdata: Add a test.
3ed0b4e8a HPA: Add an nevictions counter.
fffcefed3 malloc_conf: Clarify HPA options.
f7cf23aa4 psset: Relegate alloc/dalloc to test code.
f9299ca57 HPA: Use psset fit/insert/remove.
0971e1e4e hpdata: Use addr/size instead of begin/npages.
5228d869e psset: Use fit/insert/remove as basis functions.
089f8fa44 Move hpdata bitmap logic out of the psset.
ca30b5db2 Introduce hpdata_t.
4a15008cf HPA unit test: skip if unsupported.
43af63fff HPA: Manage whole hugepages at a time.
63677dde6 Pages: Statically detect if pages_huge may succeed
c1b2a7793 psset: Move in stats.
d0a991d47 psset: Add insert/remove functions.
d438296b1 narenas_ratio: Accept fractional values.
ecd39418a Add fxp: A fixed-point math library.
99c2d6c23 Backport jeprof --collapse for flamegraph generation
520b75fa2 utrace support with label based signature.
92e189be8 Add some comments to the batch allocation logic flow
d96e4525a Route batch allocation of small batch size to tcache
ac480136d Split out locality checking in batch allocation tests
be5e49f4f Add a batch mode for cache_bin_alloc()
4a65f3493 Fix a cache bin test
566c4a859 Slight changes to cache bin internal functions
9545c2cd3 Add sample interval to prof last-N dump
cf2549a14 Add a per-arena oversize_threshold.
4ca3d91e9 Rename geom_grow -> exp_grow.
b4c37a6e8 Rename edata_tree_t -> edata_avail_t.
95f0a77fd Detect pthread_getname_np explicitly.
b3c5690b7 Update config.{guess,sub} to 2020-11-07@77632d9
589638182 Use the edata_cache_small_t in the HPA.
03a604711 Edata cache small: rewrite.
c9757d9e3 HPA: Don't disable shards that were never started.
1b3ee7566 Add experimental.thread.activity_callback.
27ef02ca9 Android build fix proposal.
d2d941017 MADV_DO[NOT]DUMP support equivalence on FreeBSD.
180b84315 Appveyor: fix 404 errors.
ef6d51ed4 DragonFlyBSD build support.
bf72188f8 Allow opt.tcache_max to accept small size classes.
ea32060f9 SEC: Implement thread affinity.
d16849c91 psset: Do first-fit based on slab age.
634ec6f50 Edata: add an "age" field.
6599651ae PA: Use an SEC in fron of the HPA shard.
ea51e97bb Add SEC module: a small extent cache.
1964b0839 HPA: Add stats for the hpa_shard.
534504d4a HPA: add size-exclusion functionality.
484f04733 HPA: Add central mutex contention stats.
bf025d2ec HPA: Make slab sizes and maxes configurable.
1c7da3331 HPA: Tie components into a PAI implementation.
c8209150f Switch from opt.lg_tcache_max to opt.tcache_max
5ba861715 Add thread name in prof last-N records
4ef5b8b4d Add a logo to doc_internal.
5e41ff9b7 Add a hard limit on tcache max size class.
3de19ba40 Eagerly detect double free and sized dealloc bugs for large sizes.
be9548f2b Tcaches: Fix a subtle race condition.
a9aa6f6d0 Fix the alloc_ctx check in free_fastpath.
b971f7c4d Add "default" option to slab sizes.
21b70cb54 Add hpa_central module
1ed7ec369 Emap: Add emap_assert_not_mapped.
2a6ba121b PRNG test: cleanups.
9e6aa77ab PRNG: Remove atomic functionality.
051304717 PRNG: Allow a a range argument of 1.
bdb60a805 Appveyor: don't update msys2 keyring.
025d8c37c Add a script to check for clang-formattedness.
f6bbfc1e9 Add a .clang-format file.
259c5e3e8 psset: Add stats
018b162d6 Add psset: a set of pageslabs.
ed99d300b Flat bitmap: Add longest-range computation.
e03450069 Edata: rename "ranged" bit to "pai".
7ad2f7866 Avoid a -Wundef warning on LG_SLAB_MAXREGS.
40cf71a06 Remove --with-slab-maxregs options from INSTALL.md
36ebb5abe CI support for PPC64LE architecture
1541ffc76 configure: add --with-lg-slab-maxregs configure option.
d243b4ec4 Add PROFILING_INTERNALS.md
09eda2c9b Add unit tests for usize in prof recent records
b549389e4 Correct usize in prof last-N record
202f01d4f Fix szind computation in profiling
866231fc6 Do not repeat reentrancy test in profiling
20f2479ed Do not create size class tables for non-prof builds
8efcdc3f9 Move unbias data to prof_data
5e90fd006 Geom_grow: Don't keep the mutex internal.
c57494879 Geom_grow: Don't take tsdn at init.
ffe552223 Geom_grow: Move in advancing logic.
131b1b533 Rename ecache_grow -> geom_grow.
b399463fb flat_bitmap unit test: Silence a warning.
b0ffa39ca Mallctl stress test: fix a type.
753bbf184 Benchmarks: Also print ns / iter.
7b187360e IO: Support 0-padding for unsigned numbers.
32d467322 Add a mallctl speed stress test.
38867c5c1 Makefile: alphabetize stress/analyze utilities.
ab274a23b Add narenas_ratio.
9e18ae639 Config: safety checks don't imply size checks.
8f9e958e1 Add alignment stress test for rallocx
743021b63 Fix size miscalculation bug in reallocation
eaed1e39b Add sized-delete size-checking functionality.
53084cc5c Safety check: Don't directly abort.
60993697d Prof: Add prof_unbias.
81c2f841e Add a simple utility to detect profiling bias.
e032a1a1d Add a stress test for batch allocation
f6cf5eb38 Add mallctl for batch allocation API
978f830ee Add batch allocation API
c6f59e9bb Add surplus reading API for thread event lookahead
f80546895 Add zero option to arena batch allocation
49e5c2fe7 Add batch allocation from fresh slabs
2bb8060d5 Add empty test and concat for typed list
f28cc2bc8 Extract bin shard selection out of bin locking
ddb8dc4ad FB: Add range iteration support.
ceee82351 Add flat_bitmap.
7fde6ac49 Nbits: Add a couple more interesting sizes.
efeab1f49 bitset test: Pull NBITS_TAB into its own file.
22da83609 bit_util: Add fls_ functions; "find last set".
1ed0288d9 bit_util: Change ffs functions indexing.
786a27b9e CI: Update keyring.
fb347dc61 Verify output space before doing heavy work in mallctl
f5fb4e5a9 Modify mallctl output length when needed
425840204 Corrections for prof_log_start()
e6cb7a1c9 Shorten wait time for peak events
6107857b7 PA->PAC: Move in PAI implementation.
6041aaba9 PA -> PAC: Move in destruction functions.
cbf096b05 Arena: remove redundant bg inactivity check.
471eb5913 PAC: Move in decay rate setting.
6a2774719 PA->PAC: Move in decay functions.
4ee75be3a PA -> PAC: Move in decay_purge enum.
72435b0ab PA->PAC: Make extent.c forget about PA.
dee5d1c42 PA->PAC: Move in extent_sn.
739138234 PA->PAC: Move in stats.
db211eefb PAC: Move in decay.
c81e38999 PAC: Move in ecache_grow.
65803171a PAC: move in emap
7efcb946c PAC: Add an init function.
722652222 PAC: Move in edata_cache accesses.
777b0ba96 Add PAC: Page allocator classic.
1b5f632e0 Introduce PAI: Page allocator interface
3cf19c6e5 atomic: add atomic_load_sub_store
f1f4ec315 Tcache: Tweak nslots_max tuning parameter.
ae541d3fa Edata: Reserve some space for hugepages.
392f645f4 Edata: split up different list linkage uses.
129b72705 Add typed-list module.
00f06c9be enabling mpss on solaris/illumos.
c2e7a0639 No need to intercept prof_dump_header() in tests
f58ebdff7 Generalize prof_cnt_all() for testing
80d18c18c Pass prof dump parameters explicitly in prof_sys
d4259ea53 Simplify signatures for prof dump functions
5d823f3a9 Consolidate struct definitions for prof dump parameters
1f5fe3a3e Pass write callback explicitly in prof_data
4556d3c0c Define structures for prof dump parameters
1c6742e6a Migrate prof dumping to use buffered writer
dad821bb2 Move unwind to prof_sys
d128efcb6 Relocate a few prof utilities to the right modules
4736fb4fc Move file handling logic in prof_data to prof_sys
767a2e179 Move file handling logic in prof to prof_sys
03ae509f3 Create prof_sys module for reading system thread name
adfd9d7b1 Change tsdn to tsd for thread name allocation
841af2b42 Move thread name handling to prof_data module
8118056c0 Expose prof_data testing internals only in prof tests
f43ac8543 Correct prof header macro namings
c8683bee8 Unify printing for prof counts object
5d292b566 Push error handling logic out of core dumping logic
f541871f5 Reduce prof dump buffer size in debug build
354183b10 Define prof dump buffer size centrally
7455813e5 Make dump file writing replaceable in test
21e44c45d Make maps file opening replaceable in test
4bb4037db Extract utility function for opening maps file
f307b2580 Only replace the dump file opening function in test
d8cea8756 Move size inspections to test/analyze
537a4bedb Add a tool to examine random number distributions
d460333ef Improve naming for prof system thread name option
25e43c602 Witness: Make ranks an enum.
092fcac0b Remove unnecessary source files
a795b1932 Remove beginning define in source files
24bbf376c Unify arena flag reading and selection
e128b170a Do not fallback to auto arena when manual arena is requested
95a59d2f7 Unify tcache flag reading and selection
4b0c00848 Unify zero flag reading and setting
2a84f9b8f Unify alignment flag reading and computation
b7858abfc Expose prof testing internal functions
40fa6674a Fix prof timestamp conf reading
7e09a57b3 stress/sizes: Fix an off-by-one issue.
dcfa6fd50 stress/sizes: Add a couple more types.
40672b0b7 Remove duplicate logging in malloc.
4aea74327 High Resolution Timestamps for Profiling
d82a164d0 Add thread.peak.[read|reset] mallctls.
fe7108305 Add peak_t, for tracking allocator net max.
17a64fe91 Add a small program to print data structure sizes.
3e19ebd2e Add lock to protect prof last-N dumping
a835d9cf8 Make prof last-N dumping non-blocking
fc8bc4b5c Increase dump buffer for prof last-N list
264d89d64 Extract restore and async cleanup functions for prof last-N list
857ebd3da Make edata pointer on prof recent record an atomic fence
b8bdea6b2 Fix: prof_recent_alloc_max_ctl_read() does not take tsd
730658f72 Extract alloc/dalloc utility for last-N nodes
035be4486 Separate out dumping for each prof recent record
8da0896b7 Tcache: Make an integer conversion explicit.
cd28e6033 Don't warn on uniform initialization.
6cdac3c57 Tcache: Make flush fractions configurable.
7503b5b33 Stats, CTL: Expose new tcache settings.
ee72bf1cf Tcache: Add tcache gc delay option.
d338dd45d Tcache: Make incremental gc bytes configurable.
ec0b57956 Tcache: Privatize opt_lg_tcache_max default.
10b96f635 Tcache: Remove some unused gc constants.
181093173 Tcache: make slot sizing configurable.
b58dea8d1 Cache bin: expose ncached_max publicly.
634afc412 Tcache: Make size computation configurable.
97b7a9cf7 Add a fill/flush microbenchmark.
33372cbd4 cpu instruction spin wait for arm32/64
27f29e424 LQ_QUANTUM should be 4 on mips64 hardware.
eda9c2858 Edata: zero stack edatas before initializing.
5dead37a9 Allow narenas:default.
dcea2c0f8 Get rid of TSD -> thread event dependency
75dae934a Always initialize TE counters in TSD init
b06dfb9cc Push event handlers to constituent modules
381c97caa Treat postponed prof sample event as new event
abd467493 Extract out per event postponed wait time fetching
f72014d09 Only compute thread event threshold once per trigger
7324c4f85 Break down event init and handler functions
6de77799d Move thread event wait time update to local
733ae918f Extract out per event new wait time fetching
1e2524e15 Do not reset sample wait time when re-initing tdata
855d20f6f Remove outdated comments in thread event
fc052ff72 Migrate counter to use locked int
b543c20a9 Minor update to locked int
f533ab6da Add forking handling for stats
508303077 Add forking handling for prof idump counter
4d970f8bf Add forking handling for counter module
2097e1945 Unify write callback signature
fef9abdcc Cleanup tcache allocation logic
e6cb6919c Consolidate prof inline function headers
d454af90f Remove unused prof_accum field from arena
8be558449 Initialize prof idump counter once rather than once per arena
e10e5059e Make prof_idump_accum() non-inline
039bfd4e3 Do not rollback prof idump counter in arena_prof_promote()
0295aa38a Deduplicate entries in witness error message
f1f8a7549 Let opt.zero propagate to core allocation.
2c09d4349 Add a benchmark of large allocations.
46471ea32 SC: Name the max lookup constant.
79dd0c04e SC: Simplify SC_NPSIZES computation.
fb6cfffd3 Configure: Get rid of LG_QUANTA.
4f8efba82 TSD: Make rtree_ctx a slow-path field.
cd29ebefd Tcache: treat small and large cache bins uniformly
a13fbad37 Tcache: split up fast and slow path data.
7099c6620 Arena: fill in terms of cache_bins.
40e7aed59 TSD: Move in some of the tcache fields.
58a00df23 TSD: Put all fast-path data together.
3589571bf SC: use SC_LG_NGROUP instead of its value.
877af247a QL, QR: Add documentation.
79ae7f921 Rtree: Remove the per-field accessors.
26e9a3103 PA: Simple decay test.
bb6a41852 Emap: Drop szind/slab splitting parameters.
50289750b Extent: Remove szind/slab knowledge.
dc26b3009 Rtree: Clean up compact/non-compact split.
93b99dd14 Extent: Stop passing an edata_cache everywhere.
a4759a191 Ehooks: avoid touching arena_emap_global in tests.
11c47cb13 Extent: Take "bool zero" over "bool *zero".
1a1124462 PA: Take zero as a bool rather than as a bool *.
294b276fc PA: Parameterize emap.  Move emap_global to arena.
f73057727 Eset: Parameterize last globals accesses.
7bb6e2dc0 Eset: take opt_lg_max_active_fit as a parameter.
883ab327c Emap: Move out last edata state touching.
0c96a2f03 Emap: Move out remaining edata modifications.
dfef0df71 Emap: Move edata modification out of emap_remap.
12eb888e5 Edata: Add a ranged bit.
bd4fdf295 Rtree: Pull leaf contents into their own struct.
faec7219b PA: Move in decay initialization.
45671e4a2 PA: Move in retain growth limit setting.
daefde88f PA: Move in mutex stats reading.
07675840a PA: Move in some more internals accesses.
238f3c743 PA: Move in full stats merging.
81c602759 Arena stats: Give it its own "mapped".
506d907e4 PA: Move in basic stats merging.
f29f6090f PA: Add pa_extra.c and put PA forking there.
8164fad40 Stats: Fix edata_cache size merging.
565045ef7 Arena: Make more derived stats non-atomic/locked.
d0c43217b Arena stats: Move retained to PA, use plain ints.
e2cf3fb1a PA: Move in all modifications of mapped.
436789ad9 PA: Make mapped stat atomic.
3c28aa6f1 PA: Move edata_avail stat in, make it non-atomic.
f6bfa3dcc Move extent stats to the PA module.
527dd4cdb PA: Move in nactive counter.
c075fd0bc PA: Minor cleanups and comment fixes.
46a9d7fc0 PA: Move in rest of purging.
2d6eec7b5 PA: Move in decay-all pathway.
65698b7f2 PA: Remove public visibility of some internals.
f012c43be PA: Move in decay_to_limit
103f5feda Move bg thread activity check out of purging core.
3034f4a50 PA: Move in decay_stashed.
aef28b2f8 PA: Move in stash_decayed.
655a09634 Move bg inactivity check out of purge inner loop.
71fc0dc96 PA: Move in remaining page allocation functions.
74958567a PA: have expand take sizes instead of new usize.
5bcc2c2ab PA: Have expand take szind and slab.
0880c2ab9 PA: Have large expands use it.
7be3dea82 PA: Have slab allocations use it.
9f93625c1 PA: Move in arena large allocation functionality.
7624043a4 PA: Add ehook-getting support.
eba35e2e4 Remove extent knowledge of arena.
e77f47a85 Move arena decay getters to PA.
48a2cd6d7 Decay: Add a (mostly stub) test case.
f77cec311 Decay: Take current time as an argument.
bf55e58e6 Rename test/unit/decay -> test/unit/arena_decay.
d1d7e1076 Decay: move in some background_thread accesses.
cdb916ed3 Decay: Add comments for the public API.
8f2193dc8 Decay: Move in arena decay functions.
4d090d23f Decay: Introduce a stub .c file.
7b6288547 Introduce decay module and put decay objects in PA
497836dbc Arena stats: mark edata_avail as derived.
3192d6b77 Extents: Have extent_dalloc_gap take ehooks.
22a0a7b93 Move arena_decay_extent to extent module.
70d12ffa0 PA: Move mapped into pa stats.
6ca918d0c PA: Add a stats comment.
ce8c0d6c0 PA: Move in arena extent_sn counter.
1ada4aef8 PA: Get rid of arena_ind_get calls.
1ad368c8b PA: Move in decay stats.
356aaa7dc Introduce lockedint module.
acd0bf6a2 PA: move in ecache_grow.
32cb7c2f0 PA: Add a stats type.
688fb3eb8 PA: Move in the arena edata_cache.
8433ad84e PA: move in shard initialization.
a24faed56 PA: Move in the ecache_t objects.
585f92505 Move cache index randomization out of extent.
12be9f572 Add a stub PA module -- a page allocator.
c4e9ea8cc Get rid of locks in prof recent test
2deabac07 Get rid of custom iterator for last-N records
a5ddfa7d9 Use ql for prof last-N list
8da6676a0 Don't do reentrant testing in junk tests.
ce17af422 Better structure ql module
4b66297ea Add move constructor to ql module
a62b7ed92 Add emptiness checking to ql module
1dd24ca6d Add rotate functionality to ql module
0dc95a882 Add concat and split functionality to ql module
1ad06aa53 deduplicate insert and delete logic in qr module
c9d56cddf Optimize meld in qr module
0d6d9e858 configure.ac: Put public symbols on one line.
f9aad7a49 Add piping API to buffered writer
09cd79495 Encapsulate buffer allocation failure in buffered writer
a166c2081 Make prof_tctx_t pointer a true prof atomic fence
d936b46d3 Add malloc_conf_2_conf_harder
3b4a03b92 Mac: don't declare system functions as nothrow.
2256ef896 Add option to fetch system thread name on each prof sample
ccdc70a5c Fix: assertion could abort on past failures
b30a5c2f9 Reorganize cpp APIs and suppress unused function warnings
2e5899c12 Stats: Fix tcache_bytes reporting.
a5780598b Remove thread_event_rollback()
ba783b3a0 Remove prof -> thread_event dependency
441d88d1c Rewrite profiling thread event
0dcd57660 Edata cache: atomic fetch-add -> load-store.
99b1291d1 Edata cache: add edata_cache_small_t.
734109d9c Edata cache: add a unit test.
e732344ef Inspect test: Reduce checks when profiling is on.
92485032b Cache bin: improve comments.
d701a085c Fast path: allow low-water mark changes.
397da0386 Cache bin: rewrite to track more state.
fef0b1ffe Cache bin: Remove last internals accesses.
0a2fcfac0 Tcache: Hold cache bin allocation explicitly.
d498a4bb0 Cache bin: Add an emptiness assertion.
6a7aa46ef Cache bin: Add a debug method for init checking.
370c1ea00 Cache bin: Write the unit test in terms of the API
7f5ebd211 Cache bin: set low-water internally.
60113dfe3 Cache bin: Move in initialization code.
44529da85 Cache-bin: Make flush modifications internal
ff6acc6ed Cache bin: simplify names and argument ordering.
e1dcc557d Cache bin: Only take the relevant cache_bin_info_t
1b00d808d cache_bin: Don't let arena see empty position.
d303f3079 cache_bin nflush -> n.
74d36d78e Cache bin: Make ncached_max a query on the info_t.
b66c0973c cache_bin: Don't allow direct internals access.
da68f7329 Move percpu_arena_update.
909c501b0 Cache_bin: Shouldn't know about tcache.
79f1ee2fc Move junking out of arena/tcache code.
b428dceea Config: Warn on void * pointer arithmetic.
22657a5e6 Extents: Silence the "potentially unused" warning.
4a78c6d81 Correct thread event unit test
305b1f6d9 Correction on geometric sampling
6c3491ad3 Tcache: Unify bin flush logic.
9f4fc2738 Ehooks: Fix a build warning.
bc31041ed Cirrus-CI: test on new freebsd releases.
51bd14742 Make use of assert_* in test/unit/thread_event.c
9d2cc3b0f Make use of assert_* in test/unit/prof_recent.c
a88d22ea1 Make use of assert_* in test/unit/inspect.c
0ceb31184 Make use of assert_* in test/unit/buf_writer.c
fa6157938 Add assert_* functionality to tests
21dfa4300 Change assert_* to expect_* in tests
162c2bcf3 Background thread: take base as a parameter.
29436fa05 Break prof and tcache knowledge of b0.
a0c1f4ac5 Rtree: take the base allocator as a parameter.
7013716aa Emap: Take (and propagate) a zeroed parameter.
182192f83 Base: Pull into a single header.
34b7165fd Put szind_t, pszind_t in sz.h.
7e6c8a728 Emap: Standardize naming.
ac50c1e44 Emap: Remove direct access to emap internals.
06e42090f Make jemalloc.c use the emap interface.
f7d9c6c42 Emap: Move in alloc_ctx lookup functionality.
65a54d771 Emap: Move in szind and slab modifications.
9b5d105fc Emap: Move in iealloc.
1d449bd9a Emap: Internal rtree context setting.
08eb1e6c3 Emap: Comments and cleanup
231d1477e Rename emap_split_prepare_t -> emap_prepare_t.
0586a56f3 Emap: Move in merge functionality.
040eac77c Tell edatas their creation arena immediately.
7c7b70206 Emap: Move over metadata splitting logic.
44f5f5360 Emap: Move over deregistration functions.
6513d9d92 Emap: Move over deregistration boundary functions.
9b5ca0b09 Emap: Move in slab interior registration.
d05b61db4 Emap: Move extent boundary registration in.
ca21ce407 Emap: Move in write_acquired from extent.
01f255161 Add emap, for tracking extent locking.
0f686e82a Avoid variable length array with length 0.
68e8ddcaf Add mallctl for dumping last-N profiling records
bc05ecebf Add const qualifier in assert_cmp()
ba0e35411 Rework the bin locking around tcache refill / flush.
7fd22f7b2 Fix Undefined Behavior in hash.h
ca1f08225 Disallow merge across mmap regions to preserve SN / first-fit.
7014f81e1 Add ASSURED_WRITE in mallctl
247688919 Add inspect.c to MSVC filters
9cac3fa8f Encapsulate buffer allocation in buffered writer
bdc08b515 Better naming buffered writer
c6bfe5585 Update the tsd description.
e89652261 Abbreviate thread-event to te.
5e500523a Remove thread_event_boot().
97dd79db6 Implement deallocation events.
536ea6858 NetBSD specific changes: - NetBSD overcommits - When mapping pages, use the maximum of the alignment requested and the   compiled-in PAGE constant which might be greater than the current kernel   pagesize, since we compile binaries with the maximum page size supported   by the architecture (so that they work with all kernels).
974222c62 Add safety check on sdallocx slow / sampled path.
88d9eca84 Enforce page alignment for sampled allocations.
0f552ed67 Don't purge huge extents when decay is off.
38a48e574 Set reentrancy to 1 for tsd_state_purgatory.
88b0e03a4 Implement opt.stats_interval and the _opts options.
d71a145ec Chagne prof_accum_t to counter_accum_t for general purpose.
ea351a7b5 Fix syntax errors in doc for thread.idle.
d92f0175c Introduce NEITHER_READ_NOR_WRITE in ctl.
6a622867c Add "thread.idle" mallctl.
f81341a48 Fallback to unbuffered printing if OOM
cd6e90824 Add stress test for last-N profiling mode
84b28c6a1 Properly handle tdata deletion race
d33120856 Get rid of redundant logic in prof
a72ea0db6 Restructure and correct sleep utility for testing
7b67ed0b5 Get rid of lock overlap in prof_recent_alloc_reset
bd3be8e0b Remove commit parameter to ecache functions.
b8df719d5 No tdata creation for backtracing on dying thread
dab81bd31 Rework and fix the assertions on malloc fastpath.
ad3f3fc56 Fetch time after tctx and only for samples
a5d3dd405 Fix an assertion on extent head state with dss.
2b604a301 Record request size in prof recent entries
40a391408 Define constructor for buffered writer argument
6d8e61690 Make buffered writer an independent module
6b6b4709b Unify buffered writer naming
9a60cf54e Last-N profiling mode
7a27a0594 Delete tdata states used for cleanup
e98ddf798 Fix unlikely condition in arena_prof_info_get()
3fa142cf3 Remove _externs from prof internal header names
112dc36dd Handle log_mtx during forking
ea42174d0 Refactor profiling headers
6342da097 Ehooks: Further optimize default merge case.
f2f2084e7 Ehooks: Assert alloc isn't NULL
e210ccc57 Move extent2 -> extent.
2f4fa8041 Rename extents -> ecache.
56cc56b69 Break extent split dependence on arena.
0aa9769fb Break commit functions' arena dependence
48ec5d435 Break extent_coalesce arena dependence
282a38232 Extent: Break [de]activation's arena dependence.
576d7047a Ecache: Should know its arena_ind.
372042a08 Remove merge dependence on the arena.
439219be7 Remove extent_can_coalesce arena dependency.
9cad5639f Ehooks: remove arena_ind parameter.
57fe99d4b Move relevant index into the ehooks_t itself.
c792f3e4a edata_cache: Remember the associated base_t.
ae23e5f42 Unify extent_alloc_wrapper with the other wrappers.
d8b0b66c6 Put extent_state_t into ecache as well as eset.
98eb40e56 Move delay_coalesce from the eset to the ecache.
bb70df8e5 Extent refactor: Introduce ecache module.
070451624 Ehooks: Add head tracking.
09475bf8a extent_may_dalloc -> ehooks_dalloc_will_fail
785918417 Pull out edata_t caching into its own module.
a7862df61 Rename extent_t to edata_t.
865debda2 Rename extent.h -> edata.h.
a738a66b5 Ehooks: Add some debug zero and addr checks.
4b2e5ee8b Ehooks: Add a "zero" ehook.
d0f187ad3 Arena: Loosen arena_may_have_muzzy restrictions.
ebbb97327 Base: Remove some unnecessary reentrancy guards.
403f2d166 Extents: Split out introspection functionality.
92a511d38 Make extent module hermetic.
e08c581cf Extent: Get rid of extent-specific pre/post reentrancy calls.
39fdc690a Ehooks comments and cleanup.
c8dae890c Extent -> Ehooks: Move over default hooks.
2fe510826 Extent -> Ehooks: Move merge hook.
1fff4d2ee Extent -> Ehooks: Move split hook.
a5b42a1a1 Extent -> Ehooks: Move purge_forced hook.
368baa42e Extent -> Ehooks: Move purge_lazy hook.
f83fdf533 Extent: Clean up a comma
d78fe241a Extent -> Ehooks: Move commit and decommit hooks.
5459ec9da Extent -> Ehooks: Move destroy hook.
bac8e2e5a Extent -> Ehooks: Move dalloc hook.
dc8b4e6e1 Extent -> Ehooks: Move alloc hook.
703fbc0ff Introduce unsafe reentrancy guards.
ae0d8e859 Move extent ehook calls into ehooks
ba8b9ecbc Add ehooks module
837119a94 base_structs.h: Remove some mid-line tabs.
9f6eb0958 Extents: Eagerly initialize extent hooks.
4278f8460 Move extent hook getters/setters to arena.c
9226e1f0d fix opt.thp:never still use THP with base_new
d5031ea82 Allow dallocx and sdallocx after tsd destruction.
4afd709d1 Restructure setters for profiling info
1d01e4c77 Initialization utilities for nstime
dd649c948 Optimize away the tsd_fast() check on fastpath.
1decf958d Fix incorrect usage of cassert.
45836d7fd Pass nstime_t pointer for profiling
7d2bac5a3 Refactor destroy code path for prof_tctx
055478cca Threshold is no longer updated before prof_realloc()
7e3671911 Get rid of old indentation style for prof
dfdd46f6c Refactor prof_tctx_t creation
aa1d71fb7 Rename prof_tctx to alloc_tctx in prof_info_t
5e0b09099 No need to pass usize to prof_tctx_set()
1b1e76acf Disable some spuriously-triggering warnings
a70909b13 Test on all supported release of FreeBSD
5c47a3022 Guard C++ aligned APIs
694537177 Change tsdn to tsd for profiling code path
b55419f9b Restructure profiling
8b2c2a596 Support C++17 over-aligned allocation
9a3c73800 Refactor arena_bin_malloc_hard().
9a7ae3c97 Reduce footprint of bin_t.
cb1a1f4ad Remove the unnecessary alloc_ctx on free_fastpath.
716061710 Add branch hints to free_fastpath.
a787d2f5b Prefer getaffinity() to detect number of CPUs.
04cb7d4d6 Bail out early for muzzy decay.
73510dfd1 Revert "Fix bug in prof_realloc"
3b5eecf10 Fix bug in prof_realloc
e4c36a6f3 Emphasize no modification through thread.allocatedp allowed.
c462753cc Use __forceinline for JEMALLOC_ALWAYS_INLINE on msvc
836d7a7e6 Check for large size first in the uncommon case of malloc.
9c59abe42 Fix a typo in Makefile.
da50d8ce8 Refactor and optimize prof sampling initialization.
bc774a351 Rename tsd->offset_state to tsd->prng_state.
19a51abf3 Avoid arena->offset_state when tsd not available for prng.
d01b425e5 Add -Wimplicit-fallthrough checks if supported
a8b578d53 Remove mallctl test for zero_realloc
43f0ce92d Define general purpose tsd_thread_event_init()
97f93fa0f Pull tcache GC events into thread event handler
198f02e79 Pull prof_accumbytes into thread event handler
152c0ef95 Build a general purpose thread event handler
6924f83cb use SYS_openat when available
de81a4ead Add stats counters for number of zero reallocs
9cfa80594 Realloc: Make behavior of realloc(ptr, 0) configurable.
ee961c231 Merge realloc and rallocx pathways.
bd6e28d6a Guard slabcur fetching in extent_util
4786099a3 Increase column width for global malloc/free rate
05681e387 Optimize cache_bin_alloc_easy for malloc fast path
4fe50bc7d Fix amd64 MSVC warning
4fbbc817c Simplify time setting and getting for prof log
4094b7c03 Limit # of iters of test_bitmap_xfu.
66e07f986 Suppress tdata creation in reentrancy
beb7c16e9 Guard prof_active reset by opt_prof
1df9dd351 Fix je_ prefix issue in test
3d84bd57f Arena: Add helper function arena_get_from_extent.
c97d25575 Eset: Remove temporary declaration.
ce5b128f1 Remove the undefined extent_size_quantize declarations.
821dd53a1 Extent -> Eset: Rename arena members.
e144b21e4 Extent -> Eset: Move fork handling.
77bbb35a9 Extent -> Eset: Move extent fit functions.
1210af9a4 Extent -> Eset: Move insertion and removal.
a42861540 Extents -> Eset: Convert some stats getters.
820f070c6 Move page quantization to sz module.
63d1b7a7a Extents -> Eset: move extents_state_get.
b416b96a3 Extents -> Eset: rename/move extents_init.
e6180fe1b Eset: Add a source file.
4e5e43f22 Rename extents_t -> eset_t.
723ccc6c2 Extents: Split out extent struct.
41187bdfb Extents: Break extent-struct/arena interactions
529cfe2ab Arena: rename arena_structs_b.h -> arena_structs.h
e7cf84a8d Rearrange slab data and constants
d1be488cd Add --with-lg-page=16 to CI.
ac5185f73 Fix tcache bin stack alignment.
b7c7df24b Add max_per_bg_thd stats for per background thread mutexes.
4b76c684b Add "prof.dump_prefix" to override filename prefixes for dumps.
242af439b Rename "prof_dump_seq_mtx" to "prof_dump_filename_mtx".
e06658cb2 check GNU make exists in path
22bc75ee3 Workaround the stringop-overflow check false positives.
93d615180 Pass tsd down to prof_backtrace()
671f120e2 Fix prof_backtrace() reentrancy level
785b84e60 Make cache_bin_sz_t unsigned.
23dc7a7fb Fix index type for cache_bin_alloc_easy.
2abb02ecd Fix MSVC 2015 build, as proposed by @christianaguilera-foundry.
719583f14 Fix large.nflushes in the merged stats.
adce29c88 Optimize for prof_active off
49e6fbce7 Always adjust thread_(de)allocated
57b81c078 Pull thread_(de)allocated out of config_stats
9e031c1d1 Bug fix for prof_active switch
0043e68d4 Track low_water == -1 case explicitly.
937ca1db9 Store ncached_max * ptr_size in tcache_bin_info.
7599c82d4 Redesign the cache bin metadata for fast path.
d2dddfb82 Add hint in the bogus version string.
d6b7995c1 Update INSTALL.md about the default doc build.
e2c758436 Simplify / refactor tcache_dalloc_large.
9c5c2a2c8 Unify the signature of tcache_flush small and large.
28ed9b9a5 Buffer stats printing
eb70fef8c Make compact json format as default
a219cfcda Clear tcache prof_accumbytes in tcache_flush_cache
ad3f7dbfa Buffer prof_log_stop
593484661 Fix large bin index accessed through cache bin descriptor.
22746d3c9 Properly dalloc prof nodes with idalloctm.
8c8466fa6 Add compact json option for emitter
7fc6b1b25 Add buffered writer
39343555d Report stats for tdatas_mtx and prof_dump_mtx
87e2400cb Fix tcaches mutex pre- / post-fork handling.
07ce2434b Refactor profiling
56126d0d2 Refactor prof log
56c8ecffc Correct tsd layout graph

git-subtree-dir: deps/jemalloc
git-subtree-split: 54eaed1d8b56b1aa528be3bdd1877e59c56fa90c
parent 220a0f08
#ifndef JEMALLOC_INTERNAL_PROF_SYS_H
#define JEMALLOC_INTERNAL_PROF_SYS_H
extern malloc_mutex_t prof_dump_filename_mtx;
extern base_t *prof_base;
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(tsd_t *tsd, prof_bt_t *bt);
void prof_hooks_init();
void prof_unwind_init();
void prof_sys_thread_name_fetch(tsd_t *tsd);
int prof_getpid(void);
void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind);
bool prof_prefix_set(tsdn_t *tsdn, const char *prefix);
void prof_fdump_impl(tsd_t *tsd);
void prof_idump_impl(tsd_t *tsd);
bool prof_mdump_impl(tsd_t *tsd, const char *filename);
void prof_gdump_impl(tsd_t *tsd);
/* Used in unit tests. */
typedef int (prof_sys_thread_name_read_t)(char *buf, size_t limit);
extern prof_sys_thread_name_read_t *JET_MUTABLE prof_sys_thread_name_read;
typedef int (prof_dump_open_file_t)(const char *, int);
extern prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file;
typedef ssize_t (prof_dump_write_file_t)(int, const void *, size_t);
extern prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file;
typedef int (prof_dump_open_maps_t)();
extern prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps;
#endif /* JEMALLOC_INTERNAL_PROF_SYS_H */
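
The JET_MUTABLE hook pointers above are the seam the unit tests use to replace system-facing behavior (opening and writing dump files, reading the thread name) without touching the core dumping logic. As a hedged sketch of how such a test stub could look — the stub names and byte-counting behavior are illustrative, not taken from the jemalloc test suite; in a JEMALLOC_JET build the hook pointers become writable:

#include <stddef.h>	/* size_t */
#include <sys/types.h>	/* ssize_t */

/* Illustrative stubs matching prof_dump_open_file_t / prof_dump_write_file_t. */
static size_t test_dump_bytes = 0;

static int
test_prof_dump_open_file(const char *filename, int mode) {
	(void)filename;
	(void)mode;
	return 0;	/* Pretend-open; the value is only handed back to the write hook. */
}

static ssize_t
test_prof_dump_write_file(int fd, const void *buf, size_t count) {
	(void)fd;
	(void)buf;
	test_dump_bytes += count;	/* Measure output instead of writing a file. */
	return (ssize_t)count;
}

/*
 * A test would then redirect the hooks before triggering a dump:
 *	prof_dump_open_file = test_prof_dump_open_file;
 *	prof_dump_write_file = test_prof_dump_write_file;
 */
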
@@ -2,11 +2,12 @@
#define JEMALLOC_INTERNAL_PROF_TYPES_H
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_accum_s prof_accum_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_info_s prof_info_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
typedef struct prof_recent_s prof_recent_t;
/* Option defaults. */
#ifdef JEMALLOC_PROF
@@ -28,7 +29,23 @@ typedef struct prof_tdata_s prof_tdata_t;
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
#ifndef JEMALLOC_PROF
/* Minimize memory bloat for non-prof builds. */
# define PROF_DUMP_BUFSIZE 1
#elif defined(JEMALLOC_DEBUG)
/* Use a small buffer size in debug build, mainly to facilitate testing. */
# define PROF_DUMP_BUFSIZE 16
#else
# define PROF_DUMP_BUFSIZE 65536
#endif
/* Size of size class related tables */
#ifdef JEMALLOC_PROF
# define PROF_SC_NSIZES SC_NSIZES
#else
/* Minimize memory bloat for non-prof builds. */
# define PROF_SC_NSIZES 1
#endif
/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128
@@ -45,12 +62,14 @@ typedef struct prof_tdata_s prof_tdata_t;
*/
#define PROF_NTDATA_LOCKS 256
/*
* prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
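
Because the sentinels above live in the otherwise-unused address range just past NULL, code holding a prof_tdata_t * can tell a real thread-data pointer from a cleanup state with a single comparison. A minimal sketch of that check (an illustrative helper, not the in-tree logic):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative: true only for a genuine tdata pointer, not NULL or a sentinel. */
static inline bool
prof_tdata_is_real(prof_tdata_t *tdata) {
	return (uintptr_t)tdata > (uintptr_t)PROF_TDATA_STATE_MAX;
}
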
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
#define PROF_DUMP_FILENAME_LEN (PATH_MAX + 1)
#else
#define PROF_DUMP_FILENAME_LEN 1
#endif
/* Default number of recent allocations to record. */
#define PROF_RECENT_ALLOC_MAX_DEFAULT 0
#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
#ifndef JEMALLOC_INTERNAL_PSSET_H
#define JEMALLOC_INTERNAL_PSSET_H
#include "jemalloc/internal/hpdata.h"
/*
* A page-slab set. What the eset is to PAC, the psset is to HPA. It maintains
* a collection of page-slabs (the intent being that they are backed by
* hugepages, or at least could be), and handles allocation and deallocation
* requests.
*/
/*
* One more than the maximum pszind_t we will serve out of the HPA.
* Practically, we expect only the first few to be actually used. This
* corresponds to a maximum size of 512MB on systems with 4k pages and
* SC_NGROUP == 4, which is already an unreasonably large maximum. Morally, you
* can think of this as being SC_NPSIZES, but there's no sense in wasting that
* much space in the arena, making bitmaps that much larger, etc.
*/
#define PSSET_NPSIZES 64
/*
* We keep two purge lists per page size class; one for hugified hpdatas (at
* index 2*pszind), and one for the non-hugified hpdatas (at index 2*pszind +
* 1). This lets us implement a preference for purging non-hugified hpdatas
* among similarly-dirty ones.
* We reserve the last two indices for empty slabs, in that case purging
* hugified ones (which are definitionally all waste) before non-hugified ones
* (i.e. reversing the order).
*/
#define PSSET_NPURGE_LISTS (2 * PSSET_NPSIZES)
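
As an illustration of the indexing scheme described in the comment above, here is a hypothetical helper that only mirrors the comment, not psset.c's internal logic (pszind_t comes from sz.h; later indices are the ones purged more eagerly):

#include <stdbool.h>
#include <stddef.h>

static inline size_t
purge_list_ind_example(pszind_t pind, bool hugified, bool empty) {
	if (empty) {
		/* Last two lists; hugified empties drain before non-hugified ones. */
		return hugified ? PSSET_NPURGE_LISTS - 1 : PSSET_NPURGE_LISTS - 2;
	}
	/* Hugified hpdatas at 2*pszind, non-hugified ones at 2*pszind + 1. */
	return 2 * (size_t)pind + (hugified ? 0 : 1);
}
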
typedef struct psset_bin_stats_s psset_bin_stats_t;
struct psset_bin_stats_s {
/* How many pageslabs are in this bin? */
size_t npageslabs;
/* Of them, how many pages are active? */
size_t nactive;
/* And how many are dirty? */
size_t ndirty;
};
typedef struct psset_stats_s psset_stats_t;
struct psset_stats_s {
/*
* The second index is huge stats; nonfull_slabs[pszind][0] contains
* stats for the non-huge slabs in bucket pszind, while
* nonfull_slabs[pszind][1] contains stats for the huge slabs.
*/
psset_bin_stats_t nonfull_slabs[PSSET_NPSIZES][2];
/*
* Full slabs don't live in any edata heap, but we still track their
* stats.
*/
psset_bin_stats_t full_slabs[2];
/* Empty slabs are similar. */
psset_bin_stats_t empty_slabs[2];
};
typedef struct psset_s psset_t;
struct psset_s {
/*
* The pageslabs, quantized by the size class of the largest contiguous
* free run of pages in a pageslab.
*/
hpdata_age_heap_t pageslabs[PSSET_NPSIZES];
/* Bitmap for which set bits correspond to non-empty heaps. */
fb_group_t pageslab_bitmap[FB_NGROUPS(PSSET_NPSIZES)];
/*
* The sum of all bin stats in stats. This lets us quickly answer
* queries for the number of dirty, active, and retained pages in the
* entire set.
*/
psset_bin_stats_t merged_stats;
psset_stats_t stats;
/*
* Slabs with no active allocations, but which are allowed to serve new
* allocations.
*/
hpdata_empty_list_t empty;
/*
* Slabs which are available to be purged, ordered by how much we want
* to purge them (with later indices indicating slabs we want to purge
* more).
*/
hpdata_purge_list_t to_purge[PSSET_NPURGE_LISTS];
/* Bitmap for which set bits correspond to non-empty purge lists. */
fb_group_t purge_bitmap[FB_NGROUPS(PSSET_NPURGE_LISTS)];
/* Slabs which are available to be hugified. */
hpdata_hugify_list_t to_hugify;
};
void psset_init(psset_t *psset);
void psset_stats_accum(psset_stats_t *dst, psset_stats_t *src);
/*
* Begin or end updating the given pageslab's metadata. While the pageslab is
* being updated, it won't be returned from psset_fit calls.
*/
void psset_update_begin(psset_t *psset, hpdata_t *ps);
void psset_update_end(psset_t *psset, hpdata_t *ps);
/* Analogous to the eset_fit; pick a hpdata to serve the request. */
hpdata_t *psset_pick_alloc(psset_t *psset, size_t size);
/* Pick one to purge. */
hpdata_t *psset_pick_purge(psset_t *psset);
/* Pick one to hugify. */
hpdata_t *psset_pick_hugify(psset_t *psset);
void psset_insert(psset_t *psset, hpdata_t *ps);
void psset_remove(psset_t *psset, hpdata_t *ps);
static inline size_t
psset_npageslabs(psset_t *psset) {
return psset->merged_stats.npageslabs;
}
static inline size_t
psset_nactive(psset_t *psset) {
return psset->merged_stats.nactive;
}
static inline size_t
psset_ndirty(psset_t *psset) {
return psset->merged_stats.ndirty;
}
#endif /* JEMALLOC_INTERNAL_PSSET_H */
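
To make the psset_update_begin/psset_update_end contract concrete, here is a hedged sketch of an allocation path over this API. The real caller is the HPA shard and runs under its mutex; hpdata_reserve_alloc stands in for whatever hpdata-side reservation call the caller uses and is named here purely for illustration.

/* Sketch only: carve a size-byte allocation out of some pageslab in the set. */
static void *
psset_alloc_sketch(psset_t *psset, size_t size) {
	hpdata_t *ps = psset_pick_alloc(psset, size);
	if (ps == NULL) {
		return NULL;	/* No pageslab has a long enough free run. */
	}
	/* Pull the slab out of the heaps while its metadata is in flux. */
	psset_update_begin(psset, ps);
	void *addr = hpdata_reserve_alloc(ps, size);	/* illustrative call */
	/* Reinsert it under its (possibly changed) largest-free-run class. */
	psset_update_end(psset, ps);
	return addr;
}
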
@@ -3,37 +3,85 @@
#include "jemalloc/internal/qr.h"
/*
* A linked-list implementation.
*
* This is built on top of the ring implementation, but that can be viewed as an
* implementation detail (i.e. trying to advance past the tail of the list
* doesn't wrap around).
*
* You define a struct like so:
* typedef struct my_s my_t;
* struct my_s {
* int data;
* ql_elm(my_t) my_link;
* };
*
* // We wobble between "list" and "head" for this type; we're now mostly
* // heading towards "list".
* typedef ql_head(my_t) my_list_t;
*
* You then pass a my_list_t * for a_head arguments, a my_t * for a_elm
* arguments, the token "my_link" for a_field arguments, and the token "my_t"
* for a_type arguments.
*/
/* List definitions. */
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
/* Static initializer for an empty list. */
#define ql_head_initializer(a_head) {NULL}
/* The field definition. */
#define ql_elm(a_type) qr(a_type)
/* List functions. */
/* A pointer to the first element in the list, or NULL if the list is empty. */
#define ql_first(a_head) ((a_head)->qlh_first)
/* Dynamically initializes a list. */
#define ql_new(a_head) do { \
ql_first(a_head) = NULL; \
} while (0)
/*
* Sets dest to be the contents of src (overwriting any elements there), leaving
* src empty.
*/
#define ql_move(a_head_dest, a_head_src) do { \
ql_first(a_head_dest) = ql_first(a_head_src); \
ql_new(a_head_src); \
} while (0)
/* True if the list is empty, otherwise false. */
#define ql_empty(a_head) (ql_first(a_head) == NULL)
/*
* Initializes a ql_elm. Must be called even if the field is about to be
* overwritten.
*/
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
/*
* Obtains the last item in the list.
*/
#define ql_last(a_head, a_field) \
(ql_empty(a_head) ? NULL : qr_prev(ql_first(a_head), a_field))
/*
* Gets a pointer to the next/prev element in the list. Trying to advance past
* the end or retreat before the beginning of the list returns NULL.
*/
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
/* Inserts a_elm before a_qlelm in the list. */
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
@@ -41,23 +89,41 @@ struct { \
} \
} while (0)
/* Inserts a_elm after a_qlelm in the list. */
#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
/* Inserts a_elm as the first item in the list. */
#define ql_head_insert(a_head, a_elm, a_field) do { \
if (!ql_empty(a_head)) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
/* Inserts a_elm as the last item in the list. */
#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (!ql_empty(a_head)) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
/*
* Given lists a = [a_1, ..., a_n] and b = [b_1, ..., b_n], results in:
* a = [a_1, ..., a_n, b_1, ..., b_n] and b = [].
*/
#define ql_concat(a_head_a, a_head_b, a_field) do { \
if (ql_empty(a_head_a)) { \
ql_move(a_head_a, a_head_b); \
} else if (!ql_empty(a_head_b)) { \
qr_meld(ql_first(a_head_a), ql_first(a_head_b), \
a_field); \
ql_new(a_head_b); \
} \
} while (0)
/* Removes a_elm from the list. */
#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
@@ -65,20 +131,63 @@ struct { \
if (ql_first(a_head) != (a_elm)) { \
qr_remove((a_elm), a_field); \
} else { \
ql_new(a_head); \
} \
} while (0)
/* Removes the first item in the list. */
#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
/* Removes the last item in the list. */
#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
/*
* Given a = [a_1, a_2, ..., a_n-1, a_n, a_n+1, ...],
* ql_split(a, a_n, b, some_field) results in
* a = [a_1, a_2, ..., a_n-1]
* and replaces b's contents with:
* b = [a_n, a_n+1, ...]
*/
#define ql_split(a_head_a, a_elm, a_head_b, a_field) do { \
if (ql_first(a_head_a) == (a_elm)) { \
ql_move(a_head_b, a_head_a); \
} else { \
qr_split(ql_first(a_head_a), (a_elm), a_field); \
ql_first(a_head_b) = (a_elm); \
} \
} while (0)
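/*
 * A companion sketch for ql_split() (illustrative only; same hypothetical
 * my_t / my_list_t types as above, and y is assumed to be a my_t * already
 * linked into a):
 *
 *   my_list_t a;   // assume a = [w, x, y, z]
 *   my_list_t b;   // b need not be initialized; its contents are replaced
 *   ql_split(&a, y, &b, link);
 *   // Now a = [w, x] and b = [y, z].
 */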
/*
* An optimized version of:
* a_type *t = ql_first(a_head);
* ql_remove((a_head), t, a_field);
* ql_tail_insert((a_head), t, a_field);
*/
#define ql_rotate(a_head, a_field) do { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} while (0)
/*
* Helper macro to iterate over each element in a list in order, starting from
* the head (or in reverse order, starting from the tail). The usage is
* (assuming my_t and my_list_t defined as above).
*
* int sum(my_list_t *list) {
* int sum = 0;
* my_t *iter;
* ql_foreach(iter, list, link) {
* sum += iter->data;
* }
* return sum;
* }
*/
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
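/*
 * Putting the pieces together, a head type can be declared and the basic
 * operations exercised as follows. This is a sketch only: my_t is the
 * hypothetical element type from the examples above, carrying a
 * ql_elm(my_t) field named link.
 *
 *   typedef ql_head(my_t) my_list_t;
 *
 *   void
 *   example(my_t *a, my_t *b) {
 *       my_list_t list;
 *       ql_new(&list);
 *       ql_elm_new(a, link);
 *       ql_elm_new(b, link);
 *       ql_tail_insert(&list, a, link);
 *       ql_tail_insert(&list, b, link);
 *       // ql_first(&list) == a, ql_last(&list, link) == b
 *       ql_remove(&list, a, link);
 *       // ql_first(&list) == b
 *   }
 */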
......
#ifndef JEMALLOC_INTERNAL_QR_H
#define JEMALLOC_INTERNAL_QR_H
/*
* A ring implementation based on an embedded circular doubly-linked list.
*
* You define your struct like so:
*
* typedef struct my_s my_t;
* struct my_s {
* int data;
* qr(my_t) my_link;
* };
*
* And then pass a my_t * into macros for a_qr arguments, and the token
* "my_link" into a_field fields.
*/
/* Ring definitions. */
#define qr(a_type) \
struct { \
......@@ -8,61 +23,114 @@ struct { \
a_type *qre_prev; \
}
/* Ring functions. */
/*
* Initialize a qr link. Every link must be initialized before being used, even
* if that initialization is going to be immediately overwritten (say, by being
* passed into an insertion macro).
*/
#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
/*
* Go forwards or backwards in the ring. Note that (the ring being circular), this
* always succeeds -- you just keep looping around and around the ring if you
* chase pointers without end.
*/
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
/*
* Given two rings:
* a -> a_1 -> ... -> a_n --
* ^ |
* |------------------------
*
* b -> b_1 -> ... -> b_n --
* ^ |
* |------------------------
*
* Results in the ring:
* a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n --
* ^ |
* |-------------------------------------------------|
*
* a_qr_a can directly be a qr_next() macro, but a_qr_b cannot.
*/
#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = \
(a_qr_a)->a_field.qre_prev; \
(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
(a_qr_b)->a_field.qre_prev = \
(a_qr_b)->a_field.qre_prev->a_field.qre_next; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
} while (0)
#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
/*
 * Logically, this is just a meld. The intent, though, is that a_qr is a
* single-element ring, so that "before" has a more obvious interpretation than
* meld.
*/
#define qr_before_insert(a_qrelm, a_qr, a_field) \
qr_meld((a_qrelm), (a_qr), a_field)
#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
a_type *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
t = (a_qr_a)->a_field.qre_prev; \
(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)
/* Ditto, but inserting after rather than before. */
#define qr_after_insert(a_qrelm, a_qr, a_field) \
qr_before_insert(qr_next(a_qrelm, a_field), (a_qr), a_field)
/*
* Inverts meld; given the ring:
* a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n --
* ^ |
* |-------------------------------------------------|
*
* Results in two rings:
* a -> a_1 -> ... -> a_n --
* ^ |
* |------------------------
*
* b -> b_1 -> ... -> b_n --
* ^ |
* |------------------------
*
* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code.
*/
#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_type, a_field)
#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
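/*
 * A small round-trip sketch for qr_meld()/qr_split() (illustrative only; a,
 * b1, and b2 are assumed to be my_t pointers, with my_t and my_link as in the
 * header comment above):
 *
 *   qr_new(a, my_link);
 *   qr_new(b1, my_link);
 *   qr_new(b2, my_link);
 *   qr_after_insert(b1, b2, my_link);    // rings: (a) and (b1, b2)
 *   qr_meld(a, b1, my_link);             // one ring: a, b1, b2
 *   qr_split(a, b1, my_link);            // back to (a) and (b1, b2)
 */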
#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
= (a_qr)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
/*
* Splits off a_qr from the rest of its ring, so that it becomes a
* single-element ring.
*/
#define qr_remove(a_qr, a_field) \
qr_split(qr_next(a_qr, a_field), (a_qr), a_field)
/*
* Helper macro to iterate over each element in a ring exactly once, starting
* with a_qr. The usage is (assuming my_t defined as above):
*
* int sum(my_t *item) {
* int sum = 0;
* my_t *iter;
* qr_foreach(iter, item, link) {
* sum += iter->data;
* }
* return sum;
* }
*/
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
/*
* The same (and with the same usage) as qr_foreach, but in the opposite order,
* ending with a_qr.
*/
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
......
......@@ -30,12 +30,19 @@
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
# ifdef __loongarch__
# define LG_QUANTUM 4
# endif
# ifdef __m68k__
# define LG_QUANTUM 3
# endif
# ifdef __mips__
# if defined(__mips_n32) || defined(__mips_n64)
# define LG_QUANTUM 4
# else
# define LG_QUANTUM 3
# endif
# endif
# ifdef __nios2__
# define LG_QUANTUM 3
# endif
......@@ -61,6 +68,9 @@
# ifdef __le32__
# define LG_QUANTUM 4
# endif
# ifdef __arc__
# define LG_QUANTUM 3
# endif
# ifndef LG_QUANTUM
# error "Unknown minimum alignment for architecture; specify via "
"--with-lg-quantum"
......
#ifndef JEMALLOC_INTERNAL_RB_H
#define JEMALLOC_INTERNAL_RB_H
/*-
*******************************************************************************
*
......@@ -19,13 +22,19 @@
*******************************************************************************
*/
#ifndef RB_H_
#define RB_H_
#ifndef __PGI
#define RB_COMPACT
#endif
/*
 * Each node in the RB tree consumes at least 1 byte of space (for the linkage
 * if nothing else), so there are at most 1 << (sizeof(void *) << 3) rb tree
 * nodes in any process (and thus, at most that many nodes in any rb tree).
* The choice of algorithm bounds the depth of a tree to twice the binary log of
* the number of elements in the tree; the following bound follows.
*/
#define RB_MAX_DEPTH (sizeof(void *) << 4)
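/*
 * For example, with 8-byte pointers there can be at most 2^64 one-byte nodes,
 * so any tree holds at most 2^64 elements and its depth is bounded by
 * 2 * lg(2^64) = 128 = 8 << 4 = sizeof(void *) << 4.
 */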
#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \
......@@ -159,12 +168,22 @@ struct { \
rbtn_right_set(a_type, a_field, (r_node), (a_node)); \
} while (0)
#define rb_summarized_only_false(...)
#define rb_summarized_only_true(...) __VA_ARGS__
#define rb_empty_summarize(a_node, a_lchild, a_rchild) false
/*
* The rb_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to rb_gen().
* The rb_proto() and rb_summarized_proto() macros generate function prototypes
* that correspond to the functions generated by an equivalently parameterized
* call to rb_gen() or rb_summarized_gen(), respectively.
*/
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, false)
#define rb_summarized_proto(a_attr, a_prefix, a_rbt_type, a_type) \
rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, true)
#define rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, \
a_is_summarized) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
a_attr bool \
......@@ -195,31 +214,94 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
a_attr void \
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
void *arg);
void *arg); \
/* Extended API */ \
rb_summarized_only_##a_is_summarized( \
a_attr void \
a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node); \
a_attr bool \
a_prefix##empty_filtered(a_rbt_type *rbtree, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##first_filtered(a_rbt_type *rbtree, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##last_filtered(a_rbt_type *rbtree, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
)
/*
* The rb_gen() macro generates a type-specific red-black tree implementation,
* based on the above cpp macros.
*
* Arguments:
*
* a_attr : Function attribute for generated functions (ex: static).
* a_prefix : Prefix for generated functions (ex: ex_).
* a_rb_type : Type for red-black tree data structure (ex: ex_t).
* a_type : Type for red-black tree node data structure (ex: ex_node_t).
* a_field : Name of red-black tree node linkage (ex: ex_link).
* a_cmp : Node comparison function name, with the following prototype:
* int (a_cmp *)(a_type *a_node, a_type *a_other);
* a_attr:
* Function attribute for generated functions (ex: static).
* a_prefix:
* Prefix for generated functions (ex: ex_).
* a_rb_type:
* Type for red-black tree data structure (ex: ex_t).
* a_type:
* Type for red-black tree node data structure (ex: ex_node_t).
* a_field:
* Name of red-black tree node linkage (ex: ex_link).
* a_cmp:
* Node comparison function name, with the following prototype:
*
* int a_cmp(a_type *a_node, a_type *a_other);
* ^^^^^^
* or a_key
* Interpretation of comparison function return values:
* -1 : a_node < a_other
* 0 : a_node == a_other
* 1 : a_node > a_other
* In all cases, the a_node or a_key macro argument is the first
* argument to the comparison function, which makes it possible
* to write comparison functions that treat the first argument
* specially.
* In all cases, the a_node or a_key macro argument is the first argument to
* the comparison function, which makes it possible to write comparison
* functions that treat the first argument specially. a_cmp must be a total
* order on values inserted into the tree -- duplicates are not allowed.
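 *
 * For illustration only (a sketch, not part of the generated interface), a
 * comparator for the ex_node_t type used in the examples below (assuming it
 * carries an unsigned data field) might look like:
 *
 *   static int
 *   ex_cmp(ex_node_t *a, ex_node_t *b) {
 *       return (a->data > b->data) - (a->data < b->data);
 *   }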
*
* Assuming the following setup:
*
......@@ -338,8 +420,193 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* during iteration. There is no way to stop iteration once it
* has begun.
* arg : Opaque pointer passed to cb().
*
* The rb_summarized_gen() macro generates all the functions above, but has an
 * expanded interface. It introduces the notion of summarizing subtrees, and of
* filtering searches in the tree according to the information contained in
* those summaries.
* The extra macro argument is:
* a_summarize:
* Tree summarization function name, with the following prototype:
*
* bool a_summarize(a_type *a_node, const a_type *a_left_child,
* const a_type *a_right_child);
*
* This function should update a_node with the summary of the subtree rooted
* there, using the data contained in it and the summaries in a_left_child
* and a_right_child. One or both of them may be NULL. When the tree
* changes due to an insertion or removal, it updates the summaries of all
* nodes whose subtrees have changed (always updating the summaries of
* children before their parents). If the user alters a node in the tree in
* a way that may change its summary, they can call the generated
* update_summaries function to bubble up the summary changes to the root.
* It should return true if the summary changed (or may have changed), and
* false if it didn't (which will allow the implementation to terminate
* "bubbling up" the summaries early).
* As the parameter names indicate, the children are ordered as they are in
* the tree, a_left_child, if it is not NULL, compares less than a_node,
* which in turn compares less than a_right_child (if a_right_child is not
* NULL).
*
* Using the same setup as above but replacing the macro with
* rb_summarized_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp,
* ex_summarize)
*
* Generates all the previous functions, but adds some more:
*
* static void
* ex_update_summaries(ex_t *tree, ex_node_t *node);
* Description: Recompute all summaries of ancestors of node.
* Args:
* tree: Pointer to an initialized red-black tree object.
* node: The element of the tree whose summary may have changed.
*
* For each of ex_empty, ex_first, ex_last, ex_next, ex_prev, ex_search,
* ex_nsearch, ex_psearch, ex_iter, and ex_reverse_iter, an additional function
* is generated as well, with the suffix _filtered (e.g. ex_empty_filtered,
 * ex_first_filtered, etc.). These use the concept of a "filter": a binary
 * property some node either satisfies or does not satisfy. Clever use of the
 * a_summarize argument to rb_summarized_gen can allow efficient computation of
* these predicates across whole subtrees of the tree.
* The extended API functions accept three additional arguments after the
* arguments to the corresponding non-extended equivalent.
*
* ex_fn(..., bool (*filter_node)(void *, ex_node_t *),
* bool (*filter_subtree)(void *, ex_node_t *), void *filter_ctx);
* filter_node : Returns true if the node passes the filter.
* filter_subtree : Returns true if some node in the subtree rooted at
* node passes the filter.
* filter_ctx : A context argument passed to the filters.
*
* For a more concrete example of summarizing and filtering, suppose we're using
* the red-black tree to track a set of integers:
*
* struct ex_node_s {
* rb_node(ex_node_t) ex_link;
* unsigned data;
* };
*
* Suppose, for some application-specific reason, we want to be able to quickly
* find numbers in the set which are divisible by large powers of 2 (say, for
* aligned allocation purposes). We augment the node with a summary field:
*
* struct ex_node_s {
* rb_node(ex_node_t) ex_link;
* unsigned data;
* unsigned max_subtree_ffs;
* }
*
* and define our summarization function as follows:
*
* bool
* ex_summarize(ex_node_t *node, const ex_node_t *lchild,
* const ex_node_t *rchild) {
* unsigned new_max_subtree_ffs = ffs(node->data);
* if (lchild != NULL && lchild->max_subtree_ffs > new_max_subtree_ffs) {
* new_max_subtree_ffs = lchild->max_subtree_ffs;
* }
* if (rchild != NULL && rchild->max_subtree_ffs > new_max_subtree_ffs) {
* new_max_subtree_ffs = rchild->max_subtree_ffs;
* }
 * bool changed = (node->max_subtree_ffs != new_max_subtree_ffs);
* node->max_subtree_ffs = new_max_subtree_ffs;
* // This could be "return true" without any correctness or big-O
* // performance changes; but practically, precisely reporting summary
* // changes reduces the amount of work that has to be done when "bubbling
* // up" summary changes.
* return changed;
* }
*
* We can now implement our filter functions as follows:
* bool
* ex_filter_node(void *filter_ctx, ex_node_t *node) {
* unsigned required_ffs = *(unsigned *)filter_ctx;
* return ffs(node->data) >= required_ffs;
* }
* bool
* ex_filter_subtree(void *filter_ctx, ex_node_t *node) {
* unsigned required_ffs = *(unsigned *)filter_ctx;
* return node->max_subtree_ffs >= required_ffs;
* }
*
* We can now easily search for, e.g., the smallest integer in the set that's
* divisible by 128:
* ex_node_t *
 * find_div_128(ex_t *tree) {
 *	unsigned min_ffs = 8;
* return ex_first_filtered(tree, &ex_filter_node, &ex_filter_subtree,
* &min_ffs);
* }
*
* We could with similar ease:
 * - Find the next multiple of 128 in the set that's larger than 12345 (with
* ex_nsearch_filtered)
* - Iterate over just those multiples of 64 that are in the set (with
* ex_iter_filtered)
* - Determine if the set contains any multiples of 1024 (with
* ex_empty_filtered).
*
* Some possibly subtle API notes:
* - The node argument to ex_next_filtered and ex_prev_filtered need not pass
* the filter; it will find the next/prev node that passes the filter.
* - ex_search_filtered will fail even for a node in the tree, if that node does
* not pass the filter. ex_psearch_filtered and ex_nsearch_filtered behave
* similarly; they may return a node larger/smaller than the key, even if a
* node equivalent to the key is in the tree (but does not pass the filter).
* - Similarly, if the start argument to a filtered iteration function does not
* pass the filter, the callback won't be invoked on it.
*
* These should make sense after a moment's reflection; each post-condition is
* the same as with the unfiltered version, with the added constraint that the
* returned node must pass the filter.
*/
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp, \
rb_empty_summarize, false)
#define rb_summarized_gen(a_attr, a_prefix, a_rbt_type, a_type, \
a_field, a_cmp, a_summarize) \
rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp, \
a_summarize, true)
#define rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, \
a_field, a_cmp, a_summarize, a_is_summarized) \
typedef struct { \
a_type *node; \
int cmp; \
} a_prefix##path_entry_t; \
static inline void \
a_prefix##summarize_range(a_prefix##path_entry_t *rfirst, \
a_prefix##path_entry_t *rlast) { \
while ((uintptr_t)rlast >= (uintptr_t)rfirst) { \
a_type *node = rlast->node; \
/* Avoid a warning when a_summarize is rb_empty_summarize. */ \
(void)node; \
bool changed = a_summarize(node, rbtn_left_get(a_type, a_field, \
node), rbtn_right_get(a_type, a_field, node)); \
if (!changed) { \
break; \
} \
rlast--; \
} \
} \
/* On the remove pathways, we sometimes swap the node being removed */\
/* and its first successor; in such cases we need to do two range */\
/* updates; one from the node to its (former) swapped successor, the */\
/* next from that successor to the root (with either allowed to */\
/* bail out early if appropriate). */\
static inline void \
a_prefix##summarize_swapped_range(a_prefix##path_entry_t *rfirst, \
a_prefix##path_entry_t *rlast, a_prefix##path_entry_t *swap_loc) { \
if (swap_loc == NULL || rlast <= swap_loc) { \
a_prefix##summarize_range(rfirst, rlast); \
} else { \
a_prefix##summarize_range(swap_loc + 1, rlast); \
(void)a_summarize(swap_loc->node, \
rbtn_left_get(a_type, a_field, swap_loc->node), \
rbtn_right_get(a_type, a_field, swap_loc->node)); \
a_prefix##summarize_range(rfirst, swap_loc - 1); \
} \
} \
a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
rb_new(a_type, a_field, rbtree); \
......@@ -465,10 +732,8 @@ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
} \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
struct { \
a_type *node; \
int cmp; \
} path[sizeof(void *) << 4], *pathp; \
a_prefix##path_entry_t path[RB_MAX_DEPTH]; \
a_prefix##path_entry_t *pathp; \
rbt_node_new(a_type, a_field, rbtree, node); \
/* Wind. */ \
path->node = rbtree->rbt_root; \
......@@ -484,6 +749,13 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
} \
} \
pathp->node = node; \
/* A loop invariant we maintain is that all nodes with */\
/* out-of-date summaries live in path[0], path[1], ..., *pathp. */\
/* To maintain this, we have to summarize node, since we */\
/* decrement pathp before the first iteration. */\
assert(rbtn_left_get(a_type, a_field, node) == NULL); \
assert(rbtn_right_get(a_type, a_field, node) == NULL); \
(void)a_summarize(node, NULL, NULL); \
/* Unwind. */ \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
a_type *cnode = pathp->node; \
......@@ -498,9 +770,13 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, cnode, tnode); \
(void)a_summarize(cnode, \
rbtn_left_get(a_type, a_field, cnode), \
rbtn_right_get(a_type, a_field, cnode)); \
cnode = tnode; \
} \
} else { \
a_prefix##summarize_range(path, pathp); \
return; \
} \
} else { \
......@@ -521,13 +797,20 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbtn_rotate_left(a_type, a_field, cnode, tnode); \
rbtn_color_set(a_type, a_field, tnode, tred); \
rbtn_red_set(a_type, a_field, cnode); \
(void)a_summarize(cnode, \
rbtn_left_get(a_type, a_field, cnode), \
rbtn_right_get(a_type, a_field, cnode)); \
cnode = tnode; \
} \
} else { \
a_prefix##summarize_range(path, pathp); \
return; \
} \
} \
pathp->node = cnode; \
(void)a_summarize(cnode, \
rbtn_left_get(a_type, a_field, cnode), \
rbtn_right_get(a_type, a_field, cnode)); \
} \
/* Set root, and make it black. */ \
rbtree->rbt_root = path->node; \
......@@ -535,12 +818,18 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
} \
a_attr void \
a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
struct { \
a_type *node; \
int cmp; \
} *pathp, *nodep, path[sizeof(void *) << 4]; \
a_prefix##path_entry_t path[RB_MAX_DEPTH]; \
a_prefix##path_entry_t *pathp; \
a_prefix##path_entry_t *nodep; \
a_prefix##path_entry_t *swap_loc; \
/* This is a "real" sentinel -- NULL means we didn't swap the */\
/* node to be pruned with one of its successors, and so */\
/* summarization can terminate early whenever some summary */\
/* doesn't change. */\
swap_loc = NULL; \
/* This is just to silence a compiler warning. */ \
nodep = NULL; \
/* Wind. */ \
nodep = NULL; /* Silence compiler warning. */ \
path->node = rbtree->rbt_root; \
for (pathp = path; pathp->node != NULL; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
......@@ -567,6 +856,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
pathp--; \
if (pathp->node != node) { \
/* Swap node with its successor. */ \
swap_loc = nodep; \
bool tred = rbtn_red_get(a_type, a_field, pathp->node); \
rbtn_color_set(a_type, a_field, pathp->node, \
rbtn_red_get(a_type, a_field, node)); \
......@@ -604,6 +894,9 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_black_set(a_type, a_field, left); \
if (pathp == path) { \
rbtree->rbt_root = left; \
/* Nothing to summarize -- the subtree rooted at the */\
/* node's left child hasn't changed, and it's now the */\
/* root. */\
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
......@@ -612,6 +905,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
left); \
} \
a_prefix##summarize_swapped_range(path, &pathp[-1], \
swap_loc); \
} \
return; \
} else if (pathp == path) { \
......@@ -620,10 +915,15 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
return; \
} \
} \
/* We've now established the invariant that the node has no right */\
/* child (well, morally; we didn't bother nulling it out if we */\
/* swapped it with its successor), and that the only nodes with */\
/* out-of-date summaries live in path[0], path[1], ..., pathp[-1].*/\
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
/* Prune red node, which requires no fixup. */ \
assert(pathp[-1].cmp < 0); \
rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
a_prefix##summarize_swapped_range(path, &pathp[-1], swap_loc); \
return; \
} \
/* The node to be pruned is black, so unwind until balance is */\
......@@ -657,6 +957,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
(void)a_summarize(right, \
rbtn_left_get(a_type, a_field, right), \
rbtn_right_get(a_type, a_field, right)); \
} else { \
/* || */\
/* pathp(r) */\
......@@ -667,7 +973,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* */\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
} \
(void)a_summarize(tnode, rbtn_left_get(a_type, a_field, \
tnode), rbtn_right_get(a_type, a_field, tnode)); \
/* Balance restored, but rotation modified subtree */\
/* root. */\
assert((uintptr_t)pathp > (uintptr_t)path); \
......@@ -678,6 +989,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
a_prefix##summarize_swapped_range(path, &pathp[-1], \
swap_loc); \
return; \
} else { \
a_type *right = rbtn_right_get(a_type, a_field, \
......@@ -698,6 +1011,15 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
(void)a_summarize(right, \
rbtn_left_get(a_type, a_field, right), \
rbtn_right_get(a_type, a_field, right)); \
(void)a_summarize(tnode, \
rbtn_left_get(a_type, a_field, tnode), \
rbtn_right_get(a_type, a_field, tnode)); \
/* Balance restored, but rotation modified */\
/* subtree root, which may actually be the tree */\
/* root. */\
......@@ -712,6 +1034,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, \
pathp[-1].node, tnode); \
} \
a_prefix##summarize_swapped_range(path, \
&pathp[-1], swap_loc); \
} \
return; \
} else { \
......@@ -725,6 +1049,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_red_set(a_type, a_field, pathp->node); \
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
(void)a_summarize(tnode, \
rbtn_left_get(a_type, a_field, tnode), \
rbtn_right_get(a_type, a_field, tnode)); \
pathp->node = tnode; \
} \
} \
......@@ -757,6 +1087,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
tnode); \
rbtn_right_set(a_type, a_field, unode, tnode); \
rbtn_rotate_left(a_type, a_field, unode, tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
(void)a_summarize(unode, \
rbtn_left_get(a_type, a_field, unode), \
rbtn_right_get(a_type, a_field, unode)); \
} else { \
/* || */\
/* pathp(b) */\
......@@ -771,7 +1107,13 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
rbtn_black_set(a_type, a_field, tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
} \
(void)a_summarize(tnode, \
rbtn_left_get(a_type, a_field, tnode), \
rbtn_right_get(a_type, a_field, tnode)); \
/* Balance restored, but rotation modified subtree */\
/* root, which may actually be the tree root. */\
if (pathp == path) { \
......@@ -785,6 +1127,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
a_prefix##summarize_swapped_range(path, &pathp[-1], \
swap_loc); \
} \
return; \
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
......@@ -803,6 +1147,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
(void)a_summarize(tnode, \
rbtn_left_get(a_type, a_field, tnode), \
rbtn_right_get(a_type, a_field, tnode)); \
/* Balance restored, but rotation modified */\
/* subtree root. */\
assert((uintptr_t)pathp > (uintptr_t)path); \
......@@ -813,6 +1163,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
a_prefix##summarize_swapped_range(path, &pathp[-1], \
swap_loc); \
return; \
} else { \
/* || */\
......@@ -824,6 +1176,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_red_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, pathp->node); \
/* Balance restored. */ \
a_prefix##summarize_swapped_range(path, pathp, \
swap_loc); \
return; \
} \
} else { \
......@@ -840,6 +1194,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
(void)a_summarize(tnode, \
rbtn_left_get(a_type, a_field, tnode), \
rbtn_right_get(a_type, a_field, tnode)); \
/* Balance restored, but rotation modified */\
/* subtree root, which may actually be the tree */\
/* root. */\
......@@ -854,6 +1214,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, \
pathp[-1].node, tnode); \
} \
a_prefix##summarize_swapped_range(path, \
&pathp[-1], swap_loc); \
} \
return; \
} else { \
......@@ -864,6 +1226,9 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* / */\
/* (b) */\
rbtn_red_set(a_type, a_field, left); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
} \
} \
} \
......@@ -1001,6 +1366,491 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
void *arg) { \
a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
rbtree->rbt_root = NULL; \
}
} \
/* BEGIN SUMMARIZED-ONLY IMPLEMENTATION */ \
rb_summarized_only_##a_is_summarized( \
static inline a_prefix##path_entry_t * \
a_prefix##wind(a_rbt_type *rbtree, \
a_prefix##path_entry_t path[RB_MAX_DEPTH], a_type *node) { \
a_prefix##path_entry_t *pathp; \
path->node = rbtree->rbt_root; \
for (pathp = path; ; pathp++) { \
assert((size_t)(pathp - path) < RB_MAX_DEPTH); \
pathp->cmp = a_cmp(node, pathp->node); \
if (pathp->cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
} else if (pathp->cmp == 0) { \
return pathp; \
} else { \
pathp[1].node = rbtn_right_get(a_type, a_field, \
pathp->node); \
} \
} \
unreachable(); \
} \
a_attr void \
a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node) { \
a_prefix##path_entry_t path[RB_MAX_DEPTH]; \
a_prefix##path_entry_t *pathp = a_prefix##wind(rbtree, path, node); \
a_prefix##summarize_range(path, pathp); \
} \
a_attr bool \
a_prefix##empty_filtered(a_rbt_type *rbtree, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *node = rbtree->rbt_root; \
return node == NULL || !filter_subtree(filter_ctx, node); \
} \
static inline a_type * \
a_prefix##first_filtered_from_node(a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
assert(node != NULL && filter_subtree(filter_ctx, node)); \
while (true) { \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
if (left != NULL && filter_subtree(filter_ctx, left)) { \
node = left; \
} else if (filter_node(filter_ctx, node)) { \
return node; \
} else { \
assert(right != NULL \
&& filter_subtree(filter_ctx, right)); \
node = right; \
} \
} \
unreachable(); \
} \
a_attr a_type * \
a_prefix##first_filtered(a_rbt_type *rbtree, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *node = rbtree->rbt_root; \
if (node == NULL || !filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
return a_prefix##first_filtered_from_node(node, filter_node, \
filter_subtree, filter_ctx); \
} \
static inline a_type * \
a_prefix##last_filtered_from_node(a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
assert(node != NULL && filter_subtree(filter_ctx, node)); \
while (true) { \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
if (right != NULL && filter_subtree(filter_ctx, right)) { \
node = right; \
} else if (filter_node(filter_ctx, node)) { \
return node; \
} else { \
assert(left != NULL \
&& filter_subtree(filter_ctx, left)); \
node = left; \
} \
} \
unreachable(); \
} \
a_attr a_type * \
a_prefix##last_filtered(a_rbt_type *rbtree, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *node = rbtree->rbt_root; \
if (node == NULL || !filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
return a_prefix##last_filtered_from_node(node, filter_node, \
filter_subtree, filter_ctx); \
} \
/* Internal implementation function. Search for a node comparing */\
/* equal to key matching the filter. If such a node is in the tree, */\
/* return it. Additionally, the caller has the option to ask for */\
/* bounds on the next / prev node in the tree passing the filter. */\
/* If nextbound is true, then this function will do one of the */\
/* following: */\
/* - Fill in *nextbound_node with the smallest node in the tree */\
/* greater than key passing the filter, and NULL-out */\
/* *nextbound_subtree. */\
/* - Fill in *nextbound_subtree with a parent of that node which is */\
/* not a parent of the searched-for node, and NULL-out */\
/* *nextbound_node. */\
/* - NULL-out both *nextbound_node and *nextbound_subtree, in which */\
/* case no node greater than key but passing the filter is in the */\
/* tree. */\
/* The prevbound case is similar. If the caller knows that key is in */\
/* the tree and that the subtree rooted at key does not contain a */\
/* node satisfying the bound being searched for, then they can pass */\
/* false for include_subtree, in which case we won't bother searching */\
/* there (risking a cache miss). */\
/* */\
/* This API is unfortunately complex; but the logic for filtered */\
/* searches is very subtle, and otherwise we would have to repeat it */\
/* multiple times for filtered search, nsearch, psearch, next, and */\
/* prev. */\
static inline a_type * \
a_prefix##search_with_filter_bounds(a_rbt_type *rbtree, \
const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx, \
bool include_subtree, \
bool nextbound, a_type **nextbound_node, a_type **nextbound_subtree, \
bool prevbound, a_type **prevbound_node, a_type **prevbound_subtree) {\
if (nextbound) { \
*nextbound_node = NULL; \
*nextbound_subtree = NULL; \
} \
if (prevbound) { \
*prevbound_node = NULL; \
*prevbound_subtree = NULL; \
} \
a_type *tnode = rbtree->rbt_root; \
while (tnode != NULL && filter_subtree(filter_ctx, tnode)) { \
int cmp = a_cmp(key, tnode); \
a_type *tleft = rbtn_left_get(a_type, a_field, tnode); \
a_type *tright = rbtn_right_get(a_type, a_field, tnode); \
if (cmp < 0) { \
if (nextbound) { \
if (filter_node(filter_ctx, tnode)) { \
*nextbound_node = tnode; \
*nextbound_subtree = NULL; \
} else if (tright != NULL && filter_subtree( \
filter_ctx, tright)) { \
*nextbound_node = NULL; \
*nextbound_subtree = tright; \
} \
} \
tnode = tleft; \
} else if (cmp > 0) { \
if (prevbound) { \
if (filter_node(filter_ctx, tnode)) { \
*prevbound_node = tnode; \
*prevbound_subtree = NULL; \
} else if (tleft != NULL && filter_subtree( \
filter_ctx, tleft)) { \
*prevbound_node = NULL; \
*prevbound_subtree = tleft; \
} \
} \
tnode = tright; \
} else { \
if (filter_node(filter_ctx, tnode)) { \
return tnode; \
} \
if (include_subtree) { \
if (prevbound && tleft != NULL && filter_subtree( \
filter_ctx, tleft)) { \
*prevbound_node = NULL; \
*prevbound_subtree = tleft; \
} \
if (nextbound && tright != NULL && filter_subtree( \
filter_ctx, tright)) { \
*nextbound_node = NULL; \
*nextbound_subtree = tright; \
} \
} \
return NULL; \
} \
} \
return NULL; \
} \
a_attr a_type * \
a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *nright = rbtn_right_get(a_type, a_field, node); \
if (nright != NULL && filter_subtree(filter_ctx, nright)) { \
return a_prefix##first_filtered_from_node(nright, filter_node, \
filter_subtree, filter_ctx); \
} \
a_type *node_candidate; \
a_type *subtree_candidate; \
a_type *search_result = a_prefix##search_with_filter_bounds( \
rbtree, node, filter_node, filter_subtree, filter_ctx, \
/* include_subtree */ false, \
/* nextbound */ true, &node_candidate, &subtree_candidate, \
/* prevbound */ false, NULL, NULL); \
assert(node == search_result \
|| !filter_node(filter_ctx, node)); \
if (node_candidate != NULL) { \
return node_candidate; \
} \
if (subtree_candidate != NULL) { \
return a_prefix##first_filtered_from_node( \
subtree_candidate, filter_node, filter_subtree, \
filter_ctx); \
} \
return NULL; \
} \
a_attr a_type * \
a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *nleft = rbtn_left_get(a_type, a_field, node); \
if (nleft != NULL && filter_subtree(filter_ctx, nleft)) { \
return a_prefix##last_filtered_from_node(nleft, filter_node, \
filter_subtree, filter_ctx); \
} \
a_type *node_candidate; \
a_type *subtree_candidate; \
a_type *search_result = a_prefix##search_with_filter_bounds( \
rbtree, node, filter_node, filter_subtree, filter_ctx, \
/* include_subtree */ false, \
/* nextbound */ false, NULL, NULL, \
/* prevbound */ true, &node_candidate, &subtree_candidate); \
assert(node == search_result \
|| !filter_node(filter_ctx, node)); \
if (node_candidate != NULL) { \
return node_candidate; \
} \
if (subtree_candidate != NULL) { \
return a_prefix##last_filtered_from_node( \
subtree_candidate, filter_node, filter_subtree, \
filter_ctx); \
} \
return NULL; \
} \
a_attr a_type * \
a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \
filter_node, filter_subtree, filter_ctx, \
/* include_subtree */ false, \
/* nextbound */ false, NULL, NULL, \
/* prevbound */ false, NULL, NULL); \
return result; \
} \
a_attr a_type * \
a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *node_candidate; \
a_type *subtree_candidate; \
a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \
filter_node, filter_subtree, filter_ctx, \
/* include_subtree */ true, \
/* nextbound */ true, &node_candidate, &subtree_candidate, \
/* prevbound */ false, NULL, NULL); \
if (result != NULL) { \
return result; \
} \
if (node_candidate != NULL) { \
return node_candidate; \
} \
if (subtree_candidate != NULL) { \
return a_prefix##first_filtered_from_node( \
subtree_candidate, filter_node, filter_subtree, \
filter_ctx); \
} \
return NULL; \
} \
a_attr a_type * \
a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *node_candidate; \
a_type *subtree_candidate; \
a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \
filter_node, filter_subtree, filter_ctx, \
/* include_subtree */ true, \
/* nextbound */ false, NULL, NULL, \
/* prevbound */ true, &node_candidate, &subtree_candidate); \
if (result != NULL) { \
return result; \
} \
if (node_candidate != NULL) { \
return node_candidate; \
} \
if (subtree_candidate != NULL) { \
return a_prefix##last_filtered_from_node( \
subtree_candidate, filter_node, filter_subtree, \
filter_ctx); \
} \
return NULL; \
} \
a_attr a_type * \
a_prefix##iter_recurse_filtered(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
if (node == NULL || !filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
a_type *ret; \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
ret = a_prefix##iter_recurse_filtered(rbtree, left, cb, arg, \
filter_node, filter_subtree, filter_ctx); \
if (ret != NULL) { \
return ret; \
} \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
} \
if (ret != NULL) { \
return ret; \
} \
return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \
filter_node, filter_subtree, filter_ctx); \
} \
a_attr a_type * \
a_prefix##iter_start_filtered(a_rbt_type *rbtree, a_type *start, \
a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
void *arg, bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
if (!filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
int cmp = a_cmp(start, node); \
a_type *ret; \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
if (cmp < 0) { \
ret = a_prefix##iter_start_filtered(rbtree, start, left, cb, \
arg, filter_node, filter_subtree, filter_ctx); \
if (ret != NULL) { \
return ret; \
} \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
if (ret != NULL) { \
return ret; \
} \
} \
return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \
filter_node, filter_subtree, filter_ctx); \
} else if (cmp > 0) { \
return a_prefix##iter_start_filtered(rbtree, start, right, \
cb, arg, filter_node, filter_subtree, filter_ctx); \
} else { \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
if (ret != NULL) { \
return ret; \
} \
} \
return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \
filter_node, filter_subtree, filter_ctx); \
} \
} \
a_attr a_type * \
a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *ret; \
if (start != NULL) { \
ret = a_prefix##iter_start_filtered(rbtree, start, \
rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \
filter_ctx); \
} else { \
ret = a_prefix##iter_recurse_filtered(rbtree, rbtree->rbt_root, \
cb, arg, filter_node, filter_subtree, filter_ctx); \
} \
return ret; \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse_filtered(a_rbt_type *rbtree, \
a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
if (node == NULL || !filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
a_type *ret; \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
ret = a_prefix##reverse_iter_recurse_filtered(rbtree, right, cb, \
arg, filter_node, filter_subtree, filter_ctx); \
if (ret != NULL) { \
return ret; \
} \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
} \
if (ret != NULL) { \
return ret; \
} \
return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb, \
arg, filter_node, filter_subtree, filter_ctx); \
} \
a_attr a_type * \
a_prefix##reverse_iter_start_filtered(a_rbt_type *rbtree, a_type *start,\
a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
void *arg, bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
if (!filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
int cmp = a_cmp(start, node); \
a_type *ret; \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
if (cmp > 0) { \
ret = a_prefix##reverse_iter_start_filtered(rbtree, start, \
right, cb, arg, filter_node, filter_subtree, filter_ctx); \
if (ret != NULL) { \
return ret; \
} \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
if (ret != NULL) { \
return ret; \
} \
} \
return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\
arg, filter_node, filter_subtree, filter_ctx); \
} else if (cmp < 0) { \
return a_prefix##reverse_iter_start_filtered(rbtree, start, \
left, cb, arg, filter_node, filter_subtree, filter_ctx); \
} else { \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
if (ret != NULL) { \
return ret; \
} \
} \
return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\
arg, filter_node, filter_subtree, filter_ctx); \
} \
} \
a_attr a_type * \
a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *ret; \
if (start != NULL) { \
ret = a_prefix##reverse_iter_start_filtered(rbtree, start, \
rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \
filter_ctx); \
} else { \
ret = a_prefix##reverse_iter_recurse_filtered(rbtree, \
rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \
filter_ctx); \
} \
return ret; \
} \
) /* end rb_summarized_only */
#endif /* RB_H_ */
#endif /* JEMALLOC_INTERNAL_RB_H */
......@@ -35,33 +35,52 @@
# define RTREE_LEAF_COMPACT
#endif
/* Needed for initialization only. */
#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
typedef struct rtree_node_elm_s rtree_node_elm_t;
struct rtree_node_elm_s {
atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
};
typedef struct rtree_metadata_s rtree_metadata_t;
struct rtree_metadata_s {
szind_t szind;
extent_state_t state; /* Mirrors edata->state. */
bool is_head; /* Mirrors edata->is_head. */
bool slab;
};
typedef struct rtree_contents_s rtree_contents_t;
struct rtree_contents_s {
edata_t *edata;
rtree_metadata_t metadata;
};
#define RTREE_LEAF_STATE_WIDTH EDATA_BITS_STATE_WIDTH
#define RTREE_LEAF_STATE_SHIFT 2
#define RTREE_LEAF_STATE_MASK MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT)
struct rtree_leaf_elm_s {
#ifdef RTREE_LEAF_COMPACT
/*
* Single pointer-width field containing all three leaf element fields.
* For example, on a 64-bit x64 system with 48 significant virtual
* memory address bits, the index, extent, and slab fields are packed as
* memory address bits, the index, edata, and slab fields are packed as
* such:
*
* x: index
* e: extent
* e: edata
* s: state
* h: is_head
* b: slab
*
* 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
* 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee e00ssshb
*/
atomic_p_t le_bits;
#else
atomic_p_t le_extent; /* (extent_t *) */
atomic_u_t le_szind; /* (szind_t) */
atomic_b_t le_slab; /* (bool) */
atomic_p_t le_edata; /* (edata_t *) */
/*
* From high to low bits: szind (8 bits), state (4 bits), is_head, slab
*/
atomic_u_t le_metadata;
#endif
};
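/*
 * To make the compact layout above concrete, the sketch below packs a leaf
 * element by hand. It is illustrative only: it assumes LG_VADDR == 48, a
 * 3-bit state field starting at bit 2, and an edata_t alignment that keeps at
 * least the low five address bits zero. The real code paths are
 * rtree_leaf_elm_bits_encode() and rtree_leaf_elm_bits_decode() below.
 *
 *   static uintptr_t
 *   pack_leaf_bits(edata_t *edata, unsigned szind, unsigned state,
 *       bool is_head, bool slab) {
 *       uintptr_t bits = (uintptr_t)edata & (((uintptr_t)1 << 48) - 1);
 *       bits |= (uintptr_t)szind << 48;
 *       bits |= (uintptr_t)state << 2;
 *       bits |= (uintptr_t)is_head << 1;
 *       bits |= (uintptr_t)slab;
 *       return bits;
 *   }
 *
 *   // Unpacking reverses the shifts, e.g. szind == bits >> 48 and
 *   // slab == (bits & 1); the pointer is recovered by masking the metadata
 *   // back off (and, on most architectures, sign-extending the high bits).
 */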
......@@ -78,6 +97,7 @@ struct rtree_level_s {
typedef struct rtree_s rtree_t;
struct rtree_s {
base_t *base;
malloc_mutex_t init_lock;
/* Number of elements based on rtree_levels[0].bits. */
#if RTREE_HEIGHT > 1
......@@ -109,42 +129,29 @@ static const rtree_level_t rtree_levels[] = {
#endif
};
bool rtree_new(rtree_t *rtree, bool zeroed);
typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;
bool rtree_new(rtree_t *rtree, base_t *base, bool zeroed);
typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;
typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;
typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
#ifdef JEMALLOC_JET
void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
#endif
rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leafkey(uintptr_t key) {
JEMALLOC_ALWAYS_INLINE unsigned
rtree_leaf_maskbits(void) {
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
rtree_levels[RTREE_HEIGHT-1].bits);
unsigned maskbits = ptrbits - cumbits;
uintptr_t mask = ~((ZU(1) << maskbits) - 1);
return ptrbits - cumbits;
}
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leafkey(uintptr_t key) {
uintptr_t mask = ~((ZU(1) << rtree_leaf_maskbits()) - 1);
return (key & mask);
}
JEMALLOC_ALWAYS_INLINE size_t
rtree_cache_direct_map(uintptr_t key) {
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
rtree_levels[RTREE_HEIGHT-1].bits);
unsigned maskbits = ptrbits - cumbits;
return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
return (size_t)((key >> rtree_leaf_maskbits()) &
(RTREE_CTX_NCACHE - 1));
}
JEMALLOC_ALWAYS_INLINE uintptr_t
......@@ -176,151 +183,174 @@ rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leaf_elm_bits_encode(rtree_contents_t contents) {
assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
uintptr_t edata_bits = (uintptr_t)contents.edata
& (((uintptr_t)1 << LG_VADDR) - 1);
uintptr_t szind_bits = (uintptr_t)contents.metadata.szind << LG_VADDR;
uintptr_t slab_bits = (uintptr_t)contents.metadata.slab;
uintptr_t is_head_bits = (uintptr_t)contents.metadata.is_head << 1;
uintptr_t state_bits = (uintptr_t)contents.metadata.state <<
RTREE_LEAF_STATE_SHIFT;
uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits |
slab_bits;
assert((edata_bits & metadata_bits) == 0);
return edata_bits | metadata_bits;
}
JEMALLOC_ALWAYS_INLINE rtree_contents_t
rtree_leaf_elm_bits_decode(uintptr_t bits) {
rtree_contents_t contents;
/* Do the easy things first. */
contents.metadata.szind = bits >> LG_VADDR;
contents.metadata.slab = (bool)(bits & 1);
contents.metadata.is_head = (bool)(bits & (1 << 1));
uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK) >>
RTREE_LEAF_STATE_SHIFT;
assert(state_bits <= extent_state_max);
contents.metadata.state = (extent_state_t)state_bits;
uintptr_t low_bit_mask = ~((uintptr_t)EDATA_ALIGNMENT - 1);
# ifdef __aarch64__
/*
* aarch64 doesn't sign extend the highest virtual address bit to set
* the higher ones. Instead, the high bits gets zeroed.
* the higher ones. Instead, the high bits get zeroed.
*/
uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
/* Mask off the slab bit. */
uintptr_t low_bit_mask = ~(uintptr_t)1;
/* Mask off metadata. */
uintptr_t mask = high_bit_mask & low_bit_mask;
return (extent_t *)(bits & mask);
contents.edata = (edata_t *)(bits & mask);
# else
/* Restore sign-extended high bits, mask slab bit. */
return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
RTREE_NHIB) & ~((uintptr_t)0x1));
/* Restore sign-extended high bits, mask metadata bits. */
contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB)
>> RTREE_NHIB) & low_bit_mask);
# endif
assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
return contents;
}
JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
return (szind_t)(bits >> LG_VADDR);
}
JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
return (bool)(bits & (uintptr_t)0x1);
}
# endif
# endif /* RTREE_LEAF_COMPACT */
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool dependent) {
JEMALLOC_ALWAYS_INLINE rtree_contents_t
rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool dependent) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
return rtree_leaf_elm_bits_extent_get(bits);
rtree_contents_t contents = rtree_leaf_elm_bits_decode(bits);
return contents;
#else
extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
rtree_contents_t contents;
unsigned metadata_bits = atomic_load_u(&elm->le_metadata, dependent
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
return extent;
#endif
}
contents.metadata.slab = (bool)(metadata_bits & 1);
contents.metadata.is_head = (bool)(metadata_bits & (1 << 1));
JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool dependent) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
return rtree_leaf_elm_bits_szind_get(bits);
#else
return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
: ATOMIC_ACQUIRE);
uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK) >>
RTREE_LEAF_STATE_SHIFT;
assert(state_bits <= extent_state_max);
contents.metadata.state = (extent_state_t)state_bits;
contents.metadata.szind = metadata_bits >> (RTREE_LEAF_STATE_SHIFT +
RTREE_LEAF_STATE_WIDTH);
contents.edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
return contents;
#endif
}
JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool dependent) {
JEMALLOC_ALWAYS_INLINE void
rtree_contents_encode(rtree_contents_t contents, void **bits,
unsigned *additional) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
return rtree_leaf_elm_bits_slab_get(bits);
*bits = (void *)rtree_leaf_elm_bits_encode(contents);
#else
return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED :
ATOMIC_ACQUIRE);
*additional = (unsigned)contents.metadata.slab
| ((unsigned)contents.metadata.is_head << 1)
| ((unsigned)contents.metadata.state << RTREE_LEAF_STATE_SHIFT)
| ((unsigned)contents.metadata.szind << (RTREE_LEAF_STATE_SHIFT +
RTREE_LEAF_STATE_WIDTH));
*bits = contents.edata;
#endif
}
JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, void *bits, unsigned additional) {
#ifdef RTREE_LEAF_COMPACT
atomic_store_p(&elm->le_bits, bits, ATOMIC_RELEASE);
#else
atomic_store_u(&elm->le_metadata, additional, ATOMIC_RELEASE);
/*
 * Write edata last, since the element is atomically considered valid
 * as soon as the edata field is non-NULL.
 */
atomic_store_p(&elm->le_edata, bits, ATOMIC_RELEASE);
#endif
}
JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, rtree_contents_t contents) {
assert((uintptr_t)contents.edata % EDATA_ALIGNMENT == 0);
void *bits;
unsigned additional;
rtree_contents_encode(contents, &bits, &additional);
rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
}
/* The state field can be updated independently (and more frequently). */
JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_state_update(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm1, rtree_leaf_elm_t *elm2, extent_state_t state) {
assert(elm1 != NULL);
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm1,
/* dependent */ true);
bits &= ~RTREE_LEAF_STATE_MASK;
bits |= state << RTREE_LEAF_STATE_SHIFT;
atomic_store_p(&elm1->le_bits, (void *)bits, ATOMIC_RELEASE);
if (elm2 != NULL) {
atomic_store_p(&elm2->le_bits, (void *)bits, ATOMIC_RELEASE);
}
#else
unsigned bits = atomic_load_u(&elm1->le_metadata, ATOMIC_RELAXED);
bits &= ~RTREE_LEAF_STATE_MASK;
bits |= state << RTREE_LEAF_STATE_SHIFT;
atomic_store_u(&elm1->le_metadata, bits, ATOMIC_RELEASE);
if (elm2 != NULL) {
atomic_store_u(&elm2->le_metadata, bits, ATOMIC_RELEASE);
}
#endif
}
/*
 * Tries to look up the key in the L1 cache, returning false if there's a hit,
 * or true if there's a miss.
 * Key is allowed to be NULL; returns true in this case.
 */
JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, rtree_leaf_elm_t **elm) {
size_t slot = rtree_cache_direct_map(key);
uintptr_t leafkey = rtree_leafkey(key);
assert(leafkey != RTREE_LEAFKEY_INVALID);
if (unlikely(rtree_ctx->cache[slot].leafkey != leafkey)) {
return true;
}
rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
assert(leaf != NULL);
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
*elm = &leaf[subkey];
return false;
}
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
......@@ -382,147 +412,143 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
dependent, init_missing);
}
/*
 * Returns true on lookup failure.
 */
static inline bool
rtree_read_independent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, rtree_contents_t *r_contents) {
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, /* dependent */ false, /* init_missing */ false);
if (elm == NULL) {
return true;
}
*r_contents = rtree_leaf_elm_read(tsdn, rtree, elm,
/* dependent */ false);
return false;
}
static inline rtree_contents_t
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key) {
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, /* dependent */ true, /* init_missing */ false);
assert(elm != NULL);
return rtree_leaf_elm_read(tsdn, rtree, elm, /* dependent */ true);
}
static inline rtree_metadata_t
rtree_metadata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key) {
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, /* dependent */ true, /* init_missing */ false);
assert(elm != NULL);
return rtree_leaf_elm_read(tsdn, rtree, elm,
/* dependent */ true).metadata;
}
/*
 * Returns true when the request cannot be fulfilled by the fastpath.
 */
static inline bool
rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, rtree_metadata_t *r_rtree_metadata) {
rtree_leaf_elm_t *elm;
/*
 * Should check the bool return value (lookup success or not) instead of
 * elm == NULL (which will result in an extra branch). This is because
 * when the cache lookup succeeds, there will never be a NULL pointer
 * returned (which is unknown to the compiler).
 */
if (rtree_leaf_elm_lookup_fast(tsdn, rtree, rtree_ctx, key, &elm)) {
return true;
}
assert(elm != NULL);
*r_rtree_metadata = rtree_leaf_elm_read(tsdn, rtree, elm,
/* dependent */ true).metadata;
return false;
}
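Purely as an illustrative sketch (not in the diff): a caller would normally pair the fastpath with the dependent slow path roughly as below. The surrounding variables (tsdn, rtree, rtree_ctx, ptr) are assumed to be in scope.

/* Sketch: metadata lookup with an L1-cache fastpath and slow-path fallback. */
rtree_metadata_t metadata;
if (rtree_metadata_try_read_fast(tsdn, rtree, rtree_ctx, (uintptr_t)ptr,
    &metadata)) {
	/* Cache miss: fall back to the full (dependent) lookup. */
	metadata = rtree_metadata_read(tsdn, rtree, rtree_ctx, (uintptr_t)ptr);
}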
JEMALLOC_ALWAYS_INLINE void
rtree_write_range_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t base, uintptr_t end, rtree_contents_t contents, bool clearing) {
assert((base & PAGE_MASK) == 0 && (end & PAGE_MASK) == 0);
/*
 * Only used for emap_(de)register_interior, which implies the
 * boundaries have been registered already. Therefore all the lookups
 * are dependent w/o init_missing, assuming the range spans across at
 * most 2 rtree leaf nodes (each covers 1 GiB of vaddr).
 */
void *bits;
unsigned additional;
rtree_contents_encode(contents, &bits, &additional);
rtree_leaf_elm_t *elm = NULL; /* Dead store. */
for (uintptr_t addr = base; addr <= end; addr += PAGE) {
if (addr == base ||
(addr & ((ZU(1) << rtree_leaf_maskbits()) - 1)) == 0) {
elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
/* dependent */ true, /* init_missing */ false);
assert(elm != NULL);
}
assert(elm == rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
/* dependent */ true, /* init_missing */ false));
assert(!clearing || rtree_leaf_elm_read(tsdn, rtree, elm,
/* dependent */ true).edata != NULL);
rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
elm++;
}
}
JEMALLOC_ALWAYS_INLINE void
rtree_write_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t base, uintptr_t end, rtree_contents_t contents) {
rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
/* clearing */ false);
}
JEMALLOC_ALWAYS_INLINE bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
rtree_contents_t contents) {
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, /* dependent */ false, /* init_missing */ true);
if (elm == NULL) {
return true;
}
rtree_leaf_elm_write(tsdn, rtree, elm, contents);
return false;
}
static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key) {
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, /* dependent */ true, /* init_missing */ false);
assert(elm != NULL);
assert(rtree_leaf_elm_read(tsdn, rtree, elm,
/* dependent */ true).edata != NULL);
rtree_contents_t contents;
contents.edata = NULL;
contents.metadata.szind = SC_NSIZES;
contents.metadata.slab = false;
contents.metadata.is_head = false;
contents.metadata.state = (extent_state_t)0;
rtree_leaf_elm_write(tsdn, rtree, elm, contents);
}
static inline void
rtree_clear_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t base, uintptr_t end) {
rtree_contents_t contents;
contents.edata = NULL;
contents.metadata.szind = SC_NSIZES;
contents.metadata.slab = false;
contents.metadata.is_head = false;
contents.metadata.state = (extent_state_t)0;
rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
/* clearing */ true);
}
#endif /* JEMALLOC_INTERNAL_RTREE_H */
......@@ -18,16 +18,28 @@
* cache misses if made overly large, plus the cost of linear search in the LRU
* cache.
*/
#define RTREE_CTX_NCACHE 16
#define RTREE_CTX_NCACHE_L2 8
/* Needed for initialization only. */
#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
#define RTREE_CTX_CACHE_ELM_INVALID {RTREE_LEAFKEY_INVALID, NULL}
#define RTREE_CTX_INIT_ELM_1 RTREE_CTX_CACHE_ELM_INVALID
#define RTREE_CTX_INIT_ELM_2 RTREE_CTX_INIT_ELM_1, RTREE_CTX_INIT_ELM_1
#define RTREE_CTX_INIT_ELM_4 RTREE_CTX_INIT_ELM_2, RTREE_CTX_INIT_ELM_2
#define RTREE_CTX_INIT_ELM_8 RTREE_CTX_INIT_ELM_4, RTREE_CTX_INIT_ELM_4
#define RTREE_CTX_INIT_ELM_16 RTREE_CTX_INIT_ELM_8, RTREE_CTX_INIT_ELM_8
#define _RTREE_CTX_INIT_ELM_DATA(n) RTREE_CTX_INIT_ELM_##n
#define RTREE_CTX_INIT_ELM_DATA(n) _RTREE_CTX_INIT_ELM_DATA(n)
/*
 * Static initializer (to invalidate the cache entries) is required because the
 * free fastpath may access the rtree cache before a full tsd initialization.
 */
#define RTREE_CTX_INITIALIZER {{RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE)}, \
{RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE_L2)}}
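Editorial note on how the initializer expands (the extra macro level above exists to force RTREE_CTX_NCACHE to expand before token pasting):

/*
 * RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE)
 *   -> _RTREE_CTX_INIT_ELM_DATA(16)   (RTREE_CTX_NCACHE expands first)
 *   -> RTREE_CTX_INIT_ELM_16          (token pasting)
 *   -> sixteen copies of {RTREE_LEAFKEY_INVALID, NULL}
 */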
typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;
......
#ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H
#define JEMALLOC_INTERNAL_SAFETY_CHECK_H
void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr,
size_t true_size, size_t input_size);
void safety_check_fail(const char *format, ...);
typedef void (*safety_check_abort_hook_t)(const char *message);
/* Can set to NULL for a default. */
void safety_check_set_abort(safety_check_abort_hook_t abort_fn);
JEMALLOC_ALWAYS_INLINE void
safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) {
......
#ifndef JEMALLOC_INTERNAL_GUARD_H
#define JEMALLOC_INTERNAL_GUARD_H
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/emap.h"
#define SAN_PAGE_GUARD PAGE
#define SAN_PAGE_GUARDS_SIZE (SAN_PAGE_GUARD * 2)
#define SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT 0
#define SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT 0
#define SAN_LG_UAF_ALIGN_DEFAULT (-1)
#define SAN_CACHE_BIN_NONFAST_MASK_DEFAULT (uintptr_t)(-1)
static const uintptr_t uaf_detect_junk = (uintptr_t)0x5b5b5b5b5b5b5b5bULL;
/* 0 means disabled, i.e. never guarded. */
extern size_t opt_san_guard_large;
extern size_t opt_san_guard_small;
/* -1 means disabled, i.e. never check for use-after-free. */
extern ssize_t opt_lg_san_uaf_align;
void san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
emap_t *emap, bool left, bool right, bool remap);
void san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
emap_t *emap, bool left, bool right);
/*
* Unguard the extent, but don't modify emap boundaries. Must be called on an
* extent that has been erased from emap and shouldn't be placed back.
*/
void san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks,
edata_t *edata, emap_t *emap);
void san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize);
void tsd_san_init(tsd_t *tsd);
void san_init(ssize_t lg_san_uaf_align);
static inline void
san_guard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
emap_t *emap, bool remap) {
san_guard_pages(tsdn, ehooks, edata, emap, true, true, remap);
}
static inline void
san_unguard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
emap_t *emap) {
san_unguard_pages(tsdn, ehooks, edata, emap, true, true);
}
static inline size_t
san_two_side_unguarded_sz(size_t size) {
assert(size % PAGE == 0);
assert(size >= SAN_PAGE_GUARDS_SIZE);
return size - SAN_PAGE_GUARDS_SIZE;
}
static inline size_t
san_two_side_guarded_sz(size_t size) {
assert(size % PAGE == 0);
return size + SAN_PAGE_GUARDS_SIZE;
}
static inline size_t
san_one_side_unguarded_sz(size_t size) {
assert(size % PAGE == 0);
assert(size >= SAN_PAGE_GUARD);
return size - SAN_PAGE_GUARD;
}
static inline size_t
san_one_side_guarded_sz(size_t size) {
assert(size % PAGE == 0);
return size + SAN_PAGE_GUARD;
}
static inline bool
san_guard_enabled(void) {
return (opt_san_guard_large != 0 || opt_san_guard_small != 0);
}
static inline bool
san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size,
size_t alignment) {
if (opt_san_guard_large == 0 || ehooks_guard_will_fail(ehooks) ||
tsdn_null(tsdn)) {
return false;
}
tsd_t *tsd = tsdn_tsd(tsdn);
uint64_t n = tsd_san_extents_until_guard_large_get(tsd);
assert(n >= 1);
if (n > 1) {
/*
* Subtract conditionally because the guard may not happen due
* to alignment or size restriction below.
*/
*tsd_san_extents_until_guard_largep_get(tsd) = n - 1;
}
if (n == 1 && (alignment <= PAGE) &&
(san_two_side_guarded_sz(size) <= SC_LARGE_MAXCLASS)) {
*tsd_san_extents_until_guard_largep_get(tsd) =
opt_san_guard_large;
return true;
} else {
assert(tsd_san_extents_until_guard_large_get(tsd) >= 1);
return false;
}
}
static inline bool
san_slab_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks) {
if (opt_san_guard_small == 0 || ehooks_guard_will_fail(ehooks) ||
tsdn_null(tsdn)) {
return false;
}
tsd_t *tsd = tsdn_tsd(tsdn);
uint64_t n = tsd_san_extents_until_guard_small_get(tsd);
assert(n >= 1);
if (n == 1) {
*tsd_san_extents_until_guard_smallp_get(tsd) =
opt_san_guard_small;
return true;
} else {
*tsd_san_extents_until_guard_smallp_get(tsd) = n - 1;
assert(tsd_san_extents_until_guard_small_get(tsd) >= 1);
return false;
}
}
static inline void
san_junk_ptr_locations(void *ptr, size_t usize, void **first, void **mid,
void **last) {
size_t ptr_sz = sizeof(void *);
*first = ptr;
*mid = (void *)((uintptr_t)ptr + ((usize >> 1) & ~(ptr_sz - 1)));
assert(*first != *mid || usize == ptr_sz);
assert((uintptr_t)*first <= (uintptr_t)*mid);
/*
* When usize > 32K, the gap between requested_size and usize might be
* greater than 4K -- this means the last write may access a
* likely-untouched page (default settings w/ 4K pages). However by
* default the tcache only goes up to the 32K size class, and is usually
* tuned lower instead of higher, which makes it less of a concern.
*/
*last = (void *)((uintptr_t)ptr + usize - sizeof(uaf_detect_junk));
assert(*first != *last || usize == ptr_sz);
assert(*mid != *last || usize <= ptr_sz * 2);
assert((uintptr_t)*mid <= (uintptr_t)*last);
}
static inline bool
san_junk_ptr_should_slow(void) {
/*
* The latter condition (pointer size greater than the min size class)
* is not expected -- fall back to the slow path for simplicity.
*/
return config_debug || (LG_SIZEOF_PTR > SC_LG_TINY_MIN);
}
static inline void
san_junk_ptr(void *ptr, size_t usize) {
if (san_junk_ptr_should_slow()) {
memset(ptr, (char)uaf_detect_junk, usize);
return;
}
void *first, *mid, *last;
san_junk_ptr_locations(ptr, usize, &first, &mid, &last);
*(uintptr_t *)first = uaf_detect_junk;
*(uintptr_t *)mid = uaf_detect_junk;
*(uintptr_t *)last = uaf_detect_junk;
}
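A worked example of the three fast-path junk locations (editorial aside; assumes 8-byte pointers):

/*
 * For usize == 4096 and sizeof(void *) == 8:
 *   first = ptr
 *   mid   = ptr + ((4096 >> 1) & ~(8 - 1)) = ptr + 2048
 *   last  = ptr + 4096 - 8                 = ptr + 4088
 * i.e. the fast path touches only the start, middle, and end of the region
 * instead of memset()ting all of it.
 */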
static inline bool
san_uaf_detection_enabled(void) {
bool ret = config_uaf_detection && (opt_lg_san_uaf_align != -1);
if (config_uaf_detection && ret) {
assert(san_cache_bin_nonfast_mask == ((uintptr_t)1 <<
opt_lg_san_uaf_align) - 1);
}
return ret;
}
#endif /* JEMALLOC_INTERNAL_GUARD_H */
#ifndef JEMALLOC_INTERNAL_SAN_BUMP_H
#define JEMALLOC_INTERNAL_SAN_BUMP_H
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/exp_grow.h"
#include "jemalloc/internal/mutex.h"
#define SBA_RETAINED_ALLOC_SIZE ((size_t)4 << 20)
extern bool opt_retain;
typedef struct ehooks_s ehooks_t;
typedef struct pac_s pac_t;
typedef struct san_bump_alloc_s san_bump_alloc_t;
struct san_bump_alloc_s {
malloc_mutex_t mtx;
edata_t *curr_reg;
};
static inline bool
san_bump_enabled() {
/*
* We enable san_bump allocator only when it's possible to break up a
* mapping and unmap a part of it (maps_coalesce). This is needed to
* ensure the arena destruction process can destroy all retained guarded
* extents one by one and to unmap a trailing part of a retained guarded
* region when it's too small to fit a pending allocation.
* opt_retain is required, because this allocator retains a large
* virtual memory mapping and returns smaller parts of it.
*/
return maps_coalesce && opt_retain;
}
static inline bool
san_bump_alloc_init(san_bump_alloc_t* sba) {
bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator",
WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive);
if (err) {
return true;
}
sba->curr_reg = NULL;
return false;
}
edata_t *
san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, ehooks_t *ehooks,
size_t size, bool zero);
#endif /* JEMALLOC_INTERNAL_SAN_BUMP_H */
......@@ -197,30 +197,34 @@
(SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1)
#define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR)
/*
 * The number of size classes that are a multiple of the page size.
 *
 * Here are the first few bases that have a page-sized SC.
 *
 * lg(base)      | base         | highest SC | page-multiple SCs
 * --------------|--------------|------------|-------------------
 * LG_PAGE - 1   | PAGE / 2     | PAGE       | 1
 * LG_PAGE       | PAGE         | 2 * PAGE   | 1
 * LG_PAGE + 1   | 2 * PAGE     | 4 * PAGE   | 2
 * LG_PAGE + 2   | 4 * PAGE     | 8 * PAGE   | 4
 *
 * The number of page-multiple SCs continues to grow in powers of two, up until
 * lg_delta == lg_page, which corresponds to setting lg_base to lg_page +
 * SC_LG_NGROUP. So the number of size classes that are multiples of the page
 * size and whose lg_delta is less than lg_page is
 * 1 + (2**0 + 2**1 + ... + 2**(lg_ngroup - 1)) == 2**lg_ngroup.
*
* For each base with lg_base in [lg_page + lg_ngroup, lg_base_max), there are
* NGROUP page-sized size classes, and when lg_base == lg_base_max, there are
* NGROUP - 1.
*
* This gives us the quantity we seek.
*/
#define SC_NPSIZES ( \
SC_NGROUP \
+ (SC_LG_BASE_MAX - (LG_PAGE + SC_LG_NGROUP)) * SC_NGROUP \
+ SC_NGROUP - 1)
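To make the formula concrete, here is the arithmetic for one assumed configuration (LG_PAGE == 12, SC_LG_NGROUP == 2, and a hypothetical SC_LG_BASE_MAX of 62); the numbers are illustrative only:

/*
 * Illustration only (assumed LG_PAGE = 12, SC_LG_NGROUP = 2, SC_LG_BASE_MAX = 62):
 *   SC_NGROUP                                               =   4
 *   (SC_LG_BASE_MAX - (LG_PAGE + SC_LG_NGROUP)) * SC_NGROUP = (62 - 14) * 4 = 192
 *   SC_NGROUP - 1                                           =   3
 *   SC_NPSIZES                                              = 199
 */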
/*
* We declare a size class is binnable if size < page size * group. Or, in other
......@@ -242,17 +246,23 @@
# error "Too many small size classes"
#endif
/* The largest size class in the lookup table, and its binary log. */
#define SC_LG_MAX_LOOKUP 12
#define SC_LOOKUP_MAXCLASS (1 << SC_LG_MAX_LOOKUP)
/* Internal, only used for the definition of SC_SMALL_MAXCLASS. */
#define SC_SMALL_MAX_BASE (1 << (LG_PAGE + SC_LG_NGROUP - 1))
#define SC_SMALL_MAX_DELTA (1 << (LG_PAGE - 1))
/* The largest size class allocated out of a slab. */
#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \
+ (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)
/* The fastpath assumes all lookup-able sizes are small. */
#if (SC_SMALL_MAXCLASS < SC_LOOKUP_MAXCLASS)
# error "Lookup table sizes must be small"
#endif
/* The smallest size class not allocated out of a slab. */
#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP))
#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP)
......@@ -264,6 +274,19 @@
/* The largest size class supported. */
#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA)
/* Maximum number of regions in one slab. */
#ifndef CONFIG_LG_SLAB_MAXREGS
# define SC_LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
#else
# if CONFIG_LG_SLAB_MAXREGS < (LG_PAGE - SC_LG_TINY_MIN)
# error "Unsupported SC_LG_SLAB_MAXREGS"
# else
# define SC_LG_SLAB_MAXREGS CONFIG_LG_SLAB_MAXREGS
# endif
#endif
#define SC_SLAB_MAXREGS (1U << SC_LG_SLAB_MAXREGS)
typedef struct sc_s sc_t;
struct sc_s {
/* Size class index, or -1 if not a valid size class. */
......@@ -321,10 +344,11 @@ struct sc_data_s {
sc_t sc[SC_NSIZES];
};
void sc_data_init(sc_data_t *data);
/*
* Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
* Otherwise, does its best to accommodate the request.
*/
void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end,
int pgs);
......
#ifndef JEMALLOC_INTERNAL_SEC_H
#define JEMALLOC_INTERNAL_SEC_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/pai.h"
/*
* Small extent cache.
*
* This includes some utilities to cache small extents. We have a per-pszind
* bin with its own list of extents of that size. We don't try to do any
* coalescing of extents (since it would in general require cross-shard locks or
* knowledge of the underlying PAI implementation).
*/
/*
* For now, this is just one field; eventually, we'll probably want to get more
* fine-grained data out (like per-size class statistics).
*/
typedef struct sec_stats_s sec_stats_t;
struct sec_stats_s {
/* Sum of bytes_cur across all shards. */
size_t bytes;
};
static inline void
sec_stats_accum(sec_stats_t *dst, sec_stats_t *src) {
dst->bytes += src->bytes;
}
/* A collection of free extents, all of the same size. */
typedef struct sec_bin_s sec_bin_t;
struct sec_bin_s {
/*
* When we fail to fulfill an allocation, we do a batch-alloc on the
* underlying allocator to fill extra items, as well. We drop the SEC
* lock while doing so, to allow operations on other bins to succeed.
* That introduces the possibility of other threads also trying to
* allocate out of this bin, failing, and also going to the backing
* allocator. To avoid a thundering herd problem in which lots of
* threads do batch allocs and overfill this bin as a result, we only
* allow one batch allocation at a time for a bin. This bool tracks
* whether or not some thread is already batch allocating.
*
* Eventually, the right answer may be a smarter sharding policy for the
* bins (e.g. a mutex per bin, which would also be more scalable
* generally; the batch-allocating thread could hold it while
* batch-allocating).
*/
bool being_batch_filled;
/*
* Number of bytes in this particular bin (as opposed to the
* sec_shard_t's bytes_cur). This isn't user visible or reported in
* stats; rather, it allows us to quickly determine the change in the
* centralized counter when flushing.
*/
size_t bytes_cur;
edata_list_active_t freelist;
};
typedef struct sec_shard_s sec_shard_t;
struct sec_shard_s {
/*
* We don't keep per-bin mutexes, even though that would allow more
* sharding; this allows global cache-eviction, which in turn allows for
* better balancing across free lists.
*/
malloc_mutex_t mtx;
/*
* A SEC may need to be shut down (i.e. flushed of its contents and
* prevented from further caching). To avoid tricky synchronization
* issues, we just track enabled-status in each shard, guarded by a
* mutex. In practice, this is only ever checked during brief races,
* since the arena-level atomic boolean tracking HPA enabled-ness means
* that we won't go down these pathways very often after custom extent
* hooks are installed.
*/
bool enabled;
sec_bin_t *bins;
/* Number of bytes in all bins in the shard. */
size_t bytes_cur;
/* The next pszind to flush in the flush-some pathways. */
pszind_t to_flush_next;
};
typedef struct sec_s sec_t;
struct sec_s {
pai_t pai;
pai_t *fallback;
sec_opts_t opts;
sec_shard_t *shards;
pszind_t npsizes;
};
bool sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
const sec_opts_t *opts);
void sec_flush(tsdn_t *tsdn, sec_t *sec);
void sec_disable(tsdn_t *tsdn, sec_t *sec);
/*
* Morally, these two stats methods probably ought to be a single one (and the
* mutex_prof_data ought to live in the sec_stats_t. But splitting them apart
* lets them fit easily into the pa_shard stats framework (which also has this
* split), which simplifies the stats management.
*/
void sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats);
void sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec,
mutex_prof_data_t *mutex_prof_data);
/*
* We use the arena lock ordering; these are acquired in phase 2 of forking, but
* should be acquired before the underlying allocator mutexes.
*/
void sec_prefork2(tsdn_t *tsdn, sec_t *sec);
void sec_postfork_parent(tsdn_t *tsdn, sec_t *sec);
void sec_postfork_child(tsdn_t *tsdn, sec_t *sec);
#endif /* JEMALLOC_INTERNAL_SEC_H */
#ifndef JEMALLOC_INTERNAL_SEC_OPTS_H
#define JEMALLOC_INTERNAL_SEC_OPTS_H
/*
* The configuration settings used by an sec_t. Morally, this is part of the
* SEC interface, but we put it here for header-ordering reasons.
*/
typedef struct sec_opts_s sec_opts_t;
struct sec_opts_s {
/*
* We don't necessarily always use all the shards; requests are
* distributed across shards [0, nshards - 1).
*/
size_t nshards;
/*
* We'll automatically refuse to cache any objects in this sec if
* they're larger than max_alloc bytes, instead forwarding such objects
* directly to the fallback.
*/
size_t max_alloc;
/*
* Exceeding this amount of cached extents in a shard causes us to start
* flushing bins in that shard until we fall below bytes_after_flush.
*/
size_t max_bytes;
/*
* The number of bytes (in all bins) we flush down to when we exceed
* max_bytes. We want this to be less than max_bytes, because
* otherwise we could get into situations where a shard undergoing
* net-deallocation keeps bytes_cur very near to max_bytes, so that
* most deallocations get immediately forwarded to the underlying PAI
* implementation, defeating the point of the SEC.
*/
size_t bytes_after_flush;
/*
* When we can't satisfy an allocation out of the SEC because there are
* no available ones cached, we allocate multiple of that size out of
* the fallback allocator. Eventually we might want to do something
* cleverer, but for now we just grab a fixed number.
*/
size_t batch_fill_extra;
};
#define SEC_OPTS_DEFAULT { \
/* nshards */ \
4, \
/* max_alloc */ \
(32 * 1024) < PAGE ? PAGE : (32 * 1024), \
/* max_bytes */ \
256 * 1024, \
/* bytes_after_flush */ \
128 * 1024, \
/* batch_fill_extra */ \
0 \
}
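As an illustrative sketch (not part of the commit), a caller could start from SEC_OPTS_DEFAULT and override individual fields before handing the options to sec_init(); the particular values and the surrounding variables (tsdn, sec, base, fallback_pai) are assumptions:

/* Sketch: tune the small extent cache before initialization. */
sec_opts_t opts = SEC_OPTS_DEFAULT;
opts.nshards = 8;               /* more shards, less lock contention */
opts.max_alloc = 64 * 1024;     /* cache extents up to 64 KiB */
opts.max_bytes = 1024 * 1024;   /* start flushing past 1 MiB per shard */
opts.bytes_after_flush = 512 * 1024;
if (sec_init(tsdn, &sec, base, fallback_pai, &opts)) {
	/* Initialization failed; fall back to using fallback_pai directly. */
}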
#endif /* JEMALLOC_INTERNAL_SEC_OPTS_H */
#ifndef JEMALLOC_INTERNAL_SLAB_DATA_H
#define JEMALLOC_INTERNAL_SLAB_DATA_H
#include "jemalloc/internal/bitmap.h"
typedef struct slab_data_s slab_data_t;
struct slab_data_s {
/* Per region allocated/deallocated bitmap. */
bitmap_t bitmap[BITMAP_GROUPS_MAX];
};
#endif /* JEMALLOC_INTERNAL_SLAB_DATA_H */
......@@ -11,7 +11,8 @@
OPTION('b', bins, true, false) \
OPTION('l', large, true, false) \
OPTION('x', mutex, true, false) \
OPTION('e', extents, true, false) \
OPTION('h', hpa, config_stats, false)
enum {
#define OPTION(o, v, d, s) stats_print_option_num_##v,
......@@ -24,8 +25,30 @@ enum {
extern bool opt_stats_print;
extern char opt_stats_print_opts[stats_print_tot_num_options+1];
/* Utilities for stats_interval. */
extern int64_t opt_stats_interval;
extern char opt_stats_interval_opts[stats_print_tot_num_options+1];
#define STATS_INTERVAL_DEFAULT -1
/*
* Batch-increment the counter to reduce synchronization overhead. Each thread
* merges after (interval >> LG_BATCH_SIZE) bytes of allocations; also limit the
* BATCH_MAX for accuracy when the interval is huge (which is expected).
*/
#define STATS_INTERVAL_ACCUM_LG_BATCH_SIZE 6
#define STATS_INTERVAL_ACCUM_BATCH_MAX (4 << 20)
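A quick numeric illustration (editorial aside; the min() capping itself happens in the stats implementation, the macros above only supply the constants):

/*
 * Example: opt_stats_interval = 1 GiB -> per-thread batch =
 *   min(1 GiB >> 6, STATS_INTERVAL_ACCUM_BATCH_MAX) = min(16 MiB, 4 MiB) = 4 MiB.
 * opt_stats_interval = 64 MiB -> min(1 MiB, 4 MiB) = 1 MiB.
 */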
/* Only accessed by thread event. */
uint64_t stats_interval_new_event_wait(tsd_t *tsd);
uint64_t stats_interval_postponed_event_wait(tsd_t *tsd);
void stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed);
/* Implements je_malloc_stats_print. */
void stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts);
bool stats_boot(void);
void stats_prefork(tsdn_t *tsdn);
void stats_postfork_parent(tsdn_t *tsdn);
void stats_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_INTERNAL_STATS_H */
......@@ -22,6 +22,12 @@
* size that would result from such an allocation.
*/
/* Page size index type. */
typedef unsigned pszind_t;
/* Size class index type. */
typedef unsigned szind_t;
/*
* sz_pind2sz_tab encodes the same information as could be computed by
* sz_pind2sz_compute().
......@@ -39,34 +45,62 @@ extern size_t sz_index2size_tab[SC_NSIZES];
*/
extern uint8_t sz_size2index_tab[];
/*
* Padding for large allocations: PAGE when opt_cache_oblivious == true (to
* enable cache index randomization); 0 otherwise.
*/
extern size_t sz_large_pad;
extern void sz_boot(const sc_data_t *sc_data, bool cache_oblivious);
JEMALLOC_ALWAYS_INLINE pszind_t
sz_psz2ind(size_t psz) {
assert(psz > 0);
if (unlikely(psz > SC_LARGE_MAXCLASS)) {
return SC_NPSIZES;
}
/* x is the lg of the first base >= psz. */
pszind_t x = lg_ceil(psz);
/*
 * sc.h introduces a lot of size classes. These size classes are divided
 * into different size class groups. There is a very special size class
 * group, each size class in or after it is an integer multiple of PAGE.
 * We call it first_ps_rg; it means "first page size regular group". The
 * range of first_ps_rg is (base, base * 2], and base == PAGE *
 * SC_NGROUP. off_to_first_ps_rg begins from 1, instead of 0, e.g.
 * off_to_first_ps_rg is 1 when psz is (PAGE * SC_NGROUP + 1).
 */
pszind_t off_to_first_ps_rg = (x < SC_LG_NGROUP + LG_PAGE) ?
0 : x - (SC_LG_NGROUP + LG_PAGE);
/*
 * Same as sc_s::lg_delta.
 * Delta for off_to_first_ps_rg == 1 is PAGE;
 * for each increase in offset, it's multiplied by two.
 * Therefore, lg_delta = LG_PAGE + (off_to_first_ps_rg - 1).
 */
pszind_t lg_delta = (off_to_first_ps_rg == 0) ?
LG_PAGE : LG_PAGE + (off_to_first_ps_rg - 1);
/*
 * Let's write psz in binary, e.g. 0011 for 0x3, 0111 for 0x7.
 * The leftmost bits whose len is lg_base decide the base of psz.
 * The rightmost bits whose len is lg_delta decide (psz % delta).
 * The middle bits whose len is SC_LG_NGROUP decide ndelta.
 * ndelta is the offset to the first size class in the size class group,
 * and starts from 1.
 * If you don't know lg_base, ndelta or lg_delta, see sc.h.
 * |xxxxxxxxxxxxxxxxxxxx|------------------------|yyyyyyyyyyyyyyyyyyyyy|
 * |<-- len: lg_base -->|<-- len: SC_LG_NGROUP-->|<-- len: lg_delta -->|
 *                      |<--      ndelta      -->|
 * rg_inner_off = ndelta - 1
 * Why use (psz - 1)?
 * To handle the case: psz % (1 << lg_delta) == 0.
 */
pszind_t rg_inner_off = (((psz - 1)) >> lg_delta) & (SC_NGROUP - 1);
pszind_t base_ind = off_to_first_ps_rg << SC_LG_NGROUP;
pszind_t ind = base_ind + rg_inner_off;
return ind;
}
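The following standalone sketch is an editorial addition, not jemalloc code: it re-derives the same index arithmetic with LG_PAGE and SC_LG_NGROUP hard-coded to 12 and 2, and with lg_ceil inlined via a GCC/Clang builtin, purely to make the mapping tangible.

#include <assert.h>
#include <stddef.h>

/* Sketch: the psz-to-index math for 4 KiB pages and 4-size groups. */
static unsigned
psz2ind_sketch(size_t psz) {
	const unsigned lg_page = 12, lg_ngroup = 2;
	/* lg_ceil(psz), via count-leading-zeros. */
	unsigned x = (psz <= 1) ? 0 : (unsigned)(64 - __builtin_clzll(psz - 1));
	unsigned off = (x < lg_ngroup + lg_page) ? 0 : x - (lg_ngroup + lg_page);
	unsigned lg_delta = (off == 0) ? lg_page : lg_page + (off - 1);
	unsigned inner = (unsigned)(((psz - 1) >> lg_delta) & ((1u << lg_ngroup) - 1));
	return (off << lg_ngroup) + inner;
}

int
main(void) {
	assert(psz2ind_sketch(4096) == 0);   /* PAGE              -> index 0 */
	assert(psz2ind_sketch(16384) == 3);  /* 4 * PAGE          -> index 3 */
	assert(psz2ind_sketch(16385) == 4);  /* rounds up to 20 KiB -> index 4 */
	assert(psz2ind_sketch(40960) == 8);  /* 10 * PAGE (40 KiB) -> index 8 */
	return 0;
}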
......@@ -152,10 +186,15 @@ sz_size2index_compute(size_t size) {
}
JEMALLOC_ALWAYS_INLINE szind_t
sz_size2index_lookup_impl(size_t size) {
assert(size <= SC_LOOKUP_MAXCLASS);
return sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1)
>> SC_LG_TINY_MIN];
}
JEMALLOC_ALWAYS_INLINE szind_t
sz_size2index_lookup(size_t size) {
szind_t ret = sz_size2index_lookup_impl(size);
assert(ret == sz_size2index_compute(size));
return ret;
}
......@@ -194,9 +233,14 @@ sz_index2size_compute(szind_t index) {
}
}
JEMALLOC_ALWAYS_INLINE size_t
sz_index2size_lookup_impl(szind_t index) {
return sz_index2size_tab[index];
}
JEMALLOC_ALWAYS_INLINE size_t
sz_index2size_lookup(szind_t index) {
size_t ret = sz_index2size_lookup_impl(index);
assert(ret == sz_index2size_compute(index));
return ret;
}
......@@ -207,6 +251,12 @@ sz_index2size(szind_t index) {
return sz_index2size_lookup(index);
}
JEMALLOC_ALWAYS_INLINE void
sz_size2index_usize_fastpath(size_t size, szind_t *ind, size_t *usize) {
*ind = sz_size2index_lookup_impl(size);
*usize = sz_index2size_lookup_impl(*ind);
}
JEMALLOC_ALWAYS_INLINE size_t
sz_s2u_compute(size_t size) {
if (unlikely(size > SC_LARGE_MAXCLASS)) {
......@@ -266,7 +316,7 @@ sz_sa2u(size_t size, size_t alignment) {
assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
/* Try for a small size class. */
if (size <= SC_SMALL_MAXCLASS && alignment <= PAGE) {
/*
* Round size up to the nearest multiple of alignment.
*
......@@ -315,4 +365,7 @@ sz_sa2u(size_t size, size_t alignment) {
return usize;
}
size_t sz_psz_quantize_floor(size_t size);
size_t sz_psz_quantize_ceil(size_t size);
#endif /* JEMALLOC_INTERNAL_SIZE_H */
......@@ -2,9 +2,16 @@
#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
extern bool opt_tcache;
extern size_t opt_tcache_max;
extern ssize_t opt_lg_tcache_nslots_mul;
extern unsigned opt_tcache_nslots_small_min;
extern unsigned opt_tcache_nslots_small_max;
extern unsigned opt_tcache_nslots_large;
extern ssize_t opt_lg_tcache_shift;
extern size_t opt_tcache_gc_incr_bytes;
extern size_t opt_tcache_gc_delay_bytes;
extern unsigned opt_lg_tcache_flush_small_div;
extern unsigned opt_lg_tcache_flush_large_div;
/*
* Number of tcache bins. There are SC_NBINS small-object bins, plus 0 or more
......@@ -15,6 +22,8 @@ extern unsigned nhbins;
/* Maximum cached size class. */
extern size_t tcache_maxclass;
extern cache_bin_info_t *tcache_bin_info;
/*
* Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
* usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
......@@ -26,23 +35,26 @@ extern size_t tcache_maxclass;
extern tcaches_t *tcaches;
size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
cache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
szind_t binind, unsigned rem);
void tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *bin,
szind_t binind, bool is_small);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
tcache_t *tcache, arena_t *arena);
tcache_t *tcache_create_explicit(tsd_t *tsd);
void tcache_cleanup(tsd_t *tsd);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn, base_t *base);
void tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
tcache_t *tcache, arena_t *arena);
void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
void tcache_postfork_child(tsdn_t *tsdn);
......@@ -50,4 +62,14 @@ void tcache_flush(tsd_t *tsd);
bool tsd_tcache_data_init(tsd_t *tsd);
bool tsd_tcache_enabled_data_init(tsd_t *tsd);
void tcache_assert_initialized(tcache_t *tcache);
/* Only accessed by thread event. */
uint64_t tcache_gc_new_event_wait(tsd_t *tsd);
uint64_t tcache_gc_postponed_event_wait(tsd_t *tsd);
void tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed);
uint64_t tcache_gc_dalloc_new_event_wait(tsd_t *tsd);
uint64_t tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd);
void tcache_gc_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed);
#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
......@@ -3,9 +3,9 @@
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"
static inline bool
......@@ -27,28 +27,29 @@ tcache_enabled_set(tsd_t *tsd, bool enabled) {
tsd_slow_update(tsd);
}
JEMALLOC_ALWAYS_INLINE bool
tcache_small_bin_disabled(szind_t ind, cache_bin_t *bin) {
assert(ind < SC_NBINS);
bool ret = (cache_bin_info_ncached_max(&tcache_bin_info[ind]) == 0);
if (ret && bin != NULL) {
/* small size class but cache bin disabled. */
assert(ind >= nhbins);
assert((uintptr_t)(*bin->stack_head) ==
cache_bin_preceding_junk);
}
return ret;
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, szind_t binind, bool zero, bool slow_path) {
void *ret;
bool tcache_success;
assert(binind < SC_NBINS);
cache_bin_t *bin = &tcache->bins[binind];
ret = cache_bin_alloc(bin, &tcache_success);
assert(tcache_success == (ret != NULL));
if (unlikely(!tcache_success)) {
bool tcache_hard_success;
......@@ -56,6 +57,13 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
if (unlikely(arena == NULL)) {
return NULL;
}
if (unlikely(tcache_small_bin_disabled(binind, bin))) {
/* stats and zero are handled directly by the arena. */
return arena_malloc_hard(tsd_tsdn(tsd), arena, size,
binind, zero);
}
tcache_bin_flush_stashed(tsd, tcache, bin, binind,
/* is_small */ true);
ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
bin, binind, &tcache_hard_success);
......@@ -65,38 +73,14 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
}
assert(ret);
if (unlikely(zero)) {
size_t usize = sz_index2size(binind);
assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
memset(ret, 0, usize);
}
if (config_stats) {
bin->tstats.nrequests++;
}
return ret;
}
......@@ -104,12 +88,11 @@ JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
szind_t binind, bool zero, bool slow_path) {
void *ret;
bool tcache_success;
assert(binind >= SC_NBINS && binind < nhbins);
cache_bin_t *bin = &tcache->bins[binind];
ret = cache_bin_alloc(bin, &tcache_success);
assert(tcache_success == (ret != NULL));
if (unlikely(!tcache_success)) {
/*
......@@ -120,96 +103,79 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
if (unlikely(arena == NULL)) {
return NULL;
}
tcache_bin_flush_stashed(tsd, tcache, bin, binind,
/* is_small */ false);
ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
if (ret == NULL) {
return NULL;
}
} else {
if (unlikely(zero)) {
size_t usize = sz_index2size(binind);
assert(usize <= tcache_maxclass);
memset(ret, 0, usize);
}
if (config_stats) {
bin->tstats.nrequests++;
}
}
return ret;
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
bool slow_path) {
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS);
cache_bin_t *bin = &tcache->bins[binind];
/*
* Not marking the branch unlikely because this is past free_fastpath()
* (which handles the most common cases), i.e. at this point it's often
* uncommon cases.
*/
if (cache_bin_nonfast_aligned(ptr)) {
/* Junk unconditionally, even if bin is full. */
san_junk_ptr(ptr, sz_index2size(binind));
if (cache_bin_stash(bin, ptr)) {
return;
}
assert(cache_bin_full(bin));
/* Bin full; fall through into the flush branch. */
}
if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
if (unlikely(tcache_small_bin_disabled(binind, bin))) {
arena_dalloc_small(tsd_tsdn(tsd), ptr);
return;
}
cache_bin_sz_t max = cache_bin_info_ncached_max(
&tcache_bin_info[binind]);
unsigned remain = max >> opt_lg_tcache_flush_small_div;
tcache_bin_flush_small(tsd, tcache, bin, binind, remain);
bool ret = cache_bin_dalloc_easy(bin, ptr);
assert(ret);
}
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
bool slow_path) {
assert(tcache_salloc(tsd_tsdn(tsd), ptr)
> SC_SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
cache_bin_t *bin = &tcache->bins[binind];
if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
unsigned remain = cache_bin_info_ncached_max(
&tcache_bin_info[binind]) >> opt_lg_tcache_flush_large_div;
tcache_bin_flush_large(tsd, tcache, bin, binind, remain);
bool ret = cache_bin_dalloc_easy(bin, ptr);
assert(ret);
}
}
JEMALLOC_ALWAYS_INLINE tcache_t *
......