Commit 6d23d3ac authored by Oran Agra's avatar Oran Agra

Squashed 'deps/jemalloc/' changes from ea6b3e973..54eaed1d8

54eaed1d8 Merge branch 'dev'
304c91982 Update ChangeLog for 5.3.0.
8cb814629 Make the default option of zero realloc match the system allocator.
66c889500 Make test/unit/background_thread_enable more conservative.
a7d73dd4c Update TUNING.md to include the new tcache_max option.
254b01191 Small doc tweak of opt.trust_madvise.
f5e840bbf Minor typo fix in doc.
ceca07d2c Correct the name of stats.mutexes.prof_thds_data in doc.
391bad4b9 Avoid abort() in test/integration/cpp/infallible_new_true.
9a242f16d fix some typos
0e29ad4ef Rename zero_realloc option "strict" to "alloc".
5841b6dbe Update FreeBSD image to 12.3 for cirrus ci.
ed5fc14b2 Use volatile to workaround buffer overflow false positives.
25517b852 Reoreder TravisCI jobs to optimize CI time
8a49b62e7 Enable TravisCI for Windows
fdb6c1016 Add FreeBSD to TravisCI
a93931537 Do not disable SEC by default for 64k pages platforms
eaaa368ba Add comments and use meaningful vars in sz_psz2ind.
5bf03f8ce Implement PAGE_FLOOR macro
52631c90f Fix size class calculation for sec
7ae0f15c5 Add a default page size when cross-compile for Apple M1.
eb65d1b07 Fix FreeBSD system jemalloc TSD cleanup
78b58379c Fix possible "nmalloc >= ndalloc" assertion.
ca709c313 Fix failed assertion due to racy memory access
063d134ae Properly detect background thread support on Darwin.
a4e81221c Document 'make uninstall'
20f9802e4 Avoid overflow warnings in test/unit/safety_check.
8c59c44ff Add a dependency checking step at the end of malloc_conf_init.
efc539c04 Initialize prof_leak during prof init.
002f0e939 Disable TravisCI jobs generation for Windows
01a293fc0 Add Windows to TravisCI
b798fabdf Add prof_leak_error option
eafd2ac39 Forbid spaces in prefix and exec_prefix
36a09ba2c Forbid spaces in install suffix
640c3c72e Add support for 'make uninstall'
f15d8f3b4 Echo installed files via verbose 'install' command
eb196815d Avoid calculating size of size class twice & delete sc_data_global.
011449f17 Fix doc build with install-suffix.
8b49eb132 Fix the HELP_STRING of --enable-doc.
ddb170b1d Simplify arena_migrate() to take arena_t* instead of indices.
648b3b9f7 Lower the num_threads in the stress test of test/unit/prof_recent
d66162e03 Fix the extent state checking on the merge error path.
c9946fa7e FreeBSD also needs the OS-X "don't declare system functions as nothrow" fix since it also has jemalloc in the base system
89fe8ee6b Use the isb instruction instead of yield for spin locks on arm
6230cc88b Add background thread sleep retry in test/unit/hpa_background_thread
61978bbe6 Purge all if the last thread migrated away from an arena.
c91e62dd3 #include <features.h> as requested
18510020e Fix symbol conflict with musl libc
f509703af Fix two conversion warnings in tcache.
067c2da07 Fix unnecessary returns in san_(un)guard_pages_two_sided.
d660683d3 Fix test config of lg_san_uaf_align.
eabe88916 Rename full_position to low_bound in cache_bin.h.
dfdd7562f Rename san_enabled() to san_guard_enabled().
01d61a3c6 Fix a conversion warning.
8b34a788b Fix an used-uninitialized warning (false positive).
e491cef9a Add stats for stashed bytes in tcache.
b75822bc6 Implement use-after-free detection using junk and stash.
06aac61c4 Split the core logic of tcache flush into a separate function.
d038160f3 Fix shadowed variable usage.
bd70d8fc0 Add the profiling settings for tests explicit.
e491df1d2 Fix warnings when using autoheader.
60b9637cc Only invoke malloc_cpu_count_is_deterministic() when necessary.
837b37c4c Fix the time-since computation in HPA.
310af725b Add nstime_ns_since which obtains the duration since the input time.
cafe9a315 Disable percpu arena in case of non deterministic CPU count
bb5052ce9 Fix base_ehooks_get_for_metadata
9015e129b Update visual studio projects
d90655390 San: Create a function for committing and zeroing
800ce49c1 San: Bump alloc frequently reused guarded allocations
f56f5b993 Pass 'frequent_reuse' hint to PAI
2c70e8d35 Rename 'arena_decay' to 'arena_util'
0f6da1257 San: Implement bump alloc
34b00f896 San: Avoid running san tests with prof enabled
62f9c54d2 San: Rename 'guard' to 'san'
d9bbf539f CI: Refactor gen_travis.py
7dcf77809 Mark slab as true on sized dealloc fast path.
af6ee27c0 Enforce abort_conf:true when malloc_conf is not fully recognized.
113e8e68e freebsd 14 build fix proposal.
3b3257a70 Correct opt.prof_leak documentation
cdabe908d Track the initialized state of nstime_t on debug build.
400c59895 Fix uninitialized nstime reading / updating on the stack in hpa.
8b81d3f21 Fix the initialization of last_event in thread event init.
6bdb4f5ab Check prof_active in addtion to opt_prof during batch_alloc().
37342a4d3 Add ctl interface for experimental_infallible_new.
6cb585b13 San: Unguard guarded slabs during arena destruction
b6a7a535b Optimize away a branch on the free fastpath.
4d56aaeca Optimize away the tsd_fast() check on free fastpath.
26f5257b8 Remove declaration of an undefined function
215961541 Add new architecture loongarch.
8daac7958 Redefine functions with test hooks only for tests
c9ebff0fd Initialize deferred_work_generated
912324a1a Add debug check outside of the loop in hpa_alloc_batch.
cf9724531 Darwin malloc_size override support proposal.
ab0f1604b Delay the atexit call to prof_log_start().
11b6db744 CPU affinity on BSD platforms support.
83f329402 Small refactors around 7bb05e0.
3c4b717ff Remove unused header base_structs.h.
deb8e62a8 Implement guard pages.
7bb05e04b add experimental.arenas_create_ext mallctl
a9031a097 Allow setting a dump hook
f7d46b811 Allow setting custom backtrace hook
523cfa55c Guard prof related mallctl with opt_prof.
6e848a005 Remove opt_background_thread_hpa_interval_max_ms
8229cc77c Wake up background threads on demand
97da57c13 HPA: Add min_purge_interval_ms option
b8b8027f1 Allow PAI to calculate time until deferred work
26140dd24 Reject --enable-prof-libunwind without --enable-prof
e5062e9fb Makefile.in: make sure doc generated before install
8b24cb8fd Don't assume initialized arena in the default alloc hook.
c01a885e9 HPA: Correctly calculate retained pages
2c625d5cd Fix warnings when compiled with clang
9d02bdc88 Port gen_run_tests.py to python3
5884a076f Rename prof.dump_prefix to prof.prefix
6a0160071 Add Cirrus CI testing matrix
f58064b93 Verify that HPA is used before calling its functions
27f71242b Mutex: Tweak internal spin count.
6f41ba55e Mutex: Make spin count configurable.
dae24589b PH: Insert-below-min fast-path.
40d53e007 ph: Add aux-list counting and pre-merging.
dcb7b83fa Eset: Cache summary information for heap edatas.
252e0942d Eset: Pull per-pszind data into structs.
dc0a4b8b2 Edata: Pull out comparison fields into a summary.
0170dd198 Edata: Fix a couple typos.
08a4cc096 Pairing heap: inline functions instead of macros.
92a1e38f5 edata_cache: Allow unbounded fast caching.
d93eef2f4 HPA: Introduce a redesigned hpa_central_t.
e09eac1d4 Remove hpa_central.
c88fe355e Add unit tests for decay
aaea4fd1e Add more documentation to decay.c
4b633b9a8 Clean up background thread sleep computation
6630c5989 HPA: Hugification hysteresis.
113938b6f HPA: Pull out a hooks type.
1d4a7666d HPA: Do deferred operations on background threads.
583284f2d Add HPA deferral functionality.
ace329d11 HPA batch dalloc: Just do one deferred work check.
47d8a7e6b psset: Purge empty slabs first.
41fd56605 HPA: Purge across retained extents.
347523517 PAI: Fix a typo.
9c42ed2d1 Travis: Don't test "clang" on OS X.
d202218e8 HPA: Fix typos with big performance implications.
de033f56c mpsc_queue: Add module.
4452a4812 Add opt.experimental_infallible_new.
0689448b1 Travis: Unbreak the builds.
4fb93a18e extent_can_acquire_neighbor typo fix
2381efab5 ARC: add Minimum allocation alignment
2c0f4c2ac Fix typo in configure.ac: experimetal -> experimental
36c6bfb96 SEC: Allow arbitrarily many shards, cached sizes.
11beab38b Added --debug-syms-by-id option
08089589f Fix an interaction between the oversize_threshold test and bgthds.
541793821 Red-black tree: add summarize/filter.
b2c08ef2e RB unit tests: don't test reentrantly.
aea91b8c3 Clean up some minor data structure inconsistencies
1f688490e Stats: Fix a printing bug when hpa_dirty_mult = -1
4f7cb3a41 Sized deallocation: fix a typo.
12cd13cd4 Fix thread.name/prof_sys_thread_name interaction
304cdbb13 Fix a prof_recent/prof_sys_thread_name interaction
9b523c6c1 Refactor the locking in extent_recycle().
ce68f326b Avoid the release & re-acquire of the ecache locks around the merge hook.
7dc77527b Delete the mutex_pool module.
03d95cba8 Remove the unnecessary arena_ind_set in base_alloc_edata().
3093d9455 Move the edata mergeability related functions to extent.h.
7c964b035 Add rtree_write_range(): writing the same content to multiple leaf elements.
add636596 Stop checking head state in the merge hook.
49b7d7f0a Passing down the original edata on the expand path.
178493968 Use rtree tracked states to protect edata outside of ecache locks.
9ea235f8f Add witness_assert_positive_depth_to_rank().
4d8c22f9a Store edata->state in rtree leaf and make edata_t 128B aligned.
70d1541c5 Track extent is_head state in rtree leaf.
862219e46 Add quiescence sync before deleting base during arena_destroy.
a137a6825 Remove redundant declaration, pac_retain_grow_limit_get_set was declared twice in pac.h
2ae1ef7db Fix doc large size 54 KiB error
61afb6a40 Fix locking on arena_i_destroy_ctl().
9193ea224 Cirrus: fix build.
391307714 Mark head state during dss alloc.
11127240c Remove redundant enable-debug definition in configure.
22be724af Set is_head in extent_alloc_wrapper w/ retain.
73ca4b8ef HPA: Use dirtiest-first purging.
0f6c420f8 HPA: Make purging/hugifying more principled.
6bddb92ad psset: Rename "bitmap" to "pageslab_bitmap".
154aa5fcc Use the flat bitmap for eset and psset bitmaps.
271a676dc hpdata: early bailout for longest free range.
d21d5b46b Edata: Move sn into its own field.
fb327368d SEC: Expand option configurability.
ce9386370 HPA: Implement batch allocation.
cdae6706a SEC: Use batch fills.
480f3b11c Add a batch allocation interface to the PAI.
bf448d7a5 SEC: Reduce lock hold times.
1944ebbe7 HPA: Implement batch deallocation.
f47b4c2cd PAI/SEC: Add a dalloc_batch function.
4b8870c7d SEC: Fix a comment typo.
cde7097ec Update INSTALL.md to mention 'autoconf'
a11be5033 Implement opt.cache_oblivious.
8c5e5f50a Fix stats for "tcache_max" (was "lg_tcache_max")
041145c27 Report the correct and wrong sizes on sized dealloc bug detection.
f3b2668b3 Report the offending pointer on sized dealloc bug detection.
edbfe6912 Inline malloc fastpath into operator new.
79f81a373 HPA: Make dirty_mult configurable.
32dd15379 HPA: Make dehugification threshold configurable.
4790db15e HPA: make the hugification threshold configurable.
b3df80bc7 Pull HPA options into a containing struct.
bdb7307ff fxp: Add FXP_INIT_PERCENT
caef4c286 FXP: add fxp_mul_frac.
56e85c0e4 HPA: Use a whole-shard purging heuristic.
dc886e560 hpdata: Return the number of pages to be purged.
9fd9c876b psset: keep aggregate stats.
da63f23e6 HPA: Track pending purges/hugifies in the psset.
0ea3d6307 CTL, Stats: report HPA empty slab stats.
bf64557ed Move empty slab tracking to the psset.
99fc0717e psset: Reconceptualize insertion/removal.
061cabb71 HPA stats: report retained instead of inactive.
d3e5ea03c HPA: Track dirty stats.
68a1666e9 hpdata: Rename "dirty" to "touched".
be0d7a53f HPA: Don't track inactive pages.
55e0f60ca psset stats: Simplify handling.
94cd9444c HPA: Some minor reformattings.
b25ee5d88 HPA: Add purge stats.
746ea3de6 HPA stats: Allow some derived stats.
30b9e8162 HPA: Generalize purging.
70692cfb1 hpdata: Add state changing helpers.
9b75808be flat bitmap: Add a bitwise and/or/not.
2ae966222 hpdata: track per-page dirty state.
ff4086aa6 hpdata: count active pages instead of free ones.
3624dd42f hpdata: Add a comment for hpdata_consistent.
20140629b Bin: Move stats closer to the mutex.
c259323ab Use ticker_geom_t for arena tcache decay.
8edfc5b17 Add ticker_geom_t.
396732981 Arena: share bin offsets in a global.
2fcbd1811 Cache bin: Don't reverse flush order.
4c46e1136 Cache an arena's index in the arena.
229994a20 Tcache flush: keep common path state in registers.
31a629c3d Tcache flush: prefetch edata contents.
9f9247a62 Tcache fluhing: increase cache miss parallelism.
181ba7fd4 Tcache flush: Add an emap "batch lookup" path.
c007c537f Tcache flush: Unify edata lookup path.
35a855260 Mac OS: Tag mapped pages.
f6699803e Fix duration in prof log
a943172b7 Add runtime detection for MADV_DONTNEED zeroes pages (mostly for qemu)
2e3104ba0 Update config.{sub,guess} to support support-aarch64-apple-darwin as a target
a011c4c22 cache_bin: Separate out local and remote accesses.
14d689c0f Add prof stats mutex stats
9f71b5779 Output prof stats in stats print
1f1a0231e Split macros for initializing stats headers
4352cbc21 Add alignment tests for prof stats
54f3351f1 Add mallctl for prof stats fetching
40fa4d29d Track per size class internal fragmentation
afa489c3c Record request size in prof info
f9bb8dede Un-force-inline do_rallocx.
a9fa2defd Add JEMALLOC_COLD, and mark some functions cold.
5d8e70ab2 prof_recent: cassert(config_prof) more often.
83cad746a prof_log: cassert(config_prof) in public functions
526180b76 Extent.c: Avoid an rtree NULL-check.
b35ac00d5 Do not bump to large size for page aligned request
8a56d6b63 Add last-N mutex stats
22d62d8cb Handle ending gap properly for HPA stats
6c5a3a24d Omit bin stats rows with no data
ea013d8fa Enforce realloc sizing stability
74bd63b20 Optimize stats print using partial name-to-mib
4557c0a67 Enable ctl on partial mib and partial name
006dd0414 Add partial name-to-mib functionality
f2e1a5be7 Do not fail on partial ctl path for ctl_nametomib()
6ab181d2b Extract node lookup given mib input
3a627b967 No need to record all nodes in ctl_lookup()
91e006c4c Enable ctl_lookup() to start from arbitrary node
063a767ff Define JEMALLOC_HAS_ALLOCA_H for QNX
4e3fe218e Use posix_madvise to purge pages when available
26c1dc5a3 Support AutoConf for posix_madvise and POSIX_MADV_DONTNEED
96a59c3bb Fix recursive malloc during bootstrap on QNX
986cbe488 Disable JEMALLOC_TLS for QNX
1e3b8636f HPA: Remove unused malloc_conf options.
e82771807 Cache mallctl mib for batch allocation stress test
0dfdd31e0 Add tiny batch size to batch allocation stress test
9522ae41d Move n_search outside of assert as reported by static analyzer
a559caf74 hpdata: Strengthen assertions.
f51948d9e psset unit test: fix a bug.
54c94c167 flat bitmap: add scount / ucount functions.
e6c057ad3 fb: implement assign in terms of a visitor.
734e72ce8 bit_util: Guarantee popcount's presence.
d9f7e6c66 hpdata: Add a test.
3ed0b4e8a HPA: Add an nevictions counter.
fffcefed3 malloc_conf: Clarify HPA options.
f7cf23aa4 psset: Relegate alloc/dalloc to test code.
f9299ca57 HPA: Use psset fit/insert/remove.
0971e1e4e hpdata: Use addr/size instead of begin/npages.
5228d869e psset: Use fit/insert/remove as basis functions.
089f8fa44 Move hpdata bitmap logic out of the psset.
ca30b5db2 Introduce hpdata_t.
4a15008cf HPA unit test: skip if unsupported.
43af63fff HPA: Manage whole hugepages at a time.
63677dde6 Pages: Statically detect if pages_huge may succeed
c1b2a7793 psset: Move in stats.
d0a991d47 psset: Add insert/remove functions.
d438296b1 narenas_ratio: Accept fractional values.
ecd39418a Add fxp: A fixed-point math library.
99c2d6c23 Backport jeprof --collapse for flamegraph generation
520b75fa2 utrace support with label based signature.
92e189be8 Add some comments to the batch allocation logic flow
d96e4525a Route batch allocation of small batch size to tcache
ac480136d Split out locality checking in batch allocation tests
be5e49f4f Add a batch mode for cache_bin_alloc()
4a65f3493 Fix a cache bin test
566c4a859 Slight changes to cache bin internal functions
9545c2cd3 Add sample interval to prof last-N dump
cf2549a14 Add a per-arena oversize_threshold.
4ca3d91e9 Rename geom_grow -> exp_grow.
b4c37a6e8 Rename edata_tree_t -> edata_avail_t.
95f0a77fd Detect pthread_getname_np explicitly.
b3c5690b7 Update config.{guess,sub} to 2020-11-07@77632d9
589638182 Use the edata_cache_small_t in the HPA.
03a604711 Edata cache small: rewrite.
c9757d9e3 HPA: Don't disable shards that were never started.
1b3ee7566 Add experimental.thread.activity_callback.
27ef02ca9 Android build fix proposal.
d2d941017 MADV_DO[NOT]DUMP support equivalence on FreeBSD.
180b84315 Appveyor: fix 404 errors.
ef6d51ed4 DragonFlyBSD build support.
bf72188f8 Allow opt.tcache_max to accept small size classes.
ea32060f9 SEC: Implement thread affinity.
d16849c91 psset: Do first-fit based on slab age.
634ec6f50 Edata: add an "age" field.
6599651ae PA: Use an SEC in fron of the HPA shard.
ea51e97bb Add SEC module: a small extent cache.
1964b0839 HPA: Add stats for the hpa_shard.
534504d4a HPA: add size-exclusion functionality.
484f04733 HPA: Add central mutex contention stats.
bf025d2ec HPA: Make slab sizes and maxes configurable.
1c7da3331 HPA: Tie components into a PAI implementation.
c8209150f Switch from opt.lg_tcache_max to opt.tcache_max
5ba861715 Add thread name in prof last-N records
4ef5b8b4d Add a logo to doc_internal.
5e41ff9b7 Add a hard limit on tcache max size class.
3de19ba40 Eagerly detect double free and sized dealloc bugs for large sizes.
be9548f2b Tcaches: Fix a subtle race condition.
a9aa6f6d0 Fix the alloc_ctx check in free_fastpath.
b971f7c4d Add "default" option to slab sizes.
21b70cb54 Add hpa_central module
1ed7ec369 Emap: Add emap_assert_not_mapped.
2a6ba121b PRNG test: cleanups.
9e6aa77ab PRNG: Remove atomic functionality.
051304717 PRNG: Allow a a range argument of 1.
bdb60a805 Appveyor: don't update msys2 keyring.
025d8c37c Add a script to check for clang-formattedness.
f6bbfc1e9 Add a .clang-format file.
259c5e3e8 psset: Add stats
018b162d6 Add psset: a set of pageslabs.
ed99d300b Flat bitmap: Add longest-range computation.
e03450069 Edata: rename "ranged" bit to "pai".
7ad2f7866 Avoid a -Wundef warning on LG_SLAB_MAXREGS.
40cf71a06 Remove --with-slab-maxregs options from INSTALL.md
36ebb5abe CI support for PPC64LE architecture
1541ffc76 configure: add --with-lg-slab-maxregs configure option.
d243b4ec4 Add PROFILING_INTERNALS.md
09eda2c9b Add unit tests for usize in prof recent records
b549389e4 Correct usize in prof last-N record
202f01d4f Fix szind computation in profiling
866231fc6 Do not repeat reentrancy test in profiling
20f2479ed Do not create size class tables for non-prof builds
8efcdc3f9 Move unbias data to prof_data
5e90fd006 Geom_grow: Don't keep the mutex internal.
c57494879 Geom_grow: Don't take tsdn at init.
ffe552223 Geom_grow: Move in advancing logic.
131b1b533 Rename ecache_grow -> geom_grow.
b399463fb flat_bitmap unit test: Silence a warning.
b0ffa39ca Mallctl stress test: fix a type.
753bbf184 Benchmarks: Also print ns / iter.
7b187360e IO: Support 0-padding for unsigned numbers.
32d467322 Add a mallctl speed stress test.
38867c5c1 Makefile: alphabetize stress/analyze utilities.
ab274a23b Add narenas_ratio.
9e18ae639 Config: safety checks don't imply size checks.
8f9e958e1 Add alignment stress test for rallocx
743021b63 Fix size miscalculation bug in reallocation
eaed1e39b Add sized-delete size-checking functionality.
53084cc5c Safety check: Don't directly abort.
60993697d Prof: Add prof_unbias.
81c2f841e Add a simple utility to detect profiling bias.
e032a1a1d Add a stress test for batch allocation
f6cf5eb38 Add mallctl for batch allocation API
978f830ee Add batch allocation API
c6f59e9bb Add surplus reading API for thread event lookahead
f80546895 Add zero option to arena batch allocation
49e5c2fe7 Add batch allocation from fresh slabs
2bb8060d5 Add empty test and concat for typed list
f28cc2bc8 Extract bin shard selection out of bin locking
ddb8dc4ad FB: Add range iteration support.
ceee82351 Add flat_bitmap.
7fde6ac49 Nbits: Add a couple more interesting sizes.
efeab1f49 bitset test: Pull NBITS_TAB into its own file.
22da83609 bit_util: Add fls_ functions; "find last set".
1ed0288d9 bit_util: Change ffs functions indexing.
786a27b9e CI: Update keyring.
fb347dc61 Verify output space before doing heavy work in mallctl
f5fb4e5a9 Modify mallctl output length when needed
425840204 Corrections for prof_log_start()
e6cb7a1c9 Shorten wait time for peak events
6107857b7 PA->PAC: Move in PAI implementation.
6041aaba9 PA -> PAC: Move in destruction functions.
cbf096b05 Arena: remove redundant bg inactivity check.
471eb5913 PAC: Move in decay rate setting.
6a2774719 PA->PAC: Move in decay functions.
4ee75be3a PA -> PAC: Move in decay_purge enum.
72435b0ab PA->PAC: Make extent.c forget about PA.
dee5d1c42 PA->PAC: Move in extent_sn.
739138234 PA->PAC: Move in stats.
db211eefb PAC: Move in decay.
c81e38999 PAC: Move in ecache_grow.
65803171a PAC: move in emap
7efcb946c PAC: Add an init function.
722652222 PAC: Move in edata_cache accesses.
777b0ba96 Add PAC: Page allocator classic.
1b5f632e0 Introduce PAI: Page allocator interface
3cf19c6e5 atomic: add atomic_load_sub_store
f1f4ec315 Tcache: Tweak nslots_max tuning parameter.
ae541d3fa Edata: Reserve some space for hugepages.
392f645f4 Edata: split up different list linkage uses.
129b72705 Add typed-list module.
00f06c9be enabling mpss on solaris/illumos.
c2e7a0639 No need to intercept prof_dump_header() in tests
f58ebdff7 Generalize prof_cnt_all() for testing
80d18c18c Pass prof dump parameters explicitly in prof_sys
d4259ea53 Simplify signatures for prof dump functions
5d823f3a9 Consolidate struct definitions for prof dump parameters
1f5fe3a3e Pass write callback explicitly in prof_data
4556d3c0c Define structures for prof dump parameters
1c6742e6a Migrate prof dumping to use buffered writer
dad821bb2 Move unwind to prof_sys
d128efcb6 Relocate a few prof utilities to the right modules
4736fb4fc Move file handling logic in prof_data to prof_sys
767a2e179 Move file handling logic in prof to prof_sys
03ae509f3 Create prof_sys module for reading system thread name
adfd9d7b1 Change tsdn to tsd for thread name allocation
841af2b42 Move thread name handling to prof_data module
8118056c0 Expose prof_data testing internals only in prof tests
f43ac8543 Correct prof header macro namings
c8683bee8 Unify printing for prof counts object
5d292b566 Push error handling logic out of core dumping logic
f541871f5 Reduce prof dump buffer size in debug build
354183b10 Define prof dump buffer size centrally
7455813e5 Make dump file writing replaceable in test
21e44c45d Make maps file opening replaceable in test
4bb4037db Extract utility function for opening maps file
f307b2580 Only replace the dump file opening function in test
d8cea8756 Move size inspections to test/analyze
537a4bedb Add a tool to examine random number distributions
d460333ef Improve naming for prof system thread name option
25e43c602 Witness: Make ranks an enum.
092fcac0b Remove unnecessary source files
a795b1932 Remove beginning define in source files
24bbf376c Unify arena flag reading and selection
e128b170a Do not fallback to auto arena when manual arena is requested
95a59d2f7 Unify tcache flag reading and selection
4b0c00848 Unify zero flag reading and setting
2a84f9b8f Unify alignment flag reading and computation
b7858abfc Expose prof testing internal functions
40fa6674a Fix prof timestamp conf reading
7e09a57b3 stress/sizes: Fix an off-by-one issue.
dcfa6fd50 stress/sizes: Add a couple more types.
40672b0b7 Remove duplicate logging in malloc.
4aea74327 High Resolution Timestamps for Profiling
d82a164d0 Add thread.peak.[read|reset] mallctls.
fe7108305 Add peak_t, for tracking allocator net max.
17a64fe91 Add a small program to print data structure sizes.
3e19ebd2e Add lock to protect prof last-N dumping
a835d9cf8 Make prof last-N dumping non-blocking
fc8bc4b5c Increase dump buffer for prof last-N list
264d89d64 Extract restore and async cleanup functions for prof last-N list
857ebd3da Make edata pointer on prof recent record an atomic fence
b8bdea6b2 Fix: prof_recent_alloc_max_ctl_read() does not take tsd
730658f72 Extract alloc/dalloc utility for last-N nodes
035be4486 Separate out dumping for each prof recent record
8da0896b7 Tcache: Make an integer conversion explicit.
cd28e6033 Don't warn on uniform initialization.
6cdac3c57 Tcache: Make flush fractions configurable.
7503b5b33 Stats, CTL: Expose new tcache settings.
ee72bf1cf Tcache: Add tcache gc delay option.
d338dd45d Tcache: Make incremental gc bytes configurable.
ec0b57956 Tcache: Privatize opt_lg_tcache_max default.
10b96f635 Tcache: Remove some unused gc constants.
181093173 Tcache: make slot sizing configurable.
b58dea8d1 Cache bin: expose ncached_max publicly.
634afc412 Tcache: Make size computation configurable.
97b7a9cf7 Add a fill/flush microbenchmark.
33372cbd4 cpu instruction spin wait for arm32/64
27f29e424 LQ_QUANTUM should be 4 on mips64 hardware.
eda9c2858 Edata: zero stack edatas before initializing.
5dead37a9 Allow narenas:default.
dcea2c0f8 Get rid of TSD -> thread event dependency
75dae934a Always initialize TE counters in TSD init
b06dfb9cc Push event handlers to constituent modules
381c97caa Treat postponed prof sample event as new event
abd467493 Extract out per event postponed wait time fetching
f72014d09 Only compute thread event threshold once per trigger
7324c4f85 Break down event init and handler functions
6de77799d Move thread event wait time update to local
733ae918f Extract out per event new wait time fetching
1e2524e15 Do not reset sample wait time when re-initing tdata
855d20f6f Remove outdated comments in thread event
fc052ff72 Migrate counter to use locked int
b543c20a9 Minor update to locked int
f533ab6da Add forking handling for stats
508303077 Add forking handling for prof idump counter
4d970f8bf Add forking handling for counter module
2097e1945 Unify write callback signature
fef9abdcc Cleanup tcache allocation logic
e6cb6919c Consolidate prof inline function headers
d454af90f Remove unused prof_accum field from arena
8be558449 Initialize prof idump counter once rather than once per arena
e10e5059e Make prof_idump_accum() non-inline
039bfd4e3 Do not rollback prof idump counter in arena_prof_promote()
0295aa38a Deduplicate entries in witness error message
f1f8a7549 Let opt.zero propagate to core allocation.
2c09d4349 Add a benchmark of large allocations.
46471ea32 SC: Name the max lookup constant.
79dd0c04e SC: Simplify SC_NPSIZES computation.
fb6cfffd3 Configure: Get rid of LG_QUANTA.
4f8efba82 TSD: Make rtree_ctx a slow-path field.
cd29ebefd Tcache: treat small and large cache bins uniformly
a13fbad37 Tcache: split up fast and slow path data.
7099c6620 Arena: fill in terms of cache_bins.
40e7aed59 TSD: Move in some of the tcache fields.
58a00df23 TSD: Put all fast-path data together.
3589571bf SC: use SC_LG_NGROUP instead of its value.
877af247a QL, QR: Add documentation.
79ae7f921 Rtree: Remove the per-field accessors.
26e9a3103 PA: Simple decay test.
bb6a41852 Emap: Drop szind/slab splitting parameters.
50289750b Extent: Remove szind/slab knowledge.
dc26b3009 Rtree: Clean up compact/non-compact split.
93b99dd14 Extent: Stop passing an edata_cache everywhere.
a4759a191 Ehooks: avoid touching arena_emap_global in tests.
11c47cb13 Extent: Take "bool zero" over "bool *zero".
1a1124462 PA: Take zero as a bool rather than as a bool *.
294b276fc PA: Parameterize emap.  Move emap_global to arena.
f73057727 Eset: Parameterize last globals accesses.
7bb6e2dc0 Eset: take opt_lg_max_active_fit as a parameter.
883ab327c Emap: Move out last edata state touching.
0c96a2f03 Emap: Move out remaining edata modifications.
dfef0df71 Emap: Move edata modification out of emap_remap.
12eb888e5 Edata: Add a ranged bit.
bd4fdf295 Rtree: Pull leaf contents into their own struct.
faec7219b PA: Move in decay initialization.
45671e4a2 PA: Move in retain growth limit setting.
daefde88f PA: Move in mutex stats reading.
07675840a PA: Move in some more internals accesses.
238f3c743 PA: Move in full stats merging.
81c602759 Arena stats: Give it its own "mapped".
506d907e4 PA: Move in basic stats merging.
f29f6090f PA: Add pa_extra.c and put PA forking there.
8164fad40 Stats: Fix edata_cache size merging.
565045ef7 Arena: Make more derived stats non-atomic/locked.
d0c43217b Arena stats: Move retained to PA, use plain ints.
e2cf3fb1a PA: Move in all modifications of mapped.
436789ad9 PA: Make mapped stat atomic.
3c28aa6f1 PA: Move edata_avail stat in, make it non-atomic.
f6bfa3dcc Move extent stats to the PA module.
527dd4cdb PA: Move in nactive counter.
c075fd0bc PA: Minor cleanups and comment fixes.
46a9d7fc0 PA: Move in rest of purging.
2d6eec7b5 PA: Move in decay-all pathway.
65698b7f2 PA: Remove public visibility of some internals.
f012c43be PA: Move in decay_to_limit
103f5feda Move bg thread activity check out of purging core.
3034f4a50 PA: Move in decay_stashed.
aef28b2f8 PA: Move in stash_decayed.
655a09634 Move bg inactivity check out of purge inner loop.
71fc0dc96 PA: Move in remaining page allocation functions.
74958567a PA: have expand take sizes instead of new usize.
5bcc2c2ab PA: Have expand take szind and slab.
0880c2ab9 PA: Have large expands use it.
7be3dea82 PA: Have slab allocations use it.
9f93625c1 PA: Move in arena large allocation functionality.
7624043a4 PA: Add ehook-getting support.
eba35e2e4 Remove extent knowledge of arena.
e77f47a85 Move arena decay getters to PA.
48a2cd6d7 Decay: Add a (mostly stub) test case.
f77cec311 Decay: Take current time as an argument.
bf55e58e6 Rename test/unit/decay -> test/unit/arena_decay.
d1d7e1076 Decay: move in some background_thread accesses.
cdb916ed3 Decay: Add comments for the public API.
8f2193dc8 Decay: Move in arena decay functions.
4d090d23f Decay: Introduce a stub .c file.
7b6288547 Introduce decay module and put decay objects in PA
497836dbc Arena stats: mark edata_avail as derived.
3192d6b77 Extents: Have extent_dalloc_gap take ehooks.
22a0a7b93 Move arena_decay_extent to extent module.
70d12ffa0 PA: Move mapped into pa stats.
6ca918d0c PA: Add a stats comment.
ce8c0d6c0 PA: Move in arena extent_sn counter.
1ada4aef8 PA: Get rid of arena_ind_get calls.
1ad368c8b PA: Move in decay stats.
356aaa7dc Introduce lockedint module.
acd0bf6a2 PA: move in ecache_grow.
32cb7c2f0 PA: Add a stats type.
688fb3eb8 PA: Move in the arena edata_cache.
8433ad84e PA: move in shard initialization.
a24faed56 PA: Move in the ecache_t objects.
585f92505 Move cache index randomization out of extent.
12be9f572 Add a stub PA module -- a page allocator.
c4e9ea8cc Get rid of locks in prof recent test
2deabac07 Get rid of custom iterator for last-N records
a5ddfa7d9 Use ql for prof last-N list
8da6676a0 Don't do reentrant testing in junk tests.
ce17af422 Better structure ql module
4b66297ea Add move constructor to ql module
a62b7ed92 Add emptiness checking to ql module
1dd24ca6d Add rotate functionality to ql module
0dc95a882 Add concat and split functionality to ql module
1ad06aa53 deduplicate insert and delete logic in qr module
c9d56cddf Optimize meld in qr module
0d6d9e858 configure.ac: Put public symbols on one line.
f9aad7a49 Add piping API to buffered writer
09cd79495 Encapsulate buffer allocation failure in buffered writer
a166c2081 Make prof_tctx_t pointer a true prof atomic fence
d936b46d3 Add malloc_conf_2_conf_harder
3b4a03b92 Mac: don't declare system functions as nothrow.
2256ef896 Add option to fetch system thread name on each prof sample
ccdc70a5c Fix: assertion could abort on past failures
b30a5c2f9 Reorganize cpp APIs and suppress unused function warnings
2e5899c12 Stats: Fix tcache_bytes reporting.
a5780598b Remove thread_event_rollback()
ba783b3a0 Remove prof -> thread_event dependency
441d88d1c Rewrite profiling thread event
0dcd57660 Edata cache: atomic fetch-add -> load-store.
99b1291d1 Edata cache: add edata_cache_small_t.
734109d9c Edata cache: add a unit test.
e732344ef Inspect test: Reduce checks when profiling is on.
92485032b Cache bin: improve comments.
d701a085c Fast path: allow low-water mark changes.
397da0386 Cache bin: rewrite to track more state.
fef0b1ffe Cache bin: Remove last internals accesses.
0a2fcfac0 Tcache: Hold cache bin allocation explicitly.
d498a4bb0 Cache bin: Add an emptiness assertion.
6a7aa46ef Cache bin: Add a debug method for init checking.
370c1ea00 Cache bin: Write the unit test in terms of the API
7f5ebd211 Cache bin: set low-water internally.
60113dfe3 Cache bin: Move in initialization code.
44529da85 Cache-bin: Make flush modifications internal
ff6acc6ed Cache bin: simplify names and argument ordering.
e1dcc557d Cache bin: Only take the relevant cache_bin_info_t
1b00d808d cache_bin: Don't let arena see empty position.
d303f3079 cache_bin nflush -> n.
74d36d78e Cache bin: Make ncached_max a query on the info_t.
b66c0973c cache_bin: Don't allow direct internals access.
da68f7329 Move percpu_arena_update.
909c501b0 Cache_bin: Shouldn't know about tcache.
79f1ee2fc Move junking out of arena/tcache code.
b428dceea Config: Warn on void * pointer arithmetic.
22657a5e6 Extents: Silence the "potentially unused" warning.
4a78c6d81 Correct thread event unit test
305b1f6d9 Correction on geometric sampling
6c3491ad3 Tcache: Unify bin flush logic.
9f4fc2738 Ehooks: Fix a build warning.
bc31041ed Cirrus-CI: test on new freebsd releases.
51bd14742 Make use of assert_* in test/unit/thread_event.c
9d2cc3b0f Make use of assert_* in test/unit/prof_recent.c
a88d22ea1 Make use of assert_* in test/unit/inspect.c
0ceb31184 Make use of assert_* in test/unit/buf_writer.c
fa6157938 Add assert_* functionality to tests
21dfa4300 Change assert_* to expect_* in tests
162c2bcf3 Background thread: take base as a parameter.
29436fa05 Break prof and tcache knowledge of b0.
a0c1f4ac5 Rtree: take the base allocator as a parameter.
7013716aa Emap: Take (and propagate) a zeroed parameter.
182192f83 Base: Pull into a single header.
34b7165fd Put szind_t, pszind_t in sz.h.
7e6c8a728 Emap: Standardize naming.
ac50c1e44 Emap: Remove direct access to emap internals.
06e42090f Make jemalloc.c use the emap interface.
f7d9c6c42 Emap: Move in alloc_ctx lookup functionality.
65a54d771 Emap: Move in szind and slab modifications.
9b5d105fc Emap: Move in iealloc.
1d449bd9a Emap: Internal rtree context setting.
08eb1e6c3 Emap: Comments and cleanup
231d1477e Rename emap_split_prepare_t -> emap_prepare_t.
0586a56f3 Emap: Move in merge functionality.
040eac77c Tell edatas their creation arena immediately.
7c7b70206 Emap: Move over metadata splitting logic.
44f5f5360 Emap: Move over deregistration functions.
6513d9d92 Emap: Move over deregistration boundary functions.
9b5ca0b09 Emap: Move in slab interior registration.
d05b61db4 Emap: Move extent boundary registration in.
ca21ce407 Emap: Move in write_acquired from extent.
01f255161 Add emap, for tracking extent locking.
0f686e82a Avoid variable length array with length 0.
68e8ddcaf Add mallctl for dumping last-N profiling records
bc05ecebf Add const qualifier in assert_cmp()
ba0e35411 Rework the bin locking around tcache refill / flush.
7fd22f7b2 Fix Undefined Behavior in hash.h
ca1f08225 Disallow merge across mmap regions to preserve SN / first-fit.
7014f81e1 Add ASSURED_WRITE in mallctl
247688919 Add inspect.c to MSVC filters
9cac3fa8f Encapsulate buffer allocation in buffered writer
bdc08b515 Better naming buffered writer
c6bfe5585 Update the tsd description.
e89652261 Abbreviate thread-event to te.
5e500523a Remove thread_event_boot().
97dd79db6 Implement deallocation events.
536ea6858 NetBSD specific changes: - NetBSD overcommits - When mapping pages, use the maximum of the alignment requested and the   compiled-in PAGE constant which might be greater than the current kernel   pagesize, since we compile binaries with the maximum page size supported   by the architecture (so that they work with all kernels).
974222c62 Add safety check on sdallocx slow / sampled path.
88d9eca84 Enforce page alignment for sampled allocations.
0f552ed67 Don't purge huge extents when decay is off.
38a48e574 Set reentrancy to 1 for tsd_state_purgatory.
88b0e03a4 Implement opt.stats_interval and the _opts options.
d71a145ec Chagne prof_accum_t to counter_accum_t for general purpose.
ea351a7b5 Fix syntax errors in doc for thread.idle.
d92f0175c Introduce NEITHER_READ_NOR_WRITE in ctl.
6a622867c Add "thread.idle" mallctl.
f81341a48 Fallback to unbuffered printing if OOM
cd6e90824 Add stress test for last-N profiling mode
84b28c6a1 Properly handle tdata deletion race
d33120856 Get rid of redundant logic in prof
a72ea0db6 Restructure and correct sleep utility for testing
7b67ed0b5 Get rid of lock overlap in prof_recent_alloc_reset
bd3be8e0b Remove commit parameter to ecache functions.
b8df719d5 No tdata creation for backtracing on dying thread
dab81bd31 Rework and fix the assertions on malloc fastpath.
ad3f3fc56 Fetch time after tctx and only for samples
a5d3dd405 Fix an assertion on extent head state with dss.
2b604a301 Record request size in prof recent entries
40a391408 Define constructor for buffered writer argument
6d8e61690 Make buffered writer an independent module
6b6b4709b Unify buffered writer naming
9a60cf54e Last-N profiling mode
7a27a0594 Delete tdata states used for cleanup
e98ddf798 Fix unlikely condition in arena_prof_info_get()
3fa142cf3 Remove _externs from prof internal header names
112dc36dd Handle log_mtx during forking
ea42174d0 Refactor profiling headers
6342da097 Ehooks: Further optimize default merge case.
f2f2084e7 Ehooks: Assert alloc isn't NULL
e210ccc57 Move extent2 -> extent.
2f4fa8041 Rename extents -> ecache.
56cc56b69 Break extent split dependence on arena.
0aa9769fb Break commit functions' arena dependence
48ec5d435 Break extent_coalesce arena dependence
282a38232 Extent: Break [de]activation's arena dependence.
576d7047a Ecache: Should know its arena_ind.
372042a08 Remove merge dependence on the arena.
439219be7 Remove extent_can_coalesce arena dependency.
9cad5639f Ehooks: remove arena_ind parameter.
57fe99d4b Move relevant index into the ehooks_t itself.
c792f3e4a edata_cache: Remember the associated base_t.
ae23e5f42 Unify extent_alloc_wrapper with the other wrappers.
d8b0b66c6 Put extent_state_t into ecache as well as eset.
98eb40e56 Move delay_coalesce from the eset to the ecache.
bb70df8e5 Extent refactor: Introduce ecache module.
070451624 Ehooks: Add head tracking.
09475bf8a extent_may_dalloc -> ehooks_dalloc_will_fail
785918417 Pull out edata_t caching into its own module.
a7862df61 Rename extent_t to edata_t.
865debda2 Rename extent.h -> edata.h.
a738a66b5 Ehooks: Add some debug zero and addr checks.
4b2e5ee8b Ehooks: Add a "zero" ehook.
d0f187ad3 Arena: Loosen arena_may_have_muzzy restrictions.
ebbb97327 Base: Remove some unnecessary reentrancy guards.
403f2d166 Extents: Split out introspection functionality.
92a511d38 Make extent module hermetic.
e08c581cf Extent: Get rid of extent-specific pre/post reentrancy calls.
39fdc690a Ehooks comments and cleanup.
c8dae890c Extent -> Ehooks: Move over default hooks.
2fe510826 Extent -> Ehooks: Move merge hook.
1fff4d2ee Extent -> Ehooks: Move split hook.
a5b42a1a1 Extent -> Ehooks: Move purge_forced hook.
368baa42e Extent -> Ehooks: Move purge_lazy hook.
f83fdf533 Extent: Clean up a comma
d78fe241a Extent -> Ehooks: Move commit and decommit hooks.
5459ec9da Extent -> Ehooks: Move destroy hook.
bac8e2e5a Extent -> Ehooks: Move dalloc hook.
dc8b4e6e1 Extent -> Ehooks: Move alloc hook.
703fbc0ff Introduce unsafe reentrancy guards.
ae0d8e859 Move extent ehook calls into ehooks
ba8b9ecbc Add ehooks module
837119a94 base_structs.h: Remove some mid-line tabs.
9f6eb0958 Extents: Eagerly initialize extent hooks.
4278f8460 Move extent hook getters/setters to arena.c
9226e1f0d fix opt.thp:never still use THP with base_new
d5031ea82 Allow dallocx and sdallocx after tsd destruction.
4afd709d1 Restructure setters for profiling info
1d01e4c77 Initialization utilities for nstime
dd649c948 Optimize away the tsd_fast() check on fastpath.
1decf958d Fix incorrect usage of cassert.
45836d7fd Pass nstime_t pointer for profiling
7d2bac5a3 Refactor destroy code path for prof_tctx
055478cca Threshold is no longer updated before prof_realloc()
7e3671911 Get rid of old indentation style for prof
dfdd46f6c Refactor prof_tctx_t creation
aa1d71fb7 Rename prof_tctx to alloc_tctx in prof_info_t
5e0b09099 No need to pass usize to prof_tctx_set()
1b1e76acf Disable some spuriously-triggering warnings
a70909b13 Test on all supported release of FreeBSD
5c47a3022 Guard C++ aligned APIs
694537177 Change tsdn to tsd for profiling code path
b55419f9b Restructure profiling
8b2c2a596 Support C++17 over-aligned allocation
9a3c73800 Refactor arena_bin_malloc_hard().
9a7ae3c97 Reduce footprint of bin_t.
cb1a1f4ad Remove the unnecessary alloc_ctx on free_fastpath.
716061710 Add branch hints to free_fastpath.
a787d2f5b Prefer getaffinity() to detect number of CPUs.
04cb7d4d6 Bail out early for muzzy decay.
73510dfd1 Revert "Fix bug in prof_realloc"
3b5eecf10 Fix bug in prof_realloc
e4c36a6f3 Emphasize no modification through thread.allocatedp allowed.
c462753cc Use __forceinline for JEMALLOC_ALWAYS_INLINE on msvc
836d7a7e6 Check for large size first in the uncommon case of malloc.
9c59abe42 Fix a typo in Makefile.
da50d8ce8 Refactor and optimize prof sampling initialization.
bc774a351 Rename tsd->offset_state to tsd->prng_state.
19a51abf3 Avoid arena->offset_state when tsd not available for prng.
d01b425e5 Add -Wimplicit-fallthrough checks if supported
a8b578d53 Remove mallctl test for zero_realloc
43f0ce92d Define general purpose tsd_thread_event_init()
97f93fa0f Pull tcache GC events into thread event handler
198f02e79 Pull prof_accumbytes into thread event handler
152c0ef95 Build a general purpose thread event handler
6924f83cb use SYS_openat when available
de81a4ead Add stats counters for number of zero reallocs
9cfa80594 Realloc: Make behavior of realloc(ptr, 0) configurable.
ee961c231 Merge realloc and rallocx pathways.
bd6e28d6a Guard slabcur fetching in extent_util
4786099a3 Increase column width for global malloc/free rate
05681e387 Optimize cache_bin_alloc_easy for malloc fast path
4fe50bc7d Fix amd64 MSVC warning
4fbbc817c Simplify time setting and getting for prof log
4094b7c03 Limit # of iters of test_bitmap_xfu.
66e07f986 Suppress tdata creation in reentrancy
beb7c16e9 Guard prof_active reset by opt_prof
1df9dd351 Fix je_ prefix issue in test
3d84bd57f Arena: Add helper function arena_get_from_extent.
c97d25575 Eset: Remove temporary declaration.
ce5b128f1 Remove the undefined extent_size_quantize declarations.
821dd53a1 Extent -> Eset: Rename arena members.
e144b21e4 Extent -> Eset: Move fork handling.
77bbb35a9 Extent -> Eset: Move extent fit functions.
1210af9a4 Extent -> Eset: Move insertion and removal.
a42861540 Extents -> Eset: Convert some stats getters.
820f070c6 Move page quantization to sz module.
63d1b7a7a Extents -> Eset: move extents_state_get.
b416b96a3 Extents -> Eset: rename/move extents_init.
e6180fe1b Eset: Add a source file.
4e5e43f22 Rename extents_t -> eset_t.
723ccc6c2 Extents: Split out extent struct.
41187bdfb Extents: Break extent-struct/arena interactions
529cfe2ab Arena: rename arena_structs_b.h -> arena_structs.h
e7cf84a8d Rearrange slab data and constants
d1be488cd Add --with-lg-page=16 to CI.
ac5185f73 Fix tcache bin stack alignment.
b7c7df24b Add max_per_bg_thd stats for per background thread mutexes.
4b76c684b Add "prof.dump_prefix" to override filename prefixes for dumps.
242af439b Rename "prof_dump_seq_mtx" to "prof_dump_filename_mtx".
e06658cb2 check GNU make exists in path
22bc75ee3 Workaround the stringop-overflow check false positives.
93d615180 Pass tsd down to prof_backtrace()
671f120e2 Fix prof_backtrace() reentrancy level
785b84e60 Make cache_bin_sz_t unsigned.
23dc7a7fb Fix index type for cache_bin_alloc_easy.
2abb02ecd Fix MSVC 2015 build, as proposed by @christianaguilera-foundry.
719583f14 Fix large.nflushes in the merged stats.
adce29c88 Optimize for prof_active off
49e6fbce7 Always adjust thread_(de)allocated
57b81c078 Pull thread_(de)allocated out of config_stats
9e031c1d1 Bug fix for prof_active switch
0043e68d4 Track low_water == -1 case explicitly.
937ca1db9 Store ncached_max * ptr_size in tcache_bin_info.
7599c82d4 Redesign the cache bin metadata for fast path.
d2dddfb82 Add hint in the bogus version string.
d6b7995c1 Update INSTALL.md about the default doc build.
e2c758436 Simplify / refactor tcache_dalloc_large.
9c5c2a2c8 Unify the signature of tcache_flush small and large.
28ed9b9a5 Buffer stats printing
eb70fef8c Make compact json format as default
a219cfcda Clear tcache prof_accumbytes in tcache_flush_cache
ad3f7dbfa Buffer prof_log_stop
593484661 Fix large bin index accessed through cache bin descriptor.
22746d3c9 Properly dalloc prof nodes with idalloctm.
8c8466fa6 Add compact json option for emitter
7fc6b1b25 Add buffered writer
39343555d Report stats for tdatas_mtx and prof_dump_mtx
87e2400cb Fix tcaches mutex pre- / post-fork handling.
07ce2434b Refactor profiling
56126d0d2 Refactor prof log
56c8ecffc Correct tsd layout graph

git-subtree-dir: deps/jemalloc
git-subtree-split: 54eaed1d8b56b1aa528be3bdd1877e59c56fa90c
parent 220a0f08
@@ -2,77 +2,41 @@
#define JEMALLOC_INTERNAL_ARENA_STATS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/lockedint.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/pa.h"
#include "jemalloc/internal/sc.h"
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
/*
* In those architectures that support 64-bit atomics, we use atomic updates for
* our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
* externally.
*/
#ifdef JEMALLOC_ATOMIC_U64
typedef atomic_u64_t arena_stats_u64_t;
#else
/* Must hold the arena stats mutex while reading atomically. */
typedef uint64_t arena_stats_u64_t;
#endif
typedef struct arena_stats_large_s arena_stats_large_t;
struct arena_stats_large_s {
/*
* Total number of allocation/deallocation requests served directly by
* the arena.
*/
- arena_stats_u64_t nmalloc;
- arena_stats_u64_t ndalloc;
+ locked_u64_t nmalloc;
+ locked_u64_t ndalloc;
/*
* Number of allocation requests that correspond to this size class.
* This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
- arena_stats_u64_t nrequests; /* Partially derived. */
+ locked_u64_t nrequests; /* Partially derived. */
/*
* Number of tcache fills / flushes for large (similarly, periodically
* merged). Note that there is no large tcache batch-fill currently
* (i.e. only fill 1 at a time); however flush may be batched.
*/
- arena_stats_u64_t nfills; /* Partially derived. */
- arena_stats_u64_t nflushes; /* Partially derived. */
+ locked_u64_t nfills; /* Partially derived. */
+ locked_u64_t nflushes; /* Partially derived. */
/* Current number of allocations of this size class. */
size_t curlextents; /* Derived. */
};
typedef struct arena_stats_decay_s arena_stats_decay_t;
struct arena_stats_decay_s {
/* Total number of purge sweeps. */
arena_stats_u64_t npurge;
/* Total number of madvise calls made. */
arena_stats_u64_t nmadvise;
/* Total number of pages purged. */
arena_stats_u64_t purged;
};
typedef struct arena_stats_extents_s arena_stats_extents_t;
struct arena_stats_extents_s {
/*
* Stats for a given index in the range [0, SC_NPSIZES] in an extents_t.
* We track both bytes and # of extents: two extents in the same bucket
* may have different sizes if adjacent size classes differ by more than
* a page, so bytes cannot always be derived from # of extents.
*/
atomic_zu_t ndirty;
atomic_zu_t dirty_bytes;
atomic_zu_t nmuzzy;
atomic_zu_t muzzy_bytes;
atomic_zu_t nretained;
atomic_zu_t retained_bytes;
};
/*
* Arena stats. Note that fields marked "derived" are not directly maintained
* within the arena code; rather their values are derived during stats merge
@@ -80,43 +44,36 @@ struct arena_stats_extents_s {
*/
typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
- #ifndef JEMALLOC_ATOMIC_U64
- malloc_mutex_t mtx;
- #endif
- /* Number of bytes currently mapped, excluding retained memory. */
- atomic_zu_t mapped; /* Partially derived. */
+ LOCKEDINT_MTX_DECLARE(mtx)
/*
* Number of unused virtual memory bytes currently retained. Retained
* bytes are technically mapped (though always decommitted or purged),
* but they are excluded from the mapped statistic (above).
* resident includes the base stats -- that's why it lives here and not
* in pa_shard_stats_t.
*/
atomic_zu_t retained; /* Derived. */
/* Number of extent_t structs allocated by base, but not being used. */
atomic_zu_t extent_avail;
arena_stats_decay_t decay_dirty;
arena_stats_decay_t decay_muzzy;
+ size_t base; /* Derived. */
+ size_t resident; /* Derived. */
+ size_t metadata_thp; /* Derived. */
+ size_t mapped; /* Derived. */
- atomic_zu_t base; /* Derived. */
atomic_zu_t internal;
- atomic_zu_t resident; /* Derived. */
- atomic_zu_t metadata_thp;
- atomic_zu_t allocated_large; /* Derived. */
- arena_stats_u64_t nmalloc_large; /* Derived. */
- arena_stats_u64_t ndalloc_large; /* Derived. */
- arena_stats_u64_t nfills_large; /* Derived. */
- arena_stats_u64_t nflushes_large; /* Derived. */
- arena_stats_u64_t nrequests_large; /* Derived. */
+ size_t allocated_large; /* Derived. */
+ uint64_t nmalloc_large; /* Derived. */
+ uint64_t ndalloc_large; /* Derived. */
+ uint64_t nfills_large; /* Derived. */
+ uint64_t nflushes_large; /* Derived. */
+ uint64_t nrequests_large; /* Derived. */
/* VM space had to be leaked (undocumented). Normally 0. */
atomic_zu_t abandoned_vm;
/*
* The stats logically owned by the pa_shard in the same arena. This
* lives here only because it's convenient for the purposes of the ctl
* module -- it only knows about the single arena_stats.
*/
pa_shard_stats_t pa_shard_stats;
/* Number of bytes cached in tcache associated with this arena. */
- atomic_zu_t tcache_bytes; /* Derived. */
+ size_t tcache_bytes; /* Derived. */
+ size_t tcache_stashed_bytes; /* Derived. */
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
@@ -134,138 +91,24 @@ arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
assert(((char *)arena_stats)[i] == 0);
}
}
- #ifndef JEMALLOC_ATOMIC_U64
- if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
+ if (LOCKEDINT_MTX_INIT(arena_stats->mtx, "arena_stats",
WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
return true;
}
- #endif
/* Memory is zeroed, so there is no need to clear stats. */
return false;
}
static inline void
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_lock(tsdn, &arena_stats->mtx);
#endif
}
static inline void
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_unlock(tsdn, &arena_stats->mtx);
#endif
}
static inline uint64_t
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_u64(p, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
return *p;
#endif
}
static inline void
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
*p += x;
#endif
}
static inline void
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
assert(r - x <= r);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
*p -= x;
assert(*p + x >= *p);
#endif
}
/*
* Non-atomically sets *dst += src. *dst needs external synchronization.
* This lets us avoid the cost of a fetch_add when its unnecessary (note that
* the types here are atomic).
*/
static inline void
arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
#else
*dst += src;
#endif
}
static inline size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_zu(p, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
return atomic_load_zu(p, ATOMIC_RELAXED);
#endif
}
static inline void
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
#endif
}
static inline void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
assert(r - x <= r);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
#endif
}
/* Like the _u64 variant, needs an externally synchronized *dst. */
static inline void
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
}
static inline void
arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
szind_t szind, uint64_t nrequests) {
- arena_stats_lock(tsdn, arena_stats);
+ LOCKEDINT_MTX_LOCK(tsdn, arena_stats->mtx);
arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
- arena_stats_add_u64(tsdn, arena_stats, &lstats->nrequests, nrequests);
- arena_stats_add_u64(tsdn, arena_stats, &lstats->nflushes, 1);
- arena_stats_unlock(tsdn, arena_stats);
- }
- static inline void
- arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
- arena_stats_lock(tsdn, arena_stats);
- arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
- arena_stats_unlock(tsdn, arena_stats);
+ locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
+ &lstats->nrequests, nrequests);
+ locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
+ &lstats->nflushes, 1);
+ LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx);
}
#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
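The hunk above retires the old per-file scheme in which each 64-bit stats counter was either a relaxed atomic (when JEMALLOC_ATOMIC_U64 is defined) or a plain uint64_t guarded by the arena stats mutex, and adopts the shared lockedint module instead (locked_u64_t, LOCKEDINT_MTX_*, locked_inc_u64). A minimal standalone C sketch of that counter scheme follows; HAVE_ATOMIC_U64, stats_t and stats_inc are illustrative stand-ins, not jemalloc identifiers.

/* Sketch of a counter that is atomic when 64-bit atomics exist, else mutex-guarded. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define HAVE_ATOMIC_U64 1 /* stand-in for jemalloc's JEMALLOC_ATOMIC_U64 */

typedef struct {
#if HAVE_ATOMIC_U64
	_Atomic uint64_t nrequests;	/* updated with relaxed atomics */
#else
	pthread_mutex_t mtx;		/* guards nrequests */
	uint64_t nrequests;		/* only touched while holding mtx */
#endif
} stats_t;

static void
stats_inc(stats_t *stats, uint64_t x) {
#if HAVE_ATOMIC_U64
	atomic_fetch_add_explicit(&stats->nrequests, x, memory_order_relaxed);
#else
	pthread_mutex_lock(&stats->mtx);
	stats->nrequests += x;
	pthread_mutex_unlock(&stats->mtx);
#endif
}

int
main(void) {
	stats_t stats;
#if HAVE_ATOMIC_U64
	atomic_init(&stats.nrequests, 0);
#else
	pthread_mutex_init(&stats.mtx, NULL);
	stats.nrequests = 0;
#endif
	stats_inc(&stats, 3);
	stats_inc(&stats, 4);
#if HAVE_ATOMIC_U64
	printf("%llu\n", (unsigned long long)atomic_load_explicit(
	    &stats.nrequests, memory_order_relaxed));
#else
	printf("%llu\n", (unsigned long long)stats.nrequests);
#endif
	return 0;
}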
- #ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
- #define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
+ #ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_H
+ #define JEMALLOC_INTERNAL_ARENA_STRUCTS_H
#include "jemalloc/internal/arena_stats.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/counter.h"
#include "jemalloc/internal/ecache.h"
#include "jemalloc/internal/edata_cache.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/pa.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/ticker.h"
struct arena_decay_s {
/* Synchronizes all non-atomic fields. */
malloc_mutex_t mtx;
/*
* True if a thread is currently purging the extents associated with
* this decay structure.
*/
bool purging;
/*
* Approximate time in milliseconds from the creation of a set of unused
* dirty pages until an equivalent set of unused dirty pages is purged
* and/or reused.
*/
atomic_zd_t time_ms;
/* time / SMOOTHSTEP_NSTEPS. */
nstime_t interval;
/*
* Time at which the current decay interval logically started. We do
* not actually advance to a new epoch until sometime after it starts
* because of scheduling and computation delays, and it is even possible
* to completely skip epochs. In all cases, during epoch advancement we
* merge all relevant activity into the most recently recorded epoch.
*/
nstime_t epoch;
/* Deadline randomness generator. */
uint64_t jitter_state;
/*
* Deadline for current epoch. This is the sum of interval and per
* epoch jitter which is a uniform random variable in [0..interval).
* Epochs always advance by precise multiples of interval, but we
* randomize the deadline to reduce the likelihood of arenas purging in
* lockstep.
*/
nstime_t deadline;
/*
* Number of unpurged pages at beginning of current epoch. During epoch
* advancement we use the delta between arena->decay_*.nunpurged and
* extents_npages_get(&arena->extents_*) to determine how many dirty
* pages, if any, were generated.
*/
size_t nunpurged;
/*
* Trailing log of how many unused dirty pages were generated during
* each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
* element is the most recent epoch. Corresponding epoch times are
* relative to epoch.
*/
size_t backlog[SMOOTHSTEP_NSTEPS];
/*
* Pointer to associated stats. These stats are embedded directly in
* the arena's stats due to how stats structures are shared between the
* arena and ctl code.
*
* Synchronization: Same as associated arena's stats field. */
arena_stats_decay_t *stats;
/* Peak number of pages in associated extents. Used for debug only. */
uint64_t ceil_npages;
};
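The fields above are easiest to follow with a small numeric sketch. The helper below is illustrative only (the names toy_decay_deadline, toy_prng_range and the ns_t alias are not part of jemalloc); it shows how a per-epoch deadline is derived as epoch + interval plus a uniform jitter in [0..interval), which is what keeps arenas from purging in lockstep.
#include <stdint.h>

/* Illustrative sketch only: timestamps as plain nanosecond integers. */
typedef uint64_t ns_t;

/* Hypothetical stand-in for the PRNG seeded from jitter_state. */
static uint64_t
toy_prng_range(uint64_t *state, uint64_t range) {
    *state = *state * 6364136223846793005ULL + 1442695040888963407ULL;
    return (*state >> 33) % range; /* Assumes range > 0. */
}

/*
 * Epochs advance by exact multiples of interval, but each deadline gets
 * a random offset in [0..interval) so arenas do not purge in lockstep.
 */
static ns_t
toy_decay_deadline(ns_t epoch, ns_t interval, uint64_t *jitter_state) {
    return epoch + interval + (ns_t)toy_prng_range(jitter_state, interval);
}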
struct arena_s {
/*
* Number of threads currently assigned to this arena. Each thread has
......@@ -110,28 +53,10 @@ struct arena_s {
*
* Synchronization: tcache_ql_mtx.
*/
ql_head(tcache_t) tcache_ql;
ql_head(tcache_slow_t) tcache_ql;
ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
malloc_mutex_t tcache_ql_mtx;
/* Synchronization: internal. */
prof_accum_t prof_accum;
/*
* PRNG state for cache index randomization of large allocation base
* pointers.
*
* Synchronization: atomic.
*/
atomic_zu_t offset_state;
/*
* Extent serial number generator state.
*
* Synchronization: atomic.
*/
atomic_zu_t extent_sn_next;
/*
* Represents a dss_prec_t, but atomically.
*
......@@ -139,74 +64,23 @@ struct arena_s {
*/
atomic_u_t dss_prec;
/*
* Number of pages in active extents.
*
* Synchronization: atomic.
*/
atomic_zu_t nactive;
/*
* Extant large allocations.
*
* Synchronization: large_mtx.
*/
extent_list_t large;
edata_list_active_t large;
/* Synchronizes all large allocation/update/deallocation. */
malloc_mutex_t large_mtx;
/*
* Collections of extents that were previously allocated. These are
* used when allocating extents, in an attempt to re-use address space.
*
* Synchronization: internal.
*/
extents_t extents_dirty;
extents_t extents_muzzy;
extents_t extents_retained;
/* The page-level allocator shard this arena uses. */
pa_shard_t pa_shard;
/*
* Decay-based purging state, responsible for scheduling extent state
* transitions.
*
* Synchronization: internal.
*/
arena_decay_t decay_dirty; /* dirty --> muzzy */
arena_decay_t decay_muzzy; /* muzzy --> retained */
/*
* Next extent size class in a growing series to use when satisfying a
* request via the extent hooks (only if opt_retain). This limits the
* number of disjoint virtual memory ranges so that extent merging can
* be effective even if multiple arenas' extent allocation requests are
* highly interleaved.
*
* retain_grow_limit is the max allowed size ind to expand (unless the
* required size is greater). Default is no limit, and controlled
* through mallctl only.
*
* Synchronization: extent_grow_mtx
*/
pszind_t extent_grow_next;
pszind_t retain_grow_limit;
malloc_mutex_t extent_grow_mtx;
/*
* Available extent structures that were allocated via
* base_alloc_extent().
*
* Synchronization: extent_avail_mtx.
*/
extent_tree_t extent_avail;
atomic_zu_t extent_avail_cnt;
malloc_mutex_t extent_avail_mtx;
/*
* bins is used to store heaps of free regions.
*
* Synchronization: internal.
*/
bins_t bins[SC_NBINS];
/*
* A cached copy of base->ind. This can get accessed on hot paths;
* looking it up in base requires an extra pointer hop / cache miss.
*/
unsigned ind;
/*
* Base allocator, from which arena metadata are allocated.
......@@ -216,17 +90,12 @@ struct arena_s {
base_t *base;
/* Used to determine uptime. Read-only after initialization. */
nstime_t create_time;
};
/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
ticker_t decay_ticker;
};
/* Used to pass rtree lookup context down the path. */
struct alloc_ctx_s {
szind_t szind;
bool slab;
/*
* The arena is allocated alongside its bins; really this is a
* dynamically sized array determined by the binshard settings.
*/
bin_t bins[0];
};
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_H */
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
#include "jemalloc/internal/bitmap.h"
struct arena_slab_data_s {
/* Per region allocated/deallocated bitmap. */
bitmap_t bitmap[BITMAP_GROUPS_MAX];
};
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */
......@@ -3,21 +3,14 @@
#include "jemalloc/internal/sc.h"
/* Maximum number of regions in one slab. */
#define LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
/* Default decay times in milliseconds. */
#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
#define MUZZY_DECAY_MS_DEFAULT (0)
/* Number of event ticks between time checks. */
#define DECAY_NTICKS_PER_UPDATE 1000
#define ARENA_DECAY_NTICKS_PER_UPDATE 1000
typedef struct arena_slab_data_s arena_slab_data_t;
typedef struct arena_decay_s arena_decay_t;
typedef struct arena_s arena_t;
typedef struct arena_tdata_s arena_tdata_t;
typedef struct alloc_ctx_s alloc_ctx_t;
typedef enum {
percpu_arena_mode_names_base = 0, /* Used for options processing. */
......@@ -48,4 +41,18 @@ typedef enum {
*/
#define OVERSIZE_THRESHOLD_DEFAULT (8 << 20)
struct arena_config_s {
/* extent hooks to be used for the arena */
extent_hooks_t *extent_hooks;
/*
* Use extent hooks for metadata (base) allocations when true.
*/
bool metadata_use_hooks;
};
typedef struct arena_config_s arena_config_t;
extern const arena_config_t arena_config_default;
#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
......@@ -51,6 +51,27 @@
#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
/*
* Another convenience -- simple atomic helper functions.
*/
#define JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(type, short_type, \
lg_size) \
JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
ATOMIC_INLINE void \
atomic_load_add_store_##short_type(atomic_##short_type##_t *a, \
type inc) { \
type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
type newval = oldval + inc; \
atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
} \
ATOMIC_INLINE void \
atomic_load_sub_store_##short_type(atomic_##short_type##_t *a, \
type inc) { \
type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
type newval = oldval - inc; \
atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
}
/*
* Not all platforms have 64-bit atomics. If we do, this #define exposes that
* fact.
......@@ -67,18 +88,18 @@ JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
*/
JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
JEMALLOC_GENERATE_INT_ATOMICS(uint8_t, u8, 0)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint8_t, u8, 0)
JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint32_t, u32, 2)
#ifdef JEMALLOC_ATOMIC_U64
JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint64_t, u64, 3)
#endif
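As a quick illustration of what the expanded generators provide (a hedged sketch, not code from this tree): the zu variants created by JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR) include the usual relaxed load/store plus a load-add-store helper. Note that the helper is a plain load, add, store sequence rather than an atomic read-modify-write, so it only suits counters that are lock-protected or tolerant of lost updates.
#include "jemalloc/internal/atomic.h"

/* Sketch: bump and read a relaxed size_t counter. */
static inline void
counter_bump(atomic_zu_t *counter, size_t n) {
    /* Generated above: load (relaxed), add n, store (relaxed). */
    atomic_load_add_store_zu(counter, n);
}

static inline size_t
counter_read(atomic_zu_t *counter) {
    return atomic_load_zu(counter, ATOMIC_RELAXED);
}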
#undef ATOMIC_INLINE
......@@ -12,8 +12,9 @@ extern background_thread_info_t *background_thread_info;
bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
bool background_threads_enable(tsd_t *tsd);
bool background_threads_disable(tsd_t *tsd);
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, size_t npages_new);
bool background_thread_is_started(background_thread_info_t* info);
void background_thread_wakeup_early(background_thread_info_t *info,
nstime_t *remaining_sleep);
void background_thread_prefork0(tsdn_t *tsdn);
void background_thread_prefork1(tsdn_t *tsdn);
void background_thread_postfork_parent(tsdn_t *tsdn);
......@@ -27,6 +28,6 @@ extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
#endif
bool background_thread_boot0(void);
bool background_thread_boot1(tsdn_t *tsdn);
bool background_thread_boot1(tsdn_t *tsdn, base_t *base);
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */
......@@ -45,18 +45,4 @@ background_thread_indefinite_sleep(background_thread_info_t *info) {
return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
}
JEMALLOC_ALWAYS_INLINE void
arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
bool is_background_thread) {
if (!background_thread_enabled() || is_background_thread) {
return;
}
background_thread_info_t *info =
arena_background_thread_info_get(arena);
if (background_thread_indefinite_sleep(info)) {
background_thread_interval_check(tsdn, arena,
&arena->decay_dirty, 0);
}
}
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */
......@@ -11,6 +11,17 @@
#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT
#define DEFAULT_NUM_BACKGROUND_THREAD 4
/*
* These exist only as a transitional state. Eventually, deferral should be
* part of the PAI, and each implementation can indicate wait times with more
* specificity.
*/
#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_UNINITIALIZED (-2)
#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_DEFAULT_WHEN_ENABLED 5000
#define BACKGROUND_THREAD_DEFERRED_MIN UINT64_C(0)
#define BACKGROUND_THREAD_DEFERRED_MAX UINT64_MAX
typedef enum {
background_thread_stopped,
background_thread_started,
......@@ -48,6 +59,7 @@ struct background_thread_stats_s {
size_t num_threads;
uint64_t num_runs;
nstime_t run_interval;
mutex_prof_data_t max_counter_per_bg_thd;
};
typedef struct background_thread_stats_s background_thread_stats_t;
#ifndef JEMALLOC_INTERNAL_BASE_H
#define JEMALLOC_INTERNAL_BASE_H
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/mutex.h"
enum metadata_thp_mode_e {
metadata_thp_disabled = 0,
/*
* Lazily enable hugepage for metadata. To avoid high RSS caused by THP
* + low usage arena (i.e. THP becomes a significant percentage), the
* "auto" option only starts using THP after a base allocator used up
* the first THP region. Starting from the second hugepage (in a single
* arena), "auto" behaves the same as "always", i.e. madvise hugepage
* right away.
*/
metadata_thp_auto = 1,
metadata_thp_always = 2,
metadata_thp_mode_limit = 3
};
typedef enum metadata_thp_mode_e metadata_thp_mode_t;
#define METADATA_THP_DEFAULT metadata_thp_disabled
extern metadata_thp_mode_t opt_metadata_thp;
extern const char *metadata_thp_mode_names[];
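The mode is normally chosen at startup, e.g. with MALLOC_CONF="metadata_thp:auto", and can be read back through the opt.metadata_thp mallctl as a string. The snippet below is a hedged sketch of that read; depending on how jemalloc was configured, the public symbols may carry a prefix (e.g. je_mallctl).
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
    const char *mode;
    size_t sz = sizeof(mode);
    /* Read-only option: expected values are "disabled", "auto", "always". */
    if (mallctl("opt.metadata_thp", &mode, &sz, NULL, 0) == 0) {
        printf("metadata_thp: %s\n", mode);
    }
    return 0;
}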
/* Embedded at the beginning of every block of base-managed virtual memory. */
typedef struct base_block_s base_block_t;
struct base_block_s {
/* Total size of block's virtual memory mapping. */
size_t size;
/* Next block in list of base's blocks. */
base_block_t *next;
/* Tracks unused trailing space. */
edata_t edata;
};
typedef struct base_s base_t;
struct base_s {
/*
* User-configurable extent hook functions.
*/
ehooks_t ehooks;
/*
* User-configurable extent hook functions for metadata allocations.
*/
ehooks_t ehooks_base;
/* Protects base_alloc() and base_stats_get() operations. */
malloc_mutex_t mtx;
/* Using THP when true (metadata_thp auto mode). */
bool auto_thp_switched;
/*
* Most recent size class in the series of increasingly large base
* extents. Logarithmic spacing between subsequent allocations ensures
* that the total number of distinct mappings remains small.
*/
pszind_t pind_last;
/* Serial number generation state. */
size_t extent_sn_next;
/* Chain of all blocks associated with base. */
base_block_t *blocks;
/* Heap of extents that track unused trailing space within blocks. */
edata_heap_t avail[SC_NSIZES];
/* Stats, only maintained if config_stats. */
size_t allocated;
size_t resident;
size_t mapped;
/* Number of THP regions touched. */
size_t n_thp;
};
static inline unsigned
base_ind_get(const base_t *base) {
return ehooks_ind_get(&base->ehooks);
}
static inline bool
metadata_thp_enabled(void) {
return (opt_metadata_thp != metadata_thp_disabled);
}
base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind,
const extent_hooks_t *extent_hooks, bool metadata_use_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
ehooks_t *base_ehooks_get(base_t *base);
ehooks_t *base_ehooks_get_for_metadata(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
size_t *resident, size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);
#endif /* JEMALLOC_INTERNAL_BASE_H */
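Putting the declarations above together, here is a minimal internal usage sketch (assumptions: the surrounding jemalloc internals are available, hooks is a valid extent_hooks_t supplied by the caller, and CACHELINE is jemalloc's cacheline-size constant). Base allocations are never freed individually; base_delete() releases every block at once.
#include "jemalloc/internal/base.h"

/* Sketch: create a base, carve one metadata allocation out of it, tear it down. */
static void
base_usage_sketch(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *hooks) {
    base_t *base = base_new(tsdn, ind, hooks,
        /* metadata_use_hooks */ true);
    if (base == NULL) {
        return;
    }
    void *meta = base_alloc(tsdn, base, 256, CACHELINE);
    (void)meta; /* Reclaimed wholesale by base_delete(). */
    base_delete(tsdn, base);
}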
#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
#define JEMALLOC_INTERNAL_BASE_EXTERNS_H
extern metadata_thp_mode_t opt_metadata_thp;
extern const char *metadata_thp_mode_names[];
base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
extent_hooks_t *base_extent_hooks_get(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
size_t *resident, size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);
#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */
#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
#define JEMALLOC_INTERNAL_BASE_INLINES_H
static inline unsigned
base_ind_get(const base_t *base) {
return base->ind;
}
static inline bool
metadata_thp_enabled(void) {
return (opt_metadata_thp != metadata_thp_disabled);
}
#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */
#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H
#define JEMALLOC_INTERNAL_BASE_STRUCTS_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sc.h"
/* Embedded at the beginning of every block of base-managed virtual memory. */
struct base_block_s {
/* Total size of block's virtual memory mapping. */
size_t size;
/* Next block in list of base's blocks. */
base_block_t *next;
/* Tracks unused trailing space. */
extent_t extent;
};
struct base_s {
/* Associated arena's index within the arenas array. */
unsigned ind;
/*
* User-configurable extent hook functions. Points to an
* extent_hooks_t.
*/
atomic_p_t extent_hooks;
/* Protects base_alloc() and base_stats_get() operations. */
malloc_mutex_t mtx;
/* Using THP when true (metadata_thp auto mode). */
bool auto_thp_switched;
/*
* Most recent size class in the series of increasingly large base
* extents. Logarithmic spacing between subsequent allocations ensures
* that the total number of distinct mappings remains small.
*/
pszind_t pind_last;
/* Serial number generation state. */
size_t extent_sn_next;
/* Chain of all blocks associated with base. */
base_block_t *blocks;
/* Heap of extents that track unused trailing space within blocks. */
extent_heap_t avail[SC_NSIZES];
/* Stats, only maintained if config_stats. */
size_t allocated;
size_t resident;
size_t mapped;
/* Number of THP regions touched. */
size_t n_thp;
};
#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */
#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
#define JEMALLOC_INTERNAL_BASE_TYPES_H
typedef struct base_block_s base_block_t;
typedef struct base_s base_t;
#define METADATA_THP_DEFAULT metadata_thp_disabled
/*
* In auto mode, arenas switch to huge pages for the base allocator on the
* second base block. a0 switches to thp on the 5th block (after 20 megabytes
* of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
*/
#define BASE_AUTO_THP_THRESHOLD 2
#define BASE_AUTO_THP_THRESHOLD_A0 5
typedef enum {
metadata_thp_disabled = 0,
/*
* Lazily enable hugepage for metadata. To avoid high RSS caused by THP
* + low usage arena (i.e. THP becomes a significant percentage), the
* "auto" option only starts using THP after a base allocator used up
* the first THP region. Starting from the second hugepage (in a single
* arena), "auto" behaves the same as "always", i.e. madvise hugepage
* right away.
*/
metadata_thp_auto = 1,
metadata_thp_always = 2,
metadata_thp_mode_limit = 3
} metadata_thp_mode_t;
#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */
......@@ -3,8 +3,7 @@
#include "jemalloc/internal/bin_stats.h"
#include "jemalloc/internal/bin_types.h"
#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sc.h"
......@@ -12,74 +11,34 @@
* A bin contains a set of extents that are currently being used for slab
* allocations.
*/
/*
* Read-only information associated with each element of arena_t's bins array
* is stored separately, partly to reduce memory usage (only one copy, rather
* than one per arena), but mainly to avoid false cacheline sharing.
*
* Each slab has the following layout:
*
* /--------------------\
* | region 0 |
* |--------------------|
* | region 1 |
* |--------------------|
* | ... |
* | ... |
* | ... |
* |--------------------|
* | region nregs-1 |
* \--------------------/
*/
typedef struct bin_info_s bin_info_t;
struct bin_info_s {
/* Size of regions in a slab for this bin's size class. */
size_t reg_size;
/* Total size of a slab for this bin's size class. */
size_t slab_size;
/* Total number of regions in a slab for this bin's size class. */
uint32_t nregs;
/* Number of sharded bins in each arena for this size class. */
uint32_t n_shards;
/*
* Metadata used to manipulate bitmaps for slabs associated with this
* bin.
*/
bitmap_info_t bitmap_info;
};
extern bin_info_t bin_infos[SC_NBINS];
typedef struct bin_s bin_t;
struct bin_s {
/* All operations on bin_t fields require lock ownership. */
malloc_mutex_t lock;
/*
* Bin statistics. These get touched every time the lock is acquired,
* so put them close by in the hopes of getting some cache locality.
*/
bin_stats_t stats;
/*
* Current slab being used to service allocations of this bin's size
* class. slabcur is independent of slabs_{nonfull,full}; whenever
* slabcur is reassigned, the previous slab must be deallocated or
* inserted into slabs_{nonfull,full}.
*/
extent_t *slabcur;
edata_t *slabcur;
/*
* Heap of non-full slabs. This heap is used to assure that new
* allocations come from the non-full slab that is oldest/lowest in
* memory.
*/
extent_heap_t slabs_nonfull;
edata_heap_t slabs_nonfull;
/* List used to track full slabs. */
extent_list_t slabs_full;
/* Bin statistics. */
bin_stats_t stats;
edata_list_active_t slabs_full;
};
/* A set of sharded bins of the same size class. */
......@@ -92,7 +51,6 @@ struct bins_s {
void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]);
bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size,
size_t end_size, size_t nshards);
void bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);
/* Initializes a bin to empty. Returns true on error. */
bool bin_init(bin_t *bin);
......@@ -104,19 +62,20 @@ void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
/* Stats. */
static inline void
bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
bin_stats_merge(tsdn_t *tsdn, bin_stats_data_t *dst_bin_stats, bin_t *bin) {
malloc_mutex_lock(tsdn, &bin->lock);
malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
dst_bin_stats->nmalloc += bin->stats.nmalloc;
dst_bin_stats->ndalloc += bin->stats.ndalloc;
dst_bin_stats->nrequests += bin->stats.nrequests;
dst_bin_stats->curregs += bin->stats.curregs;
dst_bin_stats->nfills += bin->stats.nfills;
dst_bin_stats->nflushes += bin->stats.nflushes;
dst_bin_stats->nslabs += bin->stats.nslabs;
dst_bin_stats->reslabs += bin->stats.reslabs;
dst_bin_stats->curslabs += bin->stats.curslabs;
dst_bin_stats->nonfull_slabs += bin->stats.nonfull_slabs;
bin_stats_t *stats = &dst_bin_stats->stats_data;
stats->nmalloc += bin->stats.nmalloc;
stats->ndalloc += bin->stats.ndalloc;
stats->nrequests += bin->stats.nrequests;
stats->curregs += bin->stats.curregs;
stats->nfills += bin->stats.nfills;
stats->nflushes += bin->stats.nflushes;
stats->nslabs += bin->stats.nslabs;
stats->reslabs += bin->stats.reslabs;
stats->curslabs += bin->stats.curslabs;
stats->nonfull_slabs += bin->stats.nonfull_slabs;
malloc_mutex_unlock(tsdn, &bin->lock);
}
#ifndef JEMALLOC_INTERNAL_BIN_INFO_H
#define JEMALLOC_INTERNAL_BIN_INFO_H
#include "jemalloc/internal/bitmap.h"
/*
* Read-only information associated with each element of arena_t's bins array
* is stored separately, partly to reduce memory usage (only one copy, rather
* than one per arena), but mainly to avoid false cacheline sharing.
*
* Each slab has the following layout:
*
* /--------------------\
* | region 0 |
* |--------------------|
* | region 1 |
* |--------------------|
* | ... |
* | ... |
* | ... |
* |--------------------|
* | region nregs-1 |
* \--------------------/
*/
typedef struct bin_info_s bin_info_t;
struct bin_info_s {
/* Size of regions in a slab for this bin's size class. */
size_t reg_size;
/* Total size of a slab for this bin's size class. */
size_t slab_size;
/* Total number of regions in a slab for this bin's size class. */
uint32_t nregs;
/* Number of sharded bins in each arena for this size class. */
uint32_t n_shards;
/*
* Metadata used to manipulate bitmaps for slabs associated with this
* bin.
*/
bitmap_info_t bitmap_info;
};
extern bin_info_t bin_infos[SC_NBINS];
void bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);
#endif /* JEMALLOC_INTERNAL_BIN_INFO_H */
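To make the geometry concrete (hedged example with made-up numbers rather than a real size class): a bin whose regions are 128 bytes and whose slab is one 4 KiB page has nregs = 4096 / 128 = 32, and bitmap_info then describes a 32-bit allocated/deallocated bitmap. The sketch below just dumps the static table declared above.
#include <stdio.h>
#include "jemalloc/internal/bin_info.h"

/* Sketch: print the slab geometry for each small size class. */
static void
print_bin_geometry(void) {
    for (unsigned i = 0; i < SC_NBINS; i++) {
        const bin_info_t *info = &bin_infos[i];
        printf("bin %u: reg_size=%zu slab_size=%zu nregs=%u shards=%u\n",
            i, info->reg_size, info->slab_size, info->nregs,
            info->n_shards);
    }
}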
......@@ -47,8 +47,11 @@ struct bin_stats_s {
/* Current size of nonfull slabs heap in this bin. */
size_t nonfull_slabs;
};
typedef struct bin_stats_data_s bin_stats_data_t;
struct bin_stats_data_s {
bin_stats_t stats_data;
mutex_prof_data_t mutex_data;
};
#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */
......@@ -3,7 +3,7 @@
#include "jemalloc/internal/sc.h"
#define BIN_SHARDS_MAX (1 << EXTENT_BITS_BINSHARD_WIDTH)
#define BIN_SHARDS_MAX (1 << EDATA_BITS_BINSHARD_WIDTH)
#define N_BIN_SHARDS_DEFAULT 1
/* Used in TSD static initializer only. Real init in arena_bind(). */
......@@ -3,144 +3,383 @@
#include "jemalloc/internal/assert.h"
#define BIT_UTIL_INLINE static inline
/* Sanity check. */
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
|| !defined(JEMALLOC_INTERNAL_FFS)
# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
#endif
/*
* Unlike the builtins and posix ffs functions, our ffs requires a non-zero
* input, and returns the position of the lowest bit set (as opposed to the
* posix versions, which return 1 larger than that position and use a return
* value of zero as a sentinel). This tends to simplify logic in callers, and
* allows for consistency with the builtins we build fls on top of.
*/
static inline unsigned
ffs_llu(unsigned long long x) {
util_assume(x != 0);
return JEMALLOC_INTERNAL_FFSLL(x) - 1;
}
static inline unsigned
ffs_lu(unsigned long x) {
util_assume(x != 0);
return JEMALLOC_INTERNAL_FFSL(x) - 1;
}
static inline unsigned
ffs_u(unsigned x) {
util_assume(x != 0);
return JEMALLOC_INTERNAL_FFS(x) - 1;
}
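A concrete comparison of the two conventions (hedged illustration; ffsl is the POSIX function from <strings.h>, and the header above is assumed to be included):
#include <assert.h>
#include <strings.h>

static void
ffs_convention_example(void) {
    /* 0x28 = 0b101000: the lowest set bit is bit 3. */
    assert(ffsl(0x28) == 4);   /* POSIX: 1-based, 0 means "no bit set". */
    assert(ffs_lu(0x28) == 3); /* jemalloc: 0-based, input must be nonzero. */
}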
#define DO_FLS_SLOW(x, suffix) do { \
util_assume(x != 0); \
x |= (x >> 1); \
x |= (x >> 2); \
x |= (x >> 4); \
x |= (x >> 8); \
x |= (x >> 16); \
if (sizeof(x) > 4) { \
/* \
* If sizeof(x) is 4, then the expression "x >> 32" \
* will generate compiler warnings even if the code \
* never executes. This circumvents the warning, and \
* gets compiled out in optimized builds. \
*/ \
int constant_32 = sizeof(x) * 4; \
x |= (x >> constant_32); \
} \
x++; \
if (x == 0) { \
return 8 * sizeof(x) - 1; \
} \
return ffs_##suffix(x) - 1; \
} while(0)
static inline unsigned
fls_llu_slow(unsigned long long x) {
DO_FLS_SLOW(x, llu);
}
#ifdef JEMALLOC_INTERNAL_POPCOUNTL
BIT_UTIL_INLINE unsigned
static inline unsigned
fls_lu_slow(unsigned long x) {
DO_FLS_SLOW(x, lu);
}
static inline unsigned
fls_u_slow(unsigned x) {
DO_FLS_SLOW(x, u);
}
#undef DO_FLS_SLOW
#ifdef JEMALLOC_HAVE_BUILTIN_CLZ
static inline unsigned
fls_llu(unsigned long long x) {
util_assume(x != 0);
/*
* Note that the xor here is more naturally written as subtraction; the
* last bit set is the number of bits in the type minus the number of
* leading zero bits. But GCC implements that as:
* bsr edi, edi
* mov eax, 31
* xor edi, 31
* sub eax, edi
* If we write it as xor instead, then we get
* bsr eax, edi
* as desired.
*/
return (8 * sizeof(x) - 1) ^ __builtin_clzll(x);
}
static inline unsigned
fls_lu(unsigned long x) {
util_assume(x != 0);
return (8 * sizeof(x) - 1) ^ __builtin_clzl(x);
}
static inline unsigned
fls_u(unsigned x) {
util_assume(x != 0);
return (8 * sizeof(x) - 1) ^ __builtin_clz(x);
}
#elif defined(_MSC_VER)
#if LG_SIZEOF_PTR == 3
#define DO_BSR64(bit, x) _BitScanReverse64(&bit, x)
#else
/*
* This never actually runs; we're just dodging a compiler error for the
* never-taken branch where sizeof(void *) == 8.
*/
#define DO_BSR64(bit, x) bit = 0; unreachable()
#endif
#define DO_FLS(x) do { \
if (x == 0) { \
return 8 * sizeof(x); \
} \
unsigned long bit; \
if (sizeof(x) == 4) { \
_BitScanReverse(&bit, (unsigned)x); \
return (unsigned)bit; \
} \
if (sizeof(x) == 8 && sizeof(void *) == 8) { \
DO_BSR64(bit, x); \
return (unsigned)bit; \
} \
if (sizeof(x) == 8 && sizeof(void *) == 4) { \
/* Dodge a compiler warning, as above. */ \
int constant_32 = sizeof(x) * 4; \
if (_BitScanReverse(&bit, \
(unsigned)(x >> constant_32))) { \
return 32 + (unsigned)bit; \
} else { \
_BitScanReverse(&bit, (unsigned)x); \
return (unsigned)bit; \
} \
} \
unreachable(); \
} while (0)
static inline unsigned
fls_llu(unsigned long long x) {
DO_FLS(x);
}
static inline unsigned
fls_lu(unsigned long x) {
DO_FLS(x);
}
static inline unsigned
fls_u(unsigned x) {
DO_FLS(x);
}
#undef DO_FLS
#undef DO_BSR64
#else
static inline unsigned
fls_llu(unsigned long long x) {
return fls_llu_slow(x);
}
static inline unsigned
fls_lu(unsigned long x) {
return fls_lu_slow(x);
}
static inline unsigned
fls_u(unsigned x) {
return fls_u_slow(x);
}
#endif
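The fls_* helpers mirror the ffs convention above: non-zero input, 0-based result, highest set bit instead of lowest. A small hedged illustration:
#include <assert.h>

static void
fls_convention_example(void) {
    assert(fls_u(1U) == 0);          /* Only bit 0 is set. */
    assert(fls_u(0x28U) == 5);       /* 0b101000: highest set bit is bit 5. */
    assert(fls_lu(1UL << 20) == 20);
}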
#if LG_SIZEOF_LONG_LONG > 3
# error "Haven't implemented popcount for 16-byte ints."
#endif
#define DO_POPCOUNT(x, type) do { \
/* \
* Algorithm from an old AMD optimization reference manual. \
* We're putting a little bit more work than you might expect \
* into the no-intrinsic case, since we only support the \
* GCC intrinsics spelling of popcount (for now). Detecting \
* whether or not the popcount builtin is actually useable in \
* MSVC is nontrivial. \
*/ \
\
type bmul = (type)0x0101010101010101ULL; \
\
/* \
* Replace each 2 bits with the sideways sum of the original \
* values. 0x5 = 0b0101. \
* \
* You might expect this to be: \
* x = (x & 0x55...) + ((x >> 1) & 0x55...). \
* That costs an extra mask relative to this, though. \
*/ \
x = x - ((x >> 1) & (0x55U * bmul)); \
/* Replace each 4 bits with their sideways sum. 0x3 = 0b0011. */\
x = (x & (bmul * 0x33U)) + ((x >> 2) & (bmul * 0x33U)); \
/* \
* Replace each 8 bits with their sideways sum. Note that we \
* can't overflow within each 4-bit sum here, so we can skip \
* the initial mask. \
*/ \
x = (x + (x >> 4)) & (bmul * 0x0FU); \
/* \
* None of the partial sums in this multiplication (viewed in \
* base-256) can overflow into the next digit. So the least \
* significant byte of the product will be the least \
* significant byte of the original value, the second least \
* significant byte will be the sum of the two least \
* significant bytes of the original value, and so on. \
* Importantly, the high byte will be the byte-wise sum of all \
* the bytes of the original value. \
*/ \
x = x * bmul; \
x >>= ((sizeof(x) - 1) * 8); \
return (unsigned)x; \
} while(0)
static inline unsigned
popcount_u_slow(unsigned bitmap) {
DO_POPCOUNT(bitmap, unsigned);
}
static inline unsigned
popcount_lu_slow(unsigned long bitmap) {
DO_POPCOUNT(bitmap, unsigned long);
}
static inline unsigned
popcount_llu_slow(unsigned long long bitmap) {
DO_POPCOUNT(bitmap, unsigned long long);
}
#undef DO_POPCOUNT
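A quick sanity check of the bit-twiddling above (hedged example; the slow variants should agree with the intrinsic-backed wrappers defined next):
#include <assert.h>

static void
popcount_example(void) {
    assert(popcount_u_slow(0U) == 0);
    assert(popcount_u_slow(0xF0F0U) == 8);
    assert(popcount_llu_slow(~0ULL) == 64);
}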
static inline unsigned
popcount_u(unsigned bitmap) {
#ifdef JEMALLOC_INTERNAL_POPCOUNT
return JEMALLOC_INTERNAL_POPCOUNT(bitmap);
#else
return popcount_u_slow(bitmap);
#endif
}
static inline unsigned
popcount_lu(unsigned long bitmap) {
return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
#ifdef JEMALLOC_INTERNAL_POPCOUNTL
return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
#else
return popcount_lu_slow(bitmap);
#endif
}
static inline unsigned
popcount_llu(unsigned long long bitmap) {
#ifdef JEMALLOC_INTERNAL_POPCOUNTLL
return JEMALLOC_INTERNAL_POPCOUNTLL(bitmap);
#else
return popcount_llu_slow(bitmap);
#endif
}
/*
* Clears the first (lowest) set bit in *bitmap, and returns the position
* of that bit. *bitmap *must not* be 0.
*/
static inline size_t
cfs_lu(unsigned long* bitmap) {
util_assume(*bitmap != 0);
size_t bit = ffs_lu(*bitmap);
*bitmap ^= ZU(1) << bit;
return bit;
}
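Concretely (hedged example), cfs_lu() pops the lowest set bit and reports its 0-based index:
#include <assert.h>

static void
cfs_example(void) {
    unsigned long bits = 0x14; /* 0b10100: bits 2 and 4 set. */
    assert(cfs_lu(&bits) == 2);
    assert(bits == 0x10);      /* Bit 2 has been cleared. */
    assert(cfs_lu(&bits) == 4);
    assert(bits == 0);
}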
static inline unsigned
ffs_zu(size_t x) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
return ffs_u(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
return ffs_lu(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
return ffs_llu(x);
#else
#error No implementation for size_t ffs()
#endif
}
static inline unsigned
fls_zu(size_t x) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
return fls_u(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
return fls_lu(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
return fls_llu(x);
#else
#error No implementation for size_t fls()
#endif
}
static inline unsigned
ffs_u64(uint64_t x) {
#if LG_SIZEOF_LONG == 3
return ffs_lu(x);
#elif LG_SIZEOF_LONG_LONG == 3
return ffs_llu(x);
#else
#error No implementation for 64-bit ffs()
#endif
}
static inline unsigned
fls_u64(uint64_t x) {
#if LG_SIZEOF_LONG == 3
return fls_lu(x);
#elif LG_SIZEOF_LONG_LONG == 3
return fls_llu(x);
#else
#error No implementation for 64-bit fls()
#endif
}
static inline unsigned
ffs_u32(uint32_t x) {
#if LG_SIZEOF_INT == 2
return ffs_u(x);
#else
#error No implementation for 32-bit ffs()
#endif
return ffs_u(x);
}
static inline unsigned
fls_u32(uint32_t x) {
#if LG_SIZEOF_INT == 2
return fls_u(x);
#else
#error No implementation for 32-bit fls()
#endif
return fls_u(x);
}
static inline uint64_t
pow2_ceil_u64(uint64_t x) {
if (unlikely(x <= 1)) {
return x;
}
size_t msb_on_index = fls_u64(x - 1);
/*
* Range-check; it's on the callers to ensure that the result of this
* call won't overflow.
*/
assert(msb_on_index < 63);
return 1ULL << (msb_on_index + 1);
}
static inline uint32_t
pow2_ceil_u32(uint32_t x) {
if (unlikely(x <= 1)) {
return x;
}
size_t msb_on_index = fls_u32(x - 1);
/* As above. */
assert(msb_on_index < 31);
return 1U << (msb_on_index + 1);
}
/* Compute the smallest power of 2 that is >= x. */
static inline size_t
pow2_ceil_zu(size_t x) {
#if (LG_SIZEOF_PTR == 3)
return pow2_ceil_u64(x);
......@@ -149,77 +388,21 @@ pow2_ceil_zu(size_t x) {
#endif
}
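The pow2_ceil_* family rounds up to the next power of two, returning the input unchanged when it already is one (hedged illustration):
#include <assert.h>

static void
pow2_ceil_example(void) {
    assert(pow2_ceil_u32(1) == 1);
    assert(pow2_ceil_u32(17) == 32);
    assert(pow2_ceil_u64(4096) == 4096); /* Already a power of two. */
    assert(pow2_ceil_zu((size_t)100) == 128);
}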
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
size_t ret;
assert(x != 0);
asm ("bsr %1, %0"
: "=r"(ret) // Outputs.
: "r"(x) // Inputs.
);
assert(ret < UINT_MAX);
return (unsigned)ret;
}
#elif (defined(_MSC_VER))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
unsigned long ret;
assert(x != 0);
#if (LG_SIZEOF_PTR == 3)
_BitScanReverse64(&ret, x);
#elif (LG_SIZEOF_PTR == 2)
_BitScanReverse(&ret, x);
#else
# error "Unsupported type size for lg_floor()"
#endif
assert(ret < UINT_MAX);
return (unsigned)ret;
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
assert(x != 0);
#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x);
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x);
#else
# error "Unsupported type size for lg_floor()"
#endif
}
#else
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
assert(x != 0);
x |= (x >> 1);
x |= (x >> 2);
x |= (x >> 4);
x |= (x >> 8);
x |= (x >> 16);
#if (LG_SIZEOF_PTR == 3)
x |= (x >> 32);
#endif
if (x == SIZE_T_MAX) {
return (8 << LG_SIZEOF_PTR) - 1;
}
x++;
return ffs_zu(x) - 2;
}
#endif
static inline unsigned
lg_floor(size_t x) {
util_assume(x != 0);
#if (LG_SIZEOF_PTR == 3)
return fls_u64(x);
#else
return fls_u32(x);
#endif
}
BIT_UTIL_INLINE unsigned
static inline unsigned
lg_ceil(size_t x) {
return lg_floor(x) + ((x & (x - 1)) == 0 ? 0 : 1);
}
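lg_floor() rounds the base-2 logarithm down, lg_ceil() rounds it up, and the two coincide exactly on powers of two (hedged illustration):
#include <assert.h>

static void
lg_example(void) {
    assert(lg_floor(10) == 3 && lg_ceil(10) == 4);
    assert(lg_floor(64) == 6 && lg_ceil(64) == 6);
    assert(lg_floor(1) == 0 && lg_ceil(1) == 0);
}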
#undef BIT_UTIL_INLINE
/* A compile-time version of lg_floor and lg_ceil. */
#define LG_FLOOR_1(x) 0
#define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1))
#ifndef JEMALLOC_INTERNAL_BITMAP_H
#define JEMALLOC_INTERNAL_BITMAP_H
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/sc.h"
......@@ -9,9 +8,9 @@ typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#if LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
#if SC_LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
/* Maximum bitmap bit count is determined by maximum regions per slab. */
# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
# define LG_BITMAP_MAXBITS SC_LG_SLAB_MAXREGS
#else
/* Maximum bitmap bit count is determined by number of extent size classes. */
# define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES)
......@@ -273,7 +272,7 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
}
return bitmap_ffu(bitmap, binfo, sib_base);
}
bit += ((size_t)ffs_lu(group_masked)) <<
(lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
}
assert(bit >= min_bit);
......@@ -285,9 +284,9 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
- 1);
size_t bit;
do {
if (g != 0) {
bit = ffs_lu(g);
return (i << LG_BITMAP_GROUP_NBITS) + bit;
}
i++;
g = bitmap[i];
......@@ -308,20 +307,20 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
#ifdef BITMAP_USE_TREE
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
bit = ffs_lu(g);
while (i > 0) {
i--;
g = bitmap[binfo->levels[i].group_offset + bit];
bit = (bit << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
}
#else
i = 0;
g = bitmap[0];
while (g == 0) {
i++;
g = bitmap[i];
}
bit = (i << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
#endif
bitmap_set(bitmap, binfo, bit);
return bit;
#ifndef JEMALLOC_INTERNAL_BUF_WRITER_H
#define JEMALLOC_INTERNAL_BUF_WRITER_H
/*
* Note: when using the buffered writer, cbopaque is passed to write_cb only
* when the buffer is flushed. It would make a difference if cbopaque points
* to something that's changing for each write_cb call, or something that
* affects write_cb in a way dependent on the content of the output string.
* However, the most typical use case in practice is that cbopaque points to
* some "option-like" content for the write_cb, so it doesn't matter.
*/
typedef struct {
write_cb_t *write_cb;
void *cbopaque;
char *buf;
size_t buf_size;
size_t buf_end;
bool internal_buf;
} buf_writer_t;
bool buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer,
write_cb_t *write_cb, void *cbopaque, char *buf, size_t buf_len);
void buf_writer_flush(buf_writer_t *buf_writer);
write_cb_t buf_writer_cb;
void buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer);
typedef ssize_t (read_cb_t)(void *read_cbopaque, void *buf, size_t limit);
void buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
void *read_cbopaque);
#endif /* JEMALLOC_INTERNAL_BUF_WRITER_H */
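A minimal usage sketch based on the declarations above (hedged; report_emit is a hypothetical producer that writes text through a write_cb_t, and the usual jemalloc convention of true meaning failure is assumed for buf_writer_init): buffer the producer's many small writes in a stack buffer, then flush once at the end.
#include "jemalloc/internal/buf_writer.h"

/* Hypothetical producer that emits text through a write callback. */
extern void report_emit(write_cb_t *write_cb, void *cbopaque);

static void
buffered_report(tsdn_t *tsdn, write_cb_t *final_cb, void *final_opaque) {
    buf_writer_t bw;
    char buf[4096];
    if (buf_writer_init(tsdn, &bw, final_cb, final_opaque, buf, sizeof(buf))) {
        return; /* Assumed error convention: true == failure. */
    }
    /* buf_writer_cb batches the producer's small writes into buf. */
    report_emit(buf_writer_cb, &bw);
    /* Flushes any remaining bytes and releases internal resources. */
    buf_writer_terminate(tsdn, &bw);
}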